repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
with-git/tensorflow | tensorflow/examples/tutorials/mnist/fully_connected_feed.py | 33 | 9650 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains and Evaluates the MNIST network using a feed dictionary."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=missing-docstring
import argparse
import os
import sys
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist
# Basic model parameters as external flags.
FLAGS = None
def placeholder_inputs(batch_size):
  """Create the image and label placeholders fed by the training loop.

  Both placeholders bake ``batch_size`` into their leading dimension, so
  every feed step must supply exactly that many examples.

  Args:
    batch_size: Number of examples fed per step.

  Returns:
    A (images_placeholder, labels_placeholder) tuple.
  """
  image_shape = (batch_size, mnist.IMAGE_PIXELS)
  images_placeholder = tf.placeholder(tf.float32, shape=image_shape)
  labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))
  return images_placeholder, labels_placeholder
def fill_feed_dict(data_set, images_pl, labels_pl):
  """Build the feed_dict for one training step.

  Pulls the next ``FLAGS.batch_size`` examples from ``data_set`` and maps
  them onto the two input placeholders.

  Args:
    data_set: Dataset exposing next_batch(), from input_data.read_data_sets().
    images_pl: The images placeholder, from placeholder_inputs().
    labels_pl: The labels placeholder, from placeholder_inputs().

  Returns:
    Dict mapping each placeholder to its batch of values.
  """
  batch_images, batch_labels = data_set.next_batch(FLAGS.batch_size,
                                                   FLAGS.fake_data)
  return {
      images_pl: batch_images,
      labels_pl: batch_labels,
  }
def do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
            data_set):
  """Run one full epoch of evaluation and print precision@1.

  Args:
    sess: The session in which the model has been trained.
    eval_correct: Tensor returning the number of correct predictions.
    images_placeholder: The images placeholder.
    labels_placeholder: The labels placeholder.
    data_set: Dataset to evaluate, from input_data.read_data_sets().
  """
  # Only whole batches are evaluated; a trailing partial batch (when the
  # dataset size is not a multiple of the batch size) is ignored.
  steps_per_epoch = data_set.num_examples // FLAGS.batch_size
  num_examples = steps_per_epoch * FLAGS.batch_size
  true_count = sum(
      sess.run(eval_correct,
               feed_dict=fill_feed_dict(data_set,
                                        images_placeholder,
                                        labels_placeholder))
      for _ in xrange(steps_per_epoch))
  precision = float(true_count) / num_examples
  print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %
        (num_examples, true_count, precision))
def run_training():
  """Train MNIST for a number of steps.

  Builds the inference/loss/training graph from the ``mnist`` module,
  runs the training loop for ``FLAGS.max_steps`` steps, and periodically
  logs summaries, saves checkpoints, and evaluates on the train,
  validation and test splits.
  """
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.
  data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)
  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Generate placeholders for the images and labels.
    images_placeholder, labels_placeholder = placeholder_inputs(
        FLAGS.batch_size)
    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(images_placeholder,
                             FLAGS.hidden1,
                             FLAGS.hidden2)
    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels_placeholder)
    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = mnist.training(loss, FLAGS.learning_rate)
    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = mnist.evaluation(logits, labels_placeholder)
    # Build the summary Tensor based on the TF collection of Summaries.
    summary = tf.summary.merge_all()
    # Add the variable initializer Op.
    init = tf.global_variables_initializer()
    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()
    # Create a session for running Ops on the Graph.
    sess = tf.Session()
    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
    # And then after everything is built:
    # Run the Op to initialize the variables.
    sess.run(init)
    # Start the training loop.
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      # Fill a feed dictionary with the actual set of images and labels
      # for this particular training step.
      feed_dict = fill_feed_dict(data_sets.train,
                                 images_placeholder,
                                 labels_placeholder)
      # Run one step of the model.  The return values are the activations
      # from the `train_op` (which is discarded) and the `loss` Op.  To
      # inspect the values of your Ops or variables, you may include them
      # in the list passed to sess.run() and the value tensors will be
      # returned in the tuple from the call.
      _, loss_value = sess.run([train_op, loss],
                               feed_dict=feed_dict)
      duration = time.time() - start_time
      # Write the summaries and print an overview fairly often.
      if step % 100 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.
        summary_str = sess.run(summary, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()
      # Save a checkpoint and evaluate the model periodically.
      # (step + 1) so the final step is always checkpointed and evaluated.
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
        saver.save(sess, checkpoint_file, global_step=step)
        # Evaluate against the training set.
        print('Training Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.train)
        # Evaluate against the validation set.
        print('Validation Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.validation)
        # Evaluate against the test set.
        print('Test Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.test)
def main(_):
  """Entry point: reset the log directory, then run training."""
  log_dir = FLAGS.log_dir
  # Start from a clean events/checkpoint directory on every run.
  if tf.gfile.Exists(log_dir):
    tf.gfile.DeleteRecursively(log_dir)
  tf.gfile.MakeDirs(log_dir)
  run_training()
if __name__ == '__main__':
  # Command-line flags mirror the hyperparameters consumed by run_training().
  parser = argparse.ArgumentParser()
  parser.add_argument('--learning_rate', type=float, default=0.01,
                      help='Initial learning rate.')
  parser.add_argument('--max_steps', type=int, default=2000,
                      help='Number of steps to run trainer.')
  parser.add_argument('--hidden1', type=int, default=128,
                      help='Number of units in hidden layer 1.')
  parser.add_argument('--hidden2', type=int, default=32,
                      help='Number of units in hidden layer 2.')
  parser.add_argument('--batch_size', type=int, default=100,
                      help='Batch size. Must divide evenly into the dataset sizes.')
  parser.add_argument(
      '--input_data_dir', type=str,
      default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),
                           'tensorflow/mnist/input_data'),
      help='Directory to put the input data.')
  parser.add_argument(
      '--log_dir', type=str,
      default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),
                           'tensorflow/mnist/logs/fully_connected_feed'),
      help='Directory to put the log data.')
  parser.add_argument('--fake_data', default=False, action='store_true',
                      help='If true, uses fake data for unit testing.')
  # Unrecognized args are forwarded to tf.app.run so absl-style flags survive.
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
mariusvniekerk/ibis | ibis/expr/tests/test_timestamp.py | 3 | 3624 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import ibis
import ibis.expr.api as api
import ibis.expr.operations as ops
import ibis.expr.types as ir
from ibis.expr.tests.mocks import MockConnection
from ibis.compat import unittest
class TestTimestamp(unittest.TestCase):
    """Construction-time type checks for timestamp expressions."""

    def setUp(self):
        self.con = MockConnection()
        self.alltypes = self.con.table('alltypes')
        self.col = self.alltypes.i

    def test_field_select(self):
        # Column 'i' of the mock table is typed as timestamp.
        assert isinstance(self.col, ir.TimestampArray)

    def test_string_cast_to_timestamp(self):
        # Casting a string column yields a timestamp column...
        column_cast = self.alltypes.g.cast('timestamp')
        assert isinstance(column_cast, ir.TimestampArray)
        # ...while casting a string literal yields a timestamp scalar.
        scalar_cast = api.literal('2000-01-01').cast('timestamp')
        assert isinstance(scalar_cast, ir.TimestampScalar)

    def test_extract_fields(self):
        # type-size may be database specific
        cases = [
            ('year', ops.ExtractYear, ir.Int32Array),
            ('month', ops.ExtractMonth, ir.Int32Array),
            ('day', ops.ExtractDay, ir.Int32Array),
            ('hour', ops.ExtractHour, ir.Int32Array),
            ('minute', ops.ExtractMinute, ir.Int32Array),
            ('second', ops.ExtractSecond, ir.Int32Array),
            ('millisecond', ops.ExtractMillisecond, ir.Int32Array),
        ]
        for field_name, expected_op, expected_type in cases:
            extracted = getattr(self.col, field_name)()
            assert extracted.get_name() == field_name
            assert isinstance(extracted, expected_type)
            assert isinstance(extracted.op(), expected_op)

    def test_now(self):
        now_expr = api.now()
        assert isinstance(now_expr, ir.TimestampScalar)
        assert isinstance(now_expr.op(), ops.TimestampNow)

    def test_timestamp_literals(self):
        timestamp_text = '2015-01-01 00:00:00'
        # Both a pandas Timestamp and a parseable string become scalars.
        expr = ibis.literal(pd.Timestamp(timestamp_text))
        assert isinstance(expr, ir.TimestampScalar)
        expr = ibis.timestamp(timestamp_text)
        assert isinstance(expr, ir.TimestampScalar)
        # A minute value outside 0-59 must be rejected.
        self.assertRaises(ValueError, ibis.timestamp, '2015-01-01 00:71')

    def test_integer_to_timestamp(self):
        # #246
        pass

    def test_comparison_timestamp(self):
        threshold = self.col.min() + ibis.day(3)
        expr = self.col > threshold
        assert isinstance(expr, ir.BooleanArray)

    def test_comparisons_string(self):
        timestamp_text = '2015-01-01 00:00:00'
        expr = self.col > timestamp_text
        assert isinstance(expr.op().right, ir.TimestampScalar)
        # A reflected comparison is flipped into a Greater on the column.
        reflected = timestamp_text < self.col
        reflected_op = reflected.op()
        assert isinstance(reflected_op, ops.Greater)
        assert isinstance(reflected_op.right, ir.TimestampScalar)

    def test_comparisons_pandas_timestamp(self):
        val = pd.Timestamp('2015-01-01 00:00:00')
        expr = self.col > val
        assert isinstance(expr.op().right, ir.TimestampScalar)
        # TODO: the reflected form (val < self.col) is broken for now
        # because of upstream pandas problems.
| apache-2.0 |
dedupeio/dedupe | tests/test_training.py | 2 | 3269 | import dedupe
import dedupe.training as training
import unittest
class TrainingTest(unittest.TestCase):
    """Tests for predicate coverage computation in dedupe.training."""

    def setUp(self):
        field_definition = [{'field': 'name', 'type': 'String'}]
        self.data_model = dedupe.Dedupe(field_definition).data_model
        self.training_pairs = {
            'match': [({"name": "Bob", "age": "50"},
                       {"name": "Bob", "age": "75"}),
                      ({"name": "Meredith", "age": "40"},
                       {"name": "Sue", "age": "10"})],
            'distinct': [({"name": "Jimmy", "age": "20"},
                          {"name": "Jimbo", "age": "21"}),
                         ({"name": "Willy", "age": "35"},
                          {"name": "William", "age": "35"}),
                         ({"name": "William", "age": "36"},
                          {"name": "William", "age": "35"})]
        }
        self.training = (self.training_pairs['match'] +
                         self.training_pairs['distinct'])

        # Order-preserving, de-duplicated list of every training record.
        self.training_records = []
        for pair in self.training:
            for record in pair:
                if record not in self.training_records:
                    self.training_records.append(record)

        # Reduce a predicate collection to the names of its simple members.
        def simple_names(predicates):
            return {str(p) for p in predicates
                    if "CompoundPredicate" not in str(p)}
        self.simple = simple_names

        self.block_learner = training.BlockLearner
        self.block_learner.blocker = dedupe.blocking.Fingerprinter(
            self.data_model.predicates())
        self.block_learner.blocker.index_all(
            dict(enumerate(self.training_records)))

    def test_dedupe_coverage(self):
        coverage = self.block_learner.cover(self.block_learner, self.training)
        expected = set(["SimplePredicate: (tokenFieldPredicate, name)",
                        "SimplePredicate: (commonSixGram, name)",
                        "TfidfTextCanopyPredicate: (0.4, name)",
                        "SimplePredicate: (sortedAcronym, name)",
                        "SimplePredicate: (sameThreeCharStartPredicate, name)",
                        "TfidfTextCanopyPredicate: (0.2, name)",
                        "SimplePredicate: (sameFiveCharStartPredicate, name)",
                        "TfidfTextCanopyPredicate: (0.6, name)",
                        "SimplePredicate: (wholeFieldPredicate, name)",
                        "TfidfTextCanopyPredicate: (0.8, name)",
                        "SimplePredicate: (commonFourGram, name)",
                        "SimplePredicate: (firstTokenPredicate, name)",
                        "SimplePredicate: (sameSevenCharStartPredicate, name)"])
        assert self.simple(coverage.keys()).issuperset(expected)

    def test_uncovered_by(self):
        original = {1: {1, 2, 3}, 2: {1, 2}, 3: {3}}
        expected_after = {1: {1, 2}, 2: {1, 2}}
        snapshot = original.copy()

        # An empty cover removes nothing; covering {3} drops its pairs.
        assert training.BranchBound.uncovered_by(original, set()) == original
        assert training.BranchBound.uncovered_by(original, {3}) == expected_after
        # uncovered_by must not mutate its input.
        assert original == snapshot

    def test_covered_pairs(self):
        def constant_predicate(x, target=None):
            return (1,)

        self.block_learner.blocker.predicates = (constant_predicate,)
        cover = self.block_learner.cover(self.block_learner,
                                         [('a', 'b')] * 2)
        assert cover[constant_predicate] == {0, 1}
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    unittest.main()
| mit |
Maseratigsg/kohencoin | test/functional/test_framework/script.py | 52 | 25958 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Functionality to build scripts, as well as SignatureHash().
This file is modified from python-bitcoinlib.
"""
from .mininode import CTransaction, CTxOut, sha256, hash256, uint256_from_str, ser_uint256, ser_string
from binascii import hexlify
import hashlib
import sys
# Python 2/3 byte-handling shims: bchr builds a one-byte bytes object from
# an int, bord turns an indexed byte back into an int.
bchr = chr
bord = ord
if sys.version > '3':
    long = int
    bchr = lambda x: bytes([x])
    bord = lambda x: x

import struct

from .bignum import bn2vch

# Script-level size limits (names mirror the C++ constants).
MAX_SCRIPT_SIZE = 10000
MAX_SCRIPT_ELEMENT_SIZE = 520
MAX_SCRIPT_OPCODES = 201

# Opcode -> canonical name map; populated after the OP_* constants below.
OPCODE_NAMES = {}
def hash160(s):
    """Return RIPEMD160(SHA256(s)), Bitcoin's hash-160 of *s*."""
    inner = sha256(s)
    return hashlib.new('ripemd160', inner).digest()
# Cache of interned CScriptOp instances, indexed by opcode value; filled by
# CScriptOp.__new__ and fully pre-populated for 0x00-0xff just below.
_opcode_instances = []
class CScriptOp(int):
    """A single script opcode"""
    __slots__ = []

    @staticmethod
    def encode_op_pushdata(d):
        """Encode a PUSHDATA op, returning bytes"""
        # Choose the shortest pushdata form able to carry len(d) bytes.
        if len(d) < 0x4c:
            return b'' + bchr(len(d)) + d # OP_PUSHDATA
        elif len(d) <= 0xff:
            return b'\x4c' + bchr(len(d)) + d # OP_PUSHDATA1
        elif len(d) <= 0xffff:
            return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
        elif len(d) <= 0xffffffff:
            return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
        else:
            raise ValueError("Data too long to encode in a PUSHDATA op")

    @staticmethod
    def encode_op_n(n):
        """Encode a small integer op, returning an opcode

        Maps 0 to OP_0 and 1..16 to OP_1..OP_16; raises ValueError otherwise.
        """
        if not (0 <= n <= 16):
            raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
        if n == 0:
            return OP_0
        else:
            return CScriptOp(OP_1 + n-1)

    def decode_op_n(self):
        """Decode a small integer opcode, returning an integer

        Inverse of encode_op_n(); raises ValueError for non-OP_N opcodes.
        """
        if self == OP_0:
            return 0
        if not (self == OP_0 or OP_1 <= self <= OP_16):
            raise ValueError('op %r is not an OP_N' % self)
        return int(self - OP_1+1)

    def is_small_int(self):
        """Return true if the op pushes a small integer to the stack"""
        # 0x51-0x60 are OP_1..OP_16; opcode 0x00 is OP_0.
        if 0x51 <= self <= 0x60 or self == 0:
            return True
        else:
            return False

    def __str__(self):
        return repr(self)

    def __repr__(self):
        # Prefer the canonical name when the opcode is known.
        if self in OPCODE_NAMES:
            return OPCODE_NAMES[self]
        else:
            return 'CScriptOp(0x%x)' % self

    def __new__(cls, n):
        # Opcodes are interned: exactly one instance exists per value.  The
        # assert requires instances to be created in ascending order, which
        # the module-level population loop below guarantees.
        try:
            return _opcode_instances[n]
        except IndexError:
            assert len(_opcode_instances) == n
            _opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
            return _opcode_instances[n]
# Populate opcode instance table
# Creating every value 0x00-0xff up front means later CScriptOp(n) calls
# always hit the cache and never trip the ascending-order assert in __new__.
for n in range(0xff+1):
    CScriptOp(n)
# One module-level constant per script opcode.  OP_FALSE/OP_TRUE are aliases
# of OP_0/OP_1; OP_CHECKLOCKTIMEVERIFY and OP_CHECKSEQUENCEVERIFY occupy the
# former OP_NOP2/OP_NOP3 slots (0xb1/0xb2).
# push value
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE=OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)

# control
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)

# stack ops
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)

# splice ops
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)

# bit logic
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)

# numeric
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)

OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)

OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)

OP_WITHIN = CScriptOp(0xa5)

# crypto
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)

# expansion
OP_NOP1 = CScriptOp(0xb0)
OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)

# template matching params
OP_SMALLINTEGER = CScriptOp(0xfa)
OP_PUBKEYS = CScriptOp(0xfb)
OP_PUBKEYHASH = CScriptOp(0xfd)
OP_PUBKEY = CScriptOp(0xfe)

OP_INVALIDOPCODE = CScriptOp(0xff)
# The set of opcodes considered valid by this framework.  The original
# hand-enumerated table listed exactly the contiguous opcode range
# OP_1NEGATE (0x4f) through OP_NOP10 (0xb9) plus the four template-matching
# pseudo-opcodes; deriving the set from that fact removes a 100+ line
# literal that could silently drift out of sync with the OP_* constants.
# Note OP_0 and the PUSHDATA opcodes are deliberately absent, as before.
VALID_OPCODES = {CScriptOp(n) for n in range(OP_1NEGATE, OP_NOP10 + 1)}
VALID_OPCODES.update({
    OP_SMALLINTEGER,
    OP_PUBKEYS,
    OP_PUBKEYHASH,
    OP_PUBKEY,
})
# Canonical opcode -> name strings, used by CScriptOp.__repr__.  The alias
# constants (OP_FALSE, OP_TRUE) intentionally have no entries of their own.
OPCODE_NAMES.update({
    OP_0 : 'OP_0',
    OP_PUSHDATA1 : 'OP_PUSHDATA1',
    OP_PUSHDATA2 : 'OP_PUSHDATA2',
    OP_PUSHDATA4 : 'OP_PUSHDATA4',
    OP_1NEGATE : 'OP_1NEGATE',
    OP_RESERVED : 'OP_RESERVED',
    OP_1 : 'OP_1',
    OP_2 : 'OP_2',
    OP_3 : 'OP_3',
    OP_4 : 'OP_4',
    OP_5 : 'OP_5',
    OP_6 : 'OP_6',
    OP_7 : 'OP_7',
    OP_8 : 'OP_8',
    OP_9 : 'OP_9',
    OP_10 : 'OP_10',
    OP_11 : 'OP_11',
    OP_12 : 'OP_12',
    OP_13 : 'OP_13',
    OP_14 : 'OP_14',
    OP_15 : 'OP_15',
    OP_16 : 'OP_16',
    OP_NOP : 'OP_NOP',
    OP_VER : 'OP_VER',
    OP_IF : 'OP_IF',
    OP_NOTIF : 'OP_NOTIF',
    OP_VERIF : 'OP_VERIF',
    OP_VERNOTIF : 'OP_VERNOTIF',
    OP_ELSE : 'OP_ELSE',
    OP_ENDIF : 'OP_ENDIF',
    OP_VERIFY : 'OP_VERIFY',
    OP_RETURN : 'OP_RETURN',
    OP_TOALTSTACK : 'OP_TOALTSTACK',
    OP_FROMALTSTACK : 'OP_FROMALTSTACK',
    OP_2DROP : 'OP_2DROP',
    OP_2DUP : 'OP_2DUP',
    OP_3DUP : 'OP_3DUP',
    OP_2OVER : 'OP_2OVER',
    OP_2ROT : 'OP_2ROT',
    OP_2SWAP : 'OP_2SWAP',
    OP_IFDUP : 'OP_IFDUP',
    OP_DEPTH : 'OP_DEPTH',
    OP_DROP : 'OP_DROP',
    OP_DUP : 'OP_DUP',
    OP_NIP : 'OP_NIP',
    OP_OVER : 'OP_OVER',
    OP_PICK : 'OP_PICK',
    OP_ROLL : 'OP_ROLL',
    OP_ROT : 'OP_ROT',
    OP_SWAP : 'OP_SWAP',
    OP_TUCK : 'OP_TUCK',
    OP_CAT : 'OP_CAT',
    OP_SUBSTR : 'OP_SUBSTR',
    OP_LEFT : 'OP_LEFT',
    OP_RIGHT : 'OP_RIGHT',
    OP_SIZE : 'OP_SIZE',
    OP_INVERT : 'OP_INVERT',
    OP_AND : 'OP_AND',
    OP_OR : 'OP_OR',
    OP_XOR : 'OP_XOR',
    OP_EQUAL : 'OP_EQUAL',
    OP_EQUALVERIFY : 'OP_EQUALVERIFY',
    OP_RESERVED1 : 'OP_RESERVED1',
    OP_RESERVED2 : 'OP_RESERVED2',
    OP_1ADD : 'OP_1ADD',
    OP_1SUB : 'OP_1SUB',
    OP_2MUL : 'OP_2MUL',
    OP_2DIV : 'OP_2DIV',
    OP_NEGATE : 'OP_NEGATE',
    OP_ABS : 'OP_ABS',
    OP_NOT : 'OP_NOT',
    OP_0NOTEQUAL : 'OP_0NOTEQUAL',
    OP_ADD : 'OP_ADD',
    OP_SUB : 'OP_SUB',
    OP_MUL : 'OP_MUL',
    OP_DIV : 'OP_DIV',
    OP_MOD : 'OP_MOD',
    OP_LSHIFT : 'OP_LSHIFT',
    OP_RSHIFT : 'OP_RSHIFT',
    OP_BOOLAND : 'OP_BOOLAND',
    OP_BOOLOR : 'OP_BOOLOR',
    OP_NUMEQUAL : 'OP_NUMEQUAL',
    OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY',
    OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL',
    OP_LESSTHAN : 'OP_LESSTHAN',
    OP_GREATERTHAN : 'OP_GREATERTHAN',
    OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL',
    OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL',
    OP_MIN : 'OP_MIN',
    OP_MAX : 'OP_MAX',
    OP_WITHIN : 'OP_WITHIN',
    OP_RIPEMD160 : 'OP_RIPEMD160',
    OP_SHA1 : 'OP_SHA1',
    OP_SHA256 : 'OP_SHA256',
    OP_HASH160 : 'OP_HASH160',
    OP_HASH256 : 'OP_HASH256',
    OP_CODESEPARATOR : 'OP_CODESEPARATOR',
    OP_CHECKSIG : 'OP_CHECKSIG',
    OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY',
    OP_CHECKMULTISIG : 'OP_CHECKMULTISIG',
    OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY',
    OP_NOP1 : 'OP_NOP1',
    OP_CHECKLOCKTIMEVERIFY : 'OP_CHECKLOCKTIMEVERIFY',
    OP_CHECKSEQUENCEVERIFY : 'OP_CHECKSEQUENCEVERIFY',
    OP_NOP4 : 'OP_NOP4',
    OP_NOP5 : 'OP_NOP5',
    OP_NOP6 : 'OP_NOP6',
    OP_NOP7 : 'OP_NOP7',
    OP_NOP8 : 'OP_NOP8',
    OP_NOP9 : 'OP_NOP9',
    OP_NOP10 : 'OP_NOP10',
    OP_SMALLINTEGER : 'OP_SMALLINTEGER',
    OP_PUBKEYS : 'OP_PUBKEYS',
    OP_PUBKEYHASH : 'OP_PUBKEYHASH',
    OP_PUBKEY : 'OP_PUBKEY',
    OP_INVALIDOPCODE : 'OP_INVALIDOPCODE',
})
# Name -> opcode lookup, derived by inverting OPCODE_NAMES instead of
# hand-maintaining a second 100+ entry table that could drift out of sync.
# OP_INVALIDOPCODE is excluded, exactly as in the original literal table.
OPCODES_BY_NAME = {
    name: opcode
    for opcode, name in OPCODE_NAMES.items()
    if name != 'OP_INVALIDOPCODE'
}
class CScriptInvalidError(Exception):
    """Root of the CScript exception hierarchy."""
class CScriptTruncatedPushDataError(CScriptInvalidError):
    """Raised when a PUSHDATA opcode's payload is cut short.

    The partial payload that was read is kept on ``self.data``.
    """
    def __init__(self, msg, data):
        super(CScriptTruncatedPushDataError, self).__init__(msg)
        self.data = data
# This is used, eg, for blockchain heights in coinbase scripts (bip34)
class CScriptNum(object):
    """Wraps an integer and serializes it to script's numeric encoding."""
    def __init__(self, d=0):
        self.value = d

    @staticmethod
    def encode(obj):
        """Serialize obj.value: a 1-byte length prefix followed by the
        little-endian sign-magnitude payload; zero encodes as no bytes at
        all (not even a length prefix)."""
        r = bytearray(0)
        if obj.value == 0:
            return bytes(r)
        neg = obj.value < 0
        absvalue = -obj.value if neg else obj.value
        # Emit the magnitude little-endian, one byte at a time.
        while (absvalue):
            r.append(absvalue & 0xff)
            absvalue >>= 8
        # If the high bit of the last byte is set it would be read back as
        # a sign bit, so append an explicit sign byte; otherwise fold the
        # sign of a negative value into that high bit.
        if r[-1] & 0x80:
            r.append(0x80 if neg else 0)
        elif neg:
            r[-1] |= 0x80
        return bytes(bchr(len(r)) + r)
class CScript(bytes):
    """Serialized script

    A bytes subclass, so you can use this directly whenever bytes are accepted.
    Note that this means that indexing does *not* work - you'll get an index by
    byte rather than opcode. This format was chosen for efficiency so that the
    general case would not require creating a lot of little CScriptOP objects.

    iter(script) however does iterate by opcode.
    """
    @classmethod
    def __coerce_instance(cls, other):
        # Coerce other into bytes
        if isinstance(other, CScriptOp):
            # A bare opcode serializes to its single byte.
            other = bchr(other)
        elif isinstance(other, CScriptNum):
            if (other.value == 0):
                # Zero is pushed with OP_0, not an empty PUSHDATA.
                other = bchr(CScriptOp(OP_0))
            else:
                other = CScriptNum.encode(other)
        elif isinstance(other, int):
            if 0 <= other <= 16:
                # Small ints become the corresponding OP_N opcode.
                other = bytes(bchr(CScriptOp.encode_op_n(other)))
            elif other == -1:
                other = bytes(bchr(OP_1NEGATE))
            else:
                # Everything else is pushed as serialized bignum data.
                other = CScriptOp.encode_op_pushdata(bn2vch(other))
        elif isinstance(other, (bytes, bytearray)):
            other = CScriptOp.encode_op_pushdata(other)
        return other

    def __add__(self, other):
        # Do the coercion outside of the try block so that errors in it are
        # noticed.
        other = self.__coerce_instance(other)

        try:
            # bytes.__add__ always returns bytes instances unfortunately
            return CScript(super(CScript, self).__add__(other))
        except TypeError:
            raise TypeError('Can not add a %r instance to a CScript' % other.__class__)

    def join(self, iterable):
        # join makes no sense for a CScript()
        raise NotImplementedError

    def __new__(cls, value=b''):
        if isinstance(value, bytes) or isinstance(value, bytearray):
            # Raw bytes are taken verbatim, with no coercion.
            return super(CScript, cls).__new__(cls, value)
        else:
            # Any other iterable has each element coerced to its script
            # serialization and the results concatenated.
            def coerce_iterable(iterable):
                for instance in iterable:
                    yield cls.__coerce_instance(instance)
            # Annoyingly on both python2 and python3 bytes.join() always
            # returns a bytes instance even when subclassed.
            return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))

    def raw_iter(self):
        """Raw iteration

        Yields tuples of (opcode, data, sop_idx) so that the different possible
        PUSHDATA encodings can be accurately distinguished, as well as
        determining the exact opcode byte indexes. (sop_idx)

        Raises CScriptInvalidError / CScriptTruncatedPushDataError on
        malformed pushdata.
        """
        i = 0
        while i < len(self):
            sop_idx = i
            opcode = bord(self[i])
            i += 1

            if opcode > OP_PUSHDATA4:
                # Non-push opcode: no associated data.
                yield (opcode, None, sop_idx)
            else:
                # One of the four push forms; decode its length field.
                datasize = None
                pushdata_type = None
                if opcode < OP_PUSHDATA1:
                    # Direct push: the opcode itself is the byte count.
                    pushdata_type = 'PUSHDATA(%d)' % opcode
                    datasize = opcode
                elif opcode == OP_PUSHDATA1:
                    pushdata_type = 'PUSHDATA1'
                    if i >= len(self):
                        raise CScriptInvalidError('PUSHDATA1: missing data length')
                    datasize = bord(self[i])
                    i += 1
                elif opcode == OP_PUSHDATA2:
                    pushdata_type = 'PUSHDATA2'
                    if i + 1 >= len(self):
                        raise CScriptInvalidError('PUSHDATA2: missing data length')
                    # Little-endian 16-bit length.
                    datasize = bord(self[i]) + (bord(self[i+1]) << 8)
                    i += 2
                elif opcode == OP_PUSHDATA4:
                    pushdata_type = 'PUSHDATA4'
                    if i + 3 >= len(self):
                        raise CScriptInvalidError('PUSHDATA4: missing data length')
                    # Little-endian 32-bit length.
                    datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24)
                    i += 4
                else:
                    assert False # shouldn't happen

                data = bytes(self[i:i+datasize])

                # Check for truncation
                if len(data) < datasize:
                    raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
                i += datasize

                yield (opcode, data, sop_idx)

    def __iter__(self):
        """'Cooked' iteration

        Returns either a CScriptOP instance, an integer, or bytes, as
        appropriate.

        See raw_iter() if you need to distinguish the different possible
        PUSHDATA encodings.
        """
        for (opcode, data, sop_idx) in self.raw_iter():
            if data is not None:
                yield data
            else:
                opcode = CScriptOp(opcode)

                if opcode.is_small_int():
                    # OP_0..OP_16 come back as plain Python ints.
                    yield opcode.decode_op_n()
                else:
                    yield CScriptOp(opcode)

    def __repr__(self):
        # For Python3 compatibility add b before strings so testcases don't
        # need to change
        def _repr(o):
            if isinstance(o, bytes):
                # NOTE(review): formatting a *bytes* literal with a str
                # argument raises TypeError on Python 3 — this looks
                # suspect; confirm whether the b prefix belongs on the
                # format string here.
                return b"x('%s')" % hexlify(o).decode('ascii')
            else:
                return repr(o)

        ops = []
        i = iter(self)
        while True:
            op = None
            try:
                op = _repr(next(i))
            except CScriptTruncatedPushDataError as err:
                op = '%s...<ERROR: %s>' % (_repr(err.data), err)
                break
            except CScriptInvalidError as err:
                op = '<ERROR: %s>' % err
                break
            except StopIteration:
                break
            finally:
                # The finally block also captures the partial op produced
                # on the error paths above before breaking out.
                if op is not None:
                    ops.append(op)

        return "CScript([%s])" % ', '.join(ops)

    def GetSigOpCount(self, fAccurate):
        """Get the SigOp count.

        fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.

        Note that this is consensus-critical.
        """
        n = 0
        lastOpcode = OP_INVALIDOPCODE
        for (opcode, data, sop_idx) in self.raw_iter():
            if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
                n += 1
            elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
                if fAccurate and (OP_1 <= lastOpcode <= OP_16):
                    # NOTE(review): this decodes `opcode` (CHECKMULTISIG)
                    # rather than `lastOpcode` (the preceding OP_N), and
                    # `opcode` from raw_iter is a plain int with no
                    # decode_op_n attribute — confirm against the C++
                    # GetSigOpCount before relying on the fAccurate path.
                    n += opcode.decode_op_n()
                else:
                    # Inaccurate counting charges the maximum of 20 keys.
                    n += 20
            lastOpcode = opcode
        return n
# Signature hash type flags (low 5 bits select the mode, 0x80 is a flag).
SIGHASH_ALL = 1  # sign all inputs and all outputs (default)
SIGHASH_NONE = 2  # sign all inputs, commit to no outputs
SIGHASH_SINGLE = 3  # commit only to the output with the same index as the input
SIGHASH_ANYONECANPAY = 0x80  # flag: commit only to the input being signed
def FindAndDelete(script, sig):
    """Consensus critical, see FindAndDelete() in Satoshi codebase

    Returns a new CScript equal to *script* with every element whose raw
    bytes at an element boundary match the byte string *sig* removed.
    """
    r = b''
    last_sop_idx = sop_idx = 0
    # 'skip' starts True so nothing is emitted before the first element;
    # it is re-evaluated for every element in the loop below.
    skip = True
    for (opcode, data, sop_idx) in script.raw_iter():
        if not skip:
            # The previous element did not match 'sig' -- keep its bytes.
            r += script[last_sop_idx:sop_idx]
        last_sop_idx = sop_idx
        # Compare the raw bytes starting at this element against 'sig'.
        if script[sop_idx:sop_idx + len(sig)] == sig:
            skip = True
        else:
            skip = False
    if not skip:
        # Emit the trailing element if it did not match.
        r += script[last_sop_idx:]
    return CScript(r)
def SignatureHash(script, txTo, inIdx, hashtype):
    """Consensus-correct SignatureHash
    Returns (hash, err) to precisely match the consensus-critical behavior of
    the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
    """
    # In the error cases consensus hashes the 256-bit value 1 (little-endian)
    # instead of failing -- the infamous SIGHASH_SINGLE bug.
    HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    if inIdx >= len(txTo.vin):
        return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
    # Work on a copy so the caller's transaction is not mutated.
    txtmp = CTransaction(txTo)
    for txin in txtmp.vin:
        txin.scriptSig = b''
    # The scriptCode being signed, with any OP_CODESEPARATORs removed.
    txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
    if (hashtype & 0x1f) == SIGHASH_NONE:
        # Commit to no outputs; other inputs' sequence numbers are zeroed.
        txtmp.vout = []
        for i in range(len(txtmp.vin)):
            if i != inIdx:
                txtmp.vin[i].nSequence = 0
    elif (hashtype & 0x1f) == SIGHASH_SINGLE:
        # Commit only to the output paired with the signed input.
        outIdx = inIdx
        if outIdx >= len(txtmp.vout):
            return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
        tmp = txtmp.vout[outIdx]
        txtmp.vout = []
        # Earlier outputs are replaced by "null" outputs (value -1).
        for i in range(outIdx):
            txtmp.vout.append(CTxOut(-1))
        txtmp.vout.append(tmp)
        for i in range(len(txtmp.vin)):
            if i != inIdx:
                txtmp.vin[i].nSequence = 0
    if hashtype & SIGHASH_ANYONECANPAY:
        # Commit only to the input being signed.
        tmp = txtmp.vin[inIdx]
        txtmp.vin = []
        txtmp.vin.append(tmp)
    s = txtmp.serialize()
    # Hash type is appended to the serialized transaction before hashing.
    s += struct.pack(b"<I", hashtype)
    hash = hash256(s)
    return (hash, None)
# TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided.
# Performance optimization probably not necessary for python tests, however.
# Note that this corresponds to sigversion == 1 in EvalScript, which is used
# for version 0 witnesses.
def SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, amount):
    """BIP143 signature hash for version-0 witness programs.

    'amount' is the value (in satoshi) of the output being spent, which
    BIP143 commits to in the digest.
    """
    hashPrevouts = 0
    hashSequence = 0
    hashOutputs = 0
    if not (hashtype & SIGHASH_ANYONECANPAY):
        # Commit to all outpoints being spent.
        serialize_prevouts = bytes()
        for i in txTo.vin:
            serialize_prevouts += i.prevout.serialize()
        hashPrevouts = uint256_from_str(hash256(serialize_prevouts))
    if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
        # SIGHASH_ALL only: commit to every input's nSequence.
        serialize_sequence = bytes()
        for i in txTo.vin:
            serialize_sequence += struct.pack("<I", i.nSequence)
        hashSequence = uint256_from_str(hash256(serialize_sequence))
    if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
        # SIGHASH_ALL: commit to every output.
        serialize_outputs = bytes()
        for o in txTo.vout:
            serialize_outputs += o.serialize()
        hashOutputs = uint256_from_str(hash256(serialize_outputs))
    elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
        # SIGHASH_SINGLE: commit only to the matching-index output.
        serialize_outputs = txTo.vout[inIdx].serialize()
        hashOutputs = uint256_from_str(hash256(serialize_outputs))
    # Assemble the BIP143 preimage in its fixed field order.
    ss = bytes()
    ss += struct.pack("<i", txTo.nVersion)
    ss += ser_uint256(hashPrevouts)
    ss += ser_uint256(hashSequence)
    ss += txTo.vin[inIdx].prevout.serialize()
    ss += ser_string(script)
    ss += struct.pack("<q", amount)  # value of the spent output
    ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
    ss += ser_uint256(hashOutputs)
    ss += struct.pack("<i", txTo.nLockTime)
    ss += struct.pack("<I", hashtype)
    return hash256(ss)
| mit |
aabbox/kbengine | kbe/src/lib/python/Lib/distutils/archive_util.py | 85 | 7945 | """distutils.archive_util
Utility functions for creating archive files (tarballs, zip files,
that sort of thing)."""
import os
from warnings import warn
import sys
try:
import zipfile
except ImportError:
zipfile = None
from distutils.errors import DistutilsExecError
from distutils.spawn import spawn
from distutils.dir_util import mkpath
from distutils import log
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
def _get_gid(name):
"""Returns a gid, given a group name."""
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _get_uid(name):
"""Returns an uid, given a user name."""
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
                 owner=None, group=None):
    """Create a (possibly compressed) tar file from all the files under
    'base_dir'.
    'compress' must be "gzip" (the default), "compress", "bzip2", or None.
    (compress will be deprecated in Python 3.2)
    'owner' and 'group' can be used to define an owner and a group for the
    archive that is being built. If not provided, the current owner and group
    will be used.
    The output tar file will be named 'base_dir' + ".tar", possibly plus
    the appropriate compression extension (".gz", ".bz2" or ".Z").
    Returns the output filename.
    """
    tar_compression = {'gzip': 'gz', 'bzip2': 'bz2', None: '', 'compress': ''}
    compress_ext = {'gzip': '.gz', 'bzip2': '.bz2', 'compress': '.Z'}
    # flags for compression program, each element of list will be an argument
    if compress is not None and compress not in compress_ext.keys():
        raise ValueError(
            "bad value for 'compress': must be None, 'gzip', 'bzip2' "
            "or 'compress'")
    archive_name = base_name + '.tar'
    # The external 'compress' program appends its own extension (below),
    # so only the in-process compressors get their extension added here.
    if compress != 'compress':
        archive_name += compress_ext.get(compress, '')
    mkpath(os.path.dirname(archive_name), dry_run=dry_run)
    # creating the tarball
    import tarfile  # late import so Python build itself doesn't break
    log.info('Creating tar archive')
    uid = _get_uid(owner)
    gid = _get_gid(group)
    def _set_uid_gid(tarinfo):
        # tarfile 'filter' callback: rewrite ownership metadata on each
        # archive member when an owner/group override was requested.
        if gid is not None:
            tarinfo.gid = gid
            tarinfo.gname = group
        if uid is not None:
            tarinfo.uid = uid
            tarinfo.uname = owner
        return tarinfo
    if not dry_run:
        tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
        try:
            tar.add(base_dir, filter=_set_uid_gid)
        finally:
            tar.close()
    # compression using `compress`
    if compress == 'compress':
        warn("'compress' will be deprecated.", PendingDeprecationWarning)
        # the option varies depending on the platform
        compressed_name = archive_name + compress_ext[compress]
        if sys.platform == 'win32':
            cmd = [compress, archive_name, compressed_name]
        else:
            cmd = [compress, '-f', archive_name]
        spawn(cmd, dry_run=dry_run)
        return compressed_name
    return archive_name
def make_zipfile(base_name, base_dir, verbose=0, dry_run=0):
    """Create a zip file from all the files under 'base_dir'.
    The output zip file will be named 'base_name' + ".zip". Uses either the
    "zipfile" Python module (if available) or the InfoZIP "zip" utility
    (if installed and found on the default search path). If neither tool is
    available, raises DistutilsExecError. Returns the name of the output zip
    file.
    """
    zip_filename = base_name + ".zip"
    mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
    # If zipfile module is not available, try spawning an external
    # 'zip' command.
    if zipfile is None:
        if verbose:
            zipoptions = "-r"
        else:
            zipoptions = "-rq"
        try:
            spawn(["zip", zipoptions, zip_filename, base_dir],
                  dry_run=dry_run)
        except DistutilsExecError:
            # XXX really should distinguish between "couldn't find
            # external 'zip' command" and "zip failed".
            raise DistutilsExecError(("unable to create zip file '%s': "
                                      "could neither import the 'zipfile' module nor "
                                      "find a standalone zip utility") % zip_filename)
    else:
        log.info("creating '%s' and adding '%s' to it",
                 zip_filename, base_dir)
        if not dry_run:
            try:
                # Prefer DEFLATE; fall back to STORED when zlib is missing.
                zip = zipfile.ZipFile(zip_filename, "w",
                                      compression=zipfile.ZIP_DEFLATED)
            except RuntimeError:
                zip = zipfile.ZipFile(zip_filename, "w",
                                      compression=zipfile.ZIP_STORED)
            for dirpath, dirnames, filenames in os.walk(base_dir):
                for name in filenames:
                    path = os.path.normpath(os.path.join(dirpath, name))
                    if os.path.isfile(path):
                        # The archive member name equals the relative path.
                        zip.write(path, path)
                        log.info("adding '%s'" % path)
            zip.close()
    return zip_filename
# Registry of supported archive formats:
#   name -> (creation function, [(kwarg, value), ...], human description)
ARCHIVE_FORMATS = {
    'gztar': (make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
    'bztar': (make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
    'ztar': (make_tarball, [('compress', 'compress')], "compressed tar file"),
    'tar': (make_tarball, [('compress', None)], "uncompressed tar file"),
    'zip': (make_zipfile, [],"ZIP file")
    }
def check_archive_formats(formats):
    """Returns the first format from the 'format' list that is unknown.
    If all formats are known, returns None
    """
    unknown = (fmt for fmt in formats if fmt not in ARCHIVE_FORMATS)
    return next(unknown, None)
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
                 dry_run=0, owner=None, group=None):
    """Create an archive file (eg. zip or tar).
    'base_name' is the name of the file to create, minus any format-specific
    extension; 'format' is the archive format: one of "zip", "tar", "ztar",
    or "gztar".
    'root_dir' is a directory that will be the root directory of the
    archive; ie. we typically chdir into 'root_dir' before creating the
    archive. 'base_dir' is the directory where we start archiving from;
    ie. 'base_dir' will be the common prefix of all files and
    directories in the archive. 'root_dir' and 'base_dir' both default
    to the current directory. Returns the name of the archive file.
    'owner' and 'group' are used when creating a tar archive. By default,
    uses the current owner and group.
    """
    save_cwd = os.getcwd()
    if root_dir is not None:
        log.debug("changing into '%s'", root_dir)
        # Resolve the output name before chdir so it stays correct.
        base_name = os.path.abspath(base_name)
        if not dry_run:
            os.chdir(root_dir)
    if base_dir is None:
        base_dir = os.curdir
    kwargs = {'dry_run': dry_run}
    try:
        format_info = ARCHIVE_FORMATS[format]
    except KeyError:
        raise ValueError("unknown archive format '%s'" % format)
    func = format_info[0]
    # Apply the format-specific fixed keyword arguments from the registry.
    for arg, val in format_info[1]:
        kwargs[arg] = val
    if format != 'zip':
        # Ownership overrides only apply to tar-based formats.
        kwargs['owner'] = owner
        kwargs['group'] = group
    try:
        filename = func(base_name, base_dir, **kwargs)
    finally:
        # Always restore the working directory, even if archiving failed.
        if root_dir is not None:
            log.debug("changing back to '%s'", save_cwd)
            os.chdir(save_cwd)
    return filename
| lgpl-3.0 |
andrei-karalionak/ggrc-core | src/ggrc/fulltext/mysql.py | 6 | 8193 | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from sqlalchemy import and_
from sqlalchemy import case
from sqlalchemy import distinct
from sqlalchemy import event
from sqlalchemy import func
from sqlalchemy import literal
from sqlalchemy import or_
from sqlalchemy import union
from sqlalchemy.sql import false
from sqlalchemy.schema import DDL
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import aliased
from sqlalchemy.sql.expression import select
from ggrc import db
from ggrc.login import is_creator
from ggrc.models import all_models
from ggrc.utils import query_helpers
from ggrc.rbac import context_query_filter
from ggrc.fulltext.sql import SqlIndexer
class MysqlRecordProperty(db.Model):
  """One indexed (object, property) row of the MySQL full-text table."""
  __tablename__ = 'fulltext_record_properties'

  # Composite primary key: the indexed object's id ('key'), its model type
  # name and the property (e.g. 'title') whose text is stored in 'content'.
  key = db.Column(db.Integer, primary_key=True)
  type = db.Column(db.String(64), primary_key=True)
  context_id = db.Column(db.Integer)
  tags = db.Column(db.String)
  property = db.Column(db.String(64), primary_key=True)
  content = db.Column(db.Text)

  @declared_attr
  def __table_args__(self):
    return (
        # NOTE
        # This is here to prevent Alembic from wanting to drop the index, but
        # the DDL below or a similar Alembic migration should be used to create
        # the index.
        db.Index('{}_text_idx'.format(self.__tablename__), 'content'),
        # These are real indexes
        db.Index('ix_{}_key'.format(self.__tablename__), 'key'),
        db.Index('ix_{}_type'.format(self.__tablename__), 'type'),
        db.Index('ix_{}_tags'.format(self.__tablename__), 'tags'),
        db.Index('ix_{}_context_id'.format(self.__tablename__), 'context_id'),
        # Only MyISAM supports fulltext indexes until newer MySQL/MariaDB
        {'mysql_engine': 'myisam'},
    )
# MySQL FULLTEXT indexes cannot be expressed through db.Index() alone;
# attach the raw DDL to run right after the table is created.
event.listen(
    MysqlRecordProperty.__table__,
    'after_create',
    DDL('ALTER TABLE {tablename} ADD FULLTEXT INDEX {tablename}_text_idx '
        '(content)'.format(tablename=MysqlRecordProperty.__tablename__))
)
class MysqlIndexer(SqlIndexer):
  """Full-text search implementation backed by the MySQL
  fulltext_record_properties table (see MysqlRecordProperty)."""

  record_type = MysqlRecordProperty

  def _get_filter_query(self, terms):
    """Get the whitelist of fields to filter in full text table."""
    whitelist = MysqlRecordProperty.property.in_(
        ['title', 'name', 'email', 'notes', 'description', 'slug'])
    if not terms:
      return whitelist
    # Non-empty terms additionally require a substring match on content.
    # (The original 'elif terms:' was redundant -- this branch is only
    # reachable when terms is truthy.)
    return and_(whitelist, MysqlRecordProperty.content.contains(terms))

  def get_permissions_query(self, model_names, permission_type='read',
                            permission_model=None):
    """Prepare the query based on the allowed contexts and resources for
    each of the required objects(models).
    """
    type_queries = []
    for model_name in model_names:
      contexts, resources = query_helpers.get_context_resource(
          model_name=model_name,
          permission_type=permission_type,
          permission_model=permission_model
      )
      if contexts is not None:
        # Rows are visible either through an allowed context or through
        # an explicitly allowed resource id.
        if resources:
          resource_sql = and_(
              MysqlRecordProperty.type == model_name,
              MysqlRecordProperty.key.in_(resources))
        else:
          resource_sql = false()
        type_query = or_(
            and_(
                MysqlRecordProperty.type == model_name,
                context_query_filter(MysqlRecordProperty.context_id, contexts)
            ),
            resource_sql)
        type_queries.append(type_query)
    return and_(
        MysqlRecordProperty.type.in_(model_names),
        or_(*type_queries))

  def search_get_owner_query(self, query, types=None, contact_id=None):
    """Prepare the search query based on the contact_id to return my
    objects. This method is used only for dashboard and returns objects
    the user is the owner.
    """
    if not contact_id:
      return query
    union_query = query_helpers.get_myobjects_query(
        types=types,
        contact_id=contact_id,
        is_creator=is_creator()
    )
    # Restrict the fulltext rows to the (id, type) pairs owned by the user.
    return query.join(
        union_query,
        and_(
            union_query.c.id == MysqlRecordProperty.key,
            union_query.c.type == MysqlRecordProperty.type),
    )

  def _add_extra_params_query(self, query, type, extra_param):
    """Prepare the query for handling extra params.

    'extra_param' is a dict of attribute filters applied on the model
    named 'type'; unknown model names leave the query unchanged.
    """
    if not extra_param:
      return query
    models = [m for m in all_models.all_models if m.__name__ == type]
    if len(models) == 0:
      return query
    model = models[0]
    return query.filter(self.record_type.key.in_(
        db.session.query(
            model.id.label('id')
        ).filter_by(**extra_param)
    ))

  def _get_grouped_types(self, types, extra_params=None):
    """Return all model names, restricted to 'types' and excluding models
    that have their own entry in 'extra_params' (those are unioned in
    separately by search()/counts())."""
    model_names = [model.__name__ for model in all_models.all_models]
    if types is not None:
      model_names = [m for m in model_names if m in types]
    if extra_params is not None:
      model_names = [m for m in model_names if m not in extra_params]
    return model_names

  def search(self, terms, types=None, permission_type='read',
             permission_model=None, contact_id=None, extra_params=None):
    """Prepare the search query and return the results set based on the
    full text table."""
    # BUGFIX: 'extra_params={}' was a shared mutable default argument;
    # default to None and normalize here instead (also tolerates an
    # explicit None from callers).
    extra_params = extra_params or {}
    model_names = self._get_grouped_types(types, extra_params)
    columns = (
        self.record_type.key.label('key'),
        self.record_type.type.label('type'),
        self.record_type.property.label('property'),
        self.record_type.content.label('content'),
        # Sort rows matched on 'title' ahead of all other properties.
        case(
            [(self.record_type.property == 'title', literal(0))],
            else_=literal(1)).label('sort_key'))

    query = db.session.query(*columns)
    query = query.filter(self.get_permissions_query(
        model_names, permission_type, permission_model))
    query = query.filter(self._get_filter_query(terms))
    query = self.search_get_owner_query(query, types, contact_id)

    # Recomputed *without* the extra_params exclusion so the membership
    # check in the loop below also accepts the extra models.
    model_names = [model.__name__ for model in all_models.all_models]
    if types is not None:
      model_names = [m for m in model_names if m in types]

    unions = [query]
    # Add extra_params and extra_colums:
    for k, v in extra_params.iteritems():
      if k not in model_names:
        continue
      q = db.session.query(*columns)
      q = q.filter(
          self.get_permissions_query([k], permission_type, permission_model))
      q = q.filter(self._get_filter_query(terms))
      q = self.search_get_owner_query(q, [k], contact_id)
      q = self._add_extra_params_query(q, k, v)
      unions.append(q)
    all_queries = union(*unions)
    all_queries = aliased(all_queries.order_by(
        all_queries.c.sort_key, all_queries.c.content))
    return db.session.execute(
        select([all_queries.c.key, all_queries.c.type]).distinct())

  def counts(self, terms, types=None, contact_id=None,
             extra_params=None, extra_columns=None):
    """Prepare the search query, but return only count for each of
    the requested objects."""
    # BUGFIX: same mutable-default-argument fix as in search().
    extra_params = extra_params or {}
    extra_columns = extra_columns or {}
    model_names = self._get_grouped_types(types, extra_params)
    query = db.session.query(
        self.record_type.type, func.count(distinct(
            self.record_type.key)), literal(""))
    query = query.filter(self.get_permissions_query(model_names))
    query = query.filter(self._get_filter_query(terms))
    query = self.search_get_owner_query(query, types, contact_id)
    query = query.group_by(self.record_type.type)
    # Every extra_params key also gets a column, unless overridden.
    all_extra_columns = dict(extra_columns.items() +
                             [(p, p) for p in extra_params
                              if p not in extra_columns])
    if not all_extra_columns:
      return query.all()

    # Add extra_params and extra_colums:
    for k, v in all_extra_columns.iteritems():
      q = db.session.query(
          self.record_type.type, func.count(
              distinct(self.record_type.key)), literal(k))
      q = q.filter(self.get_permissions_query([v]))
      q = q.filter(self._get_filter_query(terms))
      q = self.search_get_owner_query(q, [v], contact_id)
      q = self._add_extra_params_query(q, v, extra_params.get(k, None))
      q = q.group_by(self.record_type.type)
      query = query.union(q)
    return query.all()
# Public alias used by the fulltext package to select this backend.
Indexer = MysqlIndexer
| apache-2.0 |
LarsFronius/ansible | lib/ansible/modules/cloud/amazon/lambda_alias.py | 77 | 12327 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module maturity/support metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lambda_alias
short_description: Creates, updates or deletes AWS Lambda function aliases.
description:
- This module allows the management of AWS Lambda functions aliases via the Ansible
framework. It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda function
itself and M(lambda_event) to manage event source mappings.
version_added: "2.2"
author: Pierre Jodouin (@pjodouin), Ryan Scott Brown (@ryansb)
options:
function_name:
description:
- The name of the function alias.
required: true
state:
description:
- Describes the desired state.
required: true
default: "present"
choices: ["present", "absent"]
name:
description:
- Name of the function alias.
required: true
aliases: ['alias_name']
description:
description:
- A short, user-defined function alias description.
required: false
version:
description:
- Version associated with the Lambda function alias.
A value of 0 (or omitted parameter) sets the alias to the $LATEST version.
required: false
aliases: ['function_version']
requirements:
- boto3
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
---
# Simple example to create a lambda function and publish a version
- hosts: localhost
gather_facts: no
vars:
state: present
project_folder: /path/to/deployment/package
deployment_package: lambda.zip
account: 123456789012
production_version: 5
tasks:
- name: AWS Lambda Function
lambda:
state: "{{ state | default('present') }}"
name: myLambdaFunction
publish: True
description: lambda function description
code_s3_bucket: package-bucket
code_s3_key: "lambda/{{ deployment_package }}"
local_path: "{{ project_folder }}/{{ deployment_package }}"
runtime: python2.7
timeout: 5
handler: lambda.handler
memory_size: 128
role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole"
- name: show results
debug:
var: lambda_facts
# The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0)
- name: "alias 'Dev' for function {{ lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_facts.FunctionName }}"
name: Dev
description: Development is $LATEST version
# The QA alias will only be created when a new version is published (i.e. not = '$LATEST')
- name: "alias 'QA' for function {{ lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_facts.FunctionName }}"
name: QA
version: "{{ lambda_facts.Version }}"
description: "QA is version {{ lambda_facts.Version }}"
when: lambda_facts.Version != "$LATEST"
# The Prod alias will have a fixed version based on a variable
- name: "alias 'Prod' for function {{ lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_facts.FunctionName }}"
name: Prod
version: "{{ production_version }}"
description: "Production is version {{ production_version }}"
'''
RETURN = '''
---
alias_arn:
description: Full ARN of the function, including the alias
returned: success
type: string
sample: arn:aws:lambda:us-west-2:123456789012:function:myFunction:dev
description:
description: A short description of the alias
returned: success
type: string
sample: The development stage for my hot new app
function_version:
description: The qualifier that the alias refers to
returned: success
type: string
sample: $LATEST
name:
description: The name of the alias assigned
returned: success
type: string
sample: dev
'''
try:
import boto3
from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
class AWSConnection:
    """
    Create the connection object and client objects as required.
    """

    def __init__(self, ansible_obj, resources, boto3=True):
        # NOTE(review): the 'boto3' parameter shadows the imported boto3
        # module inside this method; renaming it would break keyword
        # callers, so it is documented rather than changed.
        try:
            self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=boto3)

            self.resource_client = dict()
            if not resources:
                resources = ['lambda']

            # 'iam' is always added: it is needed for the account id lookup below.
            resources.append('iam')

            for resource in resources:
                aws_connect_kwargs.update(dict(region=self.region,
                                               endpoint=self.endpoint,
                                               conn_type='client',
                                               resource=resource
                                               ))
                self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)

            # if region is not provided, then get default profile/session region
            if not self.region:
                self.region = self.resource_client['lambda'].meta.region_name

        except (ClientError, ParamValidationError, MissingParametersError) as e:
            ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))

        # Best effort: the account id is optional, so any lookup/parse
        # failure just leaves it empty.
        try:
            self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
        except (ClientError, ValueError, KeyError, IndexError):
            self.account_id = ''

    def client(self, resource='lambda'):
        # Return the cached boto3 client for the given resource type.
        return self.resource_client[resource]
def pc(key):
    """Convert a snake_case key to its PascalCase equivalent.

    For example, 'this_function_name' becomes 'ThisFunctionName'.
    """
    return "".join(map(str.capitalize, key.split('_')))
def set_api_params(module, module_params):
    """Build a boto3-style parameter dict from Ansible module parameters.

    Each name in *module_params* that has a truthy value in module.params
    is included under its PascalCase key (see pc()).
    """
    values = ((name, module.params.get(name, None)) for name in module_params)
    return {pc(name): value for name, value in values if value}
def validate_params(module, aws):
    """
    Performs basic parameter validation.

    Normalizes 'function_version' in place: 0 becomes '$LATEST', any other
    value is converted to a string.  Invalid names are reported through
    module.fail_json().

    :param module: Ansible module reference
    :param aws: AWS client connection
    :return:
    """
    function_name = module.params['function_name']

    # validate function name: word characters, hyphen and the colon used in
    # qualified names.  BUGFIX: use a raw string so the backslash escapes
    # reach 're' verbatim (avoids invalid-escape-sequence warnings on
    # newer Pythons).
    if not re.search(r'^[\w\-:]+$', function_name):
        module.fail_json(
            msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
        )
    if len(function_name) > 64:
        module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))

    # if parameter 'function_version' is zero, set it to $LATEST, else convert it to a string
    if module.params['function_version'] == 0:
        module.params['function_version'] = '$LATEST'
    else:
        module.params['function_version'] = str(module.params['function_version'])

    return
def get_lambda_alias(module, aws):
    """
    Returns the lambda function alias if it exists.

    :param module: Ansible module reference
    :param aws: AWS client connection
    :return: alias facts dict, or None if the alias does not exist
    """
    client = aws.client('lambda')

    # set API parameters
    api_params = set_api_params(module, ('function_name', 'name'))

    # check if alias exists and get facts
    try:
        results = client.get_alias(**api_params)
    except (ClientError, ParamValidationError, MissingParametersError) as e:
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            # A missing alias is an expected condition, not a failure.
            results = None
        else:
            module.fail_json(msg='Error retrieving function alias: {0}'.format(e))

    return results
def lambda_alias(module, aws):
    """
    Adds, updates or deletes lambda function aliases.

    :param module: Ansible module reference
    :param aws: AWS client connection
    :return dict: 'changed' flag merged with the AWS response (or the
        previously fetched facts when no API call was made)
    """
    client = aws.client('lambda')
    results = dict()
    changed = False
    current_state = 'absent'
    state = module.params['state']

    facts = get_lambda_alias(module, aws)
    if facts:
        current_state = 'present'

    if state == 'present':
        if current_state == 'present':

            # check if alias has changed -- only version and description can change
            alias_params = ('function_version', 'description')
            for param in alias_params:
                if module.params.get(param) != facts.get(pc(param)):
                    changed = True
                    break

            if changed:
                api_params = set_api_params(module, ('function_name', 'name'))
                api_params.update(set_api_params(module, alias_params))

                if not module.check_mode:
                    try:
                        results = client.update_alias(**api_params)
                    except (ClientError, ParamValidationError, MissingParametersError) as e:
                        module.fail_json(msg='Error updating function alias: {0}'.format(e))

        else:
            # create new function alias
            api_params = set_api_params(module, ('function_name', 'name', 'function_version', 'description'))

            try:
                if not module.check_mode:
                    results = client.create_alias(**api_params)
                # 'changed' is reported even in check mode, as usual for
                # Ansible check runs.
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error creating function alias: {0}'.format(e))

    else:  # state = 'absent'
        if current_state == 'present':
            # delete the function
            api_params = set_api_params(module, ('function_name', 'name'))

            try:
                if not module.check_mode:
                    results = client.delete_alias(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error deleting function alias: {0}'.format(e))

    return dict(changed=changed, **dict(results or facts))
def main():
    """
    Main entry point.

    Builds the Ansible argument spec, validates input and applies the
    requested alias state, exiting through module.exit_json().

    :return dict: ansible facts
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            state=dict(required=False, default='present', choices=['present', 'absent']),
            function_name=dict(required=True, default=None),
            name=dict(required=True, default=None, aliases=['alias_name']),
            function_version=dict(type='int', required=False, default=0, aliases=['version']),
            description=dict(required=False, default=None),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[],
        required_together=[]
    )

    # validate dependencies
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module.')

    aws = AWSConnection(module, ['lambda'])

    validate_params(module, aws)

    results = lambda_alias(module, aws)

    # Results are converted from boto3 CamelCase to Ansible snake_case.
    module.exit_json(**camel_dict_to_snake_dict(results))
# ansible import module(s) kept at ~eof as recommended
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
nlaurens/budgetRapportage | model/users.py | 1 | 4253 | from auth import auth
import model.ordergroup
def protected(**pars):
    """
    .protected()
    Decorator to only allow logged-in users. We use the auth.protect method. Note
    that this method can only be used on the 'top' level function (GET) as it re-
    directs directly and thus fails on nested functions.
    @protected([perm][, captcha_on][, test])
    Decorator for limiting the access to pages.
    'perm' can be either a single permission (string) or a sequence of them.
    'captcha_on' is a Boolean value ('True' or 'False') to turn on or off the
    captcha validation.
    'test' must be a function that takes a user object and returns
    True or False.
    """
    return auth.protected(**pars)
def get_users():
    """
    .get_users()
    Returns a list of all users and their permissions/info
    """
    return auth.get_all_users()
def get_permissions():
    """
    .get_permissions()
    Returns a list of all permissions known to the system and their description
    """
    return auth.get_all_permissions()
def get_username():
    """
    .get_username()
    Returns the name (str) of the logged in user
    """
    return auth.get_user().user_login
def get_permission():
    """
    .get_permission()
    Returns a list of all permissions of the logged in user
    """
    permissions = auth.get_permissions()
    # A single permission may come back as a bare string; normalise to a list.
    return [permissions] if isinstance(permissions, str) else permissions
def check_permission(perm):
    """
    .check_permission(perm)
    input: perm as string or list of strings
    output: True/False
    Checks if the current user has (all) permissions: True, otherwise
    returns False
    """
    return auth.has_perm(perm)
def check_ordergroup(og_file, og_name):
    """
    .check_ordergroup(og_file, og_name)
    input: og_file as string, og_name as string
    output: True/False
    Checks if the user has access to ordergroup 'og_name' in ordergroup
    file 'og_file'; returns True if both match, otherwise False.
    """
    for allowed_file, allowed_group in model.users.ordergroups_allowed():
        group_root = model.ordergroup.load(allowed_file).find(allowed_group)
        for candidate in group_root.list_groups():
            if candidate.name == og_name and allowed_file == og_file:
                return True
    return False
def ordergroups_allowed():
    """
    .ordergroups_allowed()
    input: none
    output: list of (ordergroup-file, ordergroup) tuples the user has
            permission for
    """
    # Permissions of the form 'ordergroup-<file>-<group>' grant access.
    return [__parse_ordergroup_permission(perm)
            for perm in get_permission()
            if perm.startswith('ordergroup')]
def orders_allowed():
    """
    .orders_allowed()
    input: none
    output: List of orders (int) that user has access to based
            on the ordergroups he has permission for.
    """
    orders = []

    # Based on Ordergroups
    for ordergroup, group in ordergroups_allowed():
        ordergroup = model.ordergroup.load(ordergroup).find(group)
        orders.extend(ordergroup.list_orders_recursive().keys())

    # Based on BudgetHolder: permissions of the form 'BH-<holder>'.
    budgetHolders = []
    permissions = get_permission()
    for permission in permissions:
        if permission[:3] == 'BH-':
            budgetHolders.append(permission[3:])
    if budgetHolders:
        orderlist = model.orders.load(BH_load=budgetHolders)
        for order in orderlist.orders:
            # NOTE(review): 'orders' holds keys from list_orders_recursive()
            # while 'ordernummer' is cast to int; if those keys are strings
            # the membership test never matches and duplicates may be
            # appended -- confirm the key type against model.ordergroup.
            if order.ordernummer not in orders:
                orders.append(int(order.ordernummer))

    return orders
def __parse_ordergroup_permission(permission):
    """Split an 'ordergroup-<file>-<group>' permission string.

    Returns (ordergroup_file, group_name); the group name may itself
    contain hyphens, so everything after the file component is re-joined.
    """
    parts = permission.split('-')
    return parts[1], '-'.join(parts[2:])
| mit |
zcbenz/cefode-chromium | chrome/common/extensions/docs/server2/sidenav_data_source_test.py | 6 | 1924 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
from compiled_file_system import CompiledFileSystem
from in_memory_object_store import InMemoryObjectStore
from local_file_system import LocalFileSystem
from sidenav_data_source import SidenavDataSource
class SamplesDataSourceTest(unittest.TestCase):
  """Checks SidenavDataSource level numbering and selection marking."""

  def setUp(self):
    self._base_path = os.path.join(sys.path[0],
                                   'test_data',
                                   'sidenav_data_source')
    self._cache_factory = CompiledFileSystem.Factory(
        LocalFileSystem(self._base_path),
        InMemoryObjectStore('fake_branch'))

  def _CheckLevels(self, items, level=2):
    # Every entry must carry the expected nesting level; children sit
    # exactly one level deeper than their parent.
    for entry in items:
      self.assertEqual(level, entry['level'])
      if 'items' in entry:
        self._CheckLevels(entry['items'], level=level + 1)

  def testLevels(self):
    factory = SidenavDataSource.Factory(self._cache_factory, self._base_path)
    sidenav_json = factory.Create('').get('test')
    self._CheckLevels(sidenav_json)

  def testSelected(self):
    factory = SidenavDataSource.Factory(self._cache_factory, self._base_path)
    sidenav_json = factory.Create('www.b.com').get('test')
    # This will be prettier once JSON is loaded with an OrderedDict.
    for entry in sidenav_json:
      if entry['title'] != 'Jim':
        continue
      self.assertTrue(entry.get('child_selected', False))
      for child in entry['items']:
        if child['title'] == 'B':
          self.assertTrue(child.get('selected', False))
      return
    # If we didn't return already, we should fail.
    self.fail()
# Allow running this test file directly from the command line.
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
mpdevilleres/tbpc_app | tbpc/contract_mgt/urls.py | 1 | 1539 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from .tables_ajax import ContractTableJson, ContractorTableJson, SupportContractTableJson, \
ContractorContactTableJson
from .views import ContractTable, ContractorTable, SupportContractTable, ContractorContactTable
urlpatterns = [
    # Support Contract
    # ------------------------------------------------------------------------------
    url(r'SupportContract/$', SupportContractTable.as_view(), name='support-contract-table'),
    url(r'SupportContract/table-json/$', SupportContractTableJson.as_view(), name='support-contract-table-json'),
    # Contract
    # ------------------------------------------------------------------------------
    url(r'Contract/$', ContractTable.as_view(), name='contract-table'),
    url(r'Contract/table-json/$', ContractTableJson.as_view(), name='contract-table-json'),
    # Contractor Contact (section comment fixed: previously mislabeled 'Contractor')
    # ------------------------------------------------------------------------------
    url(r'ContractorContact/$', ContractorContactTable.as_view(), name='contractor-contact-table'),
    url(r'ContractorContact/table-json/$', ContractorContactTableJson.as_view(), name='contractor-contact-table-json'),
    # Contractor
    # ------------------------------------------------------------------------------
    url(r'Contractor/$', ContractorTable.as_view(), name='contractor-table'),
    url(r'Contractor/table-json/$', ContractorTableJson.as_view(), name='contractor-table-json'),
]
| mit |
susansalkeld/discsongs | discsongs/lib/python2.7/site-packages/setuptools/tests/doctest.py | 332 | 99828 | # Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
# Python 3 compatibility: `basestring` no longer exists; alias it to `str`.
try:
    basestring
except NameError:
    basestring = str
# Very old Pythons lacked the `enumerate` builtin; supply an equivalent.
try:
    enumerate
except NameError:
    def enumerate(seq):
        return zip(range(len(seq)),seq)
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
# Docstrings in this module are written in reStructuredText.
__docformat__ = 'reStructuredText en'
# Public API of this module, grouped to mirror the table of contents below.
__all__ = [
    # 0, Option Flags
    'register_optionflag',
    'DONT_ACCEPT_TRUE_FOR_1',
    'DONT_ACCEPT_BLANKLINE',
    'NORMALIZE_WHITESPACE',
    'ELLIPSIS',
    'IGNORE_EXCEPTION_DETAIL',
    'COMPARISON_FLAGS',
    'REPORT_UDIFF',
    'REPORT_CDIFF',
    'REPORT_NDIFF',
    'REPORT_ONLY_FIRST_FAILURE',
    'REPORTING_FLAGS',
    # 1. Utility Functions
    'is_private',
    # 2. Example & DocTest
    'Example',
    'DocTest',
    # 3. Doctest Parser
    'DocTestParser',
    # 4. Doctest Finder
    'DocTestFinder',
    # 5. Doctest Runner
    'DocTestRunner',
    'OutputChecker',
    'DocTestFailure',
    'UnexpectedException',
    'DebugRunner',
    # 6. Test Functions
    'testmod',
    'testfile',
    'run_docstring_examples',
    # 7. Tester
    'Tester',
    # 8. Unittest Support
    'DocTestSuite',
    'DocFileSuite',
    'set_unittest_reportflags',
    # 9. Debugging Support
    'script_from_examples',
    'testsource',
    'debug_src',
    'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re, types
import unittest, difflib, pdb, tempfile
import warnings
from setuptools.compat import StringIO, execfile, func_code, im_func
# Don't whine about the deprecated is_private function in this
# module's tests (its own docstring contains doctest examples that
# call it, which would otherwise trigger the DeprecationWarning).
warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
                        __name__, 0)
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
# Registry mapping option-flag names to bit values; populated by
# register_optionflag() and consulted when parsing '# doctest:' directives.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
    # Each new flag takes the next unused power of two, so any set of
    # flags can be combined with bitwise OR.
    flag = 1 << len(OPTIONFLAGS_BY_NAME)
    OPTIONFLAGS_BY_NAME[name] = flag
    return flag
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
# Flags that affect how example output is compared against expectations.
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
                    DONT_ACCEPT_BLANKLINE |
                    NORMALIZE_WHITESPACE |
                    ELLIPSIS |
                    IGNORE_EXCEPTION_DETAIL)
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
# Flags that control how failures are reported.
REPORTING_FLAGS = (REPORT_UDIFF |
                   REPORT_CDIFF |
                   REPORT_NDIFF |
                   REPORT_ONLY_FIRST_FAILURE)
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def is_private(prefix, base):
    """prefix, base -> true iff name prefix + "." + base is "private".

    Prefix may be an empty string, and base does not contain a period.
    Prefix is ignored (although functions you write conforming to this
    protocol may make use of it).
    Return true iff base begins with an (at least one) underscore, but
    does not both begin and end with (at least) two underscores.

    >>> is_private("a.b", "my_func")
    False
    >>> is_private("____", "_my_func")
    True
    >>> is_private("someclass", "__init__")
    False
    >>> is_private("sometypo", "__init_")
    True
    >>> is_private("x.y.z", "_")
    True
    >>> is_private("_x.y.z", "__")
    False
    >>> is_private("", "") # senseless but consistent
    False
    """
    warnings.warn("is_private is deprecated; it wasn't useful; "
                  "examine DocTestFinder.find() lists instead",
                  DeprecationWarning, stacklevel=2)
    # Not private at all unless it starts with an underscore.
    if not base.startswith("_"):
        return False
    # Dunder names ('__x__') are special methods, not private names.
    return not (base.startswith("__") and base.endswith("__"))
def _extract_future_flags(globs):
    """
    Return the compiler-flags associated with the future features that
    have been imported into the given namespace (globs).
    """
    flags = 0
    for feature_name in __future__.all_feature_names:
        candidate = globs.get(feature_name)
        # Only count it if the name is bound to the genuine feature
        # object, i.e. it really came from a `from __future__ import`.
        if candidate is getattr(__future__, feature_name):
            flags |= candidate.compiler_flag
    return flags
def _normalize_module(module, depth=2):
    """
    Resolve `module` to an actual module object:
    - a module is returned unchanged;
    - a string is imported and the resulting module returned;
    - None yields the calling module, taken from the stack frame
      `depth` levels up.
    """
    if inspect.ismodule(module):
        return module
    if isinstance(module, basestring):
        return __import__(module, globals(), locals(), ["*"])
    if module is None:
        caller_globals = sys._getframe(depth).f_globals
        return sys.modules[caller_globals['__name__']]
    raise TypeError("Expected a module, string, or None")
def _indent(s, indent=4):
    """
    Prefix every non-blank line of `s` with `indent` space characters
    and return the result.
    """
    # (?m)^(?!$) matches the start of each line that is not empty,
    # so blank lines are left untouched.
    pad = indent * ' '
    return re.sub('(?m)^(?!$)', pad, s)
def _exception_traceback(exc_info):
    """
    Render the (type, value, traceback) triple from sys.exc_info()
    as a traceback string.
    """
    buf = StringIO()
    exc_type, exc_val, exc_tb = exc_info
    traceback.print_exception(exc_type, exc_val, exc_tb, file=buf)
    return buf.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
    # Captures an example's stdout.  `StringIO` is the py2/py3-compatible
    # class imported from setuptools.compat at the top of this module.
    def getvalue(self):
        result = StringIO.getvalue(self)
        # If anything at all was written, make sure there's a trailing
        # newline.  There's no way for the expected output to indicate
        # that a trailing newline is missing.
        if result and not result.endswith("\n"):
            result += "\n"
        # Prevent softspace from screwing up the next test case, in
        # case they used print with a trailing comma in an example.
        # (softspace is a Python 2 file-object attribute.)
        if hasattr(self, "softspace"):
            del self.softspace
        return result
    def truncate(self, size=None):
        # Truncating also discards any pending softspace state.
        StringIO.truncate(self, size)
        if hasattr(self, "softspace"):
            del self.softspace
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
    """
    Return True iff `got` matches `want`, where each ELLIPSIS_MARKER
    ('...') in `want` stands for an arbitrary substring of `got`.
    Essentially the only subtle case:
    >>> _ellipsis_match('aa...aa', 'aaa')
    False
    """
    # Fast path: no ellipsis means plain string equality.
    if want.find(ELLIPSIS_MARKER)==-1:
        return want == got
    # Find "the real" strings.
    ws = want.split(ELLIPSIS_MARKER)
    assert len(ws) >= 2
    # Deal with exact matches possibly needed at one or both ends.
    startpos, endpos = 0, len(got)
    w = ws[0]
    if w:   # starts with exact match
        if got.startswith(w):
            startpos = len(w)
            del ws[0]
        else:
            return False
    w = ws[-1]
    if w:   # ends with exact match
        if got.endswith(w):
            endpos -= len(w)
            del ws[-1]
        else:
            return False
    if startpos > endpos:
        # Exact end matches required more characters than we have, as in
        # _ellipsis_match('aa...aa', 'aaa')
        return False
    # For the rest, we only need to find the leftmost non-overlapping
    # match for each piece.  If there's no overall match that way alone,
    # there's no overall match period.
    for w in ws:
        # w may be '' at times, if there are consecutive ellipses, or
        # due to an ellipsis at the start or end of `want`.  That's OK.
        # Search for an empty string succeeds, and doesn't change startpos.
        startpos = got.find(w, startpos, endpos)
        if startpos < 0:
            return False
        startpos += len(w)
    return True
def _comment_line(line):
    "Return a commented form of the given line"
    stripped = line.rstrip()
    # A blank line becomes a bare '#' with no trailing space.
    return '# ' + stripped if stripped else '#'
class _OutputRedirectingPdb(pdb.Pdb):
    """
    A specialized version of the python debugger that redirects stdout
    to a given stream while interacting with the user.  Stdout is *not*
    redirected when traced code is executed.
    """
    def __init__(self, out):
        self.__out = out
        pdb.Pdb.__init__(self)
    def trace_dispatch(self, *args):
        # Swap stdout for the capture stream only for the duration of
        # Pdb's own dispatch; restore it no matter what happens.
        original_stdout = sys.stdout
        sys.stdout = self.__out
        try:
            return pdb.Pdb.trace_dispatch(self, *args)
        finally:
            sys.stdout = original_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
    """
    Resolve `path` (a '/'-separated relative path) against the directory
    containing `module`, returning an OS-specific path.

    Raises TypeError if `module` is not a module, and ValueError if the
    path is absolute or the base directory cannot be determined.
    """
    if not inspect.ismodule(module):
        raise TypeError('Expected a module: %r' % module)
    if path.startswith('/'):
        raise ValueError('Module-relative files may not have absolute paths')
    # Find the base directory for the path.
    if hasattr(module, '__file__'):
        # A normal module/package
        basedir = os.path.split(module.__file__)[0]
    elif module.__name__ == '__main__':
        # An interactive session.
        if len(sys.argv)>0 and sys.argv[0] != '':
            basedir = os.path.split(sys.argv[0])[0]
        else:
            basedir = os.curdir
    else:
        # A module w/o __file__ (this includes builtins).
        # BUG FIX: the original concatenated the module object onto a
        # str ("..." + module), which raised TypeError instead of the
        # intended ValueError.
        raise ValueError("Can't resolve paths relative to the module %r "
                         "(it has no __file__)" % (module,))
    # Combine the base directory and the path.
    return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
    """
    A single doctest example: a fragment of source code together with
    the output it is expected to produce.  Attributes:

      - source: one Python statement, always newline-terminated.
      - want: expected output (stdout text, or a traceback in case of
        exception); newline-terminated unless empty.
      - exc_msg: the exception message the example should produce (as
        generated by traceback.format_exception_only()), or None when
        no exception is expected; newline-terminated when present.
      - lineno: zero-based line number of this example within the
        DocTest string it came from.
      - indent: number of space characters preceding the example's
        first prompt.
      - options: mapping from option flags to True/False, overriding
        the runner's default optionflags for this example only.
    """
    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
                 options=None):
        # Enforce the newline invariants documented above.
        if not source.endswith('\n'):
            source = source + '\n'
        if want and not want.endswith('\n'):
            want = want + '\n'
        if exc_msg is not None and not exc_msg.endswith('\n'):
            exc_msg = exc_msg + '\n'
        self.source = source
        self.want = want
        self.lineno = lineno
        self.indent = indent
        self.options = {} if options is None else options
        self.exc_msg = exc_msg
class DocTest:
    """
    A collection of doctest examples that should be run in a single
    namespace.  Each `DocTest` defines the following attributes:
      - examples: the list of examples.
      - globs: The namespace (aka globals) that the examples should
        be run in.
      - name: A name identifying the DocTest (typically, the name of
        the object whose docstring this DocTest was extracted from).
      - filename: The name of the file that this DocTest was extracted
        from, or `None` if the filename is unknown.
      - lineno: The line number within filename where this DocTest
        begins, or `None` if the line number is unavailable.  This
        line number is zero-based, with respect to the beginning of
        the file.
      - docstring: The string that the examples were extracted from,
        or `None` if the string is unavailable.
    """
    def __init__(self, examples, globs, name, filename, lineno, docstring):
        """
        Create a new DocTest containing the given examples.  The
        DocTest's globals are initialized with a copy of `globs`.
        """
        # `basestring` is the py2/py3 alias defined at module top.
        assert not isinstance(examples, basestring), \
               "DocTest no longer accepts str; use DocTestParser instead"
        self.examples = examples
        self.docstring = docstring
        # Copy so that running examples cannot mutate the caller's dict.
        self.globs = globs.copy()
        self.name = name
        self.filename = filename
        self.lineno = lineno
    def __repr__(self):
        if len(self.examples) == 0:
            examples = 'no examples'
        elif len(self.examples) == 1:
            examples = '1 example'
        else:
            examples = '%d examples' % len(self.examples)
        return ('<DocTest %s from %s:%s (%s)>' %
                (self.name, self.filename, self.lineno, examples))
    # This lets us sort tests by name:
    # NOTE(review): __cmp__ and the cmp() builtin are Python 2 only;
    # under Python 3 this method is ignored by sorting.
    def __cmp__(self, other):
        if not isinstance(other, DocTest):
            return -1
        return cmp((self.name, self.filename, self.lineno, id(self)),
                   (other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
    """
    A class used to parse strings containing doctest examples.
    """
    # This regular expression is used to find doctest examples in a
    # string.  It defines three groups: `source` is the source code
    # (including leading indentation and prompts); `indent` is the
    # indentation of the first (PS1) line of the source code; and
    # `want` is the expected output (including leading indentation).
    _EXAMPLE_RE = re.compile(r'''
        # Source consists of a PS1 line followed by zero or more PS2 lines.
        (?P<source>
            (?:^(?P<indent> [ ]*) >>> .*) # PS1 line
            (?:\n [ ]* \.\.\. .*)*) # PS2 lines
        \n?
        # Want consists of any non-blank lines that do not start with PS1.
        (?P<want> (?:(?![ ]*$) # Not a blank line
                     (?![ ]*>>>) # Not a line starting with PS1
                     .*$\n? # But any other line
                  )*)
        ''', re.MULTILINE | re.VERBOSE)
    # A regular expression for handling `want` strings that contain
    # expected exceptions.  It divides `want` into three pieces:
    #    - the traceback header line (`hdr`)
    #    - the traceback stack (`stack`)
    #    - the exception message (`msg`), as generated by
    #      traceback.format_exception_only()
    # `msg` may have multiple lines.  We assume/require that the
    # exception message is the first non-indented line starting with a word
    # character following the traceback header line.
    _EXCEPTION_RE = re.compile(r"""
        # Grab the traceback header.  Different versions of Python have
        # said different things on the first traceback line.
        ^(?P<hdr> Traceback\ \(
            (?: most\ recent\ call\ last
            |   innermost\ last
            ) \) :
        )
        \s* $                # toss trailing whitespace on the header.
        (?P<stack> .*?)      # don't blink: absorb stuff until...
        ^ (?P<msg> \w+ .*)   # a line *starts* with alphanum.
        """, re.VERBOSE | re.MULTILINE | re.DOTALL)
    # A callable returning a true value iff its argument is a blank line
    # or contains a single comment.
    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
    def parse(self, string, name='<string>'):
        """
        Divide the given string into examples and intervening text,
        and return them as a list of alternating Examples and strings.
        Line numbers for the Examples are 0-based.  The optional
        argument `name` is a name identifying this string, and is only
        used for error messages.
        """
        string = string.expandtabs()
        # If all lines begin with the same indentation, then strip it.
        min_indent = self._min_indent(string)
        if min_indent > 0:
            string = '\n'.join([l[min_indent:] for l in string.split('\n')])
        output = []
        # charno/lineno track our position in `string` between matches.
        charno, lineno = 0, 0
        # Find all doctest examples in the string:
        for m in self._EXAMPLE_RE.finditer(string):
            # Add the pre-example text to `output`.
            output.append(string[charno:m.start()])
            # Update lineno (lines before this example)
            lineno += string.count('\n', charno, m.start())
            # Extract info from the regexp match.
            (source, options, want, exc_msg) = \
                     self._parse_example(m, name, lineno)
            # Create an Example, and add it to the list.
            if not self._IS_BLANK_OR_COMMENT(source):
                output.append( Example(source, want, exc_msg,
                                       lineno=lineno,
                                       indent=min_indent+len(m.group('indent')),
                                       options=options) )
            # Update lineno (lines inside this example)
            lineno += string.count('\n', m.start(), m.end())
            # Update charno.
            charno = m.end()
        # Add any remaining post-example text to `output`.
        output.append(string[charno:])
        return output
    def get_doctest(self, string, globs, name, filename, lineno):
        """
        Extract all doctest examples from the given string, and
        collect them into a `DocTest` object.
        `globs`, `name`, `filename`, and `lineno` are attributes for
        the new `DocTest` object.  See the documentation for `DocTest`
        for more information.
        """
        return DocTest(self.get_examples(string, name), globs,
                       name, filename, lineno, string)
    def get_examples(self, string, name='<string>'):
        """
        Extract all doctest examples from the given string, and return
        them as a list of `Example` objects.  Line numbers are
        0-based, because it's most common in doctests that nothing
        interesting appears on the same line as opening triple-quote,
        and so the first interesting line is called \"line 1\" then.
        The optional argument `name` is a name identifying this
        string, and is only used for error messages.
        """
        # Filter parse()'s output down to just the Example objects,
        # discarding the intervening text strings.
        return [x for x in self.parse(string, name)
                if isinstance(x, Example)]
    def _parse_example(self, m, name, lineno):
        """
        Given a regular expression match from `_EXAMPLE_RE` (`m`),
        return a pair `(source, want)`, where `source` is the matched
        example's source code (with prompts and indentation stripped);
        and `want` is the example's expected output (with indentation
        stripped).
        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        # Get the example's indentation level.
        indent = len(m.group('indent'))
        # Divide source into lines; check that they're properly
        # indented; and then strip their indentation & prompts.
        source_lines = m.group('source').split('\n')
        self._check_prompt_blank(source_lines, indent, name, lineno)
        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
        # indent+4 skips the indentation plus the 4-char '>>> '/'... ' prompt.
        source = '\n'.join([sl[indent+4:] for sl in source_lines])
        # Divide want into lines; check that it's properly indented; and
        # then strip the indentation.  Spaces before the last newline should
        # be preserved, so plain rstrip() isn't good enough.
        want = m.group('want')
        want_lines = want.split('\n')
        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
            del want_lines[-1]  # forget final newline & spaces after it
        self._check_prefix(want_lines, ' '*indent, name,
                           lineno + len(source_lines))
        want = '\n'.join([wl[indent:] for wl in want_lines])
        # If `want` contains a traceback message, then extract it.
        m = self._EXCEPTION_RE.match(want)
        if m:
            exc_msg = m.group('msg')
        else:
            exc_msg = None
        # Extract options from the source.
        options = self._find_options(source, name, lineno)
        return source, options, want, exc_msg
    # This regular expression looks for option directives in the
    # source code of an example.  Option directives are comments
    # starting with "doctest:".  Warning: this may give false
    # positives for string-literals that contain the string
    # "#doctest:".  Eliminating these false positives would require
    # actually parsing the string; but we limit them by ignoring any
    # line containing "#doctest:" that is *followed* by a quote mark.
    _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
                                      re.MULTILINE)
    def _find_options(self, source, name, lineno):
        """
        Return a dictionary containing option overrides extracted from
        option directives in the given source string.
        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        options = {}
        # (note: with the current regexp, this will match at most once:)
        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
            option_strings = m.group(1).replace(',', ' ').split()
            for option in option_strings:
                # Each directive must be '+FLAG' or '-FLAG' for a
                # registered flag name.
                if (option[0] not in '+-' or
                    option[1:] not in OPTIONFLAGS_BY_NAME):
                    raise ValueError('line %r of the doctest for %s '
                                     'has an invalid option: %r' %
                                     (lineno+1, name, option))
                flag = OPTIONFLAGS_BY_NAME[option[1:]]
                options[flag] = (option[0] == '+')
        if options and self._IS_BLANK_OR_COMMENT(source):
            raise ValueError('line %r of the doctest for %s has an option '
                             'directive on a line with no example: %r' %
                             (lineno, name, source))
        return options
    # This regular expression finds the indentation of every non-blank
    # line in a string.
    _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
    def _min_indent(self, s):
        "Return the minimum indentation of any non-blank line in `s`"
        indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
        if len(indents) > 0:
            return min(indents)
        else:
            return 0
    def _check_prompt_blank(self, lines, indent, name, lineno):
        """
        Given the lines of a source string (including prompts and
        leading indentation), check to make sure that every prompt is
        followed by a space character.  If any line is not followed by
        a space character, then raise ValueError.
        """
        for i, line in enumerate(lines):
            # line[indent:indent+3] is the 3-char prompt ('>>>' or '...');
            # the character right after it must be a blank.
            if len(line) >= indent+4 and line[indent+3] != ' ':
                raise ValueError('line %r of the docstring for %s '
                                 'lacks blank after %s: %r' %
                                 (lineno+i+1, name,
                                  line[indent:indent+3], line))
    def _check_prefix(self, lines, prefix, name, lineno):
        """
        Check that every line in the given list starts with the given
        prefix; if any line does not, then raise a ValueError.
        """
        for i, line in enumerate(lines):
            if line and not line.startswith(prefix):
                raise ValueError('line %r of the docstring for %s has '
                                 'inconsistent leading whitespace: %r' %
                                 (lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
"""
def __init__(self, verbose=False, parser=DocTestParser(),
recurse=True, _namefilter=None, exclude_empty=True):
"""
Create a new doctest finder.
The optional argument `parser` specifies a class or
function that should be used to create new DocTest objects (or
objects that implement the same interface as DocTest). The
signature for this factory function should match the signature
of the DocTest constructor.
If the optional argument `recurse` is false, then `find` will
only examine the given object, and not any contained objects.
If the optional argument `exclude_empty` is false, then `find`
will include tests for objects with empty docstrings.
"""
self._parser = parser
self._verbose = verbose
self._recurse = recurse
self._exclude_empty = exclude_empty
# _namefilter is undocumented, and exists only for temporary backward-
# compatibility support of testmod's deprecated isprivate mess.
self._namefilter = _namefilter
def find(self, obj, name=None, module=None, globs=None,
extraglobs=None):
"""
Return a list of the DocTests that are defined by the given
object's docstring, or by any of its contained objects'
docstrings.
The optional parameter `module` is the module that contains
the given object. If the module is not specified or is None, then
the test finder will attempt to automatically determine the
correct module. The object's module is used:
- As a default namespace, if `globs` is not specified.
- To prevent the DocTestFinder from extracting DocTests
from objects that are imported from other modules.
- To find the name of the file containing the object.
- To help find the line number of the object within its
file.
Contained objects whose module does not match `module` are ignored.
If `module` is False, no attempt to find the module will be made.
This is obscure, of use mostly in tests: if `module` is False, or
is None but cannot be found automatically, then all objects are
considered to belong to the (non-existent) module, so all contained
objects will (recursively) be searched for doctests.
The globals for each DocTest is formed by combining `globs`
and `extraglobs` (bindings in `extraglobs` override bindings
in `globs`). A new copy of the globals dictionary is created
for each DocTest. If `globs` is not specified, then it
defaults to the module's `__dict__`, if specified, or {}
otherwise. If `extraglobs` is not specified, then it defaults
to {}.
"""
# If name was not specified, then extract it from the object.
if name is None:
name = getattr(obj, '__name__', None)
if name is None:
raise ValueError("DocTestFinder.find: name must be given "
"when obj.__name__ doesn't exist: %r" %
(type(obj),))
# Find the module that contains the given object (if obj is
# a module, then module=obj.). Note: this may fail, in which
# case module will be None.
if module is False:
module = None
elif module is None:
module = inspect.getmodule(obj)
# Read the module's source code. This is used by
# DocTestFinder._find_lineno to find the line number for a
# given object's docstring.
try:
file = inspect.getsourcefile(obj) or inspect.getfile(obj)
source_lines = linecache.getlines(file)
if not source_lines:
source_lines = None
except TypeError:
source_lines = None
# Initialize globals, and merge in extraglobs.
if globs is None:
if module is None:
globs = {}
else:
globs = module.__dict__.copy()
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
# Recursively expore `obj`, extracting DocTests.
tests = []
self._find(tests, obj, name, module, source_lines, globs, {})
return tests
def _filter(self, obj, prefix, base):
"""
Return true if the given object should not be examined.
"""
return (self._namefilter is not None and
self._namefilter(prefix, base))
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
return True
elif inspect.isfunction(object):
return module.__dict__ is func_globals(object)
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
return True # [XX] no way not be sure.
else:
raise ValueError("object must be a class or function")
    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.
        """
        if self._verbose:
            print('Finding tests in %s' % name)
        # If we've already processed this object, then ignore it.
        # `seen` maps id(obj) -> 1; this guards against duplicates and
        # against infinite recursion through reference cycles.
        if id(obj) in seen:
            return
        seen[id(obj)] = 1
        # Find a test for this object, and add it to the list of tests.
        # (_get_test returns None when there is no usable docstring.)
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)
        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Check if this contained object should be ignored.
                if self._filter(val, name, valname):
                    continue
                valname = '%s.%s' % (name, valname)
                # Recurse to functions & classes.
                # _from_module keeps us from following imported names
                # into other modules.
                if ((inspect.isfunction(val) or inspect.isclass(val)) and
                    self._from_module(module, val)):
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
        # Look for tests in a module's __test__ dictionary.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in getattr(obj, '__test__', {}).items():
                if not isinstance(valname, basestring):
                    raise ValueError("DocTestFinder.find: __test__ keys "
                                     "must be strings: %r" %
                                     (type(valname),))
                if not (inspect.isfunction(val) or inspect.isclass(val) or
                        inspect.ismethod(val) or inspect.ismodule(val) or
                        isinstance(val, basestring)):
                    raise ValueError("DocTestFinder.find: __test__ values "
                                     "must be strings, functions, methods, "
                                     "classes, or modules: %r" %
                                     (type(val),))
                valname = '%s.__test__.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines,
                           globs, seen)
        # Look for tests in a class's contained objects.
        if inspect.isclass(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Check if this contained object should be ignored.
                if self._filter(val, name, valname):
                    continue
                # Special handling for staticmethod/classmethod:
                # unwrap them to the underlying function before testing.
                if isinstance(val, staticmethod):
                    val = getattr(obj, valname)
                if isinstance(val, classmethod):
                    val = im_func(getattr(obj, valname))
                # Recurse to methods, properties, and nested classes.
                if ((inspect.isfunction(val) or inspect.isclass(val) or
                     isinstance(val, property)) and
                    self._from_module(module, val)):
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, basestring):
docstring = obj
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, basestring):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Find the docstring's location in the file.
lineno = self._find_lineno(obj, source_lines)
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
def _find_lineno(self, obj, source_lines):
"""
Return a line number of the given object's docstring. Note:
this method assumes that the object has a docstring.
"""
lineno = None
# Find the line number for modules.
if inspect.ismodule(obj):
lineno = 0
# Find the line number for classes.
# Note: this could be fooled if a class is defined multiple
# times in a single file.
if inspect.isclass(obj):
if source_lines is None:
return None
pat = re.compile(r'^\s*class\s*%s\b' %
getattr(obj, '__name__', '-'))
for i, line in enumerate(source_lines):
if pat.match(line):
lineno = i
break
# Find the line number for functions & methods.
if inspect.ismethod(obj): obj = im_func(obj)
if inspect.isfunction(obj): obj = func_code(obj)
if inspect.istraceback(obj): obj = obj.tb_frame
if inspect.isframe(obj): obj = obj.f_code
if inspect.iscode(obj):
lineno = getattr(obj, 'co_firstlineno', None)-1
# Find the line number where the docstring starts. Assume
# that it's the first line that begins with a quote mark.
# Note: this could be fooled by a multiline function
# signature, where a continuation line begins with a quote
# mark.
if lineno is not None:
if source_lines is None:
return lineno+1
pat = re.compile('(^|.*:)\s*\w*("|\')')
for lineno in range(lineno, len(source_lines)):
if pat.match(source_lines[lineno]):
return lineno
# We couldn't find the line number.
return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
    """
    A class used to run DocTest test cases, and accumulate statistics.
    The `run` method is used to process a single DocTest case. It
    returns a tuple `(f, t)`, where `t` is the number of test cases
    tried, and `f` is the number of test cases that failed.
    >>> tests = DocTestFinder().find(_TestClass)
    >>> runner = DocTestRunner(verbose=False)
    >>> for test in tests:
    ...     print runner.run(test)
    (0, 2)
    (0, 1)
    (0, 2)
    (0, 2)
    The `summarize` method prints a summary of all the test cases that
    have been run by the runner, and returns an aggregated `(f, t)`
    tuple:
    >>> runner.summarize(verbose=1)
    4 items passed all tests:
       2 tests in _TestClass
       2 tests in _TestClass.__init__
       2 tests in _TestClass.get
       1 tests in _TestClass.square
    7 tests in 4 items.
    7 passed and 0 failed.
    Test passed.
    (0, 7)
    The aggregated number of tried examples and failed examples is
    also available via the `tries` and `failures` attributes:
    >>> runner.tries
    7
    >>> runner.failures
    0
    The comparison between expected outputs and actual outputs is done
    by an `OutputChecker`. This comparison may be customized with a
    number of option flags; see the documentation for `testmod` for
    more information. If the option flags are insufficient, then the
    comparison may also be customized by passing a subclass of
    `OutputChecker` to the constructor.
    The test runner's display output can be controlled in two ways.
    First, an output function (`out`) can be passed to
    `TestRunner.run`; this function will be called with strings that
    should be displayed. It defaults to `sys.stdout.write`. If
    capturing the output is not sufficient, then the display output
    can be also customized by subclassing DocTestRunner, and
    overriding the methods `report_start`, `report_success`,
    `report_unexpected_exception`, and `report_failure`.
    """
    # This divider string is used to separate failure messages, and to
    # separate sections of the summary.
    DIVIDER = "*" * 70

    def __init__(self, checker=None, verbose=None, optionflags=0):
        """
        Create a new test runner.
        Optional keyword arg `checker` is the `OutputChecker` that
        should be used to compare the expected outputs and actual
        outputs of doctest examples.
        Optional keyword arg 'verbose' prints lots of stuff if true,
        only failures if false; by default, it's true iff '-v' is in
        sys.argv.
        Optional argument `optionflags` can be used to control how the
        test runner compares expected output to actual output, and how
        it displays failures. See the documentation for `testmod` for
        more information.
        """
        self._checker = checker or OutputChecker()
        if verbose is None:
            verbose = '-v' in sys.argv
        self._verbose = verbose
        self.optionflags = optionflags
        self.original_optionflags = optionflags
        # Keep track of the examples we've run.
        self.tries = 0
        self.failures = 0
        self._name2ft = {}
        # Create a fake output target for capturing doctest output.
        self._fakeout = _SpoofOut()

    #/////////////////////////////////////////////////////////////////
    # Reporting methods
    #/////////////////////////////////////////////////////////////////

    def report_start(self, out, test, example):
        """
        Report that the test runner is about to process the given
        example. (Only displays a message if verbose=True)
        """
        if self._verbose:
            if example.want:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting:\n' + _indent(example.want))
            else:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting nothing\n')

    def report_success(self, out, test, example, got):
        """
        Report that the given example ran successfully. (Only
        displays a message if verbose=True)
        """
        if self._verbose:
            out("ok\n")

    def report_failure(self, out, test, example, got):
        """
        Report that the given example failed.
        """
        out(self._failure_header(test, example) +
            self._checker.output_difference(example, got, self.optionflags))

    def report_unexpected_exception(self, out, test, example, exc_info):
        """
        Report that the given example raised an unexpected exception.
        """
        out(self._failure_header(test, example) +
            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))

    def _failure_header(self, test, example):
        # Build the 'File "...", line N, in NAME' banner that precedes
        # every failure report.
        out = [self.DIVIDER]
        if test.filename:
            if test.lineno is not None and example.lineno is not None:
                lineno = test.lineno + example.lineno + 1
            else:
                lineno = '?'
            out.append('File "%s", line %s, in %s' %
                       (test.filename, lineno, test.name))
        else:
            out.append('Line %s, in %s' % (example.lineno+1, test.name))
        out.append('Failed example:')
        source = example.source
        out.append(_indent(source))
        return '\n'.join(out)

    #/////////////////////////////////////////////////////////////////
    # DocTest Running
    #/////////////////////////////////////////////////////////////////

    def __run(self, test, compileflags, out):
        """
        Run the examples in `test`. Write the outcome of each example
        with one of the `DocTestRunner.report_*` methods, using the
        writer function `out`. `compileflags` is the set of compiler
        flags that should be used to execute examples. Return a tuple
        `(f, t)`, where `t` is the number of examples tried, and `f`
        is the number of examples that failed. The examples are run
        in the namespace `test.globs`.
        """
        # Keep track of the number of failures and tries.
        failures = tries = 0
        # Save the option flags (since option directives can be used
        # to modify them).
        original_optionflags = self.optionflags
        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
        check = self._checker.check_output
        # Process each example.
        for examplenum, example in enumerate(test.examples):
            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
            # reporting after the first failure.
            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
                     failures > 0)
            # Merge in the example's options.
            self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in example.options.items():
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag
            # Record that we started this example.
            tries += 1
            if not quiet:
                self.report_start(out, test, example)
            # Use a special filename for compile(), so we can retrieve
            # the source code during interactive debugging (see
            # __patched_linecache_getlines).
            filename = '<doctest %s[%d]>' % (test.name, examplenum)
            # Run the example in the given context (globs), and record
            # any exception that gets raised. (But don't intercept
            # keyboard interrupts.)
            try:
                # Don't blink! This is where the user's code gets run.
                exec(compile(example.source, filename, "single",
                             compileflags, 1), test.globs)
                self.debugger.set_continue() # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            except:
                exception = sys.exc_info()
                self.debugger.set_continue() # ==== Example Finished ====
            got = self._fakeout.getvalue()  # the actual output
            self._fakeout.truncate(0)
            outcome = FAILURE   # guilty until proved innocent or insane
            # If the example executed without raising any exceptions,
            # verify its output.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS
            # The example raised an exception: check if it was expected.
            else:
                # Use the info captured inside the `except` clause above:
                # on Python 3, sys.exc_info() is cleared once the handler
                # exits, so re-fetching it here would yield (None,)*3.
                exc_info = exception
                exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
                if not quiet:
                    got += _exception_traceback(exc_info)
                # If `example.exc_msg` is None, then we weren't expecting
                # an exception.
                if example.exc_msg is None:
                    outcome = BOOM
                # We expected an exception: see whether it matches.
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS
                # Another chance if they didn't care about the detail.
                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                    m1 = re.match(r'[^:]*:', example.exc_msg)
                    m2 = re.match(r'[^:]*:', exc_msg)
                    if m1 and m2 and check(m1.group(0), m2.group(0),
                                           self.optionflags):
                        outcome = SUCCESS
            # Report the outcome.
            if outcome is SUCCESS:
                if not quiet:
                    self.report_success(out, test, example, got)
            elif outcome is FAILURE:
                if not quiet:
                    self.report_failure(out, test, example, got)
                failures += 1
            elif outcome is BOOM:
                if not quiet:
                    self.report_unexpected_exception(out, test, example,
                                                     exc_info)
                failures += 1
            else:
                assert False, ("unknown outcome", outcome)
        # Restore the option flags (in case they were modified)
        self.optionflags = original_optionflags
        # Record and return the number of failures and tries.
        self.__record_outcome(test, failures, tries)
        return failures, tries

    def __record_outcome(self, test, f, t):
        """
        Record the fact that the given DocTest (`test`) generated `f`
        failures out of `t` tried examples.
        """
        f2, t2 = self._name2ft.get(test.name, (0,0))
        self._name2ft[test.name] = (f+f2, t+t2)
        self.failures += f
        self.tries += t

    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
                                         r'(?P<name>[\w\.]+)'
                                         r'\[(?P<examplenum>\d+)\]>$')

    def __patched_linecache_getlines(self, filename, module_globals=None):
        # Serve the currently-running example's source for the special
        # '<doctest name[N]>' filenames; otherwise defer to the saved
        # real linecache.getlines (matching its arity).
        m = self.__LINECACHE_FILENAME_RE.match(filename)
        if m and m.group('name') == self.test.name:
            example = self.test.examples[int(m.group('examplenum'))]
            return example.source.splitlines(True)
        elif func_code(self.save_linecache_getlines).co_argcount > 1:
            return self.save_linecache_getlines(filename, module_globals)
        else:
            return self.save_linecache_getlines(filename)

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in `test`, and display the results using the
        writer function `out`.
        The examples are run in the namespace `test.globs`. If
        `clear_globs` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection. If you would like to examine the namespace after
        the test completes, then use `clear_globs=False`.
        `compileflags` gives the set of flags that should be used by
        the Python compiler when running the examples. If not
        specified, then it will default to the set of future-import
        flags that apply to `globs`.
        The output of each example is checked using
        `DocTestRunner.check_output`, and the results are formatted by
        the `DocTestRunner.report_*` methods.
        """
        self.test = test
        if compileflags is None:
            compileflags = _extract_future_flags(test.globs)
        save_stdout = sys.stdout
        if out is None:
            out = save_stdout.write
        sys.stdout = self._fakeout
        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_set_trace = pdb.set_trace
        self.debugger = _OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace
        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines
        try:
            return self.__run(test, compileflags, out)
        finally:
            # Undo all three patches even if an example blew up.
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            linecache.getlines = self.save_linecache_getlines
            if clear_globs:
                test.globs.clear()

    #/////////////////////////////////////////////////////////////////
    # Summarization
    #/////////////////////////////////////////////////////////////////

    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
        the total number of failed examples, and `t` is the total
        number of tried examples.
        The optional `verbose` argument controls how detailed the
        summary is. If the verbosity is not specified, then the
        DocTestRunner's verbosity is used.
        """
        if verbose is None:
            verbose = self._verbose
        notests = []
        passed = []
        failed = []
        totalt = totalf = 0
        for x in self._name2ft.items():
            name, (f, t) = x
            assert f <= t
            totalt += t
            totalf += f
            if t == 0:
                notests.append(name)
            elif f == 0:
                passed.append( (name, t) )
            else:
                failed.append(x)
        if verbose:
            if notests:
                print(len(notests), "items had no tests:")
                notests.sort()
                for thing in notests:
                    print(" ", thing)
            if passed:
                print(len(passed), "items passed all tests:")
                passed.sort()
                for thing, count in passed:
                    print(" %3d tests in %s" % (count, thing))
        if failed:
            print(self.DIVIDER)
            print(len(failed), "items had failures:")
            failed.sort()
            for thing, (f, t) in failed:
                print(" %3d of %3d in %s" % (f, t, thing))
        if verbose:
            print(totalt, "tests in", len(self._name2ft), "items.")
            print(totalt - totalf, "passed and", totalf, "failed.")
        if totalf:
            print("***Test Failed***", totalf, "failures.")
        elif verbose:
            print("Test passed.")
        return totalf, totalt

    #/////////////////////////////////////////////////////////////////
    # Backward compatibility cruft to maintain doctest.master.
    #/////////////////////////////////////////////////////////////////

    def merge(self, other):
        # Fold another runner's per-test (failures, tries) tallies into
        # this runner's, summing where the same test name appears in both.
        d = self._name2ft
        for name, (f, t) in other._name2ft.items():
            if name in d:
                print("*** DocTestRunner.merge: '" + name + "' in both" \
                        " testers; summing outcomes.")
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t
class OutputChecker:
    """
    A class used to check the whether the actual output from a doctest
    example matches the expected output. `OutputChecker` defines two
    methods: `check_output`, which compares a given pair of outputs,
    and returns true if they match; and `output_difference`, which
    returns a string describing the differences between two outputs.
    """

    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`). These strings are
        always considered to match if they are identical; but
        depending on what option flags the test runner is using,
        several non-exact match types are also possible. See the
        documentation for `TestRunner` for more information about
        option flags.
        """
        # Handle the common case first, for efficiency:
        # if they're string-identical, always return true.
        if got == want:
            return True
        # The values True and False replaced 1 and 0 as the return
        # value for boolean comparisons in Python 2.3.
        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
            if (got,want) == ("True\n", "1\n"):
                return True
            if (got,want) == ("False\n", "0\n"):
                return True
        # <BLANKLINE> can be used as a special sequence to signify a
        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
        # (Raw strings below keep \s a regex escape, not an invalid
        # string escape that warns on modern Python.)
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            # Replace <BLANKLINE> in want with a blank line.
            want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
            # If a line in got contains only spaces, then remove the
            # spaces.
            got = re.sub(r'(?m)^\s*?$', '', got)
            if got == want:
                return True
        # This flag causes doctest to ignore any differences in the
        # contents of whitespace strings. Note that this can be used
        # in conjunction with the ELLIPSIS flag.
        if optionflags & NORMALIZE_WHITESPACE:
            got = ' '.join(got.split())
            want = ' '.join(want.split())
            if got == want:
                return True
        # The ELLIPSIS flag says to let the sequence "..." in `want`
        # match any substring in `got`.
        if optionflags & ELLIPSIS:
            if _ellipsis_match(want, got):
                return True
        # We didn't find any match; return false.
        return False

    # Should we do a fancy diff?
    def _do_a_fancy_diff(self, want, got, optionflags):
        # Not unless they asked for a fancy diff.
        if not optionflags & (REPORT_UDIFF |
                              REPORT_CDIFF |
                              REPORT_NDIFF):
            return False
        # If expected output uses ellipsis, a meaningful fancy diff is
        # too hard ... or maybe not. In two real-life failures Tim saw,
        # a diff was a major help anyway, so this is commented out.
        # [todo] _ellipsis_match() knows which pieces do and don't match,
        # and could be the basis for a kick-ass diff in this case.
        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
        ##    return False
        # ndiff does intraline difference marking, so can be useful even
        # for 1-line differences.
        if optionflags & REPORT_NDIFF:
            return True
        # The other diff types need at least a few lines to be helpful.
        return want.count('\n') > 2 and got.count('\n') > 2

    def output_difference(self, example, got, optionflags):
        """
        Return a string describing the differences between the
        expected output for a given example (`example`) and the actual
        output (`got`). `optionflags` is the set of option flags used
        to compare `want` and `got`.
        """
        want = example.want
        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            got = re.sub(r'(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
        # Check if we should use diff.
        if self._do_a_fancy_diff(want, got, optionflags):
            # Split want & got into lines.
            want_lines = want.splitlines(True)  # True == keep line ends
            got_lines = got.splitlines(True)
            # Use difflib to find their differences.
            if optionflags & REPORT_UDIFF:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif optionflags & REPORT_CDIFF:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif optionflags & REPORT_NDIFF:
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                assert 0, 'Bad diff option'
            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            return 'Differences (%s):\n' % kind + _indent(''.join(diff))
        # If we're not using diff, then simply list the expected
        # output followed by the actual output.
        if want and got:
            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
        elif want:
            return 'Expected:\n%sGot nothing\n' % _indent(want)
        elif got:
            return 'Expected nothing\nGot:\n%s' % _indent(got)
        else:
            return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
    """A DocTest example has failed in debugging mode.

    The exception instance has variables:
    - test: the DocTest object being run
    - example: the Example object that failed
    - got: the actual output
    """
    def __init__(self, test, example, got):
        self.test, self.example, self.got = test, example, got

    def __str__(self):
        return str(self.test)
class UnexpectedException(Exception):
    """A DocTest example has encountered an unexpected exception.

    The exception instance has variables:
    - test: the DocTest object being run
    - example: the Example object that failed
    - exc_info: the exception info
    """
    def __init__(self, test, example, exc_info):
        self.test, self.example, self.exc_info = test, example, exc_info

    def __str__(self):
        return str(self.test)
class DebugRunner(DocTestRunner):
    r"""Run doc tests but raise an exception as soon as there is a failure.
    If an unexpected exception occurs, an UnexpectedException is raised.
    It contains the test, the example, and the original exception:
    >>> runner = DebugRunner(verbose=False)
    >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
    ...                                    {}, 'foo', 'foo.py', 0)
    >>> try:
    ...     runner.run(test)
    ... except UnexpectedException, failure:
    ...     pass
    >>> failure.test is test
    True
    >>> failure.example.want
    '42\n'
    >>> exc_info = failure.exc_info
    >>> raise exc_info[0], exc_info[1], exc_info[2]
    Traceback (most recent call last):
    ...
    KeyError
    We wrap the original exception to give the calling application
    access to the test and example information.
    If the output doesn't match, then a DocTestFailure is raised:
    >>> test = DocTestParser().get_doctest('''
    ...      >>> x = 1
    ...      >>> x
    ...      2
    ...      ''', {}, 'foo', 'foo.py', 0)
    >>> try:
    ...    runner.run(test)
    ... except DocTestFailure, failure:
    ...    pass
    DocTestFailure objects provide access to the test:
    >>> failure.test is test
    True
    As well as to the example:
    >>> failure.example.want
    '2\n'
    and the actual output:
    >>> failure.got
    '1\n'
    If a failure or error occurs, the globals are left intact:
    >>> del test.globs['__builtins__']
    >>> test.globs
    {'x': 1}
    >>> test = DocTestParser().get_doctest('''
    ...      >>> x = 2
    ...      >>> raise KeyError
    ...      ''', {}, 'foo', 'foo.py', 0)
    >>> runner.run(test)
    Traceback (most recent call last):
    ...
    UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
    >>> del test.globs['__builtins__']
    >>> test.globs
    {'x': 2}
    But the globals are cleared if there is no error:
    >>> test = DocTestParser().get_doctest('''
    ...      >>> x = 2
    ...      ''', {}, 'foo', 'foo.py', 0)
    >>> runner.run(test)
    (0, 1)
    >>> test.globs
    {}
    """
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        # Run with clear_globs=False so the namespace survives when a
        # DocTestFailure/UnexpectedException propagates out of the base
        # run(); clear it ourselves only on a clean (non-raising) run.
        r = DocTestRunner.run(self, test, compileflags, out, False)
        if clear_globs:
            test.globs.clear()
        return r
    def report_unexpected_exception(self, out, test, example, exc_info):
        # Instead of printing, abort the run by raising immediately.
        raise UnexpectedException(test, example, exc_info)
    def report_failure(self, out, test, example, got):
        # Instead of printing, abort the run by raising immediately.
        raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
master = None
def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
            report=True, optionflags=0, extraglobs=None,
            raise_on_error=False, exclude_empty=False):
    """m=None, name=None, globs=None, verbose=None, isprivate=None,
    report=True, optionflags=0, extraglobs=None, raise_on_error=False,
    exclude_empty=False
    Test examples in docstrings in functions and classes reachable
    from module m (or the current module if m is not supplied), starting
    with m.__doc__. Unless isprivate is specified, private names
    are not skipped.
    Also test examples reachable from dict m.__test__ if it exists and is
    not None. m.__test__ maps names to functions, classes and strings;
    function and class docstrings are tested even if the name is private;
    strings are tested directly, as if they were docstrings.
    Return (#failures, #tests).
    See doctest.__doc__ for an overview.
    Optional keyword arg "name" gives the name of the module; by default
    use m.__name__.
    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use m.__dict__. A copy of this
    dict is actually used for each docstring, so that each docstring's
    examples start with a clean slate.
    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples. By
    default, no extra globals are used. This is new in 2.4.
    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.
    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end. In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).
    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0. This is new in 2.3. Possible values (see the
    docs for details):
    DONT_ACCEPT_TRUE_FOR_1
    DONT_ACCEPT_BLANKLINE
    NORMALIZE_WHITESPACE
    ELLIPSIS
    IGNORE_EXCEPTION_DETAIL
    REPORT_UDIFF
    REPORT_CDIFF
    REPORT_NDIFF
    REPORT_ONLY_FIRST_FAILURE
    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure. This allows failures to be
    post-mortem debugged.
    Deprecated in Python 2.4:
    Optional keyword arg "isprivate" specifies a function used to
    determine whether a name is private. The default function is
    treat all functions as public. Optionally, "isprivate" can be
    set to doctest.is_private to skip over functions marked as private
    using the underscore naming convention; see its docs for details.
    Advanced tomfoolery: testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master. Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary. Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master
    if isprivate is not None:
        warnings.warn("the isprivate argument is deprecated; "
                      "examine DocTestFinder.find() lists instead",
                      DeprecationWarning)
    # If no module was given, then use __main__.
    if m is None:
        # DWA - m will still be None if this wasn't invoked from the command
        # line, in which case the following TypeError is about as good an error
        # as we should expect
        m = sys.modules.get('__main__')
    # Check that we were actually given a module.
    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m,))
    # If no name was given, then use the module's name.
    if name is None:
        name = m.__name__
    # Find, parse, and run all tests in the given module.
    finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
    # raise_on_error swaps in DebugRunner, which raises on the first
    # failure/unexpected exception instead of accumulating statistics.
    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
        runner.run(test)
    if report:
        runner.summarize()
    # Maintain the backward-compatible module-global `master` runner by
    # merging this run's per-test tallies into it.
    if master is None:
        master = runner
    else:
        master.merge(runner)
    return runner.failures, runner.tries
def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False, parser=DocTestParser()):
    """
    Test examples in the given file. Return (#failures, #tests).
    Optional keyword arg "module_relative" specifies how filenames
    should be interpreted:
    - If "module_relative" is True (the default), then "filename"
      specifies a module-relative path. By default, this path is
      relative to the calling module's directory; but if the
      "package" argument is specified, then it is relative to that
      package. To ensure os-independence, "filename" should use
      "/" characters to separate path segments, and should not
      be an absolute path (i.e., it may not begin with "/").
    - If "module_relative" is False, then "filename" specifies an
      os-specific path. The path may be absolute or relative (to
      the current working directory).
    Optional keyword arg "name" gives the name of the test; by default
    use the file's basename.
    Optional keyword argument "package" is a Python package or the
    name of a Python package whose directory should be used as the
    base directory for a module relative filename. If no package is
    specified, then the calling module's directory is used as the base
    directory for module relative filenames. It is an error to
    specify "package" if "module_relative" is False.
    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use {}. A copy of this dict
    is actually used for each docstring, so that each docstring's
    examples start with a clean slate.
    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples. By
    default, no extra globals are used.
    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.
    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end. In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).
    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0. Possible values (see the docs for details):
    DONT_ACCEPT_TRUE_FOR_1
    DONT_ACCEPT_BLANKLINE
    NORMALIZE_WHITESPACE
    ELLIPSIS
    IGNORE_EXCEPTION_DETAIL
    REPORT_UDIFF
    REPORT_CDIFF
    REPORT_NDIFF
    REPORT_ONLY_FIRST_FAILURE
    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure. This allows failures to be
    post-mortem debugged.
    Optional keyword arg "parser" specifies a DocTestParser (or
    subclass) that should be used to extract tests from the files.
    Advanced tomfoolery: testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master. Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary. Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master
    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")
    # Relativize the path
    if module_relative:
        package = _normalize_module(package)
        filename = _module_relative_path(package, filename)
    # If no name was given, then use the file's name.
    if name is None:
        name = os.path.basename(filename)
    # Assemble the globals.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)
    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    # Read the file, convert it to a test, and run it.
    # (Use a context manager so the file is closed even if read() raises.)
    with open(filename) as f:
        s = f.read()
    test = parser.get_doctest(s, globs, name, filename, 0)
    runner.run(test)
    if report:
        runner.summarize()
    # Maintain the backward-compatible module-global `master` runner.
    if master is None:
        master = runner
    else:
        master.merge(runner)
    return runner.failures, runner.tries
def run_docstring_examples(f, globs, verbose=False, name="NoName",
                           compileflags=None, optionflags=0):
    """
    Run the doctest examples attached to the object `f`, using `globs`
    as the global namespace.

    `name` is used in failure messages.  If `verbose` is true, output is
    produced even when every example passes.  `compileflags` selects the
    compiler flags used to run the examples (defaulting to the set of
    future-import flags that apply to `globs`), and `optionflags` tweaks
    comparison and reporting exactly as described for `testmod`.
    """
    # Only tests attached to `f` itself are collected (recurse=False);
    # each one is executed through a private runner.
    runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    finder = DocTestFinder(verbose=verbose, recurse=False)
    for test in finder.find(f, name, globs=globs):
        runner.run(test, compileflags=compileflags)
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
    # Pre-2.4 doctest interface, retained only for backwards
    # compatibility.  It wraps a DocTestFinder/DocTestRunner pair behind
    # the old Tester API and warns on construction.

    def __init__(self, mod=None, globs=None, verbose=None,
                 isprivate=None, optionflags=0):
        # stacklevel=2 makes the deprecation warning point at the caller,
        # not at this constructor.
        warnings.warn("class Tester is deprecated; "
                      "use class doctest.DocTestRunner instead",
                      DeprecationWarning, stacklevel=2)
        if mod is None and globs is None:
            raise TypeError("Tester.__init__: must specify mod or globs")
        if mod is not None and not inspect.ismodule(mod):
            raise TypeError("Tester.__init__: mod must be a module; %r" %
                            (mod,))
        if globs is None:
            globs = mod.__dict__
        self.globs = globs
        self.verbose = verbose
        self.isprivate = isprivate
        self.optionflags = optionflags
        self.testfinder = DocTestFinder(_namefilter=isprivate)
        self.testrunner = DocTestRunner(verbose=verbose,
                                        optionflags=optionflags)

    def runstring(self, s, name):
        # Parse `s` as a doctest and run it; returns (#failures, #tries).
        test = DocTestParser().get_doctest(s, self.globs, name, None, None)
        if self.verbose:
            print("Running string", name)
        (f,t) = self.testrunner.run(test)
        if self.verbose:
            print(f, "of", t, "examples failed in string", name)
        return (f,t)

    def rundoc(self, object, name=None, module=None):
        # Run every doctest found in `object`, accumulating
        # (failures, tries) totals across all of them.
        f = t = 0
        tests = self.testfinder.find(object, name, module=module,
                                     globs=self.globs)
        for test in tests:
            (f2, t2) = self.testrunner.run(test)
            (f,t) = (f+f2, t+t2)
        return (f,t)

    def rundict(self, d, name, module=None):
        # Wrap the dict in a synthetic module so the finder can walk it.
        import types
        m = types.ModuleType(name)
        m.__dict__.update(d)
        if module is None:
            module = False
        return self.rundoc(m, name, module)

    def run__test__(self, d, name):
        # Run a __test__-style mapping of names to test strings/objects.
        import types
        m = types.ModuleType(name)
        m.__test__ = d
        return self.rundoc(m, name)

    def summarize(self, verbose=None):
        return self.testrunner.summarize(verbose)

    def merge(self, other):
        # Fold another Tester's results into this one's runner.
        self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
# Module-level default reporting flags applied by DocTestCase when a test
# supplies none of its own; changed via set_unittest_reportflags().
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
    """Sets the unittest option flags.
    The old flag is returned so that a runner could restore the old
    value if it wished to:
    >>> old = _unittest_reportflags
    >>> set_unittest_reportflags(REPORT_NDIFF |
    ...                          REPORT_ONLY_FIRST_FAILURE) == old
    True
    >>> import doctest
    >>> doctest._unittest_reportflags == (REPORT_NDIFF |
    ...                                   REPORT_ONLY_FIRST_FAILURE)
    True
    Only reporting flags can be set:
    >>> set_unittest_reportflags(ELLIPSIS)
    Traceback (most recent call last):
    ...
    ValueError: ('Only reporting flags allowed', 8)
    >>> set_unittest_reportflags(old) == (REPORT_NDIFF |
    ...                                   REPORT_ONLY_FIRST_FAILURE)
    True
    """
    global _unittest_reportflags

    # Any bit outside the REPORTING_FLAGS mask is rejected.
    if flags & ~REPORTING_FLAGS:
        raise ValueError("Only reporting flags allowed", flags)
    previous = _unittest_reportflags
    _unittest_reportflags = flags
    return previous
class DocTestCase(unittest.TestCase):
    # Adapts a single DocTest object to the unittest.TestCase interface.
    # All state is stored under _dt_* names to avoid colliding with
    # unittest's own attributes.

    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
                 checker=None):
        unittest.TestCase.__init__(self)
        self._dt_optionflags = optionflags
        self._dt_checker = checker
        self._dt_test = test
        self._dt_setUp = setUp
        self._dt_tearDown = tearDown

    def setUp(self):
        test = self._dt_test
        if self._dt_setUp is not None:
            # The user hook receives the DocTest so it can seed test.globs.
            self._dt_setUp(test)

    def tearDown(self):
        test = self._dt_test
        if self._dt_tearDown is not None:
            self._dt_tearDown(test)
        # Drop fixtures and break reference cycles created during the run.
        test.globs.clear()

    def runTest(self):
        test = self._dt_test
        old = sys.stdout
        new = StringIO()
        optionflags = self._dt_optionflags
        if not (optionflags & REPORTING_FLAGS):
            # The option flags don't include any reporting flags,
            # so add the default reporting flags
            optionflags |= _unittest_reportflags
        runner = DocTestRunner(optionflags=optionflags,
                               checker=self._dt_checker, verbose=False)
        try:
            runner.DIVIDER = "-"*70
            # Failure details are captured through out=new.write rather
            # than printed; sys.stdout is restored defensively in finally.
            failures, tries = runner.run(
                test, out=new.write, clear_globs=False)
        finally:
            sys.stdout = old
        if failures:
            raise self.failureException(self.format_failure(new.getvalue()))

    def format_failure(self, err):
        # Build a unittest-style failure message with file/line context.
        test = self._dt_test
        if test.lineno is None:
            lineno = 'unknown line number'
        else:
            lineno = '%s' % test.lineno
        lname = '.'.join(test.name.split('.')[-1:])
        return ('Failed doctest test for %s\n'
                ' File "%s", line %s, in %s\n\n%s'
                % (test.name, test.filename, lineno, lname, err)
                )

    def debug(self):
        r"""Run the test case without results and without catching exceptions
        The unit test framework includes a debug method on test cases
        and test suites to support post-mortem debugging. The test code
        is run in such a way that errors are not caught. This way a
        caller can catch the errors and initiate post-mortem debugging.
        The DocTestCase provides a debug method that raises
        UnexpectedException errors if there is an unexpected
        exception:
        >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
        ...                                    {}, 'foo', 'foo.py', 0)
        >>> case = DocTestCase(test)
        >>> try:
        ...     case.debug()
        ... except UnexpectedException, failure:
        ...     pass
        The UnexpectedException contains the test, the example, and
        the original exception:
        >>> failure.test is test
        True
        >>> failure.example.want
        '42\n'
        >>> exc_info = failure.exc_info
        >>> raise exc_info[0], exc_info[1], exc_info[2]
        Traceback (most recent call last):
        ...
        KeyError
        If the output doesn't match, then a DocTestFailure is raised:
        >>> test = DocTestParser().get_doctest('''
        ...      >>> x = 1
        ...      >>> x
        ...      2
        ...      ''', {}, 'foo', 'foo.py', 0)
        >>> case = DocTestCase(test)
        >>> try:
        ...     case.debug()
        ... except DocTestFailure, failure:
        ...     pass
        DocTestFailure objects provide access to the test:
        >>> failure.test is test
        True
        As well as to the example:
        >>> failure.example.want
        '2\n'
        and the actual output:
        >>> failure.got
        '1\n'
        """
        # DebugRunner raises on the first problem instead of recording it.
        self.setUp()
        runner = DebugRunner(optionflags=self._dt_optionflags,
                             checker=self._dt_checker, verbose=False)
        runner.run(self._dt_test)
        self.tearDown()

    def id(self):
        return self._dt_test.name

    def __repr__(self):
        name = self._dt_test.name.split('.')
        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
    __str__ = __repr__

    def shortDescription(self):
        return "Doctest: " + self._dt_test.name
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
                 **options):
    """
    Convert doctest tests for a module to a unittest test suite.
    This converts each documentation string in a module that
    contains doctest tests to a unittest test case. If any of the
    tests in a doc string fail, then the test case fails. An exception
    is raised showing the name of the file containing the test and a
    (sometimes approximate) line number.
    The `module` argument provides the module to be tested. The argument
    can be either a module or a module name.
    If no argument is given, the calling module is used.
    A number of options may be provided as keyword arguments:
    setUp
      A set-up function. This is called before running the
      tests in each file. The setUp function will be passed a DocTest
      object. The setUp function can access the test globals as the
      globs attribute of the test passed.
    tearDown
      A tear-down function. This is called after running the
      tests in each file. The tearDown function will be passed a DocTest
      object. The tearDown function can access the test globals as the
      globs attribute of the test passed.
    globs
      A dictionary containing initial global variables for the tests.
    optionflags
      A set of doctest option flags expressed as an integer.
    """
    if test_finder is None:
        test_finder = DocTestFinder()

    # _normalize_module accepts a module, a dotted name, or None (meaning
    # the calling module, discovered by inspecting the stack).
    module = _normalize_module(module)
    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
    if globs is None:
        globs = module.__dict__
    if not tests:
        # Why do we want to do this? Because it reveals a bug that might
        # otherwise be hidden.
        raise ValueError(module, "has no tests")

    # Sort for deterministic suite ordering.
    tests.sort()
    suite = unittest.TestSuite()
    for test in tests:
        if len(test.examples) == 0:
            continue
        if not test.filename:
            filename = module.__file__
            if filename[-4:] in (".pyc", ".pyo"):
                # Point failure messages at the .py source, not the
                # compiled file.
                filename = filename[:-1]
            test.filename = filename
        suite.addTest(DocTestCase(test, **options))

    return suite
class DocFileCase(DocTestCase):
    """A DocTestCase specialized for tests loaded from a standalone file."""

    def id(self):
        # File-based tests are identified by the dotted test name with the
        # dots flattened to underscores.
        return self._dt_test.name.replace('.', '_')

    def __repr__(self):
        return self._dt_test.filename
    __str__ = __repr__

    def format_failure(self, err):
        # File tests have no meaningful line number, so report line 0.
        test = self._dt_test
        return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
                % (test.name, test.filename, err)
                )
def DocFileTest(path, module_relative=True, package=None,
                globs=None, parser=DocTestParser(), **options):
    """Create a DocFileCase for the doctest file at `path`.

    If `module_relative` is true, `path` is an os-independent path
    resolved against `package` (or the calling module's directory).
    `globs` seeds the test's global namespace, `parser` extracts the
    examples, and remaining `options` are forwarded to DocFileCase.
    Raises ValueError if `package` is given with module_relative=False.
    """
    if globs is None:
        globs = {}

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Relativize the path.
    if module_relative:
        package = _normalize_module(package)
        path = _module_relative_path(package, path)

    # Find the file and read it.  A context manager guarantees the file
    # handle is closed even if read() raises (the original leaked it).
    name = os.path.basename(path)
    with open(path) as f:
        doc = f.read()

    # Convert it to a test, and wrap it in a DocFileCase.
    test = parser.get_doctest(doc, globs, name, path, 0)
    return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
    """A unittest suite for one or more doctest files.
    The path to each doctest file is given as a string; the
    interpretation of that string depends on the keyword argument
    "module_relative".
    A number of options may be provided as keyword arguments:
    module_relative
      If "module_relative" is True, then the given file paths are
      interpreted as os-independent module-relative paths.  By
      default, these paths are relative to the calling module's
      directory; but if the "package" argument is specified, then
      they are relative to that package.  To ensure os-independence,
      "filename" should use "/" characters to separate path
      segments, and may not be an absolute path (i.e., it may not
      begin with "/").
      If "module_relative" is False, then the given file paths are
      interpreted as os-specific paths.  These paths may be absolute
      or relative (to the current working directory).
    package
      A Python package or the name of a Python package whose directory
      should be used as the base directory for module relative paths.
      If "package" is not specified, then the calling module's
      directory is used as the base directory for module relative
      filenames.  It is an error to specify "package" if
      "module_relative" is False.
    setUp
      A set-up function.  This is called before running the
      tests in each file.  The setUp function will be passed a DocTest
      object.  The setUp function can access the test globals as the
      globs attribute of the test passed.
    tearDown
      A tear-down function.  This is called after running the
      tests in each file.  The tearDown function will be passed a DocTest
      object.  The tearDown function can access the test globals as the
      globs attribute of the test passed.
    globs
      A dictionary containing initial global variables for the tests.
    optionflags
      A set of doctest option flags expressed as an integer.
    parser
      A DocTestParser (or subclass) that should be used to extract
      tests from the files.
    """
    # _normalize_module must run at THIS call depth: it inspects the
    # caller's stack frame to guess the package, and letting DocFileTest
    # call it would make it guess the wrong caller.
    if kw.get('module_relative', True):
        kw['package'] = _normalize_module(kw.get('package'))

    suite = unittest.TestSuite()
    suite.addTests(DocFileTest(path, **kw) for path in paths)
    return suite
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
    r"""Extract script from text with examples.
    Converts text with examples to a Python script.  Example input is
    converted to regular code.  Example output and all other words
    are converted to comments:
    >>> text = '''
    ...       Here are examples of simple math.
    ...
    ...           Python has super accurate integer addition
    ...
    ...           >>> 2 + 2
    ...           5
    ...
    ...           And very friendly error messages:
    ...
    ...           >>> 1/0
    ...           To Infinity
    ...           And
    ...           Beyond
    ...
    ...           You can use logic if you want:
    ...
    ...           >>> if 0:
    ...           ...    blah
    ...           ...    blah
    ...           ...
    ...
    ...           Ho hum
    ...           '''
    >>> print script_from_examples(text)
    # Here are examples of simple math.
    #
    #     Python has super accurate integer addition
    #
    2 + 2
    # Expected:
    ## 5
    #
    # And very friendly error messages:
    #
    1/0
    # Expected:
    ## To Infinity
    ## And
    ## Beyond
    #
    # You can use logic if you want:
    #
    if 0:
       blah
       blah
    #
    # Ho hum
    """
    pieces = []
    for chunk in DocTestParser().parse(s):
        if isinstance(chunk, Example):
            # Example source becomes live code (minus its trailing newline).
            pieces.append(chunk.source[:-1])
            # Expected output, if any, is preserved as '##' comments.
            if chunk.want:
                pieces.append('# Expected:')
                pieces.extend('## ' + line
                              for line in chunk.want.split('\n')[:-1])
        else:
            # Narrative text between examples turns into '#' comments.
            pieces.extend(_comment_line(line)
                          for line in chunk.split('\n')[:-1])

    # Trim bare comment markers from both ends.
    while pieces and pieces[-1] == '#':
        pieces.pop()
    while pieces and pieces[0] == '#':
        del pieces[0]

    # Combine the output, and return it.
    return '\n'.join(pieces)
def testsource(module, name):
    """Extract the test sources from a doctest docstring as a script.
    Provide the module (or dotted name of the module) containing the
    test to be debugged and the name (within the module) of the object
    with the doc string with tests to be debugged.
    """
    module = _normalize_module(module)
    # Scan the module's tests for the first one matching `name`.
    for candidate in DocTestFinder().find(module):
        if candidate.name == name:
            return script_from_examples(candidate.docstring)
    raise ValueError(name, "not found in tests")
def debug_src(src, pm=False, globs=None):
    """Debug a single doctest docstring given as the string `src`."""
    # Convert the docstring to a runnable script, then hand it to the
    # script debugger (post-mortem if pm is true).
    debug_script(script_from_examples(src), pm, globs)
def debug_script(src, pm=False, globs=None):
    "Debug a test script. `src` is the script, as a string."
    import pdb

    # Note that tempfile.NameTemporaryFile() cannot be used. As the
    # docs say, a file so created cannot be opened by name a second time
    # on modern Windows boxes, and execfile() needs to open it.
    # tempfile.mkstemp() replaces the original insecure mktemp(): the
    # file is created atomically, closing the race where another process
    # could claim the predicted name first.
    fd, srcfilename = tempfile.mkstemp(".py", "doctestdebug")
    f = os.fdopen(fd, 'w')
    try:
        f.write(src)
    finally:
        f.close()

    try:
        if globs:
            globs = globs.copy()   # don't mutate the caller's dict
        else:
            globs = {}

        if pm:
            # Post-mortem mode: run to completion, then drop into pdb at
            # the point of any uncaught exception.
            try:
                execfile(srcfilename, globs, globs)
            except:
                print(sys.exc_info()[1])
                pdb.post_mortem(sys.exc_info()[2])
        else:
            # Note that %r is vital here. '%s' instead can, e.g., cause
            # backslashes to get treated as metacharacters on Windows.
            pdb.run("execfile(%r)" % srcfilename, globs, globs)
    finally:
        os.remove(srcfilename)
def debug(module, name, pm=False):
    """Debug a single doctest docstring.
    Provide the module (or dotted name of the module) containing the
    test to be debugged and the name (within the module) of the object
    with the docstring with tests to be debugged.
    """
    # Resolve the module first so its __dict__ can serve as globals.
    module = _normalize_module(module)
    debug_script(testsource(module, name), pm, module.__dict__)
######################################################################
## 10. Example Usage
######################################################################
class _TestClass:
    """
    A pointless class, for sanity-checking of docstring testing.
    Methods:
        square()
        get()
    >>> _TestClass(13).get() + _TestClass(-12).get()
    1
    >>> hex(_TestClass(13).square().get())
    '0xa9'
    """
    # The docstrings below ARE the module's self-test data: testmod()
    # executes their examples, so they must not be edited casually.

    def __init__(self, val):
        """val -> _TestClass object with associated value val.
        >>> t = _TestClass(123)
        >>> print t.get()
        123
        """
        self.val = val

    def square(self):
        """square() -> square TestClass's associated value
        >>> _TestClass(13).square().get()
        169
        """
        # Returns self so calls can be chained, as the class doctest does.
        self.val = self.val ** 2
        return self

    def get(self):
        """get() -> return TestClass's associated value.
        >>> x = _TestClass(-42)
        >>> print x.get()
        -42
        """
        return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print 'foo\n\nbar\n'
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print range(1000) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print range(30) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
    # Run this module's own doctests through the unittest machinery.
    unittest.TextTestRunner().run(DocTestSuite())

if __name__ == "__main__":
    _test()
| mit |
magyarm/bluetooth-next | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
# Constants mirroring <linux/futex.h>: futex operation codes and flag bits.
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
# Mask that strips the flag bits, leaving just the futex command.
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
# Nanoseconds per second, for converting perf's (secs, nsecs) timestamps.
NSECS_PER_SEC = 1000000000

def avg(total, n):
    """Return the mean of a running total over n samples."""
    return total / n

def nsecs(secs, nsecs):
    """Combine a (seconds, nanoseconds) pair into total nanoseconds."""
    return secs * NSECS_PER_SEC + nsecs

def nsecs_secs(nsecs):
    """Return the whole-seconds part of a nanosecond count.

    Floor division keeps the result integral under Python 3, where '/'
    would yield a float and defeat the '%u' formatting in nsecs_str().
    """
    return nsecs // NSECS_PER_SEC

def nsecs_nsecs(nsecs):
    """Return the sub-second remainder (in ns) of a nanosecond count."""
    return nsecs % NSECS_PER_SEC

def nsecs_str(nsecs):
    """Format nanoseconds as 'SSSSS.NNNNNNNNN', e.g. '    1.500000007'.

    The stray trailing comma in the original made this return a 1-tuple
    instead of the string; it also shadowed the builtin `str`.
    """
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    """Accumulate (min, max, mean, count) statistics for `key` in `dict`.

    Each entry maps key -> (min, max, mean, count).  `has_key` was
    replaced with `in` (Python 3 removed has_key), and the mean is now a
    true running mean: the old `(avg + value) / 2` over-weighted the most
    recent sample.
    """
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        vmin, vmax, vavg, count = dict[key]
        if value < vmin:
            vmin = value
        if value > vmax:
            vmax = value
        # Incremental mean over all count+1 samples.
        vavg = (vavg * count + value) / (count + 1)
        dict[key] = (vmin, vmax, vavg, count + 1)
def clear_term():
    # ANSI escape sequence: cursor home (ESC[H) then erase screen (ESC[2J).
    print("\x1b[H\x1b[2J")
# One-shot guard so the "install audit-libs-python" hint prints only once.
audit_package_warned = False

try:
    import audit
    # Map uname machine names to audit's machine-type constants so syscall
    # numbers can be translated for the host architecture.
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64' : audit.MACH_IA64,
        'ppc' : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390' : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386' : audit.MACH_X86,
        'i586' : audit.MACH_X86,
        'i686' : audit.MACH_X86,
    }
    try:
        # MACH_ARMEB only exists in newer audit bindings.
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except:
        pass
    machine_id = machine_to_id[os.uname()[4]]
except:
    # audit bindings missing (or unknown machine): warn once and let
    # syscall_name() fall back to numeric ids.  NOTE: this file is
    # Python 2 -- the statement below is py2 print syntax.
    if not audit_package_warned:
        audit_package_warned = True
        print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
    # Translate a syscall number to its name.  The bare except is
    # deliberate: `audit`/`machine_id` may be undefined when the audit
    # bindings are missing, in which case the numeric id is returned.
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except:
        return str(id)
def strerror(nr):
    """Map an errno value (sign ignored) to its symbolic name, e.g. 'ENOENT'."""
    code = errno.errorcode.get(abs(nr))
    return code if code is not None else "Unknown %d errno" % nr
| gpl-2.0 |
dyrock/trafficserver | tests/gold_tests/cont_schedule/schedule_on_thread.test.py | 1 | 2510 | '''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
Test.Summary = 'Test TSContScheduleOnThread API'
Test.ContinueOnFail = True

# Define default ATS
ts = Test.MakeATSProcess('ts')
server = Test.MakeOriginServer('server')

Test.testName = ''
# Canned origin transaction: a GET for / answered with an empty 200.
request_header = {
    'headers': 'GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n',
    'timestamp': '1469733493.993',
    'body': ''
}
response_header = {
    'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n',
    'timestamp': '1469733493.993',
    'body': ''
}
server.addResponse("sessionfile.log", request_header, response_header)

# Pin thread counts (no autoconfig, 2 task threads) so the plugin's
# scheduling output is deterministic, and enable its debug tag so the
# plugin's diagnostics land in the traffic.out stream checked below.
ts.Disk.records_config.update({
    'proxy.config.exec_thread.autoconfig': 0,
    'proxy.config.exec_thread.autoconfig.scale': 1.5,
    'proxy.config.exec_thread.limit': 32,
    'proxy.config.accept_threads': 1,
    'proxy.config.task_threads': 2,
    'proxy.config.diags.debug.enabled': 1,
    'proxy.config.diags.debug.tags': 'TSContSchedule_test'
})

ts.Disk.remap_config.AddLine(
    'map / http://127.0.0.1:{0}'.format(server.Variables.Port)
)

# Load plugin
Test.PreparePlugin(os.path.join(Test.Variables.AtsTestToolsDir, 'plugins', 'cont_schedule.cc'), ts, 'thread')

# www.example.com Host
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://www.example.com" -H "Proxy-Connection: Keep-Alive" --verbose'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.StartBefore(ts)
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.Streams.stderr = 'gold/http_200.gold'
tr.StillRunningAfter = ts
tr.StillRunningAfter = server

# Check Plugin Results
ts.Streams.All = "gold/schedule_on_thread.gold"
ts.Streams.All += Testers.ExcludesExpression('fail', 'should not contain "fail"')
| apache-2.0 |
fyndsi/Django-facebook | facebook_example/facebook_example/urls.py | 21 | 1529 | try:
from django.conf.urls import include, patterns, url
except ImportError:
from django.conf.urls.defaults import include, patterns, url
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# URL routing for the example project.  Facebook auth endpoints come from
# django_facebook; the /accounts/ URLs depend on the configured MODE.
urlpatterns = patterns('',
    # facebook and registration urls
    (r'^facebook/', include('django_facebook.urls')),
    (r'^accounts/', include('django_facebook.auth_urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    (r'^admin/', include(admin.site.urls)),
)

if settings.MODE == 'userena':
    urlpatterns += patterns('',
        (r'^accounts/', include('userena.urls')),
    )
elif settings.MODE == 'django_registration':
    urlpatterns += patterns('',
        (r'^accounts/', include(
            'registration.backends.default.urls')),
    )

if settings.DEBUG:
    # Serve uploaded media directly -- development only.
    urlpatterns += patterns('',
        url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
            'document_root': settings.MEDIA_ROOT,
        }),
    )
| bsd-3-clause |
visualputty/Landing-Lights | django/contrib/contenttypes/generic.py | 155 | 17218 | """
Classes allowing "generic" relations through ContentType and object-id fields.
"""
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db.models import signals
from django.db import models, router, DEFAULT_DB_ALIAS
from django.db.models.fields.related import RelatedField, Field, ManyToManyRel
from django.db.models.loading import get_model
from django.forms import ModelForm
from django.forms.models import BaseModelFormSet, modelformset_factory, save_instance
from django.contrib.admin.options import InlineModelAdmin, flatten_fieldsets
from django.utils.encoding import smart_unicode
from django.utils.functional import curry
from django.contrib.contenttypes.models import ContentType
class GenericForeignKey(object):
    """
    Provides a generic relation to any object through content-type/object-id
    fields.
    """
    def __init__(self, ct_field="content_type", fk_field="object_id"):
        # Names of the concrete model fields that store the ContentType FK
        # and the target object's primary key.
        self.ct_field = ct_field
        self.fk_field = fk_field

    def contribute_to_class(self, cls, name):
        self.name = name
        self.model = cls
        # Per-instance attribute used to memoize the resolved object.
        self.cache_attr = "_%s_cache" % name
        cls._meta.add_virtual_field(self)

        # For some reason I don't totally understand, using weakrefs here doesn't work.
        signals.pre_init.connect(self.instance_pre_init, sender=cls, weak=False)

        # Connect myself as the descriptor for this field
        setattr(cls, name, self)

    def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
        """
        Handles initializing an object with the generic FK instead of
        content-type/object-id fields.
        """
        if self.name in kwargs:
            value = kwargs.pop(self.name)
            kwargs[self.ct_field] = self.get_content_type(obj=value)
            kwargs[self.fk_field] = value._get_pk_val()

    def get_content_type(self, obj=None, id=None, using=None):
        # Convenience function using get_model avoids a circular import when
        # using this model
        ContentType = get_model("contenttypes", "contenttype")
        if obj:
            return ContentType.objects.db_manager(obj._state.db).get_for_model(obj)
        elif id:
            return ContentType.objects.db_manager(using).get_for_id(id)
        else:
            # This should never happen. I love comments like this, don't you?
            raise Exception("Impossible arguments to GFK.get_content_type!")

    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        try:
            # Fast path: previously resolved object cached on the instance.
            return getattr(instance, self.cache_attr)
        except AttributeError:
            rel_obj = None

            # Make sure to use ContentType.objects.get_for_id() to ensure that
            # lookups are cached (see ticket #5570). This takes more code than
            # the naive ``getattr(instance, self.ct_field)``, but has better
            # performance when dealing with GFKs in loops and such.
            f = self.model._meta.get_field(self.ct_field)
            ct_id = getattr(instance, f.get_attname(), None)
            if ct_id:
                ct = self.get_content_type(id=ct_id, using=instance._state.db)
                try:
                    rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
                except ObjectDoesNotExist:
                    # A dangling generic FK resolves to None rather than raising.
                    pass
            setattr(instance, self.cache_attr, rel_obj)
            return rel_obj

    def __set__(self, instance, value):
        if instance is None:
            # NOTE(review): `self.related` is never assigned on this class, so
            # building this message would itself raise AttributeError; the
            # branch appears unreachable since __set__ is only invoked on
            # instances -- confirm before relying on the message text.
            raise AttributeError(u"%s must be accessed via instance" % self.related.opts.object_name)

        # Setting the virtual field writes both underlying columns and
        # refreshes the cache.
        ct = None
        fk = None
        if value is not None:
            ct = self.get_content_type(obj=value)
            fk = value._get_pk_val()

        setattr(instance, self.ct_field, ct)
        setattr(instance, self.fk_field, fk)
        setattr(instance, self.cache_attr, value)
class GenericRelation(RelatedField, Field):
    """Provides an accessor to generic related objects (e.g. comments)"""
    def __init__(self, to, **kwargs):
        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        # Model this relation as a symmetrical M2M-style rel to `to`.
        kwargs['rel'] = GenericRel(to,
                            related_name=kwargs.pop('related_name', None),
                            limit_choices_to=kwargs.pop('limit_choices_to', None),
                            symmetrical=kwargs.pop('symmetrical', True))

        # Override content-type/object-id field names on the related class
        self.object_id_field_name = kwargs.pop("object_id_field", "object_id")
        self.content_type_field_name = kwargs.pop("content_type_field", "content_type")

        # The relation is virtual: never editable or serialized, and it
        # creates no database column of its own.
        kwargs['blank'] = True
        kwargs['editable'] = False
        kwargs['serialize'] = False
        Field.__init__(self, **kwargs)

    def get_choices_default(self):
        return Field.get_choices(self, include_blank=False)

    def value_to_string(self, obj):
        qs = getattr(obj, self.name).all()
        return smart_unicode([instance._get_pk_val() for instance in qs])

    # M2M-protocol methods below map the generic relation onto the related
    # model's own table/columns so the join machinery can treat it like a
    # ManyToManyField.
    def m2m_db_table(self):
        return self.rel.to._meta.db_table

    def m2m_column_name(self):
        return self.object_id_field_name

    def m2m_reverse_name(self):
        return self.rel.to._meta.pk.column

    def m2m_target_field_name(self):
        return self.model._meta.pk.name

    def m2m_reverse_target_field_name(self):
        return self.rel.to._meta.pk.name

    def contribute_to_class(self, cls, name):
        super(GenericRelation, self).contribute_to_class(cls, name)

        # Save a reference to which model this class is on for future use
        self.model = cls

        # Add the descriptor for the m2m relation
        setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self))

    def contribute_to_related_class(self, cls, related):
        pass

    def set_attributes_from_rel(self):
        pass

    def get_internal_type(self):
        return "ManyToManyField"

    def db_type(self, connection):
        # Since we're simulating a ManyToManyField, in effect, best return the
        # same db_type as well.
        return None

    def extra_filters(self, pieces, pos, negate):
        """
        Return an extra filter to the queryset so that the results are filtered
        on the appropriate content type.
        """
        if negate:
            return []
        ContentType = get_model("contenttypes", "contenttype")
        content_type = ContentType.objects.get_for_model(self.model)
        prefix = "__".join(pieces[:pos + 1])
        return [("%s__%s" % (prefix, self.content_type_field_name),
                 content_type)]

    def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
        """
        Return all objects related to ``objs`` via this ``GenericRelation``.
        """
        return self.rel.to._base_manager.db_manager(using).filter(**{
                "%s__pk" % self.content_type_field_name:
                    ContentType.objects.db_manager(using).get_for_model(self.model).pk,
                "%s__in" % self.object_id_field_name:
                    [obj.pk for obj in objs]
                })
class ReverseGenericRelatedObjectsDescriptor(object):
    """
    This class provides the functionality that makes the related-object
    managers available as attributes on a model class, for fields that have
    multiple "remote" values and have a GenericRelation defined in their model
    (rather than having another model pointed *at* them). In the example
    "article.publications", the publications attribute is a
    ReverseGenericRelatedObjectsDescriptor instance.
    """
    def __init__(self, field):
        self.field = field

    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self

        # This import is done here to avoid circular import importing this module
        from django.contrib.contenttypes.models import ContentType

        # Dynamically create a class that subclasses the related model's
        # default manager.
        rel_model = self.field.rel.to
        superclass = rel_model._default_manager.__class__
        RelatedManager = create_generic_related_manager(superclass)

        qn = connection.ops.quote_name

        # Bind the manager to this instance and to the content-type /
        # object-id columns that implement the generic join.
        manager = RelatedManager(
            model = rel_model,
            instance = instance,
            symmetrical = (self.field.rel.symmetrical and instance.__class__ == rel_model),
            join_table = qn(self.field.m2m_db_table()),
            source_col_name = qn(self.field.m2m_column_name()),
            target_col_name = qn(self.field.m2m_reverse_name()),
            content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(instance),
            content_type_field_name = self.field.content_type_field_name,
            object_id_field_name = self.field.object_id_field_name
        )

        return manager

    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError("Manager must be accessed via instance")

        # Assignment replaces the full related set: clear then re-add.
        manager = self.__get__(instance)
        manager.clear()
        for obj in value:
            manager.add(obj)
def create_generic_related_manager(superclass):
    """
    Factory function for a manager that subclasses 'superclass' (which is a
    Manager) and adds behavior for generic related objects.
    """
    class GenericRelatedObjectManager(superclass):
        def __init__(self, model=None, core_filters=None, instance=None, symmetrical=None,
                     join_table=None, source_col_name=None, target_col_name=None, content_type=None,
                     content_type_field_name=None, object_id_field_name=None):
            super(GenericRelatedObjectManager, self).__init__()
            self.core_filters = core_filters or {}
            self.model = model
            self.content_type = content_type
            self.symmetrical = symmetrical
            self.instance = instance
            # ``join_table`` is accepted for signature compatibility but is
            # effectively ignored: the original code assigned the argument and
            # then immediately overwrote it with the model's own table name.
            # The dead store has been removed; behavior is unchanged.
            self.join_table = model._meta.db_table
            self.source_col_name = source_col_name
            self.target_col_name = target_col_name
            self.content_type_field_name = content_type_field_name
            self.object_id_field_name = object_id_field_name
            self.pk_val = self.instance._get_pk_val()

        def get_query_set(self):
            """Return related objects filtered by content type and object id."""
            db = self._db or router.db_for_read(self.model, instance=self.instance)
            query = {
                '%s__pk' % self.content_type_field_name : self.content_type.id,
                '%s__exact' % self.object_id_field_name : self.pk_val,
            }
            return superclass.get_query_set(self).using(db).filter(**query)

        def add(self, *objs):
            # Adding points each object's generic FK at our instance and
            # saves it; there is no intermediate join table.
            for obj in objs:
                if not isinstance(obj, self.model):
                    raise TypeError("'%s' instance expected" % self.model._meta.object_name)
                setattr(obj, self.content_type_field_name, self.content_type)
                setattr(obj, self.object_id_field_name, self.pk_val)
                obj.save()
        add.alters_data = True

        def remove(self, *objs):
            # With no join table, "removing" a related object deletes it.
            db = router.db_for_write(self.model, instance=self.instance)
            for obj in objs:
                obj.delete(using=db)
        remove.alters_data = True

        def clear(self):
            # Deletes every related object (see remove()).
            db = router.db_for_write(self.model, instance=self.instance)
            for obj in self.all():
                obj.delete(using=db)
        clear.alters_data = True

        def create(self, **kwargs):
            # Fill in the generic FK fields before delegating to the
            # superclass create() on the correct database.
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
        create.alters_data = True

    return GenericRelatedObjectManager
class GenericRel(ManyToManyRel):
    """Relation metadata holder used by GenericRelation fields.

    Mirrors ManyToManyRel, but a generic relation never goes through an
    explicit intermediate model, so ``through`` is always None.
    """
    def __init__(self, to, related_name=None, limit_choices_to=None, symmetrical=True):
        self.to = to
        self.related_name = related_name
        self.symmetrical = symmetrical
        # Generic relations are always many-valued.
        self.multiple = True
        self.through = None
        self.limit_choices_to = limit_choices_to or {}
class BaseGenericInlineFormSet(BaseModelFormSet):
    """
    A formset for generic inline objects to a parent.
    """
    def __init__(self, data=None, files=None, instance=None, save_as_new=None,
                 prefix=None, queryset=None):
        # NOTE(review): ``save_as_new`` is accepted but never referenced in
        # this method -- confirm whether any caller relies on it.
        # Avoid a circular import.
        from django.contrib.contenttypes.models import ContentType
        opts = self.model._meta
        self.instance = instance
        # Unique relation name built from the model identity and the
        # generic FK field pair.
        self.rel_name = '-'.join((
            opts.app_label, opts.object_name.lower(),
            self.ct_field.name, self.ct_fk_field.name,
        ))
        # An unsaved (or absent) parent has nothing related to it yet, so
        # start from an empty queryset.
        if self.instance is None or self.instance.pk is None:
            qs = self.model._default_manager.none()
        else:
            if queryset is None:
                queryset = self.model._default_manager
            # Restrict to objects generically related to the parent.
            qs = queryset.filter(**{
                self.ct_field.name: ContentType.objects.get_for_model(self.instance),
                self.ct_fk_field.name: self.instance.pk,
            })
        super(BaseGenericInlineFormSet, self).__init__(
            queryset=qs, data=data, files=files,
            prefix=prefix
        )

    #@classmethod
    def get_default_prefix(cls):
        """Build the default form prefix from model and generic FK names."""
        opts = cls.model._meta
        return '-'.join((opts.app_label, opts.object_name.lower(),
                         cls.ct_field.name, cls.ct_fk_field.name,
                         ))
    get_default_prefix = classmethod(get_default_prefix)

    def save_new(self, form, commit=True):
        """Save a new related object with its generic FK pre-filled."""
        # Avoid a circular import.
        from django.contrib.contenttypes.models import ContentType
        kwargs = {
            self.ct_field.get_attname(): ContentType.objects.get_for_model(self.instance).pk,
            self.ct_fk_field.get_attname(): self.instance.pk,
        }
        new_obj = self.model(**kwargs)
        return save_instance(form, new_obj, commit=commit)
def generic_inlineformset_factory(model, form=ModelForm,
                                  formset=BaseGenericInlineFormSet,
                                  ct_field="content_type", fk_field="object_id",
                                  fields=None, exclude=None,
                                  extra=3, can_order=False, can_delete=True,
                                  max_num=None,
                                  formfield_callback=lambda f: f.formfield()):
    """
    Returns a ``GenericInlineFormSet`` for the given kwargs.

    You must provide ``ct_field`` and ``fk_field`` if they are different
    from the defaults ``content_type`` and ``object_id`` respectively.
    """
    opts = model._meta
    # Avoid a circular import.
    from django.contrib.contenttypes.models import ContentType
    # if there is no field called `ct_field` let the exception propagate
    ct_field = opts.get_field(ct_field)
    if not isinstance(ct_field, models.ForeignKey) or ct_field.rel.to != ContentType:
        # Fixed: the original message blamed "fk_name", but the field being
        # validated here is ``ct_field``.
        raise Exception("ct_field '%s' is not a ForeignKey to ContentType" % ct_field)
    fk_field = opts.get_field(fk_field)  # let the exception propagate
    # The generic FK pair is filled in automatically by the formset, so it
    # is always excluded from the generated form.
    if exclude is not None:
        exclude = list(exclude)
        exclude.extend([ct_field.name, fk_field.name])
    else:
        exclude = [ct_field.name, fk_field.name]
    FormSet = modelformset_factory(model, form=form,
                                   formfield_callback=formfield_callback,
                                   formset=formset,
                                   extra=extra, can_delete=can_delete, can_order=can_order,
                                   fields=fields, exclude=exclude, max_num=max_num)
    # Stash the resolved field objects on the formset class; the formset
    # reads them as ``self.ct_field`` / ``self.ct_fk_field``.
    FormSet.ct_field = ct_field
    FormSet.ct_fk_field = fk_field
    return FormSet
class GenericInlineModelAdmin(InlineModelAdmin):
    # Defaults matching the conventional generic-FK field names.
    ct_field = "content_type"
    ct_fk_field = "object_id"
    formset = BaseGenericInlineFormSet

    def get_formset(self, request, obj=None):
        """Return the generic inline formset class for this inline."""
        if self.declared_fieldsets:
            fields = flatten_fieldsets(self.declared_fieldsets)
        else:
            fields = None
        if self.exclude is None:
            exclude = []
        else:
            exclude = list(self.exclude)
        # Read-only fields cannot be edited, so keep them out of the form.
        exclude.extend(self.get_readonly_fields(request, obj))
        # Normalize an empty list to None so the factory applies no
        # explicit exclusion.
        exclude = exclude or None
        defaults = {
            "ct_field": self.ct_field,
            "fk_field": self.ct_fk_field,
            "form": self.form,
            "formfield_callback": curry(self.formfield_for_dbfield, request=request),
            "formset": self.formset,
            "extra": self.extra,
            "can_delete": self.can_delete,
            "can_order": False,
            "fields": fields,
            "max_num": self.max_num,
            "exclude": exclude
        }
        return generic_inlineformset_factory(self.model, **defaults)
class GenericStackedInline(GenericInlineModelAdmin):
    # Generic inline rendered with the stacked (one field per row) template.
    template = 'admin/edit_inline/stacked.html'
class GenericTabularInline(GenericInlineModelAdmin):
    # Generic inline rendered with the compact tabular template.
    template = 'admin/edit_inline/tabular.html'
"""
Tests for the stats.mstats module (support for masked arrays)
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy import nan
import numpy.ma as ma
from numpy.ma import masked, nomask
import scipy.stats.mstats as mstats
from scipy import stats
from common_tests import check_named_results
from numpy.testing import TestCase, run_module_suite
from numpy.testing.decorators import skipif
from numpy.ma.testutils import (assert_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_almost_equal_nulp, assert_,
assert_allclose, assert_raises)
class TestMquantiles(TestCase):
    """Tests for mstats.mquantiles."""

    def test_mquantiles_limit_keyword(self):
        # Regression test for Trac ticket #867: the ``limit`` keyword must
        # exclude out-of-range sentinel values (-999.) from the computation.
        data = np.array([[6., 7., 1.],
                         [47., 15., 2.],
                         [49., 36., 3.],
                         [15., 39., 4.],
                         [42., 40., -999.],
                         [41., 41., -999.],
                         [7., -999., -999.],
                         [39., -999., -999.],
                         [43., -999., -999.],
                         [40., -999., -999.],
                         [36., -999., -999.]])
        expected = [[19.2, 14.6, 1.45],
                    [40.0, 37.5, 2.5],
                    [42.8, 40.05, 3.55]]
        computed = mstats.mquantiles(data, axis=0, limit=(0, 50))
        assert_almost_equal(computed, expected)
class TestGMean(TestCase):
    # Geometric mean on masked arrays: masked entries must be excluded
    # from the product.
    def test_1D(self):
        a = (1,2,3,4)
        actual = mstats.gmean(a)
        desired = np.power(1*2*3*4,1./4.)
        assert_almost_equal(actual, desired, decimal=14)

        desired1 = mstats.gmean(a,axis=-1)
        assert_almost_equal(actual, desired1, decimal=14)
        assert_(not isinstance(desired1, ma.MaskedArray))

        # Masked last element: mean is taken over the first three only.
        a = ma.array((1,2,3,4),mask=(0,0,0,1))
        actual = mstats.gmean(a)
        desired = np.power(1*2*3,1./3.)
        assert_almost_equal(actual, desired,decimal=14)
        desired1 = mstats.gmean(a,axis=-1)
        assert_almost_equal(actual, desired1, decimal=14)

    @skipif(not hasattr(np, 'float96'), 'cannot find float96 so skipping')
    def test_1D_float96(self):
        # float96 exists only on some platforms; skipped elsewhere.
        a = ma.array((1,2,3,4), mask=(0,0,0,1))
        actual_dt = mstats.gmean(a, dtype=np.float96)
        desired_dt = np.power(1 * 2 * 3, 1. / 3.).astype(np.float96)
        assert_almost_equal(actual_dt, desired_dt, decimal=14)
        assert_(actual_dt.dtype == desired_dt.dtype)

    def test_2D(self):
        a = ma.array(((1, 2, 3, 4), (1, 2, 3, 4), (1, 2, 3, 4)),
                     mask=((0, 0, 0, 0), (1, 0, 0, 1), (0, 1, 1, 0)))
        actual = mstats.gmean(a)
        desired = np.array((1,2,3,4))
        assert_array_almost_equal(actual, desired, decimal=14)

        desired1 = mstats.gmean(a,axis=0)
        assert_array_almost_equal(actual, desired1, decimal=14)

        # Per-row means, with masked elements dropped row by row.
        actual = mstats.gmean(a, -1)
        desired = ma.array((np.power(1*2*3*4,1./4.),
                            np.power(2*3,1./2.),
                            np.power(1*4,1./2.)))
        assert_array_almost_equal(actual, desired, decimal=14)
class TestHMean(TestCase):
    # Harmonic mean on masked arrays: masked entries are dropped from the
    # sum of reciprocals.
    def test_1D(self):
        a = (1,2,3,4)
        actual = mstats.hmean(a)
        desired = 4. / (1./1 + 1./2 + 1./3 + 1./4)
        assert_almost_equal(actual, desired, decimal=14)
        desired1 = mstats.hmean(ma.array(a),axis=-1)
        assert_almost_equal(actual, desired1, decimal=14)

        a = ma.array((1,2,3,4),mask=(0,0,0,1))
        actual = mstats.hmean(a)
        desired = 3. / (1./1 + 1./2 + 1./3)
        assert_almost_equal(actual, desired,decimal=14)
        desired1 = mstats.hmean(a,axis=-1)
        assert_almost_equal(actual, desired1, decimal=14)

    @skipif(not hasattr(np, 'float96'), 'cannot find float96 so skipping')
    def test_1D_float96(self):
        # float96 exists only on some platforms; skipped elsewhere.
        a = ma.array((1,2,3,4), mask=(0,0,0,1))
        actual_dt = mstats.hmean(a, dtype=np.float96)
        desired_dt = np.asarray(3. / (1./1 + 1./2 + 1./3),
                                dtype=np.float96)
        assert_almost_equal(actual_dt, desired_dt, decimal=14)
        assert_(actual_dt.dtype == desired_dt.dtype)

    def test_2D(self):
        a = ma.array(((1,2,3,4),(1,2,3,4),(1,2,3,4)),
                     mask=((0,0,0,0),(1,0,0,1),(0,1,1,0)))
        actual = mstats.hmean(a)
        desired = ma.array((1,2,3,4))
        assert_array_almost_equal(actual, desired, decimal=14)

        actual1 = mstats.hmean(a,axis=-1)
        desired = (4./(1/1.+1/2.+1/3.+1/4.),
                   2./(1/2.+1/3.),
                   2./(1/1.+1/4.)
                   )
        assert_array_almost_equal(actual1, desired, decimal=14)
class TestRanking(TestCase):
    """Tests for mstats.rankdata on masked arrays."""
    # The original class defined an __init__ that merely delegated to
    # TestCase.__init__ with identical arguments; that redundant override
    # has been removed -- instantiation behavior is unchanged.

    def test_ranking(self):
        # Tied values receive averaged ranks.
        x = ma.array([0,1,1,1,2,3,4,5,5,6,])
        assert_almost_equal(mstats.rankdata(x),
                            [1,3,3,3,5,6,7,8.5,8.5,10])
        # Masked entries rank 0 by default...
        x[[3,4]] = masked
        assert_almost_equal(mstats.rankdata(x),
                            [1,2.5,2.5,0,0,4,5,6.5,6.5,8])
        # ...or get the average of the missing ranks with use_missing=True.
        assert_almost_equal(mstats.rankdata(x, use_missing=True),
                            [1,2.5,2.5,4.5,4.5,4,5,6.5,6.5,8])
        x = ma.array([0,1,5,1,2,4,3,5,1,6,])
        assert_almost_equal(mstats.rankdata(x),
                            [1,3,8.5,3,5,7,6,8.5,3,10])
        # 2-D input: ranked over the flattened array by default, or
        # independently along the requested axis.
        x = ma.array([[0,1,1,1,2], [3,4,5,5,6,]])
        assert_almost_equal(mstats.rankdata(x),
                            [[1,3,3,3,5], [6,7,8.5,8.5,10]])
        assert_almost_equal(mstats.rankdata(x, axis=1),
                            [[1,3,3,3,5], [1,2,3.5,3.5,5]])
        assert_almost_equal(mstats.rankdata(x,axis=0),
                            [[1,1,1,1,1], [2,2,2,2,2,]])
class TestCorr(TestCase):
    # Correlation routines on masked input; reference values come from R
    # or hand computation.
    def test_pearsonr(self):
        # Tests some computations of Pearson's r
        x = ma.arange(10)
        with warnings.catch_warnings():
            # The tests in this context are edge cases, with perfect
            # correlation or anticorrelation, or totally masked data.
            # None of these should trigger a RuntimeWarning.
            warnings.simplefilter("error", RuntimeWarning)

            assert_almost_equal(mstats.pearsonr(x, x)[0], 1.0)
            assert_almost_equal(mstats.pearsonr(x, x[::-1])[0], -1.0)

            # Fully masked input: both r and p come back masked.
            x = ma.array(x, mask=True)
            pr = mstats.pearsonr(x, x)
            assert_(pr[0] is masked)
            assert_(pr[1] is masked)

        x1 = ma.array([-1.0, 0.0, 1.0])
        y1 = ma.array([0, 0, 3])
        r, p = mstats.pearsonr(x1, y1)
        assert_almost_equal(r, np.sqrt(3)/2)
        assert_almost_equal(p, 1.0/3)

        # (x2, y2) have the same unmasked data as (x1, y1).
        mask = [False, False, False, True]
        x2 = ma.array([-1.0, 0.0, 1.0, 99.0], mask=mask)
        y2 = ma.array([0, 0, 3, -1], mask=mask)
        r, p = mstats.pearsonr(x2, y2)
        assert_almost_equal(r, np.sqrt(3)/2)
        assert_almost_equal(p, 1.0/3)

    def test_spearmanr(self):
        # Tests some computations of Spearman's rho
        (x, y) = ([5.05,6.75,3.21,2.66],[1.65,2.64,2.64,6.95])
        assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
        # Appending a nan pair and fixing it to masked must not change rho.
        (x, y) = ([5.05,6.75,3.21,2.66,np.nan],[1.65,2.64,2.64,6.95,np.nan])
        (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
        assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)

        x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
             1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]
        y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
             0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]
        assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
        x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
             1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7, np.nan]
        y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
             0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4, np.nan]
        (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
        assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)

        # test for namedtuple attributes
        res = mstats.spearmanr(x, y)
        attributes = ('correlation', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_kendalltau(self):
        # Tests some computations of Kendall's tau
        x = ma.fix_invalid([5.05, 6.75, 3.21, 2.66,np.nan])
        y = ma.fix_invalid([1.65, 26.5, -5.93, 7.96, np.nan])
        z = ma.fix_invalid([1.65, 2.64, 2.64, 6.95, np.nan])
        assert_almost_equal(np.asarray(mstats.kendalltau(x,y)),
                            [+0.3333333,0.4969059])
        assert_almost_equal(np.asarray(mstats.kendalltau(x,z)),
                            [-0.5477226,0.2785987])
        #
        x = ma.fix_invalid([0, 0, 0, 0,20,20, 0,60, 0,20,
                            10,10, 0,40, 0,20, 0, 0, 0, 0, 0, np.nan])
        y = ma.fix_invalid([0,80,80,80,10,33,60, 0,67,27,
                            25,80,80,80,80,80,80, 0,10,45, np.nan, 0])
        result = mstats.kendalltau(x,y)
        assert_almost_equal(np.asarray(result), [-0.1585188, 0.4128009])

        # test for namedtuple attributes
        res = mstats.kendalltau(x, y)
        attributes = ('correlation', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_kendalltau_seasonal(self):
        # Tests the seasonal Kendall tau.
        x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
             [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
             [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
        x = ma.fix_invalid(x).T
        output = mstats.kendalltau_seasonal(x)
        assert_almost_equal(output['global p-value (indep)'], 0.008, 3)
        assert_almost_equal(output['seasonal p-value'].round(2),
                            [0.18,0.53,0.20,0.04])

    def test_pointbiserial(self):
        # Point-biserial correlation: binary x (with one masked -1) vs
        # continuous y (last entry nan/masked).
        x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0,
             0,0,0,0,1,-1]
        y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0,
             2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1,
             0.8,0.7,0.6,0.5,0.2,0.2,0.1,np.nan]
        assert_almost_equal(mstats.pointbiserialr(x, y)[0], 0.36149, 5)

        # test for namedtuple attributes
        res = mstats.pointbiserialr(x, y)
        attributes = ('correlation', 'pvalue')
        check_named_results(res, attributes, ma=True)
class TestTrimming(TestCase):
    # Trimming / winsorization of masked arrays; trimmed entries show up
    # as masked (None in the expected lists).
    def test_trim(self):
        a = ma.arange(10)
        assert_equal(mstats.trim(a), [0,1,2,3,4,5,6,7,8,9])
        a = ma.arange(10)
        assert_equal(mstats.trim(a,(2,8)), [None,None,2,3,4,5,6,7,8,None])
        a = ma.arange(10)
        assert_equal(mstats.trim(a,limits=(2,8),inclusive=(False,False)),
                     [None,None,None,3,4,5,6,7,None,None])
        a = ma.arange(10)
        # relative=True: limits are proportions, not values.
        assert_equal(mstats.trim(a,limits=(0.1,0.2),relative=True),
                     [None,1,2,3,4,5,6,7,None,None])

        a = ma.arange(12)
        a[[0,-1]] = a[5] = masked
        assert_equal(mstats.trim(a, (2,8)),
                     [None, None, 2, 3, 4, None, 6, 7, 8, None, None, None])

        x = ma.arange(100).reshape(10, 10)
        expected = [1]*10 + [0]*70 + [1]*20
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=-1)
        assert_equal(trimx._mask.T.ravel(), expected)

        # same as above, but with an extra masked row inserted
        x = ma.arange(110).reshape(11, 10)
        x[1] = masked
        expected = [1]*20 + [0]*70 + [1]*20
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x.T, (0.1,0.2), relative=True, axis=-1)
        assert_equal(trimx.T._mask.ravel(), expected)

    def test_trim_old(self):
        x = ma.arange(100)
        assert_equal(mstats.trimboth(x).count(), 60)
        assert_equal(mstats.trimtail(x,tail='r').count(), 80)
        x[50:70] = masked
        trimx = mstats.trimboth(x)
        assert_equal(trimx.count(), 48)
        assert_equal(trimx._mask, [1]*16 + [0]*34 + [1]*20 + [0]*14 + [1]*16)
        x._mask = nomask
        x.shape = (10,10)
        assert_equal(mstats.trimboth(x).count(), 60)
        assert_equal(mstats.trimtail(x).count(), 80)

    def test_trimmedmean(self):
        data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
                         296,299,306,376,428,515,666,1310,2611])
        assert_almost_equal(mstats.trimmed_mean(data,0.1), 343, 0)
        assert_almost_equal(mstats.trimmed_mean(data,(0.1,0.1)), 343, 0)
        assert_almost_equal(mstats.trimmed_mean(data,(0.2,0.2)), 283, 0)

    def test_trimmed_stde(self):
        data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
                         296,299,306,376,428,515,666,1310,2611])
        assert_almost_equal(mstats.trimmed_stde(data,(0.2,0.2)), 56.13193, 5)
        assert_almost_equal(mstats.trimmed_stde(data,0.2), 56.13193, 5)

    def test_winsorization(self):
        data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
                         296,299,306,376,428,515,666,1310,2611])
        assert_almost_equal(mstats.winsorize(data,(0.2,0.2)).var(ddof=1),
                            21551.4, 1)
        # Winsorizing must preserve the input's mask.
        data[5] = masked
        winsorized = mstats.winsorize(data)
        assert_equal(winsorized.mask, data.mask)
class TestMoments(TestCase):
    # Comparison numbers are found using R v.1.5.1
    # note that length(testcase) = 4
    # testmathworks comes from documentation for the
    # Statistics Toolbox for Matlab and can be found at both
    # http://www.mathworks.com/access/helpdesk/help/toolbox/stats/kurtosis.shtml
    # http://www.mathworks.com/access/helpdesk/help/toolbox/stats/skewness.shtml
    # Note that both test cases came from here.
    testcase = [1,2,3,4]
    testmathworks = ma.fix_invalid([1.165, 0.6268, 0.0751, 0.3516, -0.6965,
                                    np.nan])
    # 5x5 masked 2-D fixture; the fully masked 4th row exercises the
    # all-masked-slice path.
    testcase_2d = ma.array(
    np.array([[0.05245846, 0.50344235, 0.86589117, 0.36936353, 0.46961149],
              [0.11574073, 0.31299969, 0.45925772, 0.72618805, 0.75194407],
              [0.67696689, 0.91878127, 0.09769044, 0.04645137, 0.37615733],
              [0.05903624, 0.29908861, 0.34088298, 0.66216337, 0.83160998],
              [0.64619526, 0.94894632, 0.27855892, 0.0706151, 0.39962917]]),
    mask=np.array([[True, False, False, True, False],
                   [True, True, True, False, True],
                   [False, False, False, False, False],
                   [True, True, True, True, True],
                   [False, False, True, False, False]], dtype=bool))

    def test_moment(self):
        # Central moments of [1,2,3,4]; odd moments vanish by symmetry.
        y = mstats.moment(self.testcase,1)
        assert_almost_equal(y,0.0,10)
        y = mstats.moment(self.testcase,2)
        assert_almost_equal(y,1.25)
        y = mstats.moment(self.testcase,3)
        assert_almost_equal(y,0.0)
        y = mstats.moment(self.testcase,4)
        assert_almost_equal(y,2.5625)

    def test_variation(self):
        y = mstats.variation(self.testcase)
        assert_almost_equal(y,0.44721359549996, 10)

    def test_skewness(self):
        y = mstats.skew(self.testmathworks)
        assert_almost_equal(y,-0.29322304336607,10)
        y = mstats.skew(self.testmathworks,bias=0)
        assert_almost_equal(y,-0.437111105023940,10)
        y = mstats.skew(self.testcase)
        assert_almost_equal(y,0.0,10)

    def test_kurtosis(self):
        # Set flags for axis = 0 and fisher=0 (Pearson's definition of kurtosis
        # for compatibility with Matlab)
        y = mstats.kurtosis(self.testmathworks,0,fisher=0,bias=1)
        assert_almost_equal(y, 2.1658856802973,10)
        # Note that MATLAB has confusing docs for the following case
        # kurtosis(x,0) gives an unbiased estimate of Pearson's skewness
        # kurtosis(x) gives a biased estimate of Fisher's skewness (Pearson-3)
        # The MATLAB docs imply that both should give Fisher's
        y = mstats.kurtosis(self.testmathworks,fisher=0, bias=0)
        assert_almost_equal(y, 3.663542721189047,10)
        y = mstats.kurtosis(self.testcase,0,0)
        assert_almost_equal(y,1.64)

        # test that kurtosis works on multidimensional masked arrays
        correct_2d = ma.array(np.array([-1.5, -3., -1.47247052385, 0.,
                                        -1.26979517952]),
                              mask=np.array([False, False, False, True,
                                             False], dtype=bool))
        assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1),
                                  correct_2d)
        for i, row in enumerate(self.testcase_2d):
            assert_almost_equal(mstats.kurtosis(row), correct_2d[i])

        correct_2d_bias_corrected = ma.array(
            np.array([-1.5, -3., -1.88988209538, 0., -0.5234638463918877]),
            mask=np.array([False, False, False, True, False], dtype=bool))
        assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1,
                                                  bias=False),
                                  correct_2d_bias_corrected)
        for i, row in enumerate(self.testcase_2d):
            assert_almost_equal(mstats.kurtosis(row, bias=False),
                                correct_2d_bias_corrected[i])

        # Check consistency between stats and mstats implementations
        assert_array_almost_equal_nulp(mstats.kurtosis(self.testcase_2d[2, :]),
                                       stats.kurtosis(self.testcase_2d[2, :]))

    def test_mode(self):
        # mode() returns (modal value, count); masked entries are ignored.
        a1 = [0,0,0,1,1,1,2,3,3,3,3,4,5,6,7]
        a2 = np.reshape(a1, (3,5))
        a3 = np.array([1,2,3,4,5,6])
        a4 = np.reshape(a3, (3,2))
        ma1 = ma.masked_where(ma.array(a1) > 2, a1)
        ma2 = ma.masked_where(a2 > 2, a2)
        ma3 = ma.masked_where(a3 < 2, a3)
        ma4 = ma.masked_where(ma.array(a4) < 2, a4)
        assert_equal(mstats.mode(a1, axis=None), (3,4))
        assert_equal(mstats.mode(a1, axis=0), (3,4))
        assert_equal(mstats.mode(ma1, axis=None), (0,3))
        assert_equal(mstats.mode(a2, axis=None), (3,4))
        assert_equal(mstats.mode(ma2, axis=None), (0,3))
        assert_equal(mstats.mode(a3, axis=None), (1,1))
        assert_equal(mstats.mode(ma3, axis=None), (2,1))
        assert_equal(mstats.mode(a2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
        assert_equal(mstats.mode(ma2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
        assert_equal(mstats.mode(a2, axis=-1), ([[0],[3],[3]], [[3],[3],[1]]))
        assert_equal(mstats.mode(ma2, axis=-1), ([[0],[1],[0]], [[3],[1],[0]]))
        assert_equal(mstats.mode(ma4, axis=0), ([[3,2]], [[1,1]]))
        assert_equal(mstats.mode(ma4, axis=-1), ([[2],[3],[5]], [[1],[1],[1]]))

        a1_res = mstats.mode(a1, axis=None)

        # test for namedtuple attributes
        attributes = ('mode', 'count')
        check_named_results(a1_res, attributes, ma=True)
class TestPercentile(TestCase):
    def setUp(self):
        # NOTE(review): these fixtures are not referenced by the tests in
        # this class -- confirm whether anything else relies on them.
        self.a1 = [3,4,5,10,-3,-5,6]
        self.a2 = [3,-6,-2,8,7,4,2,1]
        self.a3 = [3.,4,5,10,-3,-5,-6,7.0]

    def test_percentile(self):
        x = np.arange(8) * 0.5
        assert_equal(mstats.scoreatpercentile(x, 0), 0.)
        assert_equal(mstats.scoreatpercentile(x, 100), 3.5)
        assert_equal(mstats.scoreatpercentile(x, 50), 1.75)

    def test_2D(self):
        # Column-wise median of a 2-D masked array.
        x = ma.array([[1, 1, 1],
                      [1, 1, 1],
                      [4, 4, 3],
                      [1, 1, 1],
                      [1, 1, 1]])
        assert_equal(mstats.scoreatpercentile(x,50), [1,1,1])
class TestVariability(TestCase):
    """ Comparison numbers are found using R v.1.5.1
        note that length(testcase) = 4
    """
    # nan is converted to a masked entry by fix_invalid.
    testcase = ma.fix_invalid([1,2,3,4,np.nan])

    def test_signaltonoise(self):
        # This is not in R, so used:
        # mean(testcase, axis=0) / (sqrt(var(testcase)*3/4))
        with warnings.catch_warnings():
            # signaltonoise is deprecated; silence the warning here.
            warnings.simplefilter("ignore", DeprecationWarning)
            y = mstats.signaltonoise(self.testcase)
            assert_almost_equal(y, 2.236067977)

    def test_sem(self):
        # This is not in R, so used: sqrt(var(testcase)*3/4) / sqrt(3)
        y = mstats.sem(self.testcase)
        assert_almost_equal(y, 0.6454972244)
        n = self.testcase.count()
        assert_allclose(mstats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)),
                        mstats.sem(self.testcase, ddof=2))

    def test_zmap(self):
        # This is not in R, so tested by using:
        # (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)
        y = mstats.zmap(self.testcase, self.testcase)
        desired_unmaskedvals = ([-1.3416407864999, -0.44721359549996,
                                 0.44721359549996, 1.3416407864999])
        assert_array_almost_equal(desired_unmaskedvals,
                                  y.data[y.mask == False], decimal=12)

    def test_zscore(self):
        # This is not in R, so tested by using:
        # (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)
        y = mstats.zscore(self.testcase)
        desired = ma.fix_invalid([-1.3416407864999, -0.44721359549996,
                                  0.44721359549996, 1.3416407864999, np.nan])
        assert_almost_equal(desired, y, decimal=12)
class TestMisc(TestCase):
    def test_obrientransform(self):
        # O'Brien transform for homogeneity-of-variance testing.
        args = [[5]*5+[6]*11+[7]*9+[8]*3+[9]*2+[10]*2,
                [6]+[7]*2+[8]*4+[9]*9+[10]*16]
        result = [5*[3.1828]+11*[0.5591]+9*[0.0344]+3*[1.6086]+2*[5.2817]+2*[11.0538],
                  [10.4352]+2*[4.8599]+4*[1.3836]+9*[0.0061]+16*[0.7277]]
        assert_almost_equal(np.round(mstats.obrientransform(*args).T,4),
                            result,4)

    def test_kstwosamp(self):
        # Two-sample KS on masked seasonal data; two-sided, greater, less.
        x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
             [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
             [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
        x = ma.fix_invalid(x).T
        (winter,spring,summer,fall) = x.T

        assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring),4),
                            (0.1818,0.9892))
        assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring,'g'),4),
                            (0.1469,0.7734))
        assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring,'l'),4),
                            (0.1818,0.6744))

    def test_friedmanchisq(self):
        # No missing values
        args = ([9.0,9.5,5.0,7.5,9.5,7.5,8.0,7.0,8.5,6.0],
                [7.0,6.5,7.0,7.5,5.0,8.0,6.0,6.5,7.0,7.0],
                [6.0,8.0,4.0,6.0,7.0,6.5,6.0,4.0,6.5,3.0])
        result = mstats.friedmanchisquare(*args)
        assert_almost_equal(result[0], 10.4737, 4)
        assert_almost_equal(result[1], 0.005317, 6)

        # Missing values
        x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
             [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
             [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
        x = ma.fix_invalid(x)
        result = mstats.friedmanchisquare(*x)
        assert_almost_equal(result[0], 2.0156, 4)
        assert_almost_equal(result[1], 0.5692, 4)

        # test for namedtuple attributes
        attributes = ('statistic', 'pvalue')
        check_named_results(result, attributes, ma=True)
def test_regress_simple():
    # Regress a line with sinusoidal noise. Test for #1273.
    x = np.linspace(0, 100, 100)
    y = 0.2 * np.linspace(0, 100, 100) + 10
    y += np.sin(np.linspace(0, 20, 100))

    slope, intercept, r_value, p_value, sterr = mstats.linregress(x, y)
    assert_almost_equal(slope, 0.19644990055858422)
    assert_almost_equal(intercept, 10.211269918932341)

    # test for namedtuple attributes
    res = mstats.linregress(x, y)
    attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')
    check_named_results(res, attributes, ma=True)
def test_theilslopes():
    # Test for basic slope and intercept.
    slope, intercept, lower, upper = mstats.theilslopes([0,1,1])
    assert_almost_equal(slope, 0.5)
    assert_almost_equal(intercept, 0.5)

    # Test for correct masking.
    y = np.ma.array([0,1,100,1], mask=[False, False, True, False])
    slope, intercept, lower, upper = mstats.theilslopes(y)
    assert_almost_equal(slope, 1./3)
    assert_almost_equal(intercept, 2./3)

    # Test of confidence intervals from example in Sen (1968).
    x = [1, 2, 3, 4, 10, 12, 18]
    y = [9, 15, 19, 20, 45, 55, 78]
    slope, intercept, lower, upper = mstats.theilslopes(y, x, 0.07)
    assert_almost_equal(slope, 4)
    assert_almost_equal(upper, 4.38, decimal=2)
    assert_almost_equal(lower, 3.71, decimal=2)
def test_plotting_positions():
    # Regression test for #1256: with alpha=0, beta=0 the positions are
    # i/(n+1) for i = 1..n.
    positions = mstats.plotting_positions(np.arange(3), 0, 0)
    assert_array_almost_equal(positions.data, np.array([0.25, 0.5, 0.75]))
class TestNormalitytests():
    # The masked-array normality tests must agree with the plain stats
    # implementations on equivalent data.
    def test_vs_nonmasked(self):
        x = np.array((-2,-1,0,1,2,3)*4)**2
        assert_array_almost_equal(mstats.normaltest(x),
                                  stats.normaltest(x))
        assert_array_almost_equal(mstats.skewtest(x),
                                  stats.skewtest(x))
        assert_array_almost_equal(mstats.kurtosistest(x),
                                  stats.kurtosistest(x))

        # Too few samples: both implementations must raise ValueError.
        funcs = [stats.normaltest, stats.skewtest, stats.kurtosistest]
        mfuncs = [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]
        x = [1, 2, 3, 4]
        for func, mfunc in zip(funcs, mfuncs):
            assert_raises(ValueError, func, x)
            assert_raises(ValueError, mfunc, x)

    def test_axis_None(self):
        # Test axis=None (equal to axis=0 for 1-D input)
        x = np.array((-2,-1,0,1,2,3)*4)**2
        assert_allclose(mstats.normaltest(x, axis=None), mstats.normaltest(x))
        assert_allclose(mstats.skewtest(x, axis=None), mstats.skewtest(x))
        assert_allclose(mstats.kurtosistest(x, axis=None),
                        mstats.kurtosistest(x))

    def test_maskedarray_input(self):
        # Add some masked values, test result doesn't change
        x = np.array((-2,-1,0,1,2,3)*4)**2
        xm = np.ma.array(np.r_[np.inf, x, 10],
                         mask=np.r_[True, [False] * x.size, True])
        assert_allclose(mstats.normaltest(xm), stats.normaltest(x))
        assert_allclose(mstats.skewtest(xm), stats.skewtest(x))
        assert_allclose(mstats.kurtosistest(xm), stats.kurtosistest(x))

    def test_nd_input(self):
        # 2-D input: each column must reproduce the 1-D result.
        x = np.array((-2,-1,0,1,2,3)*4)**2
        x_2d = np.vstack([x] * 2).T
        for func in [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]:
            res_1d = func(x)
            res_2d = func(x_2d)
            assert_allclose(res_2d[0], [res_1d[0]] * 2)
            assert_allclose(res_2d[1], [res_1d[1]] * 2)

    def test_normaltest_result_attributes(self):
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        res = mstats.normaltest(x)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_kurtosistest_result_attributes(self):
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        res = mstats.kurtosistest(x)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)
class TestFOneway():
    def test_result_attributes(self):
        # f_oneway result must expose the namedtuple fields.
        a = np.array([655, 788], dtype=np.uint16)
        b = np.array([789, 772], dtype=np.uint16)
        res = mstats.f_oneway(a, b)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)
class TestMannwhitneyu():
    def test_result_attributes(self):
        # mannwhitneyu result must expose the namedtuple fields; the data
        # are heavily tied samples of mostly ones.
        x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1.])

        y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
                      2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,
                      1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,
                      2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,
                      2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,
                      1., 1., 1., 1.])

        res = mstats.mannwhitneyu(x, y)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)
class TestKruskal():
    def test_result_attributes(self):
        # kruskal result must expose the namedtuple fields.
        x = [1, 3, 5, 7, 9]
        y = [2, 4, 6, 8, 10]
        res = mstats.kruskal(x, y)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)
#TODO: for all ttest functions, add tests with masked array inputs
class TestTtest_rel():
    # Paired t-test: mstats must agree with the plain stats version.
    def test_vs_nonmasked(self):
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]

        # 1-D inputs
        res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1])
        res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
        assert_allclose(res1, res2)

        # 2-D inputs
        res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
        res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
        assert_allclose(res1, res2)
        res1 = stats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
        res2 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
        assert_allclose(res1, res2)

        # Check default is axis=0
        res3 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:])
        assert_allclose(res2, res3)

    def test_result_attributes(self):
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
        res = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_invalid_input_size(self):
        # Paired samples must match in size along the test axis.
        assert_raises(ValueError, mstats.ttest_rel,
                      np.arange(10), np.arange(11))
        x = np.arange(24)
        assert_raises(ValueError, mstats.ttest_rel,
                      x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=1)
        assert_raises(ValueError, mstats.ttest_rel,
                      x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=2)

    def test_empty(self):
        # Empty input yields (nan, nan) rather than raising.
        res1 = mstats.ttest_rel([], [])
        assert_(np.all(np.isnan(res1)))
class TestTtest_ind():
def test_vs_nonmasked(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
# 1-D inputs
res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1])
res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
assert_allclose(res1, res2)
# 2-D inputs
res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
assert_allclose(res1, res2)
res1 = stats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
res2 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
assert_allclose(res1, res2)
# Check default is axis=0
res3 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:])
assert_allclose(res2, res3)
# Check equal_var
res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)
res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)
assert_allclose(res4, res5)
res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)
res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)
assert_allclose(res4, res5)
def test_result_attributes(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
res = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_empty(self):
res1 = mstats.ttest_ind([], [])
assert_(np.all(np.isnan(res1)))
class TestTtest_1samp():
def test_vs_nonmasked(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
# 1-D inputs
res1 = stats.ttest_1samp(outcome[:, 0], 1)
res2 = mstats.ttest_1samp(outcome[:, 0], 1)
assert_allclose(res1, res2)
# 2-D inputs
res1 = stats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None)
res2 = mstats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None)
assert_allclose(res1, res2)
res1 = stats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0)
res2 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0)
assert_allclose(res1, res2)
# Check default is axis=0
res3 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:])
assert_allclose(res2, res3)
def test_result_attributes(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
res = mstats.ttest_1samp(outcome[:, 0], 1)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_empty(self):
res1 = mstats.ttest_1samp([], 1)
assert_(np.all(np.isnan(res1)))
class TestCompareWithStats(TestCase):
"""
Class to compare mstats results with stats results.
It is in general assumed that scipy.stats is at a more mature stage than
stats.mstats. If a routine in mstats results in similar results like in
scipy.stats, this is considered also as a proper validation of scipy.mstats
routine.
Different sample sizes are used for testing, as some problems between stats
and mstats are dependent on sample size.
Author: Alexander Loew
NOTE that some tests fail. This might be caused by
a) actual differences or bugs between stats and mstats
b) numerical inaccuracies
c) different definitions of routine interfaces
These failures need to be checked. Current workaround is to have disabled these tests,
but issuing reports on scipy-dev
"""
def get_n(self):
""" Returns list of sample sizes to be used for comparison. """
return [1000, 100, 10, 5]
def generate_xy_sample(self, n):
# This routine generates numpy arrays and corresponding masked arrays
# with the same data, but additional masked values
np.random.seed(1234567)
x = np.random.randn(n)
y = x + np.random.randn(n)
xm = np.ones(len(x) + 5) * 1e16
ym = np.ones(len(y) + 5) * 1e16
xm[0:len(x)] = x
ym[0:len(y)] = y
mask = xm > 9e15
xm = np.ma.array(xm, mask=mask)
ym = np.ma.array(ym, mask=mask)
return x, y, xm, ym
def generate_xy_sample2D(self, n, nx):
x = np.ones((n, nx)) * np.nan
y = np.ones((n, nx)) * np.nan
xm = np.ones((n+5, nx)) * np.nan
ym = np.ones((n+5, nx)) * np.nan
for i in range(nx):
x[:,i], y[:,i], dx, dy = self.generate_xy_sample(n)
xm[0:n, :] = x[0:n]
ym[0:n, :] = y[0:n]
xm = np.ma.array(xm, mask=np.isnan(xm))
ym = np.ma.array(ym, mask=np.isnan(ym))
return x, y, xm, ym
def test_linregress(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
res1 = stats.linregress(x, y)
res2 = stats.mstats.linregress(xm, ym)
assert_allclose(np.asarray(res1), np.asarray(res2))
def test_pearsonr(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r, p = stats.pearsonr(x, y)
rm, pm = stats.mstats.pearsonr(xm, ym)
assert_almost_equal(r, rm, decimal=14)
assert_almost_equal(p, pm, decimal=14)
def test_spearmanr(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r, p = stats.spearmanr(x, y)
rm, pm = stats.mstats.spearmanr(xm, ym)
assert_almost_equal(r, rm, 14)
assert_almost_equal(p, pm, 14)
def test_gmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.gmean(abs(x))
rm = stats.mstats.gmean(abs(xm))
assert_allclose(r, rm, rtol=1e-13)
r = stats.gmean(abs(y))
rm = stats.mstats.gmean(abs(ym))
assert_allclose(r, rm, rtol=1e-13)
def test_hmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.hmean(abs(x))
rm = stats.mstats.hmean(abs(xm))
assert_almost_equal(r, rm, 10)
r = stats.hmean(abs(y))
rm = stats.mstats.hmean(abs(ym))
assert_almost_equal(r, rm, 10)
def test_skew(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.skew(x)
rm = stats.mstats.skew(xm)
assert_almost_equal(r, rm, 10)
r = stats.skew(y)
rm = stats.mstats.skew(ym)
assert_almost_equal(r, rm, 10)
def test_moment(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.moment(x)
rm = stats.mstats.moment(xm)
assert_almost_equal(r, rm, 10)
r = stats.moment(y)
rm = stats.mstats.moment(ym)
assert_almost_equal(r, rm, 10)
def test_signaltonoise(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.signaltonoise(x)
rm = stats.mstats.signaltonoise(xm)
assert_almost_equal(r, rm, 10)
r = stats.signaltonoise(y)
rm = stats.mstats.signaltonoise(ym)
assert_almost_equal(r, rm, 10)
def test_betai(self):
np.random.seed(12345)
for i in range(10):
a = np.random.rand() * 5.
b = np.random.rand() * 200.
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
assert_equal(stats.betai(a, b, 0.), 0.)
assert_equal(stats.betai(a, b, 1.), 1.)
assert_equal(stats.mstats.betai(a, b, 0.), 0.)
assert_equal(stats.mstats.betai(a, b, 1.), 1.)
x = np.random.rand()
assert_almost_equal(stats.betai(a, b, x),
stats.mstats.betai(a, b, x), decimal=13)
def test_zscore(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
#reference solution
zx = (x - x.mean()) / x.std()
zy = (y - y.mean()) / y.std()
#validate stats
assert_allclose(stats.zscore(x), zx, rtol=1e-10)
assert_allclose(stats.zscore(y), zy, rtol=1e-10)
#compare stats and mstats
assert_allclose(stats.zscore(x), stats.mstats.zscore(xm[0:len(x)]),
rtol=1e-10)
assert_allclose(stats.zscore(y), stats.mstats.zscore(ym[0:len(y)]),
rtol=1e-10)
def test_kurtosis(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.kurtosis(x)
rm = stats.mstats.kurtosis(xm)
assert_almost_equal(r, rm, 10)
r = stats.kurtosis(y)
rm = stats.mstats.kurtosis(ym)
assert_almost_equal(r, rm, 10)
def test_sem(self):
# example from stats.sem doc
a = np.arange(20).reshape(5,4)
am = np.ma.array(a)
r = stats.sem(a,ddof=1)
rm = stats.mstats.sem(am, ddof=1)
assert_allclose(r, 2.82842712, atol=1e-5)
assert_allclose(rm, 2.82842712, atol=1e-5)
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=0),
stats.sem(x, axis=None, ddof=0), decimal=13)
assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=0),
stats.sem(y, axis=None, ddof=0), decimal=13)
assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=1),
stats.sem(x, axis=None, ddof=1), decimal=13)
assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=1),
stats.sem(y, axis=None, ddof=1), decimal=13)
def test_describe(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.describe(x, ddof=1)
rm = stats.mstats.describe(xm, ddof=1)
for ii in range(6):
assert_almost_equal(np.asarray(r[ii]),
np.asarray(rm[ii]),
decimal=12)
def test_describe_result_attributes(self):
actual = mstats.describe(np.arange(5))
attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis')
check_named_results(actual, attributes, ma=True)
def test_rankdata(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.rankdata(x)
rm = stats.mstats.rankdata(x)
assert_allclose(r, rm)
def test_tmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tmean(x),stats.mstats.tmean(xm), 14)
assert_almost_equal(stats.tmean(y),stats.mstats.tmean(ym), 14)
def test_tmax(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tmax(x,2.),
stats.mstats.tmax(xm,2.), 10)
assert_almost_equal(stats.tmax(y,2.),
stats.mstats.tmax(ym,2.), 10)
def test_tmin(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_equal(stats.tmin(x),stats.mstats.tmin(xm))
assert_equal(stats.tmin(y),stats.mstats.tmin(ym))
assert_almost_equal(stats.tmin(x,lowerlimit=-1.),
stats.mstats.tmin(xm,lowerlimit=-1.), 10)
assert_almost_equal(stats.tmin(y,lowerlimit=-1.),
stats.mstats.tmin(ym,lowerlimit=-1.), 10)
def test_zmap(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
z = stats.zmap(x,y)
zm = stats.mstats.zmap(xm,ym)
assert_allclose(z, zm[0:len(z)], atol=1e-10)
def test_variation(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.variation(x), stats.mstats.variation(xm),
decimal=12)
assert_almost_equal(stats.variation(y), stats.mstats.variation(ym),
decimal=12)
def test_tvar(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tvar(x), stats.mstats.tvar(xm),
decimal=12)
assert_almost_equal(stats.tvar(y), stats.mstats.tvar(ym),
decimal=12)
def test_trimboth(self):
a = np.arange(20)
b = stats.trimboth(a, 0.1)
bm = stats.mstats.trimboth(a, 0.1)
assert_allclose(np.sort(b), bm.data[~bm.mask])
def test_tsem(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tsem(x),stats.mstats.tsem(xm), decimal=14)
assert_almost_equal(stats.tsem(y),stats.mstats.tsem(ym), decimal=14)
assert_almost_equal(stats.tsem(x,limits=(-2.,2.)),
stats.mstats.tsem(xm,limits=(-2.,2.)),
decimal=14)
def test_skewtest(self):
# this test is for 1D data
for n in self.get_n():
if n > 8:
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.skewtest(x)
rm = stats.mstats.skewtest(xm)
assert_allclose(r[0], rm[0], rtol=1e-15)
# TODO this test is not performed as it is a known issue that
# mstats returns a slightly different p-value what is a bit
# strange is that other tests like test_maskedarray_input don't
# fail!
#~ assert_almost_equal(r[1], rm[1])
def test_skewtest_result_attributes(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
res = mstats.skewtest(x)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_skewtest_2D_notmasked(self):
# a normal ndarray is passed to the masked function
x = np.random.random((20, 2)) * 20.
r = stats.skewtest(x)
rm = stats.mstats.skewtest(x)
assert_allclose(np.asarray(r), np.asarray(rm))
def test_skewtest_2D_WithMask(self):
nx = 2
for n in self.get_n():
if n > 8:
x, y, xm, ym = self.generate_xy_sample2D(n, nx)
r = stats.skewtest(x)
rm = stats.mstats.skewtest(xm)
assert_equal(r[0][0],rm[0][0])
assert_equal(r[0][1],rm[0][1])
def test_normaltest(self):
np.seterr(over='raise')
for n in self.get_n():
if n > 8:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=UserWarning)
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.normaltest(x)
rm = stats.mstats.normaltest(xm)
assert_allclose(np.asarray(r), np.asarray(rm))
def test_find_repeats(self):
x = np.asarray([1,1,2,2,3,3,3,4,4,4,4]).astype('float')
tmp = np.asarray([1,1,2,2,3,3,3,4,4,4,4,5,5,5,5]).astype('float')
mask = (tmp == 5.)
xm = np.ma.array(tmp, mask=mask)
r = stats.find_repeats(x)
rm = stats.mstats.find_repeats(xm)
assert_equal(r,rm)
def test_kendalltau(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.kendalltau(x, y)
rm = stats.mstats.kendalltau(xm, ym)
assert_almost_equal(r[0], rm[0], decimal=10)
assert_almost_equal(r[1], rm[1], decimal=7)
def test_obrientransform(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.obrientransform(x)
rm = stats.mstats.obrientransform(xm)
assert_almost_equal(r.T, rm[0:len(x)])
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
ToonTownInfiniteRepo/ToontownInfinite | toontown/coghq/DistributedSwitchAI.py | 6 | 3722 | from direct.distributed.ClockDelta import *
from direct.directnotify import DirectNotifyGlobal
import DistributedSwitchBase
from direct.task import Task
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from otp.level import DistributedEntityAI
class DistributedSwitchAI(DistributedSwitchBase.DistributedSwitchBase, DistributedEntityAI.DistributedEntityAI):
def __init__(self, level, entId, zoneId = None):
DistributedEntityAI.DistributedEntityAI.__init__(self, level, entId)
self.fsm = ClassicFSM.ClassicFSM('DistributedSwitch', [State.State('off', self.enterOff, self.exitOff, ['playing']), State.State('attract', self.enterAttract, self.exitAttract, ['playing']), State.State('playing', self.enterPlaying, self.exitPlaying, ['attract'])], 'off', 'off')
self.fsm.enterInitialState()
self.avatarId = 0
self.doLaterTask = None
if zoneId is not None:
self.generateWithRequired(zoneId)
return
def setup(self):
pass
def takedown(self):
pass
setScale = DistributedSwitchBase.stubFunction
def delete(self):
if self.doLaterTask:
self.doLaterTask.remove()
self.doLaterTask = None
del self.fsm
DistributedEntityAI.DistributedEntityAI.delete(self)
return
def getAvatarInteract(self):
return self.avatarId
def getState(self):
r = [self.fsm.getCurrentState().getName(), globalClockDelta.getRealNetworkTime()]
return r
def sendState(self):
self.sendUpdate('setState', self.getState())
def setIsOn(self, isOn):
if self.isOn != isOn:
self.isOn = isOn
stateName = self.fsm.getCurrentState().getName()
if isOn:
if stateName != 'playing':
self.fsm.request('playing')
elif stateName != 'attract':
self.fsm.request('attract')
messenger.send(self.getOutputEventName(), [isOn])
def getIsOn(self):
return self.isOn
def getName(self):
return 'switch-%s' % (self.entId,)
def switchOffTask(self, task):
self.setIsOn(0)
self.fsm.request('attract')
return Task.done
def requestInteract(self):
avatarId = self.air.getAvatarIdFromSender()
stateName = self.fsm.getCurrentState().getName()
if stateName != 'playing':
self.sendUpdate('setAvatarInteract', [avatarId])
self.avatarId = avatarId
self.fsm.request('playing')
else:
self.sendUpdateToAvatarId(avatarId, 'rejectInteract', [])
def requestExit(self):
avatarId = self.air.getAvatarIdFromSender()
if self.avatarId and avatarId == self.avatarId:
stateName = self.fsm.getCurrentState().getName()
if stateName == 'playing':
self.sendUpdate('avatarExit', [avatarId])
self.avatarId = None
if self.isOn and self.secondsOn != -1.0 and self.secondsOn >= 0.0:
self.doLaterTask = taskMgr.doMethodLater(self.secondsOn, self.switchOffTask, self.uniqueName('switch-timer'))
return
def enterOff(self):
pass
def exitOff(self):
pass
def enterAttract(self):
self.sendState()
def exitAttract(self):
pass
def enterPlaying(self):
self.sendState()
self.setIsOn(1)
def exitPlaying(self):
if self.doLaterTask:
self.doLaterTask.remove()
self.doLaterTask = None
return
if __dev__:
def attribChanged(self, attrib, value):
self.takedown()
self.setup()
| mit |
jcpowermac/ansible | lib/ansible/utils/module_docs_fragments/dellos10.py | 75 | 2591 | #
# (c) 2015, Peter Sprygada <psprygada@ansible.com>
#
# Copyright (c) 2016 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
provider:
description:
- A dict object containing connection details.
default: null
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the remote
device.
default: 22
username:
description:
- User to authenticate the SSH session to the remote device. If the
value is not specified in the task, the value of environment variable
C(ANSIBLE_NET_USERNAME) will be used instead.
password:
description:
- Password to authenticate the SSH session to the remote device. If the
value is not specified in the task, the value of environment variable
C(ANSIBLE_NET_PASSWORD) will be used instead.
default: null
ssh_keyfile:
description:
- Path to an ssh key used to authenticate the SSH session to the remote
device. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
timeout:
description:
- Specifies idle timeout (in seconds) for the connection. Useful if the
console freezes before continuing. For example when saving
configurations.
default: 10
notes:
- For more information on using Ansible to manage Dell EMC Network devices see U(https://www.ansible.com/ansible-dell-networking).
"""
| gpl-3.0 |
eickenberg/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
bigdatauniversity/edx-platform | common/djangoapps/util/cache.py | 62 | 3810 | """
This module aims to give a little more fine-tuned control of caching and cache
invalidation. Import these instead of django.core.cache.
Note that 'default' is being preserved for user session caching, which we're
not migrating so as not to inconvenience users by logging them all out.
"""
import urllib
from functools import wraps
from django.conf import settings
from django.core import cache
# If we can't find a 'general' CACHE defined in settings.py, we simply fall back
# to returning the default cache. This will happen with dev machines.
from django.utils.translation import get_language
try:
cache = cache.caches['general'] # pylint: disable=invalid-name
except Exception:
cache = cache.cache
def cache_if_anonymous(*get_parameters):
"""Cache a page for anonymous users.
Many of the pages in edX are identical when the user is not logged
in, but should not be cached when the user is logged in (because
of the navigation bar at the top with the username).
The django middleware cache does not handle this correctly, because
we access the session to put the csrf token in the header. This adds
the cookie to the vary header, and so every page is cached seperately
for each user (because each user has a different csrf token).
Optionally, provide a series of GET parameters as arguments to cache
pages with these GET parameters separately.
Note that this decorator should only be used on views that do not
contain the csrftoken within the html. The csrf token can be included
in the header by ordering the decorators as such:
@ensure_csrftoken
@cache_if_anonymous()
def myView(request):
"""
def decorator(view_func):
"""The outer wrapper, used to allow the decorator to take optional arguments."""
@wraps(view_func)
def wrapper(request, *args, **kwargs):
"""The inner wrapper, which wraps the view function."""
# Certificate authentication uses anonymous pages,
# specifically the branding index, to do authentication.
# If that page is cached the authentication doesn't
# happen, so we disable the cache when that feature is enabled.
if (
not request.user.is_authenticated() and
not settings.FEATURES['AUTH_USE_CERTIFICATES']
):
# Use the cache. The same view accessed through different domain names may
# return different things, so include the domain name in the key.
domain = str(request.META.get('HTTP_HOST')) + '.'
cache_key = domain + "cache_if_anonymous." + get_language() + '.' + request.path
# Include the values of GET parameters in the cache key.
for get_parameter in get_parameters:
parameter_value = request.GET.get(get_parameter)
if parameter_value is not None:
# urlencode expects data to be of type str, and doesn't deal well with Unicode data
# since it doesn't provide a way to specify an encoding.
cache_key = cache_key + '.' + urllib.urlencode({
get_parameter: unicode(parameter_value).encode('utf-8')
})
response = cache.get(cache_key) # pylint: disable=maybe-no-member
if not response:
response = view_func(request, *args, **kwargs)
cache.set(cache_key, response, 60 * 3) # pylint: disable=maybe-no-member
return response
else:
# Don't use the cache.
return view_func(request, *args, **kwargs)
return wrapper
return decorator
| agpl-3.0 |
buildbot/supybot | src/utils/seq.py | 8 | 2146 | ###
# Copyright (c) 2002-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
def window(L, size):
"""list * size -> window iterable
Returns a sliding 'window' through the list L of size size."""
assert not isinstance(L, int), 'Argument order swapped: window(L, size)'
if size < 1:
raise ValueError, 'size <= 0 disallowed.'
for i in xrange(len(L) - (size-1)):
yield L[i:i+size]
def mapinto(f, L):
for (i, x) in enumerate(L):
L[i] = f(x)
def renumerate(L):
for i in xrange(len(L)-1, -1, -1):
yield (i, L[i])
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause |
policycompass/policycompass-services | apps/referencepool/migrations/0001_initial.py | 3 | 3047 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='ExternalResource',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('title', models.CharField(unique=True, max_length=100)),
('url', models.URLField()),
('api_url', models.URLField()),
],
options={
'verbose_name': 'External Resource',
'verbose_name_plural': 'External Resources',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Language',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('code', models.CharField(unique=True, max_length=2)),
('title', models.CharField(unique=True, max_length=100)),
],
options={
'verbose_name': 'Language',
'verbose_name_plural': 'Languages',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PolicyDomain',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('title', models.CharField(unique=True, max_length=100)),
('description', models.TextField()),
],
options={
'verbose_name': 'Policy Domain',
'verbose_name_plural': 'Policy Domains',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Unit',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('title', models.CharField(unique=True, max_length=50)),
('description', models.TextField()),
],
options={
'verbose_name': 'Unit',
'verbose_name_plural': 'Units',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UnitCategory',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('title', models.CharField(unique=True, max_length=100)),
],
options={
'verbose_name': 'Unit Category',
'verbose_name_plural': 'Unit Categories',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='unit',
name='unit_category',
field=models.ForeignKey(to='referencepool.UnitCategory'),
preserve_default=True,
),
]
| agpl-3.0 |
jwhitlock/web-platform-compat | webplatformcompat/serializers.py | 2 | 29254 | # -*- coding: utf-8 -*-
"""API Serializers."""
from collections import OrderedDict
from copy import deepcopy
from django.db.models import CharField
from django.contrib.auth.models import User
from rest_framework.serializers import (
CurrentUserDefault, DateTimeField, IntegerField,
ModelSerializer, SerializerMethodField, ValidationError)
from . import fields
from .drf_fields import (
CurrentHistoryField, HistoricalObjectField, HistoryField,
MPTTRelationField, OptionalCharField, OptionalIntegerField,
PrimaryKeyRelatedField, TranslatedTextField)
from .history import Changeset
from .models import (
Browser, Feature, Maturity, Reference, Section, Specification, Support,
Version)
from .validators import VersionAndStatusValidator
#
# "Regular" Serializers
#
class WriteRestrictedMixin(object):
def get_fields(self):
"""Add read_only flag for write-restricted fields."""
fields = super(WriteRestrictedMixin, self).get_fields()
# Some fields are read-only based on view action
view = self.context.get('view', None)
if view and view.action in ('list', 'create'):
set_to_readonly = 'update_only'
elif view and view.action in ('update', 'partial_update'):
set_to_readonly = 'create_only'
else:
set_to_readonly = None
# Set fields to read-only based on view action
if set_to_readonly:
fields_extra = getattr(self.Meta, 'fields_extra', {})
for field_name, field in fields.items():
field_extra = fields_extra.get(field_name, {})
writable = field_extra.get('writable', True)
if writable == set_to_readonly:
assert not field.read_only, (
('%s was requested to be set read-only for %s,'
' but is already read-only by default.')
% (field_name, view and view.action or 'unknown'))
field.read_only = True
return fields
class FieldMapMixin(object):
"""Automatically handle fields used by this project."""
serializer_field_mapping = ModelSerializer.serializer_field_mapping
serializer_field_mapping[fields.TranslatedField] = TranslatedTextField
serializer_field_mapping[CharField] = OptionalCharField
serializer_related_field = PrimaryKeyRelatedField
def build_standard_field(self, field_name, model_field):
field_class, field_kwargs = super(
FieldMapMixin, self).build_standard_field(
field_name, model_field)
if isinstance(model_field, fields.TranslatedField):
if not (model_field.blank or model_field.null):
field_kwargs['required'] = True
if model_field.allow_canonical:
field_kwargs['allow_canonical'] = True
return field_class, field_kwargs
class FieldsExtraMixin(object):
"""Add fields_extra property to serializer."""
@classmethod
def get_fields_extra(cls):
return getattr(cls.Meta, 'fields_extra', {})
class HistoricalModelSerializer(
        WriteRestrictedMixin, FieldMapMixin, FieldsExtraMixin,
        ModelSerializer):
    """Model serializer with history manager."""

    # Bookkeeping columns on the historical record that are never copied
    # back into incoming data (see to_internal_value below).
    omit_historical_fields = (
        'id', 'history_id', 'history_date', 'history_user', 'history_type',
        'history_changeset')

    def build_property_field(self, field_name, model_class):
        """Handle history field.

        The history field is a list of PKs for all the history records.
        """
        assert field_name == 'history', (
            'Expected field name to be "history", got "%s"'
            % field_name)
        field_kwargs = {'many': True, 'read_only': True}
        return HistoryField, field_kwargs

    def build_unknown_field(self, field_name, model_class):
        """Handle history_current field.

        history_current returns the PK of the most recent history record.
        It is treated as read-only unless it is an update view.
        """
        assert field_name == 'history_current', (
            'Expected field name to be "history_current", got "%s"'
            % field_name)
        return CurrentHistoryField, {}

    def to_internal_value(self, data):
        """If history_current in data, load historical data into instance."""
        if data and 'history_current' in data:
            if data['history_current'] is not None:
                history_id = int(data['history_current'])
                # [0] is taken as the current record -- assumes the history
                # queryset is ordered newest-first (TODO: confirm ordering
                # on the history manager).
                current_history = self.instance.history.all()[0]
                if current_history.history_id != history_id:
                    try:
                        historical = self.instance.history.get(
                            history_id=history_id)
                    except self.instance.history.model.DoesNotExist:
                        err = 'Invalid history ID for this object'
                        raise ValidationError({'history_current': [err]})
                    else:
                        # Overwrite incoming data with the archived values,
                        # effectively reverting the object to that record.
                        for field in historical._meta.fields:
                            if field.attname in self.omit_historical_fields:
                                continue
                            attname = field.attname
                            hist_value = getattr(historical, attname)
                            data_name = attname
                            if data_name.endswith('_id'):
                                # Foreign keys: drop the "_id" suffix so the
                                # key matches the serializer field name.
                                data_name = data_name[:-len('_id')]
                            data[data_name] = hist_value
            else:
                # Explicit null is rejected, same as an unknown ID.
                err = 'Invalid history ID for this object'
                raise ValidationError({'history_current': [err]})
        return super(HistoricalModelSerializer, self).to_internal_value(data)
class BrowserSerializer(HistoricalModelSerializer):
    """Browser Serializer."""

    def update(self, instance, validated_data):
        """Update the browser, re-ordering versions when requested."""
        versions = validated_data.pop('versions', None)
        instance = super(BrowserSerializer, self).update(
            instance, validated_data)
        if versions:
            v_pks = [v.pk for v in versions]
            current_order = instance.get_version_order()
            # Only write the order when it actually changed.
            if v_pks != current_order:
                instance.set_version_order(v_pks)
        return instance

    class Meta:
        model = Browser
        fields = (
            'id', 'slug', 'name', 'note', 'versions', 'history_current',
            'history')
        # Per-field metadata consumed via FieldsExtraMixin.get_fields_extra.
        fields_extra = {
            'id': {
                'link': 'self',
                'resource': 'browsers',
            },
            'slug': {
                'writable': 'create_only',
            },
            'versions': {
                'link': 'from_many',
                'writable': 'update_only',
            },
            'history_current': {
                'archive': 'history_id',
                'link': 'from_one',
                'resource': 'historical_browsers',
                'writable': 'update_only',
            },
            'history': {
                'archive': 'omit',
                'link': 'from_many',
                'resource': 'historical_browsers',
            },
        }
class FeatureSerializer(HistoricalModelSerializer):
    """Feature Serializer."""

    children = MPTTRelationField(
        many=True, queryset=Feature.objects.all(), required=False)

    def update(self, instance, validated_data):
        """Handle updating of sorted related items."""
        references = validated_data.pop('references', None)
        children = validated_data.pop('children', None)
        if children:
            current_order = list(instance.get_children())
            if current_order == children:
                # Order unchanged; skip the re-ordering below.
                children = None
        instance = super(FeatureSerializer, self).update(
            instance, validated_data)
        if children:
            instance.set_children_order(children)
        if references:
            current_ref_order = instance.get_reference_order()
            new_ref_order = [ref.pk for ref in references]
            if current_ref_order != new_ref_order:
                instance.set_reference_order(new_ref_order)
        return instance

    def validate_children(self, value):
        """Require children to be a re-ordering of the existing children."""
        if self.instance:
            current_children = list(self.instance.get_children())
            current_set = set([child.pk for child in current_children])
            new_set = set([child.pk for child in value])
            # Children may only be re-ordered here, not added or removed.
            if current_set - new_set:
                raise ValidationError(
                    'All child features must be included in children.')
            if new_set - current_set:
                raise ValidationError(
                    'Set child.parent to add a child feature.')
        else:
            if value != []: # pragma: no cover
                # Because children is in update_only_fields, never happens
                raise ValidationError(
                    'Can not set children when creating a feature.')
        return value

    class Meta:
        model = Feature
        fields = (
            'id', 'slug', 'mdn_uri', 'experimental', 'standardized',
            'stable', 'obsolete', 'name', 'parent', 'children',
            'references', 'supports', 'history_current', 'history')
        read_only_fields = ('supports',)
        extra_kwargs = {
            'references': {
                'default': []
            }
        }
        # Per-field metadata consumed via FieldsExtraMixin.get_fields_extra.
        fields_extra = {
            'id': {
                'link': 'self',
                'resource': 'features',
            },
            'parent': {
                'link': 'to_one',
                'resource': 'features',
            },
            'children': {
                'link': 'from_many',
                'resource': 'features',
                'writable': 'update_only'
            },
            'references': {
                'link': 'from_many',
            },
            'supports': {
                'archive': 'omit',
                'link': 'from_many',
            },
            'history_current': {
                'archive': 'history_id',
                'link': 'from_one',
                'resource': 'historical_features',
                'writable': 'update_only',
            },
            'history': {
                'archive': 'omit',
                'link': 'from_many',
                'resource': 'historical_features',
            },
        }
class MaturitySerializer(HistoricalModelSerializer):
    """Specification Maturity Serializer."""

    class Meta:
        model = Maturity
        fields = (
            'id', 'slug', 'name', 'specifications',
            'history_current', 'history')
        read_only_fields = ('specifications',)
        # Per-field metadata consumed via FieldsExtraMixin.get_fields_extra.
        # 'singular' overrides the default de-pluralization of 'maturities'.
        fields_extra = {
            'id': {
                'link': 'self',
                'resource': 'maturities',
                'singular': 'maturity',
            },
            'specifications': {
                'archive': 'omit',
                'link': 'from_many',
            },
            'history_current': {
                'archive': 'history_id',
                'link': 'from_one',
                'resource': 'historical_maturities',
                'writable': 'update_only',
            },
            'history': {
                'archive': 'omit',
                'link': 'from_many',
                'resource': 'historical_maturities',
            },
        }
class ReferenceSerializer(HistoricalModelSerializer):
    """Reference (Feature to Section) Serializer."""

    class Meta:
        model = Reference
        fields = (
            'id', 'note', 'feature', 'section', 'history_current', 'history')
        # Per-field metadata consumed via FieldsExtraMixin.get_fields_extra.
        fields_extra = {
            'id': {
                'link': 'self',
                'resource': 'references',
            },
            'feature': {
                'link': 'to_one',
                'resource': 'features',
            },
            'section': {
                'link': 'to_one',
                'resource': 'sections',
            },
            'history_current': {
                'archive': 'history_id',
                'link': 'from_one',
                'resource': 'historical_references',
                'writable': 'update_only',
            },
            'history': {
                'archive': 'omit',
                'link': 'from_many',
                'resource': 'historical_references',
            },
        }
class SectionSerializer(HistoricalModelSerializer):
    """Specification Section Serializer."""

    class Meta:
        model = Section
        fields = (
            'id', 'number', 'name', 'subpath', 'references', 'specification',
            'history_current', 'history')
        read_only_fields = ('references',)
        # NOTE(review): 'features' is not in fields above; this entry looks
        # like a leftover from before the Feature/Section link moved to the
        # Reference model -- confirm it is stale before removing.
        extra_kwargs = {
            'features': {
                'default': []
            }
        }
        # Per-field metadata consumed via FieldsExtraMixin.get_fields_extra.
        fields_extra = {
            'id': {
                'link': 'self',
                'resource': 'sections',
            },
            'references': {
                'archive': 'omit',
                'link': 'from_many',
            },
            'specification': {
                'link': 'to_one',
                'resource': 'specifications',
            },
            'history_current': {
                'archive': 'history_id',
                'link': 'from_one',
                'resource': 'historical_sections',
                'writable': 'update_only',
            },
            'history': {
                'archive': 'omit',
                'link': 'from_many',
                'resource': 'historical_sections',
            },
        }
class SpecificationSerializer(HistoricalModelSerializer):
    """Specification Serializer."""

    def update(self, instance, validated_data):
        """Update the specification, re-ordering sections when requested."""
        sections = validated_data.pop('sections', None)
        instance = super(SpecificationSerializer, self).update(
            instance, validated_data)
        if sections:
            s_pks = [s.pk for s in sections]
            current_order = instance.get_section_order()
            # Only write the order when it actually changed.
            if s_pks != current_order:
                instance.set_section_order(s_pks)
        return instance

    class Meta:
        model = Specification
        fields = (
            'id', 'slug', 'mdn_key', 'name', 'uri', 'maturity', 'sections',
            'history_current', 'history')
        extra_kwargs = {
            'sections': {
                'default': []
            }
        }
        # Per-field metadata consumed via FieldsExtraMixin.get_fields_extra.
        fields_extra = {
            'id': {
                'link': 'self',
                'resource': 'specifications',
            },
            'maturity': {
                'link': 'to_one',
                'resource': 'maturities',
            },
            'sections': {
                'link': 'from_many',
            },
            'history_current': {
                'archive': 'history_id',
                'link': 'from_one',
                'resource': 'historical_specifications',
                'writable': 'update_only',
            },
            'history': {
                'archive': 'omit',
                'link': 'from_many',
                'resource': 'historical_specifications',
            },
        }
class SupportSerializer(HistoricalModelSerializer):
    """Support Serializer."""

    class Meta:
        model = Support
        fields = (
            'id', 'support', 'prefix',
            'prefix_mandatory', 'alternate_name', 'alternate_mandatory',
            'requires_config', 'default_config', 'protected', 'note',
            'version', 'feature', 'history_current', 'history')
        # Per-field metadata consumed via FieldsExtraMixin.get_fields_extra.
        fields_extra = {
            'id': {
                'link': 'self',
                'resource': 'supports',
            },
            'version': {
                'link': 'to_one',
                'resource': 'versions',
            },
            'feature': {
                'link': 'to_one',
                'resource': 'features',
            },
            'history_current': {
                'archive': 'history_id',
                'link': 'from_one',
                'resource': 'historical_supports',
                'writable': 'update_only',
            },
            'history': {
                'archive': 'omit',
                'link': 'from_many',
                'resource': 'historical_supports',
            },
        }
class VersionSerializer(HistoricalModelSerializer):
    """Browser Version Serializer."""

    # '_order' is presumably Django's order_with_respect_to column on
    # Version -- confirm on the model.
    order = IntegerField(read_only=True, source='_order')

    class Meta:
        model = Version
        fields = (
            'id', 'version', 'release_day', 'retirement_day',
            'status', 'release_notes_uri', 'note', 'order', 'browser',
            'supports', 'history_current', 'history')
        extra_kwargs = {
            'version': {
                'allow_blank': False
            }
        }
        read_only_fields = ('supports',)
        validators = [VersionAndStatusValidator()]
        # Per-field metadata consumed via FieldsExtraMixin.get_fields_extra.
        fields_extra = {
            'id': {
                'link': 'self',
                'resource': 'versions',
            },
            'version': {
                'writable': 'create_only',
            },
            'browser': {
                'link': 'to_one',
                'resource': 'browsers',
            },
            'supports': {
                'archive': 'omit',
                'link': 'from_many',
            },
            'history_current': {
                'archive': 'history_id',
                'link': 'from_one',
                'resource': 'historical_versions',
                'writable': 'update_only',
            },
            'history': {
                'archive': 'omit',
                'link': 'from_many',
                'resource': 'historical_versions',
            },
        }
#
# Change control object serializers
#
class ChangesetSerializer(FieldsExtraMixin, ModelSerializer):
    """Changeset Serializer."""

    # Optional pointer to the resource this changeset targets.
    target_resource_type = OptionalCharField(required=False)
    target_resource_id = OptionalIntegerField(required=False)

    class Meta:
        # NOTE(review): an old TODO (bug 1216786) asked to add
        # historical_references; they now appear in fields below, so the
        # TODO looks resolved -- confirm before dropping this note.
        model = Changeset
        fields = (
            'id', 'created', 'modified', 'closed', 'target_resource_type',
            'target_resource_id', 'user', 'historical_browsers',
            'historical_features', 'historical_maturities',
            'historical_references', 'historical_sections',
            'historical_specifications', 'historical_supports',
            'historical_versions')
        read_only_fields = (
            'id', 'created', 'modified', 'historical_browsers',
            'historical_features', 'historical_maturities',
            'historical_references', 'historical_sections',
            'historical_specifications', 'historical_supports',
            'historical_versions')
        extra_kwargs = {
            'user': {
                'default': CurrentUserDefault()
            }
        }
        # Per-field metadata consumed via FieldsExtraMixin.get_fields_extra.
        fields_extra = {
            'id': {
                'link': 'self',
                'resource': 'changesets',
            },
            'user': {
                'link': 'to_one',
                'resource': 'users',
                'writable': 'update_only',
            },
            'target_resource_type': {
                'writable': 'update_only',
            },
            'target_resource_id': {
                'writable': 'update_only',
            },
            'historical_browsers': {
                'link': 'from_many',
            },
            'historical_features': {
                'link': 'from_many',
            },
            'historical_maturities': {
                'link': 'from_many',
            },
            'historical_references': {
                'link': 'from_many',
            },
            'historical_specifications': {
                'link': 'from_many',
            },
            'historical_sections': {
                'link': 'from_many',
            },
            'historical_supports': {
                'link': 'from_many',
            },
            'historical_versions': {
                'link': 'from_many',
            },
        }
class UserSerializer(FieldsExtraMixin, ModelSerializer):
    """User Serializer."""

    created = DateTimeField(source='date_joined', read_only=True)
    agreement = SerializerMethodField()
    permissions = SerializerMethodField()

    def get_agreement(self, obj):
        """Return the version of the contribution terms the user agreed to.

        Placeholder for when we have a license agreement.
        """
        return 0

    def get_permissions(self, obj):
        """Return names of django.contrib.auth Groups."""
        try:
            # Cached objects (or those that have run through
            # cache.user_v1_serializer) have this property
            return obj.group_names
        except AttributeError:
            # Fall back to a sorted DB query of the user's group names.
            return sorted(obj.groups.values_list('name', flat=True))

    class Meta:
        model = User
        fields = (
            'id', 'username', 'created', 'agreement', 'permissions',
            'changesets')
        read_only_fields = ('username', 'changesets')
        # Per-field metadata consumed via FieldsExtraMixin.get_fields_extra.
        fields_extra = {
            'id': {
                'link': 'self',
                'resource': 'users',
            },
            'changesets': {
                'link': 'from_many',
            },
        }
#
# Historical object serializers
#
class ArchiveMixin(object):
    """Adjust serializer fields for archived representations."""

    def get_fields(self):
        """Modify fields when loading or preparing an archive."""
        fields = super(ArchiveMixin, self).get_fields()
        extra = getattr(self.Meta, 'fields_extra', {})
        dropped = []
        fk_names = []
        for name, field in fields.items():
            meta = extra.get(name, {})
            archive = meta.get('archive')
            link = meta.get('link')
            if archive == 'omit' or link in ('from_one', 'from_many'):
                # Either absent from the archived representation, or
                # deferred to HistoricalObjectSerializer's archive logic.
                dropped.append(name)
            elif link == 'to_one':
                # Serialize the raw foreign-key id instead of the relation.
                fk_names.append(name)
        for name in dropped:
            del fields[name]
        for name in fk_names:
            fields[name].source = name + '_id'
        return fields
class HistoricalObjectSerializer(ModelSerializer):
    """Common serializer attributes for Historical models."""

    # Remap history bookkeeping columns onto the public representation.
    id = IntegerField(source='history_id')
    date = DateTimeField(source='history_date')
    event = SerializerMethodField()
    changeset = PrimaryKeyRelatedField(
        source='history_changeset', read_only=True)
    object_id = HistoricalObjectField()
    archived_representation = SerializerMethodField()

    # history_type codes -> readable event names.
    EVENT_CHOICES = {
        '+': 'created',
        '~': 'changed',
        '-': 'deleted',
    }

    def get_event(self, obj):
        """Return the human-readable event name for this record."""
        return self.EVENT_CHOICES[obj.history_type]

    @classmethod
    def get_fields_extra(cls):
        """Combine Meta.fields_extra with Meta.archive_extra settings."""
        extra = deepcopy(cls.Meta.fields_extra)
        archive_extra = cls.Meta.archive_extra
        extra['id']['resource'] = archive_extra['history_resource']
        history_resource_singular = archive_extra.get(
            'history_resource_singular')
        if history_resource_singular:
            extra['id']['singular'] = history_resource_singular
        object_resource = archive_extra['object_resource']
        extra['object_id']['resource'] = object_resource
        # Default singular drops a trailing 's' ('browsers' -> 'browser');
        # irregular plurals supply archive_extra['singular'] instead.
        singular = archive_extra.get('singular', object_resource[:-1])
        extra['object_id']['name'] = singular
        extra['archived_representation']['resource'] = object_resource
        extra['archived_representation']['is_archive_of'] = cls.ArchivedObject
        extra['archived_representation']['name'] = object_resource
        return extra

    def get_archived_representation(self, obj):
        """Serialize the object as it was at this point in history.

        Attributes go into ``data``; related IDs (as strings) go into
        ``data['links']``.
        """
        serializer = self.ArchivedObject(obj)
        raw_data = serializer.data
        data = OrderedDict()
        links = OrderedDict()
        fields = serializer.Meta.fields
        fields_extra = getattr(serializer.Meta, 'fields_extra', {})
        for name in fields:
            field_extra = fields_extra.get(name, {})
            archive = field_extra.get('archive', 'include')
            if archive == 'include':
                link = field_extra.get('link')
                if link is None:
                    # Archived attribute
                    data[name] = raw_data[name]
                elif link == 'self':
                    # Archived self-id
                    data['id'] = str(raw_data[name])
                elif link == 'to_one':
                    # Foreign key, serialized as a string PK (or None).
                    value = getattr(obj, name + '_id')
                    if value is not None:
                        value = str(value)
                    links[name] = value
                else:
                    assert link == 'from_many', 'Unhandled link "%s"' % link
                    related = getattr(obj, name)
                    links[name] = [str(rel.pk) for rel in related]
            elif archive == 'history_id':
                links[name] = str(obj.history_id)
            else:
                assert archive == 'omit', (
                    'Unknown value "%s" for fields_extra["%s"]["archive"]'
                    % (archive, name))
        data['links'] = links
        return data

    class Meta:
        fields = (
            'id', 'date', 'event', 'changeset', 'object_id',
            'archived_representation')
        fields_extra = {
            'id': {
                'link': 'self',
                'archived_resource': True,
            },
            'changeset': {
                'link': 'to_one',
                'resource': 'changesets',
            },
            'object_id': {
                'link': 'to_one',
                'archived_resource': True,
            },
            'archived_representation': {
                'archived_resource': True,
            },
        }
class HistoricalBrowserSerializer(HistoricalObjectSerializer):
    """Serialize a historical Browser record."""

    class ArchivedObject(ArchiveMixin, BrowserSerializer):
        pass

    class Meta(HistoricalObjectSerializer.Meta):
        model = Browser.history.model
        archive_extra = {
            'history_resource': 'historical_browsers',
            'object_resource': 'browsers',
        }
class HistoricalFeatureSerializer(HistoricalObjectSerializer):
    """Serialize a historical Feature record."""

    class ArchivedObject(ArchiveMixin, FeatureSerializer):
        pass

    class Meta(HistoricalObjectSerializer.Meta):
        model = Feature.history.model
        archive_extra = {
            'history_resource': 'historical_features',
            'object_resource': 'features',
        }
class HistoricalMaturitySerializer(HistoricalObjectSerializer):
    """Serialize a historical Maturity record."""

    class ArchivedObject(ArchiveMixin, MaturitySerializer):
        pass

    class Meta(HistoricalObjectSerializer.Meta):
        model = Maturity.history.model
        # Irregular plural: supply singular forms explicitly.
        archive_extra = {
            'history_resource': 'historical_maturities',
            'history_resource_singular': 'historical_maturity',
            'object_resource': 'maturities',
            'singular': 'maturity',
        }
class HistoricalReferenceSerializer(HistoricalObjectSerializer):
    """Serialize a historical Reference record."""

    class ArchivedObject(ArchiveMixin, ReferenceSerializer):
        pass

    class Meta(HistoricalObjectSerializer.Meta):
        model = Reference.history.model
        archive_extra = {
            'history_resource': 'historical_references',
            'history_resource_singular': 'historical_reference',
            'object_resource': 'references',
            'singular': 'reference',
        }
class HistoricalSectionSerializer(HistoricalObjectSerializer):
    """Serialize a historical Section record."""

    class ArchivedObject(ArchiveMixin, SectionSerializer):
        pass

    class Meta(HistoricalObjectSerializer.Meta):
        model = Section.history.model
        archive_extra = {
            'history_resource': 'historical_sections',
            'object_resource': 'sections',
        }
class HistoricalSpecificationSerializer(HistoricalObjectSerializer):
    """Serialize a historical Specification record."""

    class ArchivedObject(ArchiveMixin, SpecificationSerializer):
        pass

    class Meta(HistoricalObjectSerializer.Meta):
        model = Specification.history.model
        archive_extra = {
            'history_resource': 'historical_specifications',
            'object_resource': 'specifications',
        }
class HistoricalSupportSerializer(HistoricalObjectSerializer):
    """Serialize a historical Support record."""

    class ArchivedObject(ArchiveMixin, SupportSerializer):
        pass

    class Meta(HistoricalObjectSerializer.Meta):
        model = Support.history.model
        archive_extra = {
            'history_resource': 'historical_supports',
            'object_resource': 'supports',
        }
class HistoricalVersionSerializer(HistoricalObjectSerializer):
    """Serialize a historical Version record."""

    class ArchivedObject(ArchiveMixin, VersionSerializer):
        pass

    class Meta(HistoricalObjectSerializer.Meta):
        model = Version.history.model
        archive_extra = {
            'history_resource': 'historical_versions',
            'object_resource': 'versions',
        }
| mpl-2.0 |
UManPychron/pychron | pychron/processing/analyses/file_analysis.py | 2 | 2309 | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import Str, Property, cached_property, Float
# ============= standard library imports ========================
# ============= local library imports ==========================
from uncertainties import ufloat
from pychron.processing.analyses.analysis import Analysis
class NonDBAnalysis(Analysis):
    """Analysis backed by in-memory values instead of a database record."""

    # record_id = Str
    # Age with uncertainty; recomputed when age or age_err changes.
    uage = Property(depends_on='age, age_err')
    uuid = Str
    sample = Str
    k39_err = Float
    rad40_err = Float
    kca_err = Float
    radiogenic_yield_err = Float

    @classmethod
    def from_csv_record(cls, ri):
        """Build an analysis from a CSV record, copying known attributes.

        Attributes missing on ``ri`` are silently skipped (best effort).
        """
        obj = cls()
        for a in ('age', 'age_err', 'group', 'aliquot', 'sample', 'label_name',
                  'k39', 'k39_err', 'rad40', 'rad40_err',
                  'kca', 'kca_err', 'radiogenic_yield', 'radiogenic_yield_err'):
            try:
                setattr(obj, a, getattr(ri, a))
            except AttributeError:
                pass
        return obj

    def get_computed_value(self, attr):
        """Return ``attr`` with uncertainty; ufloat(0, 0) when unknown."""
        if attr in ('k39', 'rad40', 'kca', 'radiogenic_yield'):
            return self._as_ufloat(attr)
        else:
            return ufloat(0, 0)

    def _as_ufloat(self, attr):
        # Pair the nominal value with its companion "<attr>_err" trait.
        return ufloat(getattr(self, attr), getattr(self, '{}_err'.format(attr)))

    @cached_property
    def _get_uage(self):
        # Traits getter for the ``uage`` Property.
        return self._as_ufloat('age')
class FileAnalysis(NonDBAnalysis):
    """NonDBAnalysis subclass; adds no behavior of its own."""
    pass
class InterpretedAgeAnalysis(NonDBAnalysis):
    """NonDBAnalysis subclass; adds no behavior of its own."""
    pass
# ============= EOF =============================================
| apache-2.0 |
zanderle/django | tests/from_db_value/tests.py | 399 | 1075 | from django.db import connection
from django.db.models import Max
from django.test import TestCase
from .models import Cash, CashModel
class FromDBValueTest(TestCase):
    """Check that Field.from_db_value converts values on every read path."""

    def setUp(self):
        CashModel.objects.create(cash='12.50')

    def test_simple_load(self):
        instance = CashModel.objects.get()
        self.assertIsInstance(instance.cash, Cash)

    def test_values_list(self):
        values_list = CashModel.objects.values_list('cash', flat=True)
        self.assertIsInstance(values_list[0], Cash)

    def test_values(self):
        values = CashModel.objects.values('cash')
        self.assertIsInstance(values[0]['cash'], Cash)

    def test_aggregation(self):
        # Aggregate results also pass through from_db_value.
        maximum = CashModel.objects.aggregate(m=Max('cash'))['m']
        self.assertIsInstance(maximum, Cash)

    def test_defer(self):
        # Deferred fields are converted when eventually loaded.
        instance = CashModel.objects.defer('cash').get()
        self.assertIsInstance(instance.cash, Cash)

    def test_connection(self):
        # The converter receives the connection used for the query.
        instance = CashModel.objects.get()
        self.assertEqual(instance.cash.vendor, connection.vendor)
hefen1/chromium | build/android/buildbot/bb_host_steps.py | 45 | 4630 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import bb_utils
import bb_annotations
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from pylib import constants
SLAVE_SCRIPTS_DIR = os.path.join(bb_utils.BB_BUILD_DIR, 'scripts', 'slave')
VALID_HOST_TESTS = set(['check_webview_licenses', 'findbugs'])
DIR_BUILD_ROOT = os.path.dirname(constants.DIR_SOURCE_ROOT)
# Short hand for RunCmd which is used extensively in this file.
RunCmd = bb_utils.RunCmd
def SrcPath(*path):
  """Return a path joined onto the Chromium source root."""
  return os.path.join(constants.DIR_SOURCE_ROOT, *path)
def CheckWebViewLicenses(_):
  """Scan android_webview licenses; failures are reported as warnings."""
  bb_annotations.PrintNamedStep('check_licenses')
  RunCmd([SrcPath('android_webview', 'tools', 'webview_licenses.py'), 'scan'],
         warning_code=1)
def RunHooks(build_type):
  """Run landmines and gclient hooks, clobbering the build dir if needed."""
  RunCmd([SrcPath('build', 'landmines.py')])
  build_path = SrcPath('out', build_type)
  landmine_path = os.path.join(build_path, '.landmines_triggered')
  clobber_env = os.environ.get('BUILDBOT_CLOBBER')
  if clobber_env or os.path.isfile(landmine_path):
    bb_annotations.PrintNamedStep('Clobber')
    if not clobber_env:
      # Landmines (not the env var) triggered the clobber; say why.
      # NOTE: Python 2 print statements -- this script predates Python 3.
      print 'Clobbering due to triggered landmines:'
      with open(landmine_path) as f:
        print f.read()
    RunCmd(['rm', '-rf', build_path])
  bb_annotations.PrintNamedStep('runhooks')
  RunCmd(['gclient', 'runhooks'], halt_on_failure=True)
def Compile(options):
  """Compile the requested target with ninja and goma."""
  RunHooks(options.target)
  cmd = [os.path.join(SLAVE_SCRIPTS_DIR, 'compile.py'),
         '--build-tool=ninja',
         '--compiler=goma',
         '--target=%s' % options.target,
         '--goma-dir=%s' % bb_utils.GOMA_DIR]
  bb_annotations.PrintNamedStep('compile')
  if options.build_targets:
    # Restrict the build to an explicit comma-separated target list.
    build_targets = options.build_targets.split(',')
    cmd += ['--build-args', ' '.join(build_targets)]
  RunCmd(cmd, halt_on_failure=True, cwd=DIR_BUILD_ROOT)
def ZipBuild(options):
  """Archive the build output for downstream consumers."""
  bb_annotations.PrintNamedStep('zip_build')
  RunCmd([
      os.path.join(SLAVE_SCRIPTS_DIR, 'zip_build.py'),
      '--src-dir', constants.DIR_SOURCE_ROOT,
      '--exclude-files', 'lib.target,gen,android_webview,jingle_unittests']
      + bb_utils.EncodeProperties(options), cwd=DIR_BUILD_ROOT)
def ExtractBuild(options):
  """Fetch a previously archived build."""
  bb_annotations.PrintNamedStep('extract_build')
  RunCmd([os.path.join(SLAVE_SCRIPTS_DIR, 'extract_build.py')]
         + bb_utils.EncodeProperties(options), cwd=DIR_BUILD_ROOT)
def FindBugs(options):
  """Run the FindBugs static analysis diff and its plugin tests."""
  bb_annotations.PrintNamedStep('findbugs')
  build_type = []
  if options.target == 'Release':
    build_type = ['--release-build']
  RunCmd([SrcPath('build', 'android', 'findbugs_diff.py')] + build_type)
  RunCmd([SrcPath(
      'tools', 'android', 'findbugs_plugin', 'test',
      'run_findbugs_plugin_tests.py')] + build_type)
def BisectPerfRegression(options):
  """Prepare and run the perf-regression bisect scripts."""
  args = []
  if options.extra_src:
    # Optional override script for the bisect's default behavior.
    args = ['--extra_src', options.extra_src]
  RunCmd([SrcPath('tools', 'prepare-bisect-perf-regression.py'),
          '-w', os.path.join(constants.DIR_SOURCE_ROOT, os.pardir)])
  RunCmd([SrcPath('tools', 'run-bisect-perf-regression.py'),
          '-w', os.path.join(constants.DIR_SOURCE_ROOT, os.pardir)] + args)
def GetHostStepCmds():
  """Return (step name, handler) pairs for bb_utils.RunSteps."""
  return [
      ('compile', Compile),
      ('extract_build', ExtractBuild),
      ('check_webview_licenses', CheckWebViewLicenses),
      ('bisect_perf_regression', BisectPerfRegression),
      ('findbugs', FindBugs),
      ('zip_build', ZipBuild)
  ]
def GetHostStepsOptParser():
  """Build the command-line option parser for host steps."""
  parser = bb_utils.GetParser()
  parser.add_option('--steps', help='Comma separated list of host tests.')
  parser.add_option('--build-targets', default='',
                    help='Comma separated list of build targets.')
  parser.add_option('--experimental', action='store_true',
                    help='Indicate whether to compile experimental targets.')
  parser.add_option('--extra_src', default='',
                    help='Path to extra source file. If this is supplied, '
                    'bisect script will use it to override default behavior.')
  return parser
def main(argv):
  """Parse arguments and run the requested host steps."""
  parser = GetHostStepsOptParser()
  options, args = parser.parse_args(argv[1:])
  if args:
    return sys.exit('Unused args %s' % args)
  # Pull target/extra_src out of the buildbot factory properties.
  setattr(options, 'target', options.factory_properties.get('target', 'Debug'))
  setattr(options, 'extra_src',
          options.factory_properties.get('extra_src', ''))
  if options.steps:
    bb_utils.RunSteps(options.steps.split(','), GetHostStepCmds(), options)
# Script entry point.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
| bsd-3-clause |
philgyford/django-spectator | spectator/core/managers.py | 1 | 4128 | from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models import Count
from .apps import spectator_apps
class CreatorManager(models.Manager):
    """Manager for Creator adding "most active" orderings.

    Each method checks that the relevant spectator app is installed and
    annotates the returned Creators with a count attribute.
    """

    def by_publications(self):
        """
        The Creators who have been most-read, ordered by number of read
        publications (ignoring if any of those publications have been read
        multiple times.)

        Each Creator will have a `num_publications` attribute.

        Raises ImproperlyConfigured if spectator.reading is not installed.
        """
        if not spectator_apps.is_enabled("reading"):
            raise ImproperlyConfigured(
                "To use the CreatorManager.by_publications() method, "
                "'spectator.reading' must be in INSTALLED_APPS."
            )

        qs = self.get_queryset()

        # Only publications with at least one finished reading count.
        qs = (
            qs.exclude(publications__reading__isnull=True)
            .exclude(publications__reading__is_finished=False)
            .annotate(num_publications=Count("publications"))
            .order_by("-num_publications", "name_sort")
        )

        return qs

    def by_readings(self, role_names=("", "Author")):
        """
        The Creators who have been most-read, ordered by number of readings.

        By default it will only include Creators whose role was left empty,
        or is 'Author'. (The default is a tuple, not a list, so it cannot
        be mutated between calls.)

        Each Creator will have a `num_readings` attribute.

        Raises ImproperlyConfigured if spectator.reading is not installed.
        """
        if not spectator_apps.is_enabled("reading"):
            raise ImproperlyConfigured(
                "To use the CreatorManager.by_readings() method, "
                "'spectator.reading' must be in INSTALLED_APPS."
            )

        qs = self.get_queryset()

        qs = (
            qs.filter(publication_roles__role_name__in=role_names)
            .exclude(publications__reading__isnull=True)
            .exclude(publications__reading__is_finished=False)
            .annotate(num_readings=Count("publications__reading"))
            .order_by("-num_readings", "name_sort")
        )

        return qs

    def by_events(self, kind=None):
        """
        Get the Creators involved in the most Events.

        This only counts Creators directly involved in an Event.
        i.e. if a Creator is the director of a movie Work, and an Event was
        a viewing of that movie, that Event wouldn't count. Unless they were
        also directly involved in the Event (e.g. speaking after the movie).

        kind - If supplied, only Events with that `kind` value will be counted.

        Raises ImproperlyConfigured if spectator.events is not installed.
        """
        if not spectator_apps.is_enabled("events"):
            raise ImproperlyConfigured(
                "To use the CreatorManager.by_events() method, "
                "'spectator.events' must be in INSTALLED_APPS."
            )

        qs = self.get_queryset()

        if kind is not None:
            qs = qs.filter(events__kind=kind)

        qs = qs.annotate(num_events=Count("events", distinct=True)).order_by(
            "-num_events", "name_sort"
        )

        return qs

    def by_works(self, kind=None, role_name=None):
        """
        Get the Creators involved in the most Works.

        kind - If supplied, only Works with that `kind` value will be counted.
        role_name - If supplied, only Works on which the role is that will be counted.

        e.g. To get all 'movie' Works on which the Creators had the role 'Director':

            Creator.objects.by_works(kind='movie', role_name='Director')

        Raises ImproperlyConfigured if spectator.events is not installed.
        """
        if not spectator_apps.is_enabled("events"):
            raise ImproperlyConfigured(
                "To use the CreatorManager.by_works() method, "
                "'spectator.events' must be in INSTALLED_APPS."
            )

        qs = self.get_queryset()

        filter_kwargs = {}
        if kind is not None:
            filter_kwargs["works__kind"] = kind
        if role_name is not None:
            filter_kwargs["work_roles__role_name"] = role_name
        if filter_kwargs:
            qs = qs.filter(**filter_kwargs)

        qs = qs.annotate(num_works=Count("works", distinct=True)).order_by(
            "-num_works", "name_sort"
        )

        return qs
CloudServer/cinder | cinder/manager.py | 2 | 5788 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base Manager class.
Managers are responsible for a certain aspect of the system. It is a logical
grouping of code relating to a portion of the system. In general other
components should be using the manager to make changes to the components that
it is responsible for.
For example, other components that need to deal with volumes in some way,
should do so by calling methods on the VolumeManager instead of directly
changing fields in the database. This allows us to keep all of the code
relating to volumes in the same place.
We have adopted a basic strategy of Smart managers and dumb data, which means
rather than attaching methods to data objects, components should call manager
methods that act on the data.
Methods on managers that can be executed locally should be called directly. If
a particular method must execute on a remote host, this should be done via rpc
to the service that wraps the manager
Managers should be responsible for most of the db access, and
non-implementation specific data. Anything implementation specific that can't
be generalized should be done by the Driver.
In general, we prefer to have one manager with multiple drivers for different
implementations, but sometimes it makes sense to have multiple managers. You
can think of it this way: Abstract different overall strategies at the manager
level(FlatNetwork vs VlanNetwork), and different implementations at the driver
level(LinuxNetDriver vs CiscoNetDriver).
Managers will often provide methods for initial setup of a host or periodic
tasks to a wrapping service.
This module provides Manager, a base class for managers.
"""
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_service import periodic_task
from cinder.db import base
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import version
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class PeriodicTasks(periodic_task.PeriodicTasks):
    # Bind oslo's periodic task machinery to this service's CONF.
    def __init__(self):
        super(PeriodicTasks, self).__init__(CONF)
class Manager(base.Base, PeriodicTasks):
    """Base class for cinder service managers."""

    # Default RPC API version; subclasses bump this as their API evolves.
    RPC_API_VERSION = '1.0'

    target = messaging.Target(version=RPC_API_VERSION)

    def __init__(self, host=None, db_driver=None):
        """Record the service host (defaulting to CONF.host)."""
        self.host = host or CONF.host
        self.additional_endpoints = []
        super(Manager, self).__init__(db_driver)

    def periodic_tasks(self, context, raise_on_error=False):
        """Run this manager's periodic tasks once."""
        return self.run_periodic_tasks(context, raise_on_error=raise_on_error)

    def init_host(self):
        """Hook for setup work before the service is made available.

        Runs before the service shows up on RPC and starts accepting
        calls from other components. Subclasses override as needed.
        """
        pass

    def init_host_with_rpc(self):
        """Hook for setup work that needs RPC to be ready.

        Like init_host(), but runs once RPC is available. Subclasses
        override as needed.
        """
        pass

    def service_version(self, context):
        """Report the running cinder version string."""
        return version.version_string()

    def service_config(self, context):
        """Return a snapshot of every registered config option."""
        return {key: CONF.get(key, None) for key in CONF}

    def is_working(self):
        """Report whether the manager is operating correctly.

        Subclasses override this to reflect their actual state.
        """
        return True
class SchedulerDependentManager(Manager):
    """Periodically push capability updates to the scheduler services.

    Services that must keep the scheduler informed of their
    capabilities derive from this class; everything else can derive
    from manager.Manager directly.  Nothing is sent until
    update_service_capabilities has been called with non-None values.
    """

    def __init__(self, host=None, db_driver=None, service_name='undefined'):
        self.service_name = service_name
        self.last_capabilities = None
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        super(SchedulerDependentManager, self).__init__(host, db_driver)

    def update_service_capabilities(self, capabilities):
        """Stash capabilities to be sent on the next periodic update."""
        self.last_capabilities = capabilities

    @periodic_task.periodic_task
    def _publish_service_capabilities(self, context):
        """Forward the most recently recorded capabilities to the scheduler."""
        caps = self.last_capabilities
        if not caps:
            return
        LOG.debug('Notifying Schedulers of capabilities ...')
        self.scheduler_rpcapi.update_service_capabilities(context,
                                                          self.service_name,
                                                          self.host,
                                                          caps)
| apache-2.0 |
dvliman/jaikuengine | .google_appengine/lib/django-1.5/django/contrib/gis/db/models/proxy.py | 220 | 2595 | """
The GeometryProxy object, allows for lazy-geometries. The proxy uses
Python descriptors for instantiating and setting Geometry objects
corresponding to geographic model fields.
Thanks to Robert Coup for providing this functionality (see #4322).
"""
from django.contrib.gis import memoryview
from django.utils import six
class GeometryProxy(object):
def __init__(self, klass, field):
"""
Proxy initializes on the given Geometry class (not an instance) and
the GeometryField.
"""
self._field = field
self._klass = klass
def __get__(self, obj, type=None):
"""
This accessor retrieves the geometry, initializing it using the geometry
class specified during initialization and the HEXEWKB value of the field.
Currently, only GEOS or OGR geometries are supported.
"""
if obj is None:
# Accessed on a class, not an instance
return self
# Getting the value of the field.
geom_value = obj.__dict__[self._field.attname]
if isinstance(geom_value, self._klass):
geom = geom_value
elif (geom_value is None) or (geom_value==''):
geom = None
else:
# Otherwise, a Geometry object is built using the field's contents,
# and the model's corresponding attribute is set.
geom = self._klass(geom_value)
setattr(obj, self._field.attname, geom)
return geom
def __set__(self, obj, value):
"""
This accessor sets the proxied geometry with the geometry class
specified during initialization. Values of None, HEXEWKB, or WKT may
be used to set the geometry as well.
"""
# The OGC Geometry type of the field.
gtype = self._field.geom_type
# The geometry type must match that of the field -- unless the
# general GeometryField is used.
if isinstance(value, self._klass) and (str(value.geom_type).upper() == gtype or gtype == 'GEOMETRY'):
# Assigning the SRID to the geometry.
if value.srid is None: value.srid = self._field.srid
elif value is None or isinstance(value, six.string_types + (memoryview,)):
# Set with None, WKT, HEX, or WKB
pass
else:
raise TypeError('cannot set %s GeometryProxy with value of type: %s' % (obj.__class__.__name__, type(value)))
# Setting the objects dictionary with the value, and returning.
obj.__dict__[self._field.attname] = value
return value
| apache-2.0 |
pymedusa/Medusa | ext/boto/cloudhsm/layer1.py | 135 | 16187 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.cloudhsm import exceptions
class CloudHSMConnection(AWSQueryConnection):
    """
    AWS CloudHSM Service
    """
    APIVersion = "2014-05-30"
    DefaultRegionName = "us-east-1"
    DefaultRegionEndpoint = "cloudhsm.us-east-1.amazonaws.com"
    ServiceName = "CloudHSM"
    TargetPrefix = "CloudHsmFrontendService"
    ResponseError = JSONResponseError
    # Maps the "__type" name found in a JSON error body to the exception
    # class that make_request() raises for it.
    _faults = {
        "InvalidRequestException": exceptions.InvalidRequestException,
        "CloudHsmServiceException": exceptions.CloudHsmServiceException,
        "CloudHsmInternalException": exceptions.CloudHsmInternalException,
    }
    def __init__(self, **kwargs):
        # Fall back to the default us-east-1 region when the caller does
        # not supply one, and derive the endpoint host from the region.
        region = kwargs.pop('region', None)
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)
        if 'host' not in kwargs or kwargs['host'] is None:
            kwargs['host'] = region.endpoint
        super(CloudHSMConnection, self).__init__(**kwargs)
        self.region = region
    def _required_auth_capability(self):
        # Requests are signed with AWS Signature Version 4.
        return ['hmac-v4']
    def create_hapg(self, label):
        """
        Creates a high-availability partition group. A high-
        availability partition group is a group of partitions that
        spans multiple physical HSMs.
        :type label: string
        :param label: The label of the new high-availability partition group.
        """
        params = {'Label': label, }
        return self.make_request(action='CreateHapg',
                                 body=json.dumps(params))
    def create_hsm(self, subnet_id, ssh_key, iam_role_arn, subscription_type,
                   eni_ip=None, external_id=None, client_token=None,
                   syslog_ip=None):
        """
        Creates an uninitialized HSM instance. Running this command
        provisions an HSM appliance and will result in charges to your
        AWS account for the HSM.
        :type subnet_id: string
        :param subnet_id: The identifier of the subnet in your VPC in which to
            place the HSM.
        :type ssh_key: string
        :param ssh_key: The SSH public key to install on the HSM.
        :type eni_ip: string
        :param eni_ip: The IP address to assign to the HSM's ENI.
        :type iam_role_arn: string
        :param iam_role_arn: The ARN of an IAM role to enable the AWS CloudHSM
            service to allocate an ENI on your behalf.
        :type external_id: string
        :param external_id: The external ID from **IamRoleArn**, if present.
        :type subscription_type: string
        :param subscription_type: The subscription type.
        :type client_token: string
        :param client_token: A user-defined token to ensure idempotence.
            Subsequent calls to this action with the same token will be
            ignored.
        :type syslog_ip: string
        :param syslog_ip: The IP address for the syslog monitoring server.
        """
        # Only the four required fields are always sent; optional fields
        # are added to the payload when the caller supplied them.
        params = {
            'SubnetId': subnet_id,
            'SshKey': ssh_key,
            'IamRoleArn': iam_role_arn,
            'SubscriptionType': subscription_type,
        }
        if eni_ip is not None:
            params['EniIp'] = eni_ip
        if external_id is not None:
            params['ExternalId'] = external_id
        if client_token is not None:
            params['ClientToken'] = client_token
        if syslog_ip is not None:
            params['SyslogIp'] = syslog_ip
        return self.make_request(action='CreateHsm',
                                 body=json.dumps(params))
    def create_luna_client(self, certificate, label=None):
        """
        Creates an HSM client.
        :type label: string
        :param label: The label for the client.
        :type certificate: string
        :param certificate: The contents of a Base64-Encoded X.509 v3
            certificate to be installed on the HSMs used by this client.
        """
        params = {'Certificate': certificate, }
        if label is not None:
            params['Label'] = label
        return self.make_request(action='CreateLunaClient',
                                 body=json.dumps(params))
    def delete_hapg(self, hapg_arn):
        """
        Deletes a high-availability partition group.
        :type hapg_arn: string
        :param hapg_arn: The ARN of the high-availability partition group to
            delete.
        """
        params = {'HapgArn': hapg_arn, }
        return self.make_request(action='DeleteHapg',
                                 body=json.dumps(params))
    def delete_hsm(self, hsm_arn):
        """
        Deletes an HSM. Once complete, this operation cannot be undone
        and your key material cannot be recovered.
        :type hsm_arn: string
        :param hsm_arn: The ARN of the HSM to delete.
        """
        params = {'HsmArn': hsm_arn, }
        return self.make_request(action='DeleteHsm',
                                 body=json.dumps(params))
    def delete_luna_client(self, client_arn):
        """
        Deletes a client.
        :type client_arn: string
        :param client_arn: The ARN of the client to delete.
        """
        params = {'ClientArn': client_arn, }
        return self.make_request(action='DeleteLunaClient',
                                 body=json.dumps(params))
    def describe_hapg(self, hapg_arn):
        """
        Retrieves information about a high-availability partition
        group.
        :type hapg_arn: string
        :param hapg_arn: The ARN of the high-availability partition group to
            describe.
        """
        params = {'HapgArn': hapg_arn, }
        return self.make_request(action='DescribeHapg',
                                 body=json.dumps(params))
    def describe_hsm(self, hsm_arn=None, hsm_serial_number=None):
        """
        Retrieves information about an HSM. You can identify the HSM
        by its ARN or its serial number.
        :type hsm_arn: string
        :param hsm_arn: The ARN of the HSM. Either the HsmArn or the
            SerialNumber parameter must be specified.
        :type hsm_serial_number: string
        :param hsm_serial_number: The serial number of the HSM. Either the
            HsmArn or the HsmSerialNumber parameter must be specified.
        """
        # NOTE(review): the "either ... must be specified" constraint is
        # enforced server-side; an empty params dict is sent as-is.
        params = {}
        if hsm_arn is not None:
            params['HsmArn'] = hsm_arn
        if hsm_serial_number is not None:
            params['HsmSerialNumber'] = hsm_serial_number
        return self.make_request(action='DescribeHsm',
                                 body=json.dumps(params))
    def describe_luna_client(self, client_arn=None,
                             certificate_fingerprint=None):
        """
        Retrieves information about an HSM client.
        :type client_arn: string
        :param client_arn: The ARN of the client.
        :type certificate_fingerprint: string
        :param certificate_fingerprint: The certificate fingerprint.
        """
        params = {}
        if client_arn is not None:
            params['ClientArn'] = client_arn
        if certificate_fingerprint is not None:
            params['CertificateFingerprint'] = certificate_fingerprint
        return self.make_request(action='DescribeLunaClient',
                                 body=json.dumps(params))
    def get_config(self, client_arn, client_version, hapg_list):
        """
        Gets the configuration files necessary to connect to all high
        availability partition groups the client is associated with.
        :type client_arn: string
        :param client_arn: The ARN of the client.
        :type client_version: string
        :param client_version: The client version.
        :type hapg_list: list
        :param hapg_list: A list of ARNs that identify the high-availability
            partition groups that are associated with the client.
        """
        params = {
            'ClientArn': client_arn,
            'ClientVersion': client_version,
            'HapgList': hapg_list,
        }
        return self.make_request(action='GetConfig',
                                 body=json.dumps(params))
    def list_available_zones(self):
        """
        Lists the Availability Zones that have available AWS CloudHSM
        capacity.
        """
        params = {}
        return self.make_request(action='ListAvailableZones',
                                 body=json.dumps(params))
    def list_hapgs(self, next_token=None):
        """
        Lists the high-availability partition groups for the account.
        This operation supports pagination with the use of the
        NextToken member. If more results are available, the NextToken
        member of the response contains a token that you pass in the
        next call to ListHapgs to retrieve the next set of items.
        :type next_token: string
        :param next_token: The NextToken value from a previous call to
            ListHapgs. Pass null if this is the first call.
        """
        params = {}
        if next_token is not None:
            params['NextToken'] = next_token
        return self.make_request(action='ListHapgs',
                                 body=json.dumps(params))
    def list_hsms(self, next_token=None):
        """
        Retrieves the identifiers of all of the HSMs provisioned for
        the current customer.
        This operation supports pagination with the use of the
        NextToken member. If more results are available, the NextToken
        member of the response contains a token that you pass in the
        next call to ListHsms to retrieve the next set of items.
        :type next_token: string
        :param next_token: The NextToken value from a previous call to
            ListHsms. Pass null if this is the first call.
        """
        params = {}
        if next_token is not None:
            params['NextToken'] = next_token
        return self.make_request(action='ListHsms',
                                 body=json.dumps(params))
    def list_luna_clients(self, next_token=None):
        """
        Lists all of the clients.
        This operation supports pagination with the use of the
        NextToken member. If more results are available, the NextToken
        member of the response contains a token that you pass in the
        next call to ListLunaClients to retrieve the next set of
        items.
        :type next_token: string
        :param next_token: The NextToken value from a previous call to
            ListLunaClients. Pass null if this is the first call.
        """
        params = {}
        if next_token is not None:
            params['NextToken'] = next_token
        return self.make_request(action='ListLunaClients',
                                 body=json.dumps(params))
    def modify_hapg(self, hapg_arn, label=None, partition_serial_list=None):
        """
        Modifies an existing high-availability partition group.
        :type hapg_arn: string
        :param hapg_arn: The ARN of the high-availability partition group to
            modify.
        :type label: string
        :param label: The new label for the high-availability partition group.
        :type partition_serial_list: list
        :param partition_serial_list: The list of partition serial numbers to
            make members of the high-availability partition group.
        """
        params = {'HapgArn': hapg_arn, }
        if label is not None:
            params['Label'] = label
        if partition_serial_list is not None:
            params['PartitionSerialList'] = partition_serial_list
        return self.make_request(action='ModifyHapg',
                                 body=json.dumps(params))
    def modify_hsm(self, hsm_arn, subnet_id=None, eni_ip=None,
                   iam_role_arn=None, external_id=None, syslog_ip=None):
        """
        Modifies an HSM.
        :type hsm_arn: string
        :param hsm_arn: The ARN of the HSM to modify.
        :type subnet_id: string
        :param subnet_id: The new identifier of the subnet that the HSM is in.
        :type eni_ip: string
        :param eni_ip: The new IP address for the elastic network interface
            attached to the HSM.
        :type iam_role_arn: string
        :param iam_role_arn: The new IAM role ARN.
        :type external_id: string
        :param external_id: The new external ID.
        :type syslog_ip: string
        :param syslog_ip: The new IP address for the syslog monitoring server.
        """
        params = {'HsmArn': hsm_arn, }
        if subnet_id is not None:
            params['SubnetId'] = subnet_id
        if eni_ip is not None:
            params['EniIp'] = eni_ip
        if iam_role_arn is not None:
            params['IamRoleArn'] = iam_role_arn
        if external_id is not None:
            params['ExternalId'] = external_id
        if syslog_ip is not None:
            params['SyslogIp'] = syslog_ip
        return self.make_request(action='ModifyHsm',
                                 body=json.dumps(params))
    def modify_luna_client(self, client_arn, certificate):
        """
        Modifies the certificate used by the client.
        This action can potentially start a workflow to install the
        new certificate on the client's HSMs.
        :type client_arn: string
        :param client_arn: The ARN of the client.
        :type certificate: string
        :param certificate: The new certificate for the client.
        """
        params = {
            'ClientArn': client_arn,
            'Certificate': certificate,
        }
        return self.make_request(action='ModifyLunaClient',
                                 body=json.dumps(params))
    def make_request(self, action, body):
        # Shared helper for all API methods above: POSTs the JSON
        # ``body`` with an X-Amz-Target header naming the operation,
        # then decodes the JSON response or raises the mapped exception.
        headers = {
            'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
            'Host': self.region.endpoint,
            'Content-Type': 'application/x-amz-json-1.1',
            'Content-Length': str(len(body)),
        }
        http_request = self.build_base_http_request(
            method='POST', path='/', auth_path='/', params={},
            headers=headers, data=body)
        response = self._mexe(http_request, sender=None,
                              override_num_retries=10)
        response_body = response.read().decode('utf-8')
        boto.log.debug(response_body)
        if response.status == 200:
            if response_body:
                return json.loads(response_body)
            # NOTE(review): a 200 response with an empty body falls
            # through here and returns None implicitly.
        else:
            # Error responses carry a JSON body whose '__type' member
            # selects the exception class to raise (see _faults).
            json_body = json.loads(response_body)
            fault_name = json_body.get('__type', None)
            exception_class = self._faults.get(fault_name, self.ResponseError)
            raise exception_class(response.status, response.reason,
                                  body=json_body)
| gpl-3.0 |
Alonso1398/muZic_kernel_ivoryss | Documentation/target/tcm_mod_builder.py | 3119 | 42754 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
# Script-wide state shared by the generator helpers below.
tcm_dir = ""
# fabric_mod_port / fabric_mod_init_port hold the protocol-specific
# port-name prefixes chosen by the tcm_mod_build_*_include() builders
# ("lport"/"nport" for FC, "tport"/"iport" for SAS and iSCSI).
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
    """Print *msg* and terminate the script with exit status 1."""
    # Parenthesized single-argument print behaves identically on
    # Python 2 (where the bare "print msg" statement broke Python 3
    # parsing of this script).
    print(msg)
    sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
    """Create the fabric module output directory.

    Returns 1 when the directory already exists; otherwise creates it
    (aborting via tcm_mod_err() on failure) and returns None.
    """
    if os.path.isdir(fabric_mod_dir_var):
        return 1
    print("Creating fabric_mod_dir: " + fabric_mod_dir_var)
    # os.mkdir() signals failure by raising OSError, not through its
    # return value, so trap it explicitly (the old "ret = os.mkdir(...);
    # if ret:" check was dead code -- mkdir always returns None).
    try:
        os.mkdir(fabric_mod_dir_var)
    except OSError:
        tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
    return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
    """Write <fabric_mod_name>_base.h for an FC fabric module.

    Emits the C header declaring the module's nacl/tpg/lport structures
    and records the FC naming convention ("lport"/"nport") in the
    fabric_mod_port / fabric_mod_init_port globals.
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f
    # NOTE(review): open() raises IOError on failure rather than
    # returning a false value, so this check never fires.
    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
    buf += " u64 nport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
    buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* FC lport target portal group tag for TCM */\n"
    buf += " u16 lport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
    buf += " struct " + fabric_mod_name + "_lport *lport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_lport {\n"
    buf += " /* SCSI protocol the lport is providing */\n"
    buf += " u8 lport_proto_id;\n"
    buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
    buf += " u64 lport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
    buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
    buf += " struct se_wwn lport_wwn;\n"
    buf += "};\n"
    # NOTE(review): on Python 2, file.write() returns None, so this
    # error check never fires.
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    # Record the FC naming convention for later generation stages.
    fabric_mod_port = "lport"
    fabric_mod_init_port = "nport"
    return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
    """Write <fabric_mod_name>_base.h for a SAS fabric module.

    Emits the C header declaring the module's nacl/tpg/tport structures
    and records the SAS naming convention ("tport"/"iport") in the
    fabric_mod_port / fabric_mod_init_port globals.
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f
    # NOTE(review): open() raises IOError on failure rather than
    # returning a false value, so this check never fires.
    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
    buf += " u64 iport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
    buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* SAS port target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* SCSI protocol the tport is providing */\n"
    buf += " u8 tport_proto_id;\n"
    buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
    buf += " u64 tport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for SAS Target port */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"
    # NOTE(review): on Python 2, file.write() returns None, so this
    # error check never fires.
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    # Record the SAS naming convention for later generation stages.
    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
    """Write <fabric_mod_name>_base.h for an iSCSI fabric module.

    Emits the C header declaring the module's nacl/tpg/tport structures
    and records the iSCSI naming convention ("tport"/"iport") in the
    fabric_mod_port / fabric_mod_init_port globals.
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f
    # NOTE(review): open() raises IOError on failure rather than
    # returning a false value, so this check never fires.
    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* ASCII formatted InitiatorName */\n"
    buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* iSCSI target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* SCSI protocol the tport is providing */\n"
    buf += " u8 tport_proto_id;\n"
    buf += " /* ASCII formatted TargetName for IQN */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"
    # NOTE(review): on Python 2, file.write() returns None, so this
    # error check never fires.
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    # Record the iSCSI naming convention for later generation stages.
    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
    """Dispatch to the protocol-specific *_base.h generator.

    proto_ident must be one of "FC", "SAS" or "iSCSI"; any other value
    aborts the script with exit status 1.
    """
    if proto_ident == "FC":
        tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "SAS":
        tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "iSCSI":
        tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
    else:
        # Parenthesized single-argument print is identical on Python 2
        # and also valid on Python 3 (the bare print statement was not).
        print("Unsupported proto_ident: " + proto_ident)
        sys.exit(1)
    return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_transport.h>\n"
buf += "#include <target/target_core_fabric_ops.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_fabric_lib.h>\n"
buf += "#include <target/target_core_device.h>\n"
buf += "#include <target/target_core_tpg.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!(se_nacl_new))\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!(tpg)) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!(" + fabric_mod_port + ")) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd_to_pool = " + fabric_mod_name + "_release_cmd,\n"
buf += " .release_cmd_direct = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .new_cmd_failure = " + fabric_mod_name + "_new_cmd_failure,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " .pack_lun = " + fabric_mod_name + "_pack_lun,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (!(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return -ENOMEM;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!(" + fabric_mod_name + "_fabric_configfs))\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "#ifdef MODULE\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
buf += "#endif\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_transport.h>\n"
buf += "#include <target/target_core_fabric_ops.h>\n"
buf += "#include <target/target_core_fabric_lib.h>\n"
buf += "#include <target/target_core_device.h>\n"
buf += "#include <target/target_core_tpg.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!(nacl)) {\n"
buf += " printk(KERN_ERR \"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('release_cmd_to_pool', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('new_cmd_failure\)\(', fo):
buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
if re.search('pack_lun\)\(', fo):
buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
buf += "{\n"
buf += " WARN_ON(lun >= 256);\n"
buf += " /* Caller wants this byte-swapped */\n"
buf += " return cpu_to_le64((lun & 0xff) << 8);\n"
buf += "}\n\n"
bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
| gpl-2.0 |
pchmieli/h2o-3 | py2/h2o_get_ip.py | 30 | 2901 | # Hackery: find the ip address that gets you to Google's DNS
# Trickiness because you might have multiple IP addresses (Virtualbox), or Windows.
# we used to not like giving ip 127.0.0.1 to h2o?
import sys, socket, os, getpass
import h2o_args
# print "h2o_get_ip"
# copied here from h2o_test.py to eliminate a circular import
def verboseprint(*args, **kwargs):
if h2o_args.verbose:
for x in args: # so you don't have to create a single string
print x,
for x in kwargs: # so you don't have to create a single string
print x,
print
# so we can see problems when hung?
sys.stdout.flush()
def get_ip_address(ipFromCmdLine=None):
if ipFromCmdLine:
verboseprint("get_ip case 1:", ipFromCmdLine)
return ipFromCmdLine
ip = '127.0.0.1'
socket.setdefaulttimeout(0.5)
hostname = socket.gethostname()
# this method doesn't work if vpn is enabled..it gets the vpn ip
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 0))
ip = s.getsockname()[0]
verboseprint("get_ip case 2:", ip)
except:
pass
try:
if ip.startswith('127'):
# drills down into family
ip = socket.getaddrinfo(hostname, None)[0][4][0]
verboseprint("get_ip case 3:", ip)
except:
pass
ipa = None
# we had some hosts that didn't support gethostbyname_ex().
# hopefully we don't need a hack to exclude
# the gethostbyname_ex can be slow. the timeout above will save us quickly
try:
# Translate a host name to IPv4 address format, extended interface.
# This should be resolve by dns so it's the right ip for talking to this guy?
# Return a triple (hostname, aliaslist, ipaddrlist)
# where hostname is the primary host name responding to the given ip_address,
# aliaslist is a (possibly empty) list of alternative host names for the same address,
# ipaddrlist is a list of IPv4 addresses for the same interface on the same host
ghbx = socket.gethostbyname_ex(hostname)
for ips in ghbx[2]:
# only take the first
if ipa is None and not ips.startswith("127."):
ipa = ips[:]
verboseprint("get_ip case 4:", ipa)
if ip != ipa:
print "\nAssuming", ip, "is the ip address h2o will use but", ipa,\
"is probably the real ip?"
print "You might have a vpn active. Best to use '-ip", ipa,\
"'to get python and h2o the same."
except:
pass
# print "Timeout during socket.gethostbyname_ex(hostname)"
verboseprint("get_ip_address:", ip)
# set it back to default higher timeout (None would be no timeout?)
socket.setdefaulttimeout(5)
return ip
| apache-2.0 |
hazelcast/hazelcast-python-client | hazelcast/protocol/codec/map_contains_key_codec.py | 1 | 1069 | from hazelcast.serialization.bits import *
from hazelcast.protocol.builtin import FixSizedTypesCodec
from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer, RESPONSE_HEADER_SIZE
from hazelcast.protocol.builtin import StringCodec
from hazelcast.protocol.builtin import DataCodec
# hex: 0x010600
_REQUEST_MESSAGE_TYPE = 67072
# hex: 0x010601
_RESPONSE_MESSAGE_TYPE = 67073
_REQUEST_THREAD_ID_OFFSET = REQUEST_HEADER_SIZE
_REQUEST_INITIAL_FRAME_SIZE = _REQUEST_THREAD_ID_OFFSET + LONG_SIZE_IN_BYTES
_RESPONSE_RESPONSE_OFFSET = RESPONSE_HEADER_SIZE
def encode_request(name, key, thread_id):
buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
FixSizedTypesCodec.encode_long(buf, _REQUEST_THREAD_ID_OFFSET, thread_id)
StringCodec.encode(buf, name)
DataCodec.encode(buf, key, True)
return OutboundMessage(buf, True)
def decode_response(msg):
initial_frame = msg.next_frame()
return FixSizedTypesCodec.decode_boolean(initial_frame.buf, _RESPONSE_RESPONSE_OFFSET)
| apache-2.0 |
PhloxAR/phloxar | PhloxAR/core/stream.py | 1 | 9045 | # -*- coding: utf-8 -*-
from __future__ import division, print_function
from __future__ import absolute_import, unicode_literals
from ..compat import socketserver
from ..compat import SimpleHTTPServer
from ..base import cv2
import time
import socket
import re
import threading
__all__ = [
'JpegStreamHandler', 'JpegStreamer', 'JpegTCPServer', 'VideoStream'
]
_jpeg_streamers = {}
class JpegStreamHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""
Handles requests to the threaded HTTP server.
Once initialized, any request to this port will receive
a multipart/replace jpeg.
"""
def get(self):
global _jpeg_streamers
if self.path == '/' or not self.path:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write("""
<html>
<head>
<style type=text/css>
body {
background-image: url(/stream);
background-repeat: no-repeat;
background-position: center top;
background-attachment: fixed;
height: 100%;
}
</style>
</head>
<body>
 
</body>
</html>
""")
return
elif self.path == '/stream':
self.send_response(200)
self.send_header('Connection', 'close')
self.send_header('Max-Age', '0')
self.send_header('Expires', '0')
self.send_header('Cache-Control', 'no-cache, private')
self.send_header('Pragma', 'no-cache')
self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=--BOUNDARYSTRING')
self.end_headers()
host, port = self.server.socket.getsockname()[:2]
count = 0
timeout = 0.75
last_time_served = 0
while True:
if (_jpeg_streamers[port].refreshtime > last_time_served or
time.time() - timeout > last_time_served):
try:
self.wfile.write('--BOUNDARYSTRING\r\n')
self.send_header('Content-type', 'image/jpeg')
self.send_header('Content-Length', str(len(
_jpeg_streamers[port].jpgdata.getvalue()
)))
self.end_headers()
self.wfile.write(_jpeg_streamers[port].jpgdata.getvalue() + '\r\n')
last_time_served = time.time()
except socket.error:
return
except IOError:
return
count += 1
time.sleep(_jpeg_streamers[port].sleeptime)
class JpegTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
allow_reuse_address = True
daemon_threads = True
# factory class for jpeg tcp server.
class JpegStreamer(object):
    """
    Allow user to stream a jpeg encoded file to a HTTP port. Any
    updates to the jpeg file will automatically be pushed to the
    browser via multipart/replace content type.

    initialization:
        js = JpegStreamer()
    update:
        img.save(js)
    open a browser and display:
        import webbrowser
        webbrowser.open(js.url())

    Note the optional parameters on the constructor:
    - host_port (default 8080): an int port, a "host:port" string, or a
      (host, port) tuple; sets the TCP address you need to connect to
    - sleeptime (default 0.1): how often to update.  Above 1 second seems
      to cause dropped connections in Google Chrome.  Once initialized,
      the buffer and sleeptime can be modified and will function
      properly -- port will not.
    """
    server = ''
    host = ''
    port = ''
    sleep_time = ''
    frame_buffer = ''
    counter = 0
    refresh_time = 0

    def __init__(self, host_port=8080, sleeptime=0.1):
        """Start a daemonized HTTP server that publishes JPEG frames.

        :param host_port: int port, "host:port" string, or (host, port) tuple
        :param sleeptime: seconds between frame pushes to connected clients
        """
        global _jpeg_streamers
        # normalize the three accepted host_port spellings
        if isinstance(host_port, int):
            self.port = host_port
            self.host = 'localhost'
        elif isinstance(host_port, str) and ':' in host_port:
            self.host, self.port = host_port.split(':')
            self.port = int(self.port)
        elif isinstance(host_port, tuple):
            self.host, self.port = host_port
        else:
            self.port = 8080
            self.host = 'localhost'
        self.sleep_time = sleeptime
        # BUG FIX: bind to (host, port); the original passed
        # (self.host, self.host), so any non-default port was unusable.
        self.server = JpegTCPServer((self.host, self.port), JpegStreamHandler)
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        # register before starting so the handler thread can find us by port
        _jpeg_streamers[self.port] = self
        self.server_thread.daemon = True
        self.server_thread.start()
        self.frame_buffer = self

    def url(self):
        """
        Returns the JpegStreams Webbrowser-appropriate URL, if not provided
        in the constructor, it defaults to "http://localhost:8080"

        :return: url
        """
        return 'http://' + self.host + ':' + str(self.port) + '/'

    def stream_url(self):
        """
        Returns the URL of the MJPEG stream. If host and port are not set in
        the constructor, defaults to "http://localhost:8080/stream/"

        :return: url
        """
        return self.url() + 'stream'
class VideoStream(object):
    """
    Allows user save video files in different formats.

    You can initialize it by specifying the file you want to output::

        vs = VideoStream("hello.avi")

    You can also specify a framerate, and if you want to "fill" in
    missed frames. So if you want to record a real time video you may
    want to do this::

        # note these are default values
        vs = VideoStream("myvideo.avi", 25, True)

    Where if you want to do a stop-motion animation, you would want to
    turn fill off::

        vs_animation = VideoStream("cartoon.avi", 15, False)

    If you select a fill, the VideoStream will do its best to stay
    close to "real time" by duplicating frames or dropping frames
    when the clock doesn't sync up with the file writes.

    You can save a frame to the video by using the Image.save() function::

        my_camera.getImage().save(vs)
    """
    fps = 25            # target frames per second
    filename = ''       # output file path
    writer = ''         # cv2.VideoWriter, created lazily on first frame
    fourcc = ''         # codec identifier
    frame_fill = True   # duplicate/drop frames to track wall-clock time
    video_time = 0.0
    start_time = 0.0
    frame_count = 0
    last_frame = None   # most recently submitted frame (used for fill)

    def __init__(self, filename, fps=25, frame_fill=True):
        """ Store output parameters; the writer itself is created on the
        first write_frame() call, once the frame size is known.

        :param filename: output video file path
        :param fps: frames per second to encode
        :param frame_fill: if True, duplicate/drop frames to stay real-time
        """
        self.filename = filename
        self.fps = fps
        self.frame_fill = frame_fill
        self.fourcc = cv2.VideoWriter_fourcc('I', 'Y', 'U', 'V')

    def init_writer(self, size):
        """ Create the underlying cv2.VideoWriter and start the clock.

        :param size: (width, height) of the frames to be written
        :return: None
        """
        self.writer = cv2.VideoWriter(self.filename, self.fourcc, self.fps,
                                      size, 1)
        self.video_time = 0.0
        self.start_time = time.time()

    def write_frame(self, img):
        """
        Write a frame to the display object. this is automatically called
        by image.save() but you can use this function to save just the
        bitmap as well so image markup is not implicit,typically you use
        image.save() but this allows for more finer control

        Args:
            img (Image, array like): the image to be write
        Returns:
            None
        """
        if not self.writer:
            self.init_writer(img.size)
        self.last_frame = img
        frame_time = 1.0 / float(self.fps)
        target_time = self.start_time + frame_time * self.frame_count
        real_time = time.time()
        if self.frame_fill:
            # see if we need to do anything to adjust to real time
            if target_time > real_time + frame_time:
                # if we're more than one frame ahead,
                # save the last_frame, but don't write to video out
                self.last_frame = img
                return
            elif target_time < real_time - frame_time:
                # we're at least one frame behind
                frames_behind = int((real_time - target_time) * self.fps) + 1
                # BUG FIX: use floor division -- under Python 3 true
                # division produced a float and range() raised TypeError
                last_frames = frames_behind // 2
                # first half repeats the previous frame, second half the new one
                for i in range(0, last_frames):
                    self.frame_count += 1
                    self.writer.write(self.last_frame.narray)
                frames = frames_behind - last_frames
                for i in range(0, frames):
                    self.frame_count += 1
                    self.writer.write(img.narray)
            else:
                self.frame_count += 1
                self.writer.write(img.narray)
        else:
            self.frame_count += 1
            self.writer.write(img.narray)
        self.last_frame = img
| apache-2.0 |
jribbens/voting | voting/management/commands/importvotingstatements.py | 1 | 1319 | """Import voting statements from a CSV file."""
import csv
import datetime
from django.core.management.base import BaseCommand
from django.utils.text import slugify
from ...models import Statement
class Command(BaseCommand):
    """Import voting statements from a CSV file.

    Each CSV row is expected to carry (ignored, title, release_date,
    msgid); rows whose (release_date, title) pair already exists are
    skipped rather than duplicated.
    """

    help = "Import voting statements from a CSV file."

    def add_arguments(self, parser):
        # single positional argument: path to the CSV file
        parser.add_argument("filename")

    def handle(self, *args, **options):
        count = 0
        with open(options["filename"], encoding="ascii",
                  newline="") as csvfile:
            for row in csv.reader(csvfile):
                title, release_date, msgid = row[1:]
                release_date = datetime.datetime.strptime(
                    release_date, "%Y-%m-%d").date()
                already_present = Statement.objects.filter(
                    release_date=release_date, title=title)
                if already_present:
                    continue
                statement = Statement(
                    title=title,
                    slug=slugify(title),
                    release_date=release_date,
                    msgid=msgid,
                    statement="",
                )
                statement.save()
                count += 1
        self.stdout.write(self.style.SUCCESS(
            "Imported {} statement(s)".format(count)
        ))
| mit |
InfiniteAlpha/profitpy | profit/lib/widgets/plot.py | 18 | 41789 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2007 Troy Melhase <troy@gci.net>
# Distributed under the terms of the GNU General Public License v2
##
#
# This module defines the Plot class for display of plots and
# associated controls.
#
##
from PyQt4.QtCore import QRectF, QString, QTimer, QVariant
from PyQt4.QtCore import Qt, pyqtSignature
from PyQt4.QtGui import QBrush, QColor, QColorDialog, QFont, QFontDialog
from PyQt4.QtGui import QStandardItem, QStandardItemModel, QMenu, QPen, QFrame
from PyQt4.Qwt5 import QwtLegend, QwtPicker, QwtPlot, QwtPlotCurve
from PyQt4.Qwt5 import QwtPlotGrid, QwtPlotPicker, QwtPlotZoomer, QwtPainter
from PyQt4.Qwt5 import QwtPlotMarker, QwtPlotPanner, QwtSymbol, QwtText
from ib.ext.TickType import TickType
from profit.lib import Settings, Signals, defaults
from profit.lib.gui import ValueColorItem, colorIcon, complementColor
from profit.lib.widgets.plotdatadialog import PlotDataDialog
from profit.lib.widgets.plotitemdialog import PlotItemDialog
from profit.lib.widgets.ui_plot import Ui_Plot
# Short aliases for the four Qwt plot axes; `allAxes` keeps the full
# tuple for iteration (used by Plot.axisWidgets and enableAutoScale).
allAxes = \
    xBottom, xTop, yRight, yLeft = \
    QwtPlot.xBottom, QwtPlot.xTop, QwtPlot.yRight, QwtPlot.yLeft
def changePen(getr, setr, parent):
    """ Allow the user to change a pen with a PlotItemDialog.

    @param getr callable that returns current pen
    @param setr callable to set selected pen if dialog is accepted
    @param parent ancestor of dialog
    @return new pen if dialog is accepted, otherwise None
    """
    dialog = PlotItemDialog(getr(), parent)
    if dialog.exec_() != dialog.Accepted:
        return None
    selected = QPen(dialog.selectedPen)
    setr(selected)
    return selected
def changeColor(getr, setr, parent):
    """ Allow the user to change a color with a QColorDialog.

    @param getr callable that returns current color
    @param setr callable to set selected color if dialog is accepted
    @param parent ancestor of dialog
    @return new color if dialog is accepted, otherwise None
    """
    chosen = QColorDialog.getColor(QColor(getr()), parent)
    if not chosen.isValid():
        # the user cancelled the dialog; leave the color untouched
        return None
    setr(chosen)
    return chosen
class PlotCurve(QwtPlotCurve):
    """ Plot curve that joins the legend only when explicitly enabled. """
    dataMarker = None        # PlotDataMarker attached by Plot.loadCurve
    settingsLoaded = False   # set True once Plot.loadCurve has styled us

    def updateLegend(self, legend, enable=False):
        """ Framework hook to update plot legend with this curve.

        @param legend QwtLegend instance
        @param enable=False must be true to include this curve in legend
        @return None
        """
        if enable and self.isVisible():
            QwtPlotCurve.updateLegend(self, legend)
class PlotGrid(QwtPlotGrid):
    """ Specialized plot grid.

    QwtPlotGrid instances will not draw their minor grids if the major
    grid is also not enabled. This class reimplements 'draw' and
    'drawLines' to overcome this limitation. Code for both was taken
    from the Qwt sources.
    """
    def __init__(self):
        """ Constructor.  Starts with every grid line disabled; the
        owning Plot enables axes from saved settings (see loadGrids).
        """
        QwtPlotGrid.__init__(self)
        self.enableX(False)
        self.enableY(False)
        self.enableXMin(False)
        self.enableYMin(False)

    def draw(self, painter, mx, my, rect):
        """ Draws minor and major grids.

        @param painter QPainter instance
        @param mx QwtScaleMap instance
        @param my QwtScaleMap instance
        @param rect QRect instance
        @return None
        """
        # minor grid is painted first with its own pen so that major
        # lines are drawn on top of it
        painter.setPen(self.minPen())
        sdx = self.xScaleDiv()
        sdy = self.yScaleDiv()
        if self.xMinEnabled():
            self.drawLines(
                painter, rect, Qt.Vertical, mx, sdx.ticks(sdx.MinorTick))
            self.drawLines(
                painter, rect, Qt.Vertical, mx, sdx.ticks(sdx.MediumTick))
        if self.yMinEnabled():
            self.drawLines(
                painter, rect, Qt.Horizontal, my, sdy.ticks(sdy.MinorTick))
            self.drawLines(
                painter, rect, Qt.Horizontal, my, sdy.ticks(sdy.MediumTick))
        # major grid second, with the major pen
        painter.setPen(self.majPen())
        if self.xEnabled():
            self.drawLines(
                painter, rect, Qt.Vertical, mx, sdx.ticks(sdx.MajorTick))
        if self.yEnabled():
            self.drawLines(
                painter, rect, Qt.Horizontal, my, sdy.ticks(sdy.MajorTick))

    def drawLines(self, painter, rect, orientation, scalemap, values):
        """ Draws specified lines.

        @param painter QPainter instance
        @param rect QRect instance
        @param orientation Qt.Horizontal or Qt.Vertical
        @param scalemap QwtScaleMap instance
        @param values list of x or y values for line drawing
        @return None
        """
        x1 = rect.left()
        x2 = rect.right()
        y1 = rect.top()
        y2 = rect.bottom()
        for v in values:
            # map the scale value to a widget coordinate; skip lines
            # that fall outside the canvas rectangle
            value = scalemap.transform(v)
            if orientation == Qt.Horizontal:
                if ((value >= y1) and (value <= y2)):
                    QwtPainter.drawLine(painter, x1, value, x2, value)
            else:
                if ((value >= x1) and (value <= x2)):
                    QwtPainter.drawLine(painter, value, y1, value, y2)
class PlotDataMarker(QwtPlotMarker):
    """ Plot marker with clone/restyle helpers for data selection. """

    def __init__(self):
        QwtPlotMarker.__init__(self)

    def cloneFromValue(self, curve, x, y):
        """ Creates and returns new plot marker similar to this one.

        @param curve QwtPlotCurve instance
        @param x marker x value
        @param y marker y value
        @return new PlotDataMarker instance
        """
        marker = type(self)()
        # copy line style, pen and symbol from this marker
        marker.restyleFrom(self)
        marker.setAxis(curve.xAxis(), curve.yAxis())
        marker.setValue(x, y)
        return marker

    def restyleFrom(self, other):
        """ Matches the style of this instance given an example.

        @param other QwtPlotMarker instance
        @return None
        """
        self.setLineStyle(other.lineStyle())
        self.setLinePen(other.linePen())
        self.setSymbol(other.symbol())
class PlotPanner(QwtPlotPanner):
    """ Stub for future implementation.

    Pans the plot canvas with the middle mouse button.
    """
    def __init__(self, canvas):
        QwtPlotPanner.__init__(self, canvas)
        self.setMouseButton(Qt.MidButton)
class PlotPicker(QwtPlotPicker):
    """ Crosshair picker; formats the tracker text per rubber band type. """

    def __init__(self, canvas):
        QwtPlotPicker.__init__(
            self, xBottom, yRight, self.NoSelection, self.CrossRubberBand,
            self.AlwaysOn, canvas)

    def trackerText(self, pos):
        """ Position readout: y only, x only, or "x, y". """
        point = self.invTransform(pos)
        style = self.rubberBand()
        if style == self.HLineRubberBand:
            text = '%.3f' % point.y()
        elif style == self.VLineRubberBand:
            text = '%.3f' % point.x()
        else:
            text = '%i, %.3f' % (point.x(), point.y(), )
        return QwtText(text)
class PlotZoomer(QwtPlotZoomer):
    """ Stub for future implementation.

    Drag-selection zoomer bound to the bottom/right axes.
    """
    def __init__(self, canvas):
        QwtPlotZoomer.__init__(
            self, xBottom, yRight, self.DragSelection, self.AlwaysOff, canvas)
class Legend(QwtLegend):
    """ Stub for future implementation.

    Placeholder subclass so legend behavior can be customized later
    without touching Plot.setupPlot.
    """
class ControlTreeValueItem(QStandardItem, ValueColorItem):
    """ Right-aligned, read-only value cell that colors its text by the
    direction of change between successive numeric values.
    """
    def __init__(self, text):
        QStandardItem.__init__(self, text)
        self.setEditable(False)
        self.setTextAlignment(Qt.AlignVCenter|Qt.AlignRight)

    def setText(self, text):
        # compare old and new values numerically; non-numeric text is
        # displayed without recoloring
        try:
            v = float(self.text())
            c = float(text)
        except (ValueError, ):
            pass
        else:
            if c != v: # explicitly ignore unchanged values
                # NOTE: cmp() is Python 2 only; compMap maps -1/0/1 to brushes
                self.setForeground(self.compMap[cmp(c, v)])
        QStandardItem.setText(self, text)
class ControlTreeItem(QStandardItem):
    """ Self-configuring control tree item.

    Owns a (initially hidden) PlotCurve and remembers the data series
    and settings key it represents.
    """
    def __init__(self, text, data, key, checkable=True):
        """ Constructor.

        @param text value for this item
        @param data reference to data series for this item
        @param key settings key for this item
        @param checkable if True, the item gets an (unchecked) check box
        """
        QStandardItem.__init__(self, text)
        if checkable:
            self.setCheckable(True)
            self.setCheckState(Qt.Unchecked)
        self.setEditable(False)
        curve = PlotCurve(text)
        curve.setYAxis(yRight)
        curve.setVisible(False)
        self.curve = curve
        self.data = data
        self.key = key

    def isChecked(self):
        """ True if this item is checked. """
        return Qt.Checked == self.checkState()

    def name(self):
        """ Name of item including parent names if any. """
        parts = []
        node = self
        while node:
            # prefer the explicit key; fall back to the display text
            parts.append(getattr(node, 'key', str(node.text())))
            node = node.parent()
        parts.reverse()
        return '/'.join(parts)

    def setColor(self, color):
        """ Sets the icon and color for this item.

        @param color QColor instance
        @return None
        """
        self.color = color
        self.setIcon(colorIcon(color))
class Plot(QFrame, Ui_Plot):
""" Plot container.
"""
def __init__(self, parent=None):
    """ Initializer.

    @param parent ancestor of this widget
    """
    QFrame.__init__(self, parent)
    self.setupUi(self)
    self.settings = Settings()
    # all persistent plot state is stored under the shared "plots" group
    self.settings.beginGroup(self.settings.keys.plots)
    self.setupOptionsMenu()
    self.setupPlotsMenu()
    self.setupPlot()
def setupOptionsMenu(self):
    """ Configure the options button menu.

    @return None
    """
    self.dataDialog = None
    menu = QMenu(self.optionsButton)
    self.optionsButton.setMenu(menu)
    # three separator-delimited groups: major grid, minor grid, misc
    groups = [
        [self.actionDrawMajorX, self.actionDrawMajorY,
         self.actionChangeMajorGridStyle],
        [self.actionDrawMinorX, self.actionDrawMinorY,
         self.actionChangeMinorGridStyle],
        [self.actionShowDataDialog, self.actionDrawLegend,
         self.actionChangeCanvasColor],
    ]
    for position, actions in enumerate(groups):
        if position:
            menu.addSeparator()
        for action in actions:
            menu.addAction(action)
def setupPlotsMenu(self):
    """ Configure the plots button menu.

    @return None
    """
    menu = QMenu(self.plotButton)
    self.plotButton.setMenu(menu)
    menu.addAction(self.actionNewPlot)
    menu.addAction(self.actionClosePlot)
    menu.addSeparator()
    menu.addAction(self.actionSyncWithData)
def setupPlot(self):
    """ Configure the plot widget.

    @return None
    """
    pen = QPen(Qt.black)
    plot = self.plot
    # flat frames so the plot blends into the surrounding widget
    plot.setFrameStyle(plot.NoFrame|plot.Plain)
    plot.insertLegend(Legend(), plot.LeftLegend)
    canvas = plot.canvas()
    canvas.setFrameStyle(plot.NoFrame|plot.Plain)
    layout = plot.plotLayout()
    layout.setCanvasMargin(0)
    layout.setAlignCanvasToScales(True)
    # grid, panner, zoomer and picker all share the canvas; the black
    # pens are re-derived when the canvas color changes (see
    # on_actionChangeCanvasColor_triggered)
    self.grid = PlotGrid()
    self.grid.attach(plot)
    self.panner = PlotPanner(canvas)
    self.zoomer = PlotZoomer(canvas)
    self.zoomer.setRubberBandPen(pen)
    self.picker = PlotPicker(canvas)
    self.picker.setTrackerPen(pen)
    self.connect(self.zoomer, Signals.zoomed, self.on_zoomer_zoomed)
    self.enableAutoScale()
def setSessionPlot(self, session, collection, key, *indexes):
    """ Associate a session with this instance.

    @param session Session instance
    @param collection mapping of ticker data, indexed by key
    @param key id of ticker as integer
    @param *indexes unused
    @return None
    """
    self.controlsTreeItems = []
    self.highlightMarkers = []
    self.session = session
    self.collection = collection
    self.key = key
    settings = self.settings
    name = self.plotName()
    # restore splitter geometry, tree contents, grid/canvas/legend state
    statekey = '%s/%s' % (name, settings.keys.splitstate)
    state = settings.value(statekey, defaults.rightSplitterState())
    self.plotSplitter.restoreState(state.toByteArray())
    self.setupTree()
    self.loadGrids()
    self.loadSelections()
    self.loadCanvasColor()
    self.loadLegend()
    self.updateAxis()
    scaler = self.plot.axisScaleEngine(xBottom)
    scaler.setMargins(0.0, 0.05)
    # axis widgets get their own context menu for font/color editing
    axisactions = [self.actionChangeAxesFont, self.actionChangeAxesColor]
    for widget in self.axisWidgets():
        widget.addActions(axisactions)
        widget.setContextMenuPolicy(Qt.ActionsContextMenu)
    color = settings.value('%s/axiscolor' % name)
    if color.isValid():
        self.setAxisColor(QColor(color))
    font = settings.value('%s/axisfont' % name)
    if font.isValid():
        self.setAxisFont(QFont(font))
    self.plot.replot()
    if settings.value('%s/datadialog' % name).toBool():
        ## tab might not be available
        QTimer.singleShot(500, self.actionShowDataDialog.trigger)
    session.registerMeta(self)
def setupTree(self):
    """ Configure the model and initial items for this instance.

    @return None
    """
    tree = self.controlsTree
    self.controlsTreeModel = model = QStandardItemModel(self)
    tree.setModel(model)
    model.setHorizontalHeaderLabels(['Line', 'Value'])
    tree.sortByColumn(0, Qt.AscendingOrder)
    try:
        ticker = self.collection[self.key]
    except (KeyError, TypeError, ):
        # no ticker for this key (or no collection); start with an empty tree
        pass
    else:
        for field, series in ticker.series.items():
            self.addSeries(TickType.getField(field), series)
    # connected only after initial population, so setup itself does not
    # fire the itemChanged handler
    self.connect(model, Signals.standardItemChanged,
                 self.on_controlsTree_itemChanged)
    for col in range(model.columnCount()):
        tree.resizeColumnToContents(col)
    tree.addActions(
        [self.actionChangeCurveStyle,
         self.actionChangeDataMarker,
         self.actionChangeCurveAxisX,
         self.actionChangeCurveAxisY,])
    tree.expandAll()
def addSeries(self, name, series, parent=None, items=None, checkable=True):
    """ Creates new controls and curve for an individual series.

    @param name series key; a plain string or a sequence of strings
    @param series series (or index) object to associate with the item
    @param parent parent item, or None for the model root
    @param items extra column items; a value column is created if empty
    @param checkable if True, the item gets a check box and colored icon
    @return new ControlTreeItem instance
    """
    # NB: `items` previously defaulted to a shared mutable list ([]);
    # None avoids the mutable-default-argument pitfall while keeping the
    # call signature backward compatible (falsy values behave the same).
    try:
        name + ()
    except (TypeError, ):
        # plain string key
        key = name
    else:
        # sequence of names: join for the key, display only the first
        key = '/'.join(name)
        name = name[0]
    if parent is None:
        parent = self.controlsTreeModel.invisibleRootItem()
    item = ControlTreeItem(name, series, key, checkable=checkable)
    self.controlsTreeItems.append(item)
    if not items:
        items = [ControlTreeValueItem(''), ]
    parent.appendRow([item, ] + items)
    if checkable:
        item.setColor(self.loadItemPen(item).color())
    # recurse into any sub-indexes the series exposes
    for index in getattr(series, 'indexes', []):
        self.addSeries(index.key, index, parent=item)
    self.loadSelection(item)
    return item
def anyCheckedItems(self):
    """ True if any control is checked. """
    return len(self.checkedItems()) > 0
def axisWidgets(self):
    """ Yields each plot axis widget. """
    return (self.plot.axisWidget(axis) for axis in allAxes)
def checkedItems(self):
    """ Sequence of checked controls. """
    return [entry for entry in self.controlsTreeItems if entry.isChecked()]
def checkedNames(self):
    """ Sequence of checked control names. """
    return [self.itemName(entry) for entry in self.checkedItems()]
def on_zoomer_zoomed(self, rect):
    """ Sets autoscaling mode when plot is zoomed to its base.

    @param rect ignored
    @return None
    """
    # index 0 means the zoomer is back at its base zoom rectangle
    if not self.zoomer.zoomRectIndex():
        self.enableAutoScale()
def enableAutoScale(self):
    """ Sets autoscaling mode on all four axes.

    @return None
    """
    setAuto = self.plot.setAxisAutoScale
    for axis in allAxes:
        setAuto(axis)
def enableCurve(self, item, enable=True):
    """ Sets the visibility and style of a plot curve.

    @param item tree widget item
    @param enable sets curve visible if True, otherwise invisible
    @return None
    """
    curve = item.curve
    curve.hide()
    plot = self.plot
    legend = plot.legend()
    drawLegend = self.actionDrawLegend
    if enable:
        # style the curve lazily, on first display
        if not curve.settingsLoaded:
            self.loadCurve(self.itemName(item), curve)
        curve.setData(item.data.x, item.data.y)
        curve.attach(plot)
        if self.actionDrawLegend.isChecked():
            curve.updateLegend(legend, True)
        curve.show()
    else:
        legend.remove(curve)
        curve.detach()
    self.emit(Signals.enableCurve, item, enable)
    checked = self.anyCheckedItems()
    self.actionDrawLegend.setEnabled(checked)
    if not checked:
        # nothing plotted: clear and hide the legend entirely
        legend.clear()
        legend.hide()
    plot.updateAxes()
    plot.replot()
def getAxisColor(self):
    """ Returns the foreground color of the axis widgets.

    @return QColor instance
    """
    palette = self.referenceAxisWidget().palette()
    return palette.color(palette.WindowText)
def itemName(self, item):
    """ Name for given item, including name of this plot.

    @param item ControlTreeItem instance
    @return name full item name including plot name
    """
    return self.plotName() + '/' + item.name()
def loadCanvasColor(self):
    """ Reads and sets the canvas color from saved settings.

    @return None
    """
    key = '%s/canvascolor' % self.plotName()
    color = self.settings.value(key, defaults.canvasColor())
    self.plot.setCanvasBackground(QColor(color))
def loadCurve(self, name, curve):
    """ Reads and configures a plot curve from saved settings.

    @param name of curve, used as settings key prefix
    @param curve QwtPlotCurve instance
    @return None
    """
    getv = self.settings.value
    curve.setBrush(QBrush(getv('%s/brush' % name, QBrush())))
    curve.setPen(QPen(getv('%s/pen' % name, QPen())))
    curve.setStyle(curve.CurveStyle(
        getv('%s/style' % name, QVariant(curve.Lines)).toInt()[0]))
    curve.setBaseline(
        getv('%s/baseline' % name, QVariant(0.0)).toDouble()[0])
    curve.setCurveAttribute(
        curve.Inverted, getv('%s/inverted' % name).toBool())
    curve.setCurveAttribute(
        curve.Fitted, getv('%s/fitted' % name).toBool())
    curve.setPaintAttribute(
        curve.PaintFiltered, getv('%s/filtered' % name).toBool())
    curve.setPaintAttribute(
        curve.ClipPolygons, getv('%s/clippoly' % name).toBool())
    curve.setXAxis(
        QwtPlot.Axis(getv('%s/xaxis' % name, xBottom).toInt()[0]))
    curve.setYAxis(
        QwtPlot.Axis(getv('%s/yaxis' % name, yRight).toInt()[0]))
    def applySymbol(symname, symobj):
        # helper: restore a symbol's brush, pen, style and size
        symobj.setBrush(QBrush(getv('%s/brush' % symname, QBrush())))
        symobj.setPen(QPen(getv('%s/pen' % symname, QPen())))
        style = getv('%s/style' % symname, QVariant(symobj.NoSymbol))
        symobj.setStyle(symobj.Style(style.toInt()[0]))
        symobj.setSize(getv('%s/size' % symname).toSize())
    applySymbol('%s/symbol' % name, curve.symbol())
    # marker used to highlight selected data points on this curve
    curve.dataMarker = marker = PlotDataMarker()
    marksym = QwtSymbol()
    applySymbol('%s/dataselect/symbol' % name, marksym)
    marker.setSymbol(marksym)
    markstyle = getv('%s/dataselect/style' % name, PlotDataMarker.VLine)
    marker.setLineStyle(marker.LineStyle(markstyle.toInt()[0]))
    marker.setLinePen(QPen(getv('%s/dataselect/pen' % name, Qt.red)))
    curve.settingsLoaded = True
def loadGrids(self):
    """ Reads and sets the major and minor grid pens and visibility.

    @return None
    """
    name = self.plotName()
    grid = self.grid
    getv = self.settings.value
    pen = getv('%s/major/pen' % name, defaults.majorGridPen())
    grid.setMajPen(QPen(pen))
    pen = getv('%s/minor/pen' % name, defaults.minorGridPen())
    grid.setMinPen(QPen(pen))
    items = [('%s/major/x/enabled', self.actionDrawMajorX),
             ('%s/major/y/enabled', self.actionDrawMajorY),
             ('%s/minor/x/enabled', self.actionDrawMinorX),
             ('%s/minor/y/enabled', self.actionDrawMinorY)]
    for key, action in items:
        v = getv(key % name)
        # default to enabled when no value has been saved yet
        if not v.isValid() or v.toBool():
            action.trigger()
def loadItemPen(self, item):
    """ Creates a pen from saved settings.

    @param item ControlTreeItem instance
    @return QPen instance
    """
    saved = self.settings.value('%s/pen' % self.itemName(item))
    if saved.isValid():
        return QPen(saved)
    # no saved pen: fall back to the default for this item name
    return defaults.itemPen(item.name())
def loadLegend(self):
    """ Restores the plot legend visibility from saved settings. """
    enabled = self.settings.value(
        '%s/legend/enabled' % self.plotName()).toBool()
    if enabled:
        self.actionDrawLegend.trigger()
def loadSelection(self, item):
    """ Restores an item check state and pen from saved settings. """
    key = '%s/checkeditems' % self.plotName()
    # membership test against the saved value; presumably valueLoad
    # returns the previously dumped name sequence -- TODO confirm
    if self.itemName(item) in self.settings.valueLoad(key, ''):
        item.setCheckState(Qt.Checked)
        item.setColor(self.loadItemPen(item).color())
def loadSelections(self):
    """ Restores each control tree item check state and pen. """
    for entry in self.controlsTreeItems:
        self.loadSelection(entry)
def saveSelections(self):
    """ Saves the selected control item names to user settings. """
    key = '%s/checkeditems' % self.plotName()
    names = self.checkedNames()
    if names:
        # don't save an empty list because the user might be
        # closing an empty plot that really does have selections
        # saved in the settings.
        self.settings.setValueDump(key, names)
def plotName(self):
    """ The name of this plot; 'noname' before a session key is set. """
    try:
        key = self.key
    except (AttributeError, ):
        # setSessionPlot has not run yet
        key = 'noname'
    return '%s/%s' % (key, self.objectName())
def referenceAxisWidget(self):
    """ Returns a reference axis widget (the bottom x axis). """
    return self.plot.axisWidget(xBottom)
def saveCanvasColor(self):
    """ Saves the canvas background color to user settings.

    @return None
    """
    key = '%s/canvascolor' % self.plotName()
    self.settings.setValue(key, self.plot.canvasBackground())
def saveCurve(self, name, curve):
    """ Saves visual settings of a curve.

    @param name curve name, used as settings key prefix
    @param curve QwtPlotCurve instance
    @return None
    """
    setv = self.settings.setValue
    setv('%s/brush' % name, curve.brush())
    setv('%s/pen' % name, curve.pen())
    setv('%s/style' % name, curve.style())
    setv('%s/baseline' % name, curve.baseline())
    setv('%s/inverted' % name,
         curve.testCurveAttribute(curve.Inverted))
    setv('%s/fitted' % name,
         curve.testCurveAttribute(curve.Fitted))
    setv('%s/filtered' % name,
         curve.testPaintAttribute(curve.PaintFiltered))
    setv('%s/clippoly' % name,
         curve.testPaintAttribute(curve.ClipPolygons))
    setv('%s/xaxis' % name, curve.xAxis())
    setv('%s/yaxis' % name, curve.yAxis())
    # symbol settings live under a nested '<name>/symbol' prefix
    name = '%s/symbol' % name
    symbol = curve.symbol()
    setv('%s/brush' % name, symbol.brush())
    setv('%s/pen' % name, symbol.pen())
    setv('%s/style' % name, symbol.style())
    setv('%s/size' % name, symbol.size())
def saveMarker(self, name, marker):
    """ Saves visual settings of a data-selection marker.

    @param name curve name, used as settings key prefix
    @param marker QwtPlotMarker instance
    @return None
    """
    setv = self.settings.setValue
    setv('%s/dataselect/style' % name, marker.lineStyle())
    setv('%s/dataselect/pen' % name, marker.linePen())
    # marker symbol settings live under a nested prefix, mirroring saveCurve
    symname = '%s/dataselect/symbol' % name
    symbol = marker.symbol()
    setv('%s/brush' % symname, symbol.brush())
    setv('%s/pen' % symname, symbol.pen())
    setv('%s/style' % symname, symbol.style())
    setv('%s/size' % symname, symbol.size())
def saveLegend(self):
    """ Saves the visibility of the plot legend to user settings.

    @return None
    """
    self.settings.setValue(
        '%s/legend/enabled' % self.plotName(),
        self.actionDrawLegend.isChecked())
def saveMajorX(self):
    """ Saves the state and pen of the major grid x axis.

    @return None
    """
    prefix = self.plotName()
    self.settings.setValue(
        '%s/major/x/enabled' % prefix, self.actionDrawMajorX.isChecked())
    self.settings.setValue('%s/major/pen' % prefix, self.grid.majPen())
def saveMajorY(self):
    """ Saves the state and pen of the major grid y axis.

    @return None
    """
    prefix = self.plotName()
    self.settings.setValue(
        '%s/major/y/enabled' % prefix, self.actionDrawMajorY.isChecked())
    self.settings.setValue('%s/major/pen' % prefix, self.grid.majPen())
def saveMinorX(self):
    """ Saves the state and pen of the minor grid x axis.

    @return None
    """
    prefix = self.plotName()
    self.settings.setValue(
        '%s/minor/x/enabled' % prefix, self.actionDrawMinorX.isChecked())
    self.settings.setValue('%s/minor/pen' % prefix, self.grid.minPen())
def saveMinorY(self):
    """ Saves the state and pen of the minor grid y axis.

    @return None
    """
    prefix = self.plotName()
    self.settings.setValue(
        '%s/minor/y/enabled' % prefix, self.actionDrawMinorY.isChecked())
    self.settings.setValue('%s/minor/pen' % prefix, self.grid.minPen())
def setAxisColor(self, color):
    """ Sets the axis widgets foreground and text color.

    @param color QColor instance
    @return None
    """
    for widget in self.axisWidgets():
        palette = widget.palette()
        for role in (palette.WindowText, palette.Text):
            palette.setColor(role, color)
        widget.setPalette(palette)
def setAxisFont(self, font):
    """ Sets the axis widgets font.

    @param font QFont instance
    @return None
    """
    for axisWidget in self.axisWidgets():
        axisWidget.setFont(font)
def updateAxis(self):
    """ Enables each axis if there are curves attached to it.

    @return None
    """
    enable = self.plot.enableAxis
    items = self.checkedItems()
    # for each axis pair, keep an axis enabled only while at least one
    # checked item's curve is assigned to it
    for pair, pred in [
        ([yRight, yLeft], lambda i, a:i.curve.yAxis()==a),
        ([xTop, xBottom], lambda i, a:i.curve.xAxis()==a)]:
        for axis in pair:
            enable(axis, any(item for item in items if pred(item, axis)))
## session signal handlers
def on_session_createdSeries(self, key, field):
    """ Signal handler called when new Series objects are created.

    @param key id of ticker with new series
    @param field series field
    @return None
    """
    if key != self.key:
        return
    series = self.collection[self.key].series[field]
    self.addSeries(TickType.getField(field), series)
    # keep the tree sorted as new series arrive
    self.controlsTree.sortByColumn(0, Qt.AscendingOrder)
def setItemValue(self, item):
    """ Updates the value cell next to an item and, on success, the
    value cells of the item's children.

    @param item ControlTreeItem instance
    @return None
    """
    # (removed dead local: the original computed an unused model index)
    parent = item.parent()
    # root-level items are addressed via the model, children via parent
    if parent:
        getc = parent.child
    else:
        getc = self.controlsTreeModel.item
    # renamed from `next`, which shadowed the builtin
    sibling = getc(item.row(), item.column()+1)
    try:
        sibling.setText('%.2f' % item.data[-1])
    except (AttributeError, IndexError, TypeError, ):
        # no value cell, empty series, or non-numeric data -- skip quietly
        pass
    else:
        # recurse into child rows only after a successful update,
        # preserving the original short-circuit behavior
        for child in [item.child(r, 0) for r in range(item.rowCount())]:
            self.setItemValue(child)
def on_session_TickPrice_TickSize(self, message):
    """ Signal handler for TickPrice and TickSize session messages.

    @param message Message instance
    @return None
    """
    if message.tickerId != self.key:
        return
    # refresh every value cell, then redraw the visible curves
    for item in self.controlsTreeItems:
        self.setItemValue(item)
    items = [i for i in self.controlsTreeItems if i.curve.isVisible()]
    for item in items:
        item.curve.setData(item.data.x, item.data.y)
    if items:
        self.plot.replot()
        # re-applies autoscale if the zoomer sits at its base rectangle
        self.on_zoomer_zoomed(None)
def on_session_UpdateAccountValue(self, message):
    """ Signal handler for UpdateAccountValue session messages;
    refreshes visible curves on account plots (key 'account').

    NOTE(review): shares its curve-refresh loop with
    on_session_TickPrice_TickSize; candidate for a common helper.
    """
    if self.key != 'account':
        return
    items = [i for i in self.controlsTreeItems if i.curve.isVisible()]
    for item in items:
        item.curve.setData(item.data.x, item.data.y)
    if items:
        self.plot.replot()
        self.on_zoomer_zoomed(None)
## action signal handlers
@pyqtSignature('')
def on_actionChangeCurveStyle_triggered(self):
    """ Signal handler called to edit a curve.

    @return None
    """
    # the action's data carries the context-menu position in the tree
    pos = self.sender().data().toPoint()
    index = self.controlsTree.indexAt(pos)
    if index.isValid():
        item = self.controlsTreeModel.itemFromIndex(index)
        # always edit the column-0 item; it owns the curve
        indexZero = self.controlsTreeModel.sibling(index.row(), 0, index)
        first = self.controlsTreeModel.itemFromIndex(indexZero)
        try:
            curve = first.curve
            color = first.color
        except (AttributeError, ):
            # not a curve-bearing row (e.g. a bare value cell)
            return
        else:
            item = first
        if not curve.settingsLoaded:
            self.loadCurve(self.itemName(item), curve)
        # temporarily attach so the dialog can preview against the plot
        cplot = curve.plot()
        if cplot is None:
            curve.attach(self.plot)
        dlg = PlotItemDialog(curve, self)
        if dlg.exec_() == dlg.Accepted:
            dlg.applyToCurve(curve)
            item.setColor(curve.pen().color())
            self.saveCurve(self.itemName(item), curve)
            self.enableCurve(item, enable=item.checkState()==Qt.Checked)
        # detach again if the curve was not attached beforehand
        if cplot is None:
            curve.detach()
@pyqtSignature('')
def on_actionChangeCurveAxisX_triggered(self):
    """ Signal handler called to toggle the x axis of a curve.

    @return None
    """
    point = self.sender().data().toPoint()
    index = self.controlsTree.indexAt(point)
    if not index.isValid():
        return
    item = self.controlsTreeModel.itemFromIndex(index)
    curve = item.curve
    # flip between the top and bottom x axes
    curve.setXAxis(xBottom if curve.xAxis() == xTop else xTop)
    self.updateAxis()
    self.saveCurve(self.itemName(item), curve)
    self.plot.replot()
@pyqtSignature('')
def on_actionChangeCurveAxisY_triggered(self):
    """ Signal handler called to toggle the y axis of a curve.

    @return None
    """
    point = self.sender().data().toPoint()
    index = self.controlsTree.indexAt(point)
    if not index.isValid():
        return
    item = self.controlsTreeModel.itemFromIndex(index)
    curve = item.curve
    # flip between the left and right y axes
    curve.setYAxis(yRight if curve.yAxis() == yLeft else yLeft)
    self.updateAxis()
    self.saveCurve(self.itemName(item), curve)
    self.plot.replot()
@pyqtSignature('')
def on_actionChangeDataMarker_triggered(self):
    """ Signal handler called to edit data marker.

    @return None
    """
    # the action's data carries the context-menu position in the tree
    pos = self.sender().data().toPoint()
    index = self.controlsTree.indexAt(pos)
    if index.isValid():
        item = self.controlsTreeModel.itemFromIndex(index)
        curve = item.curve
        if not curve.settingsLoaded:
            self.loadCurve(self.itemName(item), curve)
        # temporarily attach so the dialog can preview against the plot
        cplot = curve.plot()
        if cplot is None:
            curve.attach(self.plot)
        dlg = PlotItemDialog(curve, marker=curve.dataMarker, parent=self)
        if dlg.exec_() == dlg.Accepted:
            dlg.applyToMarker(curve.dataMarker)
            self.saveMarker(self.itemName(item), curve.dataMarker)
            # restyle existing highlight markers to match the new look
            for marker in self.highlightMarkers:
                marker.restyleFrom(curve.dataMarker)
            self.plot.replot()
        if cplot is None:
            curve.detach()
@pyqtSignature('bool')
def on_actionDrawLegend_triggered(self, enable):
    """ Signal handler called to toggle the plot legend visibility.

    @param enable if True, legend is enabled
    @return None
    """
    legend = self.plot.legend()
    legend.setVisible(enable)
    if enable:
        items = self.checkedItems()
        if items:
            for item in items:
                item.curve.updateLegend(legend, True)
        else:
            # nothing to show: undo the toggle
            self.actionDrawLegend.setChecked(False)
    else:
        legend.clear()
    self.saveLegend()
@pyqtSignature('bool')
def on_actionDrawMajorX_triggered(self, enable):
    """ Signal handler called to toggle visibility of major grid x axis.

    @param enable if True, grid axis is enabled
    @return None
    """
    self.grid.enableX(enable)
    self.plot.replot()
    self.saveMajorX()
@pyqtSignature('bool')
def on_actionDrawMajorY_triggered(self, enable):
    """ Signal handler called to toggle visibility of major grid y axis.

    @param enable if True, grid axis is enabled
    @return None
    """
    self.grid.enableY(enable)
    self.plot.replot()
    self.saveMajorY()
@pyqtSignature('bool')
def on_actionDrawMinorX_triggered(self, enable):
    """ Signal handler called to toggle visibility of minor grid x axis.

    @param enable if True, grid axis is enabled
    @return None
    """
    self.grid.enableXMin(enable)
    self.plot.replot()
    self.saveMinorX()
@pyqtSignature('bool')
def on_actionDrawMinorY_triggered(self, enable):
""" Signal handler called to toggle visiblity of minor grid y axis.
@param enable if True, grid axis is enabled
@return None
"""
self.grid.enableYMin(enable)
self.plot.replot()
self.saveMinorY()
@pyqtSignature('')
def on_actionChangeMajorGridStyle_triggered(self):
""" Signal handler called to edit the major grid pen.
@return None
"""
pen = changePen(self.grid.majPen, self.grid.setMajPen, self)
if pen:
self.plot.replot()
self.saveMajorX()
self.saveMajorY()
@pyqtSignature('')
def on_actionChangeMinorGridStyle_triggered(self):
""" Signal handler called to edit the minor grid pen.
@return None
"""
pen = changePen(self.grid.minPen, self.grid.setMinPen, self)
if pen:
self.plot.replot()
self.saveMinorX()
self.saveMinorY()
@pyqtSignature('')
def on_actionChangeCanvasColor_triggered(self):
""" Signal handler called to edit the plot canvas background.
@return None
"""
plot = self.plot
color = changeColor(
plot.canvasBackground, plot.setCanvasBackground, self)
if color:
pen = QPen(complementColor(color))
self.zoomer.setRubberBandPen(pen)
self.picker.setTrackerPen(pen)
plot.replot()
self.saveCanvasColor()
@pyqtSignature('')
def on_actionChangeAxesFont_triggered(self):
""" Signal handler called to edit the axes font.
@return None
"""
widget = self.referenceAxisWidget()
default = widget.font()
font, okay = QFontDialog.getFont(default, self, 'Select Axis Font')
if okay:
self.setAxisFont(font)
self.settings.setValue(
'%s/axisfont' % self.plotName(), font)
@pyqtSignature('')
def on_actionChangeAxesColor_triggered(self):
""" Signal handler called to edit the axes color.
@return None
"""
color = changeColor(self.getAxisColor, self.setAxisColor, self)
if color:
self.settings.setValue('%s/axiscolor' % self.plotName(), color)
@pyqtSignature('bool')
def on_actionShowDataDialog_triggered(self, enable):
""" Signal handler called to show or hide the data dialog.
@return None
"""
if enable:
dlg = self.dataDialog = PlotDataDialog(self)
try:
tabs = self.window().centralTabs
except (AttributeError, ):
pass
else:
name = tabs.tabText(tabs.currentIndex())
dlg.setWindowTitle(str(dlg.windowTitle()) % name)
dlg.setWindowIcon(tabs.tabIcon(tabs.currentIndex()))
self.connect(
dlg, Signals.dialogFinished, self.on_dataDialog_finished)
self.connect(
dlg, Signals.highlightSelections, self.on_dataDialog_selected)
dlg.show()
elif self.dataDialog:
self.dataDialog.close()
self.dataDialog = None
self.settings.setValue('%s/datadialog' % self.plotName(), enable)
## controls tree signal handlers
def on_controlsTree_doubleClicked(self, index):
""" Signal handler for control tree double click.
@param index QModelIndex instance
@return None
"""
tree = self.controlsTree
if index.isValid():
pos = tree.visualRect(index).center()
actions = tree.actions()
for action in actions:
action.setData(QVariant(pos))
self.actionChangeCurveStyle.trigger()
def on_controlsTree_itemChanged(self, item):
""" Signal handler for all changes to control tree items.
@param item changed tree widget item
@return None
"""
try:
curve = item.curve
except (AttributeError, ):
pass
else:
self.enableCurve(item, enable=item.checkState()==Qt.Checked)
self.updateAxis()
self.saveSelections()
def on_controlsTree_customContextMenuRequested(self, pos):
""" Signal handler for context menu request over control tree.
@param pos QPoint of mouse click
@return None
"""
tree = self.controlsTree
index = tree.indexAt(pos)
if index.isValid():
item = self.controlsTreeModel.itemFromIndex(index)
if not hasattr(item, 'curve'):
return
if item.curve.yAxis() == yRight:
self.actionChangeCurveAxisY.setText('Move to Left Axis')
else:
self.actionChangeCurveAxisY.setText('Move to Right Axis')
if item.curve.xAxis() == xTop:
self.actionChangeCurveAxisX.setText('Move to Bottom Axis')
else:
self.actionChangeCurveAxisX.setText('Move to Top Axis')
actions = tree.actions()
for action in actions:
action.setData(QVariant(pos))
QMenu.exec_(actions, tree.mapToGlobal(pos))
def on_dataDialog_finished(self, result):
""" Signal handler for data dialog finish.
Sets and saves state of data dialog display after its closed.
@param result ignored
@return None
"""
self.actionShowDataDialog.setChecked(False)
self.dataDialog = None
self.on_dataDialog_selected([])
self.settings.setValue('%s/datadialog' % self.plotName(), False)
def on_dataDialog_selected(self, items):
""" Signal handler for data dialog selection changes.
@params items list of (index, item) two-tuples
@return None
"""
for marker in self.highlightMarkers:
marker.detach()
self.highlightMarkers = markers = []
for index, item in items:
try:
x, y = index.row(), item.data[index.row()]
except (IndexError, ):
continue
if x is None or y is None:
continue
curve = item.curve
marker = curve.dataMarker.cloneFromValue(curve, x, y)
markers.append(marker)
marker.attach(self.plot)
self.plot.replot()
def on_plotSplitter_splitterMoved(self, pos, index):
""" Signal handler for splitter move; saves state to user settings.
@param pos ignored
@param index ignored
@return None
"""
settings = self.settings
statekey = '%s/%s' % (self.plotName(), settings.keys.splitstate)
settings.setValue(statekey, self.plotSplitter.saveState())
def syncPlot(self, sync=None):
print '## sync?', sync
session = self.session
(session.registerMeta if sync else session.deregisterMeta)(self)
| gpl-2.0 |
scorpilix/Golemtest | tests/golem/docker/test_docker_task_thread.py | 2 | 2300 | import time
from threading import Thread
from mock import Mock
from golem.clientconfigdescriptor import ClientConfigDescriptor
from golem.docker.image import DockerImage
from golem.docker.task_thread import DockerTaskThread
from golem.task.taskcomputer import TaskComputer
from golem.tools.ci import ci_skip
from test_docker_job import TestDockerJob
@ci_skip
class TestDockerTaskThread(TestDockerJob):
def test_termination(self):
script = "import time\ntime.sleep(20)"
task_server = Mock()
task_server.config_desc = ClientConfigDescriptor()
task_server.config_desc.estimated_blender_performance = 2000.0
task_server.config_desc.estimated_lux_performance = 2000.0
task_server.client.datadir = self.test_dir
task_server.client.get_node_name.return_value = "test_node"
task_server.get_task_computer_root.return_value = task_server.client.datadir
task_computer = TaskComputer("node", task_server, use_docker_machine_manager=False)
image = DockerImage("golemfactory/base", tag="1.2")
with self.assertRaises(AttributeError):
DockerTaskThread(task_computer, "subtask_id", None,
self.work_dir, script, None, "test task thread",
self.resources_dir, self.output_dir, timeout=30)
def test():
tt = DockerTaskThread(task_computer, "subtask_id", [image],
self.work_dir, script, None, "test task thread",
self.resources_dir, self.output_dir, timeout=30)
task_computer.current_computations.append(tt)
task_computer.counting_task = True
tt.setDaemon(True)
tt.start()
time.sleep(1)
started = time.time()
parent_thread = Thread(target=test)
parent_thread.start()
time.sleep(1)
ct = task_computer.current_computations[0]
while ct and ct.is_alive():
task_computer.run()
if time.time() - started > 15:
self.fail("Job timed out")
elif task_computer.current_computations:
ct = task_computer.current_computations[0]
else:
ct = None
time.sleep(1)
| gpl-3.0 |
hwu25/AppPkg | Applications/Python/Python-2.7.2/Lib/difflib.py | 55 | 84404 | #! /usr/bin/env python
"""
Module difflib -- helpers for computing deltas between objects.
Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
Use SequenceMatcher to return list of the best "good enough" matches.
Function context_diff(a, b):
For two lists of strings, return a delta in context diff format.
Function ndiff(a, b):
Return a delta: the difference between `a` and `b` (lists of strings).
Function restore(delta, which):
Return one of the two sequences that generated an ndiff delta.
Function unified_diff(a, b):
For two lists of strings, return a delta in unified diff format.
Class SequenceMatcher:
A flexible class for comparing pairs of sequences of any type.
Class Differ:
For producing human-readable deltas from sequences of lines of text.
Class HtmlDiff:
For producing HTML side by side comparison with change highlights.
"""
__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff',
'unified_diff', 'HtmlDiff', 'Match']
import heapq
from collections import namedtuple as _namedtuple
from functools import reduce
Match = _namedtuple('Match', 'a b size')
def _calculate_ratio(matches, length):
if length:
return 2.0 * matches / length
return 1.0
class SequenceMatcher:
"""
SequenceMatcher is a flexible class for comparing pairs of sequences of
any type, so long as the sequence elements are hashable. The basic
algorithm predates, and is a little fancier than, an algorithm
published in the late 1980's by Ratcliff and Obershelp under the
hyperbolic name "gestalt pattern matching". The basic idea is to find
the longest contiguous matching subsequence that contains no "junk"
elements (R-O doesn't address junk). The same idea is then applied
recursively to the pieces of the sequences to the left and to the right
of the matching subsequence. This does not yield minimal edit
sequences, but does tend to yield matches that "look right" to people.
SequenceMatcher tries to compute a "human-friendly diff" between two
sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
longest *contiguous* & junk-free matching subsequence. That's what
catches peoples' eyes. The Windows(tm) windiff has another interesting
notion, pairing up elements that appear uniquely in each sequence.
That, and the method here, appear to yield more intuitive difference
reports than does diff. This method appears to be the least vulnerable
to synching up on blocks of "junk lines", though (like blank lines in
ordinary text files, or maybe "<P>" lines in HTML files). That may be
because this is the only method of the 3 that has a *concept* of
"junk" <wink>.
Example, comparing two strings, and considering blanks to be "junk":
>>> s = SequenceMatcher(lambda x: x == " ",
... "private Thread currentThread;",
... "private volatile Thread currentThread;")
>>>
.ratio() returns a float in [0, 1], measuring the "similarity" of the
sequences. As a rule of thumb, a .ratio() value over 0.6 means the
sequences are close matches:
>>> print round(s.ratio(), 3)
0.866
>>>
If you're only interested in where the sequences match,
.get_matching_blocks() is handy:
>>> for block in s.get_matching_blocks():
... print "a[%d] and b[%d] match for %d elements" % block
a[0] and b[0] match for 8 elements
a[8] and b[17] match for 21 elements
a[29] and b[38] match for 0 elements
Note that the last tuple returned by .get_matching_blocks() is always a
dummy, (len(a), len(b), 0), and this is the only case in which the last
tuple element (number of elements matched) is 0.
If you want to know how to change the first sequence into the second,
use .get_opcodes():
>>> for opcode in s.get_opcodes():
... print "%6s a[%d:%d] b[%d:%d]" % opcode
equal a[0:8] b[0:8]
insert a[8:8] b[8:17]
equal a[8:29] b[17:38]
See the Differ class for a fancy human-friendly file differencer, which
uses SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
See also function get_close_matches() in this module, which shows how
simple code building on SequenceMatcher can be used to do useful work.
Timing: Basic R-O is cubic time worst case and quadratic time expected
case. SequenceMatcher is quadratic time for the worst case and has
expected-case behavior dependent in a complicated way on how many
elements the sequences have in common; best case time is linear.
Methods:
__init__(isjunk=None, a='', b='')
Construct a SequenceMatcher.
set_seqs(a, b)
Set the two sequences to be compared.
set_seq1(a)
Set the first sequence to be compared.
set_seq2(b)
Set the second sequence to be compared.
find_longest_match(alo, ahi, blo, bhi)
Find longest matching block in a[alo:ahi] and b[blo:bhi].
get_matching_blocks()
Return list of triples describing matching subsequences.
get_opcodes()
Return list of 5-tuples describing how to turn a into b.
ratio()
Return a measure of the sequences' similarity (float in [0,1]).
quick_ratio()
Return an upper bound on .ratio() relatively quickly.
real_quick_ratio()
Return an upper bound on ratio() very quickly.
"""
def __init__(self, isjunk=None, a='', b='', autojunk=True):
"""Construct a SequenceMatcher.
Optional arg isjunk is None (the default), or a one-argument
function that takes a sequence element and returns true iff the
element is junk. None is equivalent to passing "lambda x: 0", i.e.
no elements are considered to be junk. For example, pass
lambda x: x in " \\t"
if you're comparing lines as sequences of characters, and don't
want to synch up on blanks or hard tabs.
Optional arg a is the first of two sequences to be compared. By
default, an empty string. The elements of a must be hashable. See
also .set_seqs() and .set_seq1().
Optional arg b is the second of two sequences to be compared. By
default, an empty string. The elements of b must be hashable. See
also .set_seqs() and .set_seq2().
Optional arg autojunk should be set to False to disable the
"automatic junk heuristic" that treats popular elements as junk
(see module documentation for more information).
"""
# Members:
# a
# first sequence
# b
# second sequence; differences are computed as "what do
# we need to do to 'a' to change it into 'b'?"
# b2j
# for x in b, b2j[x] is a list of the indices (into b)
# at which x appears; junk elements do not appear
# fullbcount
# for x in b, fullbcount[x] == the number of times x
# appears in b; only materialized if really needed (used
# only for computing quick_ratio())
# matching_blocks
# a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
# ascending & non-overlapping in i and in j; terminated by
# a dummy (len(a), len(b), 0) sentinel
# opcodes
# a list of (tag, i1, i2, j1, j2) tuples, where tag is
# one of
# 'replace' a[i1:i2] should be replaced by b[j1:j2]
# 'delete' a[i1:i2] should be deleted
# 'insert' b[j1:j2] should be inserted
# 'equal' a[i1:i2] == b[j1:j2]
# isjunk
# a user-supplied function taking a sequence element and
# returning true iff the element is "junk" -- this has
# subtle but helpful effects on the algorithm, which I'll
# get around to writing up someday <0.9 wink>.
# DON'T USE! Only __chain_b uses this. Use isbjunk.
# isbjunk
# for x in b, isbjunk(x) == isjunk(x) but much faster;
# it's really the __contains__ method of a hidden dict.
# DOES NOT WORK for x in a!
# isbpopular
# for x in b, isbpopular(x) is true iff b is reasonably long
# (at least 200 elements) and x accounts for more than 1 + 1% of
# its elements (when autojunk is enabled).
# DOES NOT WORK for x in a!
self.isjunk = isjunk
self.a = self.b = None
self.autojunk = autojunk
self.set_seqs(a, b)
def set_seqs(self, a, b):
"""Set the two sequences to be compared.
>>> s = SequenceMatcher()
>>> s.set_seqs("abcd", "bcde")
>>> s.ratio()
0.75
"""
self.set_seq1(a)
self.set_seq2(b)
def set_seq1(self, a):
"""Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
"""
if a is self.a:
return
self.a = a
self.matching_blocks = self.opcodes = None
def set_seq2(self, b):
"""Set the second sequence to be compared.
The first sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq2("abcd")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq1().
"""
if b is self.b:
return
self.b = b
self.matching_blocks = self.opcodes = None
self.fullbcount = None
self.__chain_b()
# For each element x in b, set b2j[x] to a list of the indices in
# b where x appears; the indices are in increasing order; note that
# the number of times x appears in b is len(b2j[x]) ...
# when self.isjunk is defined, junk elements don't show up in this
# map at all, which stops the central find_longest_match method
# from starting any matching block at a junk element ...
# also creates the fast isbjunk function ...
# b2j also does not contain entries for "popular" elements, meaning
# elements that account for more than 1 + 1% of the total elements, and
# when the sequence is reasonably large (>= 200 elements); this can
# be viewed as an adaptive notion of semi-junk, and yields an enormous
# speedup when, e.g., comparing program files with hundreds of
# instances of "return NULL;" ...
# note that this is only called when b changes; so for cross-product
# kinds of matches, it's best to call set_seq2 once, then set_seq1
# repeatedly
def __chain_b(self):
# Because isjunk is a user-defined (not C) function, and we test
# for junk a LOT, it's important to minimize the number of calls.
# Before the tricks described here, __chain_b was by far the most
# time-consuming routine in the whole module! If anyone sees
# Jim Roskind, thank him again for profile.py -- I never would
# have guessed that.
# The first trick is to build b2j ignoring the possibility
# of junk. I.e., we don't call isjunk at all yet. Throwing
# out the junk later is much cheaper than building b2j "right"
# from the start.
b = self.b
self.b2j = b2j = {}
for i, elt in enumerate(b):
indices = b2j.setdefault(elt, [])
indices.append(i)
# Purge junk elements
junk = set()
isjunk = self.isjunk
if isjunk:
for elt in list(b2j.keys()): # using list() since b2j is modified
if isjunk(elt):
junk.add(elt)
del b2j[elt]
# Purge popular elements that are not junk
popular = set()
n = len(b)
if self.autojunk and n >= 200:
ntest = n // 100 + 1
for elt, idxs in list(b2j.items()):
if len(idxs) > ntest:
popular.add(elt)
del b2j[elt]
# Now for x in b, isjunk(x) == x in junk, but the latter is much faster.
# Sicne the number of *unique* junk elements is probably small, the
# memory burden of keeping this set alive is likely trivial compared to
# the size of b2j.
self.isbjunk = junk.__contains__
self.isbpopular = popular.__contains__
def find_longest_match(self, alo, ahi, blo, bhi):
"""Find longest matching block in a[alo:ahi] and b[blo:bhi].
If isjunk is not defined:
Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
alo <= i <= i+k <= ahi
blo <= j <= j+k <= bhi
and for all (i',j',k') meeting those conditions,
k >= k'
i <= i'
and if i == i', j <= j'
In other words, of all maximal matching blocks, return one that
starts earliest in a, and of all those maximal matching blocks that
start earliest in a, return the one that starts earliest in b.
>>> s = SequenceMatcher(None, " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=0, b=4, size=5)
If isjunk is defined, first the longest matching block is
determined as above, but with the additional restriction that no
junk element appears in the block. Then that block is extended as
far as possible by matching (only) junk elements on both sides. So
the resulting block never matches on junk except as identical junk
happens to be adjacent to an "interesting" match.
Here's the same example as before, but considering blanks to be
junk. That prevents " abcd" from matching the " abcd" at the tail
end of the second sequence directly. Instead only the "abcd" can
match, and matches the leftmost "abcd" in the second sequence:
>>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=1, b=0, size=4)
If no blocks match, return (alo, blo, 0).
>>> s = SequenceMatcher(None, "ab", "c")
>>> s.find_longest_match(0, 2, 0, 1)
Match(a=0, b=0, size=0)
"""
# CAUTION: stripping common prefix or suffix would be incorrect.
# E.g.,
# ab
# acab
# Longest matching block is "ab", but if common prefix is
# stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
# strip, so ends up claiming that ab is changed to acab by
# inserting "ca" in the middle. That's minimal but unintuitive:
# "it's obvious" that someone inserted "ac" at the front.
# Windiff ends up at the same place as diff, but by pairing up
# the unique 'b's and then matching the first two 'a's.
a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.isbjunk
besti, bestj, bestsize = alo, blo, 0
# find longest junk-free match
# during an iteration of the loop, j2len[j] = length of longest
# junk-free match ending with a[i-1] and b[j]
j2len = {}
nothing = []
for i in xrange(alo, ahi):
# look at all instances of a[i] in b; note that because
# b2j has no junk keys, the loop is skipped if a[i] is junk
j2lenget = j2len.get
newj2len = {}
for j in b2j.get(a[i], nothing):
# a[i] matches b[j]
if j < blo:
continue
if j >= bhi:
break
k = newj2len[j] = j2lenget(j-1, 0) + 1
if k > bestsize:
besti, bestj, bestsize = i-k+1, j-k+1, k
j2len = newj2len
# Extend the best by non-junk elements on each end. In particular,
# "popular" non-junk elements aren't in b2j, which greatly speeds
# the inner loop above, but also means "the best" match so far
# doesn't contain any junk *or* popular non-junk elements.
while besti > alo and bestj > blo and \
not isbjunk(b[bestj-1]) and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
not isbjunk(b[bestj+bestsize]) and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize += 1
# Now that we have a wholly interesting match (albeit possibly
# empty!), we may as well suck up the matching junk on each
# side of it too. Can't think of a good reason not to, and it
# saves post-processing the (possibly considerable) expense of
# figuring out what to do with it. In the case of an empty
# interesting match, this is clearly the right thing to do,
# because no other kind of match is possible in the regions.
while besti > alo and bestj > blo and \
isbjunk(b[bestj-1]) and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
isbjunk(b[bestj+bestsize]) and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize = bestsize + 1
return Match(besti, bestj, bestsize)
def get_matching_blocks(self):
"""Return list of triples describing matching subsequences.
Each triple is of the form (i, j, n), and means that
a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
i and in j. New in Python 2.5, it's also guaranteed that if
(i, j, n) and (i', j', n') are adjacent triples in the list, and
the second is not the last triple in the list, then i+n != i' or
j+n != j'. IOW, adjacent triples never describe adjacent equal
blocks.
The last triple is a dummy, (len(a), len(b), 0), and is the only
triple with n==0.
>>> s = SequenceMatcher(None, "abxcd", "abcd")
>>> s.get_matching_blocks()
[Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]
"""
if self.matching_blocks is not None:
return self.matching_blocks
la, lb = len(self.a), len(self.b)
# This is most naturally expressed as a recursive algorithm, but
# at least one user bumped into extreme use cases that exceeded
# the recursion limit on their box. So, now we maintain a list
# ('queue`) of blocks we still need to look at, and append partial
# results to `matching_blocks` in a loop; the matches are sorted
# at the end.
queue = [(0, la, 0, lb)]
matching_blocks = []
while queue:
alo, ahi, blo, bhi = queue.pop()
i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
# a[alo:i] vs b[blo:j] unknown
# a[i:i+k] same as b[j:j+k]
# a[i+k:ahi] vs b[j+k:bhi] unknown
if k: # if k is 0, there was no matching block
matching_blocks.append(x)
if alo < i and blo < j:
queue.append((alo, i, blo, j))
if i+k < ahi and j+k < bhi:
queue.append((i+k, ahi, j+k, bhi))
matching_blocks.sort()
# It's possible that we have adjacent equal blocks in the
# matching_blocks list now. Starting with 2.5, this code was added
# to collapse them.
i1 = j1 = k1 = 0
non_adjacent = []
for i2, j2, k2 in matching_blocks:
# Is this block adjacent to i1, j1, k1?
if i1 + k1 == i2 and j1 + k1 == j2:
# Yes, so collapse them -- this just increases the length of
# the first block by the length of the second, and the first
# block so lengthened remains the block to compare against.
k1 += k2
else:
# Not adjacent. Remember the first block (k1==0 means it's
# the dummy we started with), and make the second block the
# new block to compare against.
if k1:
non_adjacent.append((i1, j1, k1))
i1, j1, k1 = i2, j2, k2
if k1:
non_adjacent.append((i1, j1, k1))
non_adjacent.append( (la, lb, 0) )
self.matching_blocks = non_adjacent
return map(Match._make, self.matching_blocks)
def get_opcodes(self):
"""Return list of 5-tuples describing how to turn a into b.
Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
tuple preceding it, and likewise for j1 == the previous j2.
The tags are strings, with these meanings:
'replace': a[i1:i2] should be replaced by b[j1:j2]
'delete': a[i1:i2] should be deleted.
Note that j1==j2 in this case.
'insert': b[j1:j2] should be inserted at a[i1:i1].
Note that i1==i2 in this case.
'equal': a[i1:i2] == b[j1:j2]
>>> a = "qabxcd"
>>> b = "abycdf"
>>> s = SequenceMatcher(None, a, b)
>>> for tag, i1, i2, j1, j2 in s.get_opcodes():
... print ("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))
delete a[0:1] (q) b[0:0] ()
equal a[1:3] (ab) b[0:2] (ab)
replace a[3:4] (x) b[2:3] (y)
equal a[4:6] (cd) b[3:5] (cd)
insert a[6:6] () b[5:6] (f)
"""
if self.opcodes is not None:
return self.opcodes
i = j = 0
self.opcodes = answer = []
for ai, bj, size in self.get_matching_blocks():
# invariant: we've pumped out correct diffs to change
# a[:i] into b[:j], and the next matching block is
# a[ai:ai+size] == b[bj:bj+size]. So we need to pump
# out a diff to change a[i:ai] into b[j:bj], pump out
# the matching block, and move (i,j) beyond the match
tag = ''
if i < ai and j < bj:
tag = 'replace'
elif i < ai:
tag = 'delete'
elif j < bj:
tag = 'insert'
if tag:
answer.append( (tag, i, ai, j, bj) )
i, j = ai+size, bj+size
# the list of matching blocks is terminated by a
# sentinel with size 0
if size:
answer.append( ('equal', ai, i, bj, j) )
return answer
def get_grouped_opcodes(self, n=3):
""" Isolate change clusters by eliminating ranges with no changes.
Return a generator of groups with upto n lines of context.
Each group is in the same format as returned by get_opcodes().
>>> from pprint import pprint
>>> a = map(str, range(1,40))
>>> b = a[:]
>>> b[8:8] = ['i'] # Make an insertion
>>> b[20] += 'x' # Make a replacement
>>> b[23:28] = [] # Make a deletion
>>> b[30] += 'y' # Make another replacement
>>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
[[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
[('equal', 16, 19, 17, 20),
('replace', 19, 20, 20, 21),
('equal', 20, 22, 21, 23),
('delete', 22, 27, 23, 23),
('equal', 27, 30, 23, 26)],
[('equal', 31, 34, 27, 30),
('replace', 34, 35, 30, 31),
('equal', 35, 38, 31, 34)]]
"""
codes = self.get_opcodes()
if not codes:
codes = [("equal", 0, 1, 0, 1)]
# Fixup leading and trailing groups if they show no changes.
if codes[0][0] == 'equal':
tag, i1, i2, j1, j2 = codes[0]
codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
if codes[-1][0] == 'equal':
tag, i1, i2, j1, j2 = codes[-1]
codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
nn = n + n
group = []
for tag, i1, i2, j1, j2 in codes:
# End the current group and start a new one whenever
# there is a large range with no changes.
if tag == 'equal' and i2-i1 > nn:
group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
yield group
group = []
i1, j1 = max(i1, i2-n), max(j1, j2-n)
group.append((tag, i1, i2, j1 ,j2))
if group and not (len(group)==1 and group[0][0] == 'equal'):
yield group
def ratio(self):
"""Return a measure of the sequences' similarity (float in [0,1]).
Where T is the total number of elements in both sequences, and
M is the number of matches, this is 2.0*M / T.
Note that this is 1 if the sequences are identical, and 0 if
they have nothing in common.
.ratio() is expensive to compute if you haven't already computed
.get_matching_blocks() or .get_opcodes(), in which case you may
want to try .quick_ratio() or .real_quick_ratio() first to get an
upper bound.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.quick_ratio()
0.75
>>> s.real_quick_ratio()
1.0
"""
matches = reduce(lambda sum, triple: sum + triple[-1],
self.get_matching_blocks(), 0)
return _calculate_ratio(matches, len(self.a) + len(self.b))
def quick_ratio(self):
"""Return an upper bound on ratio() relatively quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute.
"""
# viewing a and b as multisets, set matches to the cardinality
# of their intersection; this counts the number of matches
# without regard to order, so is clearly an upper bound
if self.fullbcount is None:
self.fullbcount = fullbcount = {}
for elt in self.b:
fullbcount[elt] = fullbcount.get(elt, 0) + 1
fullbcount = self.fullbcount
# avail[x] is the number of times x appears in 'b' less the
# number of times we've seen it in 'a' so far ... kinda
avail = {}
availhas, matches = avail.__contains__, 0
for elt in self.a:
if availhas(elt):
numb = avail[elt]
else:
numb = fullbcount.get(elt, 0)
avail[elt] = numb - 1
if numb > 0:
matches = matches + 1
return _calculate_ratio(matches, len(self.a) + len(self.b))
def real_quick_ratio(self):
"""Return an upper bound on ratio() very quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute than either .ratio() or .quick_ratio().
"""
la, lb = len(self.a), len(self.b)
# can't have more matches than the number of elements in the
# shorter sequence
return _calculate_ratio(min(la, lb), la + lb)
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
"""Use SequenceMatcher to return list of the best "good enough" matches.
word is a sequence for which close matches are desired (typically a
string).
possibilities is a list of sequences against which to match word
(typically a list of strings).
Optional arg n (default 3) is the maximum number of close matches to
return. n must be > 0.
Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
that don't score at least that similar to word are ignored.
The best (no more than n) matches among the possibilities are returned
in a list, sorted by similarity score, most similar first.
>>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
['apple', 'ape']
>>> import keyword as _keyword
>>> get_close_matches("wheel", _keyword.kwlist)
['while']
>>> get_close_matches("apple", _keyword.kwlist)
[]
>>> get_close_matches("accept", _keyword.kwlist)
['except']
"""
if not n > 0:
raise ValueError("n must be > 0: %r" % (n,))
if not 0.0 <= cutoff <= 1.0:
raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
result = []
s = SequenceMatcher()
s.set_seq2(word)
for x in possibilities:
s.set_seq1(x)
if s.real_quick_ratio() >= cutoff and \
s.quick_ratio() >= cutoff and \
s.ratio() >= cutoff:
result.append((s.ratio(), x))
# Move the best scorers to head of list
result = heapq.nlargest(n, result)
# Strip scores for the best n matches
return [x for score, x in result]
def _count_leading(line, ch):
"""
Return number of `ch` characters at the start of `line`.
Example:
>>> _count_leading(' abc', ' ')
3
"""
i, n = 0, len(line)
while i < n and line[i] == ch:
i += 1
return i
class Differ:
r"""
Differ is a class for comparing sequences of lines of text, and
producing human-readable differences or deltas. Differ uses
SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
Each line of a Differ delta begins with a two-letter code:
'- ' line unique to sequence 1
'+ ' line unique to sequence 2
' ' line common to both sequences
'? ' line not present in either input sequence
Lines beginning with '? ' attempt to guide the eye to intraline
differences, and were not present in either input sequence. These lines
can be confusing if the sequences contain tab characters.
Note that Differ makes no claim to produce a *minimal* diff. To the
contrary, minimal diffs are often counter-intuitive, because they synch
up anywhere possible, sometimes accidental matches 100 pages apart.
Restricting synch points to contiguous matches preserves some notion of
locality, at the occasional cost of producing a longer diff.
Example: Comparing two texts.
First we set up the texts, sequences of individual single-line strings
ending with newlines (such sequences can also be obtained from the
`readlines()` method of file-like objects):
>>> text1 = ''' 1. Beautiful is better than ugly.
... 2. Explicit is better than implicit.
... 3. Simple is better than complex.
... 4. Complex is better than complicated.
... '''.splitlines(1)
>>> len(text1)
4
>>> text1[0][-1]
'\n'
>>> text2 = ''' 1. Beautiful is better than ugly.
... 3. Simple is better than complex.
... 4. Complicated is better than complex.
... 5. Flat is better than nested.
... '''.splitlines(1)
Next we instantiate a Differ object:
>>> d = Differ()
Note that when instantiating a Differ object we may pass functions to
filter out line and character 'junk'. See Differ.__init__ for details.
Finally, we compare the two:
>>> result = list(d.compare(text1, text2))
'result' is a list of strings, so let's pretty-print it:
>>> from pprint import pprint as _pprint
>>> _pprint(result)
[' 1. Beautiful is better than ugly.\n',
'- 2. Explicit is better than implicit.\n',
'- 3. Simple is better than complex.\n',
'+ 3. Simple is better than complex.\n',
'? ++\n',
'- 4. Complex is better than complicated.\n',
'? ^ ---- ^\n',
'+ 4. Complicated is better than complex.\n',
'? ++++ ^ ^\n',
'+ 5. Flat is better than nested.\n']
As a single multi-line string it looks like this:
>>> print ''.join(result),
1. Beautiful is better than ugly.
- 2. Explicit is better than implicit.
- 3. Simple is better than complex.
+ 3. Simple is better than complex.
? ++
- 4. Complex is better than complicated.
? ^ ---- ^
+ 4. Complicated is better than complex.
? ++++ ^ ^
+ 5. Flat is better than nested.
Methods:
__init__(linejunk=None, charjunk=None)
Construct a text differencer, with optional filters.
compare(a, b)
Compare two sequences of lines; generate the resulting delta.
"""
def __init__(self, linejunk=None, charjunk=None):
"""
Construct a text differencer, with optional filters.
The two optional keyword parameters are for filter functions:
- `linejunk`: A function that should accept a single string argument,
and return true iff the string is junk. The module-level function
`IS_LINE_JUNK` may be used to filter out lines without visible
characters, except for at most one splat ('#'). It is recommended
to leave linejunk None; as of Python 2.3, the underlying
SequenceMatcher class has grown an adaptive notion of "noise" lines
that's better than any static definition the author has ever been
able to craft.
- `charjunk`: A function that should accept a string of length 1. The
module-level function `IS_CHARACTER_JUNK` may be used to filter out
whitespace characters (a blank or tab; **note**: bad idea to include
newline in this!). Use of IS_CHARACTER_JUNK is recommended.
"""
self.linejunk = linejunk
self.charjunk = charjunk
def compare(self, a, b):
r"""
Compare two sequences of lines; generate the resulting delta.
Each sequence must contain individual single-line strings ending with
newlines. Such sequences can be obtained from the `readlines()` method
of file-like objects. The delta generated also consists of newline-
terminated strings, ready to be printed as-is via the writeline()
method of a file-like object.
Example:
>>> print ''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(1),
... 'ore\ntree\nemu\n'.splitlines(1))),
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu
"""
cruncher = SequenceMatcher(self.linejunk, a, b)
for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
if tag == 'replace':
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
elif tag == 'delete':
g = self._dump('-', a, alo, ahi)
elif tag == 'insert':
g = self._dump('+', b, blo, bhi)
elif tag == 'equal':
g = self._dump(' ', a, alo, ahi)
else:
raise ValueError, 'unknown tag %r' % (tag,)
for line in g:
yield line
def _dump(self, tag, x, lo, hi):
"""Generate comparison results for a same-tagged range."""
for i in xrange(lo, hi):
yield '%s %s' % (tag, x[i])
def _plain_replace(self, a, alo, ahi, b, blo, bhi):
assert alo < ahi and blo < bhi
# dump the shorter block first -- reduces the burden on short-term
# memory if the blocks are of very different sizes
if bhi - blo < ahi - alo:
first = self._dump('+', b, blo, bhi)
second = self._dump('-', a, alo, ahi)
else:
first = self._dump('-', a, alo, ahi)
second = self._dump('+', b, blo, bhi)
for g in first, second:
for line in g:
yield line
def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
r"""
When replacing one block of lines with another, search the blocks
for *similar* lines; the best-matching pair (if any) is used as a
synch point, and intraline difference marking is done on the
similar pair. Lots of work, but often worth it.
Example:
>>> d = Differ()
>>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
... ['abcdefGhijkl\n'], 0, 1)
>>> print ''.join(results),
- abcDefghiJkl
? ^ ^ ^
+ abcdefGhijkl
? ^ ^ ^
"""
# don't synch up unless the lines have a similarity score of at
# least cutoff; best_ratio tracks the best score seen so far
best_ratio, cutoff = 0.74, 0.75
cruncher = SequenceMatcher(self.charjunk)
eqi, eqj = None, None # 1st indices of equal lines (if any)
# search for the pair that matches best without being identical
# (identical lines must be junk lines, & we don't want to synch up
# on junk -- unless we have to)
for j in xrange(blo, bhi):
bj = b[j]
cruncher.set_seq2(bj)
for i in xrange(alo, ahi):
ai = a[i]
if ai == bj:
if eqi is None:
eqi, eqj = i, j
continue
cruncher.set_seq1(ai)
# computing similarity is expensive, so use the quick
# upper bounds first -- have seen this speed up messy
# compares by a factor of 3.
# note that ratio() is only expensive to compute the first
# time it's called on a sequence pair; the expensive part
# of the computation is cached by cruncher
if cruncher.real_quick_ratio() > best_ratio and \
cruncher.quick_ratio() > best_ratio and \
cruncher.ratio() > best_ratio:
best_ratio, best_i, best_j = cruncher.ratio(), i, j
if best_ratio < cutoff:
# no non-identical "pretty close" pair
if eqi is None:
# no identical pair either -- treat it as a straight replace
for line in self._plain_replace(a, alo, ahi, b, blo, bhi):
yield line
return
# no close pair, but an identical pair -- synch up on that
best_i, best_j, best_ratio = eqi, eqj, 1.0
else:
# there's a close pair, so forget the identical pair (if any)
eqi = None
# a[best_i] very similar to b[best_j]; eqi is None iff they're not
# identical
# pump out diffs from before the synch point
for line in self._fancy_helper(a, alo, best_i, b, blo, best_j):
yield line
# do intraline marking on the synch pair
aelt, belt = a[best_i], b[best_j]
if eqi is None:
# pump out a '-', '?', '+', '?' quad for the synched lines
atags = btags = ""
cruncher.set_seqs(aelt, belt)
for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
la, lb = ai2 - ai1, bj2 - bj1
if tag == 'replace':
atags += '^' * la
btags += '^' * lb
elif tag == 'delete':
atags += '-' * la
elif tag == 'insert':
btags += '+' * lb
elif tag == 'equal':
atags += ' ' * la
btags += ' ' * lb
else:
raise ValueError, 'unknown tag %r' % (tag,)
for line in self._qformat(aelt, belt, atags, btags):
yield line
else:
# the synch pair is identical
yield ' ' + aelt
# pump out diffs from after the synch point
for line in self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi):
yield line
def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
g = []
if alo < ahi:
if blo < bhi:
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
else:
g = self._dump('-', a, alo, ahi)
elif blo < bhi:
g = self._dump('+', b, blo, bhi)
for line in g:
yield line
def _qformat(self, aline, bline, atags, btags):
r"""
Format "?" output and deal with leading tabs.
Example:
>>> d = Differ()
>>> results = d._qformat('\tabcDefghiJkl\n', '\tabcdefGhijkl\n',
... ' ^ ^ ^ ', ' ^ ^ ^ ')
>>> for line in results: print repr(line)
...
'- \tabcDefghiJkl\n'
'? \t ^ ^ ^\n'
'+ \tabcdefGhijkl\n'
'? \t ^ ^ ^\n'
"""
# Can hurt, but will probably help most of the time.
common = min(_count_leading(aline, "\t"),
_count_leading(bline, "\t"))
common = min(common, _count_leading(atags[:common], " "))
common = min(common, _count_leading(btags[:common], " "))
atags = atags[common:].rstrip()
btags = btags[common:].rstrip()
yield "- " + aline
if atags:
yield "? %s%s\n" % ("\t" * common, atags)
yield "+ " + bline
if btags:
yield "? %s%s\n" % ("\t" * common, btags)
# With respect to junk, an earlier version of ndiff simply refused to
# *start* a match with a junk element. The result was cases like this:
# before: private Thread currentThread;
# after: private volatile Thread currentThread;
# If you consider whitespace to be junk, the longest contiguous match
# not starting with junk is "e Thread currentThread". So ndiff reported
# that "e volatil" was inserted between the 't' and the 'e' in "private".
# While an accurate view, to people that's absurd. The current version
# looks for matching blocks that are entirely junk-free, then extends the
# longest one of those as far as possible but only with matching junk.
# So now "currentThread" is matched, then extended to suck up the
# preceding blank; then "private" is matched, and extended to suck up the
# following blank; then "Thread" is matched; and finally ndiff reports
# that "volatile " was inserted before "Thread". The only quibble
# remaining is that perhaps it was really the case that " volatile"
# was inserted after "private". I can live with that <wink>.
import re
def IS_LINE_JUNK(line, pat=re.compile(r"\s*#?\s*$").match):
    r"""
    Return 1 for ignorable line: iff `line` is blank or contains a single '#'.
    Examples:
    >>> IS_LINE_JUNK('\n')
    True
    >>> IS_LINE_JUNK('  #   \n')
    True
    >>> IS_LINE_JUNK('hello\n')
    False
    """
    # The matcher is bound as a default argument so the regex is
    # compiled exactly once, at definition time.
    match = pat(line)
    return match is not None
def IS_CHARACTER_JUNK(ch, ws=" \t"):
    r"""
    Return 1 for ignorable character: iff `ch` is a space or tab.
    Examples:
    >>> IS_CHARACTER_JUNK(' ')
    True
    >>> IS_CHARACTER_JUNK('\t')
    True
    >>> IS_CHARACTER_JUNK('\n')
    False
    >>> IS_CHARACTER_JUNK('x')
    False
    """
    # A character is junk exactly when it occurs in the whitespace string.
    return ws.find(ch) != -1
########################################################################
### Unified Diff
########################################################################
def _format_range_unified(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if length == 1:
return '{}'.format(beginning)
if not length:
beginning -= 1 # empty ranges begin at line just before the range
return '{},{}'.format(beginning, length)
def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
                 tofiledate='', n=3, lineterm='\n'):
    r"""
    Compare two sequences of lines; generate the delta as a unified diff.
    Unified diffs are a compact way of showing line changes and a few
    lines of context.  The number of context lines is set by 'n' which
    defaults to three.
    By default, the diff control lines (those with ---, +++, or @@) are
    created with a trailing newline, so inputs created from
    file.readlines() result in diffs suitable for file.writelines().
    For inputs without trailing newlines, set lineterm to "" so the
    output is uniformly newline free.
    The unidiff format normally has a header for filenames and modification
    times; any or all may be given via 'fromfile', 'tofile', 'fromfiledate'
    and 'tofiledate' (modification times normally in ISO 8601 format).
    Example:
    >>> for line in unified_diff('one two three four'.split(),
    ...             'zero one tree four'.split(), 'Original', 'Current',
    ...             '2005-01-26 23:30:50', '2010-04-02 10:20:52',
    ...             lineterm=''):
    ...     print line                  # doctest: +NORMALIZE_WHITESPACE
    --- Original        2005-01-26 23:30:50
    +++ Current         2010-04-02 10:20:52
    @@ -1,4 +1,4 @@
    +zero
     one
    -two
    -three
    +tree
     four
    """
    header_emitted = False
    matcher = SequenceMatcher(None, a, b)
    for opcode_group in matcher.get_grouped_opcodes(n):
        # The file header is written once, just before the first hunk.
        if not header_emitted:
            header_emitted = True
            fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
            todate = '\t{}'.format(tofiledate) if tofiledate else ''
            yield '--- {}{}{}'.format(fromfile, fromdate, lineterm)
            yield '+++ {}{}{}'.format(tofile, todate, lineterm)
        first, last = opcode_group[0], opcode_group[-1]
        yield '@@ -{} +{} @@{}'.format(
            _format_range_unified(first[1], last[2]),
            _format_range_unified(first[3], last[4]), lineterm)
        for tag, i1, i2, j1, j2 in opcode_group:
            if tag == 'equal':
                for line in a[i1:i2]:
                    yield ' ' + line
            else:
                if tag in ('replace', 'delete'):
                    for line in a[i1:i2]:
                        yield '-' + line
                if tag in ('replace', 'insert'):
                    for line in b[j1:j2]:
                        yield '+' + line
########################################################################
### Context Diff
########################################################################
def _format_range_context(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if not length:
beginning -= 1 # empty ranges begin at line just before the range
if length <= 1:
return '{}'.format(beginning)
return '{},{}'.format(beginning, beginning + length - 1)
# See http://www.unix.org/single_unix_specification/
def context_diff(a, b, fromfile='', tofile='',
                 fromfiledate='', tofiledate='', n=3, lineterm='\n'):
    r"""
    Compare two sequences of lines; generate the delta as a context diff.
    Context diffs are a compact way of showing line changes and a few
    lines of context.  The number of context lines is set by 'n' which
    defaults to three.
    By default, the diff control lines (those with *** or ---) are
    created with a trailing newline, so inputs created from
    file.readlines() result in diffs suitable for file.writelines().
    For inputs without trailing newlines, set lineterm to "" so the
    output is uniformly newline free.
    The context diff format normally has a header for filenames and
    modification times; any or all may be given via 'fromfile', 'tofile',
    'fromfiledate' and 'tofiledate' (modification times normally in ISO
    8601 format).  If not specified, the strings default to blanks.
    Example:
    >>> print ''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(1),
    ...       'zero\none\ntree\nfour\n'.splitlines(1), 'Original', 'Current')),
    *** Original
    --- Current
    ***************
    *** 1,4 ****
      one
    ! two
    ! three
      four
    --- 1,4 ----
    + zero
      one
    ! tree
      four
    """
    prefix = dict(insert='+ ', delete='- ', replace='! ', equal='  ')
    header_emitted = False
    for opcode_group in SequenceMatcher(None, a, b).get_grouped_opcodes(n):
        # The file header is written once, just before the first hunk.
        if not header_emitted:
            header_emitted = True
            fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
            todate = '\t{}'.format(tofiledate) if tofiledate else ''
            yield '*** {}{}{}'.format(fromfile, fromdate, lineterm)
            yield '--- {}{}{}'.format(tofile, todate, lineterm)
        first, last = opcode_group[0], opcode_group[-1]
        yield '***************' + lineterm
        yield '*** {} ****{}'.format(
            _format_range_context(first[1], last[2]), lineterm)
        # "from" side is shown only when something was deleted or replaced.
        if any(tag in ('replace', 'delete') for tag, _, _, _, _ in opcode_group):
            for tag, i1, i2, _, _ in opcode_group:
                if tag != 'insert':
                    for line in a[i1:i2]:
                        yield prefix[tag] + line
        yield '--- {} ----{}'.format(
            _format_range_context(first[3], last[4]), lineterm)
        # "to" side is shown only when something was inserted or replaced.
        if any(tag in ('replace', 'insert') for tag, _, _, _, _ in opcode_group):
            for tag, _, _, j1, j2 in opcode_group:
                if tag != 'delete':
                    for line in b[j1:j2]:
                        yield prefix[tag] + line
def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK):
    r"""
    Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
    Optional keyword parameters `linejunk` and `charjunk` are filter
    functions (or None):
    - linejunk: A function that accepts a single string argument and
      returns true iff the string is junk.  The default is None, and is
      recommended; as of Python 2.3, an adaptive notion of "noise" lines is
      used that does a good job on its own.
    - charjunk: A function that accepts a string of length 1.  The default
      is module-level function IS_CHARACTER_JUNK, which filters out
      whitespace characters (a blank or tab; note: bad idea to include
      newline in this!).
    Tools/scripts/ndiff.py is a command-line front-end to this function.
    Example:
    >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
    ...              'ore\ntree\nemu\n'.splitlines(1))
    >>> print ''.join(diff),
    - one
    ?  ^
    + ore
    ?  ^
    - two
    - three
    ?  -
    + tree
    + emu
    """
    # Delegate entirely to Differ; this is just a convenience wrapper.
    differ = Differ(linejunk, charjunk)
    return differ.compare(a, b)
def _mdiff(fromlines, tolines, context=None, linejunk=None,
charjunk=IS_CHARACTER_JUNK):
r"""Returns generator yielding marked up from/to side by side differences.
Arguments:
fromlines -- list of text lines to compared to tolines
tolines -- list of text lines to be compared to fromlines
context -- number of context lines to display on each side of difference,
if None, all from/to text lines will be generated.
linejunk -- passed on to ndiff (see ndiff documentation)
charjunk -- passed on to ndiff (see ndiff documentation)
This function returns an interator which returns a tuple:
(from line tuple, to line tuple, boolean flag)
from/to line tuple -- (line num, line text)
line num -- integer or None (to indicate a context separation)
line text -- original line text with following markers inserted:
'\0+' -- marks start of added text
'\0-' -- marks start of deleted text
'\0^' -- marks start of changed text
'\1' -- marks end of added/deleted/changed text
boolean flag -- None indicates context separation, True indicates
either "from" or "to" line contains a change, otherwise False.
This function/iterator was originally developed to generate side by side
file difference for making HTML pages (see HtmlDiff class for example
usage).
Note, this function utilizes the ndiff function to generate the side by
side difference markup. Optional ndiff arguments may be passed to this
function and they in turn will be passed to ndiff.
"""
import re
# regular expression for finding intraline change indices
change_re = re.compile('(\++|\-+|\^+)')
# create the difference iterator to generate the differences
diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk)
def _make_line(lines, format_key, side, num_lines=[0,0]):
"""Returns line of text with user's change markup and line formatting.
lines -- list of lines from the ndiff generator to produce a line of
text from. When producing the line of text to return, the
lines used are removed from this list.
format_key -- '+' return first line in list with "add" markup around
the entire line.
'-' return first line in list with "delete" markup around
the entire line.
'?' return first line in list with add/delete/change
intraline markup (indices obtained from second line)
None return first line in list with no markup
side -- indice into the num_lines list (0=from,1=to)
num_lines -- from/to current line number. This is NOT intended to be a
passed parameter. It is present as a keyword argument to
maintain memory of the current line numbers between calls
of this function.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
num_lines[side] += 1
# Handle case where no user markup is to be added, just return line of
# text with user's line format to allow for usage of the line number.
if format_key is None:
return (num_lines[side],lines.pop(0)[2:])
# Handle case of intraline changes
if format_key == '?':
text, markers = lines.pop(0), lines.pop(0)
# find intraline changes (store change type and indices in tuples)
sub_info = []
def record_sub_info(match_object,sub_info=sub_info):
sub_info.append([match_object.group(1)[0],match_object.span()])
return match_object.group(1)
change_re.sub(record_sub_info,markers)
# process each tuple inserting our special marks that won't be
# noticed by an xml/html escaper.
for key,(begin,end) in sub_info[::-1]:
text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:]
text = text[2:]
# Handle case of add/delete entire line
else:
text = lines.pop(0)[2:]
# if line of text is just a newline, insert a space so there is
# something for the user to highlight and see.
if not text:
text = ' '
# insert marks that won't be noticed by an xml/html escaper.
text = '\0' + format_key + text + '\1'
# Return line of text, first allow user's line formatter to do its
# thing (such as adding the line number) then replace the special
# marks with what the user's change markup.
return (num_lines[side],text)
def _line_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from a
differencing iterator, processes them and yields them. When it can
it yields both a "from" and a "to" line, otherwise it will yield one
or the other. In addition to yielding the lines of from/to text, a
boolean flag is yielded to indicate if the text line(s) have
differences in them.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
lines = []
num_blanks_pending, num_blanks_to_yield = 0, 0
while True:
# Load up next 4 lines so we can look ahead, create strings which
# are a concatenation of the first character of each of the 4 lines
# so we can do some very readable comparisons.
while len(lines) < 4:
try:
lines.append(diff_lines_iterator.next())
except StopIteration:
lines.append('X')
s = ''.join([line[0] for line in lines])
if s.startswith('X'):
# When no more lines, pump out any remaining blank lines so the
# corresponding add/delete lines get a matching blank line so
# all line pairs get yielded at the next level.
num_blanks_to_yield = num_blanks_pending
elif s.startswith('-?+?'):
# simple intraline change
yield _make_line(lines,'?',0), _make_line(lines,'?',1), True
continue
elif s.startswith('--++'):
# in delete block, add block coming: we do NOT want to get
# caught up on blank lines yet, just process the delete line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith(('--?+', '--+', '- ')):
# in delete block and see a intraline change or unchanged line
# coming: yield the delete line and then blanks
from_line,to_line = _make_line(lines,'-',0), None
num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0
elif s.startswith('-+?'):
# intraline change
yield _make_line(lines,None,0), _make_line(lines,'?',1), True
continue
elif s.startswith('-?+'):
# intraline change
yield _make_line(lines,'?',0), _make_line(lines,None,1), True
continue
elif s.startswith('-'):
# delete FROM line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith('+--'):
# in add block, delete block coming: we do NOT want to get
# caught up on blank lines yet, just process the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(('+ ', '+-')):
# will be leaving an add block: yield blanks then add line
from_line, to_line = None, _make_line(lines,'+',1)
num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0
elif s.startswith('+'):
# inside an add block, yield the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(' '):
# unchanged text, yield it to both sides
yield _make_line(lines[:],None,0),_make_line(lines,None,1),False
continue
# Catch up on the blank lines so when we yield the next from/to
# pair, they are lined up.
while(num_blanks_to_yield < 0):
num_blanks_to_yield += 1
yield None,('','\n'),True
while(num_blanks_to_yield > 0):
num_blanks_to_yield -= 1
yield ('','\n'),None,True
if s.startswith('X'):
raise StopIteration
else:
yield from_line,to_line,True
def _line_pair_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from the line
iterator. Its difference from that iterator is that this function
always yields a pair of from/to text lines (with the change
indication). If necessary it will collect single from/to lines
until it has a matching pair from/to pair to yield.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
line_iterator = _line_iterator()
fromlines,tolines=[],[]
while True:
# Collecting lines of text until we have a from/to pair
while (len(fromlines)==0 or len(tolines)==0):
from_line, to_line, found_diff =line_iterator.next()
if from_line is not None:
fromlines.append((from_line,found_diff))
if to_line is not None:
tolines.append((to_line,found_diff))
# Once we have a pair, remove them from the collection and yield it
from_line, fromDiff = fromlines.pop(0)
to_line, to_diff = tolines.pop(0)
yield (from_line,to_line,fromDiff or to_diff)
# Handle case where user does not want context differencing, just yield
# them up without doing anything else with them.
line_pair_iterator = _line_pair_iterator()
if context is None:
while True:
yield line_pair_iterator.next()
# Handle case where user wants context differencing. We must do some
# storage of lines until we know for sure that they are to be yielded.
else:
context += 1
lines_to_write = 0
while True:
# Store lines up until we find a difference, note use of a
# circular queue because we only need to keep around what
# we need for context.
index, contextLines = 0, [None]*(context)
found_diff = False
while(found_diff is False):
from_line, to_line, found_diff = line_pair_iterator.next()
i = index % context
contextLines[i] = (from_line, to_line, found_diff)
index += 1
# Yield lines that we have collected so far, but first yield
# the user's separator.
if index > context:
yield None, None, None
lines_to_write = context
else:
lines_to_write = index
index = 0
while(lines_to_write):
i = index % context
index += 1
yield contextLines[i]
lines_to_write -= 1
# Now yield the context lines after the change
lines_to_write = context-1
while(lines_to_write):
from_line, to_line, found_diff = line_pair_iterator.next()
# If another change within the context, extend the context
if found_diff:
lines_to_write = context-1
else:
lines_to_write -= 1
yield from_line, to_line, found_diff
_file_template = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type"
content="text/html; charset=ISO-8859-1" />
<title></title>
<style type="text/css">%(styles)s
</style>
</head>
<body>
%(table)s%(legend)s
</body>
</html>"""
_styles = """
table.diff {font-family:Courier; border:medium;}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}"""
_table_template = """
<table class="diff" id="difflib_chg_%(prefix)s_top"
cellspacing="0" cellpadding="0" rules="groups" >
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
%(header_row)s
<tbody>
%(data_rows)s </tbody>
</table>"""
_legend = """
<table class="diff" summary="Legends">
<tr> <th colspan="2"> Legends </th> </tr>
<tr> <td> <table border="" summary="Colors">
<tr><th> Colors </th> </tr>
<tr><td class="diff_add"> Added </td></tr>
<tr><td class="diff_chg">Changed</td> </tr>
<tr><td class="diff_sub">Deleted</td> </tr>
</table></td>
<td> <table border="" summary="Links">
<tr><th colspan="2"> Links </th> </tr>
<tr><td>(f)irst change</td> </tr>
<tr><td>(n)ext change</td> </tr>
<tr><td>(t)op</td> </tr>
</table></td> </tr>
</table>"""
class HtmlDiff(object):
"""For producing HTML side by side comparison with change highlights.
This class can be used to create an HTML table (or a complete HTML file
containing the table) showing a side by side, line by line comparison
of text with inter-line and intra-line change highlights. The table can
be generated in either full or contextual difference mode.
The following methods are provided for HTML generation:
make_table -- generates HTML for a single side by side table
make_file -- generates complete HTML file with a single side by side table
See tools/scripts/diff.py for an example usage of this class.
"""
_file_template = _file_template
_styles = _styles
_table_template = _table_template
_legend = _legend
_default_prefix = 0
def __init__(self,tabsize=8,wrapcolumn=None,linejunk=None,
charjunk=IS_CHARACTER_JUNK):
"""HtmlDiff instance initializer
Arguments:
tabsize -- tab stop spacing, defaults to 8.
wrapcolumn -- column number where lines are broken and wrapped,
defaults to None where lines are not wrapped.
linejunk,charjunk -- keyword arguments passed into ndiff() (used to by
HtmlDiff() to generate the side by side HTML differences). See
ndiff() documentation for argument default values and descriptions.
"""
self._tabsize = tabsize
self._wrapcolumn = wrapcolumn
self._linejunk = linejunk
self._charjunk = charjunk
def make_file(self,fromlines,tolines,fromdesc='',todesc='',context=False,
numlines=5):
"""Returns HTML file of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
the "next" link anchors before the next change (so click of
"next" link jumps to just before the change).
"""
return self._file_template % dict(
styles = self._styles,
legend = self._legend,
table = self.make_table(fromlines,tolines,fromdesc,todesc,
context=context,numlines=numlines))
def _tab_newline_replace(self,fromlines,tolines):
    """Return from/to line lists with tabs expanded and newlines removed.

    Tabs are expanded to the proper tab stops, but the padding is written
    as tab characters rather than spaces so the difference algorithms can
    tell a tab/space substitution apart from unchanged text.  The tab
    characters are later converted to non-breaking spaces during HTML
    generation.
    """
    def expand_tabs(line):
        # hide genuine spaces so expandtabs() can't confuse them with padding
        hidden = line.replace(' ', '\0')
        # let Python compute the tab stops
        expanded = hidden.expandtabs(self._tabsize)
        # turn the padding produced by expandtabs back into tab characters
        # (they become markup-safe &nbsp; at the end of HTML generation)
        retabbed = expanded.replace(' ', '\t')
        # restore the real spaces and drop the trailing newline
        return retabbed.replace('\0', ' ').rstrip('\n')

    return ([expand_tabs(line) for line in fromlines],
            [expand_tabs(line) for line in tolines])
def _split_line(self,data_list,line_num,text):
    """Builds list of text lines by splitting text lines at wrap point

    This function will determine if the input text line needs to be
    wrapped (split) into separate lines.  If so, the first wrap point
    will be determined and the first line appended to the output
    text line list.  This function is used recursively to handle
    the second part of the split line to further split it.

    ``text`` may contain '\\0<flag>' / '\\1' sentinel pairs inserted by the
    diff engine to mark changed regions; they are invisible characters
    and must not count toward the wrap column.
    """
    # if blank line or context separator, just add it to the output list
    if not line_num:
        data_list.append((line_num,text))
        return

    # if line text doesn't need wrapping, just add it to the output list
    size = len(text)
    max = self._wrapcolumn
    # the second test discounts the sentinel characters: each '\0x'...'\1'
    # marker pair contributes 3 non-printing characters
    if (size <= max) or ((size -(text.count('\0')*3)) <= max):
        data_list.append((line_num,text))
        return

    # scan text looking for the wrap point, keeping track if the wrap
    # point is inside markers
    i = 0          # index into text (counts every character)
    n = 0          # count of *visible* characters seen so far
    mark = ''      # flag character of the marker we are inside, if any
    while n < max and i < size:
        if text[i] == '\0':
            # marker open: skip the sentinel and remember its flag char
            i += 1
            mark = text[i]
            i += 1
        elif text[i] == '\1':
            # marker close
            i += 1
            mark = ''
        else:
            # ordinary visible character
            i += 1
            n += 1

    # wrap point is inside text, break it up into separate lines
    line1 = text[:i]
    line2 = text[i:]

    # if wrap point is inside markers, place end marker at end of first
    # line and start marker at beginning of second line because each
    # line will have its own table tag markup around it.
    if mark:
        line1 = line1 + '\1'
        line2 = '\0' + mark + line2

    # tack on first line onto the output list
    data_list.append((line_num,line1))

    # use this routine again to wrap the remaining text
    # ('>' marks continuation lines so they get no line number)
    self._split_line(data_list,'>',line2)
def _line_wrapper(self,diffs):
    """Yield mdiff tuples with over-long text lines split (wrapped)."""
    for fromdata, todata, flag in diffs:
        # context separators (flag is None) pass through untouched
        if flag is None:
            yield fromdata, todata, flag
            continue
        (fromline, fromtext), (toline, totext) = fromdata, todata
        # split each side at the wrap column into a list of text lines
        wrapped_from, wrapped_to = [], []
        self._split_line(wrapped_from, fromline, fromtext)
        self._split_line(wrapped_to, toline, totext)
        # emit the wrapped lines pairwise, padding the shorter side with
        # blank entries so the table rows stay aligned
        while wrapped_from or wrapped_to:
            fromdata = wrapped_from.pop(0) if wrapped_from else ('', ' ')
            todata = wrapped_to.pop(0) if wrapped_to else ('', ' ')
            yield fromdata, todata, flag
def _collect_lines(self,diffs):
    """Collects mdiff output into separate lists

    Before storing the mdiff from/to data into a list, it is converted
    into a single line of text with HTML markup.  Separator entries
    (where mdiff yields None) are stored as None in both line lists.
    """
    fromlist,tolist,flaglist = [],[],[]
    # pull from/to data and flags from mdiff style iterator
    for fromdata,todata,flag in diffs:
        try:
            # store HTML markup of the lines into the lists
            fromlist.append(self._format_line(0,flag,*fromdata))
            tolist.append(self._format_line(1,flag,*todata))
        except TypeError:
            # exceptions occur for lines where context separators go
            # (fromdata/todata are None there, so *-unpacking raises
            # before either append happens)
            fromlist.append(None)
            tolist.append(None)
        flaglist.append(flag)
    return fromlist,tolist,flaglist
def _format_line(self,side,flag,linenum,text):
    """Returns HTML markup of "from" / "to" text lines

    side -- 0 or 1 indicating "from" or "to" text
    flag -- indicates if difference on line
    linenum -- line number (used for line number column)
    text -- line text to be marked up
    """
    try:
        linenum = '%d' % linenum
        id = ' id="%s%s"' % (self._prefix[side],linenum)
    except TypeError:
        # handle blank lines where linenum is '>' or ''
        id = ''
    # replace those things that would get confused with HTML symbols
    # (the entity text had been lost in this copy, turning these into
    # no-op self-replacements; restored)
    text=text.replace("&","&amp;").replace(">","&gt;").replace("<","&lt;")

    # make space non-breakable so they don't get compressed or line wrapped
    text = text.replace(' ','&nbsp;').rstrip()

    return '<td class="diff_header"%s>%s</td><td nowrap="nowrap">%s</td>' \
           % (id,linenum,text)
def _make_prefix(self):
    """Create unique anchor prefixes"""

    # Generate a unique anchor prefix so multiple tables
    # can exist on the same HTML page without conflicts.
    fromprefix = "from%d_" % HtmlDiff._default_prefix
    toprefix = "to%d_" % HtmlDiff._default_prefix
    # bump the class-level counter so the next table gets fresh anchors
    HtmlDiff._default_prefix += 1
    # store prefixes so line format method has access
    self._prefix = [fromprefix,toprefix]
def _convert_flags(self,fromlist,tolist,flaglist,context,numlines):
    """Makes list of "next" links

    Scans the per-line change flags and builds, for every table row, the
    anchor dropped just before each change (next_id) and the link that
    jumps to the following change (next_href).  Returns the possibly
    replaced from/to/flag lists plus the href and id lists.
    """
    # all anchor names will be generated using the unique "to" prefix
    toprefix = self._prefix[1]

    # process change flags, generating middle column of next anchors/links
    next_id = ['']*len(flaglist)
    next_href = ['']*len(flaglist)
    num_chg, in_change = 0, False
    last = 0
    for i,flag in enumerate(flaglist):
        if flag:
            if not in_change:
                in_change = True
                last = i
                # at the beginning of a change, drop an anchor a few lines
                # (the context lines) before the change for the previous
                # link
                i = max([0,i-numlines])
                next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix,num_chg)
                # at the beginning of a change, drop a link to the next
                # change
                num_chg += 1
                next_href[last] = '<a href="#difflib_chg_%s_%d">n</a>' % (
                     toprefix,num_chg)
        else:
            in_change = False
    # check for cases where there is no content to avoid exceptions
    if not flaglist:
        flaglist = [False]
        next_id = ['']
        next_href = ['']
        last = 0
        if context:
            # restore the &nbsp; padding (entity text was lost in this copy)
            fromlist = ['<td></td><td>&nbsp;No Differences Found&nbsp;</td>']
            tolist = fromlist
        else:
            fromlist = tolist = ['<td></td><td>&nbsp;Empty File&nbsp;</td>']
    # if not a change on first line, drop a link
    if not flaglist[0]:
        next_href[0] = '<a href="#difflib_chg_%s_0">f</a>' % toprefix
    # redo the last link to link to the top
    next_href[last] = '<a href="#difflib_chg_%s_top">t</a>' % (toprefix)

    return fromlist,tolist,flaglist,next_href,next_id
def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False,
               numlines=5):
    """Returns HTML table of side by side comparison with change highlights

    Arguments:
    fromlines -- list of "from" lines
    tolines -- list of "to" lines
    fromdesc -- "from" file column header string
    todesc -- "to" file column header string
    context -- set to True for contextual differences (defaults to False
        which shows full differences).
    numlines -- number of context lines.  When context is set True,
        controls number of lines displayed before and after the change.
        When context is False, controls the number of lines to place
        the "next" link anchors before the next change (so click of
        "next" link jumps to just before the change).
    """

    # make unique anchor prefixes so that multiple tables may exist
    # on the same page without conflict.
    self._make_prefix()

    # change tabs to spaces before it gets more difficult after we insert
    # markup (typo "markkup" fixed)
    fromlines,tolines = self._tab_newline_replace(fromlines,tolines)

    # create diffs iterator which generates side by side from/to data
    if context:
        context_lines = numlines
    else:
        context_lines = None
    diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk,
                   charjunk=self._charjunk)

    # set up iterator to wrap lines that exceed desired width
    if self._wrapcolumn:
        diffs = self._line_wrapper(diffs)

    # collect up from/to lines and flags into lists (also format the lines)
    fromlist,tolist,flaglist = self._collect_lines(diffs)

    # process change flags, generating middle column of next anchors/links
    fromlist,tolist,flaglist,next_href,next_id = self._convert_flags(
        fromlist,tolist,flaglist,context,numlines)

    s = []
    fmt = ' <tr><td class="diff_next"%s>%s</td>%s' + \
          '<td class="diff_next">%s</td>%s</tr>\n'
    for i in range(len(flaglist)):
        if flaglist[i] is None:
            # mdiff yields None on separator lines skip the bogus ones
            # generated for the first line
            if i > 0:
                s.append(' </tbody> \n <tbody>\n')
        else:
            s.append( fmt % (next_id[i],next_href[i],fromlist[i],
                             next_href[i],tolist[i]))
    if fromdesc or todesc:
        header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
            '<th class="diff_next"><br /></th>',
            '<th colspan="2" class="diff_header">%s</th>' % fromdesc,
            '<th class="diff_next"><br /></th>',
            '<th colspan="2" class="diff_header">%s</th>' % todesc)
    else:
        header_row = ''

    table = self._table_template % dict(
        data_rows=''.join(s),
        header_row=header_row,
        prefix=self._prefix[1])

    # the \0/\1 sentinels from _mdiff become <span> markup, and the tab
    # characters inserted by _tab_newline_replace become non-breaking
    # spaces (per that method's docstring; the &nbsp; entity had been
    # lost in this copy and is restored here)
    return table.replace('\0+','<span class="diff_add">'). \
                 replace('\0-','<span class="diff_sub">'). \
                 replace('\0^','<span class="diff_chg">'). \
                 replace('\1','</span>'). \
                 replace('\t','&nbsp;')
# NOTE(review): ``re`` was presumably imported for module internals defined
# above this chunk; deleting it here keeps it out of the module's public
# namespace -- confirm against the full module before removing this line.
del re
def restore(delta, which):
r"""
Generate one of the two sequences that generated a delta.
Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
lines originating from file 1 or 2 (parameter `which`), stripping off line
prefixes.
Examples:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
... 'ore\ntree\nemu\n'.splitlines(1))
>>> diff = list(diff)
>>> print ''.join(restore(diff, 1)),
one
two
three
>>> print ''.join(restore(diff, 2)),
ore
tree
emu
"""
try:
tag = {1: "- ", 2: "+ "}[int(which)]
except KeyError:
raise ValueError, ('unknown delta choice (must be 1 or 2): %r'
% which)
prefixes = (" ", tag)
for line in delta:
if line[:2] in prefixes:
yield line[2:]
def _test():
    """Run difflib's embedded doctests; return doctest's (failures, tests)."""
    import doctest, difflib
    return doctest.testmod(difflib)

if __name__ == "__main__":
    _test()
| bsd-2-clause |
kif/freesas | freesas/app/supycomb.py | 1 | 3859 | #!/usr/bin/python3
__author__ = "Guillaume Bonamis"
__license__ = "MIT"
__copyright__ = "2015, ESRF"
__date__ = "09/07/2020"
import logging
from os.path import dirname, abspath
from freesas.align import InputModels, AlignModels
from freesas.sas_argparser import SASParser
base = dirname(dirname(abspath(__file__)))
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("supycomb")
def parse():
    """Build and run the supycomb argument parser.

    :return: the parsed ``argparse.Namespace`` (file list plus options)
    """
    description = "Align several models and calculate NSD"
    epilog = """supycomb is an open-source implementation of
[J. Appl. Cryst. (2001). 34, 33-41](doi:10.1107/S0021889800014126).

The main difference with supcomb: the fast mode does not re-bin beads. It only refines the best matching orientation which provides a speed-up of a factor 8.
"""
    # prog was "supycomp" (typo); fixed to match the tool name used by the
    # module docstring, the logger and the epilog.
    parser = SASParser(prog="supycomb", description=description, epilog=epilog)
    parser.add_file_argument(help_text="pdb files to align")
    parser.add_argument(
        "-m",
        "--mode",
        dest="mode",
        type=str,
        choices=["SLOW", "FAST"],
        default="SLOW",
        # stray trailing ')' removed from the help text
        help="Either SLOW or FAST, default: %(default)s",
    )
    parser.add_argument(
        "-e",
        "--enantiomorphs",
        type=str,
        choices=["YES", "NO"],
        default="YES",
        # stray trailing ')' removed from the help text
        help="Search enantiomorphs, YES or NO, default: %(default)s",
    )
    parser.add_argument(
        "-q",
        "--quiet",
        type=str,
        choices=["ON", "OFF"],
        default="ON",
        help="Hide log or not, default: %(default)s",
    )
    parser.add_argument(
        "-g",
        "--gui",
        type=str,
        choices=["YES", "NO"],
        default="YES",
        help="Use GUI for figures or not, default: %(default)s",
    )
    parser.add_argument(
        "-o",
        "--output",
        type=str,
        default="aligned.pdb",
        help="output filename, default: %(default)s",
    )
    return parser.parse_args()
def main():
    """main application

    Parses the CLI, aligns the given PDB models and logs NSD values.
    With exactly two inputs the second model is aligned onto the first;
    with more, valid models are selected and aligned onto a reference.
    """
    args = parse()
    input_len = len(args.file)
    logger.info("%s input files" % input_len)
    selection = InputModels()

    # translate the string CLI flags into booleans
    if args.mode == "SLOW":
        slow = True
        logger.info("SLOW mode")
    else:
        slow = False
        logger.info("FAST mode")

    if args.enantiomorphs == "YES":
        enantiomorphs = True
    else:
        enantiomorphs = False
        # NOTE(review): logged only when enantiomorph search is disabled;
        # branch placement reconstructed -- confirm against upstream layout
        logger.info("NO enantiomorphs")

    if args.quiet == "OFF":
        logger.setLevel(logging.DEBUG)
        logger.info("setLevel: Debug")

    # --gui NO means "save figures to files instead of showing them"
    if args.gui == "NO":
        save = True
        logger.info(
            "Figures saved automatically : \n R factor values and selection => Rfactor.png \n NSD table and selection => nsd.png"
        )
    else:
        save = False

    align = AlignModels(args.file, slow=slow, enantiomorphs=enantiomorphs)
    if input_len == 2:
        # two models: align the second onto the first
        align.outputfiles = args.output
        align.assign_models()
        dist = align.alignment_2models()
        logger.info("%s and %s aligned" % (args.file[0], args.file[1]))
        logger.info("NSD after optimized alignment = %.2f" % dist)
    else:
        # many models: filter by R factor, then align all valid models
        # onto the automatically chosen reference model
        align.outputfiles = [
            "model-%02i.pdb" % (i + 1) for i in range(input_len)
        ]
        selection.inputfiles = args.file
        selection.models_selection()
        selection.rfactorplot(save=save)
        align.models = selection.sasmodels
        align.validmodels = selection.validmodels

        align.makeNSDarray()
        align.alignment_reference()
        logger.info(
            "valid models aligned on the model %s" % (align.reference + 1)
        )
        align.plotNSDarray(rmax=round(selection.rmax, 4), save=save)

    # keep interactive figures open until the user confirms
    if not save and input_len > 2:
        input("Press any key to exit")


if __name__ == "__main__":
    main()
| mit |
hmen89/odoo | addons/l10n_fr/report/compute_resultant_report.py | 374 | 4004 | # -*- coding: utf-8 -*-
#
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
import base_report
from openerp.osv import osv
class cdr(base_report.base_report):
    """French "Compte de Resultat" report parser: loads the raw 'cdr'
    account values and derives the charges/produits subtotals."""

    def __init__(self, cr, uid, name, context):
        super(cdr, self).__init__(cr, uid, name, context)

    def set_context(self, objects, data, ids):
        super(cdr, self).set_context(objects, data, ids)
        # fetch the raw 'cdr' values selected in the wizard form
        self._load('cdr', self.localcontext['data']['form'])

        def total(keys):
            # Left-associative sum of the named context values, exactly
            # equivalent to chaining them with '+'.  Reads localcontext on
            # every access because _set_variable publishes earlier totals
            # there (e.g. 'charges' reuses 'ct1').
            acc = self.localcontext[keys[0]]
            for key in keys[1:]:
                acc = acc + self.localcontext[key]
            return acc

        def span(prefix, first, last):
            # ['cdrc1', ..., 'cdrcN'] style key ranges (inclusive)
            return ['%s%d' % (prefix, i) for i in range(first, last + 1)]

        # charges (expense) subtotals
        self._set_variable('ct1', total(span('cdrc', 1, 15)))
        self._set_variable('ct3', total(span('cdrc', 17, 20)))
        self._set_variable('ct4', total(span('cdrc', 21, 23)))
        self._set_variable(
            'charges',
            total(['ct1', 'cdrc16', 'ct3', 'ct4', 'cdrc24', 'cdrc25'])
        )
        # produits (income) subtotals
        self._set_variable('pta', total(span('cdrp', 1, 2)))
        self._set_variable('ptb', total(span('cdrp', 3, 7)))
        self._set_variable('pt1', total(['pta', 'ptb']))
        self._set_variable('pt3', total(span('cdrp', 9, 14)))
        self._set_variable('pt4', total(span('cdrp', 15, 17)))
        self._set_variable(
            'produits',
            total(['pt1', 'cdrp8', 'pt3', 'pt4'])
        )
class wrapped_report_resultat(osv.AbstractModel):
    """QWeb wrapper exposing the legacy ``cdr`` parser as the
    ``l10n_fr.report_l10nfrresultat`` report."""
    _name = 'report.l10n_fr.report_l10nfrresultat'   # report service name
    _inherit = 'report.abstract_report'              # generic wrapper behaviour
    _template = 'l10n_fr.report_l10nfrresultat'      # QWeb template to render
    _wrapped_report_class = cdr                      # legacy parser providing values
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
liu602348184/django | tests/template_backends/test_django.py | 199 | 4793 | from template_tests.test_response import test_processor_name
from django.template import RequestContext
from django.template.backends.django import DjangoTemplates
from django.template.library import InvalidTemplateLibrary
from django.test import RequestFactory, ignore_warnings, override_settings
from django.utils.deprecation import RemovedInDjango110Warning
from .test_dummy import TemplateStringsTests
class DjangoTemplatesTests(TemplateStringsTests):
    """Run the backend-agnostic template suite against the DjangoTemplates
    backend, plus Django-specific checks for context processors and
    templatetag library discovery."""

    # configuration consumed by the shared TemplateStringsTests suite
    engine_class = DjangoTemplates
    backend_name = 'django'

    def test_context_has_priority_over_template_context_processors(self):
        # See ticket #23789.
        engine = DjangoTemplates({
            'DIRS': [],
            'APP_DIRS': False,
            'NAME': 'django',
            'OPTIONS': {
                'context_processors': [test_processor_name],
            },
        })

        template = engine.from_string('{{ processors }}')
        request = RequestFactory().get('/')

        # Check that context processors run
        content = template.render({}, request)
        self.assertEqual(content, 'yes')

        # Check that context overrides context processors
        content = template.render({'processors': 'no'}, request)
        self.assertEqual(content, 'no')

    @ignore_warnings(category=RemovedInDjango110Warning)
    def test_request_context_conflicts_with_request(self):
        template = self.engine.from_string('hello')

        request = RequestFactory().get('/')
        request_context = RequestContext(request)
        # This doesn't raise an exception.
        template.render(request_context, request)

        # rendering with a RequestContext bound to a *different* request
        # must be rejected
        other_request = RequestFactory().get('/')
        msg = ("render() was called with a RequestContext and a request "
               "argument which refer to different requests. Make sure "
               "that the context argument is a dict or at least that "
               "the two arguments refer to the same request.")
        with self.assertRaisesMessage(ValueError, msg):
            template.render(request_context, other_request)

    @override_settings(INSTALLED_APPS=['template_backends.apps.good'])
    def test_templatetag_discovery(self):
        engine = DjangoTemplates({
            'DIRS': [],
            'APP_DIRS': False,
            'NAME': 'django',
            'OPTIONS': {
                'libraries': {
                    'alternate': 'template_backends.apps.good.templatetags.good_tags',
                    'override': 'template_backends.apps.good.templatetags.good_tags',
                },
            },
        })

        # libraries are discovered from installed applications
        self.assertEqual(
            engine.engine.libraries['good_tags'],
            'template_backends.apps.good.templatetags.good_tags',
        )
        self.assertEqual(
            engine.engine.libraries['subpackage.tags'],
            'template_backends.apps.good.templatetags.subpackage.tags',
        )
        # libraries are discovered from django.templatetags
        self.assertEqual(
            engine.engine.libraries['static'],
            'django.templatetags.static',
        )
        # libraries passed in OPTIONS are registered
        self.assertEqual(
            engine.engine.libraries['alternate'],
            'template_backends.apps.good.templatetags.good_tags',
        )
        # libraries passed in OPTIONS take precedence over discovered ones
        self.assertEqual(
            engine.engine.libraries['override'],
            'template_backends.apps.good.templatetags.good_tags',
        )

    @override_settings(INSTALLED_APPS=['template_backends.apps.importerror'])
    def test_templatetag_discovery_import_error(self):
        """
        Import errors in tag modules should be reraised with a helpful message.
        """
        with self.assertRaisesMessage(
            InvalidTemplateLibrary,
            "ImportError raised when trying to load "
            "'template_backends.apps.importerror.templatetags.broken_tags'"
        ):
            DjangoTemplates({
                'DIRS': [],
                'APP_DIRS': False,
                'NAME': 'django',
                'OPTIONS': {},
            })

    def test_builtins_discovery(self):
        # modules listed in OPTIONS['builtins'] are appended after
        # Django's own default builtin tag/filter modules
        engine = DjangoTemplates({
            'DIRS': [],
            'APP_DIRS': False,
            'NAME': 'django',
            'OPTIONS': {
                'builtins': ['template_backends.apps.good.templatetags.good_tags'],
            },
        })

        self.assertEqual(
            engine.engine.builtins, [
                'django.template.defaulttags',
                'django.template.defaultfilters',
                'django.template.loader_tags',
                'template_backends.apps.good.templatetags.good_tags',
            ]
        )
| bsd-3-clause |
HailStorm32/Q.bo_stacks | qbo_webi/build/catkin_generated/installspace/_setup_util.py | 45 | 11808 | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
# Marker file whose presence identifies a directory as a catkin workspace.
CATKIN_MARKER_FILE = '.catkin'

# Platform detection used throughout this script.
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')

# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
    'CMAKE_PREFIX_PATH': '',
    'CPATH': 'include',
    # macOS uses DYLD_LIBRARY_PATH where Linux uses LD_LIBRARY_PATH
    'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': 'lib',
    'PATH': 'bin',
    'PKG_CONFIG_PATH': 'lib/pkgconfig',
    'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
    '''
    Generate shell code that resets the environment variables by undoing
    the modifications based on all workspaces in CMAKE_PREFIX_PATH.
    Modifications performed by environment hooks are not covered.
    '''
    lines = []
    # roll back against a snapshot so earlier updates to ``environ`` in
    # this loop cannot influence later rollbacks
    snapshot = copy.copy(environ)
    for name in sorted(env_var_subfolders.keys()):
        rolled_back = _rollback_env_variable(snapshot, name, env_var_subfolders[name])
        if rolled_back is None:
            continue
        environ[name] = rolled_back
        lines.append(assignment(name, rolled_back))
    if lines:
        lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
    return lines
def _rollback_env_variable(environ, name, subfolder):
    '''
    For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.

    :param subfolder: str '' or subfoldername that may start with '/'
    :returns: the updated value of the environment variable.
    '''
    value = environ[name] if name in environ else ''
    env_paths = [path for path in value.split(os.pathsep) if path]
    value_modified = False
    if subfolder:
        # normalize: strip a single leading and trailing (alt)separator
        if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
            subfolder = subfolder[1:]
        if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
            subfolder = subfolder[:-1]
    for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
        path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
        path_to_remove = None
        for env_path in env_paths:
            # compare ignoring a single trailing separator on the env entry
            env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
            if env_path_clean == path_to_find:
                path_to_remove = env_path
                break
        # only the *first* matching entry per workspace is removed
        if path_to_remove:
            env_paths.remove(path_to_remove)
            value_modified = True
    new_value = os.pathsep.join(env_paths)
    # None signals "nothing removed; leave the variable untouched"
    return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
    '''
    Return all catkin workspaces listed in CMAKE_PREFIX_PATH.

    :param include_fuerte: if True, paths starting with '/opt/ros/fuerte'
        also count as workspaces, ``bool``
    :param include_non_existing: if True, paths that do not exist on disk
        also count as workspaces, ``bool``
    '''
    # split CMAKE_PREFIX_PATH, dropping empty entries
    prefix_value = environ.get('CMAKE_PREFIX_PATH', '')
    candidates = [path for path in prefix_value.split(os.pathsep) if path]

    def is_workspace(path):
        # a real workspace carries the catkin marker file
        if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)):
            return True
        if include_fuerte and path.startswith('/opt/ros/fuerte'):
            return True
        return include_non_existing and not os.path.exists(path)

    return [path for path in candidates if is_workspace(path)]
def prepend_env_variables(environ, env_var_subfolders, workspaces):
    '''
    Generate shell code that prepends the workspace folders to the
    relevant environment variables.
    '''
    lines = [comment('prepend folders of workspaces to environment variables')]
    paths = [path for path in workspaces.split(os.pathsep) if path]

    # CMAKE_PREFIX_PATH gets the workspace roots themselves (no subfolder)
    cpp_prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
    lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', cpp_prefix))

    # every other variable gets workspace + its configured subfolder
    for key in sorted(k for k in env_var_subfolders.keys() if k != 'CMAKE_PREFIX_PATH'):
        prefix = _prefix_env_variable(environ, key, paths, env_var_subfolders[key])
        lines.append(prepend(environ, key, prefix))
    return lines
def _prefix_env_variable(environ, name, paths, subfolder):
    '''
    Return the prefix to prepend to the environment variable NAME: every
    path in *paths* (joined with *subfolder*) that is not already present
    in the variable, without duplicates or empty items.
    '''
    existing = [p for p in environ.get(name, '').split(os.pathsep) if p]
    additions = []
    for path in paths:
        candidate = os.path.join(path, subfolder) if subfolder else path
        # exclude any path already in env and any path we already added
        if candidate not in existing and candidate not in additions:
            additions.append(candidate)
    prefix_str = os.pathsep.join(additions)
    # a trailing separator joins the prefix to the pre-existing value
    if prefix_str != '' and existing:
        prefix_str += os.pathsep
    return prefix_str
def assignment(key, value):
    """Return a shell statement assigning *value* to the variable *key*."""
    if IS_WINDOWS:
        # cmd.exe syntax
        return 'set %s=%s' % (key, value)
    # POSIX shell syntax
    return 'export %s="%s"' % (key, value)
def comment(msg):
    """Return *msg* formatted as a shell comment for the target platform."""
    if IS_WINDOWS:
        # cmd.exe comment
        return 'REM %s' % msg
    # POSIX shell comment
    return '# %s' % msg
def prepend(environ, key, prefix):
    """Return a shell statement prepending *prefix* to variable *key*.

    Falls back to a plain assignment when the variable is unset or empty.
    """
    if not environ.get(key):
        return assignment(key, prefix)
    if IS_WINDOWS:
        # %%%% escapes to a literal %% around the variable reference
        return 'set %s=%s%%%s%%' % (key, prefix, key)
    return 'export %s="%s$%s"' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
    '''
    Generate shell code with found environment hooks
    for the all workspaces.
    '''
    lines = []
    lines.append(comment('found environment hooks in workspaces'))

    generic_env_hooks = []
    generic_env_hooks_workspace = []
    specific_env_hooks = []
    specific_env_hooks_workspace = []
    generic_env_hooks_by_filename = {}
    specific_env_hooks_by_filename = {}
    # generic hooks are shell-agnostic (.sh / .bat); specific hooks match
    # the current shell reported via CATKIN_SHELL (e.g. .bash, .zsh)
    generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
    specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
    # remove non-workspace paths
    workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
    # iterate in reverse so hooks from workspaces earlier in the prefix
    # path replace same-named hooks from overlaid (later) workspaces
    for workspace in reversed(workspaces):
        env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
        if os.path.isdir(env_hook_dir):
            for filename in sorted(os.listdir(env_hook_dir)):
                if filename.endswith('.%s' % generic_env_hook_ext):
                    # remove previous env hook with same name if present
                    if filename in generic_env_hooks_by_filename:
                        i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
                        generic_env_hooks.pop(i)
                        generic_env_hooks_workspace.pop(i)
                    # append env hook
                    generic_env_hooks.append(os.path.join(env_hook_dir, filename))
                    generic_env_hooks_workspace.append(workspace)
                    generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
                elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
                    # remove previous env hook with same name if present
                    if filename in specific_env_hooks_by_filename:
                        i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
                        specific_env_hooks.pop(i)
                        specific_env_hooks_workspace.pop(i)
                    # append env hook
                    specific_env_hooks.append(os.path.join(env_hook_dir, filename))
                    specific_env_hooks_workspace.append(workspace)
                    specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
    # generic hooks run before shell-specific ones
    env_hooks = generic_env_hooks + specific_env_hooks
    env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
    count = len(env_hooks)
    lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
    for i in range(count):
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
    return lines
def _parse_arguments(args=None):
    """Parse the command line (or *args*), ignoring unknown options."""
    parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
    parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
    known, _unknown = parser.parse_known_args(args=args)
    return known
if __name__ == '__main__':
    try:
        try:
            args = _parse_arguments()
        except Exception as e:
            print(e, file=sys.stderr)
            sys.exit(1)

        # environment at generation time
        CMAKE_PREFIX_PATH = '/opt/ros/hydro'.split(';')
        # prepend current workspace if not already part of CPP
        base_path = os.path.dirname(__file__)
        if base_path not in CMAKE_PREFIX_PATH:
            CMAKE_PREFIX_PATH.insert(0, base_path)
        CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)

        environ = dict(os.environ)
        lines = []
        # unless --extend was given, first undo previous workspace changes
        if not args.extend:
            lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
        lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
        lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
        print('\n'.join(lines))

        # need to explicitly flush the output
        sys.stdout.flush()
    except IOError as e:
        # and catch potential "broken pipe" if stdout is not writable
        # which can happen when piping the output to a file but the disk is full
        if e.errno == errno.EPIPE:
            print(e, file=sys.stderr)
            sys.exit(2)
        raise

    sys.exit(0)
| lgpl-2.1 |
ypu/tp-qemu | qemu/tests/qemu_nobody.py | 3 | 2970 | import logging
import re
from autotest.client.shared import utils, error
from virttest import env_process
@error.context_aware
def run(test, params, env):
    """
    Check QEMU's -runas privilege drop:

    1) Boot the guest with ``-runas <user>`` appended to the qemu command
       line (user taken from the ``user_runas`` param, default "nobody").
    2) Check that every qemu child process runs with that user's uid/gid.

    (The original docstring said "Check smbios table" -- a copy/paste
    leftover from another test; corrected.)

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def get_user_ugid(username):
        """
        return user uid and gid as a list
        """
        user_uid = utils.system_output("id -u %s" % username).split()
        user_gid = utils.system_output("id -g %s" % username).split()
        return (user_uid, user_gid)

    def get_ugid_from_processid(pid):
        """
        return a list [uid, euid, suid, fsuid, gid, egid, sgid, fsgid] of pid
        """
        grep_ugid_cmd = "cat /proc/%s/status | grep -iE '^(U|G)id'"
        o = utils.system_output(grep_ugid_cmd % pid.strip())
        # /proc/<pid>/status lists "Uid: r e s fs" and "Gid: ..." lines;
        # raw string avoids the invalid '\d' escape of the original
        ugid = re.findall(r"(\d+)", o)
        # real UID, effective UID, saved set UID, and file system UID
        if ugid:
            return ugid
        else:
            raise error.TestError("Could not find the correct UID for process %s"
                                  % pid)

    exec_username = params.get("user_runas", "nobody")

    error.base_context("Run QEMU %s test:" % exec_username)
    error.context("Get the user uid and gid,using 'id -u/g username'")
    (exec_uid, exec_gid) = get_user_ugid(exec_username)

    error.context("Run the qemu as user '%s'" % exec_username)
    logging.info("The user %s :uid='%s', gid='%s'" %
                 (exec_username, exec_uid, exec_gid))

    params["extra_params"] = " -runas %s" % exec_username
    params["start_vm"] = "yes"
    env_process.preprocess_vm(test, params, env, params.get("main_vm"))

    vm = env.get_vm(params["main_vm"])

    failures = []
    for pid in utils.get_children_pids(vm.get_shell_pid()):
        error.context("Get the process '%s' u/gid, using 'cat /proc/%s/status'"
                      % (pid, pid), logging.info)
        qemu_ugid = get_ugid_from_processid(pid)
        logging.info("Process run as uid=%s,euid=%s,suid=%s,fsuid=%s"
                     % tuple(qemu_ugid[0:4]))
        logging.info("Process run as gid=%s,egid=%s,sgid=%s,fsgid=%s"
                     % tuple(qemu_ugid[4:]))

        error.context("Check if the user %s ugid is equal to the process %s"
                      % (exec_username, pid))
        # expected [uid, euid, suid, fsuid, gid, egid, sgid, fsgid]
        user_ugid_extend = exec_uid * 4 + exec_gid * 4
        # direct list comparison replaces the Python-2-only cmp();
        # cmp(a, b) != 0 is exactly a != b
        if user_ugid_extend != qemu_ugid:
            e_msg = ("Process %s error, expect ugid is %s, real is %s"
                     % (pid, user_ugid_extend, qemu_ugid))
            failures.append(e_msg)

    if failures:
        raise error.TestFail("FAIL: Test reported %s failures:\n%s" %
                             (len(failures), "\n".join(failures)))
| gpl-2.0 |
Azulinho/ansible | lib/ansible/modules/cloud/vmware/vmware_guest_tools_wait.py | 44 | 6214 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Philippe Dellaert <philippe@dellaert.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_guest_tools_wait
short_description: Wait for VMware tools to become available
description:
- Wait for VMware tools to become available on the VM and return facts.
version_added: '2.4'
author:
- Philippe Dellaert (@pdellaert) <philippe@dellaert.org>
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
name:
description:
- Name of the VM for which to wait until the tools become available.
- This is required if uuid is not supplied.
name_match:
description:
- If multiple VMs match the name, use the first or last found.
default: 'first'
choices: ['first', 'last']
folder:
description:
- Destination folder, absolute or relative path to find an existing guest.
- This is required if C(name) is supplied.
- The folder should include the datacenter. ESX's datacenter is C(ha-datacenter).
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
default: /vm
uuid:
description:
- UUID of the VM for which to wait until the tools become available, if known. This is VMware's unique identifier.
- This is required if C(name) is not supplied.
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Wait for VMware tools to become available by UUID
vmware_guest_tools_wait:
hostname: 192.168.1.209
username: administrator@vsphere.local
password: vmware
validate_certs: no
uuid: 421e4592-c069-924d-ce20-7e7533fab926
delegate_to: localhost
register: facts
- name: Wait for VMware tools to become available by name
vmware_guest_tools_wait:
hostname: 192.168.1.209
username: administrator@vsphere.local
password: vmware
validate_certs: no
name: test-vm
folder: /datacenter1/vm
delegate_to: localhost
register: facts
'''
RETURN = """
instance:
description: metadata about the virtual machine
returned: always
type: dict
sample: None
"""
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.vmware import connect_to_api, gather_vm_facts, vmware_argument_spec, find_vm_by_id
HAS_PYVMOMI = False
try:
import pyVmomi
from pyVmomi import vim
HAS_PYVMOMI = True
except ImportError:
pass
class PyVmomiHelper(object):
    """Thin wrapper around the vSphere API: locates a VM and polls its
    guest-tools status until the tools report running."""

    def __init__(self, module):
        # Fail early if the optional pyVmomi dependency is missing.
        if not HAS_PYVMOMI:
            module.fail_json(msg='pyvmomi module required')

        self.module = module
        self.params = module.params
        # Live service connection to vCenter/ESXi.
        self.content = connect_to_api(self.module)

    def getvm(self, name=None, uuid=None, folder=None):
        """Find a VM by UUID (preferred) or by name inside a folder.

        Returns the VM object, or None when nothing matched.
        """
        vm = None
        match_first = False
        if uuid:
            vm = find_vm_by_id(self.content, vm_id=uuid, vm_id_type="uuid")
        elif folder and name:
            # 'name_match' decides which VM wins when several share a name.
            if self.params['name_match'] == 'first':
                match_first = True
            vm = find_vm_by_id(self.content, vm_id=name, vm_id_type="inventory_path", folder=folder, match_first=match_first)
        return vm

    def gather_facts(self, vm):
        """Return the standard Ansible fact dict for *vm*."""
        return gather_vm_facts(self.content, vm)

    def wait_for_tools(self, vm, poll=100, sleep=5):
        """Poll until guest tools report 'guestToolsRunning'.

        Tries up to *poll* times with *sleep* seconds between attempts,
        so the timeout message reports poll * sleep seconds.
        Returns an Ansible-style result dict ('failed' True on timeout).
        """
        tools_running = False
        vm_facts = {}
        poll_num = 0
        # Re-resolve the VM by UUID each iteration so a fresh
        # guest_tools_status is read rather than a cached object.
        vm_uuid = vm.config.uuid
        while not tools_running and poll_num <= poll:
            newvm = self.getvm(uuid=vm_uuid)
            vm_facts = self.gather_facts(newvm)
            if vm_facts['guest_tools_status'] == 'guestToolsRunning':
                tools_running = True
            else:
                time.sleep(sleep)
                poll_num += 1

        if not tools_running:
            return {'failed': True, 'msg': 'VMware tools either not present or not running after {0} seconds'.format((poll * sleep))}

        # Reported as 'changed' when we had to wait at least once.
        changed = False
        if poll_num > 0:
            changed = True

        return {'changed': changed, 'failed': False, 'instance': vm_facts}
def main():
    """Module entry point: build the argument spec, locate the VM and
    wait for its guest tools to come up."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        name_match=dict(type='str', default='first'),
        folder=dict(type='str', default='/vm'),
        uuid=dict(type='str'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[['name', 'uuid']],
        # BUG FIX: required_together must be a list of lists.  The flat
        # ['name', 'folder'] made AnsibleModule iterate the characters of
        # each string, so the name/folder pairing was never enforced.
        required_together=[['name', 'folder']],
    )

    # FindByInventoryPath() does not require an absolute path
    # so we should leave the input folder path unmodified
    module.params['folder'] = module.params['folder'].rstrip('/')

    pyv = PyVmomiHelper(module)
    # Check if the VM exists before continuing
    vm = pyv.getvm(name=module.params['name'],
                   folder=module.params['folder'],
                   uuid=module.params['uuid'])

    if not vm:
        vm_id = module.params.get('name') or module.params.get('uuid')
        module.fail_json(msg="Unable to wait for tools for non-existing VM {0:s}".format(vm_id))

    try:
        result = pyv.wait_for_tools(vm)
    except Exception as e:
        module.fail_json(msg="Waiting for tools failed with exception: {0:s}".format(to_native(e)))

    if result['failed']:
        module.fail_json(**result)
    else:
        module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
yoava333/servo | tests/wpt/web-platform-tests/tools/serve/serve.py | 215 | 18394 | # -*- coding: utf-8 -*-
import argparse
import json
import os
import signal
import socket
import sys
import threading
import time
import traceback
import urllib2
import uuid
from collections import defaultdict, OrderedDict
from multiprocessing import Process, Event
from .. import localpaths
import sslutils
from wptserve import server as wptserve, handlers
from wptserve import stash
from wptserve.logger import set_logger
from mod_pywebsocket import standalone as pywebsocket
repo_root = localpaths.repo_root
class WorkersHandler(object):
    """Serve a generated testharness HTML page for ``.worker`` URLs.

    The page simply loads the corresponding ``.worker.js`` script in a
    Worker and forwards its test results.
    """

    _TEMPLATE = """<!doctype html>
<meta charset=utf-8>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new Worker("%s"));
</script>
"""

    def __init__(self):
        self.handler = handlers.handler(self.handle_request)

    def __call__(self, request, response):
        return self.handler(request, response)

    def handle_request(self, request, response):
        # /path/foo.worker -> /path/foo.worker.js
        script_url = request.url_parts.path.replace(".worker", ".worker.js")
        return self._TEMPLATE % (script_url,)
# URL rewrites applied by the HTTP servers: the legacy WebIDLParser.js
# location is mapped onto the current webidl2 library path.
rewrites = [("GET", "/resources/WebIDLParser.js", "/resources/webidl2/lib/webidl2.js")]

# Subdomains every origin is served on; the non-ASCII entries exercise
# IDNA handling (they are punycode-encoded in get_subdomains()).
subdomains = [u"www",
              u"www1",
              u"www2",
              u"天気の良い日",
              u"élève"]
class RoutesBuilder(object):
    """Assemble the ordered route table served by every HTTP server.

    Routes are matched in order: explicit overrides first, then the
    forbidden paths, then static handlers, then mount points.
    """

    def __init__(self):
        # Paths under /tools that must stay reachable despite the blanket
        # /tools/* ban below.
        self.forbidden_override = [("GET", "/tools/runner/*", handlers.file_handler),
                                   ("POST", "/tools/runner/update_manifest.py",
                                    handlers.python_script_handler)]

        # Infrastructure paths that must never be served to tests.
        self.forbidden = [("*", "/_certs/*", handlers.ErrorHandler(404)),
                          ("*", "/tools/*", handlers.ErrorHandler(404)),
                          ("*", "{spec}/tools/*", handlers.ErrorHandler(404)),
                          ("*", "/serve.py", handlers.ErrorHandler(404))]

        self.static = [("GET", "*.worker", WorkersHandler())]

        # url_base -> list of routes; insertion order matters (see
        # get_routes).
        self.mountpoint_routes = OrderedDict()

        self.add_mount_point("/", None)

    def get_routes(self):
        """Return the full route list in matching-priority order."""
        routes = self.forbidden_override + self.forbidden + self.static
        # Using reversed here means that mount points that are added later
        # get higher priority. This makes sense since / is typically added
        # first.
        for item in reversed(self.mountpoint_routes.values()):
            routes.extend(item)
        return routes

    def add_static(self, path, format_args, content_type, route):
        """Serve a single template file at *route*."""
        handler = handlers.StaticHandler(path, format_args, content_type)
        self.static.append((b"GET", str(route), handler))

    def add_mount_point(self, url_base, path):
        """Serve the directory *path* under the URL prefix *url_base*."""
        url_base = "/%s/" % url_base.strip("/") if url_base != "/" else "/"

        self.mountpoint_routes[url_base] = []

        # .asis files are sent verbatim, .py files are executed, anything
        # else is served from disk.
        routes = [("GET", "*.asis", handlers.AsIsHandler),
                  ("*", "*.py", handlers.PythonScriptHandler),
                  ("GET", "*", handlers.FileHandler)]

        for (method, suffix, handler_cls) in routes:
            self.mountpoint_routes[url_base].append(
                (method,
                 b"%s%s" % (str(url_base) if url_base != "/" else "", str(suffix)),
                 handler_cls(base_path=path, url_base=url_base)))
def default_routes():
    """Return the standard route table (RoutesBuilder with only '/')."""
    return RoutesBuilder().get_routes()


def setup_logger(level):
    """Create the module-global 'web-platform-tests' logger.

    Also installs it as the wptserve logger; *level* is a logging level
    name such as "debug" or "info".
    """
    import logging
    global logger
    logger = logging.getLogger("web-platform-tests")
    logging.basicConfig(level=getattr(logging, level.upper()))
    set_logger(logger)
def open_socket(port):
    """Return a listening TCP socket bound to 127.0.0.1:*port*.

    Pass 0 to let the OS pick a free ephemeral port.
    """
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # A fixed port may linger in TIME_WAIT from a previous run; ephemeral
    # ports are always fresh, so SO_REUSEADDR is only set for the former.
    if port != 0:
        listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(('127.0.0.1', port))
    listener.listen(5)
    return listener


def get_port():
    """Ask the OS for a currently free port number and return it."""
    probe = open_socket(0)
    chosen = probe.getsockname()[1]
    logger.debug("Going to use port %s" % chosen)
    probe.close()
    return chosen
class ServerProc(object):
    """Run one server daemon in a child process.

    The child blocks on the shared `stop` event; wait()/kill() set it
    from the parent to shut the daemon down.
    """

    def __init__(self):
        self.proc = None      # multiprocessing.Process wrapper
        self.daemon = None    # the actual server, created in the child
        self.stop = Event()   # parent -> child shutdown signal

    def start(self, init_func, host, port, paths, routes, bind_hostname, external_config,
              ssl_config, **kwargs):
        """Fork a daemonic child that builds and runs the server."""
        self.proc = Process(target=self.create_daemon,
                            args=(init_func, host, port, paths, routes, bind_hostname,
                                  external_config, ssl_config))
        self.proc.daemon = True
        self.proc.start()

    def create_daemon(self, init_func, host, port, paths, routes, bind_hostname,
                      external_config, ssl_config, **kwargs):
        """Child-process body: build the server, run it, wait for stop."""
        try:
            self.daemon = init_func(host, port, paths, routes, bind_hostname, external_config,
                                    ssl_config, **kwargs)
        except socket.error:
            print >> sys.stderr, "Socket error on port %s" % port
            raise
        except:
            # Report the traceback in the child; re-raise so the process
            # exits with a failure.
            print >> sys.stderr, traceback.format_exc()
            raise

        if self.daemon:
            try:
                self.daemon.start(block=False)
                try:
                    # Park until the parent signals shutdown.
                    self.stop.wait()
                except KeyboardInterrupt:
                    pass
            except:
                print >> sys.stderr, traceback.format_exc()
                raise

    def wait(self):
        """Signal shutdown and wait for the child to exit cleanly."""
        self.stop.set()
        self.proc.join()

    def kill(self):
        """Signal shutdown and forcibly terminate the child."""
        self.stop.set()
        self.proc.terminate()
        self.proc.join()

    def is_alive(self):
        return self.proc.is_alive()
def check_subdomains(host, paths, bind_hostname, ssl_config):
    """Start a throwaway HTTP server and probe the host and every subdomain.

    Exits the whole process (sys.exit) when the main host or a subdomain
    cannot be reached — usually a missing /etc/hosts entry.
    """
    port = get_port()
    # NOTE: this local mapping shadows the module-level `subdomains` list.
    subdomains = get_subdomains(host)

    wrapper = ServerProc()
    wrapper.start(start_http_server, host, port, paths, default_routes(), bind_hostname,
                  None, ssl_config)

    # Give the child server up to ~10s to come up.
    connected = False
    for i in range(10):
        try:
            urllib2.urlopen("http://%s:%d/" % (host, port))
            connected = True
            break
        except urllib2.URLError:
            time.sleep(1)

    if not connected:
        logger.critical("Failed to connect to test server on http://%s:%s You may need to edit /etc/hosts or similar" % (host, port))
        sys.exit(1)

    # NOTE(review): the tuple unpack rebinds `host` inside the loop,
    # shadowing the function argument.
    for subdomain, (punycode, host) in subdomains.iteritems():
        domain = "%s.%s" % (punycode, host)
        try:
            urllib2.urlopen("http://%s:%d/" % (domain, port))
        except Exception as e:
            logger.critical("Failed probing domain %s. You may need to edit /etc/hosts or similar." % domain)
            sys.exit(1)

    wrapper.wait()
def get_subdomains(host):
    """Map each module-level subdomain to (punycode_label, host)."""
    #This assumes that the tld is ascii-only or already in punycode
    return {subdomain: (subdomain.encode("idna"), host)
            for subdomain in subdomains}
def start_servers(host, ports, paths, routes, bind_hostname, external_config, ssl_config,
                  **kwargs):
    """Spawn one ServerProc per configured (scheme, port) pair.

    Returns a dict mapping scheme to a list of (port, ServerProc).
    """
    servers = defaultdict(list)
    # NOTE(review): the loop variable `ports` shadows the parameter.
    for scheme, ports in ports.iteritems():
        # http gets two ports, every other scheme exactly one.
        assert len(ports) == {"http":2}.get(scheme, 1)
        for port in ports:
            if port is None:
                # Scheme disabled (e.g. https without an SSL environment).
                continue
            init_func = {"http":start_http_server,
                         "https":start_https_server,
                         "ws":start_ws_server,
                         "wss":start_wss_server}[scheme]

            server_proc = ServerProc()
            server_proc.start(init_func, host, port, paths, routes, bind_hostname,
                              external_config, ssl_config, **kwargs)
            servers[scheme].append((port, server_proc))

    return servers
def start_http_server(host, port, paths, routes, bind_hostname, external_config, ssl_config,
                      **kwargs):
    """Create (but do not start) a plain-HTTP wptserve daemon.

    ssl_config is accepted for signature compatibility and ignored.
    """
    return wptserve.WebTestHttpd(host=host,
                                 port=port,
                                 doc_root=paths["doc_root"],
                                 routes=routes,
                                 rewrites=rewrites,
                                 bind_hostname=bind_hostname,
                                 config=external_config,
                                 use_ssl=False,
                                 key_file=None,
                                 certificate=None,
                                 latency=kwargs.get("latency"))


def start_https_server(host, port, paths, routes, bind_hostname, external_config, ssl_config,
                       **kwargs):
    """Create (but do not start) an HTTPS wptserve daemon using the key
    and certificate from ssl_config."""
    return wptserve.WebTestHttpd(host=host,
                                 port=port,
                                 doc_root=paths["doc_root"],
                                 routes=routes,
                                 rewrites=rewrites,
                                 bind_hostname=bind_hostname,
                                 config=external_config,
                                 use_ssl=True,
                                 key_file=ssl_config["key_path"],
                                 certificate=ssl_config["cert_path"],
                                 encrypt_after_connect=ssl_config["encrypt_after_connect"],
                                 latency=kwargs.get("latency"))
class WebSocketDaemon(object):
    """Wrap a pywebsocket standalone server (ws or, with ssl_config, wss).

    The server is configured through pywebsocket's command-line parser
    rather than its main(), so we keep a handle on the server instance.
    """

    def __init__(self, host, port, doc_root, handlers_root, log_level, bind_hostname,
                 ssl_config):
        self.host = host
        cmd_args = ["-p", port,
                    "-d", doc_root,
                    "-w", handlers_root,
                    "--log-level", log_level]

        if ssl_config is not None:
            # This is usually done through pywebsocket.main, however we're
            # working around that to get the server instance and manually
            # setup the wss server.
            if pywebsocket._import_ssl():
                tls_module = pywebsocket._TLS_BY_STANDARD_MODULE
            elif pywebsocket._import_pyopenssl():
                tls_module = pywebsocket._TLS_BY_PYOPENSSL
            else:
                print "No SSL module available"
                sys.exit(1)

            cmd_args += ["--tls",
                         "--private-key", ssl_config["key_path"],
                         "--certificate", ssl_config["cert_path"],
                         "--tls-module", tls_module]

        if (bind_hostname):
            cmd_args = ["-H", host] + cmd_args

        opts, args = pywebsocket._parse_args_and_config(cmd_args)
        opts.cgi_directories = []
        opts.is_executable_method = None
        self.server = pywebsocket.WebSocketServer(opts)
        # All listening sockets must share one port; remember it.
        ports = [item[0].getsockname()[1] for item in self.server._sockets]
        assert all(item == ports[0] for item in ports)
        self.port = ports[0]
        self.started = False
        self.server_thread = None

    def start(self, block=False):
        """Serve requests, either on this thread (block=True) or a daemon
        thread."""
        self.started = True
        if block:
            self.server.serve_forever()
        else:
            self.server_thread = threading.Thread(target=self.server.serve_forever)
            self.server_thread.setDaemon(True)  # don't hang on exit
            self.server_thread.start()

    def stop(self):
        """
        Stops the server.

        If the server is not running, this method has no effect.
        """
        # NOTE(review): self.server is set to None here, so a stopped
        # daemon cannot be start()ed again — confirm that is intended.
        if self.started:
            try:
                self.server.shutdown()
                self.server.server_close()
                self.server_thread.join()
                self.server_thread = None
            except AttributeError:
                pass
            self.started = False
        self.server = None
def start_ws_server(host, port, paths, routes, bind_hostname, external_config, ssl_config,
                    **kwargs):
    """Create an unencrypted WebSocket daemon.

    ssl_config is accepted for signature compatibility but deliberately
    not forwarded: the plain-ws server never uses TLS.
    """
    return WebSocketDaemon(host, str(port), repo_root,
                           paths["ws_doc_root"], "debug", bind_hostname,
                           ssl_config=None)


def start_wss_server(host, port, paths, routes, bind_hostname, external_config, ssl_config,
                     **kwargs):
    """Create a TLS WebSocket daemon using the supplied ssl_config."""
    return WebSocketDaemon(host, str(port), repo_root,
                           paths["ws_doc_root"], "debug", bind_hostname,
                           ssl_config)
def get_ports(config, ssl_environment):
    """Resolve the configured port list for every scheme.

    "https"/"wss" ports become None when SSL is unavailable, and the
    "auto" placeholder is replaced with a free OS-assigned port.

    Returns a dict mapping scheme -> list of ports (int or None).
    """
    rv = defaultdict(list)
    # .items() works on Python 2 and 3 (the original .iteritems() is
    # Python-2-only); the unused enumerate index and the no-op
    # "else: port = port" branch are gone.
    for scheme, ports in config["ports"].items():
        for port in ports:
            if scheme in ["wss", "https"] and not ssl_environment.ssl_enabled:
                port = None
            if port == "auto":
                port = get_port()
            rv[scheme].append(port)
    return rv
def normalise_config(config, ports):
    """Build the external config dict handed to every server process.

    Returns {"host", "domains", "ports"} where domains maps each
    subdomain label to its fully-qualified name ("" maps to the bare
    host) and ports is a copy of the resolved port mapping.
    """
    host = config["external_host"] if config["external_host"] else config["host"]
    domains = get_subdomains(host)

    # Join (punycode, host) pairs into full domain names.
    for key, value in domains.items():
        domains[key] = ".".join(value)

    # The empty label is the host itself.
    domains[""] = host

    # Copy the mapping so callers cannot mutate the input.  (The original
    # built this exact dict twice; once is enough.)
    ports_ = {}
    for scheme, ports_used in ports.items():
        ports_[scheme] = ports_used

    return {"host": host,
            "domains": domains,
            "ports": ports_}
def get_ssl_config(config, external_domains, ssl_environment):
    """Build the SSL settings dict shared by all server processes."""
    # The environment hands back the key/cert valid for every domain.
    paths = ssl_environment.host_cert_path(external_domains)
    ssl_settings = {"key_path": paths[0],
                    "cert_path": paths[1],
                    "encrypt_after_connect": config["ssl"]["encrypt_after_connect"]}
    return ssl_settings
def start(config, ssl_environment, routes, **kwargs):
    """Resolve ports and domains from *config* and launch all servers.

    Returns (external_config, servers) where servers maps scheme to a
    list of (port, ServerProc) pairs.
    """
    host = config["host"]
    # NOTE(review): `domains` is computed here but never used.
    domains = get_subdomains(host)
    ports = get_ports(config, ssl_environment)
    bind_hostname = config["bind_hostname"]

    paths = {"doc_root": config["doc_root"],
             "ws_doc_root": config["ws_doc_root"]}

    external_config = normalise_config(config, ports)

    ssl_config = get_ssl_config(config, external_config["domains"].values(), ssl_environment)

    if config["check_subdomains"]:
        # Exits the process when the host/subdomains do not resolve.
        check_subdomains(host, paths, bind_hostname, ssl_config)

    servers = start_servers(host, ports, paths, routes, bind_hostname, external_config,
                            ssl_config, **kwargs)

    return external_config, servers
def iter_procs(servers):
    """Yield the child process of every server, across all schemes."""
    # (The original loop variable shadowed the `servers` argument.)
    for scheme_servers in servers.values():
        for _port, server in scheme_servers:
            yield server.proc
def value_set(config, key):
    """True when *key* is present in *config* with a non-None value."""
    return config.get(key) is not None


def get_value_or_default(config, key, default=None):
    """Return config[key] when set (non-None), otherwise *default*."""
    if value_set(config, key):
        return config[key]
    return default


def set_computed_defaults(config):
    """Fill in doc_root / ws_doc_root when the user did not supply them."""
    if not value_set(config, "doc_root"):
        config["doc_root"] = repo_root

    if not value_set(config, "ws_doc_root"):
        base = get_value_or_default(config, "doc_root", default=repo_root)
        # WebSocket handlers live under <doc_root>/websockets/handlers.
        config["ws_doc_root"] = os.path.join(base, "websockets", "handlers")
def merge_json(base_obj, override_obj):
    """Recursively overlay *override_obj* on *base_obj* and return a new dict.

    Nested dicts are merged key by key; any other overridden value
    replaces the base value wholesale.

    NOTE(review): keys that appear only in *override_obj* are silently
    dropped — the base document effectively defines the schema.  Confirm
    this is intended before relying on it.
    """
    rv = {}
    # .items() instead of the Python-2-only .iteritems().
    for key, value in base_obj.items():
        if key not in override_obj:
            rv[key] = value
        elif isinstance(value, dict):
            rv[key] = merge_json(value, override_obj[key])
        else:
            rv[key] = override_obj[key]
    return rv
def get_ssl_environment(config):
    """Instantiate the configured SSL environment implementation."""
    implementation_type = config["ssl"]["type"]
    cls = sslutils.environments[implementation_type]
    try:
        kwargs = config["ssl"][implementation_type].copy()
    except KeyError:
        # NOTE(review): this fires when the per-type options section is
        # missing, not when the type itself is unknown — the message may
        # be misleading.  (Typo fixed: "vaid" -> "valid".)
        raise ValueError("%s is not a valid ssl type." % implementation_type)
    return cls(logger, **kwargs)
def load_config(default_path, override_path=None, **kwargs):
    """Load the default config and overlay optional override files.

    Order of precedence (lowest to highest): *default_path*, then
    *override_path* (if given and present), then kwargs["config_path"],
    then explicit doc_root / ws_doc_root keyword overrides.  Raises
    ValueError when a required path does not exist.
    """
    if os.path.exists(default_path):
        with open(default_path) as f:
            base_obj = json.load(f)
    else:
        raise ValueError("Config path %s does not exist" % default_path)

    # BUG FIX: guard against override_path being None (the declared
    # default) — os.path.exists(None) raises TypeError.
    if override_path and os.path.exists(override_path):
        with open(override_path) as f:
            override_obj = json.load(f)
    else:
        override_obj = {}
    rv = merge_json(base_obj, override_obj)

    if kwargs.get("config_path"):
        other_path = os.path.abspath(os.path.expanduser(kwargs.get("config_path")))
        if os.path.exists(other_path):
            base_obj = rv
            with open(other_path) as f:
                override_obj = json.load(f)
            rv = merge_json(base_obj, override_obj)
        else:
            raise ValueError("Config path %s does not exist" % other_path)

    # Command-line path overrides must point at existing directories.
    overriding_path_args = [("doc_root", "Document root"),
                            ("ws_doc_root", "WebSockets document root")]
    for key, title in overriding_path_args:
        value = kwargs.get(key)
        if value is None:
            continue
        value = os.path.abspath(os.path.expanduser(value))
        if not os.path.exists(value):
            raise ValueError("%s path %s does not exist" % (title, value))
        rv[key] = value

    set_computed_defaults(rv)
    return rv
def get_parser():
    """Build the command-line parser for the standalone server."""
    parser = argparse.ArgumentParser()
    flags = [
        ("--latency",
         dict(type=int,
              help="Artificial latency to add before sending http responses, in ms")),
        ("--config",
         dict(action="store", dest="config_path",
              help="Path to external config file")),
        ("--doc_root",
         dict(action="store", dest="doc_root",
              help="Path to document root. Overrides config.")),
        ("--ws_doc_root",
         dict(action="store", dest="ws_doc_root",
              help="Path to WebSockets document root. Overrides config.")),
    ]
    for flag, options in flags:
        parser.add_argument(flag, **options)
    return parser
def main():
    """Entry point: load config, start every server, run until interrupted."""
    kwargs = vars(get_parser().parse_args())
    config = load_config("config.default.json",
                         "config.json",
                         **kwargs)

    setup_logger(config["log_level"])

    # The stash server must outlive the test servers, hence the nesting.
    with stash.StashServer((config["host"], get_port()), authkey=str(uuid.uuid4())):
        with get_ssl_environment(config) as ssl_env:
            config_, servers = start(config, ssl_env, default_routes(), **kwargs)

            try:
                # Keep the parent alive while any child server is running.
                while any(item.is_alive() for item in iter_procs(servers)):
                    for item in iter_procs(servers):
                        item.join(1)
            except KeyboardInterrupt:
                logger.info("Shutting down")
| mpl-2.0 |
flumotion-mirror/flumotion | flumotion/admin/connections.py | 3 | 9761 | # -*- Mode: Python; fill-column: 80 -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
"""recent connections"""
import datetime
import fnmatch
import os
from xml.dom import minidom, Node
from flumotion.common import log, common, xdg
from flumotion.common.connection import PBConnectionInfo, parsePBConnectionInfo
from flumotion.common.errors import OptionError
from flumotion.configure import configure
from flumotion.twisted.pb import Authenticator
__version__ = "$Rev$"
class ConnectionInfo(object):
    """
    I wrap the information contained in a single connection file entry.

    Every field is kept as a string because any of them may contain a
    shell glob; use asPBConnectionInfo() once the values are literal.
    """

    def __init__(self, host, port, use_insecure, user, passwd, manager):
        (self.host, self.port, self.use_insecure,
         self.user, self.passwd, self.manager) = (
            host, port, use_insecure, user, passwd, manager)

    def asPBConnectionInfo(self):
        """
        Return a L{PBConnectionInfo} built from my state.

        Raises ValueError when my state still contains shell globs.
        """
        looks_globbed = ('*' in self.host) or (
            self.use_insecure not in ('0', '1'))
        if looks_globbed:
            raise ValueError("Shell glob in connection info")
        authenticator = Authenticator(username=self.user,
                                      password=self.passwd)
        # use_insecure == '0' means the connection should use SSL.
        return PBConnectionInfo(self.host, int(self.port),
                                self.use_insecure == '0', authenticator)

    def __str__(self):
        return '%s@%s:%s' % (self.user, self.host, self.port)
class RecentConnection(object):
    """
    I am an object representing a recent connection.
    You can access some of my state and update the timestamp
    (eg, when I was last connected to) by calling L{updateTimestamp}.

    @ivar name:      name of the recent connection usually host:port
    @type name:      string
    @ivar host:      hostname
    @type host:      string
    @ivar filename:  filename of the connection
    @type filename:  string
    @ivar info:      connection info
    @type info:      L{PBConnectionInfo}
    @ivar timestamp: timestamp
    @type timestamp: datetime.datetime
    """

    def __init__(self, host, filename, info):
        self.name = str(info)
        self.host = host
        self.filename = filename
        # Raises ValueError if info still contains shell globs.
        self.info = info.asPBConnectionInfo()
        self.manager = info.manager
        # NOTE(review): st_ctime is metadata-change time on POSIX, not
        # creation time — confirm this is the intended "last used" proxy.
        self.timestamp = datetime.datetime.fromtimestamp(
            os.stat(filename).st_ctime)

    def updateTimestamp(self):
        # Touch the file so it sorts as most recently used.
        os.utime(self.filename, None)

    def asConnectionInfo(self):
        """
        Return a L{ConnectionInfo} object constructed from my state.
        """
        info = self.info
        return ConnectionInfo(info.host, str(info.port),
                              info.use_ssl and '0' or '1',
                              info.authenticator.username,
                              info.authenticator.password, '')
def _getRecentFilenames():
    """Yield the full path of every '.connection' file in the registry dir."""
    common.ensureDir(configure.registrydir, "registry dir")

    for filename in os.listdir(configure.registrydir):
        filename = os.path.join(configure.registrydir, filename)
        if filename.endswith('.connection'):
            yield filename


def hasRecentConnections():
    """
    Returns if we have at least one recent connection

    @returns: if we have a recent connection
    @rtype: bool
    """
    # BUG FIX: generator.next() is Python-2-only; iterating the generator
    # is version-agnostic and avoids the StopIteration dance entirely.
    for _ in _getRecentFilenames():
        return True
    return False
def _parseConnection(element):
    """Build a L{ConnectionInfo} from a <connection> DOM element."""
    state = {}
    for childNode in element.childNodes:
        # Skip the whitespace text nodes between tags and any comments;
        # every remaining child is an element like <host>...</host>.
        if (childNode.nodeType != Node.TEXT_NODE and
                childNode.nodeType != Node.COMMENT_NODE):
            state[childNode.nodeName] = childNode.childNodes[0].wholeText
    return ConnectionInfo(state['host'], state['port'], state['use_insecure'],
                          state['user'], state['passwd'], state['manager'])


def _parseSingleConnectionFile(filename):
    """Parse a file whose document element is one <connection>."""
    tree = minidom.parse(filename)
    return _parseConnection(tree.documentElement)


def _parseMultipleConnectionsFile(filename):
    """Parse a file containing any number of <connection> elements.

    Returns a list.  (map() returned a list on Python 2 but an iterator
    on Python 3; a list comprehension pins the list behaviour on both.)
    """
    tree = minidom.parse(filename)
    return [_parseConnection(node)
            for node in tree.getElementsByTagName('connection')]
def getRecentConnections():
    """
    Fetches a list of recently used connections

    @returns: recently used connections
    @rtype: list of L{RecentConnection}
    """
    recentFilenames = _getRecentFilenames()
    recentConnections = []
    # reverse-sorted so lexicographically-largest filenames come first.
    for filename in sorted(recentFilenames, reverse=True):
        try:
            state = _parseSingleConnectionFile(filename)
            recentConnections.append(
                RecentConnection(str(state),
                                 filename=filename,
                                 info=state))
        # BUG FIX: "except Exception, e" is Python-2-only syntax; the
        # "as" form works on Python 2.6+ and Python 3.  A corrupt file
        # must not break the whole list.
        except Exception as e:
            log.warning('connections', 'Error parsing %s: %r', filename, e)
    return recentConnections


def getDefaultConnections():
    """
    Fetches a list of default connections.

    @returns: default connections
    @rtype: list of L{ConnectionInfo}
    """
    filename = xdg.config_read_path('connections')
    if not filename:
        return []

    try:
        return _parseMultipleConnectionsFile(filename)
    except Exception as e:
        log.warning('connections', 'Error parsing %s: %r', filename, e)
    return []
def updateFromConnectionList(info, connections, match_glob=False):
    """
    Fill in a missing username and password on *info* from the first
    compatible entry in *connections*.

    @param info:        connection info to complete
    @type  info:        L{PBConnectionInfo}
    @param connections: recent or default connections to draw from
    @type  connections: a list of L{ConnectionInfo}
    @param match_glob:  treat candidate fields as shell globs when
                        comparing against info
    @type  match_glob:  boolean

    @returns: info, updated in place
    """

    def _fieldsMatch(value, candidate_value):
        # The candidate side may be a shell glob when match_glob is set.
        if match_glob:
            return fnmatch.fnmatch(value, candidate_value)
        return value == candidate_value

    def _isCompatible(candidate):
        if not _fieldsMatch(info.host, candidate.host):
            return False
        if not _fieldsMatch(str(info.port), candidate.port):
            return False
        insecure_flag = info.use_ssl and '0' or '1'
        if not _fieldsMatch(insecure_flag, candidate.use_insecure):
            return False
        username = info.authenticator.username
        if username and not _fieldsMatch(username, candidate.user):
            return False
        # The password is deliberately never matched: when everything
        # else lines up, the candidate's credentials are what we fill in.
        return True

    for candidate in connections:
        if _isCompatible(candidate):
            if not info.authenticator.username:
                info.authenticator.username = candidate.user
            if not info.authenticator.password:
                info.authenticator.password = candidate.passwd
            break
    return info
def parsePBConnectionInfoRecent(managerString, use_ssl=True,
                                defaultPort=configure.defaultSSLManagerPort):
    """The same as L{flumotion.common.connection.parsePBConnectionInfo},
    but fills in missing information from the recent connections cache or
    from the default user and password definitions file if possible.

    @param managerString: manager string we should connect to
    @type managerString: string
    @param use_ssl: True if we should use ssl
    @type use_ssl: bool
    @param defaultPort: default port to use
    @type defaultPort: int

    @returns: connection info
    @rtype: a L{PBConnectionInfo}
    """
    recent = getRecentConnections()
    if not managerString:
        # No manager given: reuse the most recent connection outright.
        if recent:
            return recent[0].info
        else:
            raise OptionError('No string given and no recent '
                              'connections to use')

    info = parsePBConnectionInfo(managerString, username=None,
                                 password=None,
                                 port=defaultPort,
                                 use_ssl=use_ssl)

    # Try to complete missing credentials: first from exact matches in
    # the recent-connections cache ...
    if not (info.authenticator.username and info.authenticator.password):
        recent_infos = [r.asConnectionInfo() for r in recent]
        updateFromConnectionList(info, recent_infos, match_glob=False)

    # ... then from the (glob-capable) defaults file.
    if not (info.authenticator.username and info.authenticator.password):
        defaults = getDefaultConnections()
        updateFromConnectionList(info, defaults, match_glob=True)

    if not (info.authenticator.username and info.authenticator.password):
        raise OptionError('You are connecting to %s for the '
                          'first time; please specify a user and '
                          'password (e.g. user:test@%s).'
                          % (managerString, managerString))
    else:
        return info
| lgpl-2.1 |
richardcs/ansible | test/units/modules/network/f5/test_bigip_log_publisher.py | 21 | 3862 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_log_publisher import ApiParameters
from library.modules.bigip_log_publisher import ModuleParameters
from library.modules.bigip_log_publisher import ModuleManager
from library.modules.bigip_log_publisher import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_log_publisher import ApiParameters
from ansible.modules.network.f5.bigip_log_publisher import ModuleParameters
from ansible.modules.network.f5.bigip_log_publisher import ModuleManager
from ansible.modules.network.f5.bigip_log_publisher import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
# Fixture files live next to this test module; parsed contents are
# memoized in fixture_data, keyed by path.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Load fixture *name*, decoding JSON when possible, with caching."""
    path = os.path.join(fixture_path, name)

    if path in fixture_data:
        return fixture_data[path]

    with open(path) as f:
        data = f.read()

    try:
        data = json.loads(data)
    except Exception:
        # Not JSON: the raw text is returned unchanged.
        pass

    fixture_data[path] = data
    return data
class TestParameters(unittest.TestCase):
    """Unit tests for the bigip_log_publisher parameter adapters."""

    def test_module_parameters(self):
        # Bare destination names must come back /Common/-qualified.
        args = dict(
            name='foo',
            description='my desc',
            destinations=[
                'dest1',
                'dest2'
            ],
            password='password',
            server='localhost',
            user='admin'
        )

        p = ModuleParameters(params=args)
        assert p.name == 'foo'
        assert p.description == 'my desc'
        assert p.destinations == ['/Common/dest1', '/Common/dest2']

    def test_api_parameters(self):
        # API-side data (from a recorded fixture) is already qualified.
        args = load_fixture('load_sys_log_config_publisher_1.json')

        p = ApiParameters(params=args)
        assert p.name == 'foo'
        assert p.description == 'my description'
        assert p.destinations == [
            '/Common/SECURITYLOGSERVERS-LOGGING',
            '/Common/local-db',
            '/Common/local-syslog',
        ]
class TestManager(unittest.TestCase):
    """Behavioural test of ModuleManager.exec_module for the create path."""

    def setUp(self):
        self.spec = ArgumentSpec()

    # NOTE(review): *args suggests @patch decorators were removed at some
    # point; the extra arguments are unused — confirm.
    def test_create_policy(self, *args):
        set_module_args(dict(
            name="foo",
            description='foo description',
            destinations=[
                'dest1',
                'dest2'
            ],
            state='present',
            password='password',
            server='localhost',
            user='admin'
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)
        mm.exists = Mock(return_value=False)
        mm.create_on_device = Mock(return_value=True)

        results = mm.exec_module()

        assert results['changed'] is True
        assert results['description'] == 'foo description'
        assert results['destinations'] == ['/Common/dest1', '/Common/dest2']
| gpl-3.0 |
Ervii/garage-time | pajamas/src/python/twitter/common/java/java_types.py | 16 | 3364 | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import struct
class JavaNativeType(object):
    """Base class for Java primitive types decoded from big-endian bytes.

    Subclasses decode their value into self._value in __init__ and report
    their encoded width via the static size() method.
    """

    class ParseException(Exception):
        """Raised when a byte buffer cannot be deserialized."""

    def __init__(self, data):
        pass

    def __call__(self):
        return self._value

    def value(self):
        """Return the decoded Python value."""
        return self._value

    def get(self):
        return self.value()

    @staticmethod
    def size():
        """Encoded width in bytes; subclasses must override.

        Raises NotImplementedError (previously a bare Exception; callers
        catching Exception still catch this).
        """
        raise NotImplementedError("Unimplemented!")

    @staticmethod
    def parse(data, *type_args):
        """Decode the given types from the front of *data*.

        Returns (values, remaining_bytes).  Raises ParseException when a
        requested type is not a JavaNativeType or the buffer is too short.
        """
        # First pass: validate the types and the buffer length before
        # decoding anything, so errors leave no partial results.
        total_size = 0
        for t in type_args:
            if not issubclass(t, JavaNativeType):
                raise JavaNativeType.ParseException("Not a valid JavaNativeType: %s" % t)
            total_size += t.size()
            if total_size > len(data):
                raise JavaNativeType.ParseException("Not enough data to deserialize %s" % repr(type_args))
        # Second pass: decode each value from its own slice.
        offset = 0
        parsed_types = []
        for t in type_args:
            parsed_types.append(t(data[offset:offset + t.size()]).value())
            offset += t.size()
        return parsed_types, data[total_size:]
class u1(JavaNativeType):
  """Unsigned 1-byte (big-endian) Java integer."""
  def __init__(self, data):
    super(u1, self).__init__(data)
    (self._value,) = struct.unpack('>B', data[:1])
  @staticmethod
  def size():
    return 1
class u2(JavaNativeType):
  """Unsigned 2-byte (big-endian) Java integer."""
  def __init__(self, data):
    super(u2, self).__init__(data)
    (self._value,) = struct.unpack(">H", data[:2])
  @staticmethod
  def size():
    return 2
class s2(JavaNativeType):
  """Signed 2-byte (big-endian) Java integer."""
  def __init__(self, data):
    super(s2, self).__init__(data)
    (self._value,) = struct.unpack(">h", data[:2])
  @staticmethod
  def size():
    return 2
class u4(JavaNativeType):
  """Unsigned 4-byte (big-endian) Java integer."""
  def __init__(self, data):
    super(u4, self).__init__(data)
    (self._value,) = struct.unpack(">L", data[:4])
  @staticmethod
  def size():
    return 4
class s4(JavaNativeType):
  """Signed 4-byte (big-endian) Java integer."""
  def __init__(self, data):
    super(s4, self).__init__(data)
    (self._value,) = struct.unpack(">l", data[:4])
  @staticmethod
  def size():
    return 4
class s8(JavaNativeType):
  """Signed 8-byte (big-endian) Java integer."""
  def __init__(self, data):
    super(s8, self).__init__(data)
    (self._value,) = struct.unpack(">q", data[:8])
  @staticmethod
  def size():
    return 8
class f4(JavaNativeType):
  """4-byte (big-endian) IEEE 754 single-precision Java float."""
  def __init__(self, data):
    super(f4, self).__init__(data)
    (self._value,) = struct.unpack(">f", data[:4])
  @staticmethod
  def size():
    return 4
class f8(JavaNativeType):
  """8-byte (big-endian) IEEE 754 double-precision Java float."""
  def __init__(self, data):
    super(f8, self).__init__(data)
    (self._value,) = struct.unpack(">d", data[:8])
  @staticmethod
  def size():
    return 8
| apache-2.0 |
scorphus/django | django/http/multipartparser.py | 332 | 24331 | """
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
from __future__ import unicode_literals
import base64
import binascii
import cgi
import sys
from django.conf import settings
from django.core.exceptions import SuspiciousMultipartForm
from django.core.files.uploadhandler import (
SkipFile, StopFutureHandlers, StopUpload,
)
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import unquote
from django.utils.text import unescape_entities
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
    """Raised for invalid or malformed multipart/form-data input."""
    pass
class InputStreamExhausted(Exception):
    """
    No more reads are allowed from this device.
    Raised by input streams to signal EOF to the multipart machinery.
    """
    pass
# Item types yielded by the parser for each multipart sub-stream.
RAW = "raw"      # no parsable header was found
FILE = "file"    # content-disposition carries a filename
FIELD = "field"  # plain form field
# base64.b64decode raises TypeError on Python 2 but binascii.Error on Python 3.
_BASE64_DECODE_ERROR = TypeError if six.PY2 else binascii.Error
class MultiPartParser(object):
    """
    A rfc2388 multipart/form-data parser.
    ``MultiPartParser.parse()`` reads the input stream in ``chunk_size`` chunks
    and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
    """
    def __init__(self, META, input_data, upload_handlers, encoding=None):
        """
        Initialize the MultiPartParser object.
        :META:
            The standard ``META`` dictionary in Django request objects.
        :input_data:
            The raw post data, as a file-like object.
        :upload_handlers:
            A list of UploadHandler instances that perform operations on the uploaded
            data.
        :encoding:
            The encoding with which to treat the incoming data.
        """
        #
        # Content-Type should contain multipart and the boundary information.
        #
        content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
        if not content_type.startswith('multipart/'):
            raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
        # Parse the header to get the boundary to split the parts.
        ctypes, opts = parse_header(content_type.encode('ascii'))
        boundary = opts.get('boundary')
        if not boundary or not cgi.valid_boundary(boundary):
            raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)
        # Content-Length should contain the length of the body we are about
        # to receive.
        try:
            content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH', 0)))
        except (ValueError, TypeError):
            content_length = 0
        if content_length < 0:
            # This means we shouldn't continue...raise an error.
            raise MultiPartParserError("Invalid content length: %r" % content_length)
        if isinstance(boundary, six.text_type):
            boundary = boundary.encode('ascii')
        self._boundary = boundary
        self._input_data = input_data
        # For compatibility with low-level network APIs (with 32-bit integers),
        # the chunk size should be < 2^31, but still divisible by 4.
        possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
        self._chunk_size = min([2 ** 31 - 4] + possible_sizes)
        self._meta = META
        self._encoding = encoding or settings.DEFAULT_CHARSET
        self._content_length = content_length
        self._upload_handlers = upload_handlers
    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.
        Returns a tuple containing the POST and FILES dictionary, respectively.
        """
        # We have to import QueryDict down here to avoid a circular import.
        from django.http import QueryDict
        encoding = self._encoding
        handlers = self._upload_handlers
        # HTTP spec says that Content-Length >= 0 is valid
        # handling content-length == 0 before continuing
        if self._content_length == 0:
            return QueryDict('', encoding=self._encoding), MultiValueDict()
        # See if any of the handlers take care of the parsing.
        # This allows overriding everything if need be.
        for handler in handlers:
            result = handler.handle_raw_input(self._input_data,
                                              self._meta,
                                              self._content_length,
                                              self._boundary,
                                              encoding)
            # Check to see if it was handled
            if result is not None:
                return result[0], result[1]
        # Create the data structures to be used later.
        self._post = QueryDict('', mutable=True)
        self._files = MultiValueDict()
        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))
        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        # Per-handler running byte counts for the file currently being received.
        counters = [0] * len(handlers)
        try:
            for item_type, meta_data, field_stream in Parser(stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None
                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    continue
                # Only the base64 transfer encoding is special-cased below.
                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_text(field_name, encoding, errors='replace')
                if item_type == FIELD:
                    # This is a post field, we can just set it in the post
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read()
                        try:
                            data = base64.b64decode(raw_data)
                        except _BASE64_DECODE_ERROR:
                            data = raw_data
                    else:
                        data = field_stream.read()
                    self._post.appendlist(field_name,
                                          force_text(data, encoding, errors='replace'))
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')
                    if not file_name:
                        continue
                    file_name = force_text(file_name, encoding, errors='replace')
                    file_name = self.IE_sanitize(unescape_entities(file_name))
                    content_type, content_type_extra = meta_data.get('content-type', ('', {}))
                    content_type = content_type.strip()
                    charset = content_type_extra.get('charset')
                    try:
                        content_length = int(meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None
                    counters = [0] * len(handlers)
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(field_name, file_name,
                                                 content_type, content_length,
                                                 charset, content_type_extra)
                            except StopFutureHandlers:
                                break
                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding
                                # We should always decode base64 chunks by multiple of 4,
                                # ignoring whitespace.
                                stripped_chunk = b"".join(chunk.split())
                                remaining = len(stripped_chunk) % 4
                                while remaining != 0:
                                    over_chunk = field_stream.read(4 - remaining)
                                    stripped_chunk += b"".join(over_chunk.split())
                                    remaining = len(stripped_chunk) % 4
                                try:
                                    chunk = base64.b64decode(stripped_chunk)
                                except Exception as e:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    msg = "Could not decode base64 data: %r" % e
                                    six.reraise(MultiPartParserError, MultiPartParserError(msg), sys.exc_info()[2])
                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(chunk,
                                                                   counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # If the chunk received by the handler is None, then don't continue.
                                    break
                    except SkipFile:
                        self._close_files()
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD or a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            self._close_files()
            if not e.connection_reset:
                exhaust(self._input_data)
        else:
            # Make sure that the request data is all fed
            exhaust(self._input_data)
        # Signal that the upload has completed.
        for handler in handlers:
            retval = handler.upload_complete()
            if retval:
                break
        return self._post, self._files
    def handle_file_complete(self, old_field_name, counters):
        """
        Handle all the signaling that takes place when a file is complete.
        """
        for i, handler in enumerate(self._upload_handlers):
            file_obj = handler.file_complete(counters[i])
            if file_obj:
                # If it returns a file object, then set the files dict.
                self._files.appendlist(
                    force_text(old_field_name, self._encoding, errors='replace'),
                    file_obj)
                break
    def IE_sanitize(self, filename):
        """Cleanup filename from Internet Explorer full paths."""
        return filename and filename[filename.rfind("\\") + 1:].strip()
    def _close_files(self):
        # Free up all file handles.
        # FIXME: this currently assumes that upload handlers store the file as 'file'
        # We should document that... (Maybe add handler.free_file to complement new_file)
        for handler in self._upload_handlers:
            if hasattr(handler, 'file'):
                handler.file.close()
class LazyStream(six.Iterator):
    """
    The LazyStream wrapper allows one to get and "unget" bytes from a stream.
    Given a producer object (an iterator that yields bytestrings), the
    LazyStream object will support iteration, reading, and keeping a "look-back"
    variable in case you need to "unget" some bytes.
    """
    def __init__(self, producer, length=None):
        """
        Every LazyStream must have a producer when instantiated.
        A producer is an iterable that returns a string each time it
        is called.
        """
        self._producer = producer
        self._empty = False
        # Bytes pushed back via unget(); served before fresh producer output.
        self._leftover = b''
        self.length = length
        self.position = 0
        # Bytes still allowed to be read when a length was given (None = unbounded).
        self._remaining = length
        self._unget_history = []
    def tell(self):
        return self.position
    def read(self, size=None):
        def parts():
            remaining = self._remaining if size is None else size
            # do the whole thing in one shot if no limit was provided.
            if remaining is None:
                yield b''.join(self)
                return
            # otherwise do some bookkeeping to return exactly enough
            # of the stream and stashing any extra content we get from
            # the producer
            while remaining != 0:
                assert remaining > 0, 'remaining bytes to read should never go negative'
                try:
                    chunk = next(self)
                except StopIteration:
                    return
                else:
                    emitting = chunk[:remaining]
                    self.unget(chunk[remaining:])
                    remaining -= len(emitting)
                    yield emitting
        out = b''.join(parts())
        return out
    def __next__(self):
        """
        Used when the exact number of bytes to read is unimportant.
        This procedure just returns whatever is chunk is conveniently returned
        from the iterator instead. Useful to avoid unnecessary bookkeeping if
        performance is an issue.
        """
        if self._leftover:
            output = self._leftover
            self._leftover = b''
        else:
            output = next(self._producer)
            # Fresh data from the producer resets the unget() loop detector.
            self._unget_history = []
        self.position += len(output)
        return output
    def close(self):
        """
        Used to invalidate/disable this lazy stream.
        Replaces the producer with an empty list. Any leftover bytes that have
        already been read will still be reported upon read() and/or next().
        """
        self._producer = []
    def __iter__(self):
        return self
    def unget(self, bytes):
        """
        Places bytes back onto the front of the lazy stream.
        Future calls to read() will return those bytes first. The
        stream position and thus tell() will be rewound.
        """
        if not bytes:
            return
        self._update_unget_history(len(bytes))
        self.position -= len(bytes)
        self._leftover = b''.join([bytes, self._leftover])
    def _update_unget_history(self, num_bytes):
        """
        Updates the unget history as a sanity check to see if we've pushed
        back the same number of bytes in one chunk. If we keep ungetting the
        same number of bytes many times (here, 50), we're mostly likely in an
        infinite loop of some sort. This is usually caused by a
        maliciously-malformed MIME request.
        """
        self._unget_history = [num_bytes] + self._unget_history[:49]
        number_equal = len([current_number for current_number in self._unget_history
                            if current_number == num_bytes])
        if number_equal > 40:
            raise SuspiciousMultipartForm(
                "The multipart parser got stuck, which shouldn't happen with"
                " normal uploaded files. Check for malicious upload activity;"
                " if there is none, report this to the Django developers."
            )
class ChunkIter(six.Iterator):
    """
    Iterate over a file-like object in fixed-size chunks.

    Each call to next() performs one read(chunk_size) on the wrapped object
    and yields the result; iteration stops on EOF (an empty read) or when the
    underlying stream raises InputStreamExhausted.
    """
    def __init__(self, flo, chunk_size=64 * 1024):
        self.flo = flo
        self.chunk_size = chunk_size
    def __iter__(self):
        return self
    def __next__(self):
        try:
            data = self.flo.read(self.chunk_size)
        except InputStreamExhausted:
            raise StopIteration()
        if not data:
            raise StopIteration()
        return data
class InterBoundaryIter(six.Iterator):
    """
    A Producer that will iterate over boundaries.
    Each call to next() wraps the shared input stream in a fresh
    BoundaryIter/LazyStream pair covering one multipart sub-part.
    """
    def __init__(self, stream, boundary):
        self._stream = stream
        self._boundary = boundary
    def __iter__(self):
        return self
    def __next__(self):
        # BoundaryIter's constructor probes the stream and raises
        # InputStreamExhausted when no bytes remain, which ends iteration.
        try:
            return LazyStream(BoundaryIter(self._stream, self._boundary))
        except InputStreamExhausted:
            raise StopIteration()
class BoundaryIter(six.Iterator):
    """
    A Producer that is sensitive to boundaries.
    Will happily yield bytes until a boundary is found. Will yield the bytes
    before the boundary, throw away the boundary bytes themselves, and push the
    post-boundary bytes back on the stream.
    The future calls to next() after locating the boundary will raise a
    StopIteration exception.
    """
    def __init__(self, stream, boundary):
        self._stream = stream
        self._boundary = boundary
        self._done = False
        # rollback an additional six bytes because the format is like
        # this: CRLF<boundary>[--CRLF]
        self._rollback = len(boundary) + 6
        # Try to use mx fast string search if available. Otherwise
        # use Python find. Wrap the latter for consistency.
        unused_char = self._stream.read(1)
        if not unused_char:
            raise InputStreamExhausted()
        self._stream.unget(unused_char)
    def __iter__(self):
        return self
    def __next__(self):
        if self._done:
            raise StopIteration()
        stream = self._stream
        rollback = self._rollback
        bytes_read = 0
        chunks = []
        # Buffer more than `rollback` bytes so a boundary split across chunk
        # edges cannot be missed.
        for bytes in stream:
            bytes_read += len(bytes)
            chunks.append(bytes)
            if bytes_read > rollback:
                break
            if not bytes:
                break
        else:
            # The stream is fully consumed; no further reads are possible.
            self._done = True
        if not chunks:
            raise StopIteration()
        chunk = b''.join(chunks)
        boundary = self._find_boundary(chunk, len(chunk) < self._rollback)
        if boundary:
            end, next = boundary
            # Push everything after the boundary back for the next part.
            stream.unget(chunk[next:])
            self._done = True
            return chunk[:end]
        else:
            # make sure we don't treat a partial boundary (and
            # its separators) as data
            if not chunk[:-rollback]:  # and len(chunk) >= (len(self._boundary) + 6):
                # There's nothing left, we should just return and mark as done.
                self._done = True
                return chunk
            else:
                stream.unget(chunk[-rollback:])
                return chunk[:-rollback]
    def _find_boundary(self, data, eof=False):
        """
        Finds a multipart boundary in data.
        Should no boundary exist in the data None is returned instead. Otherwise
        a tuple containing the indices of the following are returned:
        * the end of current encapsulation
        * the start of the next encapsulation
        """
        index = data.find(self._boundary)
        if index < 0:
            return None
        else:
            end = index
            next = index + len(self._boundary)
            # backup over CRLF
            last = max(0, end - 1)
            if data[last:last + 1] == b'\n':
                end -= 1
                last = max(0, end - 1)
            if data[last:last + 1] == b'\r':
                end -= 1
            return end, next
def exhaust(stream_or_iterable):
    """
    Drain an iterator or stream, discarding everything it yields.

    Objects that are not directly iterable are wrapped in a ChunkIter and
    treated as file-like; anything else raises MultiPartParserError.
    """
    try:
        iterator = iter(stream_or_iterable)
    except TypeError:
        # Not iterable: assume a file-like object and read it in chunks.
        iterator = ChunkIter(stream_or_iterable, 16384)
    if iterator is None:
        raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')
    for _unused in iterator:
        pass
def parse_boundary_stream(stream, max_header_size):
    """
    Parses one and exactly one stream that encapsulates a boundary.
    Returns an (item_type, headers, stream) triple, where item_type is one
    of RAW, FIELD or FILE.
    """
    # Stream at beginning of header, look for end of header
    # and parse it if found. The header must fit within one
    # chunk.
    chunk = stream.read(max_header_size)
    # 'find' returns the top of these four bytes, so we'll
    # need to munch them later to prevent them from polluting
    # the payload.
    header_end = chunk.find(b'\r\n\r\n')
    def _parse_header(line):
        # Split one raw header line into (name, (value, params)).
        main_value_pair, params = parse_header(line)
        try:
            name, value = main_value_pair.split(':', 1)
        except ValueError:
            raise ValueError("Invalid header: %r" % line)
        return name, (value, params)
    if header_end == -1:
        # we find no header, so we just mark this fact and pass on
        # the stream verbatim
        stream.unget(chunk)
        return (RAW, {}, stream)
    header = chunk[:header_end]
    # here we place any excess chunk back onto the stream, as
    # well as throwing away the CRLFCRLF bytes from above.
    stream.unget(chunk[header_end + 4:])
    TYPE = RAW
    outdict = {}
    # Eliminate blank lines
    for line in header.split(b'\r\n'):
        # This terminology ("main value" and "dictionary of
        # parameters") is from the Python docs.
        try:
            name, (value, params) = _parse_header(line)
        except ValueError:
            continue
        if name == 'content-disposition':
            TYPE = FIELD
            if params.get('filename'):
                TYPE = FILE
        outdict[name] = value, params
    if TYPE == RAW:
        stream.unget(chunk)
    return (TYPE, outdict, stream)
class Parser(object):
    """
    Iterate over the parts of a multipart body.

    Yields one (item_type, meta_data, stream) triple per boundary-delimited
    sub-part, as produced by parse_boundary_stream().
    """
    def __init__(self, stream, boundary):
        self._stream = stream
        # Multipart separators are the boundary prefixed with two hyphens.
        self._separator = b'--' + boundary
    def __iter__(self):
        for part_stream in InterBoundaryIter(self._stream, self._separator):
            yield parse_boundary_stream(part_stream, 1024)
def parse_header(line):
    """ Parse the header into a key-value.
    Input (line): bytes, output: unicode for key/name, bytes for value which
    will be decoded later
    """
    plist = _parse_header_params(b';' + line)
    # The first element is the header's main value; the rest are parameters.
    key = plist.pop(0).lower().decode('ascii')
    pdict = {}
    for p in plist:
        i = p.find(b'=')
        if i >= 0:
            has_encoding = False
            name = p[:i].strip().lower().decode('ascii')
            if name.endswith('*'):
                # Lang/encoding embedded in the value (like "filename*=UTF-8''file.ext")
                # http://tools.ietf.org/html/rfc2231#section-4
                name = name[:-1]
                if p.count(b"'") == 2:
                    has_encoding = True
            value = p[i + 1:].strip()
            if has_encoding:
                # RFC 2231 extended value: encoding'language'percent-encoded-text.
                encoding, lang, value = value.split(b"'")
                if six.PY3:
                    value = unquote(value.decode(), encoding=encoding.decode())
                else:
                    value = unquote(value).decode(encoding)
            # Strip surrounding double quotes and unescape \\ and \".
            if len(value) >= 2 and value[:1] == value[-1:] == b'"':
                value = value[1:-1]
                value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
            pdict[name] = value
    return key, pdict
def _parse_header_params(s):
plist = []
while s[:1] == b';':
s = s[1:]
end = s.find(b';')
while end > 0 and s.count(b'"', 0, end) % 2:
end = s.find(b';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
| bsd-3-clause |
yohanko88/gem5-DC | src/mem/ExternalSlave.py | 47 | 2722 | # Copyright (c) 2014 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andrew Bardsley
from m5.params import *
from MemObject import MemObject
class ExternalSlave(MemObject):
    """Slave port whose requests are served by an agent registered outside
    gem5; the handler is looked up by name (port_type) at instantiation."""
    type = 'ExternalSlave'
    cxx_header = "mem/external_slave.hh"
    # The single slave port exposed to the gem5 memory system.
    port = SlavePort("Slave port")
    # Address ranges this port responds to; empty by default.
    addr_ranges = VectorParam.AddrRange([], 'Addresses served by'
        ' this port\'s external agent')
    port_type = Param.String('stub', 'Registered external port handler'
        ' to pass this port to in instantiation')
    port_data = Param.String('stub', 'A string to pass to the port'
        ' handler (in a format specific to the handler) to describe how'
        ' the port should be bound/bindable/discoverable')
| bsd-3-clause |
cysuncn/python | crawler/rent/dataAnalyse/ziroomAnalysis.py | 1 | 13803 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 3 12:16:17 2017
@author: zhanglu01
"""
import json
import pandas as pd
import matplotlib.pyplot as plot
import ziroomAnalysis.geohash as geohash
def line_json_load(filename):
    """Load a line-delimited JSON file of rental listings into a DataFrame.

    Each line is one JSON object describing a room. Numeric columns are
    coerced ('price' -> int; 'area', 'lng', 'lat' -> float) and daily rates
    (time_unit == '每天') are converted to a monthly rent by multiplying
    the price by 30.

    Args:
        filename: path to the UTF-8 encoded line-delimited JSON file.
    Returns:
        A pandas DataFrame with one row per listing (empty DataFrame for an
        empty file).
    """
    with open(filename, 'r', encoding='utf-8') as f:
        # Build the frame in one shot instead of the original row-by-row
        # DataFrame.append loop, which was quadratic and is removed in
        # pandas 2.x; blank lines are skipped instead of crashing json.loads.
        records = [json.loads(line) for line in f if line.strip()]
    if not records:
        # Preserve the original behaviour of returning an empty frame.
        return pd.DataFrame()
    df = pd.DataFrame(records)
    df["price"] = df["price"].astype("int")
    df["area"] = df["area"].astype("float")
    df["lng"] = df["lng"].astype("float")
    df["lat"] = df["lat"].astype("float")
    # Normalize daily rates to a monthly rent (30 days), using .loc to avoid
    # the chained-assignment pitfall of the original per-row update.
    daily = df["time_unit"] == "每天"
    df.loc[daily, "price"] = df.loc[daily, "price"] * 30
    return df
filename = 'F:/PyWorkspace/ziroomAnalysis/0729/ziroomBeijing.json'
df = line_json_load(filename)
df = df.drop_duplicates()
# Drop daily rentals, dual north-south orientation, and rows missing floor info.
df = df[(df['time_unit']!='每天') & (df['direction']!='南北') & (df['floorLoc']!='') & (df['floorTotal']!='')]
# Summary statistics for each rental type (rentType).
#df["price_per_m2"] = df["price"]/df["area"]
groups = df.groupby(df["rentType"])
rt_count = groups.size()
rt_mean = groups.mean().rename(columns={'price':'mean_price'})
rt_max = groups.max().rename(columns={'price':'max_price'})
rt_min = groups.min().rename(columns={'price':'min_price'})
rt_median = groups.median().rename(columns={'price':'median_price'})
rentTypeDf = pd.concat([rt_mean["mean_price"],pd.DataFrame(rt_count,columns=["count"]),rt_max["max_price"],rt_min["min_price"],rt_median["median_price"]],axis=1)
#df[df['price']==990]["link"]
############ Shared-rental (rentType == '合') analysis ############
# Count rooms in 100-yuan monthly-rent buckets.
he_intervals = {100*x:0 for x in range(64)}
for price in df[df['rentType']=='合']['price']:
    he_intervals[price//100*100] += 1
plot.bar(he_intervals.keys(), he_intervals.values(), width=100, alpha = .5, color = 'blue')
plot.xlabel(u"月租(元)", fontproperties='SimHei')
plot.ylabel(u"房间数量", fontproperties='SimHei')
plot.show()
# Geohash-encode each listing's coordinates (precision 6) and merge points that
# fall into the same cell, to reduce the number of markers on the heat map.
geohash_dict = dict()
for house in df[df['rentType']=='合'].iterrows():
    geohash_code = geohash.encode(house[1]["lat"], house[1]["lng"], 6)
    if geohash_code in geohash_dict.keys():
        geohash_dict[geohash_code] += 1
    else:
        geohash_dict[geohash_code] = 1
# Substitute the value of he_position_str for the corresponding placeholder in
# "房间数量热力图.html" (room-count heat map).
he_position_str = ""
for code in geohash_dict:
    he_position_str += '{{"lng": {0}, "lat": {1}, "count": {2}}},\n'.format(geohash.decode_exactly(code)[1],geohash.decode_exactly(code)[0],geohash_dict[code])
# Substitute the value of he_position_price_str for the corresponding
# placeholder in "价格在地图上的分布.html" (price distribution on the map);
# the "count" weight encodes the price band (5/10/15).
he_position_price_str = ""
for house in df[df['rentType']=='合'].iterrows():
    if house[1]["price"]<2000:
        he_position_price_str += '{{"lng": {0}, "lat": {1}, "count": {2}}},\n'.format(house[1]["lng"],house[1]["lat"],5)
    elif house[1]["price"]<3000:
        he_position_price_str += '{{"lng": {0}, "lat": {1}, "count": {2}}},\n'.format(house[1]["lng"],house[1]["lat"],10)
    else:
        he_position_price_str += '{{"lng": {0}, "lat": {1}, "count": {2}}},\n'.format(house[1]["lng"],house[1]["lat"],15)
############################ Geographic (lng/lat) KMeans clustering ############################
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
# Elbow method to judge whether a cluster count is appropriate: the sum of
# distances from samples to their cluster center (inertia) should drop sharply
# then level off; pick the cluster count at the knee.
__clfInertia__ = []
for i in range(2,30,1):
    clf = KMeans(n_clusters=i)
    s = clf.fit(df[(df['rentType']=='合') & (df['price']>=3000)][["lng", "lat"]])
    __clfInertia__.append([i, clf.inertia_])
plt.plot([x[0] for x in __clfInertia__], [x[1] for x in __clfInertia__],'b*')
plt.plot([x[0] for x in __clfInertia__], [x[1] for x in __clfInertia__],'r')
# Fit the final KMeans model with the chosen number of clusters.
clf = KMeans(n_clusters=4)
s = clf.fit(df[(df['rentType']=='合') & (df['price']>=3000)][["lng", "lat"]])
print(s)
# The n cluster centers.
print(clf.cluster_centers_)
############################随机森林回归############################
from math import radians,sin,cos,degrees,atan2,atan,tan,acos
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
def getDegree(latA, lngA, latB, lngB):
    """
    Initial compass bearing from point A to point B.

    Args:
        latA, lngA: latitude/longitude of the start point, in degrees.
        latB, lngB: latitude/longitude of the end point, in degrees.
    Returns:
        Bearing in degrees, measured clockwise from north, in [0, 360).
    """
    lat_a, lng_a, lat_b, lng_b = (radians(v) for v in (latA, lngA, latB, lngB))
    delta_lng = lng_b - lng_a
    # Standard great-circle initial-bearing formula.
    y = sin(delta_lng) * cos(lat_b)
    x = cos(lat_a) * sin(lat_b) - sin(lat_a) * cos(lat_b) * cos(delta_lng)
    # atan2 yields (-180, 180]; shift into the [0, 360) compass range.
    return (degrees(atan2(y, x)) + 360) % 360
def getDistance(latA, lngA, latB, lngB):
    """
    Approximate ground distance between two GPS points, in meters.

    Uses Lambert's formula on an ellipsoid with equatorial radius 6378140 m
    and polar radius 6356755 m.

    Args:
        latA, lngA: first point, in degrees.
        latB, lngB: second point, in degrees.
    Returns:
        Distance in meters (0.0 for coincident points).
    """
    ra = 6378140  # radius of equator: meter
    rb = 6356755  # radius of polar: meter
    flatten = (ra - rb) / ra  # Partial rate of the earth
    # change angle to radians
    radLatA = radians(latA)
    radLngA = radians(lngA)
    radLatB = radians(latB)
    radLngB = radians(lngB)
    # Reduced latitudes on the ellipsoid.
    pA = atan(rb / ra * tan(radLatA))
    pB = atan(rb / ra * tan(radLatB))
    # Clamp to [-1, 1]: floating-point error can push the cosine of the
    # central angle slightly outside acos()'s domain for (nearly)
    # coincident or antipodal points.
    cos_x = sin(pA) * sin(pB) + cos(pA) * cos(pB) * cos(radLngA - radLngB)
    x = acos(max(-1.0, min(1.0, cos_x)))
    if x == 0.0:
        # Coincident points: the correction terms below would otherwise
        # divide by sin(x / 2) == 0 (the original code raised
        # ZeroDivisionError here).
        return 0.0
    c1 = (sin(x) - x) * (sin(pA) + sin(pB))**2 / cos(x / 2)**2
    c2 = (sin(x) + x) * (sin(pA) - sin(pB))**2 / sin(x / 2)**2
    dr = flatten / 8 * (c1 - c2)
    distance = ra * (x + dr)
    return distance
# Feature engineering: bearing and distance of each listing from the reference
# point (39.915129, 116.403981) — presumably central Beijing; TODO confirm.
df['degree'] = df.apply(lambda row: getDegree(39.915129,116.403981,row.lat,row.lng), axis=1)
df['distance'] = df.apply(lambda row: getDistance(39.915129,116.403981,row.lat,row.lng), axis=1)
#df['distance1'] = df.apply(lambda row: getDistance(39.93573198,116.33882039,row.lat,row.lng), axis=1)
#df['distance2'] = df.apply(lambda row: getDistance(39.9934964,116.45926247,row.lat,row.lng), axis=1)
#df['distance3'] = df.apply(lambda row: getDistance(39.91515228,116.4790283,row.lat,row.lng), axis=1)
#df['distance4'] = df.apply(lambda row: getDistance(40.04388111,116.35319092,row.lat,row.lng), axis=1)
#df['distance5'] = df.apply(lambda row: getDistance(39.929654,116.403119,row.lat,row.lng), axis=1)
# Select model features for shared rentals; 'link' and 'price' ride along as
# the last two columns.
rf_data = df[(df.rentType=='合') & (df.time_unit!='每天') & (df.floorLoc!='') & (df.floorTotal!='')][['area','confGen','confType','direction','floorLoc','floorTotal','nearestSubWayDist','privateBalcony','privateBathroom','rooms','halls','district','degree','distance','link','price']]
rf_data = rf_data.reset_index(drop=True) # reset the index
# Integer-encode the categorical columns; list(...classes_) shows each mapping.
confGenLe = LabelEncoder()
rf_data['confGen']=confGenLe.fit_transform(rf_data['confGen'])
list(confGenLe.classes_)
confTypeLe = LabelEncoder()
rf_data['confType']=confTypeLe.fit_transform(rf_data['confType'])
list(confTypeLe.classes_)
directionLe = LabelEncoder()
rf_data['direction']=directionLe.fit_transform(rf_data['direction'])
list(directionLe.classes_)
districtLe = LabelEncoder()
rf_data['district']=districtLe.fit_transform(rf_data['district'])
list(districtLe.classes_)
# Impute missing subway distance with a large default ('5000').
rf_data.nearestSubWayDist = rf_data.nearestSubWayDist.replace('','5000')
#one-hot encoding
def one_hot_encode(label_set,data):
    """One-hot encode `data` using the category levels present in `label_set`.

    Args:
        label_set: 2-D array-like whose columns enumerate every category value.
        data: 2-D array-like of integer-encoded columns to transform.
    Returns:
        A dense numpy array with one indicator column per category value.
    """
    encoder = OneHotEncoder()
    encoder.fit(label_set)
    return encoder.transform(data).toarray()
# The label set enumerates every category value per column (confGen: 4 levels,
# confType: 6, direction: 8, district: 13) so the encoder sees all levels.
oneHotEncodes = one_hot_encode(
    [[0,0,0,0],[1,1,1,1],[2,2,2,2],[3,3,3,3],[0,4,4,4],[0,5,5,5],[0,0,6,6],[0,0,7,7],[0,0,0,8],[0,0,0,9],[0,0,0,10],[0,0,0,11],[0,0,0,12]],
    rf_data[['confGen','confType','direction','district']])
# Convert the 2-D encoded array into DataFrame columns.
one_hot_columns = ["confGen0", "confGen1", "confGen2", "confGen3",
                   "confType0", "confType1", "confType2", "confType3", "confType4", "confType5",
                   "direction0", "direction1", "direction2", "direction3", "direction4", "direction5", "direction6", "direction7",
                   "district0", "district1", "district2", "district3", "district4", "district5", "district6", "district7", "district8", "district9", "district10", "district11", "district12"]
rf_data[one_hot_columns] = pd.DataFrame(oneHotEncodes,columns=one_hot_columns)
rf_data=rf_data.drop(['confGen','confType','direction','district'],axis=1)
# Move 'link' and 'price' to the last two columns so the iloc slices below can
# split features (columns 0:42) from the target (column 42).
tmp_link=rf_data[['link','price']]
rf_data=rf_data.drop(['link','price'],axis=1)
rf_data[['link','price']]=tmp_link
X_train, X_test, y_train, y_test = train_test_split(rf_data.iloc[:,0:42], rf_data.iloc[:,[42]], test_size=0.33, random_state=42)
# --- Model training (RandomForestRegressor + grid search) start ---
# First, grid-search n_estimators.
param_test1= {'n_estimators':list(range(450,550,10))}
gsearch1= GridSearchCV(estimator = RandomForestRegressor(max_features="log2", min_samples_leaf=2, oob_score=True), param_grid =param_test1, scoring=None, cv=5)
gsearch1.fit(X_train.iloc[:,0:18],y_train)
gsearch1.grid_scores_,gsearch1.best_params_, gsearch1.best_score_
# Next, grid-search the maximum tree depth (max_depth) and the minimum number
# of samples required to split an internal node (min_samples_split).
param_test2= {'max_depth':list(range(80,100,2)), 'min_samples_split':list(range(2,101,2))}
gsearch2= GridSearchCV(estimator = RandomForestRegressor(n_estimators=50, max_features="log2", min_samples_leaf=2, oob_score=True), param_grid = param_test2,scoring=None,iid=False, cv=5)
gsearch2.fit(X_train.iloc[:,0:18],y_train)
gsearch2.grid_scores_,gsearch2.best_params_, gsearch2.best_score_
# Then tune min_samples_split together with the minimum samples per leaf
# (min_samples_leaf).
param_test3= {'min_samples_split':list(range(2,10,2)), 'min_samples_leaf':list(range(2,20,2))}
gsearch3= GridSearchCV(estimator = RandomForestRegressor(n_estimators=50, max_features="log2",max_depth=96, oob_score=True), param_grid = param_test3,scoring=None,iid=False, cv=5)
gsearch3.fit(X_train.iloc[:,0:18],y_train)
gsearch3.grid_scores_,gsearch3.best_params_, gsearch3.best_score_
# Finally, tune the number of features considered per split (max_features):
param_test4= {'max_features':list(range(2,17,1))}
gsearch4= GridSearchCV(estimator = RandomForestRegressor(n_estimators=50,max_depth=96,min_samples_split=4,min_samples_leaf=2, oob_score=True), param_grid = param_test4,scoring=None,iid=False, cv=5)
gsearch4.fit(X_train.iloc[:,0:18],y_train)
gsearch4.grid_scores_,gsearch4.best_params_, gsearch4.best_score_
# Final model with the tuned hyper-parameters.
rf_classifier = RandomForestRegressor(n_estimators=540,max_features=12,max_depth=96,min_samples_split=4,min_samples_leaf=2, oob_score=True)
rf_classifier.fit(X_train.iloc[:,0:41],y_train)
rf_classifier.oob_score_ # out-of-bag score
pd.Series(rf_classifier.feature_importances_,index=X_train.columns[0:41]).sort_values(ascending=False) # feature importances, descending
# --- Model training end ---
# --- Model prediction start ---
results = rf_classifier.predict(X_test.iloc[:,0:41]).astype(int)
rf_classifier.score(X_test.iloc[:,0:41],y_test) # model accuracy (R^2)
pddf = pd.DataFrame({'actual':y_test.price,'predict':results,'link':X_test.link,'size':X_test.area})
pddf['diff'] = abs(pddf.predict-pddf.actual)/pddf.actual
pddf_ordered = pddf.sort(columns='diff', ascending=False)
# --- Model prediction end ---
#############################灰色关联分析#############################
he_df = df[(df['rentType']=='合') & (df.time_unit!='每天') & (df.area>8) & (df.price<2200)] #过滤超出自己心理预期的数据
he_df['co_distance'] = he_df.apply(lambda row: getDistance(39.988122,116.319725,row.lat,row.lng), axis=1) #计算到公司的距离
#指标无量纲化(离差标准化)
he_feature_max = he_df[['area','price','co_distance']].max()
he_feature_min = he_df[['area','price','co_distance']].min()
he_df['area_nondim'] = he_df.apply(lambda row: (row.area-he_feature_min.area)/(he_feature_max.area-he_feature_min.area), axis=1)
he_df['price_nondim'] = he_df.apply(lambda row: (row.price-he_feature_min.price)/(he_feature_max.price-he_feature_min.price), axis=1)
he_df['co_distance_nondim'] = he_df.apply(lambda row: (row.co_distance-he_feature_min.co_distance)/(he_feature_max.co_distance-he_feature_min.co_distance), axis=1)
#计算关联系数
opt_series = pd.Series([1,0,0], index=['area_nondim','price_nondim','co_distance_nondim']) #设定最优化序列
he_df['area_nondim_opt_diff'] = he_df.apply(lambda row: abs(row.area_nondim-opt_series.area_nondim), axis=1)
he_df['price_nondim_opt_diff'] = he_df.apply(lambda row: abs(row.price_nondim-opt_series.price_nondim), axis=1)
he_df['co_distance_nondim_opt_diff'] = he_df.apply(lambda row: abs(row.co_distance_nondim-opt_series.co_distance_nondim), axis=1)
min_nondim_opt_diff = min(min(he_df['area_nondim_opt_diff']),min(he_df['price_nondim_opt_diff']),min(he_df['co_distance_nondim_opt_diff']))
max_nondim_opt_diff = max(max(he_df['area_nondim_opt_diff']),max(he_df['price_nondim_opt_diff']),max(he_df['co_distance_nondim_opt_diff']))
he_df['area_cor'] = he_df.apply(lambda row: (min_nondim_opt_diff+0.5*max_nondim_opt_diff)/(row.area_nondim_opt_diff+0.5*max_nondim_opt_diff), axis=1)
he_df['price_cor'] = he_df.apply(lambda row: (min_nondim_opt_diff+0.5*max_nondim_opt_diff)/(row.price_nondim_opt_diff+0.5*max_nondim_opt_diff), axis=1)
he_df['co_distance_cor'] = he_df.apply(lambda row: (min_nondim_opt_diff+0.5*max_nondim_opt_diff)/(row.co_distance_nondim_opt_diff+0.5*max_nondim_opt_diff), axis=1)
he_df['room_cor_order'] = he_df['area_cor']/6+he_df['price_cor']/3+he_df['co_distance_cor']/2
he_ordered_df = he_df.sort(columns='room_cor_order', ascending=False) #房间关联系数倒排 | gpl-3.0 |
larsmans/numpy | numpy/lib/_version.py | 156 | 4867 | """Utility to compare (Numpy) version strings.
The NumpyVersion class allows properly comparing numpy version strings.
The LooseVersion and StrictVersion classes that distutils provides don't
work; they don't recognize anything like alpha/beta/rc/dev versions.
"""
from __future__ import division, absolute_import, print_function
import re
from numpy.compat import basestring
__all__ = ['NumpyVersion']
class NumpyVersion():
    """Parse and compare numpy version strings.

    Numpy has the following versioning scheme (numbers given are examples; they
    can be > 9) in principle):

    - Released version: '1.8.0', '1.8.1', etc.
    - Alpha: '1.8.0a1', '1.8.0a2', etc.
    - Beta: '1.8.0b1', '1.8.0b2', etc.
    - Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
    - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
    - Development versions after a1: '1.8.0a1.dev-f1234afa',
                                     '1.8.0b2.dev-f1234afa',
                                     '1.8.1rc1.dev-f1234afa', etc.
    - Development versions (no git hash available): '1.8.0.dev-Unknown'

    Comparing needs to be done against a valid version string or other
    `NumpyVersion` instance. Note that all development versions of the same
    (pre-)release compare equal.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    vstring : str
        Numpy version string (``np.__version__``).

    Examples
    --------
    >>> from numpy.lib import NumpyVersion
    >>> if NumpyVersion(np.__version__) < '1.7.0':
    ...     print('skip')
    skip

    >>> NumpyVersion('1.7')  # raises ValueError, add ".0"

    """

    def __init__(self, vstring):
        self.vstring = vstring
        # major.minor.bugfix is mandatory; anything after it is a
        # pre-release or development qualifier.
        ver_main = re.match(r'\d[.]\d+[.]\d+', vstring)
        if not ver_main:
            raise ValueError("Not a valid numpy version string")

        self.version = ver_main.group()
        self.major, self.minor, self.bugfix = [int(x) for x in
            self.version.split('.')]
        if len(vstring) == ver_main.end():
            self.pre_release = 'final'
        else:
            alpha = re.match(r'a\d', vstring[ver_main.end():])
            beta = re.match(r'b\d', vstring[ver_main.end():])
            rc = re.match(r'rc\d', vstring[ver_main.end():])
            pre_rel = [m for m in [alpha, beta, rc] if m is not None]
            if pre_rel:
                self.pre_release = pre_rel[0].group()
            else:
                # No recognized qualifier (e.g. a bare '.dev-...' tag).
                self.pre_release = ''

        self.is_devversion = bool(re.search(r'.dev', vstring))

    def _compare_version(self, other):
        """Compare major.minor.bugfix numerically; return -1, 0 or 1."""
        # Tuple comparison is lexicographic over the three int fields,
        # which is exactly the nested if/elif cascade this replaces.
        mine = (self.major, self.minor, self.bugfix)
        theirs = (other.major, other.minor, other.bugfix)
        if mine == theirs:
            return 0
        return 1 if mine > theirs else -1

    def _compare_pre_release(self, other):
        """Compare alpha/beta/rc/final; return -1, 0 or 1.

        'final' outranks any pre-release tag; otherwise the tags compare
        lexically, which orders aN < bN < rcN as intended.
        """
        if self.pre_release == other.pre_release:
            vercmp = 0
        elif self.pre_release == 'final':
            vercmp = 1
        elif other.pre_release == 'final':
            vercmp = -1
        elif self.pre_release > other.pre_release:
            vercmp = 1
        else:
            vercmp = -1

        return vercmp

    def _compare(self, other):
        """Full comparison: version, then pre-release, then dev flag."""
        if not isinstance(other, (basestring, NumpyVersion)):
            raise ValueError("Invalid object to compare with NumpyVersion.")

        if isinstance(other, basestring):
            other = NumpyVersion(other)

        vercmp = self._compare_version(other)
        if vercmp == 0:
            # Same x.y.z version, check for alpha/beta/rc
            vercmp = self._compare_pre_release(other)
            if vercmp == 0:
                # Same version and same pre-release, check if dev version
                if self.is_devversion is other.is_devversion:
                    vercmp = 0
                elif self.is_devversion:
                    vercmp = -1
                else:
                    vercmp = 1

        return vercmp

    def __lt__(self, other):
        return self._compare(other) < 0

    def __le__(self, other):
        return self._compare(other) <= 0

    def __eq__(self, other):
        return self._compare(other) == 0

    def __ne__(self, other):
        return self._compare(other) != 0

    def __gt__(self, other):
        return self._compare(other) > 0

    def __ge__(self, other):
        return self._compare(other) >= 0

    def __repr__(self):
        # Bug fix: this was named '__repr' (missing trailing underscores),
        # so repr() silently fell back to the default object repr.
        return "NumpyVersion(%s)" % self.vstring
| bsd-3-clause |
javaos74/neutron | neutron/agent/linux/daemon.py | 25 | 8046 | # Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import fcntl
import grp
import logging as std_logging
from logging import handlers
import os
import pwd
import signal
import sys
from oslo_log import log as logging
from neutron.common import exceptions
from neutron.i18n import _LE, _LI
LOG = logging.getLogger(__name__)
DEVNULL = object()
# Note: We can't use sys.std*.fileno() here. sys.std* objects may be
# random file-like objects that may not match the true system std* fds
# - and indeed may not even have a file descriptor at all (eg: test
# fixtures that monkey patch fixtures.StringStream onto sys.stdout).
# Below we always want the _real_ well-known 0,1,2 Unix fds during
# os.dup2 manipulation.
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2
def setuid(user_id_or_name):
    """Switch the process to the given uid (numeric id or user name).

    A uid of 0 (root) is left untouched.  Raises
    FailToDropPrivilegesExit when the kernel refuses the switch.
    """
    try:
        uid = int(user_id_or_name)
    except (TypeError, ValueError):
        # Not numeric: resolve the user name via the passwd database.
        uid = pwd.getpwnam(user_id_or_name).pw_uid
    if uid == 0:
        return
    try:
        os.setuid(uid)
    except OSError:
        msg = _('Failed to set uid %s') % uid
        LOG.critical(msg)
        raise exceptions.FailToDropPrivilegesExit(msg)
def setgid(group_id_or_name):
    """Switch the process to the given gid (numeric id or group name).

    A gid of 0 (root group) is left untouched.  Raises
    FailToDropPrivilegesExit when the kernel refuses the switch.
    """
    try:
        gid = int(group_id_or_name)
    except (TypeError, ValueError):
        # Not numeric: resolve the group name via the group database.
        gid = grp.getgrnam(group_id_or_name).gr_gid
    if gid == 0:
        return
    try:
        os.setgid(gid)
    except OSError:
        msg = _('Failed to set gid %s') % gid
        LOG.critical(msg)
        raise exceptions.FailToDropPrivilegesExit(msg)
def unwatch_log():
    """Replace WatchedFileHandler handlers by FileHandler ones.

    Neutron logging uses WatchedFileHandler handlers but they do not
    support privileges drop; this method replaces them by FileHandler
    handlers supporting privileges drop.
    """
    log_root = logging.getLogger(None).logger
    watched = [h for h in log_root.handlers
               if isinstance(h, handlers.WatchedFileHandler)]
    for old_handler in watched:
        # Mirror the watched handler's configuration onto a plain one.
        replacement = std_logging.FileHandler(old_handler.baseFilename,
                                              mode=old_handler.mode,
                                              encoding=old_handler.encoding,
                                              delay=old_handler.delay)
        log_root.removeHandler(old_handler)
        log_root.addHandler(replacement)
def drop_privileges(user=None, group=None):
    """Permanently drop root privileges to the given user and/or group.

    Does nothing when neither is requested; requires an effective uid of
    0 otherwise.  The group is changed before the user so the process
    still has the privilege to call setgid().
    """
    if user is None and group is None:
        # Nothing requested; keep the current credentials.
        return

    if os.geteuid() != 0:
        err = _('Root permissions are required to drop privileges.')
        LOG.critical(err)
        raise exceptions.FailToDropPrivilegesExit(err)

    if group is not None:
        try:
            # Clear supplemental groups first so none survive the switch.
            os.setgroups([])
        except OSError:
            err = _('Failed to remove supplemental groups')
            LOG.critical(err)
            raise exceptions.FailToDropPrivilegesExit(err)
        setgid(group)

    if user is not None:
        setuid(user)

    LOG.info(_LI("Process runs with uid/gid: %(uid)s/%(gid)s"),
             {'uid': os.getuid(), 'gid': os.getgid()})
class Pidfile(object):
    """Lock file recording the pid of a running daemon.

    The file is opened and exclusively flock()ed for the lifetime of the
    process; a second process creating a Pidfile on the same path fails
    to acquire the lock, which is what prevents duplicate daemons.
    """

    def __init__(self, pidfile, procname, uuid=None):
        self.pidfile = pidfile
        self.procname = procname
        self.uuid = uuid
        try:
            # Non-blocking exclusive lock: IOError here means another
            # process already owns this pidfile.
            self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR)
            fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            LOG.exception(_LE("Error while handling pidfile: %s"), pidfile)
            sys.exit(1)

    def __str__(self):
        return self.pidfile

    def unlock(self):
        # Release the flock; the file itself is left in place.
        fcntl.flock(self.fd, fcntl.LOCK_UN)

    def write(self, pid):
        # Truncate first so a shorter pid never leaves stale digits behind.
        os.ftruncate(self.fd, 0)
        # NOTE(review): os.write() requires bytes on Python 3; this str
        # payload is Python 2 style -- confirm the target interpreter.
        os.write(self.fd, "%d" % pid)
        os.fsync(self.fd)

    def read(self):
        # Return the stored pid as an int, or None when unparseable.
        try:
            pid = int(os.read(self.fd, 128))
            # Rewind so a later read starts from the beginning again.
            os.lseek(self.fd, 0, os.SEEK_SET)
            return pid
        except ValueError:
            return

    def is_running(self):
        # Check /proc to see whether the recorded pid is still this
        # daemon (matching procname and, when given, uuid) rather than a
        # recycled pid belonging to an unrelated process.
        pid = self.read()
        if not pid:
            return False

        cmdline = '/proc/%s/cmdline' % pid
        try:
            with open(cmdline, "r") as f:
                exec_out = f.readline()
            return self.procname in exec_out and (not self.uuid or
                                                  self.uuid in exec_out)
        except IOError:
            return False
class Daemon(object):
    """A generic daemon class.

    Usage: subclass the Daemon class and override the run() method
    """

    def __init__(self, pidfile, stdin=DEVNULL, stdout=DEVNULL,
                 stderr=DEVNULL, procname='python', uuid=None,
                 user=None, group=None, watch_log=True):
        """Note: pidfile may be None."""
        # stdin/stdout/stderr default to the DEVNULL sentinel, meaning
        # "redirect to os.devnull" when the process daemonizes.
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.procname = procname
        self.pidfile = (Pidfile(pidfile, procname, uuid)
                        if pidfile is not None else None)
        self.user = user
        self.group = group
        self.watch_log = watch_log

    def _fork(self):
        # The parent exits via os._exit() so atexit handlers and buffered
        # output are not duplicated in both processes.
        try:
            pid = os.fork()
            if pid > 0:
                os._exit(0)
        except OSError:
            LOG.exception(_LE('Fork failed'))
            sys.exit(1)

    def daemonize(self):
        """Daemonize process by doing Stevens double fork."""

        # flush any buffered data before fork/dup2.
        if self.stdout is not DEVNULL:
            self.stdout.flush()
        if self.stderr is not DEVNULL:
            self.stderr.flush()
        # sys.std* may not match STD{OUT,ERR}_FILENO.  Tough.
        for f in (sys.stdout, sys.stderr):
            f.flush()

        # fork first time
        self._fork()

        # decouple from parent environment: new working dir, new session
        # (detaches from the controlling terminal), permissive umask.
        os.chdir("/")
        os.setsid()
        os.umask(0)

        # fork second time (so the daemon can never reacquire a terminal)
        self._fork()

        # redirect standard file descriptors onto the requested streams
        # (or os.devnull for any stream left at the DEVNULL sentinel).
        with open(os.devnull, 'w+') as devnull:
            stdin = devnull if self.stdin is DEVNULL else self.stdin
            stdout = devnull if self.stdout is DEVNULL else self.stdout
            stderr = devnull if self.stderr is DEVNULL else self.stderr
            os.dup2(stdin.fileno(), STDIN_FILENO)
            os.dup2(stdout.fileno(), STDOUT_FILENO)
            os.dup2(stderr.fileno(), STDERR_FILENO)

        if self.pidfile is not None:
            # write pidfile, and arrange for it to be removed on exit.
            atexit.register(self.delete_pid)
            signal.signal(signal.SIGTERM, self.handle_sigterm)
            self.pidfile.write(os.getpid())

    def delete_pid(self):
        if self.pidfile is not None:
            os.remove(str(self.pidfile))

    def handle_sigterm(self, signum, frame):
        # Exit cleanly so the atexit hook removes the pidfile.
        sys.exit(0)

    def start(self):
        """Start the daemon."""

        if self.pidfile is not None and self.pidfile.is_running():
            self.pidfile.unlock()
            LOG.error(_LE('Pidfile %s already exist. Daemon already '
                          'running?'), self.pidfile)
            sys.exit(1)

        # Start the daemon
        self.daemonize()
        self.run()

    def run(self):
        """Override this method and call super().run when subclassing Daemon.

        start() will call this method after the process has daemonized.
        """
        if not self.watch_log:
            unwatch_log()
        drop_privileges(self.user, self.group)
| apache-2.0 |
bleib1dj/boto | boto/kms/exceptions.py | 135 | 1523 | # The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import BotoServerError
# Exception classes raised by boto's AWS KMS bindings.  Each class maps
# one KMS service error code onto the generic BotoServerError so callers
# can catch specific failure modes by type.


class InvalidGrantTokenException(BotoServerError):
    pass


class DisabledException(BotoServerError):
    pass


class LimitExceededException(BotoServerError):
    pass


class DependencyTimeoutException(BotoServerError):
    pass


class InvalidMarkerException(BotoServerError):
    pass


class AlreadyExistsException(BotoServerError):
    pass


class InvalidCiphertextException(BotoServerError):
    pass


class KeyUnavailableException(BotoServerError):
    pass


class InvalidAliasNameException(BotoServerError):
    pass


class UnsupportedOperationException(BotoServerError):
    pass


class InvalidArnException(BotoServerError):
    pass


class KMSInternalException(BotoServerError):
    pass


class InvalidKeyUsageException(BotoServerError):
    pass


class MalformedPolicyDocumentException(BotoServerError):
    pass


class NotFoundException(BotoServerError):
    pass
| mit |
alistairlow/tensorflow | tensorflow/python/ops/batch_norm_benchmark.py | 76 | 10818 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""End-to-end benchmark for batch normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import time
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def batch_norm_op(tensor, mean, variance, beta, gamma, scale):
  """Fused kernel for batch normalization."""
  # _batch_norm_with_global_normalization is deprecated in v9; pin the
  # graph to GraphDef version 8 so the deprecated fused op still loads.
  ops.get_default_graph().graph_def_versions.producer = 8
  # pylint: disable=protected-access
  return gen_nn_ops._batch_norm_with_global_normalization(tensor, mean,
                                                          variance, beta, gamma,
                                                          0.001, scale)
  # pylint: enable=protected-access
  # Note that the naive implementation is much slower:
  # batch_norm = (tensor - mean) * tf.rsqrt(variance + 0.001)
  # if scale:
  #   batch_norm *= gamma
  # return batch_norm + beta
def batch_norm_py(tensor, mean, variance, beta, gamma, scale):
  """Batch normalization via the composite-op Python implementation."""
  # gamma is only passed through when post-normalization scaling is on.
  scale_param = gamma if scale else None
  return nn_impl.batch_normalization(tensor, mean, variance, beta,
                                     scale_param, 0.001)
def batch_norm_slow(tensor, mean, variance, beta, gamma, scale):
  """Reference batch normalization built from elementary math ops."""
  normalized = (tensor - mean) * math_ops.rsqrt(variance + 0.001)
  if scale:
    normalized = normalized * gamma
  return normalized + beta
def build_graph(device, input_shape, axes, num_layers, mode, scale, train):
  """Build a graph containing a sequence of batch normalizations.

  Args:
    device: string, the device to run on.
    input_shape: shape of the input tensor.
    axes: axes that are to be normalized across.
    num_layers: number of batch normalization layers in the graph.
    mode: "op", "py" or "slow" depending on the implementation.
    scale: scale after normalization.
    train: if true, also run backprop.

  Returns:
    An array of tensors to run()
  """
  # "py" and "slow" expect moments that keep the normalized axes as
  # size-1 dimensions; the fused op wants them squeezed out.
  keep_dims = mode in ("py", "slow")
  if keep_dims:
    moment_shape = [1 if axis in axes else input_shape[axis]
                    for axis in range(len(input_shape))]
  else:
    moment_shape = [dim for axis, dim in enumerate(input_shape)
                    if axis not in axes]

  # Resolve the implementation once; an unknown mode leaves the tensor
  # untouched, matching the original if/elif chain.
  bn_impl = {"py": batch_norm_py,
             "op": batch_norm_op,
             "slow": batch_norm_slow}.get(mode)

  with ops.device("/%s:0" % device):
    tensor = variables.Variable(random_ops.truncated_normal(input_shape))
    for _ in range(num_layers):
      if train:
        mean, variance = nn_impl.moments(tensor, axes, keep_dims=keep_dims)
      else:
        mean = array_ops.zeros(moment_shape)
        variance = array_ops.ones(moment_shape)
      beta = variables.Variable(array_ops.zeros(moment_shape))
      gamma = variables.Variable(constant_op.constant(1.0, shape=moment_shape))
      if bn_impl is not None:
        tensor = bn_impl(tensor, mean, variance, beta, gamma, scale)
    if train:
      return gradients_impl.gradients([tensor], variables.trainable_variables())
    else:
      return [tensor]
def print_difference(mode, t1, t2):
  """Print the relative timing change between two runs, in percent."""
  pct = (t2 - t1) / t1 * 100.0
  print("=== %s: %.1f%% ===" % (mode, pct))
class BatchNormBenchmark(test.Benchmark):
  """Benchmark batch normalization."""

  def _run_graph(self, device, input_shape, axes, num_layers, mode, scale,
                 train, num_iters):
    """Run the graph and print its execution time.

    Args:
      device: string, the device to run on.
      input_shape: shape of the input tensor.
      axes: axes that are to be normalized across.
      num_layers: number of batch normalization layers in the graph.
      mode: "op", "py" or "slow" depending on the implementation.
      scale: scale after normalization.
      train: if true, also run backprop.
      num_iters: number of steps to run.

    Returns:
      The duration of the run in seconds.
    """
    graph = ops.Graph()
    with graph.as_default():
      outputs = build_graph(device, input_shape, axes, num_layers, mode, scale,
                            train)
    with session_lib.Session(graph=graph) as session:
      variables.global_variables_initializer().run()
      _ = session.run([out.op for out in outputs])  # warm up.
      start_time = time.time()
      for _ in range(num_iters):
        _ = session.run([out.op for out in outputs])
      # Total wall time for num_iters steps (excludes the warm-up run).
      duration = time.time() - start_time
    print("%s shape:%d/%d #layers:%d mode:%s scale:%r train:%r - %f secs" %
          (device, len(input_shape), len(axes), num_layers, mode, scale, train,
           duration / num_iters))
    name_template = (
        "batch_norm_{device}_input_shape_{shape}_axes_{axes}_mode_{mode}_"
        "layers_{num_layers}_scale_{scale}_"
        "train_{train}")
    self.report_benchmark(
        name=name_template.format(
            device=device,
            mode=mode,
            num_layers=num_layers,
            scale=scale,
            train=train,
            shape=str(input_shape).replace(" ", ""),
            axes=str(axes)).replace(" ", ""),
        iters=num_iters,
        wall_time=duration / num_iters)
    return duration

  def benchmark_batch_norm(self):
    # Compares the three implementations ("op", "py", "slow") over
    # convolution-sized and fully-connected-sized inputs, forward-only
    # and forward+backward, on CPU and (optionally) GPU.
    print("Forward convolution (lower layers).")
    shape = [8, 128, 128, 32]
    axes = [0, 1, 2]
    t1 = self._run_graph("cpu", shape, axes, 10, "op", True, False, 5)
    t2 = self._run_graph("cpu", shape, axes, 10, "py", True, False, 5)
    t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, False, 5)
    print_difference("op vs py", t1, t2)
    print_difference("py vs slow", t2, t3)
    if FLAGS.use_gpu:
      t1 = self._run_graph("gpu", shape, axes, 10, "op", True, False, 50)
      t2 = self._run_graph("gpu", shape, axes, 10, "py", True, False, 50)
      t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, False, 50)
      print_difference("op vs py", t1, t2)
      print_difference("py vs slow", t2, t3)
    print("Forward/backward convolution (lower layers).")
    t1 = self._run_graph("cpu", shape, axes, 10, "op", True, True, 5)
    t2 = self._run_graph("cpu", shape, axes, 10, "py", True, True, 5)
    t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, True, 5)
    print_difference("op vs py", t1, t2)
    print_difference("py vs slow", t2, t3)
    if FLAGS.use_gpu:
      t1 = self._run_graph("gpu", shape, axes, 10, "op", True, True, 50)
      t2 = self._run_graph("gpu", shape, axes, 10, "py", True, True, 50)
      t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, True, 50)
      print_difference("op vs py", t1, t2)
      print_difference("py vs slow", t2, t3)
    print("Forward convolution (higher layers).")
    shape = [256, 17, 17, 32]
    axes = [0, 1, 2]
    t1 = self._run_graph("cpu", shape, axes, 10, "op", True, False, 5)
    t2 = self._run_graph("cpu", shape, axes, 10, "py", True, False, 5)
    t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, False, 5)
    print_difference("op vs py", t1, t2)
    print_difference("py vs slow", t2, t3)
    if FLAGS.use_gpu:
      t1 = self._run_graph("gpu", shape, axes, 10, "op", True, False, 50)
      t2 = self._run_graph("gpu", shape, axes, 10, "py", True, False, 50)
      t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, False, 50)
      print_difference("op vs py", t1, t2)
      print_difference("py vs slow", t2, t3)
    print("Forward/backward convolution (higher layers).")
    t1 = self._run_graph("cpu", shape, axes, 10, "op", True, True, 5)
    t2 = self._run_graph("cpu", shape, axes, 10, "py", True, True, 5)
    t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, True, 5)
    print_difference("op vs py", t1, t2)
    print_difference("py vs slow", t2, t3)
    if FLAGS.use_gpu:
      t1 = self._run_graph("gpu", shape, axes, 10, "op", True, True, 50)
      t2 = self._run_graph("gpu", shape, axes, 10, "py", True, True, 50)
      t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, True, 50)
      print_difference("op vs py", t1, t2)
      print_difference("py vs slow", t2, t3)
    print("Forward fully-connected.")
    shape = [1024, 32]
    axes = [0]
    # The fused "op" kernel is not benchmarked for 2-D inputs.
    t1 = self._run_graph("cpu", shape, axes, 10, "py", True, False, 5)
    t2 = self._run_graph("cpu", shape, axes, 10, "slow", True, False, 5)
    print_difference("py vs slow", t1, t2)
    if FLAGS.use_gpu:
      t1 = self._run_graph("gpu", shape, axes, 10, "py", True, False, 50)
      t2 = self._run_graph("gpu", shape, axes, 10, "slow", True, False, 50)
      print_difference("py vs slow", t1, t2)
    print("Forward/backward fully-connected.")
    t1 = self._run_graph("cpu", shape, axes, 10, "py", True, True, 50)
    t2 = self._run_graph("cpu", shape, axes, 10, "slow", True, True, 50)
    print_difference("py vs slow", t1, t2)
    if FLAGS.use_gpu:
      t1 = self._run_graph("gpu", shape, axes, 10, "py", True, True, 5)
      t2 = self._run_graph("gpu", shape, axes, 10, "slow", True, True, 5)
      print_difference("py vs slow", t1, t2)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--use_gpu",
type="bool",
nargs="?",
const=True,
default=True,
help="Run GPU benchmarks."
)
global FLAGS # pylint:disable=global-at-module-level
FLAGS, unparsed = parser.parse_known_args()
test.main(argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
wisechengyi/pants | contrib/node/src/python/pants/contrib/node/register.py | 1 | 2405 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.task_registrar import TaskRegistrar as task
from pants.contrib.node.subsystems.resolvers.node_preinstalled_module_resolver import (
NodePreinstalledModuleResolver,
)
from pants.contrib.node.subsystems.resolvers.npm_resolver import NpmResolver
from pants.contrib.node.targets.node_bundle import NodeBundle
from pants.contrib.node.targets.node_module import NodeModule
from pants.contrib.node.targets.node_preinstalled_module import NodePreinstalledModule
from pants.contrib.node.targets.node_remote_module import NodeRemoteModule
from pants.contrib.node.targets.node_test import NodeTest as NodeTestTarget
from pants.contrib.node.tasks.javascript_style import JavascriptStyleFmt, JavascriptStyleLint
from pants.contrib.node.tasks.node_build import NodeBuild
from pants.contrib.node.tasks.node_bundle import NodeBundle as NodeBundleTask
from pants.contrib.node.tasks.node_install import NodeInstall
from pants.contrib.node.tasks.node_repl import NodeRepl
from pants.contrib.node.tasks.node_resolve import NodeResolve
from pants.contrib.node.tasks.node_run import NodeRun
from pants.contrib.node.tasks.node_test import NodeTest as NodeTestTask
def build_file_aliases():
    """Expose the Node.js target types under their BUILD-file names."""
    target_aliases = {
        "node_bundle": NodeBundle,
        "node_module": NodeModule,
        "node_preinstalled_module": NodePreinstalledModule,
        "node_remote_module": NodeRemoteModule,
        "node_test": NodeTestTarget,
    }
    return BuildFileAliases(targets=target_aliases)
def register_goals():
    """Install the Node.js tasks into the standard pants goals."""
    # (goal, task class, install kwargs), in registration order.
    node_task_specs = (
        ("repl", NodeRepl, {}),
        ("resolve", NodeResolve, {}),
        ("run", NodeRun, {}),
        ("compile", NodeBuild, {"first": True}),
        ("test", NodeTestTask, {}),
        ("bundle", NodeBundleTask, {}),
    )
    for goal, action, install_kwargs in node_task_specs:
        task(name="node", action=action).install(goal, **install_kwargs)
    task(name="node-install", action=NodeInstall).install()

    # Linting and formatting hooks.
    task(name="javascriptstyle", action=JavascriptStyleLint).install("lint")
    task(name="javascriptstyle", action=JavascriptStyleFmt).install("fmt")
def global_subsystems():
    """Subsystems that participate in every pants run."""
    return NodePreinstalledModuleResolver, NpmResolver
| apache-2.0 |
castlest/shell-detection | coherence-elliptical-kernel/main.py | 1 | 7932 | '''
Author: S.T. Castle
Created: 2015-03-15
'''
#import math
import numpy as np
from scipy import ndimage
from scipy import stats
import scipy.ndimage.filters
import scipy.linalg
#import skimage.feature
import cv2
from matplotlib import pyplot as plt
def main():
'''
Run the explicit coherence enhancing filter with spatial adaptive
elliptical kernel from F.Li et al. 2012.
'''
# Params.
window_size = 7
sigma = 1 # Standard deviation of initial Gaussian kernel.
rho = 6 # Std dev of Gaussian kernel used to compute structure tensor.
gamma = 0.05
eps = np.spacing(1) # Very small positive number.
filename = 'fingerprint1.png'
# Open as grayscale image.
orig_img = cv2.imread(filename, 0)
print 'Opened ' + filename
#plt.subplot(111),plt.imshow(img, cmap = 'gray')
#plt.title('Input image'), plt.xticks([]), plt.yticks([])
#plt.show()
# Convolve image with a Gaussian kernel with standard deviation sigma.
img = scipy.ndimage.filters.gaussian_filter(orig_img, sigma)
#plt.subplot(111),plt.imshow(img, cmap = 'gray')
#plt.title('Input image'), plt.xticks([]), plt.yticks([])
#plt.show()
print 'shape of img:',
print img.shape
# Compute the 2D structure tensor of the image.
# The structure tensor is:
# [j11 j12]
# [j12 j22]
#j11, j12, j22 = skimage.feature.structure_tensor(img, sigma=sigma)
j11, j12, j22 = structure_tensor(img, sigma=sigma)
#print 'j11'
#print j11
#print 'j12'
#print j12
#print 'j22'
#print j22
print 'shape of j11:',
print j11.shape
print 'shape of J:',
print np.array([[j11,j12],[j12,j22]]).shape
# Compute eigenvalues mu1, mu2 of structure tensor. mu1 >= mu2.
mu1 = (j11 + j22) / 2 + np.sqrt(4 * j12 ** 2 + (j11 - j22) ** 2) / 2
mu2 = (j11 + j22) / 2 - np.sqrt(4 * j12 ** 2 + (j11 - j22) ** 2) / 2
print 'shape of mu1:',
print mu1.shape
# Compute corresponding normalized eigenvectors v1, v2.
v1 = np.asarray([ 2*j12,
j22-j11 + np.sqrt((j11-j22)**2 + 4*(j12**2)) ])
# Rearrange axis so that v1 is indexed as (x,y,(eigvector))
v1 = np.rollaxis(v1,0,3)
#print 'mu1'
#print mu1
#print 'mu2'
#print mu2
#print 'v1'
#print v1
#print 'v2'
#print v2
print 'shape of v1:',
print v1.shape
#print 'v1[0] =',
#print v1[0]
#print 'v1[0][0] =',
#print v1[0][0]
#print v1
# Compute theta based on the angle of v1 and the positive direction of
# the horizontal axis.
# cos(theta) = x / magnitude.
# If the magnitude is 0, then just try setting theta=0 for now.
print 'Calculating theta...'
theta = np.empty((v1.shape[0], v1.shape[1]))
for i in xrange(v1.shape[0]):
for j in xrange(v1.shape[1]):
v = v1[i][j]
mag = float(magnitude(v))
if mag:
theta[i][j] = np.arccos(v[0]/magnitude(v))
else:
theta[i][j] = 0
print 'Done.'
print 'shape of theta:',
print theta.shape
# Now that necessary values are calculated, proceed to filtering.
print 'Filtering...'
fimg = np.empty_like(img) # Create a blank array for the filtered image.
rad = window_size/2 # Radius of the filtering window.
sig1 = 10*gamma
# Current pixel is (x1,x2) and neighbor is (y1,y2).
height = img.shape[0]
width = img.shape[1]
for x1 in xrange(height):
for x2 in xrange(width):
eig1 = mu1[x1][x2]
eig2 = mu2[x1][x2]
ang = theta[x1][x2]
sig2 = 10*(gamma+(1-gamma)*np.exp(-1/((eig1-eig2)**2+eps)))
wt_const = 1/(2*np.pi*sig1*sig2) # Constant factor for weighting.
# Add weighted value from neighbor pixel y.
sum = 0
wt_sum = 0 # Sum of the weights for normalization scaling.
for i in xrange(-rad,rad+1):
y1 = x1+i
if (y1 < 0) or (y1 >= height):
continue
for j in xrange(-rad,rad+1):
y2 = x2+i
if (y2 < 0) or (y2 >= width):
continue
# Calculate weight of neighboring position y.
s = (y1-x1)*np.cos(ang) + (y2-x2)*np.sin(ang)
t = -(y1-x1)*np.sin(ang) + (y2-x2)*np.cos(ang)
wt = wt_const * np.exp( -s**2/(2*sig1**2) - t**2/(2*sig2**2) )
sum = sum + wt*orig_img[y1][y2] # Use original image or blurred?
wt_sum = wt_sum + wt
# Set value of this pixel x.
#sum = sum * (1.0/wt_sum) # Scale the pixel value.
fimg[x1][x2] = sum
print x1
print 'Done.'
# Display original and filtered images.
plt.subplot(121),plt.imshow(img, cmap = 'gray')
plt.title('Input image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(fimg, cmap = 'gray')
plt.title('Filtered Image'), plt.xticks([]), plt.yticks([])
plt.show()
def magnitude(v):
    """Euclidean (L2) norm of a vector."""
    squared_norm = np.dot(v, v)
    return np.sqrt(squared_norm)
# The two helpers below are adapted from scikit-image
# (skimage.feature.structure_tensor and its derivative helper).
def _compute_derivatives(image, mode='constant', cval=0):
"""Compute derivatives in x and y direction using the Sobel operator.
Parameters
----------
image : ndarray
Input image.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
imx : ndarray
Derivative in x-direction.
imy : ndarray
Derivative in y-direction.
"""
imy = ndimage.sobel(image, axis=0, mode=mode, cval=cval)
imx = ndimage.sobel(image, axis=1, mode=mode, cval=cval)
return imx, imy
def structure_tensor(image, sigma=1, mode='constant', cval=0):
    """Compute the structure tensor using sums of squared differences.

    The structure tensor A is defined as::

        A = [Axx Axy]
            [Axy Ayy]

    and is approximated here by Gaussian smoothing of the products of the
    Sobel derivatives in a local window around each pixel.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float
        Standard deviation of the Gaussian kernel used as the weighting
        function for the local summation of squared differences.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How values outside the image borders are handled.
    cval : float, optional
        Fill value used together with mode 'constant'.

    Returns
    -------
    Axx : ndarray
        Element of the structure tensor for each pixel in the input image.
    Axy : ndarray
        Element of the structure tensor for each pixel in the input image.
    Ayy : ndarray
        Element of the structure tensor for each pixel in the input image.

    Examples
    --------
    >>> from skimage.feature import structure_tensor
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 1
    >>> Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)
    >>> Axx
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 0.,  4.,  0.,  4.,  0.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    """
    #image = _prepare_grayscale_input_2D(image)
    grad_x, grad_y = _compute_derivatives(image, mode=mode, cval=cval)

    def smooth(channel):
        # Gaussian weighting of the derivative products.
        return ndimage.gaussian_filter(channel, sigma, mode=mode, cval=cval)

    return (smooth(grad_x * grad_x),
            smooth(grad_x * grad_y),
            smooth(grad_y * grad_y))
if __name__ == '__main__':
    # Run the demo when this file is executed as a script.
    main()
| bsd-3-clause |
BT-astauder/odoo | openerp/osv/osv.py | 337 | 1384 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from ..exceptions import except_orm
from .orm import Model, TransientModel, AbstractModel

# Deprecated alias, kept for backward compatibility.
# openerp.exceptions.Warning should be used instead.
except_osv = except_orm

# Deprecated aliases, kept for backward compatibility: the historical
# "osv" class names now map onto the current ORM model classes.
osv = Model
osv_memory = TransientModel
osv_abstract = AbstractModel  # ;-)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mantiz/ansible-modules-core | system/group.py | 22 | 11974 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Stephen Fromm <sfromm@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: group
author: Stephen Fromm
version_added: "0.0.2"
short_description: Add or remove groups
requirements: [ groupadd, groupdel, groupmod ]
description:
- Manage presence of groups on a host.
options:
name:
required: true
description:
- Name of the group to manage.
gid:
required: false
description:
- Optional I(GID) to set for the group.
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the group should be present or not on the remote host.
system:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- If I(yes), indicates that the group created is a system group.
'''
EXAMPLES = '''
# Example group command from Ansible Playbooks
- group: name=somegroup state=present
'''
import grp
import syslog
import platform
class Group(object):
    """
    Generic group-manipulation class, specialized per platform.

    A subclass may override the action methods:
        - group_del()
        - group_add()
        - group_mod()

    All subclasses MUST define `platform` and `distribution`
    (the latter may be None).
    """

    platform = 'Generic'
    distribution = None
    GROUPFILE = '/etc/group'

    def __new__(cls, *args, **kwargs):
        # Dispatch to the subclass matching the running platform.
        return load_platform_subclass(Group, args, kwargs)

    def __init__(self, module):
        self.module = module
        self.state = module.params['state']
        self.name = module.params['name']
        self.gid = module.params['gid']
        self.system = module.params['system']
        self.syslogging = False

    def execute_command(self, cmd):
        # Optionally mirror the command to syslog before running it.
        if self.syslogging:
            syslog.openlog('ansible-%s' % os.path.basename(__file__))
            syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))
        return self.module.run_command(cmd)

    def group_del(self):
        # Remove the group with the native groupdel tool.
        return self.execute_command(
            [self.module.get_bin_path('groupdel', True), self.name])

    def group_add(self, **kwargs):
        # Create the group; honors the 'gid' and 'system' keyword options.
        cmd = [self.module.get_bin_path('groupadd', True)]
        for key, val in kwargs.items():
            if key == 'gid' and val is not None:
                cmd.extend(['-g', val])
            elif key == 'system' and val == True:
                cmd.append('-r')
        cmd.append(self.name)
        return self.execute_command(cmd)

    def group_mod(self, **kwargs):
        # Change the group's gid when it differs from the current one.
        cmd = [self.module.get_bin_path('groupmod', True)]
        info = self.group_info()
        for key, val in kwargs.items():
            if key == 'gid' and val is not None and info[2] != int(val):
                cmd.extend(['-g', val])
        if len(cmd) == 1:
            # Nothing would change.
            return (None, '', '')
        if self.module.check_mode:
            return (0, '', '')
        cmd.append(self.name)
        return self.execute_command(cmd)

    def group_exists(self):
        # True when the name is present in the system group database.
        try:
            if grp.getgrnam(self.name):
                return True
        except KeyError:
            return False

    def group_info(self):
        # Group database entry as a list, or False when it does not exist.
        if not self.group_exists():
            return False
        try:
            return list(grp.getgrnam(self.name))
        except KeyError:
            return False
# ===========================================
class SunOS(Group):
    """
    SunOS group manipulation.  Solaris has no notion of a 'system'
    group, so only the gid option is honored.

    Overrides group_add() from the generic class.
    """
    platform = 'SunOS'
    distribution = None
    GROUPFILE = '/etc/group'

    def group_add(self, **kwargs):
        cmd = [self.module.get_bin_path('groupadd', True)]
        for key, val in kwargs.items():
            if key == 'gid' and val is not None:
                cmd.extend(['-g', val])
        cmd.append(self.name)
        return self.execute_command(cmd)
# ===========================================
class AIX(Group):
    """
    AIX group manipulation using the native rmgroup/mkgroup/chgroup tools.

    Overrides group_del(), group_add() and group_mod() from the
    generic class.
    """
    platform = 'AIX'
    distribution = None
    GROUPFILE = '/etc/group'

    def group_del(self):
        return self.execute_command(
            [self.module.get_bin_path('rmgroup', True), self.name])

    def group_add(self, **kwargs):
        cmd = [self.module.get_bin_path('mkgroup', True)]
        for key, val in kwargs.items():
            if key == 'gid' and val is not None:
                cmd.append('id=' + val)
            elif key == 'system' and val == True:
                cmd.append('-a')
        cmd.append(self.name)
        return self.execute_command(cmd)

    def group_mod(self, **kwargs):
        cmd = [self.module.get_bin_path('chgroup', True)]
        info = self.group_info()
        for key, val in kwargs.items():
            if key == 'gid' and val is not None and info[2] != int(val):
                cmd.append('id=' + val)
        if len(cmd) == 1:
            # Nothing would change.
            return (None, '', '')
        if self.module.check_mode:
            return (0, '', '')
        cmd.append(self.name)
        return self.execute_command(cmd)
# ===========================================
class FreeBsdGroup(Group):
    """
    FreeBSD group manipulation via the pw(8) tool.

    Overrides group_del(), group_add() and group_mod() from the
    generic class.
    """
    platform = 'FreeBSD'
    distribution = None
    GROUPFILE = '/etc/group'

    def group_del(self):
        return self.execute_command(
            [self.module.get_bin_path('pw', True), 'groupdel', self.name])

    def group_add(self, **kwargs):
        cmd = [self.module.get_bin_path('pw', True), 'groupadd', self.name]
        if self.gid is not None:
            cmd.append('-g %d' % int(self.gid))
        return self.execute_command(cmd)

    def group_mod(self, **kwargs):
        cmd = [self.module.get_bin_path('pw', True), 'groupmod', self.name]
        info = self.group_info()
        base_len = len(cmd)
        if self.gid is not None and int(self.gid) != info[2]:
            cmd.append('-g %d' % int(self.gid))
        if len(cmd) == base_len:
            # Nothing would change.
            return (None, '', '')
        if self.module.check_mode:
            return (0, '', '')
        return self.execute_command(cmd)
# ===========================================
class OpenBsdGroup(Group):
    """
    OpenBSD group manipulation via groupadd/groupmod/groupdel.

    Overrides group_del(), group_add() and group_mod() from the
    generic class.
    """
    platform = 'OpenBSD'
    distribution = None
    GROUPFILE = '/etc/group'

    def group_del(self):
        return self.execute_command(
            [self.module.get_bin_path('groupdel', True), self.name])

    def group_add(self, **kwargs):
        cmd = [self.module.get_bin_path('groupadd', True)]
        if self.gid is not None:
            cmd.extend(['-g', '%d' % int(self.gid)])
        cmd.append(self.name)
        return self.execute_command(cmd)

    def group_mod(self, **kwargs):
        cmd = [self.module.get_bin_path('groupmod', True)]
        info = self.group_info()
        if self.gid is not None and int(self.gid) != info[2]:
            cmd.extend(['-g', '%d' % int(self.gid)])
        if len(cmd) == 1:
            # Nothing would change.
            return (None, '', '')
        if self.module.check_mode:
            return (0, '', '')
        cmd.append(self.name)
        return self.execute_command(cmd)
# ===========================================
class NetBsdGroup(Group):
    """
    NetBSD group manipulation via groupadd/groupmod/groupdel.

    Overrides group_del(), group_add() and group_mod() from the
    generic class.
    """
    platform = 'NetBSD'
    distribution = None
    GROUPFILE = '/etc/group'

    def group_del(self):
        return self.execute_command(
            [self.module.get_bin_path('groupdel', True), self.name])

    def group_add(self, **kwargs):
        cmd = [self.module.get_bin_path('groupadd', True)]
        if self.gid is not None:
            cmd.append('-g')
            cmd.append('%d' % int(self.gid))
        cmd.append(self.name)
        return self.execute_command(cmd)

    def group_mod(self, **kwargs):
        cmd = [self.module.get_bin_path('groupmod', True)]
        info = self.group_info()
        wants_new_gid = self.gid is not None and int(self.gid) != info[2]
        if wants_new_gid:
            cmd.append('-g')
            cmd.append('%d' % int(self.gid))
        if len(cmd) == 1:
            # Nothing would change.
            return (None, '', '')
        if self.module.check_mode:
            return (0, '', '')
        cmd.append(self.name)
        return self.execute_command(cmd)
# ===========================================
def main():
    """Module entry point.

    Ensures the requested group is present or absent, then reports the
    outcome (changed flag, command output and resulting gid) to Ansible
    via exit_json/fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            name=dict(required=True, type='str'),
            gid=dict(default=None, type='str'),
            system=dict(default=False, type='bool'),
        ),
        supports_check_mode=True
    )

    group = Group(module)

    if group.syslogging:
        syslog.openlog('ansible-%s' % os.path.basename(__file__))
        syslog.syslog(syslog.LOG_NOTICE, 'Group instantiated - platform %s' % group.platform)
        # BUG FIX: this previously read `user.distribution` — a copy/paste
        # leftover from the user module; `user` is undefined in this scope.
        if group.distribution:
            syslog.syslog(syslog.LOG_NOTICE, 'Group instantiated - distribution %s' % group.distribution)

    rc = None
    out = ''
    err = ''
    result = {}
    result['name'] = group.name
    result['state'] = group.state

    if group.state == 'absent':
        if group.group_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = group.group_del()
            if rc != 0:
                module.fail_json(name=group.name, msg=err)
    elif group.state == 'present':
        if not group.group_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = group.group_add(gid=group.gid, system=group.system)
        else:
            (rc, out, err) = group.group_mod(gid=group.gid)
        if rc is not None and rc != 0:
            module.fail_json(name=group.name, msg=err)

    # rc stays None when no command was executed, i.e. nothing changed.
    result['changed'] = rc is not None
    if out:
        result['stdout'] = out
    if err:
        result['stderr'] = err

    if group.group_exists():
        info = group.group_info()
        result['system'] = group.system
        result['gid'] = info[2]

    module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
pacav69/namebench | nb_third_party/dns/rdtypes/nsbase.py | 248 | 2995 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""NS-like base classes."""
import cStringIO
import dns.exception
import dns.rdata
import dns.name
class NSBase(dns.rdata.Rdata):
    """Base class for rdata that is like an NS record.

    @ivar target: the target name of the rdata
    @type target: dns.name.Name object"""

    __slots__ = ['target']

    def __init__(self, rdclass, rdtype, target):
        super(NSBase, self).__init__(rdclass, rdtype)
        self.target = target

    def to_text(self, origin=None, relativize=True, **kw):
        # Render the target, relativized against origin when requested.
        return str(self.target.choose_relativity(origin, relativize))

    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        name = tok.get_name().choose_relativity(origin, relativize)
        tok.get_eol()
        return cls(rdclass, rdtype, name)

    def to_wire(self, file, compress=None, origin=None):
        self.target.to_wire(file, compress, origin)

    def to_digestable(self, origin=None):
        return self.target.to_digestable(origin)

    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        (target, cused) = dns.name.from_wire(wire[: current + rdlen],
                                             current)
        # The embedded name must consume the entire rdata.
        if cused != rdlen:
            raise dns.exception.FormError
        if origin is not None:
            target = target.relativize(origin)
        return cls(rdclass, rdtype, target)

    def choose_relativity(self, origin=None, relativize=True):
        self.target = self.target.choose_relativity(origin, relativize)

    def _cmp(self, other):
        # Python 2 three-way comparison on the target names.
        return cmp(self.target, other.target)
class UncompressedNS(NSBase):
    """Base class for rdata that is like an NS record, but whose name
    is not compressed when converted to DNS wire format, and whose
    digestable form is not downcased."""

    def to_wire(self, file, compress=None, origin=None):
        # Force compress=None so the name is always written uncompressed.
        super(UncompressedNS, self).to_wire(file, None, origin)

    def to_digestable(self, origin=None):
        buf = cStringIO.StringIO()
        self.to_wire(buf, None, origin)
        return buf.getvalue()
| apache-2.0 |
benedeku/HolaMundo | androguard/core/api_specific_resources/aosp_permissions/aosp_permissions_api22.py | 15 | 109292 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#################################################
### Extracted from platform version: 5.1.1
#################################################
AOSP_PERMISSIONS = {
'android.permission.REMOTE_AUDIO_PLAYBACK': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.DUMP': {
'permissionGroup': 'android.permission-group.DEVELOPMENT_TOOLS',
'description':
'Allows the app to retrieve internal state of the system. Malicious apps may retrieve a wide variety of private and secure information that they should never normally need.',
'protectionLevel': 'signature|system|development',
'label': 'retrieve system internal state'
},
'android.permission.BODY_SENSORS': {
'permissionGroup': 'android.permission-group.PERSONAL_INFO',
'description':
'Allows the app to access data from sensors that monitor your physical condition, such as your heart rate.',
'protectionLevel': '',
'label': 'body sensors (like heart rate monitors)'
},
'android.permission.READ_SOCIAL_STREAM': {
'permissionGroup': 'android.permission-group.SOCIAL_INFO',
'description':
'Allows the app to access and sync social updates from you and your friends. Be careful when sharing information -- this allows the app to read communications between you and your friends on social networks, regardless of confidentiality. Note: this permission may not be enforced on all social networks.',
'protectionLevel': 'dangerous',
'label': 'read your social stream'
},
'android.permission.MODIFY_AUDIO_ROUTING': {
'permissionGroup': '',
'description':
'Allows the app to directly control audio routing and override audio policy decisions.',
'protectionLevel': 'signature|system',
'label': 'Audio Routing'
},
'android.permission.READ_NETWORK_USAGE_HISTORY': {
'permissionGroup': '',
'description':
'Allows the app to read historical network usage for specific networks and apps.',
'protectionLevel': 'signature|system',
'label': 'read historical network usage'
},
'android.permission.BIND_DIRECTORY_SEARCH': {
'permissionGroup': 'android.permission-group.PERSONAL_INFO',
'description': '',
'protectionLevel': 'signature|system',
'label': ''
},
'android.permission.INTERNET': {
'permissionGroup': 'android.permission-group.NETWORK',
'description':
'Allows the app to create network sockets and use custom network protocols. The browser and other applications provide means to send data to the internet, so this permission is not required to send data to the internet.',
'protectionLevel': 'dangerous',
'label': 'full network access'
},
'android.permission.HARDWARE_TEST': {
'permissionGroup': 'android.permission-group.HARDWARE_CONTROLS',
'description':
'Allows the app to control various peripherals for the purpose of hardware testing.',
'protectionLevel': 'signature',
'label': 'test hardware'
},
'android.permission.START_TASKS_FROM_RECENTS': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to use an ActivityManager.RecentTaskInfo object to launch a defunct task that was returned from ActivityManager.getRecentTaskList().',
'protectionLevel': 'signature|system',
'label': 'start a task from recents'
},
'android.permission.ACCESS_DOWNLOAD_MANAGER_ADVANCED': {
'permissionGroup': '',
'description':
'Allows the app to access the download manager\'s advanced functions. Malicious apps can use this to disrupt downloads and access private information.',
'protectionLevel': 'signatureOrSystem',
'label': 'Advanced download manager functions.'
},
'android.permission.REMOVE_DRM_CERTIFICATES': {
'permissionGroup': '',
'description':
'Allows an application to remove DRM certficates. Should never be needed for normal apps.',
'protectionLevel': 'signature|system',
'label': 'remove DRM certificates'
},
'com.android.launcher.permission.INSTALL_SHORTCUT': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows an application to add Homescreen shortcuts without user intervention.',
'protectionLevel': 'dangerous',
'label': 'install shortcuts'
},
'android.permission.BIND_TV_INPUT': {
'permissionGroup': '',
'description':
'Allows the holder to bind to the top-level interface of a TV input. Should never be needed for normal apps.',
'protectionLevel': 'signature|system',
'label': 'bind to a TV input'
},
'android.permission.BIND_VPN_SERVICE': {
'permissionGroup': '',
'description':
'Allows the holder to bind to the top-level interface of a Vpn service. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'bind to a VPN service'
},
'com.android.voicemail.permission.READ_VOICEMAIL': {
'permissionGroup': 'android.permission-group.VOICEMAIL',
'description': 'Allows the app to read your voicemails.',
'protectionLevel': 'system|signature',
'label': 'read voicemail'
},
'android.permission.REGISTER_CONNECTION_MANAGER': {
'permissionGroup': 'android.permission-group.PHONE_CALLS',
'description': 'Allows the app to manage telecom connections.',
'protectionLevel': 'system|signature',
'label': 'manage telecom connections'
},
'android.permission.READ_SEARCH_INDEXABLES': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': '',
'protectionLevel': 'signature|system',
'label': ''
},
'android.permission.BIND_INPUT_METHOD': {
'permissionGroup': '',
'description':
'Allows the holder to bind to the top-level interface of an input method. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'bind to an input method'
},
'android.permission.ACCESS_CACHE_FILESYSTEM': {
'permissionGroup': '',
'description': 'Allows the app to read and write the cache filesystem.',
'protectionLevel': 'signature|system',
'label': 'access the cache filesystem'
},
'android.permission.DOWNLOAD_CACHE_NON_PURGEABLE': {
'permissionGroup': '',
'description':
'Allows the app to download files to the download cache, which can\'t be automatically deleted when the download manager needs more space.',
'protectionLevel': 'signatureOrSystem',
'label': 'Reserve space in the download cache'
},
'android.permission.CONFIGURE_WIFI_DISPLAY': {
'permissionGroup': '',
'description':
'Allows the app to configure and connect to Wifi displays.',
'protectionLevel': 'signature',
'label': 'configure Wifi displays'
},
'com.android.gallery3d.permission.GALLERY_PROVIDER': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signatureOrSystem',
'label': ''
},
'com.android.permission.WHITELIST_BLUETOOTH_DEVICE': {
'permissionGroup': '',
'description':
'Allows the app to temporarily whitelist a Bluetooth device, allowing that device to send files to this device without user confirmation.',
'protectionLevel': 'signature',
'label': 'Whitelist bluetooth device access.'
},
'android.permission.READ_CELL_BROADCASTS': {
'permissionGroup': 'android.permission-group.MESSAGES',
'description':
'Allows the app to read cell broadcast messages received by your device. Cell broadcast alerts are delivered in some locations to warn you of emergency situations. Malicious apps may interfere with the performance or operation of your device when an emergency cell broadcast is received.',
'protectionLevel': 'dangerous',
'label': 'read cell broadcast messages'
},
'android.permission.BIND_DEVICE_ADMIN': {
'permissionGroup': '',
'description':
'Allows the holder to send intents to a device administrator. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'interact with a device admin'
},
'android.permission.FRAME_STATS': {
'permissionGroup': '',
'description':
'Allows an application to collect frame statistics. Malicious apps may observe the frame statistics of windows from other apps.',
'protectionLevel': 'signature',
'label': 'retrieve frame statistics'
},
'com.android.providers.tv.permission.ACCESS_ALL_EPG_DATA': {
'permissionGroup': '',
'description':
'Allows the app to read and write all TV channel/program data stored on your device.',
'protectionLevel': 'signatureOrSystem',
'label': 'access all TV channel/program information'
},
'android.permission.WRITE_SECURE_SETTINGS': {
'permissionGroup': 'android.permission-group.DEVELOPMENT_TOOLS',
'description':
'Allows the app to modify the system\'s secure settings data. Not for use by normal apps.',
'protectionLevel': 'signature|system|development',
'label': 'modify secure system settings'
},
'android.permission.MANAGE_DOCUMENTS': {
'permissionGroup': 'android.permission-group.STORAGE',
'description': 'Allows the app to manage document storage.',
'protectionLevel': 'signature',
'label': 'manage document storage'
},
'android.permission.SYSTEM_ALERT_WINDOW': {
'permissionGroup': 'android.permission-group.DISPLAY',
'description':
'Allows the app to draw on top of other applications or parts of the user interface. They may interfere with your use of the interface in any application, or change what you think you are seeing in other applications.',
'protectionLevel': 'dangerous',
'label': 'draw over other apps'
},
'com.android.cts.permissionNotUsedWithSignature': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.READ_SYNC_STATS': {
'permissionGroup': 'android.permission-group.SYNC_SETTINGS',
'description':
'Allows an app to read the sync stats for an account, including the history of sync events and how much data is synced.',
'protectionLevel': 'normal',
'label': 'read sync statistics'
},
'android.permission.START_ANY_ACTIVITY': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to start any activity, regardless of permission protection or exported state.',
'protectionLevel': 'signature',
'label': 'start any activity'
},
'android.permission.AUTHENTICATE_ACCOUNTS': {
'permissionGroup': 'android.permission-group.ACCOUNTS',
'description':
'Allows the app to use the account authenticator capabilities of the AccountManager, including creating accounts and getting and setting their passwords.',
'protectionLevel': 'dangerous',
'label': 'create accounts and set passwords'
},
'test_permission': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'normal',
'label': ''
},
'android.permission.MODIFY_PHONE_STATE': {
'permissionGroup': 'android.permission-group.PHONE_CALLS',
'description':
'Allows the app to control the phone features of the device. An app with this permission can switch networks, turn the phone radio on and off and the like without ever notifying you.',
'protectionLevel': 'signature|system',
'label': 'modify phone state'
},
'android.permission.LAUNCH_TRUST_AGENT_SETTINGS': {
'permissionGroup': '',
'description':
'Allows an application to launch an activity that changes the trust agent behavior.',
'protectionLevel': 'signatureOrSystem',
'label': 'Launch trust agent settings menu.'
},
'android.permission.CAPTURE_SECURE_VIDEO_OUTPUT': {
'permissionGroup': '',
'description':
'Allows the app to capture and redirect secure video output.',
'protectionLevel': 'signature|system',
'label': 'capture secure video output'
},
'com.android.launcher.permission.PRELOAD_WORKSPACE': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': '',
'protectionLevel': 'signatureOrSystem',
'label': ''
},
'android.permission.BIND_CONNECTION_SERVICE': {
'permissionGroup': 'android.permission-group.PHONE_CALLS',
'description':
'Allows the app to interact with telephony services to make/receive calls.',
'protectionLevel': 'system|signature',
'label': 'interact with telephony services'
},
'android.permission.WRITE_PROFILE': {
'permissionGroup': 'android.permission-group.PERSONAL_INFO',
'description':
'Allows the app to change or add to personal profile information stored on your device, such as your name and contact information. This means the app can identify you and may send your profile information to others.',
'protectionLevel': 'dangerous',
'label': 'modify your own contact card'
},
'android.permission.BIND_NFC_SERVICE': {
'permissionGroup': '',
'description':
'Allows the holder to bind to applications that are emulating NFC cards. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'bind to NFC service'
},
'android.permission.GRANT_REVOKE_PERMISSIONS': {
'permissionGroup': '',
'description':
'Allows an application to grant or revoke specific permissions for it or other applications. Malicious applications may use this to access features you have not granted them.',
'protectionLevel': 'signature',
'label': 'grant or revoke permissions'
},
'android.permission.CAMERA': {
'permissionGroup': 'android.permission-group.CAMERA',
'description':
'Allows the app to take pictures and videos with the camera. This permission allows the app to use the camera at any time without your confirmation.',
'protectionLevel': 'dangerous',
'label': 'take pictures and videos'
},
'android.permission.START_PRINT_SERVICE_CONFIG_ACTIVITY': {
'permissionGroup': '',
'description':
'Allows the holder to start the configuration activities of a print service. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'start print service configuration activities'
},
'android.permission.SET_WALLPAPER_HINTS': {
'permissionGroup': 'android.permission-group.WALLPAPER',
'description': 'Allows the app to set the system wallpaper size hints.',
'protectionLevel': 'normal',
'label': 'adjust your wallpaper size'
},
'android.permission.INVOKE_CARRIER_SETUP': {
'permissionGroup': '',
'description':
'Allows the holder to invoke the carrier-provided configuration app. Should never be needed for normal apps.',
'protectionLevel': 'signature|system',
'label': 'invoke the carrier-provided configuration app'
},
'android.permission.BIND_NOTIFICATION_LISTENER_SERVICE': {
'permissionGroup': '',
'description':
'Allows the holder to bind to the top-level interface of a notification listener service. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'bind to a notification listener service'
},
'android.permission.BIND_CARRIER_MESSAGING_SERVICE': {
'permissionGroup': '',
'description':
'Allows the holder to bind to the top-level interface of a carrier messaging service. Should never be needed for normal apps.',
'protectionLevel': 'signature|system',
'label': 'bind to a carrier messaging service'
},
'android.permission.CONTROL_LOCATION_UPDATES': {
'permissionGroup': '',
'description':
'Allows the app to enable/disable location update notifications from the radio. Not for use by normal apps.',
'protectionLevel': 'signature|system',
'label': 'control location update notifications'
},
'android.permission.REBOOT': {
'permissionGroup': '',
'description': 'Allows the app to force the phone to reboot.',
'protectionLevel': 'signature|system',
'label': 'force phone reboot'
},
'android.permission.BROADCAST_WAP_PUSH': {
'permissionGroup': 'android.permission-group.MESSAGES',
'description':
'Allows the app to broadcast a notification that a WAP PUSH message has been received. Malicious apps may use this to forge MMS message receipt or to silently replace the content of any webpage with malicious variants.',
'protectionLevel': 'signature',
'label': 'send WAP-PUSH-received broadcast'
},
'android.permission.ACCESS_NETWORK_STATE': {
'permissionGroup': 'android.permission-group.NETWORK',
'description':
'Allows the app to view information about network connections such as which networks exist and are connected.',
'protectionLevel': 'normal',
'label': 'view network connections'
},
'android.permission.CAPTURE_TV_INPUT': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signatureOrSystem',
'label': ''
},
'android.permission.CHANGE_WIMAX_STATE': {
'permissionGroup': 'android.permission-group.NETWORK',
'description':
'Allows the app to connect the phone to and disconnect the phone from WiMAX networks.',
'protectionLevel': 'dangerous',
'label': 'Change WiMAX state'
},
'com.foo.mypermission': {
'permissionGroup': '',
'description': 'MyActivity',
'protectionLevel': '',
'label': 'MyActivity'
},
'android.permission.MOUNT_FORMAT_FILESYSTEMS': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': 'Allows the app to format removable storage.',
'protectionLevel': 'system|signature',
'label': 'erase SD Card'
},
'android.permission.SCORE_NETWORKS': {
'permissionGroup': 'android.permission-group.NETWORK',
'description':
'Allows the app to rank networks and influence which networks the phone should prefer.',
'protectionLevel': 'signature|system',
'label': 'score networks'
},
'android.permission.BIND_APPWIDGET': {
'permissionGroup': 'android.permission-group.PERSONAL_INFO',
'description':
'Allows the app to tell the system which widgets can be used by which app. An app with this permission can give access to personal data to other apps. Not for use by normal apps.',
'protectionLevel': 'signature|system',
'label': 'choose widgets'
},
'com.android.frameworks.coretests.permission.TEST_GRANTED': {
'permissionGroup': '',
'description':
'Used for running unit tests, for testing operations where we have the permission.',
'protectionLevel': 'normal',
'label': 'Test Granted'
},
'android.permission.ASEC_CREATE': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': 'Allows the app to create internal storage.',
'protectionLevel': 'signature',
'label': 'create internal storage'
},
'android.permission.MANAGE_CA_CERTIFICATES': {
'permissionGroup': '',
'description':
'Allows the app to install and uninstall CA certificates as trusted credentials.',
'protectionLevel': 'signature|system',
'label': 'manage trusted credentials'
},
'android.permission.INSTALL_LOCATION_PROVIDER': {
'permissionGroup': '',
'description':
'Create mock location sources for testing or install a new location provider. This allows the app to override the location and/or status returned by other location sources such as GPS or location providers.',
'protectionLevel': 'signature|system',
'label': 'permission to install a location provider'
},
'android.permission.LOOP_RADIO': {
'permissionGroup': 'android.permission-group.NETWORK',
'description': '',
'protectionLevel': 'signature|system',
'label': ''
},
'android.permission.TRANSMIT_IR': {
'permissionGroup': 'android.permission-group.AFFECTS_BATTERY',
'description':
'Allows the app to use the phone\'s infrared transmitter.',
'protectionLevel': 'normal',
'label': 'transmit infrared'
},
'com.android.browser.permission.WRITE_HISTORY_BOOKMARKS': {
'permissionGroup': 'android.permission-group.BOOKMARKS',
'description':
'Allows the app to modify the Browser\'s history or bookmarks stored on your phone. This may allow the app to erase or modify Browser data. Note: this permission may note be enforced by third-party browsers or other applications with web browsing capabilities.',
'protectionLevel': 'dangerous',
'label': 'write web bookmarks and history'
},
'android.permission.WRITE_SETTINGS': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to modify the system\'s settings data. Malicious apps may corrupt your system\'s configuration.',
'protectionLevel': 'normal',
'label': 'modify system settings'
},
'android.permission.MANAGE_APP_TOKENS': {
'permissionGroup': '',
'description':
'Allows the app to create and manage their own tokens, bypassing their normal Z-ordering. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'manage app tokens'
},
'android.permission.RESTART_PACKAGES': {
'permissionGroup': 'android.permission-group.APP_INFO',
'description':
'Allows the app to end background processes of other apps. This may cause other apps to stop running.',
'protectionLevel': 'normal',
'label': 'close other apps'
},
'android.permission.ACCESS_DRM_CERTIFICATES': {
'permissionGroup': '',
'description':
'Allows an application to provision and use DRM certficates. Should never be needed for normal apps.',
'protectionLevel': 'signature|system',
'label': 'access DRM certificates'
},
'android.permission.PACKAGE_VERIFICATION_AGENT': {
'permissionGroup': '',
'description': 'Allows the app to verify a package is installable.',
'protectionLevel': 'signature|system',
'label': 'verify packages'
},
'android.permission.CONFIRM_FULL_BACKUP': {
'permissionGroup': '',
'description':
'Allows the app to launch the full backup confirmation UI. Not to be used by any app.',
'protectionLevel': 'signature',
'label': 'confirm a full backup or restore operation'
},
'com.android.smspush.WAPPUSH_MANAGER_BIND': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signatureOrSystem',
'label': ''
},
'com.android.gallery3d.filtershow.permission.READ': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.BIND_PRINT_SERVICE': {
'permissionGroup': '',
'description':
'Allows the holder to bind to the top-level interface of a print service. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'bind to a print service'
},
'com.android.providers.tv.permission.ACCESS_WATCHED_PROGRAMS': {
'permissionGroup': '',
'description':
'Allows the app to read and write the list of TV programs you watched. Malicious apps may collect your private TV watch history.',
'protectionLevel': 'signatureOrSystem',
'label': 'access watched TV program information'
},
'android.permission.ASEC_ACCESS': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': 'Allows the app to get information on internal storage.',
'protectionLevel': 'signature',
'label': 'get information on internal storage'
},
'android.permission.USE_SIP': {
'permissionGroup': 'android.permission-group.PHONE_CALLS',
'description': 'Allows the app to make and receive SIP calls.',
'protectionLevel': 'dangerous',
'label': 'make/receive SIP calls'
},
'android.permission.RECEIVE_DATA_ACTIVITY_CHANGE': {
'permissionGroup': 'android.permission-group.NETWORK',
'description': '',
'protectionLevel': 'signature|system',
'label': ''
},
'android.permission.NET_ADMIN': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.CHANGE_BACKGROUND_DATA_SETTING': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to change the background data usage setting.',
'protectionLevel': 'signature',
'label': 'change background data usage setting'
},
'android.permission.PROCESS_CALLLOG_INFO': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signature|system',
'label': ''
},
'android.permission.CAPTURE_AUDIO_HOTWORD': {
'permissionGroup': '',
'description':
'Allows the app to capture audio for Hotword detection. The capture can happen in the background but does not prevent other audio capture (e.g. Camcorder).',
'protectionLevel': 'signature|system',
'label': 'Hotword detection'
},
'android.permission.NFC': {
'permissionGroup': 'android.permission-group.NETWORK',
'description':
'Allows the app to communicate with Near Field Communication (NFC) tags, cards, and readers.',
'protectionLevel': 'dangerous',
'label': 'control Near Field Communication'
},
'android.permission.SEND_SMS': {
'permissionGroup': 'android.permission-group.MESSAGES',
'description':
'Allows the app to send SMS messages. This may result in unexpected charges. Malicious apps may cost you money by sending messages without your confirmation.',
'protectionLevel': 'dangerous',
'label': 'send SMS messages'
},
'android.permission.INTERACT_ACROSS_USERS_FULL': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': 'Allows all possible interactions across users.',
'protectionLevel': 'signature',
'label': 'full license to interact across users'
},
'android.permission.CLEAR_APP_USER_DATA': {
'permissionGroup': '',
'description': 'Allows the app to clear user data.',
'protectionLevel': 'signature',
'label': 'delete other apps\' data'
},
'android.permission.ACCESS_MOCK_LOCATION': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Create mock location sources for testing or install a new location provider. This allows the app to override the location and/or status returned by other location sources such as GPS or location providers.',
'protectionLevel': 'dangerous',
'label': 'mock location sources for testing'
},
'android.permission.CAPTURE_AUDIO_OUTPUT': {
'permissionGroup': '',
'description': 'Allows the app to capture and redirect audio output.',
'protectionLevel': 'signature|system',
'label': 'capture audio output'
},
'android.permission.GET_DETAILED_TASKS': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to retrieve detailed information about currently and recently running tasks. Malicious apps may discover private information about other apps.',
'protectionLevel': 'signature',
'label': 'retrieve details of running apps'
},
'android.permission.ACCESS_ALL_DOWNLOADS': {
'permissionGroup': '',
'description':
'Allows the app to view and modify all downloads initiated by any app on the system.',
'protectionLevel': 'signature',
'label': 'Access all system downloads'
},
'android.permission.STATUS_BAR': {
'permissionGroup': '',
'description':
'Allows the app to disable the status bar or add and remove system icons.',
'protectionLevel': 'signature|system',
'label': 'disable or modify status bar'
},
'android.permission.TV_INPUT_HARDWARE': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signatureOrSystem',
'label': ''
},
'android.permission.MEDIA_CONTENT_CONTROL': {
'permissionGroup': '',
'description':
'Allows the app to control media playback and access the media information (title, author...).',
'protectionLevel': 'signature|system',
'label': 'control media playback and metadata access'
},
'android.permission.DOWNLOAD_WITHOUT_NOTIFICATION': {
'permissionGroup': 'android.permission-group.NETWORK',
'description':
'Allows the app to download files through the download manager without any notification being shown to the user.',
'protectionLevel': 'normal',
'label': 'download files without notification'
},
'android.permission.RECOVERY': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows an application to interact with the recovery system and system updates.',
'protectionLevel': 'signature|system',
'label': 'Interact with update and recovery system'
},
'com.android.email.permission.READ_ATTACHMENT': {
'permissionGroup': 'android.permission-group.MESSAGES',
'description': 'Allows the app to read your email attachments.',
'protectionLevel': 'dangerous',
'label': 'Read email attachments'
},
'android.permission.NET_TUNNELING': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.SET_TIME': {
'permissionGroup': '',
'description': 'Allows the app to change the phone\'s clock time.',
'protectionLevel': 'signature|system',
'label': 'set time'
},
'android.permission.MANAGE_MEDIA_PROJECTION': {
'permissionGroup': '',
'description':
'Allows an application to manage media projection sessions. These sessions can provide applications the ability to capture display and audio contents. Should never be needed by normal apps.',
'protectionLevel': 'signature',
'label': 'Manage media projection sessions'
},
'android.permission.CALL_PHONE': {
'permissionGroup': 'android.permission-group.PHONE_CALLS',
'description':
'Allows the app to call phone numbers without your intervention. This may result in unexpected charges or calls. Note that this doesn\'t allow the app to call emergency numbers. Malicious apps may cost you money by making calls without your confirmation.',
'protectionLevel': 'dangerous',
'label': 'directly call phone numbers'
},
'android.permission.FLASHLIGHT': {
'permissionGroup': 'android.permission-group.AFFECTS_BATTERY',
'description': 'Allows the app to control the flashlight.',
'protectionLevel': 'normal',
'label': 'control flashlight'
},
'android.permission.READ_PHONE_STATE': {
'permissionGroup': 'android.permission-group.PHONE_CALLS',
'description':
'Allows the app to access the phone features of the device. This permission allows the app to determine the phone number and device IDs, whether a call is active, and the remote number connected by a call.',
'protectionLevel': 'dangerous',
'label': 'read phone status and identity'
},
'android.permission.MANAGE_DEVICE_ADMINS': {
'permissionGroup': '',
'description':
'Allows the holder to add or remove active device administrators. Should never be needed for normal apps.',
'protectionLevel': 'signature|system',
'label': 'add or remove a device admin'
},
'com.android.voicemail.permission.ADD_VOICEMAIL': {
'permissionGroup': 'android.permission-group.VOICEMAIL',
'description':
'Allows the app to add messages to your voicemail inbox.',
'protectionLevel': 'dangerous',
'label': 'add voicemail'
},
'android.permission.REAL_GET_TASKS': {
'permissionGroup': 'android.permission-group.APP_INFO',
'description':
'Allows the app to retrieve information about currently and recently running tasks. This may allow the app to discover information about which applications are used on the device.',
'protectionLevel': 'signature|system',
'label': 'retrieve running apps'
},
'android.permission.KILL_BACKGROUND_PROCESSES': {
'permissionGroup': 'android.permission-group.APP_INFO',
'description':
'Allows the app to end background processes of other apps. This may cause other apps to stop running.',
'protectionLevel': 'normal',
'label': 'close other apps'
},
'android.permission.RECEIVE_MMS': {
'permissionGroup': 'android.permission-group.MESSAGES',
'description':
'Allows the app to receive and process MMS messages. This means the app could monitor or delete messages sent to your device without showing them to you.',
'protectionLevel': 'dangerous',
'label': 'receive text messages (MMS)'
},
'android.permission.WAKE_LOCK': {
'permissionGroup': 'android.permission-group.AFFECTS_BATTERY',
'description':
'Allows the app to prevent the phone from going to sleep.',
'protectionLevel': 'normal',
'label': 'prevent phone from sleeping'
},
'android.permission.BIND_VOICE_INTERACTION': {
'permissionGroup': '',
'description':
'Allows the holder to bind to the top-level interface of a voice interaction service. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'bind to a voice interactor'
},
'android.permission.STATUS_BAR_SERVICE': {
'permissionGroup': '',
'description': 'Allows the app to be the status bar.',
'protectionLevel': 'signature',
'label': 'status bar'
},
'android.permission.DELETE_CACHE_FILES': {
'permissionGroup': '',
'description': 'Allows the app to delete cache files.',
'protectionLevel': 'signature|system',
'label': 'delete other apps\' caches'
},
'android.permission.MODIFY_NETWORK_ACCOUNTING': {
'permissionGroup': '',
'description':
'Allows the app to modify how network usage is accounted against apps. Not for use by normal apps.',
'protectionLevel': 'signature|system',
'label': 'modify network usage accounting'
},
'android.permission.GET_ACCOUNTS': {
'permissionGroup': 'android.permission-group.ACCOUNTS',
'description':
'Allows the app to get the list of accounts known by the phone. This may include any accounts created by applications you have installed.',
'protectionLevel': 'normal',
'label': 'find accounts on the device'
},
'android.permission.CHANGE_NETWORK_STATE': {
'permissionGroup': 'android.permission-group.NETWORK',
'description':
'Allows the app to change the state of network connectivity.',
'protectionLevel': 'normal',
'label': 'change network connectivity'
},
'android.permission.ACCESS_MTP': {
'permissionGroup': 'android.permission-group.HARDWARE_CONTROLS',
'description':
'Allows access to the kernel MTP driver to implement the MTP USB protocol.',
'protectionLevel': 'signature|system',
'label': 'implement MTP protocol'
},
'android.permission.DISABLE_KEYGUARD': {
'permissionGroup': 'android.permission-group.SCREENLOCK',
'description':
'Allows the app to disable the keylock and any associated password security. For example, the phone disables the keylock when receiving an incoming phone call, then re-enables the keylock when the call is finished.',
'protectionLevel': 'dangerous',
'label': 'disable your screen lock'
},
'android.permission.BIND_PACKAGE_VERIFIER': {
'permissionGroup': '',
'description':
'Allows the holder to make requests of package verifiers. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'bind to a package verifier'
},
'com.android.launcher.permission.UNINSTALL_SHORTCUT': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the application to remove Homescreen shortcuts without user intervention.',
'protectionLevel': 'dangerous',
'label': 'uninstall shortcuts'
},
'android.permission.USE_CREDENTIALS': {
'permissionGroup': 'android.permission-group.ACCOUNTS',
'description': 'Allows the app to request authentication tokens.',
'protectionLevel': 'dangerous',
'label': 'use accounts on the device'
},
'android.permission.WRITE_MEDIA_STORAGE': {
'permissionGroup': 'android.permission-group.STORAGE',
'description':
'Allows the app to modify the contents of the internal media storage.',
'protectionLevel': 'signature|system',
'label': 'modify/delete internal media storage contents'
},
'android.permission.ACCESS_COARSE_LOCATION': {
'permissionGroup': 'android.permission-group.LOCATION',
'description':
'Allows the app to get your approximate location. This location is derived by location services using network location sources such as cell towers and Wi-Fi. These location services must be turned on and available to your device for the app to use them. Apps may use this to determine approximately where you are.',
'protectionLevel': 'dangerous',
'label': 'approximate location (network-based)'
},
'android.permission.BIND_KEYGUARD_APPWIDGET': {
'permissionGroup': 'android.permission-group.PERSONAL_INFO',
'description': '',
'protectionLevel': 'signature|system',
'label': ''
},
'android.permission.BIND_TRUST_AGENT': {
'permissionGroup': '',
'description':
'Allows an application to bind to a trust agent service.',
'protectionLevel': 'signature',
'label': 'Bind to a trust agent service'
},
'android.permission.CONTROL_VPN': {
'permissionGroup': '',
'description':
'Allows the app to control low-level features of Virtual Private Networks.',
'protectionLevel': 'signature|system',
'label': 'control Virtual Private Networks'
},
'android.permission.BLUETOOTH_ADMIN': {
'permissionGroup': 'android.permission-group.BLUETOOTH_NETWORK',
'description':
'Allows the app to configure the local Bluetooth phone, and to discover and pair with remote devices.',
'protectionLevel': 'dangerous',
'label': 'access Bluetooth settings'
},
'android.permission.PERSISTENT_ACTIVITY': {
'permissionGroup': 'android.permission-group.APP_INFO',
'description':
'Allows the app to make parts of itself persistent in memory. This can limit memory available to other apps slowing down the phone.',
'protectionLevel': 'normal',
'label': 'make app always run'
},
'android.permission.TRUST_LISTENER': {
'permissionGroup': '',
'description':
'Allows an application to listen for changes in trust state.',
'protectionLevel': 'signature',
'label': 'Listen to trust state changes.'
},
'android.permission.CARRIER_FILTER_SMS': {
'permissionGroup': 'android.permission-group.MESSAGES',
'description': '',
'protectionLevel': 'signature|system',
'label': ''
},
'android.permission.BIND_TEXT_SERVICE': {
'permissionGroup': '',
'description':
'Allows the holder to bind to the top-level interface of a text service(e.g. SpellCheckerService). Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'bind to a text service'
},
'android.permission.RECEIVE_WAP_PUSH': {
'permissionGroup': 'android.permission-group.MESSAGES',
'description':
'Allows the app to receive and process WAP messages. This permission includes the ability to monitor or delete messages sent to you without showing them to you.',
'protectionLevel': 'dangerous',
'label': 'receive text messages (WAP)'
},
'com.foo.mypermission2': {
'permissionGroup': '',
'description': 'MyActivity',
'protectionLevel': '',
'label': 'MyActivity'
},
'android.permission.SET_WALLPAPER': {
'permissionGroup': 'android.permission-group.WALLPAPER',
'description': 'Allows the app to set the system wallpaper.',
'protectionLevel': 'normal',
'label': 'set wallpaper'
},
'android.permission.PROVIDE_TRUST_AGENT': {
'permissionGroup': '',
'description': 'Allows an application to provide a trust agent.',
'protectionLevel': 'signatureOrSystem',
'label': 'Provide a trust agent.'
},
'android': {
'permissionGroup': '',
'description': '',
'protectionLevel': '',
'label': ''
},
'android.permission.BIND_PRINT_SPOOLER_SERVICE': {
'permissionGroup': '',
'description':
'Allows the holder to bind to the top-level interface of a print spooler service. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'bind to a print spooler service'
},
'android.permission.GET_PACKAGE_SIZE': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to retrieve its code, data, and cache sizes',
'protectionLevel': 'normal',
'label': 'measure app storage space'
},
'android.permission.BIND_REMOTE_DISPLAY': {
'permissionGroup': '',
'description':
'Allows the holder to bind to the top-level interface of a remote display. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'bind to a remote display'
},
'android.permission.READ_EXTERNAL_STORAGE': {
'permissionGroup': 'android.permission-group.STORAGE',
'description': 'Allows the app to read the contents of your SD card.',
'protectionLevel': 'normal',
'label': 'read the contents of your SD card'
},
'android.permission.SET_KEYBOARD_LAYOUT': {
'permissionGroup': '',
'description':
'Allows the app to change the keyboard layout. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'change keyboard layout'
},
'com.android.frameworks.coretests.SIGNATURE': {
'permissionGroup': 'android.permission-group.COST_MONEY',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.INTERNAL_SYSTEM_WINDOW': {
'permissionGroup': '',
'description':
'Allows the app to create windows that are intended to be used by the internal system user interface. Not for use by normal apps.',
'protectionLevel': 'signature',
'label': 'display unauthorized windows'
},
'com.android.cts.intent.sender.permission.SAMPLE': {
'permissionGroup': '',
'description': '',
'protectionLevel': '',
'label': ''
},
'com.android.launcher3.permission.RECEIVE_FIRST_LOAD_BROADCAST': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signatureOrSystem',
'label': ''
},
'android.permission.PERFORM_CDMA_PROVISIONING': {
'permissionGroup': '',
'description':
'Allows the app to start CDMA provisioning. Malicious apps may unnecessarily start CDMA provisioning.',
'protectionLevel': 'signature|system',
'label': 'directly start CDMA phone setup'
},
'com.android.browser.permission.PRELOAD': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signatureOrSystem',
'label': 'Preload results'
},
'android.permission.MODIFY_AUDIO_SETTINGS': {
'permissionGroup': 'android.permission-group.AUDIO_SETTINGS',
'description':
'Allows the app to modify global audio settings such as volume and which speaker is used for output.',
'protectionLevel': 'normal',
'label': 'change your audio settings'
},
'android.permission.CONTROL_WIFI_DISPLAY': {
'permissionGroup': '',
'description':
'Allows the app to control low-level features of Wifi displays.',
'protectionLevel': 'signature',
'label': 'control Wifi displays'
},
'android.permission.SET_ACTIVITY_WATCHER': {
'permissionGroup': '',
'description':
'Allows the app to monitor and control how the system launches activities. Malicious apps may completely compromise the system. This permission is only needed for development, never for normal use.',
'protectionLevel': 'signature',
'label': 'monitor and control all app launching'
},
'com.android.frameworks.coretests.NORMAL': {
'permissionGroup': 'android.permission-group.COST_MONEY',
'description': '',
'protectionLevel': 'normal',
'label': ''
},
'android.permission.BROADCAST_NETWORK_PRIVILEGED': {
'permissionGroup': 'android.permission-group.NETWORK',
'description':
'Allows the app to send privileged network broadcasts. Never needed for normal apps.',
'protectionLevel': 'signature|system',
'label': 'send privileged network broadcasts'
},
'android.permission.COPY_PROTECTED_DATA': {
'permissionGroup': '',
'description': 'copy content',
'protectionLevel': 'signature',
'label': 'copy content'
},
'android.permission.RETRIEVE_WINDOW_TOKEN': {
'permissionGroup': '',
'description':
'Allows an application to retrieve the window token. Malicious apps may perfrom unauthorized interaction with the application window impersonating the system.',
'protectionLevel': 'signature',
'label': 'retrieve window token'
},
'com.android.cts.permissionWithSignature': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.PACKAGE_USAGE_STATS': {
'permissionGroup': '',
'description':
'Allows the app to modify collected component usage statistics. Not for use by normal apps.',
'protectionLevel': 'signature|development|appop',
'label': 'update component usage statistics'
},
'android.permission.RECEIVE_BLUETOOTH_MAP': {
'permissionGroup': 'android.permission-group.MESSAGES',
'description':
'Allows the app to receive and process Bluetooth MAP messages. This means the app could monitor or delete messages sent to your device without showing them to you.',
'protectionLevel': 'signature|system',
'label': 'receive Bluetooth messages (MAP)'
},
'android.permission.BROADCAST_CALLLOG_INFO': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signature|system',
'label': ''
},
'android.permission.CONTROL_INCALL_EXPERIENCE': {
'permissionGroup': 'android.permission-group.PHONE_CALLS',
'description': 'Allows the app to provide an in-call user experience.',
'protectionLevel': 'system|signature',
'label': 'provide an in-call user experience'
},
'android.permission.MOUNT_UNMOUNT_FILESYSTEMS': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to mount and unmount filesystems for removable storage.',
'protectionLevel': 'system|signature',
'label': 'access SD Card filesystem'
},
'android.permission.BIND_WALLPAPER': {
'permissionGroup': '',
'description':
'Allows the holder to bind to the top-level interface of a wallpaper. Should never be needed for normal apps.',
'protectionLevel': 'signature|system',
'label': 'bind to a wallpaper'
},
'android.permission.READ_DREAM_STATE': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': '',
'protectionLevel': 'signature|system',
'label': ''
},
'android.permission.NFC_HANDOVER_STATUS': {
'permissionGroup': '',
'description':
'Allows this application to receive information about current Android Beam transfers',
'protectionLevel': 'signature|system',
'label': 'Receive Android Beam transfer status'
},
'android.permission.FORCE_BACK': {
'permissionGroup': '',
'description':
'Allows the app to force any activity that is in the foreground to close and go back. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'force app to close'
},
'android.permission.READ_CALENDAR': {
'permissionGroup': 'android.permission-group.PERSONAL_INFO',
'description':
'Allows the app to read all calendar events stored on your phone, including those of friends or co-workers. This may allow the app to share or save your calendar data, regardless of confidentiality or sensitivity.',
'protectionLevel': 'dangerous',
'label': 'read calendar events plus confidential information'
},
'android.permission.DEVICE_POWER': {
'permissionGroup': '',
'description': 'Allows the app to turn the phone on or off.',
'protectionLevel': 'signature',
'label': 'power phone on or off'
},
'android.permission.SHUTDOWN': {
'permissionGroup': '',
'description':
'Puts the activity manager into a shutdown state. Does not perform a complete shutdown.',
'protectionLevel': 'signature|system',
'label': 'partial shutdown'
},
'android.os.cts.permission.TEST_GRANTED': {
'permissionGroup': '',
'description':
'Used for running CTS tests, for testing operations where we have the permission.',
'protectionLevel': 'normal',
'label': 'Test Granted'
},
'android.permission.CHANGE_CONFIGURATION': {
'permissionGroup': 'android.permission-group.DEVELOPMENT_TOOLS',
'description':
'Allows the app to change the current configuration, such as the locale or overall font size.',
'protectionLevel': 'signature|system|development',
'label': 'change system display settings'
},
'android.permission.READ_CONTACTS': {
'permissionGroup': 'android.permission-group.SOCIAL_INFO',
'description':
'Allows the app to read data about your contacts stored on your phone, including the frequency with which you\'ve called, emailed, or communicated in other ways with specific individuals. This permission allows apps to save your contact data, and malicious apps may share contact data without your knowledge.',
'protectionLevel': 'dangerous',
'label': 'read your contacts'
},
'android.permission.BIND_DREAM_SERVICE': {
'permissionGroup': '',
'description':
'Allows the holder to bind to the top-level interface of a dream service. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'bind to a dream service'
},
'android.permission.SEND_DOWNLOAD_COMPLETED_INTENTS': {
'permissionGroup': '',
'description':
'Allows the app to send notifications about completed downloads. Malicious apps can use this to confuse other apps that download files.',
'protectionLevel': 'signature',
'label': 'Send download notifications.'
},
'android.permission.READ_CALL_LOG': {
'permissionGroup': 'android.permission-group.SOCIAL_INFO',
'description':
'Allows the app to read your phone\'s call log, including data about incoming and outgoing calls. This permission allows apps to save your call log data, and malicious apps may share call log data without your knowledge.',
'protectionLevel': 'dangerous',
'label': 'read call log'
},
'android.permission.BLUETOOTH_PRIVILEGED': {
'permissionGroup': 'android.permission-group.BLUETOOTH_NETWORK',
'description':
'Allows the app to pair with remote devices without user interaction.',
'protectionLevel': 'system|signature',
'label': 'allow Bluetooth pairing by Application'
},
'android.permission.WRITE_CALL_LOG': {
'permissionGroup': 'android.permission-group.SOCIAL_INFO',
'description':
'Allows the app to modify your phone\'s call log, including data about incoming and outgoing calls. Malicious apps may use this to erase or modify your call log.',
'protectionLevel': 'dangerous',
'label': 'write call log'
},
'android.permission.CHANGE_WIFI_MULTICAST_STATE': {
'permissionGroup': 'android.permission-group.AFFECTS_BATTERY',
'description':
'Allows the app to receive packets sent to all devices on a Wi-Fi network using multicast addresses, not just your phone. It uses more power than the non-multicast mode.',
'protectionLevel': 'dangerous',
'label': 'allow Wi-Fi Multicast reception'
},
'android.permission.ACCESS_PDB_STATE': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.MODIFY_APPWIDGET_BIND_PERMISSIONS': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': '',
'protectionLevel': 'signature|system',
'label': ''
},
'android.permission.SET_TIME_ZONE': {
'permissionGroup': 'android.permission-group.SYSTEM_CLOCK',
'description': 'Allows the app to change the phone\'s time zone.',
'protectionLevel': 'normal',
'label': 'set time zone'
},
'android.permission.HDMI_CEC': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signatureOrSystem',
'label': ''
},
'android.permission.WRITE_SYNC_SETTINGS': {
'permissionGroup': 'android.permission-group.SYNC_SETTINGS',
'description':
'Allows an app to modify the sync settings for an account. For example, this can be used to enable sync of the People app with an account.',
'protectionLevel': 'normal',
'label': 'toggle sync on and off'
},
'android.permission.CRYPT_KEEPER': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signature|system',
'label': ''
},
'android.permission.READ_LOGS': {
'permissionGroup': 'android.permission-group.DEVELOPMENT_TOOLS',
'description':
'Allows the app to read from the system\'s various log files. This allows it to discover general information about what you are doing with the phone, potentially including personal or private information.',
'protectionLevel': 'signature|system|development',
'label': 'read sensitive log data'
},
'android.permission.WRITE_GSERVICES': {
'permissionGroup': '',
'description':
'Allows the app to modify the Google services map. Not for use by normal apps.',
'protectionLevel': 'signature|system',
'label': 'modify the Google services map'
},
'android.permission.SET_ORIENTATION': {
'permissionGroup': '',
'description':
'Allows the app to change the rotation of the screen at any time. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'change screen orientation'
},
'android.permission.BROADCAST_STICKY': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to send sticky broadcasts, which remain after the broadcast ends. Excessive use may make the phone slow or unstable by causing it to use too much memory.',
'protectionLevel': 'normal',
'label': 'send sticky broadcast'
},
'android.permission.FORCE_STOP_PACKAGES': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': 'Allows the app to forcibly stop other apps.',
'protectionLevel': 'signature|system',
'label': 'force stop other apps'
},
'com.android.frameworks.coretests.permission.TEST_DENIED': {
'permissionGroup': '',
'description':
'Used for running unit tests, for testing operations where we do not have the permission.',
'protectionLevel': 'normal',
'label': 'Test Denied'
},
'com.android.providers.tv.permission.READ_EPG_DATA': {
'permissionGroup': '',
'description':
'Allows the app to read the TV channel/program information stored on your device.',
'protectionLevel': 'dangerous',
'label': 'read TV channel/program information'
},
'android.permission.UPDATE_DEVICE_STATS': {
'permissionGroup': '',
'description':
'Allows the app to modify collected battery statistics. Not for use by normal apps.',
'protectionLevel': 'signature|system',
'label': 'modify battery statistics'
},
'android.permission.ACCESS_LOCATION_EXTRA_COMMANDS': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to access extra location provider commands. This may allow the app to interfere with the operation of the GPS or other location sources.',
'protectionLevel': 'normal',
'label': 'access extra location provider commands'
},
'android.permission.GET_TASKS': {
'permissionGroup': 'android.permission-group.APP_INFO',
'description':
'Allows the app to retrieve information about currently and recently running tasks. This may allow the app to discover information about which applications are used on the device.',
'protectionLevel': 'normal',
'label': 'retrieve running apps'
},
'android.permission.CHANGE_WIFI_STATE': {
'permissionGroup': 'android.permission-group.NETWORK',
'description':
'Allows the app to connect to and disconnect from Wi-Fi access points and to make changes to device configuration for Wi-Fi networks.',
'protectionLevel': 'dangerous',
'label': 'connect and disconnect from Wi-Fi'
},
'android.permission.RECEIVE_SMS': {
'permissionGroup': 'android.permission-group.MESSAGES',
'description':
'Allows the app to receive and process SMS messages. This means the app could monitor or delete messages sent to your device without showing them to you.',
'protectionLevel': 'dangerous',
'label': 'receive text messages (SMS)'
},
'android.permission.READ_PROFILE': {
'permissionGroup': 'android.permission-group.PERSONAL_INFO',
'description':
'Allows the app to read personal profile information stored on your device, such as your name and contact information. This means the app can identify you and may send your profile information to others.',
'protectionLevel': 'dangerous',
'label': 'read your own contact card'
},
'android.permission.ACCESS_NETWORK_CONDITIONS': {
'permissionGroup': '',
'description':
'Allows an application to listen for observations on network conditions. Should never be needed for normal apps.',
'protectionLevel': 'signature|system',
'label': 'listen for observations on network conditions'
},
'android.permission.ACCOUNT_MANAGER': {
'permissionGroup': 'android.permission-group.ACCOUNTS',
'description': 'Allows the app to make calls to AccountAuthenticators.',
'protectionLevel': 'signature',
'label': 'act as the AccountManagerService'
},
'android.permission.SET_ANIMATION_SCALE': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to change the global animation speed (faster or slower animations) at any time.',
'protectionLevel': 'signature|system|development',
'label': 'modify global animation speed'
},
'com.android.certinstaller.INSTALL_AS_USER': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.BLUETOOTH_STACK': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.SET_PROCESS_LIMIT': {
'permissionGroup': 'android.permission-group.DEVELOPMENT_TOOLS',
'description':
'Allows the app to control the maximum number of processes that will run. Never needed for normal apps.',
'protectionLevel': 'signature|system|development',
'label': 'limit number of running processes'
},
'android.permission.MOVE_PACKAGE': {
'permissionGroup': '',
'description':
'Allows the app to move app resources from internal to external media and vice versa.',
'protectionLevel': 'signature|system',
'label': 'move app resources'
},
'com.android.cts.permissionAllowedWithSignature': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.SET_DEBUG_APP': {
'permissionGroup': 'android.permission-group.DEVELOPMENT_TOOLS',
'description':
'Allows the app to turn on debugging for another app. Malicious apps may use this to kill other apps.',
'protectionLevel': 'signature|system|development',
'label': 'enable app debugging'
},
'android.permission.BRICK': {
'permissionGroup': '',
'description':
'Allows the app to disable the entire phone permanently. This is very dangerous.',
'protectionLevel': 'signature',
'label': 'permanently disable phone'
},
'android.permission.BLUETOOTH': {
'permissionGroup': 'android.permission-group.BLUETOOTH_NETWORK',
'description':
'Allows the app to view the configuration of the Bluetooth on the phone, and to make and accept connections with paired devices.',
'protectionLevel': 'dangerous',
'label': 'pair with Bluetooth devices'
},
'com.android.launcher3.permission.RECEIVE_LAUNCH_BROADCASTS': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.MMS_SEND_OUTBOX_MSG': {
'permissionGroup': '',
'description': 'Sends out all MMSs from the outbox to the network',
'protectionLevel': 'signatureOrSystem',
'label': 'MMS Wakeup'
},
'android.permission.UPDATE_APP_OPS_STATS': {
'permissionGroup': '',
'description':
'Allows the app to modify collected application operation statistics. Not for use by normal apps.',
'protectionLevel': 'signature|system',
'label': 'modify app ops statistics'
},
'android.permission.READ_PRIVILEGED_PHONE_STATE': {
'permissionGroup': 'android.permission-group.PHONE_CALLS',
'description': '',
'protectionLevel': 'signature|system',
'label': ''
},
'android.permission.SET_WALLPAPER_COMPONENT': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': '',
'protectionLevel': 'signature|system',
'label': ''
},
'com.android.launcher3.permission.READ_SETTINGS': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to read the settings and shortcuts in Home.',
'protectionLevel': 'normal',
'label': 'read Home settings and shortcuts'
},
'android.permission.WRITE_DREAM_STATE': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': '',
'protectionLevel': 'signature|system',
'label': ''
},
'android.permission.ACCESS_BLUETOOTH_SHARE': {
'permissionGroup': '',
'description':
'Allows the app to access the BluetoothShare manager and use it to transfer files.',
'protectionLevel': 'signature',
'label': 'Access download manager.'
},
'android.permission.READ_WIFI_CREDENTIAL': {
'permissionGroup': 'android.permission-group.NETWORK',
'description': '',
'protectionLevel': 'signature|system',
'label': ''
},
'android.intent.category.MASTER_CLEAR.permission.C2D_MESSAGE': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.UPDATE_LOCK': {
'permissionGroup': '',
'description':
'Allows the holder to offer information to the system about when would be a good time for a noninteractive reboot to upgrade the device.',
'protectionLevel': 'signatureOrSystem',
'label': 'discourage automatic device updates'
},
'com.android.cts.keysets_permdef.keysets_perm': {
'permissionGroup': '',
'description': 'keysets_perm_description',
'protectionLevel': 'signature',
'label': 'keysets_perm_label'
},
'android.permission.WRITE_USER_DICTIONARY': {
'permissionGroup': 'android.permission-group.WRITE_USER_DICTIONARY',
'description':
'Allows the app to write new words into the user dictionary.',
'protectionLevel': 'normal',
'label': 'add words to user-defined dictionary'
},
'com.android.browser.permission.READ_HISTORY_BOOKMARKS': {
'permissionGroup': 'android.permission-group.BOOKMARKS',
'description':
'Allows the app to read the history of all URLs that the Browser has visited, and all of the Browser\'s bookmarks. Note: this permission may not be enforced by third-party browsers or other applications with web browsing capabilities.',
'protectionLevel': 'dangerous',
'label': 'read your Web bookmarks and history'
},
'android.permission.RECORD_AUDIO': {
'permissionGroup': 'android.permission-group.MICROPHONE',
'description':
'Allows the app to record audio with the microphone. This permission allows the app to record audio at any time without your confirmation.',
'protectionLevel': 'dangerous',
'label': 'record audio'
},
'android.permission.WRITE_CONTACTS': {
'permissionGroup': 'android.permission-group.SOCIAL_INFO',
'description':
'Allows the app to modify the data about your contacts stored on your phone, including the frequency with which you\'ve called, emailed, or communicated in other ways with specific contacts. This permission allows apps to delete contact data.',
'protectionLevel': 'dangerous',
'label': 'modify your contacts'
},
'android.permission.REGISTER_CALL_PROVIDER': {
'permissionGroup': 'android.permission-group.PHONE_CALLS',
'description': 'Allows the app to register new telecom connections.',
'protectionLevel': 'system|signature',
'label': 'register new telecom connections'
},
'android.permission.ACCESS_KEYGUARD_SECURE_STORAGE': {
'permissionGroup': '',
'description':
'Allows an application to access keguard secure storage.',
'protectionLevel': 'signature',
'label': 'Access keyguard secure storage'
},
'android.permission.BIND_CONDITION_PROVIDER_SERVICE': {
'permissionGroup': '',
'description':
'Allows the holder to bind to the top-level interface of a condition provider service. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'bind to a condition provider service'
},
'android.permission.SIGNAL_PERSISTENT_PROCESSES': {
'permissionGroup': 'android.permission-group.DEVELOPMENT_TOOLS',
'description':
'Allows the app to request that the supplied signal be sent to all persistent processes.',
'protectionLevel': 'signature|system|development',
'label': 'send Linux signals to apps'
},
'android.permission.MANAGE_VOICE_KEYPHRASES': {
'permissionGroup': '',
'description':
'Allows the holder to manage the keyphrases for voice hotword detection. Should never be needed for normal apps.',
'protectionLevel': 'signature|system',
'label': 'manage voice keyphrases'
},
'android.permission.MASTER_CLEAR': {
'permissionGroup': '',
'description':
'Allows the app to completely reset the system to its factory settings, erasing all data, configuration, and installed apps.',
'protectionLevel': 'signature|system',
'label': 'reset system to factory defaults'
},
'android.permission.READ_INPUT_STATE': {
'permissionGroup': '',
'description':
'Allows the app to watch the keys you press even when interacting with another app (such as typing a password). Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'record what you type and actions you take'
},
'android.permission.INJECT_EVENTS': {
'permissionGroup': '',
'description':
'Allows the app to deliver its own input events (key presses, etc.) to other apps. Malicious apps may use this to take over the phone.',
'protectionLevel': 'signature',
'label': 'press keys and control buttons'
},
'com.android.email.permission.ACCESS_PROVIDER': {
'permissionGroup': '',
'description':
'Allows the app to access your email database, including received messages, sent messages, usernames, and passwords.',
'protectionLevel': 'signature',
'label': 'Access email provider data'
},
'org.chromium.content_shell.permission.SANDBOX': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.ACCESS_WIMAX_STATE': {
'permissionGroup': 'android.permission-group.NETWORK',
'description':
'Allows the app to determine whether WiMAX is enabled and information about any WiMAX networks that are connected.',
'protectionLevel': 'normal',
'label': 'connect and disconnect from WiMAX'
},
'com.android.launcher.permission.WRITE_SETTINGS': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to change the settings and shortcuts in Home.',
'protectionLevel': 'signatureOrSystem',
'label': 'write Home settings and shortcuts'
},
'android.permission.FREEZE_SCREEN': {
'permissionGroup': '',
'description':
'Allows the application to temporarily freeze the screen for a full-screen transition.',
'protectionLevel': 'signature',
'label': 'freeze screen'
},
'android.permission.GET_TOP_ACTIVITY_INFO': {
'permissionGroup': '',
'description':
'Allows the holder to retrieve private information about the current application in the foreground of the screen.',
'protectionLevel': 'signature',
'label': 'get current app info'
},
'android.permission.WRITE_APN_SETTINGS': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to change network settings and to intercept and inspect all network traffic, for example to change the proxy and port of any APN. Malicious apps may monitor, redirect, or modify network packets without your knowledge.',
'protectionLevel': 'signature|system',
'label': 'change/intercept network settings and traffic'
},
'android.permission.ACCESS_SURFACE_FLINGER': {
'permissionGroup': '',
'description':
'Allows the app to use SurfaceFlinger low-level features.',
'protectionLevel': 'signature',
'label': 'access SurfaceFlinger'
},
'android.permission.USER_ACTIVITY': {
'permissionGroup': '',
'description': 'Allows the app to reset the display timeout.',
'protectionLevel': 'signature|system',
'label': 'reset display timeout'
},
'android.permission.SERIAL_PORT': {
'permissionGroup': '',
'description':
'Allows the holder to access serial ports using the SerialManager API.',
'protectionLevel': 'signature|system',
'label': 'access serial ports'
},
'android.permission.ALLOW_ANY_CODEC_FOR_PLAYBACK': {
'permissionGroup': '',
'description':
'Allows the app to use any installed media decoder to decode for playback.',
'protectionLevel': 'signature|system',
'label': 'use any media decoder for playback'
},
'android.permission.MANAGE_USB': {
'permissionGroup': 'android.permission-group.HARDWARE_CONTROLS',
'description':
'Allows the app to manage preferences and permissions for USB devices.',
'protectionLevel': 'signature|system',
'label': 'manage preferences and permissions for USB devices'
},
'android.permission.PROCESS_OUTGOING_CALLS': {
'permissionGroup': 'android.permission-group.PHONE_CALLS',
'description':
'Allows the app to see the number being dialed during an outgoing call with the option to redirect the call to a different number or abort the call altogether.',
'protectionLevel': 'dangerous',
'label': 'reroute outgoing calls'
},
'android.permission.CALL_PRIVILEGED': {
'permissionGroup': '',
'description':
'Allows the app to call any phone number, including emergency numbers, without your intervention. Malicious apps may place unnecessary and illegal calls to emergency services.',
'protectionLevel': 'signature|system',
'label': 'directly call any phone numbers'
},
'com.android.gallery3d.filtershow.permission.WRITE': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.WRITE_CALENDAR': {
'permissionGroup': 'android.permission-group.PERSONAL_INFO',
'description':
'Allows the app to add, remove, change events that you can modify on your phone, including those of friends or co-workers. This may allow the app to send messages that appear to come from calendar owners, or modify events without the owners\' knowledge.',
'protectionLevel': 'dangerous',
'label':
'add or modify calendar events and send email to guests without owners\' knowledge'
},
'android.permission.ACCESS_CONTENT_PROVIDERS_EXTERNALLY': {
'permissionGroup': '',
'description':
'Allows the holder to access content providers from the shell. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'access content providers externally'
},
'android.permission.SUBSCRIBED_FEEDS_READ': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to get details about the currently synced feeds.',
'protectionLevel': 'normal',
'label': 'read subscribed feeds'
},
'android.permission.MANAGE_ACCOUNTS': {
'permissionGroup': 'android.permission-group.ACCOUNTS',
'description':
'Allows the app to perform operations like adding and removing accounts, and deleting their password.',
'protectionLevel': 'dangerous',
'label': 'add or remove accounts'
},
'android.permission.BIND_INCALL_SERVICE': {
'permissionGroup': 'android.permission-group.PHONE_CALLS',
'description':
'Allows the app to control when and how the user sees the in-call screen.',
'protectionLevel': 'system|signature',
'label': 'interact with in-call screen'
},
'android.permission.BIND_REMOTEVIEWS': {
'permissionGroup': '',
'description':
'Allows the holder to bind to the top-level interface of a widget service. Should never be needed for normal apps.',
'protectionLevel': 'signature|system',
'label': 'bind to a widget service'
},
'org.chromium.chrome.shell.permission.DEBUG': {
'permissionGroup': 'android.permission-group.DEVELOPMENT_TOOLS',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.BIND_ACCESSIBILITY_SERVICE': {
'permissionGroup': '',
'description':
'Allows the holder to bind to the top-level interface of an accessibility service. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'bind to an accessibility service'
},
'android.permission.ACCESS_NOTIFICATIONS': {
'permissionGroup': '',
'description':
'Allows the app to retrieve, examine, and clear notifications, including those posted by other apps.',
'protectionLevel': 'signature|system',
'label': 'access notifications'
},
'android.permission.WRITE_SMS': {
'permissionGroup': 'android.permission-group.MESSAGES',
'description':
'Allows the app to write to SMS messages stored on your phone or SIM card. Malicious apps may delete your messages.',
'protectionLevel': 'dangerous',
'label': 'edit your text messages (SMS or MMS)'
},
'android.permission.DELETE_PACKAGES': {
'permissionGroup': '',
'description':
'Allows the app to delete Android packages. Malicious apps may use this to delete important apps.',
'protectionLevel': 'signature|system',
'label': 'delete apps'
},
'android.permission.FILTER_EVENTS': {
'permissionGroup': '',
'description':
'Allows an application to register an input filter which filters the stream of all user events before they are dispatched. Malicious app may control the system UI whtout user intervention.',
'protectionLevel': 'signature',
'label': 'filter events'
},
'android.permission.ACCESS_CHECKIN_PROPERTIES': {
'permissionGroup': '',
'description':
'Allows the app read/write access to properties uploaded by the checkin service. Not for use by normal apps.',
'protectionLevel': 'signature|system',
'label': 'access checkin properties'
},
'android.permission.SEND_RESPOND_VIA_MESSAGE': {
'permissionGroup': 'android.permission-group.MESSAGES',
'description':
'Allows the app to send requests to other messaging apps to handle respond-via-message events for incoming calls.',
'protectionLevel': 'signature|system',
'label': 'send respond-via-message events'
},
'com.android.voicemail.permission.WRITE_VOICEMAIL': {
'permissionGroup': 'android.permission-group.VOICEMAIL',
'description':
'Allows the app to modify and remove messages from your voicemail inbox.',
'protectionLevel': 'system|signature',
'label': 'write voicemails'
},
'android.permission.RETRIEVE_WINDOW_CONTENT': {
'permissionGroup': 'android.permission-group.PERSONAL_INFO',
'description':
'Allows the app to retrieve the content of the active window. Malicious apps may retrieve the entire window content and examine all its text except passwords.',
'protectionLevel': 'signature|system',
'label': 'retrieve screen content'
},
'android.permission.SET_PREFERRED_APPLICATIONS': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to modify your preferred apps. Malicious apps may silently change the apps that are run, spoofing your existing apps to collect private data from you.',
'protectionLevel': 'signature',
'label': 'set preferred apps'
},
'android.permission.VIBRATE': {
'permissionGroup': 'android.permission-group.AFFECTS_BATTERY',
'description': 'Allows the app to control the vibrator.',
'protectionLevel': 'normal',
'label': 'control vibration'
},
'android.app.cts.permission.TEST_GRANTED': {
'permissionGroup': '',
'description':
'Used for running CTS tests, for testing operations where we have the permission.',
'protectionLevel': 'normal',
'label': 'Test Granted'
},
'android.permission.ACCESS_FM_RADIO': {
'permissionGroup': 'android.permission-group.HARDWARE_CONTROLS',
'description':
'Allows the app to access FM radio to listen to programs.',
'protectionLevel': 'signature|system',
'label': 'access FM radio'
},
'android.permission.DIAGNOSTIC': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to read and write to any resource owned by the diag group; for example, files in /dev. This could potentially affect system stability and security. This should be ONLY be used for hardware-specific diagnostics by the manufacturer or operator.',
'protectionLevel': 'signature',
'label': 'read/write to resources owned by diag'
},
'android.permission.BIND_JOB_SERVICE': {
'permissionGroup': '',
'description':
'This permission allows the Android system to run the application in the background when requested.',
'protectionLevel': 'signature',
'label': 'run the application\'s scheduled background work'
},
'org.chromium.chrome.shell.permission.SANDBOX': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.INTERACT_ACROSS_USERS': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to perform actions across different users on the device. Malicious apps may use this to violate the protection between users.',
'protectionLevel': 'signature|system|development',
'label': 'interact across users'
},
'android.permission.REGISTER_SIM_SUBSCRIPTION': {
'permissionGroup': 'android.permission-group.PHONE_CALLS',
'description':
'Allows the app to register new telecom SIM connections.',
'protectionLevel': 'system|signature',
'label': 'register new telecom SIM connections'
},
'android.permission.BROADCAST_SMS': {
'permissionGroup': 'android.permission-group.MESSAGES',
'description':
'Allows the app to broadcast a notification that an SMS message has been received. Malicious apps may use this to forge incoming SMS messages.',
'protectionLevel': 'signature',
'label': 'send SMS-received broadcast'
},
'android.permission.BLUETOOTH_MAP': {
'permissionGroup': 'android.permission-group.BLUETOOTH_NETWORK',
'description': 'Allows the app to access Bluetooth MAP data.',
'protectionLevel': 'signature',
'label': 'access Bluetooth MAP data'
},
'android.permission.READ_FRAME_BUFFER': {
'permissionGroup': '',
'description':
'Allows the app to read the content of the frame buffer.',
'protectionLevel': 'signature|system',
'label': 'read frame buffer'
},
'android.permission.STOP_APP_SWITCHES': {
'permissionGroup': '',
'description': 'Prevents the user from switching to another app.',
'protectionLevel': 'signature|system',
'label': 'prevent app switches'
},
'android.permission.ACCESS_WIFI_STATE': {
'permissionGroup': 'android.permission-group.NETWORK',
'description':
'Allows the app to view information about Wi-Fi networking, such as whether Wi-Fi is enabled and name of connected Wi-Fi devices.',
'protectionLevel': 'normal',
'label': 'view Wi-Fi connections'
},
'android.permission.GLOBAL_SEARCH_CONTROL': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.ACCESS_DOWNLOAD_MANAGER': {
'permissionGroup': '',
'description':
'Allows the app to access the download manager and to use it to download files. Malicious apps can use this to disrupt downloads and access private information.',
'protectionLevel': 'signatureOrSystem',
'label': 'Access download manager.'
},
'com.android.launcher3.permission.WRITE_SETTINGS': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to change the settings and shortcuts in Home.',
'protectionLevel': 'signatureOrSystem',
'label': 'write Home settings and shortcuts'
},
'android.permission.MODIFY_PARENTAL_CONTROLS': {
'permissionGroup': '',
'description':
'Allows the holder to modify the system\'s parental controls data. Should never be needed for normal apps.',
'protectionLevel': 'signature|system',
'label': 'modify parental controls'
},
'org.chromium.chromecast.shell.permission.SANDBOX': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.READ_SYNC_SETTINGS': {
'permissionGroup': 'android.permission-group.SYNC_SETTINGS',
'description':
'Allows the app to read the sync settings for an account. For example, this can determine whether the People app is synced with an account.',
'protectionLevel': 'normal',
'label': 'read sync settings'
},
'android.permission.OEM_UNLOCK_STATE': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.SUBSCRIBED_FEEDS_WRITE': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to modify your currently synced feeds. Malicious apps may change your synced feeds.',
'protectionLevel': 'dangerous',
'label': 'write subscribed feeds'
},
'android.permission.READ_USER_DICTIONARY': {
'permissionGroup': 'android.permission-group.USER_DICTIONARY',
'description':
'Allows the app to read all words, names and phrases that the user may have stored in the user dictionary.',
'protectionLevel': 'dangerous',
'label': 'read terms you added to the dictionary'
},
'android.permission.READ_INSTALL_SESSIONS': {
'permissionGroup': '',
'description':
'Allows an application to read install sessions. This allows it to see details about active package installations.',
'protectionLevel': '',
'label': 'Read install sessions'
},
'android.permission.MANAGE_USERS': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows apps to manage users on the device, including query, creation and deletion.',
'protectionLevel': 'signature|system',
'label': 'manage users'
},
'android.permission.FACTORY_TEST': {
'permissionGroup': '',
'description':
'Run as a low-level manufacturer test, allowing complete access to the phone hardware. Only available when a phone is running in manufacturer test mode.',
'protectionLevel': 'signature',
'label': 'run in factory test mode'
},
'android.permission.CHANGE_COMPONENT_ENABLED_STATE': {
'permissionGroup': '',
'description':
'Allows the app to change whether a component of another app is enabled or not. Malicious apps may use this to disable important phone capabilities. Care must be used with this permission, as it is possible to get app components into an unusable, inconsistent, or unstable state.',
'protectionLevel': 'signature|system',
'label': 'enable or disable app components'
},
'android.permission.RECEIVE_BOOT_COMPLETED': {
'permissionGroup': 'android.permission-group.APP_INFO',
'description':
'Allows the app to have itself started as soon as the system has finished booting. This can make it take longer to start the phone and allow the app to slow down the overall phone by always running.',
'protectionLevel': 'normal',
'label': 'run at startup'
},
'android.permission.SET_POINTER_SPEED': {
'permissionGroup': '',
'description':
'Allows the app to change the mouse or trackpad pointer speed at any time. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'change pointer speed'
},
'android.permission.BACKUP': {
'permissionGroup': '',
'description':
'Allows the app to control the system\'s backup and restore mechanism. Not for use by normal apps.',
'protectionLevel': 'signature|system',
'label': 'control system backup and restore'
},
'android.permission.TEMPORARY_ENABLE_ACCESSIBILITY': {
'permissionGroup': '',
'description':
'Allows an application to temporarily enable accessibility on the device. Malicious apps may enable accessibility without user consent.',
'protectionLevel': 'signature',
'label': 'temporary enable accessibility'
},
'android.permission.EXPAND_STATUS_BAR': {
'permissionGroup': 'android.permission-group.STATUS_BAR',
'description': 'Allows the app to expand or collapse the status bar.',
'protectionLevel': 'normal',
'label': 'expand/collapse status bar'
},
'android.permission.ACCESS_FINE_LOCATION': {
'permissionGroup': 'android.permission-group.LOCATION',
'description':
'Allows the app to get your precise location using the Global Positioning System (GPS) or network location sources such as cell towers and Wi-Fi. These location services must be turned on and available to your device for the app to use them. Apps may use this to determine where you are, and may consume additional battery power.',
'protectionLevel': 'dangerous',
'label': 'precise location (GPS and network-based)'
},
'android.permission.ASEC_RENAME': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': 'Allows the app to rename internal storage.',
'protectionLevel': 'signature',
'label': 'rename internal storage'
},
'android.permission.LOCATION_HARDWARE': {
'permissionGroup': 'android.permission-group.LOCATION',
'description': '',
'protectionLevel': 'signature|system',
'label': ''
},
'com.android.frameworks.coretests.keysets_permdef.keyset_perm': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.GET_APP_OPS_STATS': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to retrieve collected application operation statistics. Not for use by normal apps.',
'protectionLevel': 'signature|system|development',
'label': 'retrieve app ops statistics'
},
'android.permission.REORDER_TASKS': {
'permissionGroup': 'android.permission-group.APP_INFO',
'description':
'Allows the app to move tasks to the foreground and background. The app may do this without your input.',
'protectionLevel': 'normal',
'label': 'reorder running apps'
},
'com.android.cts.permissionNormal': {
'permissionGroup': '',
'description': '',
'protectionLevel': '',
'label': ''
},
'android.permission.CONTROL_KEYGUARD': {
'permissionGroup': '',
'description': 'Allows an application to control keguard.',
'protectionLevel': 'signature',
'label': 'Control displaying and hiding keyguard'
},
'android.permission.ASEC_DESTROY': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': 'Allows the app to destroy internal storage.',
'protectionLevel': 'signature',
'label': 'destroy internal storage'
},
'android.permission.BROADCAST_PACKAGE_REMOVED': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to broadcast a notification that an app package has been removed. Malicious apps may use this to kill any other running app.',
'protectionLevel': 'signature',
'label': 'send package removed broadcast'
},
'android.permission.MANAGE_ACTIVITY_STACKS': {
'permissionGroup': 'android.permission-group.APP_INFO',
'description':
'Allows the app to add, remove, and modify the activity stacks in which other apps run. Malicious apps may disrupt the behavior of other apps.',
'protectionLevel': 'signature|system',
'label': 'manage activity stacks'
},
'android.permission.CONNECTIVITY_INTERNAL': {
'permissionGroup': 'android.permission-group.NETWORK',
'description': '',
'protectionLevel': 'signature|system',
'label': ''
},
'android.permission.SET_SCREEN_COMPATIBILITY': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to control the screen compatibility mode of other applications. Malicious applications may break the behavior of other applications.',
'protectionLevel': 'signature',
'label': 'set screen compatibility'
},
'android.permission.WRITE_EXTERNAL_STORAGE': {
'permissionGroup': 'android.permission-group.STORAGE',
'description': 'Allows the app to write to the SD card.',
'protectionLevel': 'dangerous',
'label': 'modify or delete the contents of your SD card'
},
'android.permission.CAMERA_DISABLE_TRANSMIT_LED': {
'permissionGroup': 'android.permission-group.CAMERA',
'description':
'Allows a pre-installed system application to disable the camera use indicator LED.',
'protectionLevel': 'signature|system',
'label': 'disable transmit indicator LED when camera is in use'
},
'com.android.frameworks.coretests.DANGEROUS': {
'permissionGroup': 'android.permission-group.COST_MONEY',
'description': '',
'protectionLevel': 'dangerous',
'label': ''
},
'android.permission.WRITE_SOCIAL_STREAM': {
'permissionGroup': 'android.permission-group.SOCIAL_INFO',
'description':
'Allows the app to display social updates from your friends. Be careful when sharing information -- this allows the app to produce messages that may appear to come from a friend. Note: this permission may not be enforced on all social networks.',
'protectionLevel': 'dangerous',
'label': 'write to your social stream'
},
'com.android.printspooler.permission.ACCESS_ALL_PRINT_JOBS': {
'permissionGroup': '',
'description':
'Allows the holder to access print jobs created by another app. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'access all print jobs'
},
'android.permission.ASEC_MOUNT_UNMOUNT': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': 'Allows the app to mount/unmount internal storage.',
'protectionLevel': 'signature',
'label': 'mount/unmount internal storage'
},
'android.permission.INSTALL_PACKAGES': {
'permissionGroup': '',
'description':
'Allows the app to install new or updated Android packages. Malicious apps may use this to add new apps with arbitrarily powerful permissions.',
'protectionLevel': 'signature|system',
'label': 'directly install apps'
},
'com.android.providers.tv.permission.WRITE_EPG_DATA': {
'permissionGroup': '',
'description':
'Allows the app to provide and modify the TV channel/program information on your device.',
'protectionLevel': 'dangerous',
'label': 'write TV channel/program information'
},
'android.permission.RECEIVE_EMERGENCY_BROADCAST': {
'permissionGroup': 'android.permission-group.MESSAGES',
'description':
'Allows the app to receive and process emergency broadcast messages. This permission is only available to system apps.',
'protectionLevel': 'signature|system',
'label': 'receive emergency broadcasts'
},
'com.android.launcher.permission.READ_SETTINGS': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to read the settings and shortcuts in Home.',
'protectionLevel': 'normal',
'label': 'read Home settings and shortcuts'
},
'com.android.alarm.permission.SET_ALARM': {
'permissionGroup': 'android.permission-group.DEVICE_ALARMS',
'description':
'Allows the app to set an alarm in an installed alarm clock app. Some alarm clock apps may not implement this feature.',
'protectionLevel': 'normal',
'label': 'set an alarm'
},
'android.permission.CAPTURE_VIDEO_OUTPUT': {
'permissionGroup': '',
'description': 'Allows the app to capture and redirect video output.',
'protectionLevel': 'signature|system',
'label': 'capture video output'
},
'org.chromium.chrome.shell.permission.C2D_MESSAGE': {
'permissionGroup': '',
'description': '',
'protectionLevel': 'signature',
'label': ''
},
'android.permission.READ_PRECISE_PHONE_STATE': {
'permissionGroup': 'android.permission-group.PHONE_CALLS',
'description':
'Allows the app to access the precise phone states. This permission allows the app to determine the real call status, whether a call is active or in the background, call fails, precise data connection status and data connection fails.',
'protectionLevel': 'signature|system',
'label': 'read precise phone states'
},
'android.permission.READ_SMS': {
'permissionGroup': 'android.permission-group.MESSAGES',
'description':
'Allows the app to read SMS messages stored on your phone or SIM card. This allows the app to read all SMS messages, regardless of content or confidentiality.',
'protectionLevel': 'dangerous',
'label': 'read your text messages (SMS or MMS)'
},
'android.permission.ACCESS_INPUT_FLINGER': {
'permissionGroup': '',
'description': 'Allows the app to use InputFlinger low-level features.',
'protectionLevel': 'signature',
'label': 'access InputFlinger'
},
'android.permission.BATTERY_STATS': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows an application to read the current low-level battery use data. May allow the application to find out detailed information about which apps you use.',
'protectionLevel': 'signature|system|development',
'label': 'read battery statistics'
},
'android.permission.GLOBAL_SEARCH': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description': '',
'protectionLevel': 'signature|system',
'label': ''
},
'android.permission.SET_INPUT_CALIBRATION': {
'permissionGroup': '',
'description':
'Allows the app to modify the calibration parameters of the touch screen. Should never be needed for normal apps.',
'protectionLevel': 'signature',
'label': 'change input device calibration'
},
'android.permission.REMOVE_TASKS': {
'permissionGroup': 'android.permission-group.APP_INFO',
'description':
'Allows the app to remove tasks and kill their apps. Malicious apps may disrupt the behavior of other apps.',
'protectionLevel': 'signature',
'label': 'stop running apps'
},
'android.permission.SET_ALWAYS_FINISH': {
'permissionGroup': 'android.permission-group.DEVELOPMENT_TOOLS',
'description':
'Allows the app to control whether activities are always finished as soon as they go to the background. Never needed for normal apps.',
'protectionLevel': 'signature|system|development',
'label': 'force background apps to close'
},
'android.permission.ACCESS_ALL_EXTERNAL_STORAGE': {
'permissionGroup': 'android.permission-group.DEVELOPMENT_TOOLS',
'description':
'Allows the app to access external storage for all users.',
'protectionLevel': 'signature',
'label': 'access external storage of all users'
},
'android.permission.CLEAR_APP_CACHE': {
'permissionGroup': 'android.permission-group.SYSTEM_TOOLS',
'description':
'Allows the app to free phone storage by deleting files in the cache directories of other applications. This may cause other applications to start up more slowly as they need to re-retrieve their data.',
'protectionLevel': 'dangerous',
'label': 'delete all app cache data'
},
'android.permission.MANAGE_NETWORK_POLICY': {
'permissionGroup': '',
'description':
'Allows the app to manage network policies and define app-specific rules.',
'protectionLevel': 'signature',
'label': 'manage network policy'
},
}
# Human-readable metadata for the AOSP permission groups, keyed by the
# fully-qualified permission-group name. Each value holds the group's
# user-facing 'description' and 'label'; both are empty strings when AOSP
# defines none. String values are reproduced verbatim from the platform
# resources (including their original wording).
AOSP_PERMISSION_GROUPS = {
    'android.permission-group.AFFECTS_BATTERY': {
        'description': 'Use features that can quickly drain battery.',
        'label': 'Affects Battery',
    },
    'android.permission-group.PERSONAL_INFO': {
        'description': 'Direct access to information about you, stored in on your contact card.',
        'label': 'Your personal information',
    },
    'android.permission-group.WRITE_USER_DICTIONARY': {
        'description': 'Add words to the user dictionary.',
        'label': 'Write User Dictionary',
    },
    'android.permission-group.BLUETOOTH_NETWORK': {
        'description': 'Access devices and networks through Bluetooth.',
        'label': 'Bluetooth',
    },
    'android.permission-group.DISPLAY': {
        'description': 'Effect the UI of other applications.',
        'label': 'Other Application UI',
    },
    'android.permission-group.SCREENLOCK': {
        'description': 'Ability to affect behavior of the lock screen on your device.',
        'label': 'Lock screen',
    },
    'android.permission-group.STORAGE': {
        'description': 'Access the SD card.',
        'label': 'Storage',
    },
    'android.permission-group.APP_INFO': {
        'description': 'Ability to affect behavior of other applications on your device.',
        'label': 'Your applications information',
    },
    'android.permission-group.SYNC_SETTINGS': {
        'description': 'Access to the sync settings.',
        'label': 'Sync Settings',
    },
    'android.permission-group.AUDIO_SETTINGS': {
        'description': 'Change audio settings.',
        'label': 'Audio Settings',
    },
    'android.permission-group.WALLPAPER': {
        'description': 'Change the device wallpaper settings.',
        'label': 'Wallpaper',
    },
    'android.permission-group.CALENDAR': {
        'description': 'Direct access to calendar and events.',
        'label': 'Calendar',
    },
    'android.permission-group.DEVICE_ALARMS': {
        'description': 'Set the alarm clock.',
        'label': 'Alarm',
    },
    'android.permission-group.DEVELOPMENT_TOOLS': {
        'description': 'Features only needed for app developers.',
        'label': 'Development tools',
    },
    'android.permission-group.USER_DICTIONARY': {
        'description': 'Read words in user dictionary.',
        'label': 'Read User Dictionary',
    },
    'android.permission-group.VOICEMAIL': {
        'description': 'Direct access to voicemail.',
        'label': 'Voicemail',
    },
    'android.permission-group.LOCATION': {
        'description': 'Monitor your physical location.',
        'label': 'Your location',
    },
    'android.permission-group.STATUS_BAR': {
        'description': 'Change the device status bar settings.',
        'label': 'Status Bar',
    },
    'android.permission-group.SYSTEM_TOOLS': {
        'description': 'Lower-level access and control of the system.',
        'label': 'System tools',
    },
    'android.permission-group.SYSTEM_CLOCK': {
        'description': 'Change the device time or timezone.',
        'label': 'Clock',
    },
    'android.permission-group.SOCIAL_INFO': {
        'description': 'Direct access to information about your contacts and social connections.',
        'label': 'Your social information',
    },
    'android.permission-group.NETWORK': {
        'description': 'Access various network features.',
        'label': 'Network communication',
    },
    'android.permission-group.MESSAGES': {
        'description': 'Read and write your SMS, email, and other messages.',
        'label': 'Your messages',
    },
    'android.permission-group.MICROPHONE': {
        'description': 'Direct access to the microphone to record audio.',
        'label': 'Microphone',
    },
    'android.permission-group.ACCESSIBILITY_FEATURES': {
        'description': 'Features that assistive technology can request.',
        'label': 'Accessibility features',
    },
    'android.permission-group.ACCOUNTS': {
        'description': 'Access the available accounts.',
        'label': 'Your accounts',
    },
    'android.permission-group.COST_MONEY': {
        'description': '',
        'label': '',
    },
    'android.permission-group.CAMERA': {
        'description': 'Direct access to camera for image or video capture.',
        'label': 'Camera',
    },
    'android.permission-group.HARDWARE_CONTROLS': {
        'description': 'Direct access to hardware on the handset.',
        'label': 'Hardware controls',
    },
    'android.permission-group.PHONE_CALLS': {
        'description': 'Monitor, record, and process phone calls.',
        'label': 'Phone calls',
    },
    'android.permission-group.BOOKMARKS': {
        'description': 'Direct access to bookmarks and browser history.',
        'label': 'Bookmarks and History',
    },
}
#################################################
| apache-2.0 |
kk47/C-Cpp | deppends/python/requests/packages/charade/langthaimodel.py | 206 | 11475 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for thai was collected from a limited sample (1M).
# Character Mapping Table:
# Maps each TIS-620 byte value (0-255, used as the index into this 256-entry
# tuple) to a frequency "order" for that character: lower numbers mean more
# frequent Thai characters.  Sentinel values (see the header comment above):
# 255 = control character, 254 = CR/LF, 253 = symbol/punctuation, 252 = digit;
# other large values are rarely-seen characters.
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences: 7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
# Character-bigram "precedence" matrix for Thai, stored flattened (4096
# entries = 64x64).  Each value classifies the likelihood of one frequent
# character (by its order from TIS620CharToOrderMap) being followed by
# another: 3 = very likely down to 0 = negative/unseen in the sample.
# Presumably indexed as matrix[order1 * 64 + order2], the standard layout
# for chardet/charade single-byte charset models -- TODO confirm against
# the SingleByteCharSetProber implementation.
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
# Model bundle consumed by charade's single-byte charset prober: ties the
# byte->order map and the bigram precedence matrix to the TIS-620 charset.
TIS620ThaiModel = {
    'charToOrderMap': TIS620CharToOrderMap,    # byte value -> frequency order
    'precedenceMatrix': ThaiLangModel,         # bigram likelihood classes (0-3)
    'mTypicalPositiveRatio': 0.926386,         # matches "first 512 sequences" ratio above
    'keepEnglishLetter': False,                # ASCII letters not kept for scoring
    'charsetName': "TIS-620"
}
# flake8: noqa
| lgpl-3.0 |
jhutar/spacewalk | backend/server/rhnSQL/sql_lib.py | 10 | 2020 | #
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# A collection of classes and functions for handy data manipulation
# This file includes common classes and functions that are used by
# misc parts of the RHN backend
#
# Before changing any of this stuff, please grep through the sources to
# check how the function/class you are about to modify is used first.
# Or ask gafton.
#
import string
def build_sql_insert(table, hash_name, items):
    """Build a parameterized SQL INSERT statement.

    Inserts `items` into "table", keyed by the "hash_name" column.

    :param table: name of the table to insert into
    :param hash_name: name of the key column; its bind value is left as
        None in the returned dict (bind name :p0) and must be set by the
        caller before executing
    :param items: sequence of (column_name, value) pairs
    :return: (sql, pdict) where sql uses named binds :p0 and :p_<col>,
        and pdict maps those bind names to their values
    """
    columns = [a[0] for a in items]
    # str.join instead of the Python-2-only string.join(): the string
    # module lost its function forms in Python 3, and the rest of this
    # module already uses py2/py3-compatible constructs (list(map(...))).
    sql = "insert into %s ( %s, %s ) values ( :p0, %s )" % (
        table, hash_name,
        ", ".join(columns),
        ", ".join(":p_%s" % c for c in columns))
    pdict = {"p0": None}  # This must be reset after we return from this call
    for a in items:
        pdict["p_%s" % a[0]] = a[1]
    return sql, pdict
def build_sql_update(table, hash_name, items):
    """Build a parameterized SQL UPDATE statement.

    Updates `items` in "table" for the row whose "hash_name" column
    equals the :p0 bind value.

    :param table: name of the table to update
    :param hash_name: name of the key column used in the WHERE clause;
        its bind value is left as None in the returned dict (bind name
        :p0) and must be set by the caller before executing
    :param items: sequence of (column_name, value) pairs to SET
    :return: (sql, pdict) where sql uses named binds :p0 and :p_<col>,
        and pdict maps those bind names to their values
    """
    # str.join instead of the Python-2-only string.join() so the module
    # stays importable on Python 3 (consistent with build_sql_insert).
    assignments = ", ".join(
        "%s = :p_%s" % (c, c) for c in [a[0] for a in items])
    sql = "update %s set %s where %s = :p0" % (table, assignments, hash_name)
    pdict = {"p0": None}  # This must be reset after we return from this call
    for a in items:
        pdict["p_%s" % a[0]] = a[1]
    return sql, pdict
| gpl-2.0 |
reedloden/ansible | test/units/template/test_safe_eval.py | 205 | 1956 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
from collections import defaultdict
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.template.safe_eval import safe_eval
class TestSafeEval(unittest.TestCase):
    """Unit tests for ansible.template.safe_eval.safe_eval."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_safe_eval_usage(self):
        """safe_eval must work with both a plain dict and a defaultdict as
        the locals mapping (regression test for ansible/ansible#12206)."""
        expected = [
            ('True', True),
            ('False', False),
            ('0', 0),
            ('[]', []),
            ('{}', {}),
        ]
        for locals_dict in (dict(), defaultdict(dict)):
            for expr, value in expected:
                self.assertEqual(safe_eval(expr, locals=locals_dict), value)

    @unittest.skipUnless(sys.version_info[:2] >= (2, 7), "Python 2.6 has no set literals")
    def test_set_literals(self):
        """A set literal should evaluate to a real set object."""
        self.assertEqual(safe_eval('{0}'), set([0]))
| gpl-3.0 |
woggle/mesos-old | third_party/boto-2.0b2/boto/rds/regioninfo.py | 44 | 1466 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo
class RDSRegionInfo(RegionInfo):
    """RegionInfo subclass that binds RDSConnection as the region's
    connection class, so callers need not name it themselves."""

    def __init__(self, connection=None, name=None, endpoint=None):
        # Imported at call time rather than at module scope, presumably to
        # avoid a circular import with boto.rds -- TODO confirm.
        from boto.rds import RDSConnection
        RegionInfo.__init__(self, connection, name, endpoint, RDSConnection)
| apache-2.0 |
tbinjiayou/Odoo | addons/website_quote/controllers/main.py | 64 | 8730 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
import werkzeug
import datetime
import time
from openerp.tools.translate import _
class sale_quote(http.Controller):
    @http.route([
        "/quote/<int:order_id>",
        "/quote/<int:order_id>/<token>"
    ], type='http', auth="public", website=True)
    def view(self, order_id, token=None, message=False, **post):
        """Render the online quotation page for a sale order.

        With a `token`, a public (not logged-in) visitor may view the
        order; without one, the current user's normal access rights apply.
        `message` selects which feedback banner the template shows.
        """
        # Browse as SUPERUSER_ID only when a token was supplied: access
        # control then becomes the token equality check below instead of
        # the regular record rules.
        order = request.registry.get('sale.order').browse(request.cr, token and SUPERUSER_ID or request.uid, order_id)
        now = time.strftime('%Y-%m-%d')
        if token:
            # NOTE(review): plain != on a secret token is not a
            # constant-time comparison; consider a timing-safe compare.
            if token != order.access_token:
                return request.website.render('website.404')
            # Log the "viewed by customer" event at most once per day per
            # session, keyed by today's date stored in the session.
            if request.session.get('view_quote',False)!=now:
                request.session['view_quote'] = now
                body=_('Quotation viewed by customer')
                self.__message_post(body, order_id, type='comment')
        # Days remaining before the quotation expires (0 when no validity
        # date is set; clamped to >= 0 below).
        days = 0
        if order.validity_date:
            days = (datetime.datetime.strptime(order.validity_date, '%Y-%m-%d') - datetime.datetime.now()).days + 1
        values = {
            'quotation': order,
            # Feedback banner selector (1 = comment posted, 2 = declined,
            # per the redirects in post()/decline()); False when absent.
            'message': message and int(message) or False,
            # True when at least one optional line has not been added to
            # the order yet (Python 2: filter() returns a list here).
            'option': bool(filter(lambda x: not x.line_id, order.options)),
            'order_valid': (not order.validity_date) or (now <= order.validity_date),
            'days_valid': max(days, 0)
        }
        return request.website.render('website_quote.so_quotation', values)
@http.route(['/quote/accept'], type='json', auth="public", website=True)
def accept(self, order_id, token=None, signer=None, sign=None, **post):
order_obj = request.registry.get('sale.order')
order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
if token != order.access_token:
return request.website.render('website.404')
attachments=sign and [('signature.png', sign.decode('base64'))] or []
order_obj.signal_workflow(request.cr, SUPERUSER_ID, [order_id], 'order_confirm', context=request.context)
message = _('Order signed by %s') % (signer,)
self.__message_post(message, order_id, type='comment', subtype='mt_comment', attachments=attachments)
return True
@http.route(['/quote/<int:order_id>/<token>/decline'], type='http', auth="public", website=True)
def decline(self, order_id, token, **post):
order_obj = request.registry.get('sale.order')
order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
if token != order.access_token:
return request.website.render('website.404')
request.registry.get('sale.order').action_cancel(request.cr, SUPERUSER_ID, [order_id])
message = post.get('decline_message')
if message:
self.__message_post(message, order_id, type='comment', subtype='mt_comment')
return werkzeug.utils.redirect("/quote/%s/%s?message=2" % (order_id, token))
@http.route(['/quote/<int:order_id>/<token>/post'], type='http', auth="public", website=True)
def post(self, order_id, token, **post):
# use SUPERUSER_ID allow to access/view order for public user
order_obj = request.registry.get('sale.order')
order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
message = post.get('comment')
if token != order.access_token:
return request.website.render('website.404')
if message:
self.__message_post(message, order_id, type='comment', subtype='mt_comment')
return werkzeug.utils.redirect("/quote/%s/%s?message=1" % (order_id, token))
def __message_post(self, message, order_id, type='comment', subtype=False, attachments=[]):
request.session.body = message
cr, uid, context = request.cr, request.uid, request.context
user = request.registry['res.users'].browse(cr, SUPERUSER_ID, uid, context=context)
if 'body' in request.session and request.session.body:
request.registry.get('sale.order').message_post(cr, SUPERUSER_ID, order_id,
body=request.session.body,
type=type,
subtype=subtype,
author_id=user.partner_id.id,
context=context,
attachments=attachments
)
request.session.body = False
return True
@http.route(['/quote/update_line'], type='json', auth="public", website=True)
def update(self, line_id, remove=False, unlink=False, order_id=None, token=None, **post):
order = request.registry.get('sale.order').browse(request.cr, SUPERUSER_ID, int(order_id))
if token != order.access_token:
return request.website.render('website.404')
if order.state not in ('draft','sent'):
return False
line_id=int(line_id)
if unlink:
request.registry.get('sale.order.line').unlink(request.cr, SUPERUSER_ID, [line_id], context=request.context)
return False
number=(remove and -1 or 1)
order_line_obj = request.registry.get('sale.order.line')
order_line_val = order_line_obj.read(request.cr, SUPERUSER_ID, [line_id], [], context=request.context)[0]
quantity = order_line_val['product_uom_qty'] + number
order_line_obj.write(request.cr, SUPERUSER_ID, [line_id], {'product_uom_qty': (quantity)}, context=request.context)
return [str(quantity), str(order.amount_total)]
@http.route(["/quote/template/<model('sale.quote.template'):quote>"], type='http', auth="user", website=True)
def template_view(self, quote, **post):
values = { 'template': quote }
return request.website.render('website_quote.so_template', values)
@http.route(["/quote/add_line/<int:option_id>/<int:order_id>/<token>"], type='http', auth="public", website=True)
def add(self, option_id, order_id, token, **post):
vals = {}
order = request.registry.get('sale.order').browse(request.cr, SUPERUSER_ID, order_id)
if token != order.access_token:
return request.website.render('website.404')
option_obj = request.registry.get('sale.order.option')
option = option_obj.browse(request.cr, SUPERUSER_ID, option_id)
res = request.registry.get('sale.order.line').product_id_change(request.cr, SUPERUSER_ID, order_id,
False, option.product_id.id, option.quantity, option.uom_id.id, option.quantity, option.uom_id.id,
option.name, order.partner_id.id, False, True, time.strftime('%Y-%m-%d'),
False, order.fiscal_position.id, True, request.context)
vals = res.get('value', {})
if 'tax_id' in vals:
vals['tax_id'] = [(6, 0, vals['tax_id'])]
vals.update({
'price_unit': option.price_unit,
'website_description': option.website_description,
'name': option.name,
'order_id': order.id,
'product_id' : option.product_id.id,
'product_uos_qty': option.quantity,
'product_uos': option.uom_id.id,
'product_uom_qty': option.quantity,
'product_uom': option.uom_id.id,
'discount': option.discount,
})
line = request.registry.get('sale.order.line').create(request.cr, SUPERUSER_ID, vals, context=request.context)
option_obj.write(request.cr, SUPERUSER_ID, [option.id], {'line_id': line}, context=request.context)
return werkzeug.utils.redirect("/quote/%s/%s#pricing" % (order.id, token))
| agpl-3.0 |
alexmandujano/django | django/conf/locale/sl/formats.py | 200 | 2120 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals

# Slovenian ('sl') locale formats.
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd. F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j. M. Y'
SHORT_DATETIME_FORMAT = 'j.n.Y. H:i'
FIRST_DAY_OF_WEEK = 0  # 0 means Sunday in Django's calendar convention

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Formats are tried in order; the first one that parses wins.
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
    '%d-%m-%Y', # '25-10-2006'
    '%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M', # '25.10.2006 14:30'
    '%d.%m.%Y', # '25.10.2006'
    '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M', # '25.10.06 14:30'
    '%d.%m.%y', # '25.10.06'
    '%d-%m-%Y %H:%M:%S', # '25-10-2006 14:30:59'
    '%d-%m-%Y %H:%M:%S.%f', # '25-10-2006 14:30:59.000200'
    '%d-%m-%Y %H:%M', # '25-10-2006 14:30'
    '%d-%m-%Y', # '25-10-2006'
    '%d. %m. %Y %H:%M:%S', # '25. 10. 2006 14:30:59'
    '%d. %m. %Y %H:%M:%S.%f', # '25. 10. 2006 14:30:59.000200'
    '%d. %m. %Y %H:%M', # '25. 10. 2006 14:30'
    '%d. %m. %Y', # '25. 10. 2006'
    '%d. %m. %y %H:%M:%S', # '25. 10. 06 14:30:59'
    '%d. %m. %y %H:%M:%S.%f', # '25. 10. 06 14:30:59.000200'
    '%d. %m. %y %H:%M', # '25. 10. 06 14:30'
    '%d. %m. %y', # '25. 10. 06'
)
# Number formatting: e.g. 1.234.567,89
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
citrix-openstack-build/python-cinderclient | cinderclient/v2/volume_snapshots.py | 2 | 4489 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume snapshot interface (1.1 extension)."""
import six
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from cinderclient import base
class Snapshot(base.Resource):
    """Represents a point-in-time snapshot of an openstack volume."""

    def __repr__(self):
        return "<Snapshot: {0}>".format(self.id)

    def delete(self):
        """Remove this snapshot via its manager."""
        self.manager.delete(self)

    def update(self, **kwargs):
        """Change this snapshot's name and/or description."""
        self.manager.update(self, **kwargs)

    @property
    def progress(self):
        # extended attribute exposed by the API under a prefixed key
        return self._info.get('os-extended-snapshot-attributes:progress')

    @property
    def project_id(self):
        # extended attribute exposed by the API under a prefixed key
        return self._info.get('os-extended-snapshot-attributes:project_id')

    def reset_state(self, state):
        """Force this snapshot into the given state."""
        self.manager.reset_state(self, state)
class SnapshotManager(base.ManagerWithFind):
    """Manage :class:`Snapshot` resources."""
    resource_class = Snapshot

    def create(self, volume_id, force=False,
               name=None, description=None):
        """Create a snapshot of the given volume.

        :param volume_id: The ID of the volume to snapshot.
        :param force: If True, snapshot even when the volume is attached
                      to an instance. Default is False.
        :param name: Name of the snapshot.
        :param description: Description of the snapshot.
        :rtype: :class:`Snapshot`
        """
        snapshot = {'volume_id': volume_id,
                    'force': force,
                    'name': name,
                    'description': description}
        return self._create('/snapshots', {'snapshot': snapshot}, 'snapshot')

    def get(self, snapshot_id):
        """Get a single snapshot by id.

        :param snapshot_id: The ID of the snapshot to get.
        :rtype: :class:`Snapshot`
        """
        return self._get("/snapshots/%s" % snapshot_id, "snapshot")

    def list(self, detailed=True, search_opts=None):
        """Get a list of all snapshots, optionally filtered.

        :rtype: list of :class:`Snapshot`
        """
        if search_opts is None:
            search_opts = {}
        # drop falsy filter values before building the query string
        qparams = dict((opt, val)
                       for opt, val in six.iteritems(search_opts) if val)
        query_string = "?%s" % urlencode(qparams) if qparams else ""
        detail = "/detail" if detailed else ""
        return self._list("/snapshots%s%s" % (detail, query_string),
                          "snapshots")

    def delete(self, snapshot):
        """Delete a snapshot.

        :param snapshot: The :class:`Snapshot` to delete.
        """
        self._delete("/snapshots/%s" % base.getid(snapshot))

    def update(self, snapshot, **kwargs):
        """Update the name or description of a snapshot.

        :param snapshot: The :class:`Snapshot` to update.
        """
        if kwargs:
            self._update("/snapshots/%s" % base.getid(snapshot),
                         {"snapshot": kwargs})

    def reset_state(self, snapshot, state):
        """Update the specified snapshot with the provided state."""
        return self._action('os-reset_status', snapshot, {'status': state})

    def _action(self, action, snapshot, info=None, **kwargs):
        """Perform a snapshot action (POST to the action endpoint)."""
        body = {action: info}
        self.run_hooks('modify_body_for_action', body, **kwargs)
        url = '/snapshots/%s/action' % base.getid(snapshot)
        return self.api.client.post(url, body=body)

    def update_snapshot_status(self, snapshot, update_dict):
        """Report a new status for a snapshot (for backend drivers)."""
        return self._action('os-update_snapshot_status',
                            base.getid(snapshot), update_dict)
| apache-2.0 |
yousafsyed/casperjs | bin/Lib/site-packages/pip/_vendor/distlib/util.py | 190 | 51230 | #
# Copyright (C) 2012-2013 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
import ssl
import subprocess
import sys
import tarfile
import tempfile
try:
import threading
except ImportError:
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, httplib, xmlrpclib, splittype,
HTTPHandler, HTTPSHandler as BaseHTTPSHandler,
BaseConfigurator, valid_ident, Container, configparser,
URLError, match_hostname, CertificateError, ZipFile)
logger = logging.getLogger(__name__)

#
# Requirement parsing code for name + optional constraints + optional extras
#
# e.g. 'foo >= 1.2, < 2.0 [bar, baz]'
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#
# All fragments are raw strings: the previous versions mixed raw and
# non-raw literals, and escapes like '\s' in a non-raw string are invalid
# escape sequences (DeprecationWarning, later SyntaxWarning/SyntaxError).
# The resulting pattern values are unchanged.
#

COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)

IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + r')'
VERSPEC = IDENT + r'\*?'

RELOP = r'([<>=!~]=)|[<>]'

#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = (r'(' + RELOP + r')?\s*(' + VERSPEC + r')(' + COMMA + r'(' +
                    RELOP + r')\s*(' + VERSPEC + r'))*')

DIRECT_REF = r'(from\s+(?P<diref>.*))'

#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + r'|' + DIRECT_REF +
               r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + r'\s*)')

EXTRA_LIST = EXTRA_IDENT + r'(' + COMMA + EXTRA_IDENT + r')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
REQUIREMENT = (r'(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
               CONSTRAINTS + r')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)

#
# Used to scan through the constraints
#
RELOP_IDENT = r'(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + r')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
    """Parse a requirement string like 'foo [bar, baz] (>= 1.2, < 2.0)' or
    'foo from <url>' into a Container with name, constraints, extras,
    requirement (normalised string), source and url attributes.

    Returns None when the string does not match the requirement grammar.

    Fix: removed the dead local ``constr`` which was assigned but never
    used in the original implementation.
    """
    def get_constraint(m):
        # turn one 'op version' match into an (op, version) tuple
        d = m.groupdict()
        return d['op'], d['vn']

    result = None
    m = REQUIREMENT_RE.match(s)
    if m:
        d = m.groupdict()
        name = d['dn']
        cons = d['c1'] or d['c2']
        if not d['diref']:
            url = None
        else:
            # direct reference: 'from <url>' supersedes version constraints
            cons = None
            url = d['diref'].strip()
        if not cons:
            cons = None
            rs = d['dn']
        else:
            # a constraint with no leading operator is implicitly '~='
            if cons[0] not in '<>!=':
                cons = '~=' + cons
            iterator = RELOP_IDENT_RE.finditer(cons)
            cons = [get_constraint(m) for m in iterator]
            rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
        if not d['ex']:
            extras = None
        else:
            extras = COMMA_RE.split(d['ex'])
        result = Container(name=name, constraints=cons, extras=extras,
                           requirement=rs, source=s, url=url)
    return result
def get_resources_dests(resources_root, rules):
    """Find destinations for resources files"""
    def get_rel_path(base, path):
        # normalizes and returns a lstripped-/-separated path
        base = base.replace(os.path.sep, '/')
        path = path.replace(os.path.sep, '/')
        assert path.startswith(base)
        return path[len(base):].lstrip('/')

    destinations = {}
    for base, suffix, dest in rules:
        prefix = os.path.join(resources_root, base)
        for abs_base in iglob(prefix):
            for abs_path in iglob(os.path.join(abs_base, suffix)):
                resource_file = get_rel_path(resources_root, abs_path)
                if dest is None:
                    # a None destination removes any earlier mapping
                    destinations.pop(resource_file, None)
                else:
                    rel_path = get_rel_path(abs_base, abs_path)
                    rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
                    destinations[resource_file] = rel_dest + '/' + rel_path
    return destinations
def in_venv():
    """Return True when running inside a virtual environment."""
    if hasattr(sys, 'real_prefix'):
        # virtualenv-style environments set sys.real_prefix
        return True
    # PEP 405 venvs: prefix differs from base_prefix
    return sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
def get_executable():
    """Return the Python executable path, honouring the OS X pyvenv
    launcher environment variable when present."""
    if sys.platform == 'darwin' and '__PYVENV_LAUNCHER__' in os.environ:
        return os.environ['__PYVENV_LAUNCHER__']
    return sys.executable
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
    """Prompt interactively until the first character of the reply is in
    *allowed_chars*; return that character, lower-cased.

    An empty reply is replaced by *default* (when given).  When the reply
    is not allowed and *error_prompt* is set, the next prompt is prefixed
    with an explanation of the rejected character.
    """
    p = prompt
    while True:
        s = raw_input(p)
        p = prompt  # reset to the plain prompt for the next round
        if not s and default:
            s = default
        if s:
            c = s[0].lower()
            if c in allowed_chars:
                break
            if error_prompt:
                p = '%c: %s\n%s' % (c, error_prompt, prompt)
    return c
def extract_by_key(d, keys):
    """Return a new dict holding only the entries of *d* whose key is in
    *keys* (a sequence, or a whitespace-separated string of keys)."""
    if isinstance(keys, string_types):
        keys = keys.split()
    return dict((k, d[k]) for k in keys if k in d)
def read_exports(stream):
    """Read an exports mapping {group: {name: ExportEntry}} from *stream*.

    The stream may contain either JSON (current format) or the legacy
    INI-style format; JSON is tried first and the INI parser is used as a
    fallback when JSON loading fails for any reason.
    """
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getreader('utf-8')(stream)
    # Try to load as JSON, falling back on legacy format
    data = stream.read()
    stream = StringIO(data)
    try:
        data = json.load(stream)
        result = data['exports']
        # convert each 'name = value' pair into an ExportEntry in place
        for group, entries in result.items():
            for k, v in entries.items():
                s = '%s = %s' % (k, v)
                entry = get_export_entry(s)
                assert entry is not None
                entries[k] = entry
        return result
    except Exception:
        # not JSON (or malformed): rewind and parse as INI
        stream.seek(0, 0)
        cp = configparser.ConfigParser()
        if hasattr(cp, 'read_file'):
            cp.read_file(stream)
        else:
            # read_file is Python 3; readfp is the Python 2 spelling
            cp.readfp(stream)
        result = {}
        for key in cp.sections():
            result[key] = entries = {}
            for name, value in cp.items(key):
                s = '%s = %s' % (name, value)
                entry = get_export_entry(s)
                assert entry is not None
                #entry.dist = self
                entries[name] = entry
        return result
def write_exports(exports, stream):
    """Write an exports mapping {group: {name: entry}} to *stream* in the
    INI format, one section per group."""
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for k, v in exports.items():
        # TODO check k, v for valid values
        cp.add_section(k)
        for entry in v.values():
            if entry.suffix is None:
                value = entry.prefix
            else:
                value = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                value = '%s [%s]' % (value, ', '.join(entry.flags))
            cp.set(k, entry.name, value)
    cp.write(stream)
@contextlib.contextmanager
def tempdir():
    """Context manager yielding a fresh temporary directory which is
    removed (with its contents) on exit, even on error."""
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path)
@contextlib.contextmanager
def chdir(d):
    """Context manager that runs its body with *d* as the current working
    directory, restoring the previous one afterwards."""
    previous = os.getcwd()
    try:
        os.chdir(d)
        yield
    finally:
        os.chdir(previous)
@contextlib.contextmanager
def socket_timeout(seconds=15):
    """Context manager temporarily setting the global default socket
    timeout, restoring the previous value on exit."""
    saved = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(seconds)
        yield
    finally:
        socket.setdefaulttimeout(saved)
class cached_property(object):
    """Descriptor computing a property value once per instance.

    On first access the wrapped function runs and its result is stored on
    the instance under the same name, shadowing this descriptor so later
    accesses are plain attribute lookups.
    """

    def __init__(self, func):
        self.func = func

    def __get__(self, obj, cls=None):
        if obj is None:
            # accessed on the class: return the descriptor itself
            return self
        value = self.func(obj)
        # bypass any custom __setattr__ when caching the value
        object.__setattr__(obj, self.func.__name__, value)
        return value
def convert_path(pathname):
    """Return 'pathname' as a name that will work on the native filesystem.

    The path is split on '/' and put back together again using the current
    directory separator. Needed because filenames in the setup script are
    always supplied in Unix style, and have to be converted to the local
    convention before we can actually use them in the filesystem. Raises
    ValueError on non-Unix-ish systems if 'pathname' either starts or
    ends with a slash.
    """
    if os.sep == '/':
        # already the native convention; nothing to do
        return pathname
    if not pathname:
        return pathname
    if pathname[0] == '/':
        raise ValueError("path '%s' cannot be absolute" % pathname)
    if pathname[-1] == '/':
        raise ValueError("path '%s' cannot end with '/'" % pathname)
    # drop any '.' components before rejoining with the native separator
    parts = [p for p in pathname.split('/') if p != os.curdir]
    if not parts:
        return os.curdir
    return os.path.join(*parts)
class FileOperator(object):
    """Perform filesystem operations (copy, write, byte-compile, remove),
    honouring a dry-run flag and optionally recording changes so they can
    later be committed or rolled back.
    """
    def __init__(self, dry_run=False):
        # dry_run: mutating operations only log what they would have done
        self.dry_run = dry_run
        self.ensured = set()  # directories already known/created via ensure_dir
        self._init_record()

    def _init_record(self):
        # recording is off until a caller sets self.record = True
        self.record = False
        self.files_written = set()
        self.dirs_created = set()

    def record_as_written(self, path):
        # remember *path* for commit()/rollback() when recording is on
        if self.record:
            self.files_written.add(path)

    def newer(self, source, target):
        """Tell if the target is newer than the source.

        Returns true if 'source' exists and is more recently modified than
        'target', or if 'source' exists and 'target' doesn't.

        Returns false if both exist and 'target' is the same age or younger
        than 'source'. Raise PackagingFileError if 'source' does not exist.

        Note that this test is not very accurate: files created in the same
        second will have the same "age".
        """
        if not os.path.exists(source):
            raise DistlibException("file '%r' does not exist" %
                                   os.path.abspath(source))
        if not os.path.exists(target):
            return True

        return os.stat(source).st_mtime > os.stat(target).st_mtime

    def copy_file(self, infile, outfile, check=True):
        """Copy a file respecting dry-run and force flags.
        """
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying %s to %s', infile, outfile)
        if not self.dry_run:
            msg = None
            if check:
                # refuse to clobber symlinks or non-regular files
                if os.path.islink(outfile):
                    msg = '%s is a symlink' % outfile
                elif os.path.exists(outfile) and not os.path.isfile(outfile):
                    msg = '%s is a non-regular file' % outfile
            if msg:
                raise ValueError(msg + ' which would be overwritten')
            shutil.copyfile(infile, outfile)
        self.record_as_written(outfile)

    def copy_stream(self, instream, outfile, encoding=None):
        # copy an open stream to *outfile*; encoding=None means binary mode
        assert not os.path.isdir(outfile)
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying stream %s to %s', instream, outfile)
        if not self.dry_run:
            if encoding is None:
                outstream = open(outfile, 'wb')
            else:
                outstream = codecs.open(outfile, 'w', encoding=encoding)
            try:
                shutil.copyfileobj(instream, outstream)
            finally:
                outstream.close()
        self.record_as_written(outfile)

    def write_binary_file(self, path, data):
        # write raw bytes to *path*, creating parent dirs as needed
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            with open(path, 'wb') as f:
                f.write(data)
        self.record_as_written(path)

    def write_text_file(self, path, data, encoding):
        # write text to *path*, encoding it explicitly
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            with open(path, 'wb') as f:
                f.write(data.encode(encoding))
        self.record_as_written(path)

    def set_mode(self, bits, mask, files):
        # POSIX only: OR *bits* into each file's mode, then AND with *mask*
        if os.name == 'posix':
            # Set the executable bits (owner, group, and world) on
            # all the files specified.
            for f in files:
                if self.dry_run:
                    logger.info("changing mode of %s", f)
                else:
                    mode = (os.stat(f).st_mode | bits) & mask
                    logger.info("changing mode of %s to %o", f, mode)
                    os.chmod(f, mode)

    # convenience wrapper: mark files executable (r-xr-xr-x minimum)
    set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)

    def ensure_dir(self, path):
        # create *path* (and parents, recursively) unless already ensured
        path = os.path.abspath(path)
        if path not in self.ensured and not os.path.exists(path):
            self.ensured.add(path)
            d, f = os.path.split(path)
            self.ensure_dir(d)
            logger.info('Creating %s' % path)
            if not self.dry_run:
                os.mkdir(path)
            if self.record:
                self.dirs_created.add(path)

    def byte_compile(self, path, optimize=False, force=False, prefix=None):
        # compile *path* to its PEP 3147 cache location; *prefix* is
        # stripped from the path used in error diagnostics
        dpath = cache_from_source(path, not optimize)
        logger.info('Byte-compiling %s to %s', path, dpath)
        if not self.dry_run:
            if force or self.newer(path, dpath):
                if not prefix:
                    diagpath = None
                else:
                    assert path.startswith(prefix)
                    diagpath = path[len(prefix):]
            # NOTE(review): if neither force nor newer() holds, diagpath is
            # unbound here and this call would raise NameError — confirm
            # whether the compile call should be inside the if block.
            py_compile.compile(path, dpath, diagpath, True)     # raise error
        self.record_as_written(dpath)
        return dpath

    def ensure_removed(self, path):
        # remove a file, link or directory tree if it exists, keeping the
        # recording sets consistent
        if os.path.exists(path):
            if os.path.isdir(path) and not os.path.islink(path):
                logger.debug('Removing directory tree at %s', path)
                if not self.dry_run:
                    shutil.rmtree(path)
                if self.record:
                    if path in self.dirs_created:
                        self.dirs_created.remove(path)
            else:
                if os.path.islink(path):
                    s = 'link'
                else:
                    s = 'file'
                logger.debug('Removing %s %s', s, path)
                if not self.dry_run:
                    os.remove(path)
                if self.record:
                    if path in self.files_written:
                        self.files_written.remove(path)

    def is_writable(self, path):
        # walk up from *path* to the first existing ancestor and test
        # whether that one is writable
        result = False
        while not result:
            if os.path.exists(path):
                result = os.access(path, os.W_OK)
                break
            parent = os.path.dirname(path)
            if parent == path:
                break
            path = parent
        return result

    def commit(self):
        """
        Commit recorded changes, turn off recording, return
        changes.
        """
        assert self.record
        result = self.files_written, self.dirs_created
        self._init_record()
        return result

    def rollback(self):
        # undo every recorded write: remove files, then created dirs
        if not self.dry_run:
            for f in list(self.files_written):
                if os.path.exists(f):
                    os.remove(f)
            # dirs should all be empty now, except perhaps for
            # __pycache__ subdirs
            # reverse so that subdirs appear before their parents
            dirs = sorted(self.dirs_created, reverse=True)
            for d in dirs:
                flist = os.listdir(d)
                if flist:
                    assert flist == ['__pycache__']
                    sd = os.path.join(d, flist[0])
                    os.rmdir(sd)
                os.rmdir(d)     # should fail if non-empty
        self._init_record()
def resolve(module_name, dotted_path):
    """Resolve *dotted_path* relative to the module *module_name*.

    Returns the module itself when dotted_path is None, otherwise walks
    the attribute chain (e.g. resolve('os', 'path.join') -> os.path.join).
    """
    if module_name in sys.modules:
        mod = sys.modules[module_name]
    else:
        mod = __import__(module_name)
    if dotted_path is None:
        return mod
    parts = dotted_path.split('.')
    obj = getattr(mod, parts[0])
    for part in parts[1:]:
        obj = getattr(obj, part)
    return obj
class ExportEntry(object):
    """A single export entry: *name* pointing at *prefix*:*suffix* with an
    optional list of *flags*."""

    def __init__(self, name, prefix, suffix, flags):
        self.name = name
        self.prefix = prefix
        self.suffix = suffix
        self.flags = flags

    @cached_property
    def value(self):
        """The object this entry points to, resolved lazily on first use."""
        return resolve(self.prefix, self.suffix)

    def __repr__(self):
        return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
                                                self.suffix, self.flags)

    def __eq__(self, other):
        if not isinstance(other, ExportEntry):
            return False
        return (self.name == other.name and
                self.prefix == other.prefix and
                self.suffix == other.suffix and
                self.flags == other.flags)

    # equality is value-based but hashing stays identity-based
    __hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
    """Parse 'name = prefix:suffix [flags]' into an ExportEntry.

    Returns None for a non-matching string without brackets; raises
    DistlibException for malformed specifications.
    """
    m = ENTRY_RE.search(specification)
    if not m:
        if '[' in specification or ']' in specification:
            raise DistlibException('Invalid specification '
                                   '%r' % specification)
        return None
    d = m.groupdict()
    path = d['callable']
    colons = path.count(':')
    if colons == 0:
        prefix, suffix = path, None
    elif colons == 1:
        prefix, suffix = path.split(':')
    else:
        # more than one ':' cannot be a valid prefix:suffix pair
        raise DistlibException('Invalid specification '
                               '%r' % specification)
    flags = d['flags']
    if flags is None:
        # brackets without parseable flags means a malformed spec
        if '[' in specification or ']' in specification:
            raise DistlibException('Invalid specification '
                                   '%r' % specification)
        flags = []
    else:
        flags = [f.strip() for f in flags.split(',')]
    return ExportEntry(d['name'], prefix, suffix, flags)
def get_cache_base(suffix=None):
    """
    Return the default base location for distlib caches. If the directory does
    not exist, it is created. Use the suffix provided for the base directory,
    and default to '.distlib' if it isn't provided.

    On Windows, if LOCALAPPDATA is defined in the environment, then it is
    assumed to be a directory, and will be the parent directory of the result.

    On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
    directory - using os.expanduser('~') - will be the parent directory of
    the result.

    The result is just the directory '.distlib' in the parent directory as
    determined above, or with the name specified with ``suffix``.
    """
    if suffix is None:
        suffix = '.distlib'
    if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
        result = os.path.expandvars('$localappdata')
    else:
        # Assume posix, or old Windows
        result = os.path.expanduser('~')
    # we use 'isdir' instead of 'exists', because we want to
    # fail if there's a file with that name
    if os.path.isdir(result):
        usable = os.access(result, os.W_OK)
        if not usable:
            logger.warning('Directory exists but is not writable: %s', result)
    else:
        try:
            os.makedirs(result)
            usable = True
        except OSError:
            logger.warning('Unable to create %s', result, exc_info=True)
            usable = False
    if not usable:
        # last resort: fall back to a throwaway temporary directory
        result = tempfile.mkdtemp()
        logger.warning('Default location unusable, using %s', result)
    return os.path.join(result, suffix)
def path_to_cache_dir(path):
    """
    Convert an absolute path to a directory name for use in a cache.

    The algorithm used is:

    #. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
    #. Any occurrence of ``os.sep`` is replaced with ``'--'``.
    #. ``'.cache'`` is appended.
    """
    drive, p = os.path.splitdrive(os.path.abspath(path))
    if drive:
        drive = drive.replace(':', '---')
    return drive + p.replace(os.sep, '--') + '.cache'
def ensure_slash(s):
    """Return *s*, appending a '/' if it does not already end with one."""
    return s if s.endswith('/') else s + '/'
def parse_credentials(netloc):
    """Split a 'user:password@host' netloc into (username, password, host).

    Missing parts come back as None; splits happen on the first '@' and
    the first ':' only.
    """
    username = password = None
    if '@' in netloc:
        prefix, netloc = netloc.split('@', 1)
        if ':' in prefix:
            username, password = prefix.split(':', 1)
        else:
            username = prefix
    return username, password, netloc
def get_process_umask():
    """Return the current process umask without permanently changing it."""
    current = os.umask(0o22)  # the umask can only be read by setting it
    os.umask(current)         # restore the original value immediately
    return current
def is_string_sequence(seq):
    """Return True if every element of *seq* is a string.

    An empty sequence is vacuously True.  The previous implementation
    asserted that the sequence was non-empty, which raised AssertionError
    for empty input (and silently passed under ``python -O``).
    """
    return all(isinstance(s, string_types) for s in seq)
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
                                      '([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')


def split_filename(filename, project_name=None):
    """Split a distribution filename (no extension) into its parts.

    Returns (name, version, pyver) — pyver is None when no '-pyX.Y'
    marker is present — or None when the filename cannot be parsed.
    """
    pyver = None
    m = PYTHON_VERSION.search(filename)
    if m:
        # strip the trailing '-pyX.Y' marker before splitting name/version
        pyver = m.group(1)
        filename = filename[:m.start()]
    if project_name and len(filename) > len(project_name) + 1:
        # prefer an exact project-name prefix when the caller supplies one
        m = re.match(re.escape(project_name) + r'\b', filename)
        if m:
            end = m.end()
            return filename[:end], filename[end + 1:], pyver
    m = PROJECT_NAME_AND_VERSION.match(filename)
    if m:
        return m.group(1), m.group(3), pyver
    return None
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
                             r'\(\s*(?P<ver>[^\s)]+)\)$')


def parse_name_and_version(p):
    """Extract (name, version) from a 'foo (1.0)' style string, e.g. a
    Provides-Dist value.  The name is stripped and lower-cased.

    :param p: A value in a form 'foo (1.0)'
    :raises DistlibException: when *p* is not in the expected form.
    """
    m = NAME_VERSION_RE.match(p)
    if not m:
        raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
    groups = m.groupdict()
    return groups['name'].strip().lower(), groups['ver']
def get_extras(requested, available):
    """Resolve a requested set of extras against the available ones.

    '*' selects everything available, a leading '-' deselects, and the
    literal '-' is kept verbatim.  Undeclared names are warned about but
    still honoured.  Either argument may be None.
    """
    result = set()
    requested = set(requested or [])
    available = set(available or [])
    if '*' in requested:
        # wildcard: start from everything, then apply the explicit entries
        requested.remove('*')
        result |= available
    for r in requested:
        if r == '-':
            result.add(r)
        elif r.startswith('-'):
            unwanted = r[1:]
            if unwanted not in available:
                logger.warning('undeclared extra: %s' % unwanted)
            result.discard(unwanted)
        else:
            if r not in available:
                logger.warning('undeclared extra: %s' % r)
            result.add(r)
    return result
#
# Extended metadata functionality
#
def _get_external_data(url):
    """Fetch and decode JSON metadata from *url*.

    Returns an empty dict on any failure (network error, non-JSON
    content type, malformed payload); failures are logged, not raised.
    """
    result = {}
    try:
        # urlopen might fail if it runs into redirections,
        # because of Python issue #13696. Fixed in locators
        # using a custom redirect handler.
        resp = urlopen(url)
        headers = resp.info()
        if headers.get('Content-Type') != 'application/json':
            logger.debug('Unexpected response for JSON request')
        else:
            reader = codecs.getreader('utf-8')(resp)
            #data = reader.read().decode('utf-8')
            #result = json.loads(data)
            result = json.load(reader)
    except Exception as e:
        logger.exception('Failed to get external data for %s: %s', url, e)
    return result
def get_project_data(name):
    """Fetch the project-level JSON metadata for *name* from the mirror."""
    url = ('https://www.red-dove.com/pypi/projects/'
           '%s/%s/project.json' % (name[0].upper(), name))
    return _get_external_data(url)
def get_package_data(name, version):
    """Fetch the JSON metadata for a specific *name*/*version* package."""
    url = ('https://www.red-dove.com/pypi/projects/'
           '%s/%s/package-%s.json' % (name[0].upper(), name, version))
    return _get_external_data(url)
class Cache(object):
    """
    A class implementing a cache for resources that need to live in the file system
    e.g. shared libraries. This class was moved from resources to here because it
    could be used by other modules, e.g. the wheel module.
    """

    def __init__(self, base):
        """
        Initialise an instance.

        :param base: The base directory where the cache should be located.
        """
        # use 'isdir' rather than 'exists' so an ordinary file with the
        # cache's name fails loudly instead of being silently accepted
        if not os.path.isdir(base):
            os.makedirs(base)
        if (os.stat(base).st_mode & 0o77) != 0:
            logger.warning('Directory \'%s\' is not private', base)
        self.base = os.path.abspath(os.path.normpath(base))

    def prefix_to_dir(self, prefix):
        """
        Converts a resource prefix to a directory name in the cache.
        """
        return path_to_cache_dir(prefix)

    def clear(self):
        """Remove everything below base; return the paths that resisted
        removal."""
        not_removed = []
        for name in os.listdir(self.base):
            full = os.path.join(self.base, name)
            try:
                if os.path.islink(full) or os.path.isfile(full):
                    os.remove(full)
                elif os.path.isdir(full):
                    shutil.rmtree(full)
            except Exception:
                not_removed.append(full)
        return not_removed
class EventMixin(object):
    """
    A very simple publish/subscribe system.
    """
    def __init__(self):
        self._subscribers = {}

    def add(self, event, subscriber, append=True):
        """
        Register *subscriber* for *event*.

        :param event: The name of an event.
        :param subscriber: Callable invoked when the event is published.
        :param append: Append to (True) or prepend to (False) the existing
                       subscriber list for the event.
        """
        subs = self._subscribers
        if event not in subs:
            subs[event] = deque([subscriber])
        elif append:
            subs[event].append(subscriber)
        else:
            subs[event].appendleft(subscriber)

    def remove(self, event, subscriber):
        """
        Unregister *subscriber* from *event*.

        :raises ValueError: when the event has no subscribers at all.
        """
        subs = self._subscribers
        if event not in subs:
            raise ValueError('No subscribers: %r' % event)
        subs[event].remove(subscriber)

    def get_subscribers(self, event):
        """
        Return an iterator over the subscribers registered for *event*.
        """
        return iter(self._subscribers.get(event, ()))

    def publish(self, event, *args, **kwargs):
        """
        Publish *event*, calling every subscriber with the given arguments,
        and return the list of their return values.

        A subscriber that raises is logged and contributes None.
        """
        result = []
        for subscriber in self.get_subscribers(event):
            try:
                value = subscriber(event, *args, **kwargs)
            except Exception:
                logger.exception('Exception during event publication')
                value = None
            result.append(value)
        logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
                     event, args, kwargs, result)
        return result
#
# Simple sequencing
#
class Sequencer(object):
    """
    Build a dependency graph of steps and derive orderings from it.

    Edges are stored twice for O(1) lookup in either direction:
    ``self._preds`` maps a node to the set of its predecessors and
    ``self._succs`` maps a node to the set of its successors.  Nodes
    with no edges at all are tracked separately in ``self._nodes``.
    """
    def __init__(self):
        self._preds = {}
        self._succs = {}
        self._nodes = set()     # nodes with no preds/succs

    def add_node(self, node):
        """Register *node* as an isolated node (no edges yet)."""
        self._nodes.add(node)

    def remove_node(self, node, edges=False):
        """Forget *node*; also drop its edges when *edges* is true."""
        if node in self._nodes:
            self._nodes.remove(node)
        if edges:
            # Iterate over copies, since self.remove mutates the sets.
            for p in set(self._preds.get(node, ())):
                self.remove(p, node)
            for s in set(self._succs.get(node, ())):
                self.remove(node, s)
            # Remove empties
            for k, v in list(self._preds.items()):
                if not v:
                    del self._preds[k]
            for k, v in list(self._succs.items()):
                if not v:
                    del self._succs[k]

    def add(self, pred, succ):
        """Add a dependency edge: *pred* must come before *succ*."""
        assert pred != succ
        self._preds.setdefault(succ, set()).add(pred)
        self._succs.setdefault(pred, set()).add(succ)

    def remove(self, pred, succ):
        """Remove the *pred* -> *succ* edge.

        :raises ValueError: If the edge does not exist.
        """
        assert pred != succ
        try:
            preds = self._preds[succ]
            succs = self._succs[pred]
        except KeyError:
            raise ValueError('%r not a successor of anything' % succ)
        try:
            preds.remove(pred)
            succs.remove(succ)
        except KeyError:
            raise ValueError('%r not a successor of %r' % (succ, pred))

    def is_step(self, step):
        """Return True if *step* is known to this sequencer."""
        return (step in self._preds or step in self._succs or
                step in self._nodes)

    def get_steps(self, final):
        """
        Return the steps needed to reach *final*, in execution order.

        Walks the predecessor map breadth-first from *final*; the result
        is reversed on return so prerequisites come first.

        :raises ValueError: If *final* is not a known step.
        """
        if not self.is_step(final):
            raise ValueError('Unknown: %r' % final)
        result = []
        todo = []
        seen = set()
        todo.append(final)
        while todo:
            step = todo.pop(0)
            if step in seen:
                # if a step was already seen,
                # move it to the end (so it will appear earlier
                # when reversed on return) ... but not for the
                # final step, as that would be confusing for
                # users
                if step != final:
                    result.remove(step)
                    result.append(step)
            else:
                seen.add(step)
                result.append(step)
                # Only enqueue predecessors the first time a step is
                # seen.  Previously these two lines ran on every
                # iteration, which re-enqueued predecessors of
                # already-seen steps -- redundant work on DAGs and an
                # infinite loop if the graph contains a cycle.
                preds = self._preds.get(step, ())
                todo.extend(preds)
        return reversed(result)

    @property
    def strong_connections(self):
        """
        The strongly connected components of the graph, as a list of
        tuples (Tarjan's algorithm, recursive formulation).
        """
        #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
        index_counter = [0]
        stack = []
        lowlinks = {}
        index = {}
        result = []
        graph = self._succs
        def strongconnect(node):
            # set the depth index for this node to the smallest unused index
            index[node] = index_counter[0]
            lowlinks[node] = index_counter[0]
            index_counter[0] += 1
            stack.append(node)
            # Consider successors
            try:
                successors = graph[node]
            except Exception:
                successors = []
            for successor in successors:
                if successor not in lowlinks:
                    # Successor has not yet been visited
                    strongconnect(successor)
                    lowlinks[node] = min(lowlinks[node],lowlinks[successor])
                elif successor in stack:
                    # the successor is in the stack and hence in the current
                    # strongly connected component (SCC)
                    lowlinks[node] = min(lowlinks[node],index[successor])
            # If `node` is a root node, pop the stack and generate an SCC
            if lowlinks[node] == index[node]:
                connected_component = []
                while True:
                    successor = stack.pop()
                    connected_component.append(successor)
                    if successor == node: break
                component = tuple(connected_component)
                # storing the result
                result.append(component)
        for node in graph:
            if node not in lowlinks:
                strongconnect(node)
        return result

    @property
    def dot(self):
        """A Graphviz "dot" rendering of the graph, as a string."""
        result = ['digraph G {']
        for succ in self._preds:
            preds = self._preds[succ]
            for pred in preds:
                result.append(' %s -> %s;' % (pred, succ))
        for node in self._nodes:
            result.append(' %s;' % node)
        result.append('}')
        return '\n'.join(result)
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
'.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
    """
    Unpack *archive_filename* into *dest_dir*.

    :param archive_filename: Path to a .zip/.whl/.tar/.tgz/.tbz archive.
    :param dest_dir: Directory to extract into.
    :param format: One of 'zip', 'tgz', 'tbz', 'tar', or None to infer
                   the format from the filename extension.
    :param check: If true, validate every member path to guard against
                  path traversal ("zip slip") before extracting.
    :raises ValueError: If the format cannot be determined or a member
                        path would escape *dest_dir*.
    """
    def check_path(path):
        if not isinstance(path, text_type):
            path = path.decode('utf-8')
        p = os.path.abspath(os.path.join(dest_dir, path))
        # Allow dest_dir itself (e.g. a '.' member); anything else must
        # resolve strictly below dest_dir.  The original test indexed
        # p[plen], which raised IndexError when p == dest_dir.
        if p != dest_dir and not p.startswith(dest_dir + os.sep):
            raise ValueError('path outside destination: %r' % p)
    dest_dir = os.path.abspath(dest_dir)
    archive = None
    if format is None:
        if archive_filename.endswith(('.zip', '.whl')):
            format = 'zip'
        elif archive_filename.endswith(('.tar.gz', '.tgz')):
            format = 'tgz'
        elif archive_filename.endswith(('.tar.bz2', '.tbz')):
            format = 'tbz'
        elif archive_filename.endswith('.tar'):
            format = 'tar'
        else:
            raise ValueError('Unknown format for %r' % archive_filename)
    try:
        if format == 'zip':
            archive = ZipFile(archive_filename, 'r')
            if check:
                names = archive.namelist()
                for name in names:
                    check_path(name)
        else:
            # Map format -> tarfile mode here (not only during extension
            # sniffing), so an explicitly passed format works; previously
            # e.g. format='tar' hit an UnboundLocalError on 'mode'.
            try:
                mode = {'tgz': 'r:gz', 'tbz': 'r:bz2', 'tar': 'r'}[format]
            except KeyError:
                raise ValueError('Unknown format: %r' % format)
            archive = tarfile.open(archive_filename, mode)
            if check:
                names = archive.getnames()
                for name in names:
                    check_path(name)
        if format != 'zip' and sys.version_info[0] < 3:
            # See Python issue 17153. If the dest path contains Unicode,
            # tarfile extraction fails on Python 2.x if a member path name
            # contains non-ASCII characters - it leads to an implicit
            # bytes -> unicode conversion using ASCII to decode.
            for tarinfo in archive.getmembers():
                if not isinstance(tarinfo.name, text_type):
                    tarinfo.name = tarinfo.name.decode('utf-8')
        archive.extractall(dest_dir)
    finally:
        if archive:
            archive.close()
def zip_dir(directory):
    """Zip a directory tree into an in-memory :class:`io.BytesIO` object.

    :param directory: Root of the tree to archive.
    :return: A BytesIO containing the zip data.
    """
    buf = io.BytesIO()
    prefix_len = len(directory)
    with ZipFile(buf, "w") as outzip:
        for root, _dirs, filenames in os.walk(directory):
            for filename in filenames:
                source = os.path.join(root, filename)
                # Archive names are the paths relative to *directory*.
                arcname = os.path.join(root[prefix_len:], filename)
                outzip.write(source, arcname)
    return buf
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G', 'T', 'P')
class Progress(object):
    """Track the progress of a task between a minimum and maximum value.

    ``maxval`` may be None, meaning the total amount of work is unknown.
    """
    unknown = 'UNKNOWN'

    def __init__(self, minval=0, maxval=100):
        assert maxval is None or maxval >= minval
        self.min = self.cur = minval
        self.max = maxval
        self.started = None
        self.elapsed = 0
        self.done = False

    def update(self, curval):
        """Record the current progress value and refresh elapsed time."""
        assert self.min <= curval
        assert self.max is None or curval <= self.max
        self.cur = curval
        now = time.time()
        if self.started is None:
            self.started = now
        else:
            self.elapsed = now - self.started

    def increment(self, incr):
        """Advance progress by a non-negative amount."""
        assert incr >= 0
        self.update(self.cur + incr)

    def start(self):
        """Mark the start of the task; returns self for chaining."""
        self.update(self.min)
        return self

    def stop(self):
        """Mark the task finished, snapping progress to the maximum if known."""
        if self.max is not None:
            self.update(self.max)
        self.done = True

    @property
    def maximum(self):
        """The configured maximum, or 'UNKNOWN' when open-ended."""
        return self.unknown if self.max is None else self.max

    @property
    def percentage(self):
        """Progress rendered as a fixed-width percentage string."""
        if self.done:
            return '100 %'
        if self.max is None:
            return ' ?? %'
        fraction = 100.0 * (self.cur - self.min) / (self.max - self.min)
        return '%3d %%' % fraction

    def format_duration(self, duration):
        """Render *duration* as HH:MM:SS, or ??:??:?? when meaningless."""
        if ((duration <= 0) and self.max is None) or self.cur == self.min:
            return '??:??:??'
        return time.strftime('%H:%M:%S', time.gmtime(duration))

    @property
    def ETA(self):
        """Estimated time remaining (or total elapsed time, once done)."""
        if self.done:
            prefix = 'Done'
            t = self.elapsed
        else:
            prefix = 'ETA '
            if self.max is None:
                t = -1
            elif self.elapsed == 0 or (self.cur == self.min):
                t = 0
            else:
                # Linear extrapolation from the work completed so far.
                t = float(self.max - self.min)
                t /= self.cur - self.min
                t = (t - 1) * self.elapsed
        return '%s: %s' % (prefix, self.format_duration(t))

    @property
    def speed(self):
        """Progress units per second, scaled with K/M/G/T/P suffixes."""
        if self.elapsed == 0:
            rate = 0.0
        else:
            rate = (self.cur - self.min) / self.elapsed
        for unit in UNITS:
            if rate < 1000:
                break
            rate /= 1000.0
        return '%d %sB/s' % (rate, unit)
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
    """Extended globbing function that supports ** and {opt1,opt2,opt3}.

    :param path_glob: The pattern to expand.
    :return: A generator of matching paths.
    :raises ValueError: If '**' is not used as a standalone path segment
                        or the '{'/'}' set markers are unbalanced.
    """
    if _CHECK_RECURSIVE_GLOB.search(path_glob):
        msg = """invalid glob %r: recursive glob "**" must be used alone"""
        raise ValueError(msg % path_glob)
    if _CHECK_MISMATCH_SET.search(path_glob):
        msg = """invalid glob %r: mismatching set marker '{' or '}'"""
        raise ValueError(msg % path_glob)
    return _iglob(path_glob)
def _iglob(path_glob):
    # Expand one {a,b,c} alternation (if any) and recurse on each
    # expansion; otherwise delegate to the standard glob, handling a
    # recursive '**' segment by walking the tree manually.
    rich_path_glob = RICH_GLOB.split(path_glob, 1)
    if len(rich_path_glob) > 1:
        assert len(rich_path_glob) == 3, rich_path_glob
        # Renamed from 'set'/'dir' to avoid shadowing the builtins.
        prefix, alternatives, suffix = rich_path_glob
        for item in alternatives.split(','):
            for path in _iglob(''.join((prefix, item, suffix))):
                yield path
    else:
        if '**' not in path_glob:
            for item in std_iglob(path_glob):
                yield item
        else:
            prefix, radical = path_glob.split('**', 1)
            if prefix == '':
                prefix = '.'
            if radical == '':
                radical = '*'
            else:
                # we support both '/' and '\' as path separators
                radical = radical.lstrip('/')
                radical = radical.lstrip('\\')
            for path, _dirs, _files in os.walk(prefix):
                path = os.path.normpath(path)
                for fn in _iglob(os.path.join(path, radical)):
                    yield fn
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
    """
    An HTTPS connection which can verify the server's certificate against
    a CA bundle and check that the certificate matches the requested host.
    """
    ca_certs = None # set this to the path to the certs file (.pem)
    check_domain = True # only used if ca_certs is not None
    # noinspection PyPropertyAccess
    def connect(self):
        """Connect to the host and port, wrapping the socket in SSL/TLS."""
        sock = socket.create_connection((self.host, self.port), self.timeout)
        if getattr(self, '_tunnel_host', False):
            # Tunnel through a proxy (CONNECT) before negotiating TLS.
            self.sock = sock
            self._tunnel()
        if not hasattr(ssl, 'SSLContext'):
            # For 2.x
            if self.ca_certs:
                cert_reqs = ssl.CERT_REQUIRED
            else:
                cert_reqs = ssl.CERT_NONE
            self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                        cert_reqs=cert_reqs,
                                        ssl_version=ssl.PROTOCOL_SSLv23,
                                        ca_certs=self.ca_certs)
        else:
            # SSLContext path (2.7.9+/3.x); SSLv2 is explicitly disabled.
            context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            context.options |= ssl.OP_NO_SSLv2
            if self.cert_file:
                context.load_cert_chain(self.cert_file, self.key_file)
            kwargs = {}
            if self.ca_certs:
                context.verify_mode = ssl.CERT_REQUIRED
                context.load_verify_locations(cafile=self.ca_certs)
            if getattr(ssl, 'HAS_SNI', False):
                # Pass the hostname through for SNI where supported.
                kwargs['server_hostname'] = self.host
            self.sock = context.wrap_socket(sock, **kwargs)
        if self.ca_certs and self.check_domain:
            try:
                match_hostname(self.sock.getpeercert(), self.host)
                logger.debug('Host verified: %s', self.host)
            except CertificateError:
                # Close the socket cleanly before propagating the failure.
                self.sock.shutdown(socket.SHUT_RDWR)
                self.sock.close()
                raise
class HTTPSHandler(BaseHTTPSHandler):
    """
    A urllib-style HTTPS handler which creates HTTPSConnection instances
    configured with a CA bundle and optional domain (hostname) checking.
    """
    def __init__(self, ca_certs, check_domain=True):
        """
        :param ca_certs: Path to the CA bundle (.pem) used for verification.
        :param check_domain: Whether to match the certificate against the
                             requested host (only used if ca_certs is set).
        """
        BaseHTTPSHandler.__init__(self)
        self.ca_certs = ca_certs
        self.check_domain = check_domain
    def _conn_maker(self, *args, **kwargs):
        """
        This is called to create a connection instance. Normally you'd
        pass a connection class to do_open, but it doesn't actually check for
        a class, and just expects a callable. As long as we behave just as a
        constructor would have, we should be OK. If it ever changes so that
        we *must* pass a class, we'll create an UnsafeHTTPSConnection class
        which just sets check_domain to False in the class definition, and
        choose which one to pass to do_open.
        """
        result = HTTPSConnection(*args, **kwargs)
        if self.ca_certs:
            result.ca_certs = self.ca_certs
            result.check_domain = self.check_domain
        return result
    def https_open(self, req):
        """Open an HTTPS request, translating verification failures into a
        more descriptive CertificateError."""
        try:
            return self.do_open(self._conn_maker, req)
        except URLError as e:
            if 'certificate verify failed' in str(e.reason):
                raise CertificateError('Unable to verify server certificate '
                                       'for %s' % req.host)
            else:
                raise
#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
    """
    A handler which refuses plain HTTP requests outright, so that HTTP
    traffic cannot be mixed in where HTTPS is expected.
    """
    def http_open(self, req):
        raise URLError('Unexpected HTTP request on what should be a secure '
                       'connection: %s' % req)
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
    # 2.6's httplib.HTTP/HTTPS don't accept a timeout; these shims pass
    # arbitrary keyword arguments through to the connection class.
    class HTTP(httplib.HTTP):
        def __init__(self, host='', port=None, **kwargs):
            if port == 0:   # 0 means use port 0, not the default port
                port = None
            self._setup(self._connection_class(host, port, **kwargs))
    class HTTPS(httplib.HTTPS):
        def __init__(self, host='', port=None, **kwargs):
            if port == 0:   # 0 means use port 0, not the default port
                port = None
            self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
    """An XML-RPC HTTP transport whose connections honour a timeout."""
    def __init__(self, timeout, use_datetime=0):
        self.timeout = timeout
        xmlrpclib.Transport.__init__(self, use_datetime)
    def make_connection(self, host):
        """Return a (possibly cached) HTTP connection with the timeout set."""
        h, eh, x509 = self.get_host_info(host)
        if _ver_info == (2, 6):
            result = HTTP(h, timeout=self.timeout)
        else:
            if not self._connection or host != self._connection[0]:
                self._extra_headers = eh
                # Pass the timeout through; previously it was dropped on
                # this branch, so Transport(timeout) had no effect --
                # inconsistent with SafeTransport below.
                self._connection = host, httplib.HTTPConnection(
                    h, timeout=self.timeout)
            result = self._connection[1]
        return result
class SafeTransport(xmlrpclib.SafeTransport):
    """An XML-RPC HTTPS transport whose connections honour a timeout."""
    def __init__(self, timeout, use_datetime=0):
        self.timeout = timeout
        xmlrpclib.SafeTransport.__init__(self, use_datetime)
    def make_connection(self, host):
        """Return a (possibly cached) HTTPS connection with the timeout set."""
        h, eh, kwargs = self.get_host_info(host)
        if not kwargs:
            kwargs = {}
        kwargs['timeout'] = self.timeout
        if _ver_info == (2, 6):
            result = HTTPS(host, None, **kwargs)
        else:
            if not self._connection or host != self._connection[0]:
                self._extra_headers = eh
                self._connection = host, httplib.HTTPSConnection(h, None,
                                                                 **kwargs)
            result = self._connection[1]
        return result
class ServerProxy(xmlrpclib.ServerProxy):
    """
    An XML-RPC server proxy accepting an optional 'timeout' keyword,
    implemented by substituting one of the timeout-aware transports.
    """
    def __init__(self, uri, **kwargs):
        self.timeout = timeout = kwargs.pop('timeout', None)
        # The above classes only come into play if a timeout
        # is specified
        if timeout is not None:
            # Pick the HTTPS or HTTP transport based on the URI scheme.
            scheme, _ = splittype(uri)
            use_datetime = kwargs.get('use_datetime', 0)
            if scheme == 'https':
                tcls = SafeTransport
            else:
                tcls = Transport
            kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
            self.transport = t
        xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
return open(fn, mode, **kwargs)
class CSVBase(object):
    """Shared behaviour for the CSV reader/writer: default dialect
    options and context-manager support that closes the underlying
    stream on exit."""
    defaults = {
        'delimiter': str(','), # The strs are used because we need native
        'quotechar': str('"'), # str in the csv API (2.x won't take
        'lineterminator': str('\n') # Unicode)
    }

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.stream.close()
class CSVReader(CSVBase):
    """Read rows from a CSV source, yielding unicode cell values on
    both 2.x and 3.x.

    Accepts either ``stream=<binary stream>`` or ``path=<filename>`` as
    keyword arguments.
    """
    def __init__(self, **kwargs):
        if 'stream' in kwargs:
            stream = kwargs['stream']
            if sys.version_info[0] >= 3:
                # The csv module needs a text stream on 3.x; decode the
                # binary one as UTF-8.
                stream = codecs.getreader('utf-8')(stream)
            self.stream = stream
        else:
            self.stream = _csv_open(kwargs['path'], 'r')
        self.reader = csv.reader(self.stream, **self.defaults)

    def __iter__(self):
        return self

    def next(self):
        row = next(self.reader)
        if sys.version_info[0] < 3:
            # On 2.x, decode byte cells to unicode in place.
            for i, cell in enumerate(row):
                if not isinstance(cell, text_type):
                    row[i] = cell.decode('utf-8')
        return row

    __next__ = next
class CSVWriter(CSVBase):
    """Write rows to a CSV file, encoding unicode cells as UTF-8 on 2.x."""
    def __init__(self, fn, **kwargs):
        self.stream = _csv_open(fn, 'w')
        self.writer = csv.writer(self.stream, **self.defaults)

    def writerow(self, row):
        if sys.version_info[0] < 3:
            encoded = []
            for cell in row:
                if isinstance(cell, text_type):
                    cell = cell.encode('utf-8')
                encoded.append(cell)
            row = encoded
        self.writer.writerow(row)
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
    """
    A configurator which extends the base with an 'inc://' value
    converter and dict-style access that instantiates custom-object
    specifications on demand.
    """
    value_converters = dict(BaseConfigurator.value_converters)
    value_converters['inc'] = 'inc_convert'
    def __init__(self, config, base=None):
        """
        :param config: The configuration dictionary.
        :param base: Base directory for resolving relative 'inc://'
                     paths; defaults to the current working directory.
        """
        super(Configurator, self).__init__(config)
        self.base = base or os.getcwd()
    def configure_custom(self, config):
        """
        Instantiate the object described by *config*: the '()' key names
        the callable, '[]' lists positional arguments, '.' maps attribute
        names to values set after construction, and the remaining keys
        (valid identifiers only) become keyword arguments.
        """
        def convert(o):
            # Recursively convert containers, nested custom specs and
            # scalar values (the latter via the base class's convert()).
            if isinstance(o, (list, tuple)):
                result = type(o)([convert(i) for i in o])
            elif isinstance(o, dict):
                if '()' in o:
                    result = self.configure_custom(o)
                else:
                    result = {}
                    for k in o:
                        result[k] = convert(o[k])
            else:
                result = self.convert(o)
            return result
        c = config.pop('()')
        if not callable(c):
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        args = config.pop('[]', ())
        if args:
            args = tuple([convert(o) for o in args])
        items = [(k, convert(config[k])) for k in config if valid_ident(k)]
        kwargs = dict(items)
        result = c(*args, **kwargs)
        if props:
            for n, v in props.items():
                setattr(result, n, convert(v))
        return result
    def __getitem__(self, key):
        # Lazily instantiate custom-object specs on first access, and
        # cache the instance back into the config.
        result = self.config[key]
        if isinstance(result, dict) and '()' in result:
            self.config[key] = result = self.configure_custom(result)
        return result
    def inc_convert(self, value):
        """Default converter for the inc:// protocol."""
        # Relative paths are resolved against self.base, then the file
        # is parsed as UTF-8 JSON.
        if not os.path.isabs(value):
            value = os.path.join(self.base, value)
        with codecs.open(value, 'r', encoding='utf-8') as f:
            result = json.load(f)
        return result
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
    """Mixin that runs subprocesses, streaming their output either to a
    progress callable or to sys.stderr."""
    def __init__(self, verbose=False, progress=None):
        self.verbose = verbose
        self.progress = progress

    def reader(self, stream, context):
        """
        Read lines from a subprocess' output stream and either pass to a progress
        callable (if specified) or write progress information to sys.stderr.
        """
        progress = self.progress
        verbose = self.verbose
        while True:
            chunk = stream.readline()
            if not chunk:
                break
            if progress is not None:
                progress(chunk, context)
            elif verbose:
                sys.stderr.write(chunk.decode('utf-8'))
                sys.stderr.flush()
            else:
                sys.stderr.write('.')
                sys.stderr.flush()
        stream.close()

    def run_command(self, cmd, **kwargs):
        """Run *cmd*, draining stdout/stderr through self.reader threads.

        :return: The completed :class:`subprocess.Popen` object.
        """
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, **kwargs)
        drains = [
            threading.Thread(target=self.reader, args=(p.stdout, 'stdout')),
            threading.Thread(target=self.reader, args=(p.stderr, 'stderr')),
        ]
        for t in drains:
            t.start()
        p.wait()
        for t in drains:
            t.join()
        if self.progress is not None:
            self.progress('done.', 'main')
        elif self.verbose:
            sys.stderr.write('done.\n')
        return p
| mit |
bhermanmit/openmc | openmc/lattice.py | 1 | 49870 | from __future__ import division
from abc import ABCMeta
from collections import OrderedDict, Iterable
from math import sqrt, floor
from numbers import Real, Integral
from xml.etree import ElementTree as ET
from six import add_metaclass, string_types
import numpy as np
import openmc.checkvalue as cv
import openmc
@add_metaclass(ABCMeta)
class Lattice(object):
    """A repeating structure wherein each element is a universe.
    Parameters
    ----------
    lattice_id : int, optional
        Unique identifier for the lattice. If not specified, an identifier will
        automatically be assigned.
    name : str, optional
        Name of the lattice. If not specified, the name is the empty string.
    Attributes
    ----------
    id : int
        Unique identifier for the lattice
    name : str
        Name of the lattice
    pitch : Iterable of float
        Pitch of the lattice in each direction in cm
    outer : openmc.Universe
        A universe to fill all space outside the lattice
    universes : Iterable of Iterable of openmc.Universe
        A two- or three-dimensional list/array of universes filling each element
        of the lattice
    """
    def __init__(self, lattice_id=None, name=''):
        # Initialize Lattice class attributes
        self.id = lattice_id
        self.name = name
        self._pitch = None
        self._outer = None
        self._universes = None
    def __eq__(self, other):
        # NOTE(review): for RectLattice, 'universes' is a numpy array, so
        # 'self.universes != other.universes' yields an element-wise array
        # whose truth value is ambiguous; comparing two populated lattices
        # may raise ValueError -- confirm and consider np.array_equal.
        if not isinstance(other, Lattice):
            return False
        elif self.id != other.id:
            return False
        elif self.name != other.name:
            return False
        elif self.pitch != other.pitch:
            return False
        elif self.outer != other.outer:
            return False
        elif self.universes != other.universes:
            return False
        else:
            return True
    def __ne__(self, other):
        return not self == other
    @property
    def id(self):
        return self._id
    @property
    def name(self):
        return self._name
    @property
    def pitch(self):
        return self._pitch
    @property
    def outer(self):
        return self._outer
    @property
    def universes(self):
        return self._universes
    @id.setter
    def id(self, lattice_id):
        # Auto-assign an ID from the module-level counter when none given.
        if lattice_id is None:
            self._id = openmc.universe.AUTO_UNIVERSE_ID
            openmc.universe.AUTO_UNIVERSE_ID += 1
        else:
            cv.check_type('lattice ID', lattice_id, Integral)
            cv.check_greater_than('lattice ID', lattice_id, 0, equality=True)
            self._id = lattice_id
    @name.setter
    def name(self, name):
        # None is normalised to the empty string.
        if name is not None:
            cv.check_type('lattice name', name, string_types)
            self._name = name
        else:
            self._name = ''
    @outer.setter
    def outer(self, outer):
        cv.check_type('outer universe', outer, openmc.Universe)
        self._outer = outer
    @staticmethod
    def from_hdf5(group, universes):
        """Create lattice from HDF5 group
        Parameters
        ----------
        group : h5py.Group
            Group in HDF5 file
        universes : dict
            Dictionary mapping universe IDs to instances of
            :class:`openmc.Universe`.
        Returns
        -------
        openmc.Lattice
            Instance of lattice subclass
        """
        # The group name encodes the ID, e.g. '/.../lattice 3' -> 3.
        lattice_id = int(group.name.split('/')[-1].lstrip('lattice '))
        name = group['name'].value.decode() if 'name' in group else ''
        lattice_type = group['type'].value.decode()
        if lattice_type == 'rectangular':
            dimension = group['dimension'][...]
            lower_left = group['lower_left'][...]
            pitch = group['pitch'][...]
            outer = group['outer'].value
            universe_ids = group['universes'][...]
            # Create the Lattice
            lattice = openmc.RectLattice(lattice_id, name)
            lattice.lower_left = lower_left
            lattice.pitch = pitch
            # If the Universe specified outer the Lattice is not void
            if outer >= 0:
                lattice.outer = universes[outer]
            # Build array of Universe pointers for the Lattice
            uarray = np.empty(universe_ids.shape, dtype=openmc.Universe)
            for z in range(universe_ids.shape[0]):
                for y in range(universe_ids.shape[1]):
                    for x in range(universe_ids.shape[2]):
                        uarray[z, y, x] = universes[universe_ids[z, y, x]]
            # Use 2D NumPy array to store lattice universes for 2D lattices
            if len(dimension) == 2:
                uarray = np.squeeze(uarray)
                uarray = np.atleast_2d(uarray)
            # Set the universes for the lattice
            lattice.universes = uarray
        elif lattice_type == 'hexagonal':
            n_rings = group['n_rings'].value
            n_axial = group['n_axial'].value
            center = group['center'][...]
            pitch = group['pitch'][...]
            outer = group['outer'].value
            universe_ids = group['universes'][...]
            # Create the Lattice
            lattice = openmc.HexLattice(lattice_id, name)
            lattice.center = center
            lattice.pitch = pitch
            # If the Universe specified outer the Lattice is not void
            if outer >= 0:
                lattice.outer = universes[outer]
            # Build array of Universe pointers for the Lattice. Note that
            # we need to convert between the HDF5's square array of
            # (x, alpha, z) to the Python API's format of a ragged nested
            # list of (z, ring, theta).
            uarray = []
            for z in range(n_axial):
                # Add a list for this axial level.
                uarray.append([])
                x = n_rings - 1
                a = 2*n_rings - 2
                for r in range(n_rings - 1, 0, -1):
                    # Add a list for this ring.
                    uarray[-1].append([])
                    # Climb down the top-right.
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, a, x])
                        x += 1
                        a -= 1
                    # Climb down the right.
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, a, x])
                        a -= 1
                    # Climb down the bottom-right.
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, a, x])
                        x -= 1
                    # Climb up the bottom-left.
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, a, x])
                        x -= 1
                        a += 1
                    # Climb up the left.
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, a, x])
                        a += 1
                    # Climb up the top-left.
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, a, x])
                        x += 1
                    # Move down to the next ring.
                    a -= 1
                    # Convert the ids into Universe objects.
                    uarray[-1][-1] = [universes[u_id]
                                      for u_id in uarray[-1][-1]]
                # Handle the degenerate center ring separately.
                u_id = universe_ids[z, a, x]
                uarray[-1].append([universes[u_id]])
            # Add the universes to the lattice.
            if len(pitch) == 2:
                # Lattice is 3D
                lattice.universes = uarray
            else:
                # Lattice is 2D; extract the only axial level
                lattice.universes = uarray[0]
        return lattice
    def get_unique_universes(self):
        """Determine all unique universes in the lattice
        Returns
        -------
        universes : collections.OrderedDict
            Dictionary whose keys are universe IDs and values are
            :class:`openmc.Universe` instances
        """
        univs = OrderedDict()
        # Handle both 2-deep (rectangular) and 3-deep (hex/3D) nesting.
        for k in range(len(self._universes)):
            for j in range(len(self._universes[k])):
                if isinstance(self._universes[k][j], openmc.Universe):
                    u = self._universes[k][j]
                    univs[u._id] = u
                else:
                    for i in range(len(self._universes[k][j])):
                        u = self._universes[k][j][i]
                        assert isinstance(u, openmc.Universe)
                        univs[u._id] = u
        if self.outer is not None:
            univs[self.outer._id] = self.outer
        return univs
    def get_nuclides(self):
        """Returns all nuclides in the lattice
        Returns
        -------
        nuclides : list of str
            List of nuclide names
        """
        nuclides = []
        # Get all unique Universes contained in each of the lattice cells
        unique_universes = self.get_unique_universes()
        # Append all Universes containing each cell to the dictionary
        for universe in unique_universes.values():
            for nuclide in universe.get_nuclides():
                if nuclide not in nuclides:
                    nuclides.append(nuclide)
        return nuclides
    def get_all_cells(self):
        """Return all cells that are contained within the lattice
        Returns
        -------
        cells : collections.OrderedDict
            Dictionary whose keys are cell IDs and values are :class:`Cell`
            instances
        """
        cells = OrderedDict()
        unique_universes = self.get_unique_universes()
        for universe_id, universe in unique_universes.items():
            cells.update(universe.get_all_cells())
        return cells
    def get_all_materials(self):
        """Return all materials that are contained within the lattice
        Returns
        -------
        materials : collections.OrderedDict
            Dictionary whose keys are material IDs and values are
            :class:`Material` instances
        """
        materials = OrderedDict()
        # Append all Cells in each Cell in the Universe to the dictionary
        cells = self.get_all_cells()
        for cell_id, cell in cells.items():
            materials.update(cell.get_all_materials())
        return materials
    def get_all_universes(self):
        """Return all universes that are contained within the lattice
        Returns
        -------
        universes : collections.OrderedDict
            Dictionary whose keys are universe IDs and values are
            :class:`Universe` instances
        """
        # Initialize a dictionary of all Universes contained by the Lattice
        # in each nested Universe level
        all_universes = OrderedDict()
        # Get all unique Universes contained in each of the lattice cells
        unique_universes = self.get_unique_universes()
        # Add the unique Universes filling each Lattice cell
        all_universes.update(unique_universes)
        # Append all Universes containing each cell to the dictionary
        for universe_id, universe in unique_universes.items():
            all_universes.update(universe.get_all_universes())
        return all_universes
    def get_universe(self, idx):
        r"""Return universe corresponding to a lattice element index
        Parameters
        ----------
        idx : Iterable of int
            Lattice element indices. For a rectangular lattice, the indices are
            given in the :math:`(x,y)` or :math:`(x,y,z)` coordinate system. For
            hexagonal lattices, they are given in the :math:`x,\alpha` or
            :math:`x,\alpha,z` coordinate systems.
        Returns
        -------
        openmc.Universe
            Universe with given indices
        """
        idx_u = self.get_universe_index(idx)
        if self.ndim == 2:
            return self.universes[idx_u[0]][idx_u[1]]
        else:
            return self.universes[idx_u[0]][idx_u[1]][idx_u[2]]
    def find(self, point):
        """Find cells/universes/lattices which contain a given point
        Parameters
        ----------
        point : 3-tuple of float
            Cartesian coordinates of the point
        Returns
        -------
        list
            Sequence of universes, cells, and lattices which are traversed to
            find the given point
        """
        idx, p = self.find_element(point)
        if self.is_valid_index(idx):
            u = self.get_universe(idx)
        else:
            # Outside the lattice proper: fall back to the outer universe,
            # or report no containment when there is none.
            if self.outer is not None:
                u = self.outer
            else:
                return []
        return [(self, idx)] + u.find(p)
class RectLattice(Lattice):
"""A lattice consisting of rectangular prisms.
To completely define a rectangular lattice, the
:attr:`RectLattice.lower_left` :attr:`RectLattice.pitch`,
:attr:`RectLattice.outer`, and :attr:`RectLattice.universes` properties need
to be set.
Most methods for this class use a natural indexing scheme wherein elements
are assigned an index corresponding to their position relative to the
(x,y,z) axes in a Cartesian coordinate system, i.e., an index of (0,0,0) in
the lattice gives the element whose x, y, and z coordinates are the
smallest. However, note that when universes are assigned to lattice elements
using the :attr:`RectLattice.universes` property, the array indices do not
correspond to natural indices.
Parameters
----------
lattice_id : int, optional
Unique identifier for the lattice. If not specified, an identifier will
automatically be assigned.
name : str, optional
Name of the lattice. If not specified, the name is the empty string.
Attributes
----------
id : int
Unique identifier for the lattice
name : str
Name of the lattice
pitch : Iterable of float
Pitch of the lattice in the x, y, and (if applicable) z directions in
cm.
outer : openmc.Universe
A universe to fill all space outside the lattice
universes : Iterable of Iterable of openmc.Universe
A two- or three-dimensional list/array of universes filling each element
of the lattice. The first dimension corresponds to the z-direction (if
applicable), the second dimension corresponds to the y-direction, and
the third dimension corresponds to the x-direction. Note that for the
y-direction, a higher index corresponds to a lower physical
y-value. Each z-slice in the array can be thought of as a top-down view
of the lattice.
lower_left : Iterable of float
The Cartesian coordinates of the lower-left corner of the lattice. If
the lattice is two-dimensional, only the x- and y-coordinates are
specified.
indices : list of tuple
A list of all possible (z,y,x) or (y,x) lattice element indices. These
indices correspond to indices in the :attr:`RectLattice.universes`
property.
ndim : int
The number of dimensions of the lattice
shape : Iterable of int
An array of two or three integers representing the number of lattice
cells in the x- and y- (and z-) directions, respectively.
"""
    def __init__(self, lattice_id=None, name=''):
        super(RectLattice, self).__init__(lattice_id, name)
        # Initialize Lattice class attributes
        # Cartesian coordinates of the lattice's lower-left corner; must
        # be assigned via the 'lower_left' property before use.
        self._lower_left = None
    def __eq__(self, other):
        # Equality requires the base Lattice attributes plus the
        # rectangular-specific shape and lower-left corner to match.
        if not isinstance(other, RectLattice):
            return False
        elif not super(RectLattice, self).__eq__(other):
            return False
        elif self.shape != other.shape:
            return False
        elif self.lower_left != other.lower_left:
            return False
        else:
            return True
    def __ne__(self, other):
        return not self == other
    def __hash__(self):
        # NOTE(review): hashing on repr() includes mutable state, so the
        # hash changes if the lattice is modified after being hashed.
        return hash(repr(self))
    def __repr__(self):
        # Multi-line summary: scalar attributes first, then the grid of
        # universe IDs, one lattice row per line.
        string = 'RectLattice\n'
        string += '{0: <16}{1}{2}\n'.format('\tID', '=\t', self._id)
        string += '{0: <16}{1}{2}\n'.format('\tName', '=\t', self._name)
        string += '{0: <16}{1}{2}\n'.format('\tShape', '=\t',
                                            self.shape)
        string += '{0: <16}{1}{2}\n'.format('\tLower Left', '=\t',
                                            self._lower_left)
        string += '{0: <16}{1}{2}\n'.format('\tPitch', '=\t', self._pitch)
        if self._outer is not None:
            string += '{0: <16}{1}{2}\n'.format('\tOuter', '=\t',
                                                self._outer._id)
        else:
            string += '{0: <16}{1}{2}\n'.format('\tOuter', '=\t',
                                                self._outer)
        string += '{0: <16}\n'.format('\tUniverses')
        # Lattice nested Universe IDs - column major for Fortran
        for i, universe in enumerate(np.ravel(self._universes)):
            string += '{0} '.format(universe._id)
            # Add a newline character every time we reach end of row of cells
            if (i+1) % self.shape[0] == 0:
                string += '\n'
        string = string.rstrip('\n')
        return string
    @property
    def indices(self):
        # All (y,x) or (z,y,x) array indices in row-major order, matching
        # the layout of the 'universes' array.
        if self.ndim == 2:
            return list(np.broadcast(*np.ogrid[
                :self.shape[1], :self.shape[0]]))
        else:
            return list(np.broadcast(*np.ogrid[
                :self.shape[2], :self.shape[1], :self.shape[0]]))
    @property
    def _natural_indices(self):
        """Iterate over all possible (x,y) or (x,y,z) lattice element indices.
        This property is used when constructing distributed cell and material
        paths. Most importantly, the iteration order matches that used on the
        Fortran side.
        """
        if self.ndim == 2:
            nx, ny = self.shape
            return np.broadcast(*np.ogrid[:nx, :ny])
        else:
            nx, ny, nz = self.shape
            return np.broadcast(*np.ogrid[:nx, :ny, :nz])
    @property
    def lower_left(self):
        return self._lower_left
    @property
    def ndim(self):
        # Dimensionality (2 or 3) is implied by the number of pitch
        # components.
        return len(self.pitch)
    @property
    def shape(self):
        # 'universes' is indexed (z,y,x) (or (y,x) in 2D); reverse the
        # array shape to report (x, y[, z]) cell counts.
        return self._universes.shape[::-1]
@lower_left.setter
def lower_left(self, lower_left):
cv.check_type('lattice lower left corner', lower_left, Iterable, Real)
cv.check_length('lattice lower left corner', lower_left, 2, 3)
self._lower_left = lower_left
    @Lattice.pitch.setter
    def pitch(self, pitch):
        # A rectangular lattice needs one strictly positive pitch per
        # dimension: 2 values for a 2-D lattice, 3 for a 3-D lattice.
        cv.check_type('lattice pitch', pitch, Iterable, Real)
        cv.check_length('lattice pitch', pitch, 2, 3)
        for dim in pitch:
            cv.check_greater_than('lattice pitch', dim, 0.0)
        self._pitch = pitch
    @Lattice.universes.setter
    def universes(self, universes):
        # Nesting depth 2 corresponds to a 2-D lattice ([y][x]); depth 3 to
        # a 3-D lattice ([z][y][x]). Stored as a numpy array so that shape
        # and ravel operations work elsewhere in the class.
        cv.check_iterable_type('lattice universes', universes, openmc.Universe,
                               min_depth=2, max_depth=3)
        self._universes = np.asarray(universes)
    def find_element(self, point):
        """Determine index of lattice element and local coordinates for a point
        Parameters
        ----------
        point : Iterable of float
            Cartesian coordinates of point
        Returns
        -------
        2- or 3-tuple of int
            A tuple of the corresponding (x,y,z) lattice element indices
        3-tuple of float
            Cartesian coordinates of the point in the corresponding lattice
            element coordinate system
        """
        # floor() maps the offset from the lattice corner to an integer
        # element index. No bounds check is performed here; use
        # is_valid_index() to test whether the result is inside the lattice.
        ix = floor((point[0] - self.lower_left[0])/self.pitch[0])
        iy = floor((point[1] - self.lower_left[1])/self.pitch[1])
        if self.ndim == 2:
            idx = (ix, iy)
        else:
            iz = floor((point[2] - self.lower_left[2])/self.pitch[2])
            idx = (ix, iy, iz)
        return idx, self.get_local_coordinates(point, idx)
def get_local_coordinates(self, point, idx):
"""Determine local coordinates of a point within a lattice element
Parameters
----------
point : Iterable of float
Cartesian coordinates of point
idx : Iterable of int
(x,y,z) indices of lattice element. If the lattice is 2D, the z
index can be omitted.
Returns
-------
3-tuple of float
Cartesian coordinates of point in the lattice element coordinate
system
"""
x = point[0] - (self.lower_left[0] + (idx[0] + 0.5)*self.pitch[0])
y = point[1] - (self.lower_left[1] + (idx[1] + 0.5)*self.pitch[1])
if self.ndim == 2:
z = point[2]
else:
z = point[2] - (self.lower_left[2] + (idx[2] + 0.5)*self.pitch[2])
return (x, y, z)
def get_universe_index(self, idx):
"""Return index in the universes array corresponding to a lattice element index
Parameters
----------
idx : Iterable of int
Lattice element indices in the :math:`(x,y,z)` coordinate system
Returns
-------
2- or 3-tuple of int
Indices used when setting the :attr:`RectLattice.universes` property
"""
max_y = self.shape[1] - 1
if self.ndim == 2:
x, y = idx
return (max_y - y, x)
else:
x, y, z = idx
return (z, max_y - y, x)
def is_valid_index(self, idx):
"""Determine whether lattice element index is within defined range
Parameters
----------
idx : Iterable of int
Lattice element indices in the :math:`(x,y,z)` coordinate system
Returns
-------
bool
Whether index is valid
"""
if self.ndim == 2:
return (0 <= idx[0] < self.shape[0] and
0 <= idx[1] < self.shape[1])
else:
return (0 <= idx[0] < self.shape[0] and
0 <= idx[1] < self.shape[1] and
0 <= idx[2] < self.shape[2])
    def create_xml_subelement(self, xml_element):
        """Append a <lattice> XML subelement describing this lattice.
        Does nothing if ``xml_element`` already contains a subelement for
        this lattice's ID. Also recursively creates subelements for the
        outer universe (if any) and all nested universes.
        """
        # Determine if XML element already contains subelement for this Lattice
        path = './lattice[@id=\'{0}\']'.format(self._id)
        test = xml_element.find(path)
        # If the element does contain the Lattice subelement, then return
        if test is not None:
            return
        lattice_subelement = ET.Element("lattice")
        lattice_subelement.set("id", str(self._id))
        if len(self._name) > 0:
            lattice_subelement.set("name", str(self._name))
        # Export the Lattice cell pitch
        pitch = ET.SubElement(lattice_subelement, "pitch")
        pitch.text = ' '.join(map(str, self._pitch))
        # Export the Lattice outer Universe (if specified)
        if self._outer is not None:
            outer = ET.SubElement(lattice_subelement, "outer")
            outer.text = '{0}'.format(self._outer._id)
            self._outer.create_xml_subelement(xml_element)
        # Export Lattice cell dimensions
        dimension = ET.SubElement(lattice_subelement, "dimension")
        dimension.text = ' '.join(map(str, self.shape))
        # Export Lattice lower left
        lower_left = ET.SubElement(lattice_subelement, "lower_left")
        lower_left.text = ' '.join(map(str, self._lower_left))
        # Export the Lattice nested Universe IDs - column major for Fortran
        universe_ids = '\n'
        # 3D Lattices
        if self.ndim == 3:
            for z in range(self.shape[2]):
                for y in range(self.shape[1]):
                    for x in range(self.shape[0]):
                        universe = self._universes[z][y][x]
                        # Append Universe ID to the Lattice XML subelement
                        universe_ids += '{0} '.format(universe._id)
                        # Create XML subelement for this Universe
                        universe.create_xml_subelement(xml_element)
                    # Add newline character when we reach end of row of cells
                    universe_ids += '\n'
                # Add an extra newline to separate consecutive axial slices
                universe_ids += '\n'
        # 2D Lattices
        else:
            for y in range(self.shape[1]):
                for x in range(self.shape[0]):
                    universe = self._universes[y][x]
                    # Append Universe ID to Lattice XML subelement
                    universe_ids += '{0} '.format(universe._id)
                    # Create XML subelement for this Universe
                    universe.create_xml_subelement(xml_element)
                # Add newline character when we reach end of row of cells
                universe_ids += '\n'
        # Remove trailing newline character from Universe IDs string
        universe_ids = universe_ids.rstrip('\n')
        universes = ET.SubElement(lattice_subelement, "universes")
        universes.text = universe_ids
        # Append the XML subelement for this Lattice to the XML element
        xml_element.append(lattice_subelement)
class HexLattice(Lattice):
    r"""A lattice consisting of hexagonal prisms.
    To completely define a hexagonal lattice, the :attr:`HexLattice.center`,
    :attr:`HexLattice.pitch`, :attr:`HexLattice.universes`, and
    :attr:`HexLattice.outer` properties need to be set.
    Most methods for this class use a natural indexing scheme wherein elements
    are assigned an index corresponding to their position relative to skewed
    :math:`(x,\alpha,z)` axes as described fully in
    :ref:`hexagonal_indexing`. However, note that when universes are assigned to
    lattice elements using the :attr:`HexLattice.universes` property, the array
    indices do not correspond to natural indices.
    Parameters
    ----------
    lattice_id : int, optional
        Unique identifier for the lattice. If not specified, an identifier will
        automatically be assigned.
    name : str, optional
        Name of the lattice. If not specified, the name is the empty string.
    Attributes
    ----------
    id : int
        Unique identifier for the lattice
    name : str
        Name of the lattice
    pitch : Iterable of float
        Pitch of the lattice in cm. The first item in the iterable specifies the
        pitch in the radial direction and, if the lattice is 3D, the second item
        in the iterable specifies the pitch in the axial direction.
    outer : openmc.Universe
        A universe to fill all space outside the lattice
    universes : Nested Iterable of openmc.Universe
        A two- or three-dimensional list/array of universes filling each element
        of the lattice. Each sub-list corresponds to one ring of universes and
        should be ordered from outermost ring to innermost ring. The universes
        within each sub-list are ordered from the "top" and proceed in a
        clockwise fashion. The :meth:`HexLattice.show_indices` method can be
        used to help figure out indices for this property.
    center : Iterable of float
        Coordinates of the center of the lattice. If the lattice does not have
        axial sections then only the x- and y-coordinates are specified
    indices : list of tuple
        A list of all possible (z,r,i) or (r,i) lattice element indices that are
        possible, where z is the axial index, r is in the ring index (starting
        from the outermost ring), and i is the index with a ring starting from
        the top and proceeding clockwise.
    num_rings : int
        Number of radial ring positions in the xy-plane
    num_axial : int
        Number of positions along the z-axis.
    """
    def __init__(self, lattice_id=None, name=''):
        super(HexLattice, self).__init__(lattice_id, name)
        # Initialize Lattice class attributes. These are populated later by
        # the universes and center property setters.
        self._num_rings = None
        self._num_axial = None
        self._center = None
    def __eq__(self, other):
        # Defer to the base class for shared attributes, then compare the
        # hex-specific ones.
        if not isinstance(other, HexLattice):
            return False
        elif not super(HexLattice, self).__eq__(other):
            return False
        elif self.num_rings != other.num_rings:
            return False
        elif self.num_axial != other.num_axial:
            return False
        elif self.center != other.center:
            return False
        else:
            return True
    def __ne__(self, other):
        return not self == other
    def __hash__(self):
        # Hash on the printed representation so the hash reflects the
        # lattice's current state.
        return hash(repr(self))
    def __repr__(self):
        """Return a multi-line, fixed-width summary of the lattice."""
        string = 'HexLattice\n'
        string += '{0: <16}{1}{2}\n'.format('\tID', '=\t', self._id)
        string += '{0: <16}{1}{2}\n'.format('\tName', '=\t', self._name)
        string += '{0: <16}{1}{2}\n'.format('\t# Rings', '=\t', self._num_rings)
        string += '{0: <16}{1}{2}\n'.format('\t# Axial', '=\t', self._num_axial)
        string += '{0: <16}{1}{2}\n'.format('\tCenter', '=\t',
                                            self._center)
        string += '{0: <16}{1}{2}\n'.format('\tPitch', '=\t', self._pitch)
        if self._outer is not None:
            string += '{0: <16}{1}{2}\n'.format('\tOuter', '=\t',
                                                self._outer._id)
        else:
            string += '{0: <16}{1}{2}\n'.format('\tOuter', '=\t',
                                                self._outer)
        string += '{0: <16}\n'.format('\tUniverses')
        if self._num_axial is not None:
            # 3-D lattice: render each axial slice separately.
            slices = [self._repr_axial_slice(x) for x in self._universes]
            string += '\n'.join(slices)
        else:
            string += self._repr_axial_slice(self._universes)
        return string
    @property
    def num_rings(self):
        return self._num_rings
    @property
    def num_axial(self):
        return self._num_axial
    @property
    def center(self):
        return self._center
    @property
    def indices(self):
        # Ring r contains 6*(num_rings - 1 - r) elements, except the
        # innermost ring which always has exactly one.
        if self.num_axial is None:
            return [(r, i) for r in range(self.num_rings)
                    for i in range(max(6*(self.num_rings - 1 - r), 1))]
        else:
            return [(z, r, i) for z in range(self.num_axial)
                    for r in range(self.num_rings)
                    for i in range(max(6*(self.num_rings - 1 - r), 1))]
    @property
    def _natural_indices(self):
        """Iterate over all possible (x,alpha) or (x,alpha,z) lattice element
        indices.
        This property is used when constructing distributed cell and material
        paths. Most importantly, the iteration order matches that used on the
        Fortran side.
        """
        # Scan the full bounding parallelogram of skewed indices, skipping
        # the corners that fall outside the hexagon via is_valid_index().
        r = self.num_rings
        if self.num_axial is None:
            for a in range(-r + 1, r):
                for x in range(-r + 1, r):
                    idx = (x, a)
                    if self.is_valid_index(idx):
                        yield idx
        else:
            for z in range(self.num_axial):
                for a in range(-r + 1, r):
                    for x in range(-r + 1, r):
                        idx = (x, a, z)
                        if self.is_valid_index(idx):
                            yield idx
    @property
    def ndim(self):
        # A 2-D lattice stores rings of Universes directly; a 3-D lattice
        # has an extra outer (axial) level of nesting.
        return 2 if isinstance(self.universes[0][0], openmc.Universe) else 3
    @center.setter
    def center(self, center):
        # Validate that the center is a 2- or 3-vector of real numbers.
        cv.check_type('lattice center', center, Iterable, Real)
        cv.check_length('lattice center', center, 2, 3)
        self._center = center
    @Lattice.pitch.setter
    def pitch(self, pitch):
        # One radial pitch, plus an optional axial pitch for 3-D lattices;
        # all values must be strictly positive.
        cv.check_type('lattice pitch', pitch, Iterable, Real)
        cv.check_length('lattice pitch', pitch, 1, 2)
        for dim in pitch:
            cv.check_greater_than('lattice pitch', dim, 0)
        self._pitch = pitch
    @Lattice.universes.setter
    def universes(self, universes):
        cv.check_iterable_type('lattice universes', universes, openmc.Universe,
                               min_depth=2, max_depth=3)
        self._universes = universes
        # NOTE: This routine assumes that the user creates a "ragged" list of
        # lists, where each sub-list corresponds to one ring of Universes.
        # The sub-lists are ordered from outermost ring to innermost ring.
        # The Universes within each sub-list are ordered from the "top" in a
        # clockwise fashion.
        # Set the number of axial positions.
        if self.ndim == 3:
            self._num_axial = len(self._universes)
        else:
            self._num_axial = None
        # Set the number of rings and make sure this number is consistent for
        # all axial positions.
        if self.ndim == 3:
            self._num_rings = len(self._universes[0])
            for rings in self._universes:
                if len(rings) != self._num_rings:
                    msg = 'HexLattice ID={0:d} has an inconsistent number of ' \
                          'rings per axial positon'.format(self._id)
                    raise ValueError(msg)
        else:
            self._num_rings = len(self._universes)
        # Make sure there are the correct number of elements in each ring.
        if self.ndim == 3:
            for axial_slice in self._universes:
                # Check the center ring.
                if len(axial_slice[-1]) != 1:
                    msg = 'HexLattice ID={0:d} has the wrong number of ' \
                          'elements in the innermost ring. Only 1 element is ' \
                          'allowed in the innermost ring.'.format(self._id)
                    raise ValueError(msg)
                # Check the outer rings.
                for r in range(self._num_rings-1):
                    if len(axial_slice[r]) != 6*(self._num_rings - 1 - r):
                        msg = 'HexLattice ID={0:d} has the wrong number of ' \
                              'elements in ring number {1:d} (counting from the '\
                              'outermost ring). This ring should have {2:d} ' \
                              'elements.'.format(self._id, r,
                                                 6*(self._num_rings - 1 - r))
                        raise ValueError(msg)
        else:
            axial_slice = self._universes
            # Check the center ring.
            if len(axial_slice[-1]) != 1:
                msg = 'HexLattice ID={0:d} has the wrong number of ' \
                      'elements in the innermost ring. Only 1 element is ' \
                      'allowed in the innermost ring.'.format(self._id)
                raise ValueError(msg)
            # Check the outer rings.
            for r in range(self._num_rings-1):
                if len(axial_slice[r]) != 6*(self._num_rings - 1 - r):
                    msg = 'HexLattice ID={0:d} has the wrong number of ' \
                          'elements in ring number {1:d} (counting from the '\
                          'outermost ring). This ring should have {2:d} ' \
                          'elements.'.format(self._id, r,
                                             6*(self._num_rings - 1 - r))
                    raise ValueError(msg)
    def find_element(self, point):
        r"""Determine index of lattice element and local coordinates for a point
        Parameters
        ----------
        point : Iterable of float
            Cartesian coordinates of point
        Returns
        -------
        3-tuple of int
            Indices of corresponding lattice element in :math:`(x,\alpha,z)`
            bases
        numpy.ndarray
            Carestian coordinates of the point in the corresponding lattice
            element coordinate system
        """
        # Convert coordinates to skewed bases
        x = point[0] - self.center[0]
        y = point[1] - self.center[1]
        if self._num_axial is None:
            # 2-D lattice: dummy z index; get_local_coordinates ignores
            # idx[2] when _num_axial is None.
            iz = 1
        else:
            z = point[2] - self.center[2]
            iz = floor(z/self.pitch[1] + 0.5*self.num_axial)
        alpha = y - x/sqrt(3.)
        ix = floor(x/(sqrt(0.75) * self.pitch[0]))
        ia = floor(alpha/self.pitch[0])
        # Check four lattice elements to see which one is closest based on local
        # coordinates
        d_min = np.inf
        for idx in [(ix, ia, iz), (ix + 1, ia, iz), (ix, ia + 1, iz),
                    (ix + 1, ia + 1, iz)]:
            p = self.get_local_coordinates(point, idx)
            d = p[0]**2 + p[1]**2
            if d < d_min:
                d_min = d
                idx_min = idx
                p_min = p
        return idx_min, p_min
    def get_local_coordinates(self, point, idx):
        r"""Determine local coordinates of a point within a lattice element
        Parameters
        ----------
        point : Iterable of float
            Cartesian coordinates of point
        idx : Iterable of int
            Indices of lattice element in :math:`(x,\alpha,z)` bases
        Returns
        -------
        3-tuple of float
            Cartesian coordinates of point in the lattice element coordinate
            system
        """
        x = point[0] - (self.center[0] + sqrt(0.75)*self.pitch[0]*idx[0])
        y = point[1] - (self.center[1] + (0.5*idx[0] + idx[1])*self.pitch[0])
        if self._num_axial is None:
            z = point[2]
        else:
            z = point[2] - (self.center[2] + (idx[2] + 0.5 - 0.5*self.num_axial)*
                            self.pitch[1])
        return (x, y, z)
    def get_universe_index(self, idx):
        r"""Return index in the universes array corresponding to a lattice element index
        Parameters
        ----------
        idx : Iterable of int
            Lattice element indices in the :math:`(x,\alpha,z)` coordinate
            system
        Returns
        -------
        2- or 3-tuple of int
            Indices used when setting the :attr:`HexLattice.universes` property
        """
        # First we determine which ring the index corresponds to.
        x = idx[0]
        a = idx[1]
        z = -a - x
        # The three skewed components sum to zero; the ring number (distance
        # from the center element) is the largest magnitude among them.
        g = max(abs(x), abs(a), abs(z))
        # Next we use a clever method to figure out where along the ring we are.
        i_ring = self._num_rings - 1 - g
        if x >= 0:
            if a >= 0:
                i_within = x
            else:
                i_within = 2*g + z
        else:
            if a <= 0:
                i_within = 3*g - x
            else:
                i_within = 5*g - z
        if self.num_axial is None:
            return (i_ring, i_within)
        else:
            return (idx[2], i_ring, i_within)
    def is_valid_index(self, idx):
        r"""Determine whether lattice element index is within defined range
        Parameters
        ----------
        idx : Iterable of int
            Lattice element indices in the :math:`(x,\alpha,z)` coordinate
            system
        Returns
        -------
        bool
            Whether index is valid
        """
        # The index lies inside the hexagon when its ring number (largest
        # magnitude of the three zero-sum components) is below num_rings.
        x = idx[0]
        y = idx[1]
        z = 0 - y - x
        g = max(abs(x), abs(y), abs(z))
        if self.num_axial is None:
            return g < self.num_rings
        else:
            return g < self.num_rings and 0 <= idx[2] < self.num_axial
    def create_xml_subelement(self, xml_element):
        """Append a <hex_lattice> XML subelement describing this lattice.
        Does nothing if ``xml_element`` already contains a subelement for
        this lattice's ID. Also recursively creates subelements for the
        outer universe (if any) and all nested universes.
        """
        # Determine if XML element already contains subelement for this Lattice
        path = './hex_lattice[@id=\'{0}\']'.format(self._id)
        test = xml_element.find(path)
        # If the element does contain the Lattice subelement, then return
        if test is not None:
            return
        lattice_subelement = ET.Element("hex_lattice")
        lattice_subelement.set("id", str(self._id))
        if len(self._name) > 0:
            lattice_subelement.set("name", str(self._name))
        # Export the Lattice cell pitch
        pitch = ET.SubElement(lattice_subelement, "pitch")
        pitch.text = ' '.join(map(str, self._pitch))
        # Export the Lattice outer Universe (if specified)
        if self._outer is not None:
            outer = ET.SubElement(lattice_subelement, "outer")
            outer.text = '{0}'.format(self._outer._id)
            self._outer.create_xml_subelement(xml_element)
        lattice_subelement.set("n_rings", str(self._num_rings))
        if self._num_axial is not None:
            lattice_subelement.set("n_axial", str(self._num_axial))
        # Export Lattice cell center
        center = ET.SubElement(lattice_subelement, "center")
        center.text = ' '.join(map(str, self._center))
        # Export the Lattice nested Universe IDs.
        # 3D Lattices
        if self._num_axial is not None:
            slices = []
            for z in range(self._num_axial):
                # Initialize the center universe.
                universe = self._universes[z][-1][0]
                universe.create_xml_subelement(xml_element)
                # Initialize the remaining universes.
                for r in range(self._num_rings-1):
                    for theta in range(6*(self._num_rings - 1 - r)):
                        universe = self._universes[z][r][theta]
                        universe.create_xml_subelement(xml_element)
                # Get a string representation of the universe IDs.
                slices.append(self._repr_axial_slice(self._universes[z]))
            # Collapse the list of axial slices into a single string.
            universe_ids = '\n'.join(slices)
        # 2D Lattices
        else:
            # Initialize the center universe.
            universe = self._universes[-1][0]
            universe.create_xml_subelement(xml_element)
            # Initialize the remaining universes.
            for r in range(self._num_rings - 1):
                for theta in range(6*(self._num_rings - 1 - r)):
                    universe = self._universes[r][theta]
                    universe.create_xml_subelement(xml_element)
            # Get a string representation of the universe IDs.
            universe_ids = self._repr_axial_slice(self._universes)
        universes = ET.SubElement(lattice_subelement, "universes")
        universes.text = '\n' + universe_ids
        # Append the XML subelement for this Lattice to the XML element
        xml_element.append(lattice_subelement)
    def _repr_axial_slice(self, universes):
        """Return string representation for the given 2D group of universes.
        The 'universes' argument should be a list of lists of universes where
        each sub-list represents a single ring. The first list should be the
        outer ring.
        """
        # Find the largest universe ID and count the number of digits so we can
        # properly pad the output string later.
        largest_id = max([max([univ._id for univ in ring])
                          for ring in universes])
        n_digits = len(str(largest_id))
        pad = ' '*n_digits
        id_form = '{: ^' + str(n_digits) + 'd}'
        # Initialize the list for each row. Each hexagonal ring spans four
        # text rows, so the diagram needs 1 + 4*(num_rings - 1) rows total.
        rows = [[] for i in range(1 + 4 * (self._num_rings-1))]
        middle = 2 * (self._num_rings - 1)
        # Start with the degenerate first ring.
        universe = universes[-1][0]
        rows[middle] = [id_form.format(universe._id)]
        # Add universes one ring at a time.
        for r in range(1, self._num_rings):
            # r_prime increments down while r increments up.
            r_prime = self._num_rings - 1 - r
            theta = 0
            y = middle + 2*r
            # Climb down the top-right.
            for i in range(r):
                # Add the universe.
                universe = universes[r_prime][theta]
                rows[y].append(id_form.format(universe._id))
                # Translate the indices.
                y -= 1
                theta += 1
            # Climb down the right.
            for i in range(r):
                # Add the universe.
                universe = universes[r_prime][theta]
                rows[y].append(id_form.format(universe._id))
                # Translate the indices.
                y -= 2
                theta += 1
            # Climb down the bottom-right.
            for i in range(r):
                # Add the universe.
                universe = universes[r_prime][theta]
                rows[y].append(id_form.format(universe._id))
                # Translate the indices.
                y -= 1
                theta += 1
            # Climb up the bottom-left.
            for i in range(r):
                # Add the universe.
                universe = universes[r_prime][theta]
                rows[y].insert(0, id_form.format(universe._id))
                # Translate the indices.
                y += 1
                theta += 1
            # Climb up the left.
            for i in range(r):
                # Add the universe.
                universe = universes[r_prime][theta]
                rows[y].insert(0, id_form.format(universe._id))
                # Translate the indices.
                y += 2
                theta += 1
            # Climb up the top-left.
            for i in range(r):
                # Add the universe.
                universe = universes[r_prime][theta]
                rows[y].insert(0, id_form.format(universe._id))
                # Translate the indices.
                y += 1
                theta += 1
        # Flip the rows and join each row into a single string.
        rows = [pad.join(x) for x in rows[::-1]]
        # Pad the beginning of the rows so they line up properly.
        for y in range(self._num_rings - 1):
            rows[y] = (self._num_rings - 1 - y)*pad + rows[y]
            rows[-1 - y] = (self._num_rings - 1 - y)*pad + rows[-1 - y]
        for y in range(self._num_rings % 2, self._num_rings, 2):
            rows[middle + y] = pad + rows[middle + y]
            if y != 0:
                rows[middle - y] = pad + rows[middle - y]
        # Join the rows together and return the string.
        universe_ids = '\n'.join(rows)
        return universe_ids
    @staticmethod
    def show_indices(num_rings):
        """Return a diagram of the hexagonal lattice layout with indices.
        This method can be used to show the proper indices to be used when
        setting the :attr:`HexLattice.universes` property. For example, running
        this method with num_rings=3 will return the following diagram::
                      (0, 0)
                (0,11)      (0, 1)
          (0,10)      (1, 0)      (0, 2)
                (1, 5)      (1, 1)
          (0, 9)      (2, 0)      (0, 3)
                (1, 4)      (1, 2)
          (0, 8)      (1, 3)      (0, 4)
                (0, 7)      (0, 5)
                      (0, 6)
        Parameters
        ----------
        num_rings : int
            Number of rings in the hexagonal lattice
        Returns
        -------
        str
            Diagram of the hexagonal lattice showing indices
        """
        # Find the largest string and count the number of digits so we can
        # properly pad the output string later
        largest_index = 6*(num_rings - 1)
        n_digits_index = len(str(largest_index))
        n_digits_ring = len(str(num_rings - 1))
        str_form = '({{:{}}},{{:{}}})'.format(n_digits_ring, n_digits_index)
        pad = ' '*(n_digits_index + n_digits_ring + 3)
        # Initialize the list for each row.
        rows = [[] for i in range(1 + 4 * (num_rings-1))]
        middle = 2 * (num_rings - 1)
        # Start with the degenerate first ring.
        rows[middle] = [str_form.format(num_rings - 1, 0)]
        # Add universes one ring at a time (same traversal as
        # _repr_axial_slice, but emitting (ring, position) labels).
        for r in range(1, num_rings):
            # r_prime increments down while r increments up.
            r_prime = num_rings - 1 - r
            theta = 0
            y = middle + 2*r
            for i in range(r):
                # Climb down the top-right.
                rows[y].append(str_form.format(r_prime, theta))
                y -= 1
                theta += 1
            for i in range(r):
                # Climb down the right.
                rows[y].append(str_form.format(r_prime, theta))
                y -= 2
                theta += 1
            for i in range(r):
                # Climb down the bottom-right.
                rows[y].append(str_form.format(r_prime, theta))
                y -= 1
                theta += 1
            for i in range(r):
                # Climb up the bottom-left.
                rows[y].insert(0, str_form.format(r_prime, theta))
                y += 1
                theta += 1
            for i in range(r):
                # Climb up the left.
                rows[y].insert(0, str_form.format(r_prime, theta))
                y += 2
                theta += 1
            for i in range(r):
                # Climb up the top-left.
                rows[y].insert(0, str_form.format(r_prime, theta))
                y += 1
                theta += 1
        # Flip the rows and join each row into a single string.
        rows = [pad.join(x) for x in rows[::-1]]
        # Pad the beginning of the rows so they line up properly.
        for y in range(num_rings - 1):
            rows[y] = (num_rings - 1 - y)*pad + rows[y]
            rows[-1 - y] = (num_rings - 1 - y)*pad + rows[-1 - y]
        for y in range(num_rings % 2, num_rings, 2):
            rows[middle + y] = pad + rows[middle + y]
            if y != 0:
                rows[middle - y] = pad + rows[middle - y]
        # Join the rows together and return the string.
        return '\n'.join(rows)
| mit |
Dioptas/pymatgen | pymatgen/analysis/diffraction/xrd.py | 2 | 14721 | # coding: utf-8
from __future__ import division, unicode_literals
"""
This module implements an XRD pattern calculator.
"""
from six.moves import filter
from six.moves import map
from six.moves import zip
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "5/22/14"
from math import sin, cos, asin, pi, degrees, radians
import os
import numpy as np
import json
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
# Characteristic XRD wavelengths in angstroms, keyed by anode material and
# emission line.
WAVELENGTHS = {
    "CuKa": 1.54184,
    "CuKa2": 1.54439,
    "CuKa1": 1.54056,
    "CuKb1": 1.39222,
    "MoKa": 0.71073,
    "MoKa2": 0.71359,
    "MoKa1": 0.70930,
    "MoKb1": 0.63229,
    "CrKa": 2.29100,
    "CrKa2": 2.29361,
    "CrKa1": 2.28970,
    "CrKb1": 2.08487,
    "FeKa": 1.93735,
    "FeKa2": 1.93998,
    "FeKa1": 1.93604,
    "FeKb1": 1.75661,
    "CoKa": 1.79026,
    "CoKa2": 1.79285,
    "CoKa1": 1.78896,
    "CoKb1": 1.63079,
    "AgKa": 0.560885,
    "AgKa2": 0.563813,
    "AgKa1": 0.559421,
    "AgKb1": 0.497082,
}
# Fitted coefficients used to evaluate the analytical atomic scattering
# factor in get_xrd_data; loaded once at import time, keyed by element symbol.
with open(os.path.join(os.path.dirname(__file__),
                       "atomic_scattering_params.json")) as f:
    ATOMIC_SCATTERING_PARAMS = json.load(f)
class XRDCalculator(object):
"""
Computes the XRD pattern of a crystal structure.
This code is implemented by Shyue Ping Ong as part of UCSD's NANO106 -
Crystallography of Materials. The formalism for this code is based on
that given in Chapters 11 and 12 of Structure of Materials by Marc De
Graef and Michael E. McHenry. This takes into account the atomic
scattering factors and the Lorentz polarization factor, but not
the Debye-Waller (temperature) factor (for which data is typically not
available). Note that the multiplicity correction is not needed since
this code simply goes through all reciprocal points within the limiting
sphere, which includes all symmetrically equivalent planes. The algorithm
is as follows
1. Calculate reciprocal lattice of structure. Find all reciprocal points
within the limiting sphere given by :math:`\\frac{2}{\\lambda}`.
2. For each reciprocal point :math:`\\mathbf{g_{hkl}}` corresponding to
lattice plane :math:`(hkl)`, compute the Bragg condition
:math:`\\sin(\\theta) = \\frac{\\lambda}{2d_{hkl}}`
3. Compute the structure factor as the sum of the atomic scattering
factors. The atomic scattering factors are given by
.. math::
f(s) = Z - 41.78214 \\times s^2 \\times \\sum\\limits_{i=1}^n a_i \
\exp(-b_is^2)
where :math:`s = \\frac{\\sin(\\theta)}{\\lambda}` and :math:`a_i`
and :math:`b_i` are the fitted parameters for each element. The
structure factor is then given by
.. math::
F_{hkl} = \\sum\\limits_{j=1}^N f_j \\exp(2\\pi i \\mathbf{g_{hkl}}
\cdot \\mathbf{r})
4. The intensity is then given by the modulus square of the structure
factor.
.. math::
I_{hkl} = F_{hkl}F_{hkl}^*
5. Finally, the Lorentz polarization correction factor is applied. This
factor is given by:
.. math::
P(\\theta) = \\frac{1 + \\cos^2(2\\theta)}
{\\sin^2(\\theta)\\cos(\\theta)}
"""
#Tuple of available radiation keywords.
AVAILABLE_RADIATION = tuple(WAVELENGTHS.keys())
#Tolerance in which to treat two peaks as having the same two theta.
TWO_THETA_TOL = 1e-5
# Tolerance in which to treat a peak as effectively 0 if the scaled
# intensity is less than this number. Since the max intensity is 100,
# this means the peak must be less than 1e-5 of the peak intensity to be
# considered as zero. This deals with numerical issues where systematic
# absences do not cancel exactly to zero.
SCALED_INTENSITY_TOL = 1e-3
def __init__(self, wavelength="CuKa", symprec=0, debye_waller_factors=None):
"""
Initializes the XRD calculator with a given radiation.
Args:
wavelength (str/float): The wavelength can be specified as either a
float or a string. If it is a string, it must be one of the
supported definitions in the AVAILABLE_RADIATION class
variable, which provides useful commonly used wavelengths.
If it is a float, it is interpreted as a wavelength in
angstroms. Defaults to "CuKa", i.e, Cu K_alpha radiation.
symprec (float): Symmetry precision for structure refinement. If
set to 0, no refinement is done. Otherwise, refinement is
performed using spglib with provided precision.
debye_waller_factors ({element symbol: float}): Allows the
specification of Debye-Waller factors. Note that these
factors are temperature dependent.
"""
if isinstance(wavelength, float):
self.wavelength = wavelength
else:
self.radiation = wavelength
self.wavelength = WAVELENGTHS[wavelength]
self.symprec = symprec
self.debye_waller_factors = debye_waller_factors or {}
def get_xrd_data(self, structure, scaled=True, two_theta_range=(0, 90)):
"""
Calculates the XRD data for a structure.
Args:
structure (Structure): Input structure
scaled (bool): Whether to return scaled intensities. The maximum
peak is set to a value of 100. Defaults to True. Use False if
you need the absolute values to combine XRD plots.
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
Returns:
(XRD pattern) in the form of
[[two_theta, intensity, {(h, k, l): mult}, d_hkl], ...]
Two_theta is in degrees. Intensity is in arbitrary units and if
scaled (the default), has a maximum value of 100 for the highest
peak. {(h, k, l): mult} is a dict of Miller indices for all
diffracted lattice planes contributing to that intensity and
their multiplicities. d_hkl is the interplanar spacing.
"""
if self.symprec:
finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
structure = finder.get_refined_structure()
wavelength = self.wavelength
latt = structure.lattice
is_hex = latt.is_hexagonal()
# Obtained from Bragg condition. Note that reciprocal lattice
# vector length is 1 / d_hkl.
min_r, max_r = (0, 2 / wavelength) if two_theta_range is None else \
[2 * sin(radians(t / 2)) / wavelength for t in two_theta_range]
# Obtain crystallographic reciprocal lattice points within range
recip_latt = latt.reciprocal_lattice_crystallographic
recip_pts = recip_latt.get_points_in_sphere(
[[0, 0, 0]], [0, 0, 0], max_r)
if min_r:
recip_pts = filter(lambda d: d[1] >= min_r, recip_pts)
# Create a flattened array of zs, coeffs, fcoords and occus. This is
# used to perform vectorized computation of atomic scattering factors
# later. Note that these are not necessarily the same size as the
# structure as each partially occupied specie occupies its own
# position in the flattened array.
zs = []
coeffs = []
fcoords = []
occus = []
dwfactors = []
for site in structure:
for sp, occu in site.species_and_occu.items():
zs.append(sp.Z)
try:
c = ATOMIC_SCATTERING_PARAMS[sp.symbol]
except KeyError:
raise ValueError("Unable to calculate XRD pattern as "
"there is no scattering coefficients for"
" %s." % sp.symbol)
coeffs.append(c)
dwfactors.append(self.debye_waller_factors.get(sp.symbol, 0))
fcoords.append(site.frac_coords)
occus.append(occu)
zs = np.array(zs)
coeffs = np.array(coeffs)
fcoords = np.array(fcoords)
occus = np.array(occus)
dwfactors = np.array(dwfactors)
peaks = {}
two_thetas = []
for hkl, g_hkl, ind in sorted(
recip_pts, key=lambda i: (i[1], -i[0][0], -i[0][1], -i[0][2])):
if g_hkl != 0:
d_hkl = 1 / g_hkl
# Bragg condition
theta = asin(wavelength * g_hkl / 2)
# s = sin(theta) / wavelength = 1 / 2d = |ghkl| / 2 (d =
# 1/|ghkl|)
s = g_hkl / 2
#Store s^2 since we are using it a few times.
s2 = s ** 2
# Vectorized computation of g.r for all fractional coords and
# hkl.
g_dot_r = np.dot(fcoords, np.transpose([hkl])).T[0]
# Highly vectorized computation of atomic scattering factors.
# Equivalent non-vectorized code is::
#
# for site in structure:
# el = site.specie
# coeff = ATOMIC_SCATTERING_PARAMS[el.symbol]
# fs = el.Z - 41.78214 * s2 * sum(
# [d[0] * exp(-d[1] * s2) for d in coeff])
fs = zs - 41.78214 * s2 * np.sum(
coeffs[:, :, 0] * np.exp(-coeffs[:, :, 1] * s2), axis=1)
dw_correction = np.exp(-dwfactors * s2)
# Structure factor = sum of atomic scattering factors (with
# position factor exp(2j * pi * g.r and occupancies).
# Vectorized computation.
f_hkl = np.sum(fs * occus * np.exp(2j * pi * g_dot_r)
* dw_correction)
#Lorentz polarization correction for hkl
lorentz_factor = (1 + cos(2 * theta) ** 2) / \
(sin(theta) ** 2 * cos(theta))
# Intensity for hkl is modulus square of structure factor.
i_hkl = (f_hkl * f_hkl.conjugate()).real
two_theta = degrees(2 * theta)
if is_hex:
#Use Miller-Bravais indices for hexagonal lattices.
hkl = (hkl[0], hkl[1], - hkl[0] - hkl[1], hkl[2])
#Deal with floating point precision issues.
ind = np.where(np.abs(np.subtract(two_thetas, two_theta)) <
XRDCalculator.TWO_THETA_TOL)
if len(ind[0]) > 0:
peaks[two_thetas[ind[0]]][0] += i_hkl * lorentz_factor
peaks[two_thetas[ind[0]]][1].append(tuple(hkl))
else:
peaks[two_theta] = [i_hkl * lorentz_factor, [tuple(hkl)],
d_hkl]
two_thetas.append(two_theta)
# Scale intensities so that the max intensity is 100.
max_intensity = max([v[0] for v in peaks.values()])
data = []
for k in sorted(peaks.keys()):
v = peaks[k]
scaled_intensity = v[0] / max_intensity * 100 if scaled else v[0]
fam = get_unique_families(v[1])
if scaled_intensity > XRDCalculator.SCALED_INTENSITY_TOL:
data.append([k, scaled_intensity, fam, v[2]])
return data
def get_xrd_plot(self, structure, two_theta_range=(0, 90),
                 annotate_peaks=True):
    """
    Returns the XRD plot as a matplotlib.pyplot.

    Args:
        structure: Input structure
        two_theta_range ([float of length 2]): Tuple for range of
            two_thetas to calculate in degrees. Defaults to (0, 90). Set to
            None if you want all diffracted beams within the limiting
            sphere of radius 2 / wavelength.
        annotate_peaks: Whether to annotate the peaks with plane
            information.

    Returns:
        (matplotlib.pyplot)
    """
    from pymatgen.util.plotting_utils import get_publication_quality_plot
    plt = get_publication_quality_plot(16, 10)
    for two_theta, i, hkls, d_hkl in self.get_xrd_data(
            structure, two_theta_range=two_theta_range):
        # BUG FIX: two_theta_range=None is documented as valid ("all
        # diffracted beams"), but the old code unconditionally indexed it
        # and raised a TypeError. With None, plot every peak returned.
        if two_theta_range is None or \
                two_theta_range[0] <= two_theta <= two_theta_range[1]:
            # One label listing every hkl plane contributing to this peak.
            label = ", ".join([str(hkl) for hkl in hkls.keys()])
            plt.plot([two_theta, two_theta], [0, i], color='k',
                     linewidth=3, label=label)
            if annotate_peaks:
                plt.annotate(label, xy=[two_theta, i],
                             xytext=[two_theta, i], fontsize=16)
    plt.xlabel(r"2\theta (degrees)")
    plt.ylabel("Intensities (scaled)")
    plt.tight_layout()
    return plt
def show_xrd_plot(self, structure, two_theta_range=(0, 90),
                  annotate_peaks=True):
    """
    Shows the XRD plot.

    Args:
        structure (Structure): Input structure
        two_theta_range ([float of length 2]): Tuple for range of
            two_thetas to calculate in degrees. Defaults to (0, 90). Set to
            None if you want all diffracted beams within the limiting
            sphere of radius 2 / wavelength.
        annotate_peaks (bool): Whether to annotate the peaks with plane
            information.
    """
    # Build the figure via get_xrd_plot and simply display it.
    plot = self.get_xrd_plot(structure,
                             two_theta_range=two_theta_range,
                             annotate_peaks=annotate_peaks)
    plot.show()
def get_unique_families(hkls):
    """
    Returns unique families of Miller indices. Families must be permutations
    of each other.

    Args:
        hkls ([h, k, l]): List of Miller indices.

    Returns:
        {hkl: multiplicity}: A dict with unique hkl and multiplicity.
    """
    # Instead of the old O(n^2) pairwise permutation comparison, group
    # indices by a canonical key: the sorted absolute values of (h, k, l).
    # Two indices belong to the same family iff their keys are equal.  The
    # first index seen in a family is kept as its representative, matching
    # the previous behaviour exactly.
    representatives = {}  # canonical key -> representative hkl
    unique = {}
    for hkl in hkls:
        key = tuple(sorted(abs(i) for i in hkl))
        rep = representatives.get(key)
        if rep is None:
            representatives[key] = hkl
            unique[hkl] = 1
        else:
            unique[rep] += 1
    return unique
| mit |
mlperf/training_results_v0.7 | Google/benchmarks/transformer/implementations/transformer-research-TF-tpu-v4-16/lingvo/core/gpipe.py | 3 | 21822 | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A recurrent model which enables pipelining model parallelism.
Reference:
'GPipe: Efficient Training of Giant Neural Networks using Pipeline Parallelism'
https://arxiv.org/abs/1811.06965
Example implementation of Transformer Language model:
tasks/lm/layers.GPipeTransformerLm
Sample params for the one billion words task:
tasks/lm/params/one_billion_wds.OneBWdsGPipeTransformer.
More examples in machine translation, image classifications and others
will be included.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import copy
import REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.compat as tf
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import base_layer
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import builder_layers
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import py_utils
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import recurrent
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import tshape
from six.moves import range
_MICRO_BATCH_STATE_NAME = 'micro_batch_state'
_OVERWRITE_GLOBAL_STEP_COLLECTION = 'lingvo__OVERWRITE_GLOBAL_STEP_COLLECTION'
def GetOverWriteGlobalStep(graph=None):
  """Returns the registered global-step override tensor, if any.

  Falls back to the regular global step when no override has been stored in
  the graph's collection.
  """
  g = graph or tf.get_default_graph()
  overrides = g.get_collection_ref(_OVERWRITE_GLOBAL_STEP_COLLECTION)
  return overrides[0] if len(overrides) == 1 else py_utils.GetGlobalStep()
def SetOverWriteGlobalStep(tensor, graph=None):
  """Registers `tensor` as the graph's global-step override.

  Replaces an existing override in place, or adds the first one to the
  collection.
  """
  g = graph or tf.get_default_graph()
  overrides = g.get_collection_ref(_OVERWRITE_GLOBAL_STEP_COLLECTION)
  if len(overrides) == 1:
    overrides[0] = tensor
  else:
    g.add_to_collection(_OVERWRITE_GLOBAL_STEP_COLLECTION, tensor)
def GenerateStepSeedPair(p, unused_global_step=None, op_seed=None):
  """Override py_utils.GenerateStepSeedPair to use GetOverWriteGlobalStep.

  Args:
    p: Layer params; p.is_inference and p.random_seed are consulted.
    unused_global_step: Kept for signature compatibility with the function
      this one replaces; the step is read via GetOverWriteGlobalStep instead.
    op_seed: Optional extra seed offset for this particular op.

  Returns:
    An int32 (TPU) or int64 (CPU/GPU) tensor of shape [2] holding the
    (global-step, name-scope-derived) seed pair.
  """
  seed_dtype = tf.int32 if py_utils.use_tpu() else tf.int64
  if p.is_inference and p.random_seed is None:
    # Unlike tf.random*, stateless random ops are completely determined by the
    # passed-in seeds. This means at inference time the same inputs will produce
    # the same outputs, even if the model is supposed to have randomness such as
    # dropout during inference. We inject additional randomness only during
    # inference if the graph is exported with random_seed=None as a workaround.
    return tf.random.uniform([2], maxval=seed_dtype.max, dtype=seed_dtype)

  with tf.name_scope('op_seed') as scope:
    # Seed pair = (possibly-overwritten global step, hash of the name scope).
    global_step = tf.cast(GetOverWriteGlobalStep(), seed_dtype)
    step_seed = tf.cast(py_utils.GenerateSeedFromName(scope), seed_dtype)
    seeds = tf.stack([global_step, step_seed])

    if p.random_seed is not None:
      seeds += p.random_seed
    if op_seed is not None:
      seeds += op_seed
    return seeds
@contextlib.contextmanager
def CellFnFPropOpReplacementWrapper():
  """Hacks to replace certain unwanted tensorflow ops.

  Temporarily monkey-patches py_utils.GenerateStepSeedPair with the local
  GenerateStepSeedPair, which reads the overwritten global step (the real
  global step is not available in the temp graph created by optional.while).

  Yields:
    None. The patch is active for the duration of the `with` block.
  """
  saved_get_op_seed = py_utils.GenerateStepSeedPair
  py_utils.GenerateStepSeedPair = GenerateStepSeedPair
  try:
    yield
  finally:
    # BUG FIX: restore the original function even if the wrapped body raises;
    # previously an exception would leave the monkey-patch permanently
    # installed for the rest of the process.
    py_utils.GenerateStepSeedPair = saved_get_op_seed
def _ToTuple(x):
if isinstance(x, list):
return tuple(x)
return x if isinstance(x, tuple) else (x,)
class FeatureExtractionLayer(base_layer.BaseLayer):
  """A layer that extracts features from a sequence of layers.

  FeatureExtractionLayer is a layer which connects a few layers in a sequence.
  It is also capable of fetching and forwarding activation endpoints.
  # TODO(huangyp): Make it a sublayer of builder_layers.SequentialLayer
  """

  @classmethod
  def Params(cls):
    p = super(FeatureExtractionLayer, cls).Params()
    p.Define('variable_name_prefix', '',
             'Prefix for variable names in sub layers')
    p.Define('sub', [], 'A list of layers\' params.')
    p.Define('num_act_inputs', 0, 'Number of activation inputs.')
    p.Define('num_act_outputs', 0, 'Number of activation outputs.')
    p.Define('act_fetch_layers', [],
             'Names of fetch layers that cached extra activations')
    return p

  @base_layer.initializer
  def __init__(self, params):
    super(FeatureExtractionLayer, self).__init__(params)
    p = self.params
    assert p.num_act_inputs >= 0
    assert p.num_act_outputs >= 0
    p.act_fetch_layers = p.act_fetch_layers or []
    # Every forwarded activation input plus every fetched activation becomes
    # an output, so the counts must be consistent.
    assert p.num_act_outputs == p.num_act_inputs + len(p.act_fetch_layers)
    self._seq = []
    for sub in p.sub:
      assert sub.name
      sub.name = p.variable_name_prefix + sub.name
      self.CreateChild(sub.name, sub)
      self._seq.append((sub.name, self.children[sub.name]))

  def FProp(self, theta, *args):
    # The trailing p.num_act_inputs entries of `args` are pass-through
    # activations; only the leading entries flow through the sub layers.
    p = self.params
    assert len(args) > p.num_act_inputs
    out_args = args[:-p.num_act_inputs] if p.num_act_inputs > 0 else args
    extra_args = args[-p.num_act_inputs:] if p.num_act_inputs > 0 else ()
    for (name, ch) in self._seq:
      th = theta[name]
      out_args = _ToTuple(out_args)
      out_args = ch.FProp(th, *out_args)
    # Append fetched activations to fprop outputs.
    for fetch_layer in p.act_fetch_layers:
      assert fetch_layer in self.children
      activation = self.children[fetch_layer].activation
      if isinstance(activation, (tuple, list)):
        activation = activation[0]
      extra_args += (activation,)
    if extra_args:
      out_args = _ToTuple(out_args) + extra_args
    return out_args

  @classmethod
  def FPropMeta(cls, p, *args):
    # Mirrors FProp: threads shape metadata through the sub layers and
    # appends the shapes of fetched activations, accumulating flops.
    assert len(args) > p.num_act_inputs
    seq_args = args[:-p.num_act_inputs] if p.num_act_inputs > 0 else args
    extra_args = args[-p.num_act_inputs:] if p.num_act_inputs > 0 else ()
    total = 0
    act_fetch_metas = {}
    for sub in p.sub:
      meta = sub.cls.FPropMeta(sub, *seq_args)
      if sub.name in p.act_fetch_layers:
        act_fetch_metas[sub.name] = meta.out_shapes[0]
      total += meta.flops
      seq_args = meta.out_shapes
    for fetch_layer in p.act_fetch_layers:
      extra_args += (act_fetch_metas[fetch_layer],)
    return py_utils.NestedMap(flops=total, out_shapes=seq_args + extra_args)
def PartitionSequentialLayers(params, num_partitions, *shapes):
  r"""Partition a layer composed of sequential layers.

  This routine strives to partition layers so that each partition costs roughly
  the same flops given the input shapes.

  Args:
    params: A layer param or a list of layer param.
    num_partitions: The desired number of partitions.
    *shapes: A tuple of tshape.Shape representing input tensors to the first
      layer.

  Returns:
    A list of FeatureExtractionLayer params.
  """

  # Recursively concatenate SequentialLayer into a list.
  def FlattenSeq(p):
    if isinstance(p, list):
      return p
    if p.cls not in [builder_layers.SequentialLayer, FeatureExtractionLayer]:
      return [p.Copy()]
    subs = []
    for _ in range(p.repeat):
      for s in p.sub:
        subs += FlattenSeq(s)
    return subs

  subs = FlattenSeq(params)

  assert len(shapes) == 1
  tf.logging.info('num_partitions: {} input_shape: {}'.format(
      num_partitions, shapes[0]))

  # Computes the estimate cost for each sub layer.
  # `histo` is the cumulative flops up to and including each sub layer.
  total, histo, output_shapes = 0, [], []
  for i, s in enumerate(subs):
    s.name = 'cell_%03d' % i
    meta = s.cls.FPropMeta(s, *shapes)
    total += meta.flops
    histo.append(total)
    output_shapes.append(meta.out_shapes)
    shapes = meta.out_shapes
  tf.logging.vlog(1, 'len %d histogram = %s', len(subs), histo)

  # Computes the normalized cumulative histogram of the layer's cost.
  histo_pct = [float(x / total) for x in histo]
  tf.logging.vlog(1, 'cost pct = %s', histo_pct)

  # i-th sub layer is put into partition j, where j is roughly i-th cumulative
  # histogram times num_partitions.
  parts = [[] for _ in range(num_partitions)]
  parts_cost = [0] * num_partitions
  pre_hist_cost = 0
  for i, s in enumerate(subs):
    j = min(int(histo_pct[i] * num_partitions), num_partitions - 1)
    # The boundary at parts[j] where j > 0
    if j > 0 and not parts[j]:
      parts_cost[j - 1] = histo_pct[i - 1] - pre_hist_cost
      pre_hist_cost = histo_pct[i - 1]
    parts[j].append(s)

  parts_cost[num_partitions - 1] = 1.0 - pre_hist_cost
  seqs = []
  for i, pa in enumerate(parts):
    tf.logging.info('Partition %d #subs %d #cost %.3f', i, len(pa),
                    parts_cost[i])
    seqs.append(FeatureExtractionLayer.Params().Set(name='d%d' % i, sub=pa))
  return seqs
class SeqLayer(base_layer.BaseLayer):
  """Round-robin every children cells in cell_tpl among worker devices."""

  @classmethod
  def Params(cls):
    p = super(SeqLayer, cls).Params()
    p.Define('before_tpl', [],
             'Config for the CNN layers that runs before pipelining.')
    p.Define('cell_tpl', [], 'A list of FeatureExtractionLayer layers.')
    return p

  @base_layer.initializer
  def __init__(self, params):
    super(SeqLayer, self).__init__(params)
    p = self.params
    assert p.name
    num_cells = len(p.cell_tpl)
    self._before_layers = []
    self._cells = []
    # On TPU each cell is pinned to its own model-split worker device;
    # otherwise device placement is left unconstrained (empty string).
    before_tpl_device = ''
    cell_devices = [''] * num_cells
    if py_utils.use_tpu():
      cluster = self.cluster
      before_tpl_device = cluster.WorkerDeviceInModelSplit(0)
      cell_devices = [
          cluster.WorkerDeviceInModelSplit(i) for i in range(num_cells)
      ]
    for l in p.before_tpl:
      with tf.device(before_tpl_device):
        assert l.name
        self.CreateChild(l.name, l)
        self._before_layers.append((l.name, self.children[l.name]))
    for i, l in enumerate(p.cell_tpl):
      with tf.device(cell_devices[i]):
        assert l.name
        self.CreateChild(l.name, l)
        self._cells.append((l.name, self.children[l.name]))

  def FProp(self, theta, *args):
    """Round-robin every children cells in cell_tpl among worker devices.

    Args:
      theta: A NestedMap object containing weights' values of this layer and its
        children layers.
      *args: Input args

    Returns:
      A list contains one tensor of [batch_size, feature_height, feature_width,
      channel].
    """
    num_layers = len(self.params.cell_tpl)
    cluster = self.cluster

    # Run the pre-pipeline layers first, then each cell on its own device.
    for (name, l) in self._before_layers:
      l_theta = theta[name]
      args = _ToTuple(args)
      args = l.FProp(l_theta, *args)
    for i in range(num_layers):
      with tf.device(cluster.WorkerDeviceInModelSplit(i)):
        cell_name, cell = self._cells[i]
        args = _ToTuple(args)
        args = cell.FProp(theta[cell_name], *args)

    return args
class PipeliningLayer(SeqLayer):
  """Pipelining a sequence of layers on multiple devices.

  The mini batch is split into `num_micro_batches` micro batches which are
  pushed through the cells via recurrent.StackedRecurrent, so that different
  cells (on different devices) work on different micro batches concurrently.
  """

  @classmethod
  def Params(cls):
    p = super(PipeliningLayer, cls).Params()
    p.Define('num_micro_batches', 1, 'Number of micro batches.')
    p.Define('micro_batch_size', None, 'Size of a micro batch.')
    p.Define('batch_dim', 0, 'The batch dimension.')
    p.Define('state_dtype', None, 'Externally specify dtype for states.')
    p.Define(
        'nested_map_fprop', False, 'Whether arguments and returns of '
        'cell fprop functions are nested maps')
    return p

  def _CalculateOutputShapes(self, input_shapes):
    """Calcuate the output shape of intermediate layers.

    Given the FPropMeta function in each FeatureExtractionLayer, calcuates
    the shapes of outputs of that layer. This is used to recover the shape
    information in StackedRecurrent.

    Args:
      input_shapes: NestedMap or tuple of input TensorShapes.

    Returns:
      Return a list of K + 1 NestedMaps or lists of tShape where K is
      the number of partitions.
    """
    p = self.params
    shapes = []

    # Converts TensorShape to tshape.Shape.
    def _ToTShape(x):
      if x is None:
        return None
      return tshape.Shape(x.as_list())

    shapes = py_utils.Transform(_ToTShape, input_shapes)
    shapes = _ToTuple(shapes)

    state_shapes = []
    # Thread the shapes through the pre-pipeline layers, then record the
    # input/output shape of every cell boundary.
    for (_, cell) in self._before_layers:
      shapes = cell.FPropMeta(cell.params, *shapes).out_shapes

    state_shapes.append(shapes[0] if p.nested_map_fprop else shapes)

    for (_, cell) in self._cells:
      shapes = cell.FPropMeta(cell.params, *shapes).out_shapes
      state_shapes.append(shapes[0] if p.nested_map_fprop else shapes)

    return state_shapes

  def _get_state_dtype(self, *args):
    # Explicit override wins; otherwise use the dtype of the first input.
    if self.params.state_dtype:
      return self.params.state_dtype
    if self.params.nested_map_fprop:
      inputs = args[0].Filter(lambda x: x is not None)
      return py_utils.Flatten(inputs)[0].dtype
    return args[0].dtype

  def _get_input_shapes(self, *args):
    p = self.params
    if p.nested_map_fprop:
      assert len(args) == 1
      assert isinstance(args[0], py_utils.NestedMap)
      input_tensors = py_utils.Flatten(args[0])
    else:
      input_tensors = _ToTuple(args)
    # Get batch size from the first tensor which is not None.
    mini_batch_size = None
    for input_tensor in input_tensors:
      if input_tensor is not None:
        mini_batch_size = input_tensor.get_shape().as_list()[p.batch_dim]
    assert mini_batch_size is not None
    micro_batch_size = p.micro_batch_size
    if not micro_batch_size:
      # Derive the micro batch size by evenly dividing the mini batch,
      # clamping num_micro_batches to the mini batch size.
      if p.num_micro_batches > mini_batch_size:
        p.num_micro_batches = mini_batch_size
      micro_batch_size = mini_batch_size // p.num_micro_batches
    if mini_batch_size is not None:
      if micro_batch_size * p.num_micro_batches != mini_batch_size:
        raise ValueError('micro_batch_size * num_micro_batches != batch_size.')

    # Replace the batch dimension of every input shape with the micro batch
    # size; None inputs stay None.
    input_shapes = ()
    for input_tensor in input_tensors:
      if input_tensor is not None:
        input_shape = input_tensor.get_shape().as_list()
        input_shape[p.batch_dim] = micro_batch_size
        input_shapes += (tf.TensorShape(input_shape),)
      else:
        input_shapes += (None,)

    if p.nested_map_fprop:
      input_shapes = py_utils.Pack(args[0], input_shapes)
    return input_shapes

  def FProp(self, theta, *args):
    """Run multiple cells in different devices in a pipelining manner.

    Args:
      theta: A NestedMap object containing weights' values of this layer and its
        children layers.
      *args: Non-keyworded variable length argument list of input tensors.

    Returns:
      A list of output tensors
    """
    # TODO(huangyp): handle optional None inputs.
    p = self.params
    if self.do_eval:
      # No pipelining in eval mode: simply run all layers sequentially.
      outputs = copy.copy(args)
      for (name, l) in self._before_layers + self._cells:
        outputs = _ToTuple(outputs)
        outputs = l.FProp(theta[name], *outputs)
      return outputs

    num_cells = len(p.cell_tpl)
    cluster = self.cluster

    # Compute shapes of input and output tensors.
    input_shapes = self._get_input_shapes(*args)
    state_dtype = self._get_state_dtype(*args)
    state_shapes = self._CalculateOutputShapes(input_shapes)
    tf.logging.info('state_shapes={}'.format(state_shapes))

    def GetCellFn(i):
      """Get the ith feature extraction layer."""

      def CellFn(theta, state0, inputs):
        """A cell fn is exectued inside of StackedRecurrent."""
        del state0

        def _FPropInputSetShape(name, t_shape):
          if t_shape is None:
            return None
          inputs[name].set_shape(t_shape.ToTensorShape().as_list())
          return inputs[name]

        if p.nested_map_fprop:
          # pylint: disable=protected-access
          fprop_inputs = state_shapes[i]._RecursiveMap(_FPropInputSetShape)
          # pylint: enable=protected-access
        else:
          fprop_inputs = []
          for input_idx, input_shape in enumerate(state_shapes[i]):
            name = 's{}'.format(input_idx)
            fprop_inputs.append(_FPropInputSetShape(name, input_shape))

        with py_utils.RemoveAssertContext(remove=True):
          with CellFnFPropOpReplacementWrapper():
            tf.logging.info('cell {} input {}'.format(i, fprop_inputs))
            mb_tensor = inputs[_MICRO_BATCH_STATE_NAME]
            SetOverWriteGlobalStep(mb_tensor)
            _, cell = self._cells[i]
            fprop_inputs = _ToTuple(fprop_inputs)
            outputs = cell.FProp(theta, *fprop_inputs)

        if p.nested_map_fprop:
          assert py_utils.IsCompatible(outputs, state_shapes[i + 1])
          state1 = outputs.Filter(lambda x: x is not None)
        else:
          state1 = py_utils.NestedMap()
          outputs = _ToTuple(outputs)
          assert len(outputs) == len(state_shapes[i + 1])
          for output_idx in range(len(outputs)):
            if outputs[output_idx] is not None:
              name = 's{}'.format(output_idx)
              state1[name] = outputs[output_idx]
        # The micro-batch step tensor is threaded through every cell's state.
        state1[_MICRO_BATCH_STATE_NAME] = mb_tensor
        return state1, py_utils.NestedMap()

      return CellFn

    cell_fns = []
    accumulator_layers = []
    thetas = []
    init_states = []
    devices = []
    for cell_idx in range(num_cells):
      cell_name, cell = self._cells[cell_idx]
      accumulator_layers.append(cell)
      cell_fns.append(GetCellFn(cell_idx))
      thetas.append(theta[cell_name])

      # Zero-valued initial state matching each cell's output shapes.
      def _TfZeros(t_shape):
        if t_shape is None:
          return None
        return tf.zeros(t_shape.ToTensorShape().as_list(), dtype=state_dtype)

      if p.nested_map_fprop:
        init_state = py_utils.Transform(_TfZeros, state_shapes[cell_idx + 1])
        init_state = init_state.Filter(lambda x: x is not None)
      else:
        init_state = py_utils.NestedMap()
        for output_idx, state in enumerate(state_shapes[cell_idx + 1]):
          state = _TfZeros(state)
          if state is not None:
            name = 's{}'.format(output_idx)
            init_state[name] = state
      init_state[_MICRO_BATCH_STATE_NAME] = tf.cast(0, dtype=state_dtype)
      init_states.append(init_state)
      devices.append(cluster.WorkerDeviceInModelSplit(cell_idx))

    cell_grads = [None] * num_cells
    cell_outs = [lambda x: x] * num_cells
    cell_out_grads = [lambda x: x] * num_cells

    with tf.device(devices[0]):
      previous = _ToTuple(args)
      for (name, l) in self._before_layers:
        previous = l.FProp(theta[name], *previous)
        previous = _ToTuple(previous)

      def _StackAndSplit(x):
        # Split tensors into microbatches.
        if x is None:
          return None
        return tf.stack(tf.split(x, p.num_micro_batches, axis=p.batch_dim))

      if p.nested_map_fprop:
        inputs = py_utils.Transform(_StackAndSplit, previous[0])
        inputs = inputs.Filter(lambda x: x is not None)
      else:
        inputs = py_utils.NestedMap()
        for output_idx, output_tensor in enumerate(previous):
          output_tensor = _StackAndSplit(output_tensor)
          if output_tensor is not None:
            name = 's{}'.format(output_idx)
            inputs[name] = output_tensor
      # Each micro batch gets its own pseudo global step so that per-step
      # randomness differs across micro batches.
      gs_tensor = py_utils.GetGlobalStep()
      inputs[_MICRO_BATCH_STATE_NAME] = tf.stack([
          tf.cast(gs_tensor * p.num_micro_batches + t, dtype=state_dtype)
          for t in range(p.num_micro_batches)
      ])
    tf.logging.info('pipeline input = {}'.format(inputs))
    output_state, _ = recurrent.StackedRecurrent(
        devices=devices,
        cell_fns=cell_fns,
        cell_grads=cell_grads,
        cell_outs=cell_outs,
        cell_out_grads=cell_out_grads,
        thetas=thetas,
        init_states=init_states,
        inputs=inputs,
        accumulator_layers=accumulator_layers,
        unused_acc_state=True)

    with tf.device(devices[-1]):

      def _ReshapeRetVal(name, t_shape):
        """Restore shape for tensors in microbatches."""
        if t_shape is None:
          return None
        output_tensor = output_state[name]
        if p.batch_dim != 0:
          perm = list(range(1, p.batch_dim + 1)) + [0]
          perm += list(range(p.batch_dim + 1, t_shape.rank + 1))
          output_tensor = tf.transpose(output_tensor, perm=perm)
        output_shape = t_shape.ToTensorShape().as_list()
        output_shape[p.batch_dim] *= p.num_micro_batches
        output_tensor = tf.reshape(output_tensor, output_shape)
        return output_tensor

      # Construct the final return values from output_state.
      if p.nested_map_fprop:
        # pylint: disable=protected-access
        output_tensors = state_shapes[-1]._RecursiveMap(_ReshapeRetVal)
        # pylint: enable=protected-access
      else:
        output_tensors = []
        for output_idx, state_shape in enumerate(state_shapes[-1]):
          output_name = 's{}'.format(output_idx)
          output_tensor = _ReshapeRetVal(output_name, state_shape)
          output_tensors.append(output_tensor)
        if len(output_tensors) == 1:
          output_tensors = output_tensors[0]
        else:
          output_tensors = tuple(output_tensors)
      tf.logging.info('pipeline output = {}'.format(output_tensors))
      return output_tensors
| apache-2.0 |
hperala/kontuwikibot | scripts/transferbot.py | 1 | 5140 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This script transfers pages from a source wiki to a target wiki.
It also copies edit history to a subpage.
-tolang: The target site code.
-tosite: The target site family.
-prefix: Page prefix on the new site.
-overwrite: Existing pages are skipped by default. Use his option to
overwrite pages.
Internal links are *not* repaired!
Pages to work on can be specified using any of:
¶ms;
Example commands:
# Transfer all pages in category "Query service" from the Toolserver wiki to
# wikitech, adding Nova_Resource:Tools/Tools/ as prefix
transferbot.py -v -family:toolserver -tofamily:wikitech -cat:"Query service" -prefix:Nova_Resource:Tools/Tools/
# Copy the template "Query service" from the Toolserver wiki to wikitech
transferbot.py -v -family:toolserver -tofamily:wikitech -page:"Template:Query service"
"""
#
# (C) Merlijn van Deen, 2014
# (C) Pywikibot team, 2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: 239a991db4a52b15b91c70162f541c0c8d2beb21 $'
#
import pywikibot
from pywikibot import pagegenerators
# Substitutions applied to this script's module docstring when pywikibot
# renders its -help output.
# BUG FIX: the key had been corrupted by an encoding round-trip ('&para' was
# collapsed into the pilcrow character); pywikibot's documentation machinery
# expects the literal '&params;' placeholder.
docuReplacements = {
    '&params;': pagegenerators.parameterHelp,
}
class WikiTransferException(Exception):

    """Base class for exceptions from this script.

    Makes it easier for clients to catch all expected exceptions that the
    script might throw.
    """

    pass
class TargetSiteMissing(WikiTransferException):

    """Thrown when the target site is the same as the source site.

    Based on the way each are initialized, this is likely to happen when the
    target site simply hasn't been specified.
    """

    pass
class TargetPagesMissing(WikiTransferException):

    """Thrown if no page range has been specified for the script to operate on."""

    pass
def main(*args):
    """
    Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: list of unicode
    """
    local_args = pywikibot.handle_args(args)

    fromsite = pywikibot.Site()
    # Target defaults to the source; -tolang/-tofamily override it below.
    tolang = fromsite.code
    tofamily = fromsite.family.name
    prefix = ''
    overwrite = False
    gen_args = []

    genFactory = pagegenerators.GeneratorFactory()

    for arg in local_args:
        # Page-generator options are collected by the factory; everything
        # else is a script-specific option parsed here.
        if genFactory.handleArg(arg):
            gen_args.append(arg)
            continue
        if arg.startswith('-tofamily'):
            tofamily = arg[len('-tofamily:'):]
        elif arg.startswith('-tolang'):
            tolang = arg[len('-tolang:'):]
        elif arg.startswith('-prefix'):
            prefix = arg[len('-prefix:'):]
        elif arg == "-overwrite":
            overwrite = True

    tosite = pywikibot.Site(tolang, tofamily)
    if fromsite == tosite:
        raise TargetSiteMissing('Target site not different from source site')

    gen = genFactory.getCombinedGenerator()
    if not gen:
        raise TargetPagesMissing('Target pages not specified')

    gen_args = ' '.join(gen_args)
    pywikibot.output(u"""
    Page transfer configuration
    ---------------------------
    Source: %(fromsite)r
    Target: %(tosite)r

    Pages to transfer: %(gen_args)s

    Prefix for transferred pages: %(prefix)s
    """ % locals())

    for page in gen:
        summary = "Moved page from %s" % page.title(asLink=True)
        targetpage = pywikibot.Page(tosite, prefix + page.title())
        # The full revision history is copied to a dedicated subpage.
        edithistpage = pywikibot.Page(tosite, prefix + page.title() + '/edithistory')

        if targetpage.exists() and not overwrite:
            pywikibot.output(
                u"Skipped %s (target page %s exists)" % (
                    page.title(asLink=True),
                    targetpage.title(asLink=True)
                )
            )
            continue

        pywikibot.output(u"Moving %s to %s..."
                         % (page.title(asLink=True),
                            targetpage.title(asLink=True)))

        pywikibot.log("Getting page text.")
        text = page.get(get_redirect=True)
        text += "<noinclude>\n\n<small>This page was moved from %s. It's edit history can be viewed at %s</small></noinclude>" % (
            page.title(asLink=True, insite=targetpage.site),
            edithistpage.title(asLink=True, insite=targetpage.site))

        pywikibot.log("Getting edit history.")
        historytable = page.getVersionHistoryTable()

        pywikibot.log("Putting page text.")
        targetpage.put(text, summary=summary)

        pywikibot.log("Putting edit history.")
        edithistpage.put(historytable, summary=summary)
if __name__ == "__main__":
    # Entry point: report missing-configuration errors with usage hints
    # instead of a raw traceback.
    try:
        main()
    except TargetSiteMissing:
        # The exception object itself is not used, so don't bind it
        # (the old `as e` names were dead variables).
        pywikibot.error(u'Need to specify a target site and/or language')
        pywikibot.error(u'Try running this script with -help for help/usage')
        pywikibot.exception()
    except TargetPagesMissing:
        pywikibot.error(u'Need to specify a page range')
        pywikibot.error(u'Try running this script with -help for help/usage')
        pywikibot.exception()
| mit |
pollen/pyrobus | pyluos/modules/stepper.py | 1 | 6265 | from .module import Module, interact
from copy import copy
import time
class Stepper(Module):
    """Pyluos driver for a Luos stepper-motor module.

    The module is configured through a bitfield of boolean flags
    (``self._config``): indices 5-11 select the target mode (compliant,
    power, rotation/translation speed or position) and indices 0-4 select
    which measurements the module reports back.
    """

    # target modes (bit indices in the _config bitfield)
    _MODE_COMPLIANT = 11
    _MODE_POWER = 10
    _MODE_ROT_SPEED = 8
    _MODE_ROT_POSITION = 7
    _MODE_TRANS_SPEED = 6
    _MODE_TRANS_POSITION = 5
    # report modes
    _ROTATION_POSITION = 4
    _ROTATION_SPEED = 3
    _TRANSLATION_POSITION = 2
    _TRANSLATION_SPEED = 1
    _CURRENT = 0

    def __init__(self, id, alias, device):
        Module.__init__(self, 'Stepper', id, alias, device)
        self._config = [False] * (Stepper._MODE_COMPLIANT + 1)
        # default configs, enable compliant, power_mode, and rotation position report
        self._config[Stepper._MODE_ROT_POSITION] = True
        self._config[Stepper._MODE_COMPLIANT] = True
        # BUG FIX: mirror the default compliant state so that reading the
        # `compliant` property before the first write no longer raises
        # AttributeError (the attribute used to be set only by the setter).
        self._compliant = True
        # configuration
        self._resolution = 200.0
        self._dimension = 0.0
        # targets
        self._target_rot_speed = 100.0
        self._target_rot_position = 0.0
        self._target_trans_speed = 0.0
        self._target_trans_position = 0.0

    def _convert_config(self):
        """Pack the boolean config flags into an integer.

        The flag list is joined most-significant-bit first, i.e. the table
        is read in reverse: _config[0] becomes the highest bit.
        """
        return int(''.join(['1' if c else '0' for c in self._config]), 2)

    def bit(self, i, enable):
        """Set configuration flag *i* to *enable*.

        BUG FIX: the previous implementation concatenated list slices with
        an empty tuple (a TypeError at runtime) and ignored *enable*
        entirely; it now simply updates the flag in place.
        """
        self._config[i] = bool(enable)

    #************************** configurations *****************************
    def setToZero(self):
        """Reset the motor's current position to zero."""
        self._push_value('reinit', None)

    @property
    def stepPerTurn(self):
        """Number of motor steps per full revolution."""
        return self._resolution

    @stepPerTurn.setter
    def stepPerTurn(self, s):
        self._resolution = s
        self._push_value("resolution", s)

    @property
    def wheel_size(self):
        """Wheel size used to convert between rotation and translation."""
        return self._dimension

    @wheel_size.setter
    def wheel_size(self, s):
        self._dimension = s
        self._push_value("dimension", s)

    #************************** target modes *****************************
    # compliant
    @property
    def compliant(self):
        # BUG FIX: the getter used to evaluate self._compliant without
        # returning it, so it always yielded None.
        return self._compliant

    @compliant.setter
    def compliant(self, enable):
        self._config[Stepper._MODE_COMPLIANT] = True if enable != 0 else False
        self._compliant = enable
        self._push_value('parameters', self._convert_config())
        time.sleep(0.01)

    # rotation speed
    @property
    def target_rot_speed(self):
        return self._target_rot_speed

    @target_rot_speed.setter
    def target_rot_speed(self, s):
        self._target_rot_speed = s
        self._push_value("target_rot_speed", s)

    # rotation position
    @property
    def target_rot_position(self):
        if (self._config[Stepper._MODE_ROT_POSITION] != True):
            print("rotation position mode is not enabled in the module please use 'device.module.rot_position_mode(True)' to enable it")
        return self._target_rot_position

    @target_rot_position.setter
    def target_rot_position(self, s):
        if (self._config[Stepper._MODE_ROT_POSITION] != True):
            print("rotation position mode is not enabled in the module please use 'device.module.rot_position_mode(True)' to enable it")
        self._target_rot_position = s
        self._push_value("target_rot_position", s)

    def rot_position_mode(self, enable):
        """Enable/disable rotation-position mode (exclusive with
        translation-position mode)."""
        self._config[Stepper._MODE_ROT_POSITION] = True if enable != 0 else False
        if (enable == True):
            self._config[Stepper._MODE_TRANS_POSITION] = False
        self._push_value('parameters', self._convert_config())
        time.sleep(0.01)

    # translation speed
    @property
    def target_trans_speed(self):
        return self._target_trans_speed

    @target_trans_speed.setter
    def target_trans_speed(self, s):
        if (self._dimension == 0):
            print("you have to setup a wheel_size before using translation command")
        self._target_trans_speed = s
        self._push_value("target_trans_speed", s)

    # translation position
    @property
    def target_trans_position(self):
        if (self._config[Stepper._MODE_TRANS_POSITION] != True):
            print("translation speed mode is not enabled in the module please use 'device.module.trans_speed_mode(True)' to enable it")
        return self._target_trans_position

    @target_trans_position.setter
    def target_trans_position(self, s):
        if (self._config[Stepper._MODE_TRANS_POSITION] != True):
            print("translation speed mode is not enabled in the module please use 'device.module.trans_speed_mode(True)' to enable it")
        self._target_trans_position = s
        self._push_value("target_trans_position", s)

    def trans_position_mode(self, enable):
        """Enable/disable translation-position mode (exclusive with
        rotation-position mode); requires wheel_size to be configured."""
        if (self._dimension == 0):
            print("you have to setup a wheel_size before using translation command")
        self._config[Stepper._MODE_TRANS_POSITION] = True if enable != 0 else False
        if (enable == True):
            self._config[Stepper._MODE_ROT_POSITION] = False
        self._push_value('parameters', self._convert_config())
        time.sleep(0.01)

    #************************** controls and updates *****************************
    def _update(self, new_state):
        Module._update(self, new_state)

    def control(self):
        """Interactive widget (via `interact`) to drive the stepper live."""

        def change_config(compliant_mode, rot_speed, rot_position_mode, rot_position, trans_speed, trans_position_mode, trans_position):
            # target mode
            self.compliant = compliant_mode
            self.target_rot_speed = rot_speed
            self.rot_position_mode(rot_position_mode)
            if (rot_position_mode):
                self.target_rot_position = rot_position
            self.target_trans_speed = trans_speed
            self.trans_position_mode(trans_position_mode)
            if (trans_position_mode):
                self.target_trans_position = trans_position

        w = interact(change_config,
                     compliant_mode=self._config[Stepper._MODE_COMPLIANT],
                     rot_speed=(-700, 700, 1),
                     rot_position_mode=self._config[Stepper._MODE_ROT_POSITION],
                     rot_position=(-360.0, 360.0, 1.0),
                     trans_speed=(-1000, 1000, 1),
                     trans_position_mode=self._config[Stepper._MODE_TRANS_POSITION],
                     trans_position=(-1000, 1000, 1))
| mit |
arante/pyloc | microblog/flask/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/pymssql.py | 32 | 3143 | # mssql/pymssql.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+pymssql
:name: pymssql
:dbapi: pymssql
:connectstring: mssql+pymssql://<username>:<password>@<freetds_name>/?\
charset=utf8
:url: http://pymssql.org/
pymssql is a Python module that provides a Python DBAPI interface around
`FreeTDS <http://www.freetds.org/>`_. Compatible builds are available for
Linux, MacOSX and Windows platforms.
"""
from .base import MSDialect
from ... import types as sqltypes, util, processors
import re
class _MSNumeric_pymssql(sqltypes.Numeric):
    """Numeric type for pymssql that can emit plain floats.

    When ``asdecimal`` is False, result rows are converted with the
    C-accelerated ``processors.to_float`` instead of Decimal objects.
    """

    def result_processor(self, dialect, type_):
        # Default Decimal behavior is inherited; only the float case is
        # special-cased for this driver.
        if self.asdecimal:
            return sqltypes.Numeric.result_processor(self, dialect, type_)
        return processors.to_float
class MSDialect_pymssql(MSDialect):
    """SQL Server dialect that talks to the server via the pymssql DBAPI."""

    supports_sane_rowcount = False
    driver = 'pymssql'

    colspecs = util.update_copy(
        MSDialect.colspecs,
        {
            sqltypes.Numeric: _MSNumeric_pymssql,
            sqltypes.Float: sqltypes.Float,
        }
    )

    @classmethod
    def dbapi(cls):
        """Import and return the pymssql module, shimming old releases."""
        dbapi_module = __import__('pymssql')
        version_info = tuple(
            int(piece) for piece in dbapi_module.__version__.split("."))
        # pymmsql < 2.1.1 doesn't have a Binary method. we use string
        if version_info < (2, 1, 1):
            # TODO: monkeypatching here is less than ideal
            dbapi_module.Binary = (
                lambda x: x if hasattr(x, 'decode') else str(x))
        if version_info < (1, ):
            util.warn("The pymssql dialect expects at least "
                      "the 1.0 series of the pymssql DBAPI.")
        return dbapi_module

    def __init__(self, **params):
        """Initialize the dialect; SCOPE_IDENTITY() is always available."""
        super(MSDialect_pymssql, self).__init__(**params)
        self.use_scope_identity = True

    def _get_server_version_info(self, connection):
        """Parse ``select @@version`` into a (major, minor, build, rev) tuple.

        Returns None when the banner does not match the expected format.
        """
        banner = connection.scalar("select @@version")
        match = re.match(
            r"Microsoft .*? - (\d+).(\d+).(\d+).(\d+)", banner)
        if match is None:
            return None
        return tuple(int(part) for part in match.group(1, 2, 3, 4))

    def create_connect_args(self, url):
        """Translate a SQLAlchemy URL into pymssql connect() kwargs.

        pymssql takes the port folded into the host string ("host:port")
        rather than as a separate argument.
        """
        options = url.translate_connect_args(username='user')
        options.update(url.query)
        port = options.pop('port', None)
        if port and 'host' in options:
            options['host'] = "%s:%s" % (options['host'], port)
        return [[], options]

    def is_disconnect(self, e, connection, cursor):
        """Report True when the error text matches a known disconnect marker."""
        markers = (
            "Adaptive Server connection timed out",
            "Net-Lib error during Connection reset by peer",
            "message 20003",  # connection timeout
            "Error 10054",
            "Not connected to any MS SQL server",
            "Connection is closed",
            "message 20006",  # Write to the server failed
            "message 20017",  # Unexpected EOF from the server
        )
        error_text = str(e)
        return any(marker in error_text for marker in markers)
# Module-level name SQLAlchemy's dialect loader looks up for "mssql+pymssql".
dialect = MSDialect_pymssql
| gpl-3.0 |
rigdenlab/conkit | conkit/io/tests/test_stockholm.py | 2 | 26385 | """Testing facility for conkit.io.StockholmIO"""
__author__ = "Felix Simkovic"
__date__ = "12 Sep 2016"
import os
import unittest
from conkit.io.stockholm import StockholmParser
from conkit.io.tests.helpers import ParserTestCase
class TestStockholmParser(ParserTestCase):
    """Round-trip tests for conkit's Stockholm 1.0 alignment parser."""

    def test_read_1(self):
        """Read a multi-paragraph Stockholm MSA and verify ids, sequences, remarks."""
        msa = """# STOCKHOLM 1.0
#=GF ID 1EAZ:A|PDBID|CHAIN|SEQUENCE-i5
#=GS UniRef100_A0A0D2WIY8/647-745 DE [subseq from] Stromal membrane-associated GTPase-activating protein 2 n=1 Tax=Capsaspora owczarzaki (strain ATCC 30864) RepID=A0A0D2WIY8_CAPO3
#=GS UniRef100_A0A0D2WIY8/761-857 DE [subseq from] Stromal membrane-associated GTPase-activating protein 2 n=1 Tax=Capsaspora owczarzaki (strain ATCC 30864) RepID=A0A0D2WIY8_CAPO3
#=GS UniRef100_A0A0D2WIY8/1126-1228 DE [subseq from] Stromal membrane-associated GTPase-activating protein 2 n=1 Tax=Capsaspora owczarzaki (strain ATCC 30864) RepID=A0A0D2WIY8_CAPO3
#=GS UniRef100_A0A0D2WIY8/1245-1341 DE [subseq from] Stromal membrane-associated GTPase-activating protein 2 n=1 Tax=Capsaspora owczarzaki (strain ATCC 30864) RepID=A0A0D2WIY8_CAPO3
#=GS UniRef100_A0A0D2WIY8/1752-1857 DE [subseq from] Stromal membrane-associated GTPase-activating protein 2 n=1 Tax=Capsaspora owczarzaki (strain ATCC 30864) RepID=A0A0D2WIY8_CAPO3
1EAZ:A|PDBID|CHAIN|SEQUENCE GSMFTPKPPQDSAVIKAG-YC-V------K-Q-------------------------------------------------G-A------------VM------------------------------------------------------------------------------------------------------
UniRef100_A0A0D2WIY8/647-745 ---------------IEG-YL-S------K-Q-------------------------------------------------G-GV----------NNN------------------------------------------------------------------------------------------------------
#=GR UniRef100_A0A0D2WIY8/647-745 PP ...............79*.**.*......*.*.................................................*.99..........9**......................................................................................................
UniRef100_A0A0D2WIY8/761-857 -----------ANPDKEG-WL-K------K-Q-------------------------------------------------G-N-----------SMA------------------------------------------------------------------------------------------------------
#=GR UniRef100_A0A0D2WIY8/761-857 PP ...........5899***.**.*......*.*.................................................*.9...........9**......................................................................................................
UniRef100_A0A0D2WIY8/1126-1228 ------------AVRKLG-FL-Y------K-Q-------------------------------------------------G-G------------SN------------------------------------------------------------------------------------------------------
#=GR UniRef100_A0A0D2WIY8/1126-1228 PP ............68999*.**.*......*.*.................................................*.*............**......................................................................................................
UniRef100_A0A0D2WIY8/1245-1341 ------------NPARQG-WL-C------K-R-------------------------------------------------G-G------------TY------------------------------------------------------------------------------------------------------
#=GR UniRef100_A0A0D2WIY8/1245-1341 PP ............6789**.**.*......*.*.................................................*.*............**......................................................................................................
UniRef100_A0A0D2WIY8/1752-1857 -------QRATPGFKMKG-WL-H------K-E-------------------------------------------------G-G------------SV------------------------------------------------------------------------------------------------------
#=GR UniRef100_A0A0D2WIY8/1752-1857 PP .......556677889**.**.*......*.*.................................................*.*............**......................................................................................................
#=GC PP_cons .......*....*...................................*.....*..*................*.................*..*....*................................................*****999875555677766776.
#=GC RF .......x....x...................................x.....x..x................x.................x..x....x................................................xxxxxxxxxxxxxxxxxxxxxxxx
1EAZ:A|PDBID|CHAIN|SEQUENCE -------------------K-----------------------NW---------------------------------------------------------------------------------------------K--R-R------------Y----F-QL--------D--E-----------------------
UniRef100_A0A0D2WIY8/647-745 -------------------K-----------------------GW---------------------------------------------------------------------------------------------K--R-R------------Y----C-VL--------E--N-----------------------
#=GR UniRef100_A0A0D2WIY8/647-745 PP ...................*.......................**.............................................................................................*..*.*............*....*.**........*..*.......................
UniRef100_A0A0D2WIY8/761-857 -------------------K-----------------------DW---------------------------------------------------------------------------------------------K--K-R------------Y----I-AI--------K--E-----------------------
#=GR UniRef100_A0A0D2WIY8/761-857 PP ...................*.......................**.............................................................................................*..*.*............*....*.**........*..*.......................
UniRef100_A0A0D2WIY8/1126-1228 -------------------K-----------------------GW---------------------------------------------------------------------------------------------R--K-R------------W----I-VM--------E--H-----------------------
#=GR UniRef100_A0A0D2WIY8/1126-1228 PP ...................*.......................**.............................................................................................*..*.*............*....*.**........*..*.......................
UniRef100_A0A0D2WIY8/1245-1341 -------------------T-----------------------SW---------------------------------------------------------------------------------------------K--K-R------------W----L-VL--------K--G-----------------------
#=GR UniRef100_A0A0D2WIY8/1245-1341 PP ...................*.......................**.............................................................................................*..*.*............*....*.**........*..*.......................
UniRef100_A0A0D2WIY8/1752-1857 -------------------K-----------------------TW---------------------------------------------------------------------------------------------K--R-R------------W----F-ST--------T--P-----------------------
#=GR UniRef100_A0A0D2WIY8/1752-1857 PP ...................*.......................**.............................................................................................*..*.*............*....*.**........*..*.......................
#=GC PP_cons ...................9.......................**.............................................................................................*..*.*............*....*.**........9..9.......................
#=GC RF ...................x.......................xx.............................................................................................x..x.x............x....x.xx........x..x.......................
//
"""
        f_name = self.tempfile(content=msa)
        parser = StockholmParser()
        with open(f_name, "r") as f_in:
            hierarchy = parser.read(f_in)
        # Entries come back in file order; the two alignment paragraphs are
        # concatenated into a single full-length sequence per entry, and the
        # #=GS DE descriptions surface as each entry's remark list.
        for i, sequence_entry in enumerate(hierarchy):
            if i == 0:
                self.assertEqual("1EAZ:A|PDBID|CHAIN|SEQUENCE", sequence_entry.id)
                self.assertEqual(
                    "GSMFTPKPPQDSAVIKAG-YC-V------K-Q-------------------------------------------------G-A-"
                    "-----------VM------------------------------------------------------------------------"
                    "-------------------------------------------------K-----------------------NW----------"
                    "-----------------------------------------------------------------------------------K-"
                    "-R-R------------Y----F-QL--------D--E-----------------------",
                    sequence_entry.seq,
                )
            elif i == 1:
                self.assertEqual("UniRef100_A0A0D2WIY8/647-745", sequence_entry.id)
                self.assertEqual(
                    "---------------IEG-YL-S------K-Q-------------------------------------------------G-GV"
                    "----------NNN------------------------------------------------------------------------"
                    "-------------------------------------------------K-----------------------GW----------"
                    "-----------------------------------------------------------------------------------K-"
                    "-R-R------------Y----C-VL--------E--N-----------------------",
                    sequence_entry.seq,
                )
                self.assertEqual(
                    [
                        "[subseq from] Stromal membrane-associated GTPase-activating protein 2 n=1 Tax=C"
                        "apsaspora owczarzaki (strain ATCC 30864) RepID=A0A0D2WIY8_CAPO3"
                    ],
                    sequence_entry.remark,
                )
            elif i == 2:
                self.assertEqual("UniRef100_A0A0D2WIY8/761-857", sequence_entry.id)
                self.assertEqual(
                    "-----------ANPDKEG-WL-K------K-Q-------------------------------------------------G-N-"
                    "----------SMA------------------------------------------------------------------------"
                    "-------------------------------------------------K-----------------------DW----------"
                    "-----------------------------------------------------------------------------------K-"
                    "-K-R------------Y----I-AI--------K--E-----------------------",
                    sequence_entry.seq,
                )
                self.assertEqual(
                    [
                        "[subseq from] Stromal membrane-associated GTPase-activating protein 2 n=1 Tax=C"
                        "apsaspora owczarzaki (strain ATCC 30864) RepID=A0A0D2WIY8_CAPO3"
                    ],
                    sequence_entry.remark,
                )
            elif i == 3:
                self.assertEqual("UniRef100_A0A0D2WIY8/1126-1228", sequence_entry.id)
                self.assertEqual(
                    "------------AVRKLG-FL-Y------K-Q-------------------------------------------------G-G-"
                    "-----------SN------------------------------------------------------------------------"
                    "-------------------------------------------------K-----------------------GW----------"
                    "-----------------------------------------------------------------------------------R-"
                    "-K-R------------W----I-VM--------E--H-----------------------",
                    sequence_entry.seq,
                )
                self.assertEqual(
                    [
                        "[subseq from] Stromal membrane-associated GTPase-activating protein 2 n=1 Tax=C"
                        "apsaspora owczarzaki (strain ATCC 30864) RepID=A0A0D2WIY8_CAPO3"
                    ],
                    sequence_entry.remark,
                )
            elif i == 4:
                self.assertEqual("UniRef100_A0A0D2WIY8/1245-1341", sequence_entry.id)
                self.assertEqual(
                    "------------NPARQG-WL-C------K-R-------------------------------------------------G-G-"
                    "-----------TY------------------------------------------------------------------------"
                    "-------------------------------------------------T-----------------------SW----------"
                    "-----------------------------------------------------------------------------------K-"
                    "-K-R------------W----L-VL--------K--G-----------------------",
                    sequence_entry.seq,
                )
                self.assertEqual(
                    [
                        "[subseq from] Stromal membrane-associated GTPase-activating protein 2 n=1 Tax=C"
                        "apsaspora owczarzaki (strain ATCC 30864) RepID=A0A0D2WIY8_CAPO3"
                    ],
                    sequence_entry.remark,
                )
            elif i == 5:
                self.assertEqual("UniRef100_A0A0D2WIY8/1752-1857", sequence_entry.id)
                self.assertEqual(
                    "-------QRATPGFKMKG-WL-H------K-E-------------------------------------------------G-G-"
                    "-----------SV------------------------------------------------------------------------"
                    "-------------------------------------------------K-----------------------TW----------"
                    "-----------------------------------------------------------------------------------K-"
                    "-R-R------------W----F-ST--------T--P-----------------------",
                    sequence_entry.seq,
                )
                self.assertEqual(
                    [
                        "[subseq from] Stromal membrane-associated GTPase-activating protein 2 n=1 Tax=C"
                        "apsaspora owczarzaki (strain ATCC 30864) RepID=A0A0D2WIY8_CAPO3"
                    ],
                    sequence_entry.remark,
                )

    def test_write_1(self):
        """Round-trip an MSA through read+write and compare against expected output."""
        msa = [
            "# STOCKHOLM 1.0",
            "#=GF ID 1EAZ:A|PDBID|CHAIN|SEQUENCE-i5",
            "",
            "#=GS UniRef100_A0A0D2WIY8/647-745 DE [subseq from] Stromal membrane-associated GTPase-activating protein 2 n=1 Tax=Capsaspora owczarzaki (strain ATCC 30864) RepID=A0A0D2WIY8_CAPO3",
            "#=GS UniRef100_A0A0D2WIY8/761-857 DE [subseq from] Stromal membrane-associated GTPase-activating protein 2 n=1 Tax=Capsaspora owczarzaki (strain ATCC 30864) RepID=A0A0D2WIY8_CAPO3",
            "#=GS UniRef100_A0A0D2WIY8/1126-1228 DE [subseq from] Stromal membrane-associated GTPase-activating protein 2 n=1 Tax=Capsaspora owczarzaki (strain ATCC 30864) RepID=A0A0D2WIY8_CAPO3",
            "#=GS UniRef100_A0A0D2WIY8/1245-1341 DE [subseq from] Stromal membrane-associated GTPase-activating protein 2 n=1 Tax=Capsaspora owczarzaki (strain ATCC 30864) RepID=A0A0D2WIY8_CAPO3",
            "#=GS UniRef100_A0A0D2WIY8/1752-1857 DE [subseq from] Stromal membrane-associated GTPase-activating protein 2 n=1 Tax=Capsaspora owczarzaki (strain ATCC 30864) RepID=A0A0D2WIY8_CAPO3",
            "",
            "1EAZ:A|PDBID|CHAIN|SEQUENCE GSMFTPKPPQDSAVIKAG-YC-V------K-Q-------------------------------------------------G-A------------VM------------------------------------------------------------------------------------------------------",
            "UniRef100_A0A0D2WIY8/647-745 ---------------IEG-YL-S------K-Q-------------------------------------------------G-GV----------NNN------------------------------------------------------------------------------------------------------",
            "#=GR UniRef100_A0A0D2WIY8/647-745 PP ...............79*.**.*......*.*.................................................*.99..........9**......................................................................................................",
            "UniRef100_A0A0D2WIY8/761-857 -----------ANPDKEG-WL-K------K-Q-------------------------------------------------G-N-----------SMA------------------------------------------------------------------------------------------------------",
            "#=GR UniRef100_A0A0D2WIY8/761-857 PP ...........5899***.**.*......*.*.................................................*.9...........9**......................................................................................................",
            "UniRef100_A0A0D2WIY8/1126-1228 ------------AVRKLG-FL-Y------K-Q-------------------------------------------------G-G------------SN------------------------------------------------------------------------------------------------------",
            "#=GR UniRef100_A0A0D2WIY8/1126-1228 PP ............68999*.**.*......*.*.................................................*.*............**......................................................................................................",
            "UniRef100_A0A0D2WIY8/1245-1341 ------------NPARQG-WL-C------K-R-------------------------------------------------G-G------------TY------------------------------------------------------------------------------------------------------",
            "#=GR UniRef100_A0A0D2WIY8/1245-1341 PP ............6789**.**.*......*.*.................................................*.*............**......................................................................................................",
            "UniRef100_A0A0D2WIY8/1752-1857 -------QRATPGFKMKG-WL-H------K-E-------------------------------------------------G-G------------SV------------------------------------------------------------------------------------------------------",
            "#=GR UniRef100_A0A0D2WIY8/1752-1857 PP .......556677889**.**.*......*.*.................................................*.*............**......................................................................................................",
            "#=GC PP_cons .......*....*...................................*.....*..*................*.................*..*....*................................................*****999875555677766776.",
            "#=GC RF .......x....x...................................x.....x..x................x.................x..x....x................................................xxxxxxxxxxxxxxxxxxxxxxxx",
            "",
            "1EAZ:A|PDBID|CHAIN|SEQUENCE -------------------K-----------------------NW---------------------------------------------------------------------------------------------K--R-R------------Y----F-QL--------D--E-----------------------",
            "UniRef100_A0A0D2WIY8/647-745 -------------------K-----------------------GW---------------------------------------------------------------------------------------------K--R-R------------Y----C-VL--------E--N-----------------------",
            "#=GR UniRef100_A0A0D2WIY8/647-745 PP ...................*.......................**.............................................................................................*..*.*............*....*.**........*..*.......................",
            "UniRef100_A0A0D2WIY8/761-857 -------------------K-----------------------DW---------------------------------------------------------------------------------------------K--K-R------------Y----I-AI--------K--E-----------------------",
            "#=GR UniRef100_A0A0D2WIY8/761-857 PP ...................*.......................**.............................................................................................*..*.*............*....*.**........*..*.......................",
            "UniRef100_A0A0D2WIY8/1126-1228 -------------------K-----------------------GW---------------------------------------------------------------------------------------------R--K-R------------W----I-VM--------E--H-----------------------",
            "#=GR UniRef100_A0A0D2WIY8/1126-1228 PP ...................*.......................**.............................................................................................*..*.*............*....*.**........*..*.......................",
            "UniRef100_A0A0D2WIY8/1245-1341 -------------------T-----------------------SW---------------------------------------------------------------------------------------------K--K-R------------W----L-VL--------K--G-----------------------",
            "#=GR UniRef100_A0A0D2WIY8/1245-1341 PP ...................*.......................**.............................................................................................*..*.*............*....*.**........*..*.......................",
            "UniRef100_A0A0D2WIY8/1752-1857 -------------------K-----------------------TW---------------------------------------------------------------------------------------------K--R-R------------W----F-ST--------T--P-----------------------",
            "#=GR UniRef100_A0A0D2WIY8/1752-1857 PP ...................*.......................**.............................................................................................*..*.*............*....*.**........*..*.......................",
            "#=GC PP_cons ...................9.......................**.............................................................................................*..*.*............*....*.**........9..9.......................",
            "#=GC RF ...................x.......................xx.............................................................................................x..x.x............x....x.xx........x..x.......................",
            "//",
        ]
        parser = StockholmParser()
        f_name_in = self.tempfile(content="\n".join(msa))
        f_name_out = self.tempfile()
        with open(f_name_in, "r") as f_in, open(f_name_out, "w") as f_out:
            hierarchy = parser.read(f_in)
            parser.write(f_out, hierarchy)
        # Expected output below: the per-column annotation lines (#=GR, #=GC)
        # and the "-i5" suffix on the #=GF ID line are not preserved by write.
        ref = [
            "# STOCKHOLM 1.0",
            "#=GF ID 1EAZ:A|PDBID|CHAIN|SEQUENCE",
            "",
            "#=GS UniRef100_A0A0D2WIY8/647-745 DE [subseq from] Stromal membrane-associated GTPase-activating protein 2 n=1 Tax=Capsaspora owczarzaki (strain ATCC 30864) RepID=A0A0D2WIY8_CAPO3",
            "#=GS UniRef100_A0A0D2WIY8/761-857 DE [subseq from] Stromal membrane-associated GTPase-activating protein 2 n=1 Tax=Capsaspora owczarzaki (strain ATCC 30864) RepID=A0A0D2WIY8_CAPO3",
            "#=GS UniRef100_A0A0D2WIY8/1126-1228 DE [subseq from] Stromal membrane-associated GTPase-activating protein 2 n=1 Tax=Capsaspora owczarzaki (strain ATCC 30864) RepID=A0A0D2WIY8_CAPO3",
            "#=GS UniRef100_A0A0D2WIY8/1245-1341 DE [subseq from] Stromal membrane-associated GTPase-activating protein 2 n=1 Tax=Capsaspora owczarzaki (strain ATCC 30864) RepID=A0A0D2WIY8_CAPO3",
            "#=GS UniRef100_A0A0D2WIY8/1752-1857 DE [subseq from] Stromal membrane-associated GTPase-activating protein 2 n=1 Tax=Capsaspora owczarzaki (strain ATCC 30864) RepID=A0A0D2WIY8_CAPO3",
            "",
            "1EAZ:A|PDBID|CHAIN|SEQUENCE GSMFTPKPPQDSAVIKAG-YC-V------K-Q-------------------------------------------------G-A------------VM------------------------------------------------------------------------------------------------------",
            "UniRef100_A0A0D2WIY8/647-745 ---------------IEG-YL-S------K-Q-------------------------------------------------G-GV----------NNN------------------------------------------------------------------------------------------------------",
            "UniRef100_A0A0D2WIY8/761-857 -----------ANPDKEG-WL-K------K-Q-------------------------------------------------G-N-----------SMA------------------------------------------------------------------------------------------------------",
            "UniRef100_A0A0D2WIY8/1126-1228 ------------AVRKLG-FL-Y------K-Q-------------------------------------------------G-G------------SN------------------------------------------------------------------------------------------------------",
            "UniRef100_A0A0D2WIY8/1245-1341 ------------NPARQG-WL-C------K-R-------------------------------------------------G-G------------TY------------------------------------------------------------------------------------------------------",
            "UniRef100_A0A0D2WIY8/1752-1857 -------QRATPGFKMKG-WL-H------K-E-------------------------------------------------G-G------------SV------------------------------------------------------------------------------------------------------",
            "",
            "1EAZ:A|PDBID|CHAIN|SEQUENCE -------------------K-----------------------NW---------------------------------------------------------------------------------------------K--R-R------------Y----F-QL--------D--E-----------------------",
            "UniRef100_A0A0D2WIY8/647-745 -------------------K-----------------------GW---------------------------------------------------------------------------------------------K--R-R------------Y----C-VL--------E--N-----------------------",
            "UniRef100_A0A0D2WIY8/761-857 -------------------K-----------------------DW---------------------------------------------------------------------------------------------K--K-R------------Y----I-AI--------K--E-----------------------",
            "UniRef100_A0A0D2WIY8/1126-1228 -------------------K-----------------------GW---------------------------------------------------------------------------------------------R--K-R------------W----I-VM--------E--H-----------------------",
            "UniRef100_A0A0D2WIY8/1245-1341 -------------------T-----------------------SW---------------------------------------------------------------------------------------------K--K-R------------W----L-VL--------K--G-----------------------",
            "UniRef100_A0A0D2WIY8/1752-1857 -------------------K-----------------------TW---------------------------------------------------------------------------------------------K--R-R------------W----F-ST--------T--P-----------------------",
            "//",
        ]
        with open(f_name_out, "r") as f_in:
            output = f_in.read().splitlines()
        self.assertEqual(ref, output)
if __name__ == "__main__":
    # verbosity=2 prints each test name as it runs.
    unittest.main(verbosity=2)
| bsd-3-clause |
markpasc/leapfrog | leapfrog/management/commands/fetchnewcontent.py | 1 | 3457 | from datetime import datetime, timedelta
import logging
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from sentry.client.base import SentryClient
from leapfrog.models import *
from leapfrog.poll import facebook
from leapfrog.poll import flickr
from leapfrog.poll import mlkshk
from leapfrog.poll import tumblr
from leapfrog.poll import typepad
from leapfrog.poll import vimeo
# Maps an Account.service value (a service hostname) to the poll function
# used to fetch that account's new content. Services missing from this map
# are skipped by the command below.
pollers = {
    'facebook.com': facebook.poll_facebook,
    'flickr.com': flickr.poll_flickr,
    'mlkshk.com': mlkshk.poll_mlkshk,
    'tumblr.com': tumblr.poll_tumblr,
    'typepad.com': typepad.poll_typepad,
    'vimeo.com': vimeo.poll_vimeo,
}
class Command(NoArgsCommand):
    """Management command that polls every eligible account for new content.

    NOTE: this module uses Python 2 syntax (``except Exception, exc``).
    """

    option_list = NoArgsCommand.option_list + (
        make_option('--force',
            action='store_true',
            dest='force',
            default=False,
            help='Update all accounts, even ones that have been updated recently',
        ),
        make_option('--service',
            dest='service',
            help='Update only accounts on this service',
        ),
    )

    def fetch_new_content(self, **options):
        """Poll each person's accounts once, honoring --force and --service.

        An account is skipped when: its owner has no Person record, the owner
        has not viewed the site within five days, no poller exists for its
        service, --service names a different service, or it was polled within
        the last 15 minutes (unless --force).
        """
        update_horizon = datetime.utcnow() - timedelta(minutes=15)
        last_viewed_horizon = datetime.utcnow() - timedelta(days=5)

        users = User.objects.all()
        for user in users:
            try:
                person = user.person
            except Person.DoesNotExist:
                continue

            # Don't update accounts if someone hasn't viewed the site in some days.
            if person.last_viewed_home < last_viewed_horizon:
                logging.getLogger(__name__).debug("User %s hasn't viewed the site in a while; skipping", user.username)
                continue

            for account in person.accounts.all():
                # Per-service child logger so log lines identify the poller.
                log = logging.getLogger('%s.%s' % (__name__, account.service))
                try:
                    poller = pollers[account.service]
                except KeyError:
                    log.debug("Account service %s has no poller, skipping", account.service)
                    continue
                if options['service'] and account.service != options['service']:
                    log.debug("Account is for service %s but we're only polling %s, skipping", account.service, options['service'])
                    continue
                if not options['force'] and account.last_updated > update_horizon:
                    log.debug("Account %s %s was updated fewer than 15 minutes ago, skipping", account.service, account.display_name)
                    continue

                # Mark the account as updated even if the update fails later.
                log.debug("Polling account %s %s", account.service, account.display_name)
                account.last_updated = datetime.utcnow()
                account.save()

                try:
                    poller(account)
                except Exception, exc:
                    # Report to Sentry but keep polling the remaining accounts.
                    log.exception(exc)
                    SentryClient().create_from_exception(view='%s.%s' % (__name__, account.service))
                else:
                    # last_success only advances when the poll completed.
                    account.last_success = datetime.utcnow()
                    account.save()

    def handle_noargs(self, **options):
        """Django entry point; any top-level failure is reported to Sentry."""
        try:
            self.fetch_new_content(**options)
        except Exception, exc:
            logging.exception(exc)
            SentryClient().create_from_exception(view=__name__)
| mit |
kkhenriquez/kkhenriquez.github.io | node_modules/node-gyp/gyp/pylib/gyp/mac_tool.py | 1569 | 23354 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to perform Xcode-style build steps.
These functions are executed via gyp-mac-tool when using the Makefile generator.
"""
import fcntl
import fnmatch
import glob
import json
import os
import plistlib
import re
import shutil
import string
import subprocess
import sys
import tempfile
def main(args):
  """Run the MacTool command named by |args|, exiting on a non-None status."""
  status = MacTool().Dispatch(args)
  if status is not None:
    sys.exit(status)
class MacTool(object):
"""This class performs all the Mac tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecCopyBundleResource(self, source, dest, convert_to_binary):
  """Copies a resource file to the bundle/Resources directory, performing any
  necessary compilation on each resource."""
  extension = os.path.splitext(source)[1].lower()
  if os.path.isdir(source):
    # Directory resource: replace any existing destination tree wholesale.
    # TODO(thakis): This copies file attributes like mtime, while the
    # single-file branch below doesn't. This should probably be changed to
    # be consistent with the single-file branch.
    if os.path.exists(dest):
      shutil.rmtree(dest)
    shutil.copytree(source, dest)
  elif extension in ('.xib', '.storyboard'):
    # Interface Builder documents get compiled with ibtool.
    return self._CopyXIBFile(source, dest)
  elif extension == '.strings':
    self._CopyStringsFile(source, dest, convert_to_binary)
  else:
    shutil.copy(source, dest)
def _CopyXIBFile(self, source, dest):
  """Compiles a XIB file with ibtool into a binary plist in the bundle.

  Returns ibtool's exit status so callers can propagate compile failures.
  Noisy "is clipping its content" notes are filtered from ibtool's output.
  """
  # ibtool sometimes crashes with relative paths. See crbug.com/314728.
  base = os.path.dirname(os.path.realpath(__file__))
  # NOTE(review): os.path.relpath() is truthy for virtually any input, so both
  # paths are effectively always joined onto |base| -- confirm that is intended.
  if os.path.relpath(source):
    source = os.path.join(base, source)
  if os.path.relpath(dest):
    dest = os.path.join(base, dest)
  args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices',
      '--output-format', 'human-readable-text', '--compile', dest, source]
  ibtool_section_re = re.compile(r'/\*.*\*/')
  ibtool_re = re.compile(r'.*note:.*is clipping its content')
  ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE)
  # Suppress the clipping notes; print the pending /* ... */ section header
  # only when some other message from that section is actually emitted.
  current_section_header = None
  for line in ibtoolout.stdout:
    if ibtool_section_re.match(line):
      current_section_header = line
    elif not ibtool_re.match(line):
      if current_section_header:
        sys.stdout.write(current_section_header)
        current_section_header = None
      sys.stdout.write(line)
  # BUG FIX: Popen.returncode stays None until the process is reaped; merely
  # draining stdout does not set it. wait() reaps the child and returns the
  # real exit status, so failures propagate to the caller.
  return ibtoolout.wait()
def _ConvertToBinary(self, dest):
  """Rewrites the plist at |dest| in binary1 format, in place, via plutil."""
  command = ['xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest]
  subprocess.check_call(command)
def _CopyStringsFile(self, source, dest, convert_to_binary):
  """Copies a .strings file using iconv to reconvert the input into UTF-16."""
  # Fall back to UTF-8 when no BOM identifies the source encoding.
  input_code = self._DetectInputEncoding(source) or "UTF-8"

  # Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
  # CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
  #     CFPropertyListCreateFromXMLData(): Old-style plist parser: missing
  #     semicolon in dictionary.
  # on invalid files. Do the same kind of validation.
  # NOTE(review): CoreFoundation is the macOS-only PyObjC binding; the import
  # is deliberately local so the module loads on other platforms.
  import CoreFoundation
  s = open(source, 'rb').read()
  d = CoreFoundation.CFDataCreate(None, s, len(s))
  _, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
  if error:
    # Invalid plist: bail out without writing |dest|.
    return

  # Re-encode the raw contents as UTF-16 (Python 2 str semantics: s is bytes).
  fp = open(dest, 'wb')
  fp.write(s.decode(input_code).encode('UTF-16'))
  fp.close()

  if convert_to_binary == 'True':
    self._ConvertToBinary(dest)
def _DetectInputEncoding(self, file_name):
"""Reads the first few bytes from file_name and tries to guess the text
encoding. Returns None as a guess if it can't detect it."""
fp = open(file_name, 'rb')
try:
header = fp.read(3)
except e:
fp.close()
return None
fp.close()
if header.startswith("\xFE\xFF"):
return "UTF-16"
elif header.startswith("\xFF\xFE"):
return "UTF-16"
elif header.startswith("\xEF\xBB\xBF"):
return "UTF-8"
else:
return None
  def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys):
    """Copies the |source| Info.plist to the destination directory |dest|.

    Expands ${VAR}, ${VAR:identifier} and ${VAR:rfc1034identifier} references
    from the environment, merges extra key/value pairs from |keys| (a single
    JSON-encoded dict, if given), writes the PkgInfo companion file, and
    optionally converts the result to a binary plist.
    """
    # Read the source Info.plist into memory.
    fd = open(source, 'r')
    lines = fd.read()
    fd.close()
    # Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
    plist = plistlib.readPlistFromString(lines)
    if keys:
      plist = dict(plist.items() + json.loads(keys[0]).items())
    lines = plistlib.writePlistToString(plist)
    # Go through all the environment variables and replace them as variables in
    # the file.
    IDENT_RE = re.compile(r'[/\s]')
    for key in os.environ:
      if key.startswith('_'):
        continue
      evar = '${%s}' % key
      evalue = os.environ[key]
      lines = string.replace(lines, evar, evalue)
      # Xcode supports various suffices on environment variables, which are
      # all undocumented. :rfc1034identifier is used in the standard project
      # template these days, and :identifier was used earlier. They are used to
      # convert non-url characters into things that look like valid urls --
      # except that the replacement character for :identifier, '_' isn't valid
      # in a URL either -- oops, hence :rfc1034identifier was born.
      evar = '${%s:identifier}' % key
      evalue = IDENT_RE.sub('_', os.environ[key])
      lines = string.replace(lines, evar, evalue)
      evar = '${%s:rfc1034identifier}' % key
      evalue = IDENT_RE.sub('-', os.environ[key])
      lines = string.replace(lines, evar, evalue)
    # Remove any keys with values that haven't been replaced.
    # NOTE: dropping lines[i - 1] assumes the <key> tag is on the line
    # immediately before the unexpanded <string> value.
    lines = lines.split('\n')
    for i in range(len(lines)):
      if lines[i].strip().startswith("<string>${"):
        lines[i] = None
        lines[i - 1] = None
    lines = '\n'.join(filter(lambda x: x is not None, lines))
    # Write out the file with variables replaced.
    fd = open(dest, 'w')
    fd.write(lines)
    fd.close()
    # Now write out PkgInfo file now that the Info.plist file has been
    # "compiled".
    self._WritePkgInfo(dest)
    if convert_to_binary == 'True':
      self._ConvertToBinary(dest)
def _WritePkgInfo(self, info_plist):
"""This writes the PkgInfo file from the data stored in Info.plist."""
plist = plistlib.readPlist(info_plist)
if not plist:
return
# Only create PkgInfo for executable types.
package_type = plist['CFBundlePackageType']
if package_type != 'APPL':
return
# The format of PkgInfo is eight characters, representing the bundle type
# and bundle signature, each four characters. If that is missing, four
# '?' characters are used instead.
signature_code = plist.get('CFBundleSignature', '????')
if len(signature_code) != 4: # Wrong length resets everything, too.
signature_code = '?' * 4
dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
fp = open(dest, 'w')
fp.write('%s%s' % (package_type, signature_code))
fp.close()
  def ExecFlock(self, lockfile, *cmd_list):
    """Emulates the most basic behavior of Linux's flock(1).

    Opens (creating if necessary) |lockfile|, takes an exclusive lock on it,
    then runs |cmd_list| and returns its exit status.
    """
    # Rely on exception handling to report errors.
    # The descriptor is deliberately never closed: the lock is held until this
    # helper process exits, at which point the OS releases it.
    fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
    fcntl.flock(fd, fcntl.LOCK_EX)
    return subprocess.call(cmd_list)
  def ExecFilterLibtool(self, *cmd_list):
    """Calls libtool and filters out '/path/to/libtool: file: foo.o has no
    symbols'."""
    # Noise patterns to suppress from libtool's stderr.
    libtool_re = re.compile(r'^.*libtool: file: .* has no symbols$')
    libtool_re5 = re.compile(
        r'^.*libtool: warning for library: ' +
        r'.* the table of contents is empty ' +
        r'\(no object file members in the library define global symbols\)$')
    env = os.environ.copy()
    # Ref:
    # http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c
    # The problem with this flag is that it resets the file mtime on the file to
    # epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone.
    env['ZERO_AR_DATE'] = '1'
    libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env)
    _, err = libtoolout.communicate()
    # Forward only the stderr lines that don't match the noise patterns.
    for line in err.splitlines():
      if not libtool_re.match(line) and not libtool_re5.match(line):
        print >>sys.stderr, line
    # Unconditionally touch the output .a file on the command line if present
    # and the command succeeded. A bit hacky.
    # (Compensates for ZERO_AR_DATE zeroing the archive's mtime above.)
    if not libtoolout.returncode:
      for i in range(len(cmd_list) - 1):
        if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'):
          os.utime(cmd_list[i+1], None)
          break
    return libtoolout.returncode
def ExecPackageFramework(self, framework, version):
"""Takes a path to Something.framework and the Current version of that and
sets up all the symlinks."""
# Find the name of the binary based on the part before the ".framework".
binary = os.path.basename(framework).split('.')[0]
CURRENT = 'Current'
RESOURCES = 'Resources'
VERSIONS = 'Versions'
if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)):
# Binary-less frameworks don't seem to contain symlinks (see e.g.
# chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
return
# Move into the framework directory to set the symlinks correctly.
pwd = os.getcwd()
os.chdir(framework)
# Set up the Current version.
self._Relink(version, os.path.join(VERSIONS, CURRENT))
# Set up the root symlinks.
self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary)
self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES)
# Back to where we were before!
os.chdir(pwd)
def _Relink(self, dest, link):
"""Creates a symlink to |dest| named |link|. If |link| already exists,
it is overwritten."""
if os.path.lexists(link):
os.remove(link)
os.symlink(dest, link)
  def ExecCompileXcassets(self, keys, *inputs):
    """Compiles multiple .xcassets files into a single .car file.

    This invokes 'actool' to compile all the inputs .xcassets files. The
    |keys| arguments is a json-encoded dictionary of extra arguments to
    pass to 'actool' when the asset catalogs contains an application icon
    or a launch image.

    Note that 'actool' does not create the Assets.car file if the asset
    catalogs does not contains imageset.
    """
    command_line = [
      'xcrun', 'actool', '--output-format', 'human-readable-text',
      '--compress-pngs', '--notices', '--warnings', '--errors',
    ]
    is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ
    if is_iphone_target:
      # CONFIGURATION looks like e.g. "Debug-iphoneos"; the suffix after the
      # last '-' names the platform.
      platform = os.environ['CONFIGURATION'].split('-')[-1]
      if platform not in ('iphoneos', 'iphonesimulator'):
        platform = 'iphonesimulator'
      command_line.extend([
          '--platform', platform, '--target-device', 'iphone',
          '--target-device', 'ipad', '--minimum-deployment-target',
          os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile',
          os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']),
      ])
    else:
      command_line.extend([
          '--platform', 'macosx', '--target-device', 'mac',
          '--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'],
          '--compile',
          os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']),
      ])
    if keys:
      # |keys| maps option names (sans leading '--') to bool / list / scalar
      # values; translate each entry into actool command-line arguments.
      keys = json.loads(keys)
      for key, value in keys.iteritems():
        arg_name = '--' + key
        if isinstance(value, bool):
          if value:
            command_line.append(arg_name)
        elif isinstance(value, list):
          for v in value:
            command_line.append(arg_name)
            command_line.append(str(v))
        else:
          command_line.append(arg_name)
          command_line.append(str(value))
    # Note: actool crashes if inputs path are relative, so use os.path.abspath
    # to get absolute path name for inputs.
    command_line.extend(map(os.path.abspath, inputs))
    subprocess.check_call(command_line)
def ExecMergeInfoPlist(self, output, *inputs):
"""Merge multiple .plist files into a single .plist file."""
merged_plist = {}
for path in inputs:
plist = self._LoadPlistMaybeBinary(path)
self._MergePlist(merged_plist, plist)
plistlib.writePlist(merged_plist, output)
  def ExecCodeSignBundle(self, key, resource_rules, entitlements, provisioning):
    """Code sign a bundle.

    This function tries to code sign an iOS bundle, following the same
    algorithm as Xcode:
      1. copy ResourceRules.plist from the user or the SDK into the bundle,
      2. pick the provisioning profile that best match the bundle identifier,
         and copy it into the bundle as embedded.mobileprovision,
      3. copy Entitlements.plist from user or SDK next to the bundle,
      4. code sign the bundle.
    """
    resource_rules_path = self._InstallResourceRules(resource_rules)
    substitutions, overrides = self._InstallProvisioningProfile(
        provisioning, self._GetCFBundleIdentifier())
    entitlements_path = self._InstallEntitlements(
        entitlements, substitutions, overrides)
    # NOTE(review): --resource-rules was later deprecated by Apple; confirm
    # the targeted Xcode/codesign version still accepts it.
    subprocess.check_call([
        'codesign', '--force', '--sign', key, '--resource-rules',
        resource_rules_path, '--entitlements', entitlements_path,
        os.path.join(
            os.environ['TARGET_BUILD_DIR'],
            os.environ['FULL_PRODUCT_NAME'])])
def _InstallResourceRules(self, resource_rules):
"""Installs ResourceRules.plist from user or SDK into the bundle.
Args:
resource_rules: string, optional, path to the ResourceRules.plist file
to use, default to "${SDKROOT}/ResourceRules.plist"
Returns:
Path to the copy of ResourceRules.plist into the bundle.
"""
source_path = resource_rules
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'ResourceRules.plist')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'], 'ResourceRules.plist')
shutil.copy2(source_path, target_path)
return target_path
def _InstallProvisioningProfile(self, profile, bundle_identifier):
"""Installs embedded.mobileprovision into the bundle.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple containing two dictionary: variables substitutions and values
to overrides when generating the entitlements file.
"""
source_path, provisioning_data, team_id = self._FindProvisioningProfile(
profile, bundle_identifier)
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'embedded.mobileprovision')
shutil.copy2(source_path, target_path)
substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.')
return substitutions, provisioning_data['Entitlements']
  def _FindProvisioningProfile(self, profile, bundle_identifier):
    """Finds the .mobileprovision file to use for signing the bundle.

    Checks all the installed provisioning profiles (or if the user specified
    the PROVISIONING_PROFILE variable, only consult it) and select the most
    specific that correspond to the bundle identifier.

    Args:
      profile: string, optional, short name of the .mobileprovision file
          to use, if empty or the file is missing, the best file installed
          will be used
      bundle_identifier: string, value of CFBundleIdentifier from Info.plist

    Returns:
      A tuple of the path to the selected provisioning profile, the data of
      the embedded plist in the provisioning profile and the team identifier
      to use for code signing.

    Raises:
      SystemExit: if no .mobileprovision can be used to sign the bundle.
    """
    profiles_dir = os.path.join(
        os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
    if not os.path.isdir(profiles_dir):
      print >>sys.stderr, (
          'cannot find mobile provisioning for %s' % bundle_identifier)
      sys.exit(1)
    provisioning_profiles = None
    if profile:
      profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
      if os.path.exists(profile_path):
        provisioning_profiles = [profile_path]
    if not provisioning_profiles:
      # No usable named profile: consider every installed profile.
      provisioning_profiles = glob.glob(
          os.path.join(profiles_dir, '*.mobileprovision'))
    valid_provisioning_profiles = {}
    for profile_path in provisioning_profiles:
      profile_data = self._LoadProvisioningProfile(profile_path)
      app_id_pattern = profile_data.get(
          'Entitlements', {}).get('application-identifier', '')
      for team_identifier in profile_data.get('TeamIdentifier', []):
        # An application-identifier is "<team-id>.<bundle-id>" and may contain
        # fnmatch wildcards; keep every profile whose pattern matches.
        app_id = '%s.%s' % (team_identifier, bundle_identifier)
        if fnmatch.fnmatch(app_id, app_id_pattern):
          valid_provisioning_profiles[app_id_pattern] = (
              profile_path, profile_data, team_identifier)
    if not valid_provisioning_profiles:
      print >>sys.stderr, (
          'cannot find mobile provisioning for %s' % bundle_identifier)
      sys.exit(1)
    # If the user has multiple provisioning profiles installed that can be
    # used for ${bundle_identifier}, pick the most specific one (ie. the
    # provisioning profile whose pattern is the longest).
    selected_key = max(valid_provisioning_profiles, key=lambda v: len(v))
    return valid_provisioning_profiles[selected_key]
  def _LoadProvisioningProfile(self, profile_path):
    """Extracts the plist embedded in a provisioning profile.

    The profile is a CMS-signed blob; `security cms -D` strips the signature
    and writes the embedded plist to the temporary file, which is then parsed.

    Args:
      profile_path: string, path to the .mobileprovision file

    Returns:
      Content of the plist embedded in the provisioning profile as a dictionary.
    """
    # NOTE: reopening a NamedTemporaryFile by name while it is still open
    # works on POSIX (this tool only runs on macOS); it would not on Windows.
    with tempfile.NamedTemporaryFile() as temp:
      subprocess.check_call([
          'security', 'cms', '-D', '-i', profile_path, '-o', temp.name])
      return self._LoadPlistMaybeBinary(temp.name)
def _MergePlist(self, merged_plist, plist):
"""Merge |plist| into |merged_plist|."""
for key, value in plist.iteritems():
if isinstance(value, dict):
merged_value = merged_plist.get(key, {})
if isinstance(merged_value, dict):
self._MergePlist(merged_value, value)
merged_plist[key] = merged_value
else:
merged_plist[key] = value
else:
merged_plist[key] = value
def _LoadPlistMaybeBinary(self, plist_path):
"""Loads into a memory a plist possibly encoded in binary format.
This is a wrapper around plistlib.readPlist that tries to convert the
plist to the XML format if it can't be parsed (assuming that it is in
the binary format).
Args:
plist_path: string, path to a plist file, in XML or binary format
Returns:
Content of the plist as a dictionary.
"""
try:
# First, try to read the file using plistlib that only supports XML,
# and if an exception is raised, convert a temporary copy to XML and
# load that copy.
return plistlib.readPlist(plist_path)
except:
pass
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(plist_path, temp.name)
subprocess.check_call(['plutil', '-convert', 'xml1', temp.name])
return plistlib.readPlist(temp.name)
def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix):
"""Constructs a dictionary of variable substitutions for Entitlements.plist.
Args:
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
app_identifier_prefix: string, value for AppIdentifierPrefix
Returns:
Dictionary of substitutions to apply when generating Entitlements.plist.
"""
return {
'CFBundleIdentifier': bundle_identifier,
'AppIdentifierPrefix': app_identifier_prefix,
}
def _GetCFBundleIdentifier(self):
"""Extracts CFBundleIdentifier value from Info.plist in the bundle.
Returns:
Value of CFBundleIdentifier in the Info.plist located in the bundle.
"""
info_plist_path = os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['INFOPLIST_PATH'])
info_plist_data = self._LoadPlistMaybeBinary(info_plist_path)
return info_plist_data['CFBundleIdentifier']
  def _InstallEntitlements(self, entitlements, substitutions, overrides):
    """Generates and install the ${BundleName}.xcent entitlements file.

    Expands variables "$(variable)" pattern in the source entitlements file,
    add extra entitlements defined in the .mobileprovision file and the copy
    the generated plist to "${BundlePath}.xcent".

    Args:
      entitlements: string, optional, path to the Entitlements.plist template
          to use, defaults to "${SDKROOT}/Entitlements.plist"
      substitutions: dictionary, variable substitutions
      overrides: dictionary, values to add to the entitlements

    Returns:
      Path to the generated entitlements file.
    """
    source_path = entitlements
    target_path = os.path.join(
        os.environ['BUILT_PRODUCTS_DIR'],
        os.environ['PRODUCT_NAME'] + '.xcent')
    if not source_path:
      # No template supplied: start from the SDK's stock entitlements.
      source_path = os.path.join(
          os.environ['SDKROOT'],
          'Entitlements.plist')
    shutil.copy2(source_path, target_path)
    data = self._LoadPlistMaybeBinary(target_path)
    data = self._ExpandVariables(data, substitutions)
    if overrides:
      # Template entries win; overrides only fill in missing keys.
      for key in overrides:
        if key not in data:
          data[key] = overrides[key]
    plistlib.writePlist(data, target_path)
    return target_path
def _ExpandVariables(self, data, substitutions):
"""Expands variables "$(variable)" in data.
Args:
data: object, can be either string, list or dictionary
substitutions: dictionary, variable substitutions to perform
Returns:
Copy of data where each references to "$(variable)" has been replaced
by the corresponding value found in substitutions, or left intact if
the key was not found.
"""
if isinstance(data, str):
for key, value in substitutions.iteritems():
data = data.replace('$(%s)' % key, value)
return data
if isinstance(data, list):
return [self._ExpandVariables(v, substitutions) for v in data]
if isinstance(data, dict):
return {k: self._ExpandVariables(data[k], substitutions) for k in data}
return data
if __name__ == '__main__':
  # Entry point: forward the command-line arguments (minus argv[0]) to main()
  # and propagate its return value as the process exit status.
  sys.exit(main(sys.argv[1:]))
| mit |
jhsenjaliya/incubator-airflow | airflow/hooks/dbapi_hook.py | 14 | 9338 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import str
from past.builtins import basestring
from datetime import datetime
from contextlib import closing
import sys
from sqlalchemy import create_engine
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
class DbApiHook(BaseHook):
    """
    Abstract base class for sql hooks.

    Concrete subclasses configure the four class attributes below and, where
    needed, override ``get_conn`` / ``set_autocommit`` for their driver.
    """
    # Override to provide the connection name.
    conn_name_attr = None
    # Override to have a default connection id for a particular dbHook
    default_conn_name = 'default_conn_id'
    # Override if this db supports autocommit.
    supports_autocommit = False
    # Override with the object that exposes the connect method
    connector = None
    def __init__(self, *args, **kwargs):
        # The connection id may arrive as the single positional argument, as
        # a keyword named after conn_name_attr, or default to
        # default_conn_name; whichever applies is stored on the attribute the
        # subclass declared via conn_name_attr.
        if not self.conn_name_attr:
            raise AirflowException("conn_name_attr is not defined")
        elif len(args) == 1:
            setattr(self, self.conn_name_attr, args[0])
        elif self.conn_name_attr not in kwargs:
            setattr(self, self.conn_name_attr, self.default_conn_name)
        else:
            setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
    def get_conn(self):
        """Returns a connection object
        """
        db = self.get_connection(getattr(self, self.conn_name_attr))
        # NOTE(review): the keyword names (username/schema) must match what
        # the subclass's `connector.connect` expects -- verify per driver.
        return self.connector.connect(
            host=db.host,
            port=db.port,
            username=db.login,
            schema=db.schema)
    def get_uri(self):
        # Builds a SQLAlchemy-style URI
        # (conn_type://login:password@host:port/schema) from the stored
        # Airflow connection; login/port segments are omitted when unset.
        conn = self.get_connection(getattr(self, self.conn_name_attr))
        login = ''
        if conn.login:
            login = '{conn.login}:{conn.password}@'.format(conn=conn)
        host = conn.host
        if conn.port is not None:
            host += ':{port}'.format(port=conn.port)
        return '{conn.conn_type}://{login}{host}/{conn.schema}'.format(
            conn=conn, login=login, host=host)
    def get_sqlalchemy_engine(self, engine_kwargs=None):
        # Creates a SQLAlchemy engine for this hook's connection URI;
        # engine_kwargs are forwarded verbatim to create_engine.
        if engine_kwargs is None:
            engine_kwargs = {}
        return create_engine(self.get_uri(), **engine_kwargs)
    def get_pandas_df(self, sql, parameters=None):
        """
        Executes the sql and returns a pandas dataframe

        :param sql: the sql statement to be executed (str) or a list of
            sql statements to execute
        :type sql: str or list
        :param parameters: The parameters to render the SQL query with.
        :type parameters: mapping or iterable
        """
        # Python 2 needs the statement as a byte string.
        if sys.version_info[0] < 3:
            sql = sql.encode('utf-8')
        import pandas.io.sql as psql
        with closing(self.get_conn()) as conn:
            return psql.read_sql(sql, con=conn, params=parameters)
    def get_records(self, sql, parameters=None):
        """
        Executes the sql and returns a set of records.

        :param sql: the sql statement to be executed (str) or a list of
            sql statements to execute
        :type sql: str or list
        :param parameters: The parameters to render the SQL query with.
        :type parameters: mapping or iterable
        """
        if sys.version_info[0] < 3:
            sql = sql.encode('utf-8')
        with closing(self.get_conn()) as conn:
            with closing(conn.cursor()) as cur:
                if parameters is not None:
                    cur.execute(sql, parameters)
                else:
                    cur.execute(sql)
                return cur.fetchall()
    def get_first(self, sql, parameters=None):
        """
        Executes the sql and returns the first resulting row.

        :param sql: the sql statement to be executed (str) or a list of
            sql statements to execute
        :type sql: str or list
        :param parameters: The parameters to render the SQL query with.
        :type parameters: mapping or iterable
        """
        if sys.version_info[0] < 3:
            sql = sql.encode('utf-8')
        with closing(self.get_conn()) as conn:
            with closing(conn.cursor()) as cur:
                if parameters is not None:
                    cur.execute(sql, parameters)
                else:
                    cur.execute(sql)
                return cur.fetchone()
    def run(self, sql, autocommit=False, parameters=None):
        """
        Runs a command or a list of commands. Pass a list of sql
        statements to the sql parameter to get them to execute
        sequentially

        :param sql: the sql statement to be executed (str) or a list of
            sql statements to execute
        :type sql: str or list
        :param autocommit: What to set the connection's autocommit setting to
            before executing the query.
        :type autocommit: bool
        :param parameters: The parameters to render the SQL query with.
        :type parameters: mapping or iterable
        """
        # Normalize the single-statement case to a one-element list.
        if isinstance(sql, basestring):
            sql = [sql]
        with closing(self.get_conn()) as conn:
            if self.supports_autocommit:
                self.set_autocommit(conn, autocommit)
            with closing(conn.cursor()) as cur:
                for s in sql:
                    if sys.version_info[0] < 3:
                        s = s.encode('utf-8')
                    self.log.info(s)
                    if parameters is not None:
                        cur.execute(s, parameters)
                    else:
                        cur.execute(s)
            # Single commit after all statements have run.
            conn.commit()
    def set_autocommit(self, conn, autocommit):
        # Default implementation: assume the DB-API connection exposes a
        # plain `autocommit` attribute; drivers that differ must override.
        conn.autocommit = autocommit
    def get_cursor(self):
        """
        Returns a cursor
        """
        return self.get_conn().cursor()
    def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
        """
        A generic way to insert a set of tuples into a table,
        a new transaction is created every commit_every rows

        :param table: Name of the target table
        :type table: str
        :param rows: The rows to insert into the table
        :type rows: iterable of tuples
        :param target_fields: The names of the columns to fill in the table
        :type target_fields: iterable of strings
        :param commit_every: The maximum number of rows to insert in one
            transaction. Set to 0 to insert all rows in one transaction.
        :type commit_every: int
        """
        if target_fields:
            target_fields = ", ".join(target_fields)
            target_fields = "({})".format(target_fields)
        else:
            target_fields = ''
        with closing(self.get_conn()) as conn:
            if self.supports_autocommit:
                # Explicit transactions: disable autocommit and flush any
                # implicit open transaction before starting.
                self.set_autocommit(conn, False)
            conn.commit()
            with closing(conn.cursor()) as cur:
                for i, row in enumerate(rows, 1):
                    l = []
                    for cell in row:
                        l.append(self._serialize_cell(cell, conn))
                    values = tuple(l)
                    # NOTE(review): assumes the driver uses the "%s"
                    # paramstyle (pyformat/format); values are bound by the
                    # driver, not string-interpolated.
                    placeholders = ["%s",]*len(values)
                    sql = "INSERT INTO {0} {1} VALUES ({2});".format(
                        table,
                        target_fields,
                        ",".join(placeholders))
                    cur.execute(sql, values)
                    if commit_every and i % commit_every == 0:
                        conn.commit()
                        self.log.info(
                            "Loaded {i} into {table} rows so far".format(**locals())
                        )
            conn.commit()
        self.log.info(
            "Done loading. Loaded a total of {i} rows".format(**locals()))
    @staticmethod
    def _serialize_cell(cell, conn=None):
        """
        Returns the SQL literal of the cell as a string.

        :param cell: The cell to insert into the table
        :type cell: object
        :param conn: The database connection
        :type conn: connection object
        :return: The serialized cell
        :rtype: str
        """
        # None passes through so the driver binds SQL NULL; datetimes are
        # rendered as ISO-8601; everything else is stringified.
        if cell is None:
            return None
        if isinstance(cell, datetime):
            return cell.isoformat()
        return str(cell)
    def bulk_dump(self, table, tmp_file):
        """
        Dumps a database table into a tab-delimited file

        :param table: The name of the source table
        :type table: str
        :param tmp_file: The path of the target file
        :type tmp_file: str
        """
        raise NotImplementedError()
    def bulk_load(self, table, tmp_file):
        """
        Loads a tab-delimited file into a database table

        :param table: The name of the target table
        :type table: str
        :param tmp_file: The path of the file to load into the table
        :type tmp_file: str
        """
        raise NotImplementedError()
| apache-2.0 |
cstipkovic/spidermonkey-research | python/mock-1.0.0/mock.py | 12 | 75204 | # mock.py
# Test tools for mocking and patching.
# Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# mock 1.0
# http://www.voidspace.org.uk/python/mock/
# Released subject to the BSD License
# Please see http://www.voidspace.org.uk/python/license.shtml
# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
# Comments, suggestions and bug reports welcome.
# Public API of the mock library: star-imports expose exactly these names.
__all__ = (
    'Mock',
    'MagicMock',
    'patch',
    'sentinel',
    'DEFAULT',
    'ANY',
    'call',
    'create_autospec',
    'FILTER_DIR',
    'NonCallableMock',
    'NonCallableMagicMock',
    'mock_open',
    'PropertyMock',
)
__version__ = '1.0.0'
import pprint
import sys
try:
    import inspect
except ImportError:
    # for alternative platforms that
    # may not have inspect
    inspect = None
try:
    from functools import wraps
except ImportError:
    # Python 2.4 compatibility
    def wraps(original):
        def inner(f):
            f.__name__ = original.__name__
            f.__doc__ = original.__doc__
            f.__module__ = original.__module__
            return f
        return inner
try:
    unicode
except NameError:
    # Python 3
    basestring = unicode = str
try:
    long
except NameError:
    # Python 3
    long = int
try:
    BaseException
except NameError:
    # Python 2.4 compatibility
    BaseException = Exception
try:
    next
except NameError:
    # Python 2.5 compatibility: emulate the next() builtin.
    def next(obj):
        return obj.next()
BaseExceptions = (BaseException,)
if 'java' in sys.platform:
    # jython
    import java
    BaseExceptions = (BaseException, java.lang.Throwable)
try:
    _isidentifier = str.isidentifier
except AttributeError:
    # Python 2.X
    import keyword
    import re
    regex = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
    def _isidentifier(string):
        if string in keyword.kwlist:
            return False
        return regex.match(string)
inPy3k = sys.version_info[0] == 3
# Needed to work around Python 3 bug where use of "super" interferes with
# defining __class__ as a descriptor
_super = super
# Module-level constants (note: `self` here is a string naming the
# bound-method attribute, NOT a method receiver).
self = 'im_self'
builtin = '__builtin__'
if inPy3k:
    self = '__self__'
    builtin = 'builtins'
FILTER_DIR = True
def _is_instance_mock(obj):
    """Return True if `obj` is an instance of any mock class.

    isinstance() cannot be used because mocks override __class__, so the
    real type is checked against NonCallableMock, the root of the mock
    class hierarchy.
    """
    return issubclass(type(obj), NonCallableMock)
def _is_exception(obj):
    """Return True if `obj` is an exception instance or an exception class."""
    if isinstance(obj, BaseExceptions):
        return True
    return isinstance(obj, ClassTypes) and issubclass(obj, BaseExceptions)
class _slotted(object):
    """Minimal class with __slots__, used only to obtain the C-level
    'member descriptor' type for DescriptorTypes below."""
    __slots__ = ['a']
# Descriptor types the attribute-mocking machinery treats specially:
# slot member descriptors and properties.
DescriptorTypes = (
    type(_slotted.a),
    property,
)
def _getsignature(func, skipfirst, instance=False):
    """Return (signature_string, callable) for `func`, or None if the
    signature cannot be introspected (e.g. builtins implemented in C).

    Classes are resolved to their __init__, other non-functions to their
    __call__; `skipfirst` drops the leading (self/cls) argument.
    """
    if inspect is None:
        raise ImportError('inspect module not available')

    if isinstance(func, ClassTypes) and not instance:
        try:
            func = func.__init__
        except AttributeError:
            return
        skipfirst = True
    elif not isinstance(func, FunctionTypes):
        # for classes where instance is True we end up here too
        try:
            func = func.__call__
        except AttributeError:
            return

    if inPy3k:
        try:
            argspec = inspect.getfullargspec(func)
        except TypeError:
            # C function / method, possibly inherited object().__init__
            return
        regargs, varargs, varkw, defaults, kwonly, kwonlydef, ann = argspec
    else:
        try:
            regargs, varargs, varkwargs, defaults = inspect.getargspec(func)
        except TypeError:
            # C function / method, possibly inherited object().__init__
            return

    # instance methods and classmethods need to lose the self argument
    # (`self` here is the module-level constant naming im_self/__self__).
    if getattr(func, self, None) is not None:
        regargs = regargs[1:]
    if skipfirst:
        # this condition and the above one are never both True - why?
        regargs = regargs[1:]

    if inPy3k:
        signature = inspect.formatargspec(
            regargs, varargs, varkw, defaults,
            kwonly, kwonlydef, ann, formatvalue=lambda value: "")
    else:
        signature = inspect.formatargspec(
            regargs, varargs, varkwargs, defaults,
            formatvalue=lambda value: "")
    # Strip the surrounding parentheses from the formatted argspec.
    return signature[1:-1], func
def _check_signature(func, mock, skipfirst, instance=False):
    """Attach a signature-checking stub to `mock`'s class.

    Builds a lambda with the same signature as `func` (via eval) and stores
    it as `_mock_check_sig`, so calling the mock with wrong arguments raises
    a TypeError just like the real callable would.
    """
    if not _callable(func):
        return

    result = _getsignature(func, skipfirst, instance)
    if result is None:
        # Signature could not be introspected; skip checking entirely.
        return
    signature, func = result

    # can't use self because "self" is common as an argument name
    # unfortunately even not in the first place
    src = "lambda _mock_self, %s: None" % signature
    checksig = eval(src, {})
    _copy_func_details(func, checksig)
    type(mock)._mock_check_sig = checksig
def _copy_func_details(func, funcopy):
    """Copy identity metadata (name, doc, module, argument defaults) from
    `func` onto `funcopy` so the copy masquerades as the original."""
    for attribute in ('__name__', '__doc__', '__module__'):
        setattr(funcopy, attribute, getattr(func, attribute))
    if inPy3k:
        funcopy.__defaults__ = func.__defaults__
        funcopy.__kwdefaults__ = func.__kwdefaults__
    else:
        # Python 2 spells the defaults attribute differently.
        funcopy.func_defaults = func.func_defaults
def _callable(obj):
    """Return True if `obj` is a class or exposes a __call__ attribute."""
    if isinstance(obj, ClassTypes):
        return True
    return getattr(obj, '__call__', None) is not None
def _is_list(obj):
    """Return True for exact list/tuple instances (subclasses excluded).

    Deliberately an exact type check rather than isinstance.
    XXXX badly named!
    """
    return type(obj) is list or type(obj) is tuple
def _instance_callable(obj):
    """Given an object, return True if the object is callable.
    For classes, return True if instances would be callable."""
    if not isinstance(obj, ClassTypes):
        # Already an instance: just look for a __call__ attribute.
        return getattr(obj, '__call__', None) is not None
    # Walk __bases__ rather than __mro__ so old-style classes work too.
    if obj.__dict__.get('__call__') is not None:
        return True
    return any(_instance_callable(base) for base in obj.__bases__)
def _set_signature(mock, original, instance=False):
    # creates a function with signature (*args, **kwargs) that delegates to a
    # mock. It still does signature checking by calling a lambda with the same
    # signature as the original.
    if not _callable(original):
        return

    skipfirst = isinstance(original, ClassTypes)
    result = _getsignature(original, skipfirst, instance)
    if result is None:
        # was a C function (e.g. object().__init__ ) that can't be mocked
        return
    signature, func = result

    # Build a throwaway lambda whose only job is to raise TypeError on a
    # signature mismatch before the call is forwarded to the mock.
    src = "lambda %s: None" % signature
    checksig = eval(src, {})
    _copy_func_details(func, checksig)

    name = original.__name__
    if not _isidentifier(name):
        # Fall back to a safe identifier when the original's name can't be
        # used in generated source (e.g. lambdas or mangled names).
        name = 'funcopy'
    context = {'_checksig_': checksig, 'mock': mock}
    src = """def %s(*args, **kwargs):
    _checksig_(*args, **kwargs)
    return mock(*args, **kwargs)""" % name
    exec (src, context)
    funcopy = context[name]
    _setup_func(funcopy, mock)
    return funcopy
def _setup_func(funcopy, mock):
    """Wire the generated delegate function `funcopy` up to `mock`.

    Mirrors the mock's assertion helpers and bookkeeping attributes onto the
    function so it can be used interchangeably with the mock, and registers
    it as the mock's delegate.
    """
    funcopy.mock = mock

    # can't use isinstance with mocks
    if not _is_instance_mock(mock):
        return

    # Thin forwarders so assertions made on the function hit the real mock.
    def assert_called_with(*args, **kwargs):
        return mock.assert_called_with(*args, **kwargs)
    def assert_called_once_with(*args, **kwargs):
        return mock.assert_called_once_with(*args, **kwargs)
    def assert_has_calls(*args, **kwargs):
        return mock.assert_has_calls(*args, **kwargs)
    def assert_any_call(*args, **kwargs):
        return mock.assert_any_call(*args, **kwargs)
    def reset_mock():
        # Clear call records on the function itself, then on the mock, then
        # on the configured return value (unless it IS the mock, to avoid
        # infinite recursion).
        funcopy.method_calls = _CallList()
        funcopy.mock_calls = _CallList()
        mock.reset_mock()
        ret = funcopy.return_value
        if _is_instance_mock(ret) and not ret is mock:
            ret.reset_mock()

    # Fresh call-tracking state, mirrored from the mock.
    funcopy.called = False
    funcopy.call_count = 0
    funcopy.call_args = None
    funcopy.call_args_list = _CallList()
    funcopy.method_calls = _CallList()
    funcopy.mock_calls = _CallList()

    funcopy.return_value = mock.return_value
    funcopy.side_effect = mock.side_effect
    funcopy._mock_children = mock._mock_children

    funcopy.assert_called_with = assert_called_with
    funcopy.assert_called_once_with = assert_called_once_with
    funcopy.assert_has_calls = assert_has_calls
    funcopy.assert_any_call = assert_any_call
    funcopy.reset_mock = reset_mock

    mock._mock_delegate = funcopy
def _is_magic(name):
    """Return True if `name` is a dunder name of the form '__x__'."""
    inner = name[2:-2]
    return name == '__%s__' % inner
class _SentinelObject(object):
    """A unique, named, sentinel object.

    Compares only by identity; the name exists purely for the repr.
    """

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return 'sentinel.%s' % self.name
class _Sentinel(object):
    """Access attributes to return a named object, usable as a sentinel.

    Each attribute name maps to exactly one _SentinelObject, created on
    first access and cached thereafter.
    """

    def __init__(self):
        self._sentinels = {}

    def __getattr__(self, name):
        if name == '__bases__':
            # help(mock) probes __bases__; creating a sentinel for it would
            # break help(), so report the attribute as missing instead.
            raise AttributeError
        return self._sentinels.setdefault(name, _SentinelObject(name))
# The single shared sentinel factory and the well-known sentinels used
# throughout the module.
sentinel = _Sentinel()
DEFAULT = sentinel.DEFAULT
_missing = sentinel.MISSING
_deleted = sentinel.DELETED
class OldStyleClass:
    # Python 2 old-style class; exists only so ClassType below captures the
    # classobj type (on Python 3 this is just `type`).
    pass
ClassType = type(OldStyleClass)
def _copy(value):
    """Shallow-copy builtin containers; return anything else unchanged."""
    cls = type(value)
    if cls in (dict, list, tuple, set):
        return cls(value)
    return value
# Tuple of "what counts as a class" for isinstance checks: on Python 2 this
# also includes old-style classes (classobj).
ClassTypes = (type,)
if not inPy3k:
    ClassTypes = (type, ClassType)
# Attribute names that NonCallableMock.__setattr__ allows through without
# treating them as configured mock children.
_allowed_names = set(
    [
        'return_value', '_mock_return_value', 'side_effect',
        '_mock_side_effect', '_mock_parent', '_mock_new_parent',
        '_mock_name', '_mock_new_name'
    ]
)
def _delegating_property(name):
    """Create a property that reads/writes `_mock_<name>` locally, or
    delegates to `self._mock_delegate` when one is installed.

    Also registers `name` in _allowed_names so __setattr__ lets it through.
    """
    _allowed_names.add(name)
    _the_name = '_mock_' + name
    # `name=name, _the_name=_the_name` default-argument bindings freeze the
    # current values into the closures (avoids the late-binding pitfall).
    def _get(self, name=name, _the_name=_the_name):
        sig = self._mock_delegate
        if sig is None:
            return getattr(self, _the_name)
        return getattr(sig, name)
    def _set(self, value, name=name, _the_name=_the_name):
        sig = self._mock_delegate
        if sig is None:
            self.__dict__[_the_name] = value
        else:
            setattr(sig, name, value)

    return property(_get, _set)
class _CallList(list):
def __contains__(self, value):
if not isinstance(value, list):
return list.__contains__(self, value)
len_value = len(value)
len_self = len(self)
if len_value > len_self:
return False
for i in range(0, len_self - len_value + 1):
sub_list = self[i:i+len_value]
if sub_list == value:
return True
return False
def __repr__(self):
return pprint.pformat(list(self))
def _check_and_set_parent(parent, value, name, new_name):
    """Adopt `value` (a mock) as a child of `parent`, wiring up the
    name/parent bookkeeping used for call recording and the repr.

    Returns True when the parenting was applied; False when `value` is not
    a mock, is already named or attached elsewhere, or is an ancestor of
    (or identical to) `parent`.
    """
    if not _is_instance_mock(value):
        return False
    already_attached = (
        (value._mock_name or value._mock_new_name) or
        (value._mock_parent is not None) or
        (value._mock_new_parent is not None)
    )
    if already_attached:
        return False

    # setting a mock (value) as a child or return value of itself
    # should not modify the mock
    ancestor = parent
    while ancestor is not None:
        if ancestor is value:
            return False
        ancestor = ancestor._mock_new_parent

    if new_name:
        value._mock_new_parent = parent
        value._mock_new_name = new_name
    if name:
        value._mock_parent = parent
        value._mock_name = name
    return True
class Base(object):
    """Shared root of the mock class hierarchy: holds the default slots for
    return value and side effect and terminates cooperative __init__ chains."""

    _mock_return_value = DEFAULT
    _mock_side_effect = None

    def __init__(self, *args, **kwargs):
        # Accept and ignore every argument so subclass __init__ chains
        # can always delegate upwards safely.
        pass
class NonCallableMock(Base):
    """A non-callable version of `Mock`"""

    def __new__(cls, *args, **kw):
        # every instance has its own class
        # so we can create magic methods on the
        # class without stomping on other mocks
        new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__})
        instance = object.__new__(new)
        return instance

    def __init__(
        self, spec=None, wraps=None, name=None, spec_set=None,
        parent=None, _spec_state=None, _new_name='', _new_parent=None,
        **kwargs
    ):
        # `parent`/`name` describe attribute children (drives method_calls);
        # `_new_parent`/`_new_name` describe how this mock was reached
        # (drives mock_calls and the repr).
        if _new_parent is None:
            _new_parent = parent

        # Write through __dict__ directly to bypass __setattr__.
        __dict__ = self.__dict__
        __dict__['_mock_parent'] = parent
        __dict__['_mock_name'] = name
        __dict__['_mock_new_name'] = _new_name
        __dict__['_mock_new_parent'] = _new_parent

        if spec_set is not None:
            # spec_set implies spec
            spec = spec_set
            spec_set = True

        self._mock_add_spec(spec, spec_set)

        __dict__['_mock_children'] = {}
        __dict__['_mock_wraps'] = wraps
        __dict__['_mock_delegate'] = None

        __dict__['_mock_called'] = False
        __dict__['_mock_call_args'] = None
        __dict__['_mock_call_count'] = 0
        __dict__['_mock_call_args_list'] = _CallList()
        __dict__['_mock_mock_calls'] = _CallList()

        __dict__['method_calls'] = _CallList()

        if kwargs:
            self.configure_mock(**kwargs)

        _super(NonCallableMock, self).__init__(
            spec, wraps, name, spec_set, parent,
            _spec_state
        )

    def attach_mock(self, mock, attribute):
        """
        Attach a mock as an attribute of this one, replacing its name and
        parent. Calls to the attached mock will be recorded in the
        `method_calls` and `mock_calls` attributes of this one."""
        # Clear any existing parentage so the __setattr__ machinery
        # (via _check_and_set_parent) will adopt the mock.
        mock._mock_parent = None
        mock._mock_new_parent = None
        mock._mock_name = ''
        mock._mock_new_name = None

        setattr(self, attribute, mock)

    def mock_add_spec(self, spec, spec_set=False):
        """Add a spec to a mock. `spec` can either be an object or a
        list of strings. Only attributes on the `spec` can be fetched as
        attributes from the mock.

        If `spec_set` is True then only attributes on the spec can be set."""
        self._mock_add_spec(spec, spec_set)

    def _mock_add_spec(self, spec, spec_set):
        # Normalise `spec` to a list of allowed attribute names, keeping the
        # spec's class (if any) so __class__ can report it for isinstance.
        _spec_class = None

        if spec is not None and not _is_list(spec):
            if isinstance(spec, ClassTypes):
                _spec_class = spec
            else:
                _spec_class = _get_class(spec)

            spec = dir(spec)

        __dict__ = self.__dict__
        __dict__['_spec_class'] = _spec_class
        __dict__['_spec_set'] = spec_set
        __dict__['_mock_methods'] = spec

    def __get_return_value(self):
        ret = self._mock_return_value
        if self._mock_delegate is not None:
            ret = self._mock_delegate.return_value

        if ret is DEFAULT:
            # lazily create the child mock used as the return value
            ret = self._get_child_mock(
                _new_parent=self, _new_name='()'
            )
            self.return_value = ret
        return ret

    def __set_return_value(self, value):
        if self._mock_delegate is not None:
            self._mock_delegate.return_value = value
        else:
            self._mock_return_value = value
            _check_and_set_parent(self, value, None, '()')

    __return_value_doc = "The value to be returned when the mock is called."
    return_value = property(__get_return_value, __set_return_value,
                            __return_value_doc)

    @property
    def __class__(self):
        # Report the spec's class (when there is one) so specced mocks pass
        # isinstance checks.
        if self._spec_class is None:
            return type(self)
        return self._spec_class

    called = _delegating_property('called')
    call_count = _delegating_property('call_count')
    call_args = _delegating_property('call_args')
    call_args_list = _delegating_property('call_args_list')
    mock_calls = _delegating_property('mock_calls')

    def __get_side_effect(self):
        sig = self._mock_delegate
        if sig is None:
            return self._mock_side_effect
        return sig.side_effect

    def __set_side_effect(self, value):
        # iterables become iterators here (see _try_iter)
        value = _try_iter(value)
        sig = self._mock_delegate
        if sig is None:
            self._mock_side_effect = value
        else:
            sig.side_effect = value

    side_effect = property(__get_side_effect, __set_side_effect)

    def reset_mock(self):
        "Restore the mock object to its initial state."
        self.called = False
        self.call_args = None
        self.call_count = 0
        self.mock_calls = _CallList()
        self.call_args_list = _CallList()
        self.method_calls = _CallList()

        # recurse into child mocks and an already-created return value
        for child in self._mock_children.values():
            if isinstance(child, _SpecState):
                continue
            child.reset_mock()

        ret = self._mock_return_value
        if _is_instance_mock(ret) and ret is not self:
            ret.reset_mock()

    def configure_mock(self, **kwargs):
        """Set attributes on the mock through keyword arguments.

        Attributes plus return values and side effects can be set on child
        mocks using standard dot notation and unpacking a dictionary in the
        method call:

        >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
        >>> mock.configure_mock(**attrs)"""
        for arg, val in sorted(kwargs.items(),
                               # we sort on the number of dots so that
                               # attributes are set before we set attributes on
                               # attributes
                               key=lambda entry: entry[0].count('.')):
            args = arg.split('.')
            final = args.pop()
            obj = self
            for entry in args:
                obj = getattr(obj, entry)
            setattr(obj, final, val)

    def __getattr__(self, name):
        if name == '_mock_methods':
            # guard against recursing before the attribute exists
            raise AttributeError(name)
        elif self._mock_methods is not None:
            if name not in self._mock_methods or name in _all_magics:
                raise AttributeError("Mock object has no attribute %r" % name)
        elif _is_magic(name):
            raise AttributeError(name)

        result = self._mock_children.get(name)
        if result is _deleted:
            raise AttributeError(name)
        elif result is None:
            wraps = None
            if self._mock_wraps is not None:
                # XXXX should we get the attribute without triggering code
                # execution?
                wraps = getattr(self._mock_wraps, name)

            # first access: create and cache the child mock
            result = self._get_child_mock(
                parent=self, name=name, wraps=wraps, _new_name=name,
                _new_parent=self
            )
            self._mock_children[name] = result

        elif isinstance(result, _SpecState):
            # lazily realise an autospec placeholder
            result = create_autospec(
                result.spec, result.spec_set, result.instance,
                result.parent, result.name
            )
            self._mock_children[name] = result

        return result

    def __repr__(self):
        # Build the dotted name by walking up the _mock_new_parent chain.
        _name_list = [self._mock_new_name]
        _parent = self._mock_new_parent
        last = self

        dot = '.'
        if _name_list == ['()']:
            dot = ''
        seen = set()
        while _parent is not None:
            last = _parent

            _name_list.append(_parent._mock_new_name + dot)
            dot = '.'
            if _parent._mock_new_name == '()':
                dot = ''

            _parent = _parent._mock_new_parent

            # use ids here so as not to call __hash__ on the mocks
            if id(_parent) in seen:
                break
            seen.add(id(_parent))

        _name_list = list(reversed(_name_list))
        _first = last._mock_name or 'mock'
        if len(_name_list) > 1:
            if _name_list[1] not in ('()', '().'):
                _first += '.'
        _name_list[0] = _first
        name = ''.join(_name_list)

        name_string = ''
        if name not in ('mock', 'mock.'):
            name_string = ' name=%r' % name

        spec_string = ''
        if self._spec_class is not None:
            spec_string = ' spec=%r'
            if self._spec_set:
                spec_string = ' spec_set=%r'
            spec_string = spec_string % self._spec_class.__name__
        return "<%s%s%s id='%s'>" % (
            type(self).__name__,
            name_string,
            spec_string,
            id(self)
        )

    def __dir__(self):
        """Filter the output of `dir(mock)` to only useful members.
        XXXX
        """
        extras = self._mock_methods or []
        from_type = dir(type(self))
        from_dict = list(self.__dict__)

        if FILTER_DIR:
            # hide private machinery, but keep magic methods visible
            from_type = [e for e in from_type if not e.startswith('_')]
            from_dict = [e for e in from_dict if not e.startswith('_') or
                         _is_magic(e)]
        return sorted(set(extras + from_type + from_dict +
                          list(self._mock_children)))

    def __setattr__(self, name, value):
        if name in _allowed_names:
            # property setters go through here
            return object.__setattr__(self, name, value)
        elif (self._spec_set and self._mock_methods is not None and
              name not in self._mock_methods and
              name not in self.__dict__):
            raise AttributeError("Mock object has no attribute '%s'" % name)
        elif name in _unsupported_magics:
            msg = 'Attempting to set unsupported magic method %r.' % name
            raise AttributeError(msg)
        elif name in _all_magics:
            if self._mock_methods is not None and name not in self._mock_methods:
                raise AttributeError("Mock object has no attribute '%s'" % name)

            if not _is_instance_mock(value):
                # plain value/function: install on the per-instance class,
                # bound so the magic method receives `self`
                setattr(type(self), name, _get_method(name, value))
                original = value
                value = lambda *args, **kw: original(self, *args, **kw)
            else:
                # only set _new_name and not name so that mock_calls is tracked
                # but not method calls
                _check_and_set_parent(self, value, None, name)
                setattr(type(self), name, value)
                self._mock_children[name] = value
        elif name == '__class__':
            self._spec_class = value
            return
        else:
            if _check_and_set_parent(self, value, name, name):
                self._mock_children[name] = value
        return object.__setattr__(self, name, value)

    def __delattr__(self, name):
        if name in _all_magics and name in type(self).__dict__:
            delattr(type(self), name)
            if name not in self.__dict__:
                # for magic methods that are still MagicProxy objects and
                # not set on the instance itself
                return

        if name in self.__dict__:
            object.__delattr__(self, name)

        obj = self._mock_children.get(name, _missing)
        if obj is _deleted:
            raise AttributeError(name)
        if obj is not _missing:
            del self._mock_children[name]
        # remember the deletion so later access raises AttributeError
        self._mock_children[name] = _deleted

    def _format_mock_call_signature(self, args, kwargs):
        name = self._mock_name or 'mock'
        return _format_call_signature(name, args, kwargs)

    def _format_mock_failure_message(self, args, kwargs):
        message = 'Expected call: %s\nActual call: %s'
        expected_string = self._format_mock_call_signature(args, kwargs)
        call_args = self.call_args
        # a three-tuple call includes the name; drop it for formatting
        if len(call_args) == 3:
            call_args = call_args[1:]
        actual_string = self._format_mock_call_signature(*call_args)
        return message % (expected_string, actual_string)

    def assert_called_with(_mock_self, *args, **kwargs):
        """assert that the mock was called with the specified arguments.

        Raises an AssertionError if the args and keyword args passed in are
        different to the last call to the mock."""
        self = _mock_self
        if self.call_args is None:
            expected = self._format_mock_call_signature(args, kwargs)
            raise AssertionError('Expected call: %s\nNot called' % (expected,))

        if self.call_args != (args, kwargs):
            msg = self._format_mock_failure_message(args, kwargs)
            raise AssertionError(msg)

    def assert_called_once_with(_mock_self, *args, **kwargs):
        """assert that the mock was called exactly once and with the specified
        arguments."""
        self = _mock_self
        if not self.call_count == 1:
            msg = ("Expected to be called once. Called %s times." %
                   self.call_count)
            raise AssertionError(msg)
        return self.assert_called_with(*args, **kwargs)

    def assert_has_calls(self, calls, any_order=False):
        """assert the mock has been called with the specified calls.
        The `mock_calls` list is checked for the calls.

        If `any_order` is False (the default) then the calls must be
        sequential. There can be extra calls before or after the
        specified calls.

        If `any_order` is True then the calls can be in any order, but
        they must all appear in `mock_calls`."""
        if not any_order:
            # relies on _CallList's contiguous-sublist __contains__
            if calls not in self.mock_calls:
                raise AssertionError(
                    'Calls not found.\nExpected: %r\n'
                    'Actual: %r' % (calls, self.mock_calls)
                )
            return

        all_calls = list(self.mock_calls)

        not_found = []
        for kall in calls:
            try:
                all_calls.remove(kall)
            except ValueError:
                not_found.append(kall)
        if not_found:
            raise AssertionError(
                '%r not all found in call list' % (tuple(not_found),)
            )

    def assert_any_call(self, *args, **kwargs):
        """assert the mock has been called with the specified arguments.

        The assert passes if the mock has *ever* been called, unlike
        `assert_called_with` and `assert_called_once_with` that only pass if
        the call is the most recent one."""
        kall = call(*args, **kwargs)
        if kall not in self.call_args_list:
            expected_string = self._format_mock_call_signature(args, kwargs)
            raise AssertionError(
                '%s call not found' % expected_string
            )

    def _get_child_mock(self, **kw):
        """Create the child mocks for attributes and return value.
        By default child mocks will be the same type as the parent.
        Subclasses of Mock may want to override this to customize the way
        child mocks are made.

        For non-callable mocks the callable variant will be used (rather than
        any custom subclass)."""
        _type = type(self)
        if not issubclass(_type, CallableMixin):
            if issubclass(_type, NonCallableMagicMock):
                klass = MagicMock
            elif issubclass(_type, NonCallableMock):
                klass = Mock
        else:
            klass = _type.__mro__[1]
        return klass(**kw)
def _try_iter(obj):
    """Normalise a side_effect value.

    None, exceptions (classes or instances) and callables pass through
    unchanged; anything iterable is converted to an iterator; non-iterable
    objects are also passed through for backwards compatibility.
    """
    if obj is None or _is_exception(obj) or _callable(obj):
        return obj
    try:
        return iter(obj)
    except TypeError:
        # XXXX backwards compatibility
        # but this will blow up on first call - so maybe we should fail early?
        return obj
class CallableMixin(Base):
    """Mixin providing __call__ and the call-recording machinery
    (call_count, call_args, mock_calls propagation, side_effect, wraps)."""

    def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
                 wraps=None, name=None, spec_set=None, parent=None,
                 _spec_state=None, _new_name='', _new_parent=None, **kwargs):
        # write the slot directly so __setattr__ machinery is bypassed
        self.__dict__['_mock_return_value'] = return_value

        _super(CallableMixin, self).__init__(
            spec, wraps, name, spec_set, parent,
            _spec_state, _new_name, _new_parent, **kwargs
        )

        self.side_effect = side_effect

    def _mock_check_sig(self, *args, **kwargs):
        # stub method that can be replaced with one with a specific signature
        pass

    def __call__(_mock_self, *args, **kwargs):
        # can't use self in-case a function / method we are mocking uses self
        # in the signature
        _mock_self._mock_check_sig(*args, **kwargs)
        return _mock_self._mock_call(*args, **kwargs)

    def _mock_call(_mock_self, *args, **kwargs):
        # Record this call locally, then propagate it up both parent chains
        # so ancestors see it in mock_calls / method_calls, and finally
        # compute the return value (side_effect -> wraps -> return_value).
        self = _mock_self
        self.called = True
        self.call_count += 1
        self.call_args = _Call((args, kwargs), two=True)
        self.call_args_list.append(_Call((args, kwargs), two=True))

        _new_name = self._mock_new_name
        _new_parent = self._mock_new_parent
        self.mock_calls.append(_Call(('', args, kwargs)))

        seen = set()
        skip_next_dot = _new_name == '()'
        do_method_calls = self._mock_parent is not None
        name = self._mock_name
        while _new_parent is not None:
            this_mock_call = _Call((_new_name, args, kwargs))
            if _new_parent._mock_new_name:
                dot = '.'
                if skip_next_dot:
                    dot = ''

                skip_next_dot = False
                if _new_parent._mock_new_name == '()':
                    # a call in the chain: no dot after '()'
                    skip_next_dot = True

                _new_name = _new_parent._mock_new_name + dot + _new_name

            if do_method_calls:
                if _new_name == name:
                    this_method_call = this_mock_call
                else:
                    this_method_call = _Call((name, args, kwargs))
                _new_parent.method_calls.append(this_method_call)

                do_method_calls = _new_parent._mock_parent is not None
                if do_method_calls:
                    name = _new_parent._mock_name + '.' + name

            _new_parent.mock_calls.append(this_mock_call)
            _new_parent = _new_parent._mock_new_parent

            # use ids here so as not to call __hash__ on the mocks
            _new_parent_id = id(_new_parent)
            if _new_parent_id in seen:
                break
            seen.add(_new_parent_id)

        ret_val = DEFAULT
        effect = self.side_effect
        if effect is not None:
            if _is_exception(effect):
                raise effect

            if not _callable(effect):
                # an iterator: each call yields (or raises) the next item
                result = next(effect)
                if _is_exception(result):
                    raise result
                return result

            ret_val = effect(*args, **kwargs)
            if ret_val is DEFAULT:
                ret_val = self.return_value

        if (self._mock_wraps is not None and
             self._mock_return_value is DEFAULT):
            # no explicit return value: delegate to the wrapped object
            return self._mock_wraps(*args, **kwargs)
        if ret_val is DEFAULT:
            ret_val = self.return_value
        return ret_val
class Mock(CallableMixin, NonCallableMock):
    """
    Create a new `Mock` object. `Mock` takes several optional arguments
    that specify the behaviour of the Mock object:

    * `spec`: This can be either a list of strings or an existing object (a
      class or instance) that acts as the specification for the mock object. If
      you pass in an object then a list of strings is formed by calling dir on
      the object (excluding unsupported magic attributes and methods). Accessing
      any attribute not in this list will raise an `AttributeError`.

      If `spec` is an object (rather than a list of strings) then
      `mock.__class__` returns the class of the spec object. This allows mocks
      to pass `isinstance` tests.

    * `spec_set`: A stricter variant of `spec`. If used, attempting to *set*
      or get an attribute on the mock that isn't on the object passed as
      `spec_set` will raise an `AttributeError`.

    * `side_effect`: A function to be called whenever the Mock is called. See
      the `side_effect` attribute. Useful for raising exceptions or
      dynamically changing return values. The function is called with the same
      arguments as the mock, and unless it returns `DEFAULT`, the return
      value of this function is used as the return value.

      Alternatively `side_effect` can be an exception class or instance. In
      this case the exception will be raised when the mock is called.

      If `side_effect` is an iterable then each call to the mock will return
      the next value from the iterable. If any of the members of the iterable
      are exceptions they will be raised instead of returned.

    * `return_value`: The value returned when the mock is called. By default
      this is a new Mock (created on first access). See the
      `return_value` attribute.

    * `wraps`: Item for the mock object to wrap. If `wraps` is not None then
      calling the Mock will pass the call through to the wrapped object
      (returning the real result). Attribute access on the mock will return a
      Mock object that wraps the corresponding attribute of the wrapped object
      (so attempting to access an attribute that doesn't exist will raise an
      `AttributeError`).

      If the mock has an explicit `return_value` set then calls are not passed
      to the wrapped object and the `return_value` is returned instead.

    * `name`: If the mock has a name then it will be used in the repr of the
      mock. This can be useful for debugging. The name is propagated to child
      mocks.

    Mocks can also be called with arbitrary keyword arguments. These will be
    used to set attributes on the mock after it is created.
    """
def _dot_lookup(thing, comp, import_path):
try:
return getattr(thing, comp)
except AttributeError:
__import__(import_path)
return getattr(thing, comp)
def _importer(target):
    """Import the dotted path `target` and return the final object."""
    parts = target.split('.')
    import_path = parts[0]
    thing = __import__(import_path)

    # walk the remaining components, importing submodules as needed
    for comp in parts[1:]:
        import_path += ".%s" % comp
        thing = _dot_lookup(thing, comp, import_path)
    return thing
def _is_started(patcher):
# XXXX horrible
return hasattr(patcher, 'is_local')
class _patch(object):
    """Implementation behind `patch`, `patch.object` and `patch.multiple`:
    usable as a decorator, class decorator or context manager."""

    # set by patch.multiple: the keyword name the created mock is passed as
    attribute_name = None
    _active_patches = set()

    def __init__(
        self, getter, attribute, new, spec, create,
        spec_set, autospec, new_callable, kwargs
    ):
        if new_callable is not None:
            if new is not DEFAULT:
                raise ValueError(
                    "Cannot use 'new' and 'new_callable' together"
                )
            if autospec is not None:
                raise ValueError(
                    "Cannot use 'autospec' and 'new_callable' together"
                )

        self.getter = getter
        self.attribute = attribute
        self.new = new
        self.new_callable = new_callable
        self.spec = spec
        self.create = create
        self.has_local = False
        self.spec_set = spec_set
        self.autospec = autospec
        self.kwargs = kwargs
        self.additional_patchers = []

    def copy(self):
        # used by class decoration so every test method gets its own patcher
        patcher = _patch(
            self.getter, self.attribute, self.new, self.spec,
            self.create, self.spec_set,
            self.autospec, self.new_callable, self.kwargs
        )
        patcher.attribute_name = self.attribute_name
        patcher.additional_patchers = [
            p.copy() for p in self.additional_patchers
        ]
        return patcher

    def __call__(self, func):
        if isinstance(func, ClassTypes):
            return self.decorate_class(func)
        return self.decorate_callable(func)

    def decorate_class(self, klass):
        # wrap every callable test method (name starts with patch.TEST_PREFIX)
        for attr in dir(klass):
            if not attr.startswith(patch.TEST_PREFIX):
                continue

            attr_value = getattr(klass, attr)
            if not hasattr(attr_value, "__call__"):
                continue

            patcher = self.copy()
            setattr(klass, attr, patcher(attr_value))
        return klass

    def decorate_callable(self, func):
        if hasattr(func, 'patchings'):
            # already a patched wrapper: just stack this patcher onto it
            func.patchings.append(self)
            return func

        @wraps(func)
        def patched(*args, **keywargs):
            # don't use a with here (backwards compatability with Python 2.4)
            extra_args = []
            entered_patchers = []

            # can't use try...except...finally because of Python 2.4
            # compatibility
            exc_info = tuple()
            try:
                try:
                    for patching in patched.patchings:
                        arg = patching.__enter__()
                        entered_patchers.append(patching)
                        if patching.attribute_name is not None:
                            # patch.multiple: mocks passed by keyword
                            keywargs.update(arg)
                        elif patching.new is DEFAULT:
                            # created mocks are passed as extra positionals
                            extra_args.append(arg)

                    args += tuple(extra_args)
                    return func(*args, **keywargs)
                except:
                    if (patching not in entered_patchers and
                        _is_started(patching)):
                        # the patcher may have been started, but an exception
                        # raised whilst entering one of its additional_patchers
                        entered_patchers.append(patching)
                    # Pass the exception to __exit__
                    exc_info = sys.exc_info()
                    # re-raise the exception
                    raise
            finally:
                for patching in reversed(entered_patchers):
                    patching.__exit__(*exc_info)

        patched.patchings = [self]
        if hasattr(func, 'func_code'):
            # not in Python 3
            patched.compat_co_firstlineno = getattr(
                func, "compat_co_firstlineno",
                func.func_code.co_firstlineno
            )
        return patched

    def get_original(self):
        # Returns (original, local): `local` is True when the attribute
        # lives directly in the target's __dict__ (not inherited).
        target = self.getter()
        name = self.attribute

        original = DEFAULT
        local = False

        try:
            original = target.__dict__[name]
        except (AttributeError, KeyError):
            original = getattr(target, name, DEFAULT)
        else:
            local = True

        if not self.create and original is DEFAULT:
            raise AttributeError(
                "%s does not have the attribute %r" % (target, name)
            )
        return original, local

    def __enter__(self):
        """Perform the patch."""
        new, spec, spec_set = self.new, self.spec, self.spec_set
        autospec, kwargs = self.autospec, self.kwargs
        new_callable = self.new_callable
        self.target = self.getter()

        # normalise False to None
        if spec is False:
            spec = None
        if spec_set is False:
            spec_set = None
        if autospec is False:
            autospec = None

        if spec is not None and autospec is not None:
            raise TypeError("Can't specify spec and autospec")
        if ((spec is not None or autospec is not None) and
            spec_set not in (True, None)):
            raise TypeError("Can't provide explicit spec_set *and* spec or autospec")

        original, local = self.get_original()

        if new is DEFAULT and autospec is None:
            # we must create the replacement mock ourselves
            inherit = False
            if spec is True:
                # set spec to the object we are replacing
                spec = original
                if spec_set is True:
                    spec_set = original
                    spec = None
            elif spec is not None:
                if spec_set is True:
                    spec_set = spec
                    spec = None
            elif spec_set is True:
                spec_set = original

            if spec is not None or spec_set is not None:
                if original is DEFAULT:
                    raise TypeError("Can't use 'spec' with create=True")
                if isinstance(original, ClassTypes):
                    # If we're patching out a class and there is a spec
                    inherit = True

            Klass = MagicMock
            _kwargs = {}
            if new_callable is not None:
                Klass = new_callable
            elif spec is not None or spec_set is not None:
                this_spec = spec
                if spec_set is not None:
                    this_spec = spec_set
                if _is_list(this_spec):
                    not_callable = '__call__' not in this_spec
                else:
                    not_callable = not _callable(this_spec)
                if not_callable:
                    Klass = NonCallableMagicMock

            if spec is not None:
                _kwargs['spec'] = spec
            if spec_set is not None:
                _kwargs['spec_set'] = spec_set

            # add a name to mocks
            if (isinstance(Klass, type) and
                issubclass(Klass, NonCallableMock) and self.attribute):
                _kwargs['name'] = self.attribute

            _kwargs.update(kwargs)
            new = Klass(**_kwargs)

            if inherit and _is_instance_mock(new):
                # we can only tell if the instance should be callable if the
                # spec is not a list
                this_spec = spec
                if spec_set is not None:
                    this_spec = spec_set
                if (not _is_list(this_spec) and not
                    _instance_callable(this_spec)):
                    Klass = NonCallableMagicMock

                _kwargs.pop('name')
                new.return_value = Klass(_new_parent=new, _new_name='()',
                                         **_kwargs)
        elif autospec is not None:
            # spec is ignored, new *must* be default, spec_set is treated
            # as a boolean. Should we check spec is not None and that spec_set
            # is a bool?
            if new is not DEFAULT:
                raise TypeError(
                    "autospec creates the mock for you. Can't specify "
                    "autospec and new."
                )
            if original is DEFAULT:
                raise TypeError("Can't use 'autospec' with create=True")
            spec_set = bool(spec_set)
            if autospec is True:
                autospec = original

            new = create_autospec(autospec, spec_set=spec_set,
                                  _name=self.attribute, **kwargs)
        elif kwargs:
            # can't set keyword args when we aren't creating the mock
            # XXXX If new is a Mock we could call new.configure_mock(**kwargs)
            raise TypeError("Can't pass kwargs to a mock we aren't creating")

        new_attr = new

        # remember state needed by __exit__ to undo the patch
        self.temp_original = original
        self.is_local = local
        setattr(self.target, self.attribute, new_attr)
        if self.attribute_name is not None:
            # patch.multiple: collect {keyword: mock} from self and the
            # additional patchers
            extra_args = {}
            if self.new is DEFAULT:
                extra_args[self.attribute_name] = new
            for patching in self.additional_patchers:
                arg = patching.__enter__()
                if patching.new is DEFAULT:
                    extra_args.update(arg)
            return extra_args

        return new

    def __exit__(self, *exc_info):
        """Undo the patch."""
        if not _is_started(self):
            raise RuntimeError('stop called on unstarted patcher')

        if self.is_local and self.temp_original is not DEFAULT:
            setattr(self.target, self.attribute, self.temp_original)
        else:
            # attribute was inherited or created: remove our override
            delattr(self.target, self.attribute)
            if not self.create and not hasattr(self.target, self.attribute):
                # needed for proxy objects like django settings
                setattr(self.target, self.attribute, self.temp_original)

        del self.temp_original
        del self.is_local
        del self.target
        for patcher in reversed(self.additional_patchers):
            if _is_started(patcher):
                patcher.__exit__(*exc_info)

    def start(self):
        """Activate a patch, returning any created mock."""
        result = self.__enter__()
        self._active_patches.add(self)
        return result

    def stop(self):
        """Stop an active patch."""
        self._active_patches.discard(self)
        return self.__exit__()
def _get_target(target):
try:
target, attribute = target.rsplit('.', 1)
except (TypeError, ValueError):
raise TypeError("Need a valid target to patch. You supplied: %r" %
(target,))
getter = lambda: _importer(target)
return getter, attribute
def _patch_object(
    target, attribute, new=DEFAULT, spec=None,
    create=False, spec_set=None, autospec=None,
    new_callable=None, **kwargs
):
    """
    patch.object(target, attribute, new=DEFAULT, spec=None, create=False,
                 spec_set=None, autospec=None, new_callable=None, **kwargs)

    patch the named member (`attribute`) on an object (`target`) with a mock
    object.

    `patch.object` can be used as a decorator, class decorator or a context
    manager. Arguments `new`, `spec`, `create`, `spec_set`,
    `autospec` and `new_callable` have the same meaning as for `patch`. Like
    `patch`, `patch.object` takes arbitrary keyword arguments for configuring
    the mock object it creates.

    When used as a class decorator `patch.object` honours `patch.TEST_PREFIX`
    for choosing which methods to wrap.
    """
    # Unlike `patch`, `target` is already the object itself (not a dotted
    # string), so the getter simply closes over it.
    getter = lambda: target
    return _patch(
        getter, attribute, new, spec, create,
        spec_set, autospec, new_callable, kwargs
    )
def _patch_multiple(target, spec=None, create=False, spec_set=None,
                    autospec=None, new_callable=None, **kwargs):
    """Perform multiple patches in a single call. It takes the object to be
    patched (either as an object or a string to fetch the object by importing)
    and keyword arguments for the patches::

        with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
            ...

    Use `DEFAULT` as the value if you want `patch.multiple` to create
    mocks for you. In this case the created mocks are passed into a decorated
    function by keyword, and a dictionary is returned when `patch.multiple` is
    used as a context manager.

    `patch.multiple` can be used as a decorator, class decorator or a context
    manager. The arguments `spec`, `spec_set`, `create`,
    `autospec` and `new_callable` have the same meaning as for `patch`. These
    arguments will be applied to *all* patches done by `patch.multiple`.

    When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`
    for choosing which methods to wrap.
    """
    if not kwargs:
        raise ValueError(
            'Must supply at least one keyword argument with patch.multiple'
        )

    if type(target) in (unicode, str):
        # a dotted-path string: resolve lazily, at patch time
        getter = lambda: _importer(target)
    else:
        getter = lambda: target

    # need to wrap in a list for python 3, where items is a view
    items = list(kwargs.items())
    first_attribute, first_new = items[0]
    patcher = _patch(
        getter, first_attribute, first_new, spec, create, spec_set,
        autospec, new_callable, {}
    )
    patcher.attribute_name = first_attribute

    # the remaining attributes ride along as additional patchers of the first
    for extra_attribute, extra_new in items[1:]:
        this_patcher = _patch(
            getter, extra_attribute, extra_new, spec, create, spec_set,
            autospec, new_callable, {}
        )
        this_patcher.attribute_name = extra_attribute
        patcher.additional_patchers.append(this_patcher)
    return patcher
def patch(
    target, new=DEFAULT, spec=None, create=False,
    spec_set=None, autospec=None, new_callable=None, **kwargs
):
    """
    `patch` acts as a function decorator, class decorator or a context
    manager. Inside the body of the function or with statement, the `target`
    is patched with a `new` object. When the function/with statement exits
    the patch is undone.

    If `new` is omitted, then the target is replaced with a
    `MagicMock`. If `patch` is used as a decorator and `new` is
    omitted, the created mock is passed in as an extra argument to the
    decorated function. If `patch` is used as a context manager the created
    mock is returned by the context manager.

    `target` should be a string in the form `'package.module.ClassName'`. The
    `target` is imported and the specified object replaced with the `new`
    object, so the `target` must be importable from the environment you are
    calling `patch` from. The target is imported when the decorated function
    is executed, not at decoration time.

    The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
    if patch is creating one for you.

    In addition you can pass `spec=True` or `spec_set=True`, which causes
    patch to pass in the object being mocked as the spec/spec_set object.

    `new_callable` allows you to specify a different class, or callable object,
    that will be called to create the `new` object. By default `MagicMock` is
    used.

    A more powerful form of `spec` is `autospec`. If you set `autospec=True`
    then the mock will be created with a spec from the object being replaced.
    All attributes of the mock will also have the spec of the corresponding
    attribute of the object being replaced. Methods and functions being
    mocked will have their arguments checked and will raise a `TypeError` if
    they are called with the wrong signature. For mocks replacing a class,
    their return value (the 'instance') will have the same spec as the class.

    Instead of `autospec=True` you can pass `autospec=some_object` to use an
    arbitrary object as the spec instead of the one being replaced.

    By default `patch` will fail to replace attributes that don't exist. If
    you pass in `create=True`, and the attribute doesn't exist, patch will
    create the attribute for you when the patched function is called, and
    delete it again afterwards. This is useful for writing tests against
    attributes that your production code creates at runtime. It is off by
    default because it can be dangerous. With it switched on you can write
    passing tests against APIs that don't actually exist!

    Patch can be used as a `TestCase` class decorator. It works by
    decorating each test method in the class. This reduces the boilerplate
    code when your test methods share a common patchings set. `patch` finds
    tests by looking for method names that start with `patch.TEST_PREFIX`.
    By default this is `test`, which matches the way `unittest` finds tests.
    You can specify an alternative prefix by setting `patch.TEST_PREFIX`.

    Patch can be used as a context manager, with the with statement. Here the
    patching applies to the indented block after the with statement. If you
    use "as" then the patched object will be bound to the name after the
    "as"; very useful if `patch` is creating a mock object for you.

    `patch` takes arbitrary keyword arguments. These will be passed to
    the `Mock` (or `new_callable`) on construction.

    `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
    available for alternate use-cases.
    """
    getter, attribute = _get_target(target)
    return _patch(
        getter, attribute, new, spec, create,
        spec_set, autospec, new_callable, kwargs
    )
class _patch_dict(object):
    """
    Patch a dictionary, or dictionary like object, and restore the dictionary
    to its original state after the test.
    `in_dict` can be a dictionary or a mapping like container. If it is a
    mapping then it must at least support getting, setting and deleting items
    plus iterating over keys.
    `in_dict` can also be a string specifying the name of the dictionary, which
    will then be fetched by importing it.
    `values` can be a dictionary of values to set in the dictionary. `values`
    can also be an iterable of `(key, value)` pairs.
    If `clear` is True then the dictionary will be cleared before the new
    values are set.
    `patch.dict` can also be called with arbitrary keyword arguments to set
    values in the dictionary::
        with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()):
            ...
    `patch.dict` can be used as a context manager, decorator or class
    decorator. When used as a class decorator `patch.dict` honours
    `patch.TEST_PREFIX` for choosing which methods to wrap.
    """
    def __init__(self, in_dict, values=(), clear=False, **kwargs):
        # A string target (e.g. 'sys.modules') is resolved by importing it.
        if isinstance(in_dict, basestring):
            in_dict = _importer(in_dict)
        self.in_dict = in_dict
        # support any argument supported by dict(...) constructor
        self.values = dict(values)
        self.values.update(kwargs)
        self.clear = clear
        self._original = None
    def __call__(self, f):
        """Use this patcher as a decorator on a function or a class."""
        if isinstance(f, ClassTypes):
            return self.decorate_class(f)
        @wraps(f)
        def _inner(*args, **kw):
            # Patch only for the duration of the wrapped call.
            self._patch_dict()
            try:
                return f(*args, **kw)
            finally:
                self._unpatch_dict()
        return _inner
    def decorate_class(self, klass):
        """Wrap every callable attr starting with patch.TEST_PREFIX in a
        fresh _patch_dict so each test method patches/unpatches itself."""
        for attr in dir(klass):
            attr_value = getattr(klass, attr)
            if (attr.startswith(patch.TEST_PREFIX) and
                hasattr(attr_value, "__call__")):
                decorator = _patch_dict(self.in_dict, self.values, self.clear)
                decorated = decorator(attr_value)
                setattr(klass, attr, decorated)
        return klass
    def __enter__(self):
        """Patch the dict."""
        self._patch_dict()
    def _patch_dict(self):
        """Snapshot the original contents, then apply clear/values."""
        values = self.values
        in_dict = self.in_dict
        clear = self.clear
        try:
            original = in_dict.copy()
        except AttributeError:
            # dict like object with no copy method
            # must support iteration over keys
            original = {}
            for key in in_dict:
                original[key] = in_dict[key]
        self._original = original
        if clear:
            _clear_dict(in_dict)
        try:
            in_dict.update(values)
        except AttributeError:
            # dict like object with no update method
            for key in values:
                in_dict[key] = values[key]
    def _unpatch_dict(self):
        """Restore the mapping to the snapshot taken in _patch_dict."""
        in_dict = self.in_dict
        original = self._original
        _clear_dict(in_dict)
        try:
            in_dict.update(original)
        except AttributeError:
            # dict like object with no update method
            for key in original:
                in_dict[key] = original[key]
    def __exit__(self, *args):
        """Unpatch the dict."""
        self._unpatch_dict()
        return False
    # Aliases so patch.dict objects share the start()/stop() API of _patch.
    start = __enter__
    stop = __exit__
def _clear_dict(in_dict):
try:
in_dict.clear()
except AttributeError:
keys = list(in_dict)
for key in keys:
del in_dict[key]
def _patch_stopall():
    """Stop all active patches."""
    # Iterate over a snapshot: stop() removes each patcher from
    # _patch._active_patches while we loop.
    # NOTE(review): the loop variable shadows the module-level `patch`
    # helper inside this function's scope (harmless here).
    for patch in list(_patch._active_patches):
        patch.stop()
# Expose the patcher variants and configuration as attributes of `patch`
# so callers use patch.object(...), patch.dict(...), patch.multiple(...)
# and patch.stopall().
patch.object = _patch_object
patch.dict = _patch_dict
patch.multiple = _patch_multiple
patch.stopall = _patch_stopall
# Method-name prefix used when patch decorates a whole class.
patch.TEST_PREFIX = 'test'
# Space-separated names (without the __dunder__ wrapping) of the magic
# methods MagicMock supports by default.
magic_methods = (
    "lt le gt ge eq ne "
    "getitem setitem delitem "
    "len contains iter "
    "hash str sizeof "
    "enter exit "
    "divmod neg pos abs invert "
    "complex int float index "
    "trunc floor ceil "
)
# Arithmetic/bitwise operators; also expanded into their in-place (i*)
# and reflected (r*) forms below.
numerics = "add sub mul div floordiv mod lshift rshift and xor or pow "
inplace = ' '.join('i%s' % n for n in numerics.split())
right = ' '.join('r%s' % n for n in numerics.split())
# Methods that only exist on one major Python version.
extra = ''
if inPy3k:
    extra = 'bool next '
else:
    extra = 'unicode long nonzero oct hex truediv rtruediv '
# not including __prepare__, __instancecheck__, __subclasscheck__
# (as they are metaclass methods)
# __del__ is not supported at all as it causes problems if it exists
# Magic methods that are NOT configured by default but may be set
# explicitly by the user.
_non_defaults = set('__%s__' % method for method in [
    'cmp', 'getslice', 'setslice', 'coerce', 'subclasses',
    'format', 'get', 'set', 'delete', 'reversed',
    'missing', 'reduce', 'reduce_ex', 'getinitargs',
    'getnewargs', 'getstate', 'setstate', 'getformat',
    'setformat', 'repr', 'dir'
])
def _get_method(name, func):
"Turns a callable object (like a mock) into a real function"
def method(self, *args, **kw):
return func(self, *args, **kw)
method.__name__ = name
return method
# Every default magic method name in __dunder__ form.
_magics = set(
    '__%s__' % method for method in
    ' '.join([magic_methods, numerics, inplace, right, extra]).split()
)
# Defaults plus the opt-in extras from _non_defaults.
_all_magics = _magics | _non_defaults
# Magic methods that cannot usefully be mocked on a MagicMock.
# BUG FIX: the original set literal was missing the comma after
# '__prepare__', so implicit string concatenation produced the single
# bogus entry '__prepare____instancecheck__' and silently omitted
# '__prepare__', '__instancecheck__' and '__subclasscheck__'.
_unsupported_magics = set([
    '__getattr__', '__setattr__',
    '__init__', '__new__', '__prepare__',
    '__instancecheck__', '__subclasscheck__',
    '__del__'
])
# Magic methods whose default return value is computed from the mock
# instance itself at configuration time.
# NOTE: `unicode` below is the Python 2 builtin; this table is only
# consulted on interpreters where that name exists.
_calculate_return_value = {
    '__hash__': lambda self: object.__hash__(self),
    '__str__': lambda self: object.__str__(self),
    '__sizeof__': lambda self: object.__sizeof__(self),
    '__unicode__': lambda self: unicode(object.__str__(self)),
}
# Fixed default return values for magic methods (Python 2 names such as
# __nonzero__/__oct__/__hex__/__long__ included for compatibility).
_return_values = {
    '__lt__': NotImplemented,
    '__gt__': NotImplemented,
    '__le__': NotImplemented,
    '__ge__': NotImplemented,
    '__int__': 1,
    '__contains__': False,
    '__len__': 0,
    '__exit__': False,
    '__complex__': 1j,
    '__float__': 1.0,
    '__bool__': True,
    '__nonzero__': True,
    '__oct__': '1',
    '__hex__': '0x1',
    '__long__': long(1),
    '__index__': 1,
}
def _get_eq(self):
    """Build the default __eq__ side effect for a mock: honour a
    user-configured return value, otherwise compare by identity."""
    def __eq__(other):
        configured = self.__eq__._mock_return_value
        if configured is DEFAULT:
            return self is other
        return configured
    return __eq__
def _get_ne(self):
    """Build the default __ne__ side effect for a mock.

    Returns the value configured via ``mock.__ne__.return_value`` if one
    was set; otherwise falls back to identity comparison.
    """
    def __ne__(other):
        ret_val = self.__ne__._mock_return_value
        if ret_val is not DEFAULT:
            # BUG FIX: the original returned the DEFAULT sentinel here,
            # discarding the user-configured return value (the sibling
            # _get_eq correctly returns ret_val).
            return ret_val
        return self is not other
    return __ne__
def _get_iter(self):
    """Build the default __iter__ side effect for a mock: iterate the
    configured return value, or an empty sequence if none was set."""
    def __iter__():
        configured = self.__iter__._mock_return_value
        if configured is not DEFAULT:
            # iter() passes an existing iterator through unchanged, so a
            # pre-made iterator set as return_value is yielded as-is.
            return iter(configured)
        return iter([])
    return __iter__
# Magic methods whose default behaviour is installed as a side_effect
# (built per-mock by the factory functions above) rather than a plain
# return value.
_side_effect_methods = {
    '__eq__': _get_eq,
    '__ne__': _get_ne,
    '__iter__': _get_iter,
}
def _set_return_value(mock, method, name):
    """Give the magic-method mock *method* its default behaviour for *name*.

    Tries, in order: a fixed default return value, a value computed from
    the mock instance, and finally a side-effect factory. Does nothing if
    *name* appears in none of the tables.
    """
    fixed = _return_values.get(name, DEFAULT)
    if fixed is not DEFAULT:
        method.return_value = fixed
        return
    # (misspelled local `return_calulator` renamed for readability)
    return_calculator = _calculate_return_value.get(name)
    if return_calculator is not None:
        try:
            return_value = return_calculator(mock)
        except AttributeError:
            # XXXX why do we return AttributeError here?
            # set it as a side_effect instead?
            return_value = AttributeError(name)
        method.return_value = return_value
        return
    side_effector = _side_effect_methods.get(name)
    if side_effector is not None:
        method.side_effect = side_effector(mock)
class MagicMixin(object):
    """Mixin that installs lazy MagicProxy descriptors for the supported
    magic methods on the mock's class."""
    def __init__(self, *args, **kw):
        _super(MagicMixin, self).__init__(*args, **kw)
        self._mock_set_magics()

    def _mock_set_magics(self):
        """(Re)install magic-method proxies.

        If the mock has a spec (``_mock_methods``), only magic methods
        present on the spec are kept; proxies for the rest are removed
        from this instance's class. Safe to call more than once.
        """
        these_magics = _magics
        if self._mock_methods is not None:
            these_magics = _magics.intersection(self._mock_methods)
        # FIX: removed a dead `remove_magics = set()` assignment that was
        # immediately overwritten by the next line.
        remove_magics = _magics - these_magics
        for entry in remove_magics:
            if entry in type(self).__dict__:
                # remove unneeded magic methods
                delattr(self, entry)
        # don't overwrite existing attributes if called a second time
        these_magics = these_magics - set(type(self).__dict__)
        _type = type(self)
        for entry in these_magics:
            setattr(_type, entry, MagicProxy(entry, self))
class NonCallableMagicMock(MagicMixin, NonCallableMock):
    """A version of `MagicMock` that isn't callable."""
    def mock_add_spec(self, spec, spec_set=False):
        """Add a spec to a mock. `spec` can either be an object or a
        list of strings. Only attributes on the `spec` can be fetched as
        attributes from the mock.
        If `spec_set` is True then only attributes on the spec can be set."""
        self._mock_add_spec(spec, spec_set)
        # Re-run magic setup so proxies match the new spec.
        self._mock_set_magics()
class MagicMock(MagicMixin, Mock):
    """
    MagicMock is a subclass of Mock with default implementations
    of most of the magic methods. You can use MagicMock without having to
    configure the magic methods yourself.
    If you use the `spec` or `spec_set` arguments then *only* magic
    methods that exist in the spec will be created.
    Attributes and the return value of a `MagicMock` will also be `MagicMocks`.
    """
    def mock_add_spec(self, spec, spec_set=False):
        """Add a spec to a mock. `spec` can either be an object or a
        list of strings. Only attributes on the `spec` can be fetched as
        attributes from the mock.
        If `spec_set` is True then only attributes on the spec can be set."""
        self._mock_add_spec(spec, spec_set)
        # Re-run magic setup so proxies match the new spec.
        self._mock_set_magics()
class MagicProxy(object):
    """Class-level placeholder that materialises the real magic-method
    mock lazily, on first access or call."""
    def __init__(self, name, parent):
        self.name = name
        self.parent = parent

    def __call__(self, *args, **kwargs):
        # Build the child mock, then forward the call straight to it.
        return self.create_mock()(*args, **kwargs)

    def create_mock(self):
        parent = self.parent
        entry = self.name
        child = parent._get_child_mock(name=entry, _new_name=entry,
                                       _new_parent=parent)
        # Replace this proxy on the parent and give the child its
        # default magic behaviour.
        setattr(parent, entry, child)
        _set_return_value(parent, child, entry)
        return child

    def __get__(self, obj, _type=None):
        return self.create_mock()
class _ANY(object):
"A helper object that compares equal to everything."
def __eq__(self, other):
return True
def __ne__(self, other):
return False
def __repr__(self):
return '<ANY>'
ANY = _ANY()
def _format_call_signature(name, args, kwargs):
message = '%s(%%s)' % name
formatted_args = ''
args_string = ', '.join([repr(arg) for arg in args])
kwargs_string = ', '.join([
'%s=%r' % (key, value) for key, value in kwargs.items()
])
if args_string:
formatted_args = args_string
if kwargs_string:
if formatted_args:
formatted_args += ', '
formatted_args += kwargs_string
return message % formatted_args
class _Call(tuple):
    """
    A tuple for holding the results of a call to a mock, either in the form
    `(args, kwargs)` or `(name, args, kwargs)`.
    If args or kwargs are empty then a call tuple will compare equal to
    a tuple without those values. This makes comparisons less verbose::
        _Call(('name', (), {})) == ('name',)
        _Call(('name', (1,), {})) == ('name', (1,))
        _Call(((), {'a': 'b'})) == ({'a': 'b'},)
    The `_Call` object provides a useful shortcut for comparing with call::
        _Call(((1, 2), {'a': 3})) == call(1, 2, a=3)
        _Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3)
    If the _Call has no name then it will match any name.
    """
    def __new__(cls, value=(), name=None, parent=None, two=False,
                from_kall=True):
        # Normalise `value` into (name, args, kwargs); the `name`
        # parameter itself is only stored by __init__, not in the tuple.
        name = ''
        args = ()
        kwargs = {}
        _len = len(value)
        if _len == 3:
            name, args, kwargs = value
        elif _len == 2:
            # Ambiguous: could be (name, args), (name, kwargs) or
            # (args, kwargs) - disambiguate by the first element's type.
            first, second = value
            if isinstance(first, basestring):
                name = first
                if isinstance(second, tuple):
                    args = second
                else:
                    kwargs = second
            else:
                args, kwargs = first, second
        elif _len == 1:
            # Single element: name, args tuple, or kwargs dict.
            value, = value
            if isinstance(value, basestring):
                name = value
            elif isinstance(value, tuple):
                args = value
            else:
                kwargs = value
        if two:
            # Two-tuple form drops the name entirely.
            return tuple.__new__(cls, (args, kwargs))
        return tuple.__new__(cls, (name, args, kwargs))
    def __init__(self, value=(), name=None, parent=None, two=False,
                 from_kall=True):
        # `from_kall` distinguishes calls built via the `call` helper
        # (attribute access) from real recorded calls.
        self.name = name
        self.parent = parent
        self.from_kall = from_kall
    def __eq__(self, other):
        """Lenient comparison: missing name/args/kwargs on either side
        match anything, per the class docstring."""
        if other is ANY:
            return True
        try:
            len_other = len(other)
        except TypeError:
            return False
        self_name = ''
        if len(self) == 2:
            self_args, self_kwargs = self
        else:
            self_name, self_args, self_kwargs = self
        other_name = ''
        if len_other == 0:
            other_args, other_kwargs = (), {}
        elif len_other == 3:
            other_name, other_args, other_kwargs = other
        elif len_other == 1:
            value, = other
            if isinstance(value, tuple):
                other_args = value
                other_kwargs = {}
            elif isinstance(value, basestring):
                other_name = value
                other_args, other_kwargs = (), {}
            else:
                other_args = ()
                other_kwargs = value
        else:
            # len 2
            # could be (name, args) or (name, kwargs) or (args, kwargs)
            first, second = other
            if isinstance(first, basestring):
                other_name = first
                if isinstance(second, tuple):
                    other_args, other_kwargs = second, {}
                else:
                    other_args, other_kwargs = (), second
            else:
                other_args, other_kwargs = first, second
        if self_name and other_name != self_name:
            return False
        # this order is important for ANY to work!
        return (other_args, other_kwargs) == (self_args, self_kwargs)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __call__(self, *args, **kwargs):
        """Record a call on this _Call, producing a child with name '()'
        appended (e.g. call.foo -> call.foo())."""
        if self.name is None:
            return _Call(('', args, kwargs), name='()')
        name = self.name + '()'
        return _Call((self.name, args, kwargs), name=name, parent=self)
    def __getattr__(self, attr):
        """Build dotted call names: call.foo.bar etc."""
        if self.name is None:
            return _Call(name=attr, from_kall=False)
        name = '%s.%s' % (self.name, attr)
        return _Call(name=name, parent=self, from_kall=False)
    def __repr__(self):
        if not self.from_kall:
            name = self.name or 'call'
            if name.startswith('()'):
                name = 'call%s' % name
            return name
        if len(self) == 2:
            name = 'call'
            args, kwargs = self
        else:
            name, args, kwargs = self
            if not name:
                name = 'call'
            elif not name.startswith('()'):
                name = 'call.%s' % name
            else:
                name = 'call%s' % name
        return _format_call_signature(name, args, kwargs)
    def call_list(self):
        """For a call object that represents multiple calls, `call_list`
        returns a list of all the intermediate calls as well as the
        final call."""
        vals = []
        thing = self
        # Walk up the parent chain, keeping only real calls (from_kall),
        # then reverse so the outermost call comes first.
        while thing is not None:
            if thing.from_kall:
                vals.append(thing)
            thing = thing.parent
        return _CallList(reversed(vals))
# The public `call` helper: an anonymous _Call used to build expected
# calls via attribute access and calling.
call = _Call(from_kall=False)
def create_autospec(spec, spec_set=False, instance=False, _parent=None,
                    _name=None, **kwargs):
    """Create a mock object using another object as a spec. Attributes on the
    mock will use the corresponding attribute on the `spec` object as their
    spec.
    Functions or methods being mocked will have their arguments checked
    to check that they are called with the correct signature.
    If `spec_set` is True then attempting to set attributes that don't exist
    on the spec object will raise an `AttributeError`.
    If a class is used as a spec then the return value of the mock (the
    instance of the class) will have the same spec. You can use a class as the
    spec for an instance object by passing `instance=True`. The returned mock
    will only be callable if instances of the mock are callable.
    `create_autospec` also takes arbitrary keyword arguments that are passed to
    the constructor of the created mock."""
    if _is_list(spec):
        # can't pass a list instance to the mock constructor as it will be
        # interpreted as a list of strings
        spec = type(spec)
    is_type = isinstance(spec, ClassTypes)
    _kwargs = {'spec': spec}
    if spec_set:
        _kwargs = {'spec_set': spec}
    elif spec is None:
        # None we mock with a normal mock without a spec
        _kwargs = {}
    _kwargs.update(kwargs)
    # Choose the mock class: non-callable specs get NonCallableMagicMock.
    Klass = MagicMock
    if type(spec) in DescriptorTypes:
        # descriptors don't have a spec
        # because we don't know what type they return
        _kwargs = {}
    elif not _callable(spec):
        Klass = NonCallableMagicMock
    elif is_type and instance and not _instance_callable(spec):
        Klass = NonCallableMagicMock
    _new_name = _name
    if _parent is None:
        # for a top level object no _new_name should be set
        _new_name = ''
    mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name,
                 name=_name, **_kwargs)
    if isinstance(spec, FunctionTypes):
        # should only happen at the top level because we don't
        # recurse for functions
        mock = _set_signature(mock, spec)
    else:
        _check_signature(spec, mock, is_type, instance)
    if _parent is not None and not instance:
        _parent._mock_children[_name] = mock
    if is_type and not instance and 'return_value' not in kwargs:
        # A class spec: give the instance (return value) the same spec,
        # recursively autospecced.
        mock.return_value = create_autospec(spec, spec_set, instance=True,
                                            _name='()', _parent=mock)
    for entry in dir(spec):
        if _is_magic(entry):
            # MagicMock already does the useful magic methods for us
            continue
        if isinstance(spec, FunctionTypes) and entry in FunctionAttributes:
            # allow a mock to actually be a function
            continue
        # XXXX do we need a better way of getting attributes without
        # triggering code execution (?) Probably not - we need the actual
        # object to mock it so we would rather trigger a property than mock
        # the property descriptor. Likewise we want to mock out dynamically
        # provided attributes.
        # XXXX what about attributes that raise exceptions other than
        # AttributeError on being fetched?
        # we could be resilient against it, or catch and propagate the
        # exception when the attribute is fetched from the mock
        try:
            original = getattr(spec, entry)
        except AttributeError:
            continue
        kwargs = {'spec': original}
        if spec_set:
            kwargs = {'spec_set': original}
        if not isinstance(original, FunctionTypes):
            # Non-function attributes are mocked lazily via _SpecState.
            new = _SpecState(original, spec_set, mock, entry, instance)
            mock._mock_children[entry] = new
        else:
            parent = mock
            if isinstance(spec, FunctionTypes):
                parent = mock.mock
            new = MagicMock(parent=parent, name=entry, _new_name=entry,
                            _new_parent=parent, **kwargs)
            mock._mock_children[entry] = new
            skipfirst = _must_skip(spec, entry, is_type)
            _check_signature(original, new, skipfirst=skipfirst)
        # so functions created with _set_signature become instance attributes,
        # *plus* their underlying mock exists in _mock_children of the parent
        # mock. Adding to _mock_children may be unnecessary where we are also
        # setting as an instance attribute?
        if isinstance(new, FunctionTypes):
            setattr(mock, entry, new)
    return mock
def _must_skip(spec, entry, is_type):
    """Decide whether signature checking should skip the first argument
    (self) when autospeccing attribute *entry* of *spec*.

    Instance attributes and static/class methods never need the skip;
    plain methods looked up on a class do (when `is_type`).
    """
    if not isinstance(spec, ClassTypes):
        if entry in getattr(spec, '__dict__', {}):
            # instance attribute - shouldn't skip
            return False
        spec = spec.__class__
    if not hasattr(spec, '__mro__'):
        # old style class: can't have descriptors anyway
        return is_type
    # Walk the MRO to find where the attribute is actually defined.
    for klass in spec.__mro__:
        result = klass.__dict__.get(entry, DEFAULT)
        if result is DEFAULT:
            continue
        if isinstance(result, (staticmethod, classmethod)):
            return False
        return is_type
    # shouldn't get here unless function is a dynamically provided attribute
    # XXXX untested behaviour
    return is_type
def _get_class(obj):
try:
return obj.__class__
except AttributeError:
# in Python 2, _sre.SRE_Pattern objects have no __class__
return type(obj)
class _SpecState(object):
def __init__(self, spec, spec_set=False, parent=None,
name=None, ids=None, instance=False):
self.spec = spec
self.ids = ids
self.spec_set = spec_set
self.parent = parent
self.instance = instance
self.name = name
# Concrete function/method types on this interpreter, obtained from known
# examples in this module rather than the `types` module.
FunctionTypes = (
    # python function
    type(create_autospec),
    # instance method
    type(ANY.__eq__),
    # unbound method
    type(_ANY.__eq__),
)
# Python 2 function attributes that must remain accessible when a mock
# stands in for a real function (see create_autospec).
FunctionAttributes = set([
    'func_closure',
    'func_code',
    'func_defaults',
    'func_dict',
    'func_doc',
    'func_globals',
    'func_name',
])
# Spec used for mocked file handles; computed lazily on first mock_open()
# call because it differs between Python 2 and 3.
file_spec = None
def mock_open(mock=None, read_data=''):
    """
    A helper function to create a mock to replace the use of `open`. It works
    for `open` called directly or used as a context manager.
    The `mock` argument is the mock object to configure. If `None` (the
    default) then a `MagicMock` will be created for you, with the API limited
    to methods or attributes available on standard file handles.
    `read_data` is a string for the `read` method of the file handle to return.
    This is an empty string by default.
    """
    global file_spec
    if file_spec is None:
        # set on first use
        if inPy3k:
            # Python 3: union of text and binary file-object APIs.
            import _io
            file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
        else:
            # Python 2: the builtin `file` type.
            file_spec = file
    if mock is None:
        mock = MagicMock(name='open', spec=open)
    # The handle returned by calling the mocked open(); __enter__ returns
    # the handle itself so `with open(...) as f` works.
    handle = MagicMock(spec=file_spec)
    handle.write.return_value = None
    handle.__enter__.return_value = handle
    handle.read.return_value = read_data
    mock.return_value = handle
    return mock
class PropertyMock(Mock):
    """
    A mock intended to be used as a property, or other descriptor, on a class.
    `PropertyMock` provides `__get__` and `__set__` methods so you can specify
    a return value when it is fetched.
    Fetching a `PropertyMock` instance from an object calls the mock, with
    no args. Setting it calls the mock with the value being set.
    """
    def _get_child_mock(self, **kwargs):
        # Children are full MagicMocks, not PropertyMocks.
        return MagicMock(**kwargs)
    def __get__(self, obj, obj_type):
        # Attribute fetch -> call the mock with no arguments.
        return self()
    def __set__(self, obj, val):
        # Attribute assignment -> call the mock with the assigned value.
        self(val)
| mpl-2.0 |
maciekcc/tensorflow | tensorflow/python/training/tensorboard_logging_test.py | 132 | 4456 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.tensorboard_logging."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
import tempfile
import time
from tensorflow.core.util import event_pb2
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.summary.writer import writer
from tensorflow.python.training import tensorboard_logging
class EventLoggingTest(test.TestCase):
  """Tests that tensorboard_logging writes events and mirrors to tf_logging."""
  def setUp(self):
    self._work_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
    self._sw = writer.FileWriter(self._work_dir)
    tensorboard_logging.set_summary_writer(self._sw)
    self.addCleanup(shutil.rmtree, self._work_dir)
    # Stop the clock to avoid test flakiness.
    now = time.time()
    time._real_time = time.time
    time.time = lambda: now
    # Mock out logging calls so we can verify that the right number of messages
    # get logged.
    self.logged_message_count = 0
    self._actual_log = logging.log
    def mockLog(*args, **kwargs):
      self.logged_message_count += 1
      self._actual_log(*args, **kwargs)
    logging.log = mockLog
  def tearDown(self):
    # Restore the patched clock and logging function from setUp.
    time.time = time._real_time
    logging.log = self._actual_log
  def assertLoggedMessagesAre(self, expected_messages):
    """Assert the last event file holds exactly `expected_messages`
    ((level, message) pairs), after the leading version event."""
    self._sw.close()
    event_paths = glob.glob(os.path.join(self._work_dir, "event*"))
    # If the tests runs multiple time in the same directory we can have
    # more than one matching event file. We only want to read the last one.
    self.assertTrue(event_paths)
    event_reader = summary_iterator.summary_iterator(event_paths[-1])
    # Skip over the version event.
    next(event_reader)
    for level, message in expected_messages:
      event = next(event_reader)
      self.assertEqual(event.wall_time, time.time())
      self.assertEqual(event.log_message.level, level)
      self.assertEqual(event.log_message.message, message)
  def testBasic(self):
    tensorboard_logging.set_summary_writer(self._sw)
    tensorboard_logging.error("oh no!")
    tensorboard_logging.error("for%s", "mat")
    self.assertLoggedMessagesAre([(event_pb2.LogMessage.ERROR, "oh no!"),
                                  (event_pb2.LogMessage.ERROR, "format")])
    self.assertEqual(2, self.logged_message_count)
  def testVerbosity(self):
    # Verbosity filters what is written to the event file; WARN is below
    # ERROR so only "error" and "debug" events are recorded.
    tensorboard_logging.set_summary_writer(self._sw)
    tensorboard_logging.set_verbosity(tensorboard_logging.ERROR)
    tensorboard_logging.warn("warn")
    tensorboard_logging.error("error")
    tensorboard_logging.set_verbosity(tensorboard_logging.DEBUG)
    tensorboard_logging.debug("debug")
    self.assertLoggedMessagesAre([(event_pb2.LogMessage.ERROR, "error"),
                                  (event_pb2.LogMessage.DEBUGGING, "debug")])
    # All message should be logged because tensorboard_logging verbosity doesn't
    # affect logging verbosity.
    self.assertEqual(3, self.logged_message_count)
  def testBadVerbosity(self):
    with self.assertRaises(ValueError):
      tensorboard_logging.set_verbosity("failure")
    with self.assertRaises(ValueError):
      tensorboard_logging.log("bad", "dead")
  def testNoSummaryWriter(self):
    """Test that logging without a SummaryWriter succeeds."""
    tensorboard_logging.set_summary_writer(None)
    tensorboard_logging.warn("this should work")
    self.assertEqual(1, self.logged_message_count)
  def testSummaryWriterFailsAfterClear(self):
    tensorboard_logging._clear_summary_writer()
    with self.assertRaises(RuntimeError):
      tensorboard_logging.log(tensorboard_logging.ERROR, "failure")
# Allow running this test file directly with `python`.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
dgies/incubator-airflow | tests/plugins_manager.py | 10 | 2500 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import inspect
import logging
import unittest
from flask.blueprints import Blueprint
from flask_admin import BaseView
from flask_admin.menu import MenuLink, MenuView
from airflow.hooks.base_hook import BaseHook
from airflow.models import BaseOperator
from airflow.executors.base_executor import BaseExecutor
from airflow.www.app import cached_app
class PluginsTest(unittest.TestCase):
    """Checks that entities registered by the Airflow test plugin are
    importable/visible through the usual integration points."""
    def test_operators(self):
        from airflow.operators.test_plugin import PluginOperator
        self.assertTrue(issubclass(PluginOperator, BaseOperator))
    def test_hooks(self):
        from airflow.hooks.test_plugin import PluginHook
        self.assertTrue(issubclass(PluginHook, BaseHook))
    def test_executors(self):
        from airflow.executors.test_plugin import PluginExecutor
        self.assertTrue(issubclass(PluginExecutor, BaseExecutor))
    def test_macros(self):
        from airflow.macros.test_plugin import plugin_macro
        self.assertTrue(callable(plugin_macro))
    def test_admin_views(self):
        # Plugin admin views are registered under the 'Test Plugin' menu
        # category of the Flask-Admin extension.
        app = cached_app()
        [admin] = app.extensions['admin']
        category = admin._menu_categories['Test Plugin']
        [admin_view] = [v for v in category.get_children()
                        if isinstance(v, MenuView)]
        self.assertEqual('Test View', admin_view.name)
    def test_flask_blueprints(self):
        app = cached_app()
        self.assertIsInstance(app.blueprints['test_plugin'], Blueprint)
    def test_menu_links(self):
        app = cached_app()
        [admin] = app.extensions['admin']
        category = admin._menu_categories['Test Plugin']
        [menu_link] = [ml for ml in category.get_children()
                       if isinstance(ml, MenuLink)]
        self.assertEqual('Test Menu Link', menu_link.name)
| apache-2.0 |
Verteiron/JContainers | JContainers/lib/boost/tools/build/v2/test/library_property.py | 44 | 1126 | #!/usr/bin/python
# Copyright 2004 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Test that the <library> property has no effect on "obj" targets. Previously,
# it affected all targets, so
#
# project : requirements <library>foo ;
# exe a : a.cpp helper ;
# obj helper : helper.cpp : <optimization>off ;
#
# caused 'foo' to be built with and without optimization.
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)

# Project requirements pull in lib//x; only 'foo' overrides the variant.
# The test then checks the <library> requirement did not leak into the
# release build of the obj target.
t.write("jamroot.jam", """
project : requirements <library>lib//x ;
exe a : a.cpp foo ;
obj foo : foo.cpp : <variant>release ;
""")

# Main program: calls aux(), defined in foo.cpp.
t.write("a.cpp", """
void aux();
int main() { aux(); }
""")

# foo.cpp depends on gee(), exported by the library lib//x.
t.write("foo.cpp", """
void gee();
void aux() { gee(); }
""")

# The library source; dllexport needed on Windows shared builds.
t.write("lib/x.cpp", """
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
gee() {}
""")

t.write("lib/jamfile.jam", """
lib x : x.cpp ;
""")

t.write("lib/jamroot.jam", """
""")

t.run_build_system()
# The exe must be built (debug), but the library must NOT have been
# built in release for the obj target - i.e. <library> had no effect
# on "obj" targets.
t.expect_addition("bin/$toolset/debug/a.exe")
t.expect_nothing("lib/bin/$toolset/release/x.obj")

t.cleanup()
| mit |
DataONEorg/d1_python | client_onedrive/src/d1_onedrive/impl/object_tree.py | 1 | 7907 | #!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Object Tree.
Based on a source tree that contains only PIDs and queries, maintain the object
tree that is browsed through the ONEDrive filesystem.
Cache the information on disk between runs of ONEDrive.
"""
import logging
import pickle
import d1_onedrive.impl.clients.onedrive_d1_client
import d1_onedrive.impl.clients.onedrive_solr_client
import d1_onedrive.impl.onedrive_exceptions
class ObjectTree:
    def __init__(self, options, source_tree):
        """Store options/source tree and build the Solr and DataONE clients."""
        self._options = options
        self._source_tree = source_tree
        self._solr_client = d1_onedrive.impl.clients.onedrive_solr_client.OneDriveSolrClient(
            options
        )
        self._d1_client = d1_onedrive.impl.clients.onedrive_d1_client.DataONEClient(
            options
        )
    def __enter__(self):
        """Load the on-disk cache and refresh it before use."""
        self._create_cache()
        self.refresh()
        return self
    def __exit__(self, type, value, traceback):
        # Persist the cache on context exit; exceptions propagate (no return True).
        self._pickle_cache_to_disk()
    def refresh(self):
        """Synchronize the local tree of Solr records for DataONE identifiers and
        queries with the reference tree."""
        # Refresh the source tree first if its own cache is stale.
        if self._source_tree.cache_is_stale():
            self._source_tree.refresh()
        logging.info("Refreshing object tree")
        # Rebuild the cache from scratch on every refresh.
        self._init_cache()
        self.sync_cache_with_source_tree()
    def get_folder(self, path, root=None):
        """Get the contents of an object tree folder."""
        return self._get_cache_folder_recursive(path, root)
def get_object_tree_folder_name(self, object_tree_folder):
return object_tree_folder["name"]
    def get_object_record(self, pid):
        """Get an object that has already been cached in the object tree.
        Caching happens when the object tree is refreshed.
        """
        try:
            return self._cache["records"][pid]
        except KeyError:
            # Unknown PIDs surface as a ONEDrive-level error, not KeyError.
            raise d1_onedrive.impl.onedrive_exceptions.ONEDriveException("Unknown PID")
    def get_object_record_with_sync(self, pid):
        """Get an object that may not currently be in the cache.
        If the object is not in the cache, an attempt is made to retrieve the record
        from a CN on the fly. If the object is found, it is cached before being returned
        to the user. This allows the object tree caching system to be used for objects
        that are not in the object tree. ONEDrive uses this functionality for the
        FlatSpace folder.
        """
        try:
            return self._cache["records"][pid]
        except KeyError:
            # Cache miss: fetch (and cache) the record on demand.
            return self._get_uncached_object_record(pid)
    def add_object_to_cache(self, pid):
        """Attempt to add a specific object to the cache.
        Objects are normally only added to the object tree during refresh. This method
        is used by the FlatSpace resolver.
        """
        # None folder => record goes into the "individually_synced" map.
        self._create_cache_item_for_pid(None, pid)
    def get_science_object(self, pid):
        """Fetch the science object bytes for *pid* via the DataONE client."""
        return self._d1_client.get_science_object(pid)
    def get_system_metadata(self, pid):
        """Fetch the system metadata for *pid* as a string via the DataONE client."""
        return self._d1_client.get_system_metadata_as_string(pid)
    def get_source_tree_folder(self, path):
        """Return the filtered source-tree subtree rooted at *path*."""
        return self._source_tree.get_filtered_sub_tree(path)
def _get_individually_synced_object_pids(self):
return list(self._cache["individually_synced"].keys())
#
# Private.
#
    def _create_cache(self):
        """Start from an empty cache, then overlay any pickled cache on disk."""
        self._init_cache()
        self._unpickle_cache_from_disk()
    def _init_cache(self):
        # Cache layout: folder tree, Solr records by PID, and the set of
        # PIDs cached outside of a tree refresh.
        self._cache = {"tree": {}, "records": {}, "individually_synced": {}}
    def _get_uncached_object_record(self, pid):
        """Fetch *pid* into the cache on demand, then return its record."""
        self._create_cache_item_for_pid(None, pid)
        try:
            return self._cache["records"][pid]
        except KeyError:
            # Still missing: the Solr lookup failed silently above.
            raise d1_onedrive.impl.onedrive_exceptions.ONEDriveException("Unknown PID")
    def _unpickle_cache_from_disk(self):
        """Best-effort load of the pickled cache; a missing or corrupt
        cache file is silently ignored (the empty cache stays in place)."""
        try:
            with open(self._options.object_tree_cache_path, "rb") as f:
                self._cache = pickle.load(f)
        except (IOError, pickle.PickleError):
            pass
    def _pickle_cache_to_disk(self):
        """Persist the in-memory cache to the configured cache path."""
        with open(self._options.object_tree_cache_path, "wb") as f:
            pickle.dump(self._cache, f)
    def sync_cache_with_source_tree(self):
        """Populate the cache from every folder in the filtered source tree."""
        for folder, path in self._source_tree.iterate_filtered_tree():
            self._add_filtered_tree_to_cache(folder, path)
def _add_filtered_tree_to_cache(self, filtered_tree, path):
    # Ensure the destination folder chain exists in the cache, then populate
    # it with the items (explicit PIDs and query results) of the source folder.
    cache_folder = self._get_or_create_cache_folder_recursive(path)
    self._create_cache_items(cache_folder, filtered_tree)
def _get_or_create_cache_folder_recursive(self, path, folder=None, rpath=None):
if folder is None:
folder = self._cache["tree"]
if rpath is None:
rpath = []
dirs = folder.setdefault("dirs", {})
if not path:
return folder
return self._get_or_create_cache_folder_recursive(
path[1:], dirs.setdefault(path[0], {"name": path[0]}), rpath + [path[0]]
)
def _create_cache_items(self, cache_folder, source_tree_folder):
    # Populate the folder's "items" map from both explicitly listed
    # identifiers and the Solr queries attached to the source tree folder.
    items = cache_folder.setdefault("items", {})
    self._create_cache_item_for_pids(items, source_tree_folder)
    self._create_cache_items_for_queries(items, source_tree_folder)
def _create_cache_item_for_pids(self, cache_folder, source_tree_folder):
    # One cache item per explicitly listed identifier of the source folder.
    for pid in source_tree_folder["identifiers"]:
        self._create_cache_item_for_pid(cache_folder, pid)
def _create_cache_item_for_pid(self, cache_folder, pid):
    """Create a cache item for *pid* from its Solr record.

    The source tree can contain identifiers that are no longer valid (or
    were never valid). Any items for which a Solr record cannot be
    retrieved are silently skipped.
    """
    try:
        record = self._solr_client.get_solr_record(pid)
    except d1_onedrive.impl.onedrive_exceptions.ONEDriveException:
        pass  # deliberate best-effort: skip unresolvable PIDs
    else:
        self._create_cache_item(cache_folder, record)
def _create_cache_items_for_queries(self, cache_folder, source_tree_folder):
    # One batch of cache items per Solr query attached to the source folder.
    for query in source_tree_folder["queries"]:
        self._create_cache_items_for_query(cache_folder, query)
def _create_cache_items_for_query(self, cache_folder, query):
    # Cache every record returned by the Solr query.
    records = self._solr_client.run_solr_query(query)
    for record in records:
        self._create_cache_item(cache_folder, record)
def _create_cache_item(self, cache_folder, record):
if cache_folder is not None:
cache_folder[record["id"]] = True
else:
self._cache["individually_synced"][record["id"]] = True
self._cache["records"][record["id"]] = record
def _get_cache_folder_recursive(self, path, folder=None):
logging.debug("path={}".format(path))
if folder is None:
folder = self._cache["tree"]
if not path:
return folder
try:
return self._get_cache_folder_recursive(path[1:], folder["dirs"][path[0]])
except KeyError:
raise d1_onedrive.impl.onedrive_exceptions.ONEDriveException("Invalid path")
| apache-2.0 |
VitalPet/odoo | addons/hr_attendance/report/timesheet.py | 53 | 6044 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
import time
from openerp import pooler, tools
from openerp.report import report_sxw
from openerp.report.interface import report_rml, toxml
from openerp.tools.translate import _
one_week = relativedelta(days=7)
num2day = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
def to_hour(h):
    """Split a fractional hour count into whole ``(hours, minutes)``.

    Minutes are rounded to the nearest integer and the rounding is carried
    into the hours, so the result never shows 60 minutes. The previous
    implementation rounded hours and minutes independently
    (``int(h), int(round((h - int(h)) * 60, 0))``) and could therefore
    produce e.g. ``(1, 60)`` for ``h`` just below 2.0.
    """
    return divmod(int(round(h * 60)), 60)
class report_custom(report_rml):
    """Old-style RML report producing per-employee weekly attendance XML.

    NOTE(review): the indentation of this block was lost in transit and has
    been reconstructed; the nesting below is the most plausible reading of
    the statement order — confirm against upstream before relying on it.
    """

    def create_xml(self, cr, uid, ids, datas, context=None):
        """Build the XML payload for the 'Attendances by Week' report.

        ``datas['form']`` supplies 'init_date'/'end_date' (YYYY-MM-DD) and
        ``datas['active_ids']`` lists the employees to report on.
        """
        obj_emp = pooler.get_pool(cr.dbname).get('hr.employee')
        emp_ids = datas['active_ids']
        start_date = datetime.strptime(datas['form']['init_date'], '%Y-%m-%d')
        end_date = datetime.strptime(datas['form']['end_date'], '%Y-%m-%d')
        # Snap the requested range outward to whole Monday-to-Monday weeks.
        first_monday = start_date - relativedelta(days=start_date.date().weekday())
        last_monday = end_date + relativedelta(days=7 - end_date.date().weekday())
        if last_monday < first_monday:
            first_monday, last_monday = last_monday, first_monday
        rpt_obj = pooler.get_pool(cr.dbname).get('hr.employee')
        rml_obj=report_sxw.rml_parse(cr, uid, rpt_obj._name,context)
        # Report header: print timestamp plus the current user's company.
        header_xml = '''
        <header>
        <date>%s</date>
        <company>%s</company>
        </header>
        ''' % (str(rml_obj.formatLang(time.strftime("%Y-%m-%d"),date=True))+' ' + str(time.strftime("%H:%M")),pooler.get_pool(cr.dbname).get('res.users').browse(cr,uid,uid).company_id.name)
        user_xml = []
        for employee_id in emp_ids:
            emp = obj_emp.read(cr, uid, [employee_id], ['id', 'name'])[0]
            monday, n_monday = first_monday, first_monday + one_week
            stop, week_xml = False, []  # NOTE(review): 'stop' is never used below
            user_repr = '''
            <user>
            <name>%s</name>
            %%s
            </user>
            ''' % tools.ustr(toxml(emp['name']))
            while monday != last_monday:
                #### Work hour calculation
                sql = '''
                select action, att.name
                from hr_employee as emp inner join hr_attendance as att
                on emp.id = att.employee_id
                where att.name between %s and %s and emp.id = %s
                order by att.name
                '''
                # NOTE(review): one query per day index with a window growing
                # from [monday, +1d) to [monday, +7d); only the last,
                # full-week result set survives the loop.
                for idx in range(7):
                    cr.execute(sql, (monday.strftime('%Y-%m-%d %H:%M:%S'), (monday + relativedelta(days=idx+1)).strftime('%Y-%m-%d %H:%M:%S'), employee_id))
                    attendances = cr.dictfetchall()
                    week_wh = {}
                    # Fake sign ins/outs at week ends, to take attendances across week ends into account
                    # XXX this is wrong for the first sign-in ever and the last sign out to this date
                    if attendances and attendances[0]['action'] == 'sign_out':
                        attendances.insert(0, {'name': monday.strftime('%Y-%m-%d %H:%M:%S'), 'action': 'sign_in'})
                    if attendances and attendances[-1]['action'] == 'sign_in':
                        attendances.append({'name': n_monday.strftime('%Y-%m-%d %H:%M:%S'), 'action': 'sign_out'})
                    # sum up the attendances' durations, keyed by weekday (0=Monday)
                    ldt = None
                    for att in attendances:
                        dt = datetime.strptime(att['name'], '%Y-%m-%d %H:%M:%S')
                        if ldt and att['action'] == 'sign_out':
                            week_wh[ldt.date().weekday()] = week_wh.get(ldt.date().weekday(), 0) + (float((dt - ldt).seconds)/3600)
                        else:
                            ldt = dt
                # Week xml representation
                week_repr = ['<week>', '<weekstart>%s</weekstart>' % monday.strftime('%Y-%m-%d'), '<weekend>%s</weekend>' % (n_monday - relativedelta(days=1)).strftime('%Y-%m-%d')]
                for idx in range(7):
                    week_repr.append('<%s>' % num2day[idx])
                    if idx in week_wh:
                        week_repr.append('<workhours>%sh%02d</workhours>' % to_hour(week_wh[idx]))
                    week_repr.append('</%s>' % num2day[idx])
                week_repr.append('<total>')
                # Weekly grand total (py2 builtin 'reduce').
                week_repr.append('<worked>%sh%02d</worked>' % to_hour(reduce(lambda x,y:x+y, week_wh.values(), 0)))
                week_repr.append('</total>')
                week_repr.append('</week>')
                week_xml.append('\n'.join(week_repr))
                monday, n_monday = n_monday, n_monday + one_week
            user_xml.append(user_repr % '\n'.join(week_xml))
        xml = '''<?xml version="1.0" encoding="UTF-8" ?>
        <report>
        %s
        <title>%s</title>
        %s
        </report>
        ''' % (header_xml,_('Attendances by Week'),'\n'.join(user_xml))
        xml = tools.ustr(xml).encode('utf8')
        return self.post_process_xml_data(cr, uid, xml, context)
# Instantiating a report_rml subclass presumably registers the report with
# the engine under the given service name; the instance is not kept — TODO
# confirm against the report_rml base class.
report_custom('report.hr.attendance.allweeks', 'hr.employee', '', 'addons/hr_attendance/report/timesheet.xsl')
# vim:noexpandtab:tw=0
| agpl-3.0 |
madphysicist/numpy | numpy/core/umath.py | 17 | 2040 | """
Create the numpy.core.umath namespace for backward compatibility. In v1.16
the multiarray and umath c-extension modules were merged into a single
_multiarray_umath extension module. So we replicate the old namespace
by importing from the extension module.
"""
from . import _multiarray_umath
from ._multiarray_umath import * # noqa: F403
# These imports are needed for backward compatibility,
# do not change them. issue gh-11862
# _ones_like is semi-public, on purpose not added to __all__
from ._multiarray_umath import _UFUNC_API, _add_newdoc_ufunc, _ones_like
# Explicit public API of numpy.core.umath. Every name here is re-exported
# from the merged _multiarray_umath C-extension via the wildcard import above.
__all__ = [
    '_UFUNC_API', 'ERR_CALL', 'ERR_DEFAULT', 'ERR_IGNORE', 'ERR_LOG',
    'ERR_PRINT', 'ERR_RAISE', 'ERR_WARN', 'FLOATING_POINT_SUPPORT',
    'FPE_DIVIDEBYZERO', 'FPE_INVALID', 'FPE_OVERFLOW', 'FPE_UNDERFLOW', 'NAN',
    'NINF', 'NZERO', 'PINF', 'PZERO', 'SHIFT_DIVIDEBYZERO', 'SHIFT_INVALID',
    'SHIFT_OVERFLOW', 'SHIFT_UNDERFLOW', 'UFUNC_BUFSIZE_DEFAULT',
    'UFUNC_PYVALS_NAME', '_add_newdoc_ufunc', 'absolute', 'add',
    'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh',
    'bitwise_and', 'bitwise_or', 'bitwise_xor', 'cbrt', 'ceil', 'conj',
    'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad', 'degrees', 'divide',
    'divmod', 'e', 'equal', 'euler_gamma', 'exp', 'exp2', 'expm1', 'fabs',
    'floor', 'floor_divide', 'float_power', 'fmax', 'fmin', 'fmod', 'frexp',
    'frompyfunc', 'gcd', 'geterrobj', 'greater', 'greater_equal', 'heaviside',
    'hypot', 'invert', 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp',
    'left_shift', 'less', 'less_equal', 'log', 'log10', 'log1p', 'log2',
    'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or',
    'logical_xor', 'maximum', 'minimum', 'mod', 'modf', 'multiply', 'negative',
    'nextafter', 'not_equal', 'pi', 'positive', 'power', 'rad2deg', 'radians',
    'reciprocal', 'remainder', 'right_shift', 'rint', 'seterrobj', 'sign',
    'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan',
    'tanh', 'true_divide', 'trunc']
| bsd-3-clause |
dya2/python-for-android | python3-alpha/python3-src/Lib/plat-os2emx/SOCKET.py | 134 | 1804 | # Generated by h2py from f:/emx/include/sys/socket.h
# Included from sys/types.h
# NOTE: this module is machine-generated (h2py over the EMX socket.h);
# do not hand-edit values — regenerate instead.
FD_SETSIZE = 256
# Included from sys/uio.h
FREAD = 1
FWRITE = 2

# Socket types.
SOCK_STREAM = 1
SOCK_DGRAM = 2
SOCK_RAW = 3
SOCK_RDM = 4
SOCK_SEQPACKET = 5

# Socket-level options (used with SOL_SOCKET in set/getsockopt).
SO_DEBUG = 0x0001
SO_ACCEPTCONN = 0x0002
SO_REUSEADDR = 0x0004
SO_KEEPALIVE = 0x0008
SO_DONTROUTE = 0x0010
SO_BROADCAST = 0x0020
SO_USELOOPBACK = 0x0040
SO_LINGER = 0x0080
SO_OOBINLINE = 0x0100
SO_L_BROADCAST = 0x0200
SO_RCV_SHUTDOWN = 0x0400
SO_SND_SHUTDOWN = 0x0800
SO_SNDBUF = 0x1001
SO_RCVBUF = 0x1002
SO_SNDLOWAT = 0x1003
SO_RCVLOWAT = 0x1004
SO_SNDTIMEO = 0x1005
SO_RCVTIMEO = 0x1006
SO_ERROR = 0x1007
SO_TYPE = 0x1008
SO_OPTIONS = 0x1010
SOL_SOCKET = 0xffff

# Address families.
AF_UNSPEC = 0
AF_UNIX = 1
AF_INET = 2
AF_IMPLINK = 3
AF_PUP = 4
AF_CHAOS = 5
AF_NS = 6
AF_NBS = 7
AF_ISO = 7
AF_OSI = AF_ISO
AF_ECMA = 8
AF_DATAKIT = 9
AF_CCITT = 10
AF_SNA = 11
AF_DECnet = 12
AF_DLI = 13
AF_LAT = 14
AF_HYLINK = 15
AF_APPLETALK = 16
AF_NB = 17
AF_NETBIOS = AF_NB
AF_OS2 = AF_UNIX
AF_MAX = 18

# Protocol families mirror the address families one-to-one.
PF_UNSPEC = AF_UNSPEC
PF_UNIX = AF_UNIX
PF_INET = AF_INET
PF_IMPLINK = AF_IMPLINK
PF_PUP = AF_PUP
PF_CHAOS = AF_CHAOS
PF_NS = AF_NS
PF_NBS = AF_NBS
PF_ISO = AF_ISO
PF_OSI = AF_ISO
PF_ECMA = AF_ECMA
PF_DATAKIT = AF_DATAKIT
PF_CCITT = AF_CCITT
PF_SNA = AF_SNA
PF_DECnet = AF_DECnet
PF_DLI = AF_DLI
PF_LAT = AF_LAT
PF_HYLINK = AF_HYLINK
PF_APPLETALK = AF_APPLETALK
PF_NB = AF_NB
PF_NETBIOS = AF_NB
PF_OS2 = AF_UNIX
PF_MAX = AF_MAX

SOMAXCONN = 5

# send()/recv() flag bits.
MSG_OOB = 0x1
MSG_PEEK = 0x2
MSG_DONTROUTE = 0x4
MSG_EOR = 0x8
MSG_TRUNC = 0x10
MSG_CTRUNC = 0x20
MSG_WAITALL = 0x40
MSG_MAXIOVLEN = 16
SCM_RIGHTS = 0x01

# mbuf type codes.
MT_FREE = 0
MT_DATA = 1
MT_HEADER = 2
MT_SOCKET = 3
MT_PCB = 4
MT_RTABLE = 5
MT_HTABLE = 6
MT_ATABLE = 7
MT_SONAME = 8
MT_ZOMBIE = 9
MT_SOOPTS = 10
MT_FTABLE = 11
MT_RIGHTS = 12
MT_IFADDR = 13
MAXSOCKETS = 2048
| apache-2.0 |
cosmoharrigan/opencog | opencog/python/utility/evolutionary.py | 34 | 9725 | __author__ = 'keyvan'
from random import randrange, random as rand
def new_offspring(parent):
    """Instantiate a child of the same concrete class as *parent*."""
    individual_cls = type(parent)
    return individual_cls(parent=parent)
def new_individual(population):
    """Instantiate a fresh individual of the population's configured type."""
    individual_cls = population.type_of_individuals
    return individual_cls(population=population)
class IndividualBase(object):
    """Base class for GA individuals.

    An individual is a collection of genes, each addressable through a
    'locus' (for a dict-backed individual the locus is the key; for a
    list-backed one it is the index). Subclasses implement the dunder
    hooks in the "User of this code" section below.
    """
    #############################################################
    ## User of this code should implement the following.
    ## Refer to examples/genetic_algorithm for demo
    loci = None                        # iterable of valid loci (subclass-defined)
    _out_of_date_fitness_value = True  # True => fitness must be recomputed

    def __init__(self, **kwargs):
        """
        Do not override __init___
        instead, override __init_normal___ and/or __init_by_parent__
        """
        # Two construction paths: from a parent (offspring) or from a
        # population (fresh individual). Both copy the population's shared
        # attributes directly into the instance dict.
        if 'parent' in kwargs:
            parent = kwargs['parent']
            self.__dict__.update(parent.population.common_attributes)
            self.population = parent.population
            self.__init_by_parent__(parent)
        elif 'population' in kwargs:
            self.population = kwargs['population']
            self.__dict__.update(self.population.common_attributes)
            self.__init_normal__()

    def __fitness__(self):
        # Subclass hook: compute and return this individual's fitness.
        pass

    def __mutate__(self):
        """
        return an offspring with a mutated gene
        """
        pass

    def __crossover__(self, other):
        """
        return an offspring
        """
        return self.fitness_proportionate_crossover(other)

    # implement getitem and setitem if you're not using an
    # standard structure. If list, set or dict satisfy you're
    # needs, you can subclass from IndividualListBase,
    # IndividualSetBase or IndividualDictBase, respectively.
    # def __getitem__(self, key):
    #     pass
    #
    # def __setitem__(self, key, value):
    #     pass

    def __init_by_parent__(self, parent):
        # Subclass hook: initialize state derived from *parent*.
        pass

    def __init_normal__(self):
        # Subclass hook: initialize a fresh (parentless) individual.
        pass
    #############################################################

    @property
    def fitness(self):
        # Memoized: recomputed only after mutate() invalidates the cache.
        if self._out_of_date_fitness_value:
            self._fitness = self.__fitness__()
            self._out_of_date_fitness_value = False
        return self._fitness

    def mutate(self):
        # Invalidate the cached fitness, then delegate to the subclass hook.
        self._out_of_date_fitness_value = True
        return self.__mutate__()

    def __add__(self, other): # + operator does crossover
        return self.__crossover__(other)

    def __cmp__(self, other):
        # Python 2 only. Sorts DESCENDING by fitness (fittest first), which
        # Population.__selection__ relies on when picking the fitter half.
        return other.fitness - self.fitness
        # return self.fitness - other.fitness

    #############################################################
    ## Predefined crossover methods
    def proportionate_crossover(self, other, self_share):
        # Each locus from either parent is taken from self with probability
        # self_share (loci absent from the chosen parent are skipped).
        offspring = new_offspring(self)
        for locus in set(self.loci) | set(other.loci):
            if rand() < self_share:
                if locus not in self.loci:
                    continue
                offspring[locus] = self[locus]
            else:
                if locus not in other.loci:
                    continue
                offspring[locus] = other[locus]
        return offspring

    def uniform_crossover(self, other):
        # 50/50 gene mix of both parents.
        return self.proportionate_crossover(other, 0.5)

    def fitness_proportionate_crossover(self, other):
        # The fitter parent contributes proportionally more genes; the
        # zero/zero guard avoids a division by zero.
        if self.fitness == 0 and other.fitness == 0:
            self_share = 0.5
        else:
            self_share = float(self.fitness) / (self.fitness + other.fitness)
        return self.proportionate_crossover(other, self_share)

    def one_point_crossover(self, other, point_index):
        pass # TODO

    def two_point_crossover(self, other, first_point_index = None,
                            second_point_index = None):
        pass # TODO
    #############################################################
class Population(object):
    """Holds the current and the in-construction next generation of
    individuals, plus the selection strategy used by the GA."""

    def __init__(self, type_of_individuals, number_of_individuals=0,
                 **common_attributes_between_individuals):
        # common_attributes are copied into every individual's __dict__ by
        # IndividualBase.__init__.
        self.common_attributes = common_attributes_between_individuals
        self.current_generation = []
        self.next_generation = []
        self.type_of_individuals = type_of_individuals
        self.generation_count = 0
        self.add_many(number_of_individuals)

    def __selection__(self):
        """
        Override this method to control selection behaviour.
        Select method should return one individual.
        The default implementation returns an individual from the
        fitter half of the population. Population is sorted in
        the entry point of this method.
        """
        # NOTE: relies on __cmp__ sorting fittest-first; the integer '/'
        # division makes this Python 2 only.
        return self.current_generation[randrange(0, len(self.current_generation)/2)]

    def __crossover_selection__(self):
        """
        Override if your crossover selection method is
        different from your mutation selection.
        """
        return self.__selection__(), self.__selection__()

    def select_for_mutation(self):
        return self.__selection__()

    def select_for_crossover(self):
        return self.__crossover_selection__()

    def add_many(self, quantity):
        # Create *quantity* fresh individuals in the current generation.
        for _ in range(quantity):
            individual = new_individual(self)
            self.add_to_current_generation(individual)

    def add_to_current_generation(self, individual):
        self.current_generation.append(individual)

    def add_to_next_generation(self, offspring):
        self.next_generation.append(offspring)

    def switch_to_next_generation(self):
        # Promote the offspring generation and start collecting a new one.
        self.current_generation = self.next_generation
        self.next_generation = []
        # NOTE(review): _sorted is set here but never initialized or read
        # elsewhere in this module — appears vestigial.
        self._sorted = False
        self.generation_count += 1

    def sort(self):
        # Fittest-first ordering (IndividualBase.__cmp__ sorts descending).
        self.current_generation.sort()

    def __len__(self):
        return len(self.current_generation)

    def __getitem__(self, index):
        return self.current_generation[index]
class GeneticAlgorithm(object):
    """Drives the evolve loop over a Population.

    Python 2 only: uses dict.has_key() and print statements.
    """
    # Re-sort the population before every selection inside step().
    sort_population_each_step = True

    def __init__(self, **kwargs):
        """
        Two ways for initialising:
        1) giving the population by passing 'population' parameter
        2) specifying 'type_of_individuals' and 'type_of_individuals'
        """
        self.__dict__.update(kwargs)
        if not self.__dict__.has_key('population'):
            if not self.__dict__.has_key('type_of_individuals')\
            or not self.__dict__.has_key('number_of_individuals'):
                raise ValueError('since population is not specified,'
                                 ' type_of_individuals and number_of_individuals'
                                 ' should be present')
            self.population = Population(self.type_of_individuals, self.number_of_individuals)
        self.highest_fitness_found = 0

    def step(self, mutation_rate=1, crossover_rate = 1,
             number_of_individuals=0):
        """Produce one full next generation; return its fittest individual.

        With probability *crossover_rate* an offspring is bred from two
        selected parents, otherwise an existing individual is re-selected;
        with probability *mutation_rate* the offspring is then mutated.
        """
        highest_fitness_this_generation = 0
        fittest_individual_this_generation = None
        if number_of_individuals <= 0:
            number_of_individuals = len(self.population)
        while len(self.population.next_generation) < number_of_individuals:
            if self.sort_population_each_step:
                self.population.sort()
            # '0 < rate > rand()' means: rate is positive AND rand() < rate.
            if 0 < crossover_rate > rand():
                parent, other_parent = self.population.select_for_crossover()
                offspring = parent + other_parent # crossover
            else:
                offspring = self.population.select_for_mutation()
            if 0 < mutation_rate > rand():
                offspring = offspring.mutate()
            self.population.add_to_next_generation(offspring)
            # Track the best offspring of this generation.
            if offspring.fitness > highest_fitness_this_generation:
                fittest_individual_this_generation = offspring
                highest_fitness_this_generation = offspring.fitness
        # Update the all-time best before promoting the new generation.
        if highest_fitness_this_generation > self.highest_fitness_found:
            self.highest_fitness_found = fittest_individual_this_generation.fitness
            self.fittest_individual_found = fittest_individual_this_generation
        self.population.switch_to_next_generation()
        return fittest_individual_this_generation

    def run(self, show_population_each_generation=True):
        # Evolve forever, optionally dumping the whole population each
        # generation (Python 2 print statements).
        while True:
            if show_population_each_generation:
                print '#################### Generation ' +\
                      str(self.population.generation_count) +\
                      ' ####################'
                for individual in self.population:
                    print individual
            print 'Fittest:', str(self.step())
class IndividualListBase(IndividualBase, list):
    """List-backed individual: genes are the elements, loci their indices."""

    @property
    def loci(self):
        # Python 2: range() returns the list of all valid indices.
        return range(len(self))
class IndividualDictBase(IndividualBase, dict):
    """Dict-backed individual: loci are the keys, genes the values."""

    @property
    def loci(self):
        # Iterating a dict yields its keys, which serve as the loci.
        return self
class IndividualSetBase(IndividualBase, set):
    """Set-backed individual: each gene is its own locus (locus == gene)."""

    @property
    def loci(self):
        # Iterating the loci of a set individual is iterating the set itself.
        return self

    def __getitem__(self, item):
        # Locus and gene coincide for sets.
        return item

    def __setitem__(self, key, value):
        # Replace gene *key* with *value*. discard() instead of remove():
        # proportionate_crossover assigns loci that the freshly created
        # (empty) offspring does not contain yet, and remove() would raise
        # KeyError for those, crashing every crossover of set individuals.
        self.discard(key)
        self.add(value)
#class NoneEpistaticGeneticAlgorithm(GeneticAlgorithm):
#
# fitness_unit = 1
#
# class _contribution_dict(dict):
# def __getitem__(self, item):
# if item not in self:
# return NoneEpistaticGeneticAlgorithm.fitness_unit
# return dict.__getitem__(self, item)
#
# fitness_contribution_by_locus = _contribution_dict()
#
# def __init__(self, type_of_individuals, number_of_individuals):
# self.population = _none_epistatic_population(type_of_individuals, number_of_individuals)
# self.population.a
#
# def step(self, mutation_rate=1, crossover_rate = 1,
# number_of_individuals=0):
# pass
| agpl-3.0 |
vijeth-aradhya/coala-bears | bears/c_languages/codeclone_detection/ClangCloneDetectionBear.py | 23 | 1992 | from bears.c_languages.ClangBear import clang_available, ClangBear
from bears.c_languages.codeclone_detection.ClangFunctionDifferenceBear import (
ClangFunctionDifferenceBear)
from coalib.bears.GlobalBear import GlobalBear
from coalib.results.Result import Result
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
class ClangCloneDetectionBear(GlobalBear):
    """Reports pairs of functions that are similar enough to be code clones."""

    # Bear only activates when the libclang bindings are importable.
    check_prerequisites = classmethod(clang_available)
    LANGUAGES = ClangBear.LANGUAGES
    REQUIREMENTS = ClangBear.REQUIREMENTS
    CAN_DETECT = {'Duplication'}
    # The expensive pairwise function comparison is delegated to this bear.
    BEAR_DEPS = {ClangFunctionDifferenceBear}

    def run(self,
            dependency_results: dict,
            max_clone_difference: float=0.185):
        '''
        Checks the given code for similar functions that are probably
        redundant.

        :param dependency_results:   Results of ClangFunctionDifferenceBear:
                                     index 0 holds the pairwise function
                                     differences, index 1 the count matrices
                                     they were computed from.
        :param max_clone_difference: The maximum difference a clone should
                                     have.
        '''
        differences = dependency_results[
            ClangFunctionDifferenceBear.__name__][0].contents
        count_matrices = dependency_results[
            ClangFunctionDifferenceBear.__name__][1].contents

        self.debug('Creating results...')
        for function_1, function_2, difference in differences:
            if difference < max_clone_difference:
                # Function tuples carry (file, line, name), as used by the
                # message formatting below.
                yield Result.from_values(
                    self,
                    'Code clone found. The other occurrence is at file '
                    '{file}, line {line}, function {function}. The '
                    'difference is {difference}%.'.format(
                        file=function_2[0],
                        line=function_2[1],
                        function=function_2[2],
                        difference=difference),
                    file=function_1[0],
                    severity=RESULT_SEVERITY.MAJOR,
                    line=function_1[1],
                    # Attach both count matrices to ease debugging of the
                    # difference computation.
                    debug_msg=[count_matrices[function_1],
                               count_matrices[function_2]])
Wafflespeanut/servo | tests/wpt/web-platform-tests/old-tests/webdriver/navigation/auth_tests.py | 141 | 1389 | import os
import sys
import unittest
import ConfigParser
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
from selenium.common import exceptions
from wptserve import server
from wptserve.router import any_method
from wptserve.handlers import basic_auth_handler
class WebDriverAuthTest(unittest.TestCase):
    """Checks that the browser still answers WebDriver commands when a page
    responds with an HTTP 401 Basic-auth challenge."""

    # Set up class to start HTTP Server that responds to
    # test URLs with various 401 responses
    @classmethod
    def setUpClass(cls):
        cls.driver = base_test.create_driver()
        cls.webserver = server.WebTestHttpd(routes=[(any_method, "*", basic_auth_handler)])
        cls.webserver.start()

    @classmethod
    def tearDownClass(cls):
        cls.driver.quit()
        cls.webserver.stop()

    # Test that when 401 is seen by browser, a WebDriver response is still sent
    def test_response_401_auth_basic(self):
        page = self.webserver.get_url('navigation/res/authenticated.html')
        self.driver.set_page_load_timeout(5)
        try:
            self.driver.get(page)
            # if we got a response instead of a timeout, that's success
            self.assertTrue(True)
        except exceptions.TimeoutException:
            self.fail("Did not get response from browser.")
        except Exception:
            # Was a bare 'except:', which would also swallow SystemExit and
            # KeyboardInterrupt; only genuine errors should reach fail().
            self.fail("Unexpected failure. Please investigate.")
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| mpl-2.0 |
bhargav2408/python-for-android | python3-alpha/python3-src/Lib/test/test_unicode.py | 46 | 70367 | """ Test script for the Unicode implementation.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
import struct
import sys
import unittest
import warnings
from test import support, string_tests
import _string
# Error handling (bad decoder return)
def search_function(encoding):
    """Codec search hook exposing two deliberately broken test codecs.

    Both codecs violate the codec API contract (bad decoder/encoder return
    values) so the error-handling paths of the Unicode implementation can be
    exercised. Unknown encodings yield None, as the codec registry expects.
    """
    def bad_scalar(input, errors="strict"):
        return 42  # not a tuple
    def bad_pair(input, errors="strict"):
        return (42, 42)  # a tuple, but with no str/bytes payload
    if encoding == "test.unicode1":
        return (bad_scalar, bad_scalar, None, None)
    if encoding == "test.unicode2":
        return (bad_pair, bad_pair, None, None)
    return None
codecs.register(search_function)
class UnicodeTest(string_tests.CommonTest,
string_tests.MixinStrUnicodeUserStringTest,
string_tests.MixinStrUnicodeTest):
type2test = str
def checkequalnofix(self, result, object, methodname, *args):
method = getattr(object, methodname)
realresult = method(*args)
self.assertEqual(realresult, result)
self.assertTrue(type(realresult) is type(result))
# if the original is returned make sure that
# this doesn't happen with subclasses
if realresult is object:
class usub(str):
def __repr__(self):
return 'usub(%r)' % str.__repr__(self)
object = usub(object)
method = getattr(object, methodname)
realresult = method(*args)
self.assertEqual(realresult, result)
self.assertTrue(object is not realresult)
def test_literals(self):
self.assertEqual('\xff', '\u00ff')
self.assertEqual('\uffff', '\U0000ffff')
self.assertRaises(SyntaxError, eval, '\'\\Ufffffffe\'')
self.assertRaises(SyntaxError, eval, '\'\\Uffffffff\'')
self.assertRaises(SyntaxError, eval, '\'\\U%08x\'' % 0x110000)
# raw strings should not have unicode escapes
self.assertNotEqual(r"\u0020", " ")
def test_ascii(self):
if not sys.platform.startswith('java'):
# Test basic sanity of repr()
self.assertEqual(ascii('abc'), "'abc'")
self.assertEqual(ascii('ab\\c'), "'ab\\\\c'")
self.assertEqual(ascii('ab\\'), "'ab\\\\'")
self.assertEqual(ascii('\\c'), "'\\\\c'")
self.assertEqual(ascii('\\'), "'\\\\'")
self.assertEqual(ascii('\n'), "'\\n'")
self.assertEqual(ascii('\r'), "'\\r'")
self.assertEqual(ascii('\t'), "'\\t'")
self.assertEqual(ascii('\b'), "'\\x08'")
self.assertEqual(ascii("'\""), """'\\'"'""")
self.assertEqual(ascii("'\""), """'\\'"'""")
self.assertEqual(ascii("'"), '''"'"''')
self.assertEqual(ascii('"'), """'"'""")
latin1repr = (
"'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r"
"\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a"
"\\x1b\\x1c\\x1d\\x1e\\x1f !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHI"
"JKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f"
"\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d"
"\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b"
"\\x9c\\x9d\\x9e\\x9f\\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\\xa8\\xa9"
"\\xaa\\xab\\xac\\xad\\xae\\xaf\\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7"
"\\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\\xc0\\xc1\\xc2\\xc3\\xc4\\xc5"
"\\xc6\\xc7\\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\\xd0\\xd1\\xd2\\xd3"
"\\xd4\\xd5\\xd6\\xd7\\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\\xe0\\xe1"
"\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef"
"\\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\\xf8\\xf9\\xfa\\xfb\\xfc\\xfd"
"\\xfe\\xff'")
testrepr = ascii(''.join(map(chr, range(256))))
self.assertEqual(testrepr, latin1repr)
# Test ascii works on wide unicode escapes without overflow.
self.assertEqual(ascii("\U00010000" * 39 + "\uffff" * 4096),
ascii("\U00010000" * 39 + "\uffff" * 4096))
class WrongRepr:
def __repr__(self):
return b'byte-repr'
self.assertRaises(TypeError, ascii, WrongRepr())
def test_repr(self):
if not sys.platform.startswith('java'):
# Test basic sanity of repr()
self.assertEqual(repr('abc'), "'abc'")
self.assertEqual(repr('ab\\c'), "'ab\\\\c'")
self.assertEqual(repr('ab\\'), "'ab\\\\'")
self.assertEqual(repr('\\c'), "'\\\\c'")
self.assertEqual(repr('\\'), "'\\\\'")
self.assertEqual(repr('\n'), "'\\n'")
self.assertEqual(repr('\r'), "'\\r'")
self.assertEqual(repr('\t'), "'\\t'")
self.assertEqual(repr('\b'), "'\\x08'")
self.assertEqual(repr("'\""), """'\\'"'""")
self.assertEqual(repr("'\""), """'\\'"'""")
self.assertEqual(repr("'"), '''"'"''')
self.assertEqual(repr('"'), """'"'""")
latin1repr = (
"'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r"
"\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a"
"\\x1b\\x1c\\x1d\\x1e\\x1f !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHI"
"JKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f"
"\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d"
"\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b"
"\\x9c\\x9d\\x9e\\x9f\\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9"
"\xaa\xab\xac\\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
"\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5"
"\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3"
"\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1"
"\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd"
"\xfe\xff'")
testrepr = repr(''.join(map(chr, range(256))))
self.assertEqual(testrepr, latin1repr)
# Test repr works on wide unicode escapes without overflow.
self.assertEqual(repr("\U00010000" * 39 + "\uffff" * 4096),
repr("\U00010000" * 39 + "\uffff" * 4096))
class WrongRepr:
def __repr__(self):
return b'byte-repr'
self.assertRaises(TypeError, repr, WrongRepr())
def test_iterators(self):
# Make sure unicode objects have an __iter__ method
it = "\u1111\u2222\u3333".__iter__()
self.assertEqual(next(it), "\u1111")
self.assertEqual(next(it), "\u2222")
self.assertEqual(next(it), "\u3333")
self.assertRaises(StopIteration, next, it)
def test_count(self):
string_tests.CommonTest.test_count(self)
# check mixed argument types
self.checkequalnofix(3, 'aaa', 'count', 'a')
self.checkequalnofix(0, 'aaa', 'count', 'b')
self.checkequalnofix(3, 'aaa', 'count', 'a')
self.checkequalnofix(0, 'aaa', 'count', 'b')
self.checkequalnofix(0, 'aaa', 'count', 'b')
self.checkequalnofix(1, 'aaa', 'count', 'a', -1)
self.checkequalnofix(3, 'aaa', 'count', 'a', -10)
self.checkequalnofix(2, 'aaa', 'count', 'a', 0, -1)
self.checkequalnofix(0, 'aaa', 'count', 'a', 0, -10)
def test_find(self):
self.checkequalnofix(0, 'abcdefghiabc', 'find', 'abc')
self.checkequalnofix(9, 'abcdefghiabc', 'find', 'abc', 1)
self.checkequalnofix(-1, 'abcdefghiabc', 'find', 'def', 4)
self.assertRaises(TypeError, 'hello'.find)
self.assertRaises(TypeError, 'hello'.find, 42)
def test_rfind(self):
string_tests.CommonTest.test_rfind(self)
# check mixed argument types
self.checkequalnofix(9, 'abcdefghiabc', 'rfind', 'abc')
self.checkequalnofix(12, 'abcdefghiabc', 'rfind', '')
self.checkequalnofix(12, 'abcdefghiabc', 'rfind', '')
def test_index(self):
string_tests.CommonTest.test_index(self)
self.checkequalnofix(0, 'abcdefghiabc', 'index', '')
self.checkequalnofix(3, 'abcdefghiabc', 'index', 'def')
self.checkequalnofix(0, 'abcdefghiabc', 'index', 'abc')
self.checkequalnofix(9, 'abcdefghiabc', 'index', 'abc', 1)
self.assertRaises(ValueError, 'abcdefghiabc'.index, 'hib')
self.assertRaises(ValueError, 'abcdefghiab'.index, 'abc', 1)
self.assertRaises(ValueError, 'abcdefghi'.index, 'ghi', 8)
self.assertRaises(ValueError, 'abcdefghi'.index, 'ghi', -1)
def test_rindex(self):
string_tests.CommonTest.test_rindex(self)
self.checkequalnofix(12, 'abcdefghiabc', 'rindex', '')
self.checkequalnofix(3, 'abcdefghiabc', 'rindex', 'def')
self.checkequalnofix(9, 'abcdefghiabc', 'rindex', 'abc')
self.checkequalnofix(0, 'abcdefghiabc', 'rindex', 'abc', 0, -1)
self.assertRaises(ValueError, 'abcdefghiabc'.rindex, 'hib')
self.assertRaises(ValueError, 'defghiabc'.rindex, 'def', 1)
self.assertRaises(ValueError, 'defghiabc'.rindex, 'abc', 0, -1)
self.assertRaises(ValueError, 'abcdefghi'.rindex, 'ghi', 0, 8)
self.assertRaises(ValueError, 'abcdefghi'.rindex, 'ghi', 0, -1)
def test_maketrans_translate(self):
# these work with plain translate()
self.checkequalnofix('bbbc', 'abababc', 'translate',
{ord('a'): None})
self.checkequalnofix('iiic', 'abababc', 'translate',
{ord('a'): None, ord('b'): ord('i')})
self.checkequalnofix('iiix', 'abababc', 'translate',
{ord('a'): None, ord('b'): ord('i'), ord('c'): 'x'})
self.checkequalnofix('c', 'abababc', 'translate',
{ord('a'): None, ord('b'): ''})
self.checkequalnofix('xyyx', 'xzx', 'translate',
{ord('z'): 'yy'})
# this needs maketrans()
self.checkequalnofix('abababc', 'abababc', 'translate',
{'b': '<i>'})
tbl = self.type2test.maketrans({'a': None, 'b': '<i>'})
self.checkequalnofix('<i><i><i>c', 'abababc', 'translate', tbl)
# test alternative way of calling maketrans()
tbl = self.type2test.maketrans('abc', 'xyz', 'd')
self.checkequalnofix('xyzzy', 'abdcdcbdddd', 'translate', tbl)
self.assertRaises(TypeError, self.type2test.maketrans)
self.assertRaises(ValueError, self.type2test.maketrans, 'abc', 'defg')
self.assertRaises(TypeError, self.type2test.maketrans, 2, 'def')
self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 2)
self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 'def', 2)
self.assertRaises(ValueError, self.type2test.maketrans, {'xy': 2})
self.assertRaises(TypeError, self.type2test.maketrans, {(1,): 2})
self.assertRaises(TypeError, 'hello'.translate)
self.assertRaises(TypeError, 'abababc'.translate, 'abc', 'xyz')
def test_split(self):
string_tests.CommonTest.test_split(self)
# Mixed arguments
self.checkequalnofix(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//')
self.checkequalnofix(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//')
self.checkequalnofix(['endcase ', ''], 'endcase test', 'split', 'test')
    def test_join(self):
        """str.join over lists, tuples and sequences; non-str items must fail."""
        string_tests.MixinStrUnicodeUserStringTest.test_join(self)
        # An object that is str()-able but not a str: join() must still reject it.
        class MyWrapper:
            def __init__(self, sval): self.sval = sval
            def __str__(self): return self.sval
        # mixed arguments
        self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
        self.checkequalnofix('abcd', '', 'join', ('a', 'b', 'c', 'd'))
        self.checkequalnofix('w x y z', ' ', 'join', string_tests.Sequence('wxyz'))
        self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
        self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
        self.checkequalnofix('abcd', '', 'join', ('a', 'b', 'c', 'd'))
        self.checkequalnofix('w x y z', ' ', 'join', string_tests.Sequence('wxyz'))
        # join() never falls back to str() on its items: TypeError expected.
        self.checkraises(TypeError, ' ', 'join', ['1', '2', MyWrapper('foo')])
        self.checkraises(TypeError, ' ', 'join', ['1', '2', '3', bytes()])
        self.checkraises(TypeError, ' ', 'join', [1, 2, 3])
        self.checkraises(TypeError, ' ', 'join', ['1', '2', 3])
def test_replace(self):
string_tests.CommonTest.test_replace(self)
# method call forwarded from str implementation because of unicode argument
self.checkequalnofix('one@two!three!', 'one!two!three!', 'replace', '!', '@', 1)
self.assertRaises(TypeError, 'replace'.replace, "r", 42)
def test_bytes_comparison(self):
with support.check_warnings():
warnings.simplefilter('ignore', BytesWarning)
self.assertEqual('abc' == b'abc', False)
self.assertEqual('abc' != b'abc', True)
self.assertEqual('abc' == bytearray(b'abc'), False)
self.assertEqual('abc' != bytearray(b'abc'), True)
    def test_comparison(self):
        """Equality and lexicographic ordering of str objects."""
        # Comparisons:
        self.assertEqual('abc', 'abc')
        self.assertTrue('abcd' > 'abc')
        self.assertTrue('abc' < 'abcd')
        # NOTE: the whole `if 0:` block below is intentionally disabled dead
        # code, kept as a reference for UTF-16 code point order comparisons.
        if 0:
            # Move these tests to a Unicode collation module test...
            # Testing UTF-16 code point order comparisons...
            # No surrogates, no fixup required.
            self.assertTrue('\u0061' < '\u20ac')
            # Non surrogate below surrogate value, no fixup required
            self.assertTrue('\u0061' < '\ud800\udc02')
            # Non surrogate above surrogate value, fixup required
            def test_lecmp(s, s2):
                self.assertTrue(s < s2)
            def test_fixup(s):
                s2 = '\ud800\udc01'
                test_lecmp(s, s2)
                s2 = '\ud900\udc01'
                test_lecmp(s, s2)
                s2 = '\uda00\udc01'
                test_lecmp(s, s2)
                s2 = '\udb00\udc01'
                test_lecmp(s, s2)
                s2 = '\ud800\udd01'
                test_lecmp(s, s2)
                s2 = '\ud900\udd01'
                test_lecmp(s, s2)
                s2 = '\uda00\udd01'
                test_lecmp(s, s2)
                s2 = '\udb00\udd01'
                test_lecmp(s, s2)
                s2 = '\ud800\ude01'
                test_lecmp(s, s2)
                s2 = '\ud900\ude01'
                test_lecmp(s, s2)
                s2 = '\uda00\ude01'
                test_lecmp(s, s2)
                s2 = '\udb00\ude01'
                test_lecmp(s, s2)
                s2 = '\ud800\udfff'
                test_lecmp(s, s2)
                s2 = '\ud900\udfff'
                test_lecmp(s, s2)
                s2 = '\uda00\udfff'
                test_lecmp(s, s2)
                s2 = '\udb00\udfff'
                test_lecmp(s, s2)
            test_fixup('\ue000')
            test_fixup('\uff61')
        # Surrogates on both sides, no fixup required
        self.assertTrue('\ud800\udc02' < '\ud84d\udc56')
    def test_islower(self):
        """str.islower: a titlecased code point is not lowercase."""
        string_tests.MixinStrUnicodeUserStringTest.test_islower(self)
        # U+1FFC is a titlecase letter, hence neither lower- nor uppercase.
        self.checkequalnofix(False, '\u1FFc', 'islower')
    def test_isupper(self):
        """str.isupper: a titlecased code point is not uppercase."""
        string_tests.MixinStrUnicodeUserStringTest.test_isupper(self)
        if not sys.platform.startswith('java'):
            # U+1FFC is a titlecase letter, hence neither lower- nor uppercase.
            self.checkequalnofix(False, '\u1FFc', 'isupper')
def test_istitle(self):
string_tests.MixinStrUnicodeUserStringTest.test_title(self)
self.checkequalnofix(True, '\u1FFc', 'istitle')
self.checkequalnofix(True, 'Greek \u1FFcitlecases ...', 'istitle')
def test_isspace(self):
string_tests.MixinStrUnicodeUserStringTest.test_isspace(self)
self.checkequalnofix(True, '\u2000', 'isspace')
self.checkequalnofix(True, '\u200a', 'isspace')
self.checkequalnofix(False, '\u2014', 'isspace')
    def test_isalpha(self):
        """str.isalpha with a non-ASCII (titlecased) alphabetic code point."""
        string_tests.MixinStrUnicodeUserStringTest.test_isalpha(self)
        # U+1FFC is a (titlecase) letter, so isalpha() must be true.
        self.checkequalnofix(True, '\u1FFc', 'isalpha')
def test_isdecimal(self):
self.checkequalnofix(False, '', 'isdecimal')
self.checkequalnofix(False, 'a', 'isdecimal')
self.checkequalnofix(True, '0', 'isdecimal')
self.checkequalnofix(False, '\u2460', 'isdecimal') # CIRCLED DIGIT ONE
self.checkequalnofix(False, '\xbc', 'isdecimal') # VULGAR FRACTION ONE QUARTER
self.checkequalnofix(True, '\u0660', 'isdecimal') # ARABIC-INDIC DIGIT ZERO
self.checkequalnofix(True, '0123456789', 'isdecimal')
self.checkequalnofix(False, '0123456789a', 'isdecimal')
self.checkraises(TypeError, 'abc', 'isdecimal', 42)
def test_isdigit(self):
string_tests.MixinStrUnicodeUserStringTest.test_isdigit(self)
self.checkequalnofix(True, '\u2460', 'isdigit')
self.checkequalnofix(False, '\xbc', 'isdigit')
self.checkequalnofix(True, '\u0660', 'isdigit')
def test_isnumeric(self):
self.checkequalnofix(False, '', 'isnumeric')
self.checkequalnofix(False, 'a', 'isnumeric')
self.checkequalnofix(True, '0', 'isnumeric')
self.checkequalnofix(True, '\u2460', 'isnumeric')
self.checkequalnofix(True, '\xbc', 'isnumeric')
self.checkequalnofix(True, '\u0660', 'isnumeric')
self.checkequalnofix(True, '0123456789', 'isnumeric')
self.checkequalnofix(False, '0123456789a', 'isnumeric')
self.assertRaises(TypeError, "abc".isnumeric, 42)
def test_isidentifier(self):
self.assertTrue("a".isidentifier())
self.assertTrue("Z".isidentifier())
self.assertTrue("_".isidentifier())
self.assertTrue("b0".isidentifier())
self.assertTrue("bc".isidentifier())
self.assertTrue("b_".isidentifier())
self.assertTrue("µ".isidentifier())
self.assertTrue("𝔘𝔫𝔦𝔠𝔬𝔡𝔢".isidentifier())
self.assertFalse(" ".isidentifier())
self.assertFalse("[".isidentifier())
self.assertFalse("©".isidentifier())
self.assertFalse("0".isidentifier())
def test_isprintable(self):
self.assertTrue("".isprintable())
self.assertTrue(" ".isprintable())
self.assertTrue("abcdefg".isprintable())
self.assertFalse("abcdefg\n".isprintable())
# some defined Unicode character
self.assertTrue("\u0374".isprintable())
# undefined character
self.assertFalse("\u0378".isprintable())
# single surrogate character
self.assertFalse("\ud800".isprintable())
def test_contains(self):
# Testing Unicode contains method
self.assertIn('a', 'abdb')
self.assertIn('a', 'bdab')
self.assertIn('a', 'bdaba')
self.assertIn('a', 'bdba')
self.assertNotIn('a', 'bdb')
self.assertIn('a', 'bdba')
self.assertIn('a', ('a',1,None))
self.assertIn('a', (1,None,'a'))
self.assertIn('a', ('a',1,None))
self.assertIn('a', (1,None,'a'))
self.assertNotIn('a', ('x',1,'y'))
self.assertNotIn('a', ('x',1,None))
self.assertNotIn('abcd', 'abcxxxx')
self.assertIn('ab', 'abcd')
self.assertIn('ab', 'abc')
self.assertIn('ab', (1,None,'ab'))
self.assertIn('', 'abc')
self.assertIn('', '')
self.assertIn('', 'abc')
self.assertNotIn('\0', 'abc')
self.assertIn('\0', '\0abc')
self.assertIn('\0', 'abc\0')
self.assertIn('a', '\0abc')
self.assertIn('asdf', 'asdf')
self.assertNotIn('asdf', 'asd')
self.assertNotIn('asdf', '')
self.assertRaises(TypeError, "abc".__contains__)
    def test_format(self):
        """str.format: fields, conversions, format specs and error cases."""
        # Empty and brace-escape-only templates.
        self.assertEqual(''.format(), '')
        self.assertEqual('a'.format(), 'a')
        self.assertEqual('ab'.format(), 'ab')
        self.assertEqual('a{{'.format(), 'a{')
        self.assertEqual('a}}'.format(), 'a}')
        self.assertEqual('{{b'.format(), '{b')
        self.assertEqual('}}b'.format(), '}b')
        self.assertEqual('a{{b'.format(), 'a{b')
        # examples from the PEP:
        import datetime
        self.assertEqual("My name is {0}".format('Fred'), "My name is Fred")
        self.assertEqual("My name is {0[name]}".format(dict(name='Fred')),
                         "My name is Fred")
        self.assertEqual("My name is {0} :-{{}}".format('Fred'),
                         "My name is Fred :-{}")
        d = datetime.date(2007, 8, 18)
        self.assertEqual("The year is {0.year}".format(d),
                         "The year is 2007")
        # classes we'll use for testing
        # C: __format__ echoes the format spec back.
        class C:
            def __init__(self, x=100):
                self._x = x
            def __format__(self, spec):
                return spec
        # D: __format__ ignores the spec and returns str(self.x).
        class D:
            def __init__(self, x):
                self.x = x
            def __format__(self, spec):
                return str(self.x)
        # class with __str__, but no __format__
        class E:
            def __init__(self, x):
                self.x = x
            def __str__(self):
                return 'E(' + self.x + ')'
        # class with __repr__, but no __format__ or __str__
        class F:
            def __init__(self, x):
                self.x = x
            def __repr__(self):
                return 'F(' + self.x + ')'
        # class with __format__ that forwards to string, for some format_spec's
        class G:
            def __init__(self, x):
                self.x = x
            def __str__(self):
                return "string is " + self.x
            def __format__(self, format_spec):
                if format_spec == 'd':
                    return 'G(' + self.x + ')'
                return object.__format__(self, format_spec)
        # I: date subclass whose __format__ treats the spec as strftime format.
        class I(datetime.date):
            def __format__(self, format_spec):
                return self.strftime(format_spec)
        # J: int subclass that formats double its own value.
        class J(int):
            def __format__(self, format_spec):
                return int.__format__(self * 2, format_spec)
        # Positional field access.
        self.assertEqual(''.format(), '')
        self.assertEqual('abc'.format(), 'abc')
        self.assertEqual('{0}'.format('abc'), 'abc')
        self.assertEqual('{0:}'.format('abc'), 'abc')
#        self.assertEqual('{ 0 }'.format('abc'), 'abc')
        self.assertEqual('X{0}'.format('abc'), 'Xabc')
        self.assertEqual('{0}X'.format('abc'), 'abcX')
        self.assertEqual('X{0}Y'.format('abc'), 'XabcY')
        self.assertEqual('{1}'.format(1, 'abc'), 'abc')
        self.assertEqual('X{1}'.format(1, 'abc'), 'Xabc')
        self.assertEqual('{1}X'.format(1, 'abc'), 'abcX')
        self.assertEqual('X{1}Y'.format(1, 'abc'), 'XabcY')
        self.assertEqual('{0}'.format(-15), '-15')
        self.assertEqual('{0}{1}'.format(-15, 'abc'), '-15abc')
        self.assertEqual('{0}X{1}'.format(-15, 'abc'), '-15Xabc')
        self.assertEqual('{{'.format(), '{')
        self.assertEqual('}}'.format(), '}')
        self.assertEqual('{{}}'.format(), '{}')
        self.assertEqual('{{x}}'.format(), '{x}')
        self.assertEqual('{{{0}}}'.format(123), '{123}')
        self.assertEqual('{{{{0}}}}'.format(), '{{0}}')
        self.assertEqual('}}{{'.format(), '}{')
        self.assertEqual('}}x{{'.format(), '}x{')
        # weird field names
        self.assertEqual("{0[foo-bar]}".format({'foo-bar':'baz'}), 'baz')
        self.assertEqual("{0[foo bar]}".format({'foo bar':'baz'}), 'baz')
        self.assertEqual("{0[ ]}".format({' ':3}), '3')
        # Attribute and item access chains on field names.
        self.assertEqual('{foo._x}'.format(foo=C(20)), '20')
        self.assertEqual('{1}{0}'.format(D(10), D(20)), '2010')
        self.assertEqual('{0._x.x}'.format(C(D('abc'))), 'abc')
        self.assertEqual('{0[0]}'.format(['abc', 'def']), 'abc')
        self.assertEqual('{0[1]}'.format(['abc', 'def']), 'def')
        self.assertEqual('{0[1][0]}'.format(['abc', ['def']]), 'def')
        self.assertEqual('{0[1][0].x}'.format(['abc', [D('def')]]), 'def')
        # strings
        self.assertEqual('{0:.3s}'.format('abc'), 'abc')
        self.assertEqual('{0:.3s}'.format('ab'), 'ab')
        self.assertEqual('{0:.3s}'.format('abcdef'), 'abc')
        self.assertEqual('{0:.0s}'.format('abcdef'), '')
        self.assertEqual('{0:3.3s}'.format('abc'), 'abc')
        self.assertEqual('{0:2.3s}'.format('abc'), 'abc')
        self.assertEqual('{0:2.2s}'.format('abc'), 'ab')
        self.assertEqual('{0:3.2s}'.format('abc'), 'ab ')
        self.assertEqual('{0:x<0s}'.format('result'), 'result')
        self.assertEqual('{0:x<5s}'.format('result'), 'result')
        self.assertEqual('{0:x<6s}'.format('result'), 'result')
        self.assertEqual('{0:x<7s}'.format('result'), 'resultx')
        self.assertEqual('{0:x<8s}'.format('result'), 'resultxx')
        self.assertEqual('{0: <7s}'.format('result'), 'result ')
        self.assertEqual('{0:<7s}'.format('result'), 'result ')
        self.assertEqual('{0:>7s}'.format('result'), ' result')
        self.assertEqual('{0:>8s}'.format('result'), '  result')
        self.assertEqual('{0:^8s}'.format('result'), ' result ')
        self.assertEqual('{0:^9s}'.format('result'), ' result  ')
        self.assertEqual('{0:^10s}'.format('result'), '  result  ')
        # Very large widths exercise the padding allocator.
        self.assertEqual('{0:10000}'.format('a'), 'a' + ' ' * 9999)
        self.assertEqual('{0:10000}'.format(''), ' ' * 10000)
        self.assertEqual('{0:10000000}'.format(''), ' ' * 10000000)
        # format specifiers for user defined type
        self.assertEqual('{0:abc}'.format(C()), 'abc')
        # !r, !s and !a coercions
        self.assertEqual('{0!s}'.format('Hello'), 'Hello')
        self.assertEqual('{0!s:}'.format('Hello'), 'Hello')
        self.assertEqual('{0!s:15}'.format('Hello'), 'Hello          ')
        self.assertEqual('{0!s:15s}'.format('Hello'), 'Hello          ')
        self.assertEqual('{0!r}'.format('Hello'), "'Hello'")
        self.assertEqual('{0!r:}'.format('Hello'), "'Hello'")
        self.assertEqual('{0!r}'.format(F('Hello')), 'F(Hello)')
        self.assertEqual('{0!r}'.format('\u0378'), "'\\u0378'") # nonprintable
        self.assertEqual('{0!r}'.format('\u0374'), "'\u0374'")  # printable
        self.assertEqual('{0!r}'.format(F('\u0374')), 'F(\u0374)')
        self.assertEqual('{0!a}'.format('Hello'), "'Hello'")
        self.assertEqual('{0!a}'.format('\u0378'), "'\\u0378'") # nonprintable
        self.assertEqual('{0!a}'.format('\u0374'), "'\\u0374'") # printable
        self.assertEqual('{0!a:}'.format('Hello'), "'Hello'")
        self.assertEqual('{0!a}'.format(F('Hello')), 'F(Hello)')
        self.assertEqual('{0!a}'.format(F('\u0374')), 'F(\\u0374)')
        # test fallback to object.__format__
        self.assertEqual('{0}'.format({}), '{}')
        self.assertEqual('{0}'.format([]), '[]')
        self.assertEqual('{0}'.format([1]), '[1]')
        self.assertEqual('{0:d}'.format(G('data')), 'G(data)')
        self.assertEqual('{0!s}'.format(G('data')), 'string is data')
        # object.__format__ with a non-empty spec warns before it was removed.
        msg = 'object.__format__ with a non-empty format string is deprecated'
        with support.check_warnings((msg, PendingDeprecationWarning)):
            self.assertEqual('{0:^10}'.format(E('data')), ' E(data)  ')
            self.assertEqual('{0:^10s}'.format(E('data')), ' E(data)  ')
            self.assertEqual('{0:>15s}'.format(G('data')), ' string is data')
        self.assertEqual("{0:date: %Y-%m-%d}".format(I(year=2007,
                                                       month=8,
                                                       day=27)),
                         "date: 2007-08-27")
        # test deriving from a builtin type and overriding __format__
        self.assertEqual("{0}".format(J(10)), "20")
        # string format specifiers
        self.assertEqual('{0:}'.format('a'), 'a')
        # computed format specifiers
        self.assertEqual("{0:.{1}}".format('hello world', 5), 'hello')
        self.assertEqual("{0:.{1}s}".format('hello world', 5), 'hello')
        self.assertEqual("{0:.{precision}s}".format('hello world', precision=5), 'hello')
        self.assertEqual("{0:{width}.{precision}s}".format('hello world', width=10, precision=5), 'hello     ')
        self.assertEqual("{0:{width}.{precision}s}".format('hello world', width='10', precision='5'), 'hello     ')
        # test various errors
        self.assertRaises(ValueError, '{'.format)
        self.assertRaises(ValueError, '}'.format)
        self.assertRaises(ValueError, 'a{'.format)
        self.assertRaises(ValueError, 'a}'.format)
        self.assertRaises(ValueError, '{a'.format)
        self.assertRaises(ValueError, '}a'.format)
        self.assertRaises(IndexError, '{0}'.format)
        self.assertRaises(IndexError, '{1}'.format, 'abc')
        self.assertRaises(KeyError,   '{x}'.format)
        self.assertRaises(ValueError, "}{".format)
        self.assertRaises(ValueError, "abc{0:{}".format)
        self.assertRaises(ValueError, "{0".format)
        self.assertRaises(IndexError, "{0.}".format)
        self.assertRaises(ValueError, "{0.}".format, 0)
        self.assertRaises(IndexError, "{0[}".format)
        self.assertRaises(ValueError, "{0[}".format, [])
        self.assertRaises(KeyError,   "{0]}".format)
        self.assertRaises(ValueError, "{0.[]}".format, 0)
        self.assertRaises(ValueError, "{0..foo}".format, 0)
        self.assertRaises(ValueError, "{0[0}".format, 0)
        self.assertRaises(ValueError, "{0[0:foo}".format, 0)
        self.assertRaises(KeyError,   "{c]}".format)
        self.assertRaises(ValueError, "{{ {{{0}}".format, 0)
        self.assertRaises(ValueError, "{0}}".format, 0)
        self.assertRaises(KeyError,   "{foo}".format, bar=3)
        self.assertRaises(ValueError, "{0!x}".format, 3)
        self.assertRaises(ValueError, "{0!}".format, 0)
        self.assertRaises(ValueError, "{0!rs}".format, 0)
        self.assertRaises(ValueError, "{!}".format)
        self.assertRaises(IndexError, "{:}".format)
        self.assertRaises(IndexError, "{:s}".format)
        self.assertRaises(IndexError, "{}".format)
        big = "23098475029384702983476098230754973209482573"
        self.assertRaises(ValueError, ("{" + big + "}").format)
        self.assertRaises(ValueError, ("{[" + big + "]}").format, [0])
        # issue 6089
        self.assertRaises(ValueError, "{0[0]x}".format, [None])
        self.assertRaises(ValueError, "{0[0](10)}".format, [None])
        # can't have a replacement on the field name portion
        self.assertRaises(TypeError, '{0[{1}]}'.format, 'abcdefg', 4)
        # exceed maximum recursion depth
        self.assertRaises(ValueError, "{0:{1:{2}}}".format, 'abc', 's', '')
        self.assertRaises(ValueError, "{0:{1:{2:{3:{4:{5:{6}}}}}}}".format,
                          0, 1, 2, 3, 4, 5, 6, 7)
        # string format spec errors
        self.assertRaises(ValueError, "{0:-s}".format, '')
        self.assertRaises(ValueError, format, "", "-")
        self.assertRaises(ValueError, "{0:=s}".format, '')
        # Alternate formatting is not supported
        self.assertRaises(ValueError, format, '', '#')
        self.assertRaises(ValueError, format, '', '#20')
    def test_format_map(self):
        """str.format_map with plain dicts, custom mappings and error cases."""
        self.assertEqual(''.format_map({}), '')
        self.assertEqual('a'.format_map({}), 'a')
        self.assertEqual('ab'.format_map({}), 'ab')
        self.assertEqual('a{{'.format_map({}), 'a{')
        self.assertEqual('a}}'.format_map({}), 'a}')
        self.assertEqual('{{b'.format_map({}), '{b')
        self.assertEqual('}}b'.format_map({}), '}b')
        self.assertEqual('a{{b'.format_map({}), 'a{b')
        # using mappings
        # __missing__ is honored: unknown keys format as themselves.
        class Mapping(dict):
            def __missing__(self, key):
                return key
        self.assertEqual('{hello}'.format_map(Mapping()), 'hello')
        self.assertEqual('{a} {world}'.format_map(Mapping(a='hello')), 'hello world')
        # Any object with __getitem__ works; no dict subclass required.
        class InternalMapping:
            def __init__(self):
                self.mapping = {'a': 'hello'}
            def __getitem__(self, key):
                return self.mapping[key]
        self.assertEqual('{a}'.format_map(InternalMapping()), 'hello')
        # Attribute access on looked-up values still goes through __format__.
        class C:
            def __init__(self, x=100):
                self._x = x
            def __format__(self, spec):
                return spec
        self.assertEqual('{foo._x}'.format_map({'foo': C(20)}), '20')
        # test various errors
        self.assertRaises(TypeError, '{'.format_map)
        self.assertRaises(TypeError, '}'.format_map)
        self.assertRaises(TypeError, 'a{'.format_map)
        self.assertRaises(TypeError, 'a}'.format_map)
        self.assertRaises(TypeError, '{a'.format_map)
        self.assertRaises(TypeError, '}a'.format_map)
        # issue #12579: can't supply positional params to format_map
        self.assertRaises(ValueError, '{}'.format_map, {'a' : 2})
        self.assertRaises(ValueError, '{}'.format_map, 'a')
        self.assertRaises(ValueError, '{a} {}'.format_map, {"a" : 2, "b" : 1})
    def test_format_auto_numbering(self):
        """Auto-numbered '{}' fields, and their mixing rules with '{0}'/named."""
        class C:
            def __init__(self, x=100):
                self._x = x
            def __format__(self, spec):
                return spec
        self.assertEqual('{}'.format(10), '10')
        self.assertEqual('{:5}'.format('s'), 's    ')
        self.assertEqual('{!r}'.format('s'), "'s'")
        self.assertEqual('{._x}'.format(C(10)), '10')
        self.assertEqual('{[1]}'.format([1, 2]), '2')
        self.assertEqual('{[a]}'.format({'a':4, 'b':2}), '4')
        self.assertEqual('a{}b{}c'.format(0, 1), 'a0b1c')
        # Auto-numbering also applies inside computed format specs.
        self.assertEqual('a{:{}}b'.format('x', '^10'), 'a    x     b')
        self.assertEqual('a{:{}x}b'.format(20, '#'), 'a0x14b')
        # can't mix and match numbering and auto-numbering
        self.assertRaises(ValueError, '{}{1}'.format, 1, 2)
        self.assertRaises(ValueError, '{1}{}'.format, 1, 2)
        self.assertRaises(ValueError, '{:{1}}'.format, 1, 2)
        self.assertRaises(ValueError, '{0:{}}'.format, 1, 2)
        # can mix and match auto-numbering and named
        self.assertEqual('{f}{}'.format(4, f='test'), 'test4')
        self.assertEqual('{}{f}'.format(4, f='test'), '4test')
        self.assertEqual('{:{f}}{g}{}'.format(1, 3, g='g', f=2), ' 1g3')
        self.assertEqual('{f:{}}{}{g}'.format(2, 4, f=1, g='g'), ' 14g')
    def test_formatting(self):
        """Printf-style (%) formatting of str, incl. %c, %a and dict targets."""
        string_tests.MixinStrUnicodeUserStringTest.test_formatting(self)
        # Testing Unicode formatting strings...
        self.assertEqual("%s, %s" % ("abc", "abc"), 'abc, abc')
        self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", 1, 2, 3), 'abc, abc, 1, 2.000000,  3.00')
        self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", 1, -2, 3), 'abc, abc, 1, -2.000000,  3.00')
        self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 3.5), 'abc, abc, -1, -2.000000,  3.50')
        self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 3.57), 'abc, abc, -1, -2.000000,  3.57')
        self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 1003.57), 'abc, abc, -1, -2.000000, 1003.57')
        if not sys.platform.startswith('java'):
            self.assertEqual("%r, %r" % (b"abc", "abc"), "b'abc', 'abc'")
            self.assertEqual("%r" % ("\u1234",), "'\u1234'")
            self.assertEqual("%a" % ("\u1234",), "'\\u1234'")
        self.assertEqual("%(x)s, %(y)s" % {'x':"abc", 'y':"def"}, 'abc, def')
        # Non-ASCII mapping keys are allowed.
        self.assertEqual("%(x)s, %(\xfc)s" % {'x':"abc", '\xfc':"def"}, 'abc, def')
        # %c accepts both code point ints (incl. non-BMP) and 1-char strings.
        self.assertEqual('%c' % 0x1234, '\u1234')
        self.assertEqual('%c' % 0x21483, '\U00021483')
        self.assertRaises(OverflowError, "%c".__mod__, (0x110000,))
        self.assertEqual('%c' % '\U00021483', '\U00021483')
        self.assertRaises(TypeError, "%c".__mod__, "aa")
        self.assertRaises(ValueError, "%.1\u1032f".__mod__, (1.0/3))
        self.assertRaises(TypeError, "%i".__mod__, "aa")
        # formatting jobs delegated from the string implementation:
        self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
        self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
        self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
        self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
        self.assertEqual('...%(foo)s...' % {'foo':"abc",'def':123},  '...abc...')
        self.assertEqual('...%(foo)s...' % {'foo':"abc",'def':123}, '...abc...')
        self.assertEqual('...%s...%s...%s...%s...' % (1,2,3,"abc"), '...1...2...3...abc...')
        self.assertEqual('...%%...%%s...%s...%s...%s...%s...' % (1,2,3,"abc"), '...%...%s...1...2...3...abc...')
        self.assertEqual('...%s...' % "abc", '...abc...')
        # '*' width/precision taken from the argument tuple.
        self.assertEqual('%*s' % (5,'abc',), '  abc')
        self.assertEqual('%*s' % (-5,'abc',), 'abc  ')
        self.assertEqual('%*.*s' % (5,2,'abc',), '   ab')
        self.assertEqual('%*.*s' % (5,3,'abc',), '  abc')
        self.assertEqual('%i %*.*s' % (10, 5,3,'abc',), '10   abc')
        self.assertEqual('%i%s %*.*s' % (10, 3, 5, 3, 'abc',), '103   abc')
        self.assertEqual('%c' % 'a', 'a')
        # %s falls back to __str__ for non-str objects.
        class Wrapper:
            def __str__(self):
                return '\u1234'
        self.assertEqual('%s' % Wrapper(), '\u1234')
        # issue 3382
        NAN = float('nan')
        INF = float('inf')
        self.assertEqual('%f' % NAN, 'nan')
        self.assertEqual('%F' % NAN, 'NAN')
        self.assertEqual('%f' % INF, 'inf')
        self.assertEqual('%F' % INF, 'INF')
    def test_startswith_endswith_errors(self):
        """startswith/endswith reject non-str/tuple prefixes with a clear message."""
        for meth in ('foo'.startswith, 'foo'.endswith):
            with self.assertRaises(TypeError) as cm:
                meth(['f'])
            exc = str(cm.exception)
            # The message should mention both the expected types and 'str'.
            self.assertIn('str', exc)
            self.assertIn('tuple', exc)
    @support.run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
    def test_format_float(self):
        """%-formatting of floats ignores the locale's decimal separator."""
        # should not format with a comma, but always with C locale
        self.assertEqual('1.0', '%.1f' % 1.0)
    def test_constructor(self):
        """The str() constructor: objects, subclasses, and decoding bytes."""
        # unicode(obj) tests (this maps to PyObject_Unicode() at C level)
        self.assertEqual(
            str('unicode remains unicode'),
            'unicode remains unicode'
        )
        # str() of a str subclass returns a plain str, not the subclass.
        class UnicodeSubclass(str):
            pass
        self.assertEqual(
            str(UnicodeSubclass('unicode subclass becomes unicode')),
            'unicode subclass becomes unicode'
        )
        self.assertEqual(
            str('strings are converted to unicode'),
            'strings are converted to unicode'
        )
        class StringCompat:
            def __init__(self, x):
                self.x = x
            def __str__(self):
                return self.x
        self.assertEqual(
            str(StringCompat('__str__ compatible objects are recognized')),
            '__str__ compatible objects are recognized'
        )
        # unicode(obj) is compatible to str():
        o = StringCompat('unicode(obj) is compatible to str()')
        self.assertEqual(str(o), 'unicode(obj) is compatible to str()')
        self.assertEqual(str(o), 'unicode(obj) is compatible to str()')
        for obj in (123, 123.45, 123):
            self.assertEqual(str(obj), str(str(obj)))
        # unicode(obj, encoding, error) tests (this maps to
        # PyUnicode_FromEncodedObject() at C level)
        if not sys.platform.startswith('java'):
            # Passing an encoding is only valid for bytes-like objects.
            self.assertRaises(
                TypeError,
                str,
                'decoding unicode is not supported',
                'utf-8',
                'strict'
            )
        self.assertEqual(
            str(b'strings are decoded to unicode', 'utf-8', 'strict'),
            'strings are decoded to unicode'
        )
        if not sys.platform.startswith('java'):
            self.assertEqual(
                str(
                    memoryview(b'character buffers are decoded to unicode'),
                    'utf-8',
                    'strict'
                ),
                'character buffers are decoded to unicode'
            )
        self.assertRaises(TypeError, str, 42, 42, 42)
    def test_codecs_utf7(self):
        """UTF-7 codec: RFC 2152 examples, '+' escaping and direct characters."""
        utfTests = [
            ('A\u2262\u0391.', b'A+ImIDkQ.'),             # RFC2152 example
            ('Hi Mom -\u263a-!', b'Hi Mom -+Jjo--!'),     # RFC2152 example
            ('\u65E5\u672C\u8A9E', b'+ZeVnLIqe-'),        # RFC2152 example
            ('Item 3 is \u00a31.', b'Item 3 is +AKM-1.'), # RFC2152 example
            ('+', b'+-'),
            ('+-', b'+--'),
            ('+?', b'+-?'),
            ('\?', b'+AFw?'),
            ('+?', b'+-?'),
            (r'\\?', b'+AFwAXA?'),
            (r'\\\?', b'+AFwAXABc?'),
            (r'++--', b'+-+---'),
            ('\U000abcde', b'+2m/c3g-'),                  # surrogate pairs
            ('/', b'/'),
        ]
        for (x, y) in utfTests:
            self.assertEqual(x.encode('utf-7'), y)
        # Unpaired surrogates not supported
        self.assertRaises(UnicodeError, str, b'+3ADYAA-', 'utf-7')
        self.assertEqual(str(b'+3ADYAA-', 'utf-7', 'replace'), '\ufffd\ufffd')
        # Issue #2242: crash on some Windows/MSVC versions
        self.assertEqual(b'+\xc1'.decode('utf-7'), '\xc1')
        # Direct encoded characters
        set_d = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'(),-./:?"
        # Optional direct characters
        set_o = '!"#$%&*;<=>@[]^_`{|}'
        # Direct characters round-trip unchanged through UTF-7.
        for c in set_d:
            self.assertEqual(c.encode('utf7'), c.encode('ascii'))
            self.assertEqual(c.encode('ascii').decode('utf7'), c)
        # Optional direct characters must at least decode to themselves.
        for c in set_o:
            self.assertEqual(c.encode('ascii').decode('utf7'), c)
    def test_codecs_utf8(self):
        """UTF-8 encoding of BMP/non-BMP code points and surrogatepass."""
        self.assertEqual(''.encode('utf-8'), b'')
        self.assertEqual('\u20ac'.encode('utf-8'), b'\xe2\x82\xac')
        # On narrow (UCS-2) builds surrogate pairs encode as one 4-byte seq.
        if sys.maxunicode == 65535:
            self.assertEqual('\ud800\udc02'.encode('utf-8'), b'\xf0\x90\x80\x82')
            self.assertEqual('\ud84d\udc56'.encode('utf-8'), b'\xf0\xa3\x91\x96')
        # 'surrogatepass' lets lone surrogates through as CESU-8-style bytes.
        self.assertEqual('\ud800'.encode('utf-8', 'surrogatepass'), b'\xed\xa0\x80')
        self.assertEqual('\udc00'.encode('utf-8', 'surrogatepass'), b'\xed\xb0\x80')
        if sys.maxunicode == 65535:
            self.assertEqual(
                ('\ud800\udc02'*1000).encode('utf-8'),
                b'\xf0\x90\x80\x82'*1000)
        self.assertEqual(
            '\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f'
            '\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00'
            '\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c'
            '\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067'
            '\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das'
            ' Nunstuck git und'.encode('utf-8'),
            b'\xe6\xad\xa3\xe7\xa2\xba\xe3\x81\xab\xe8\xa8\x80\xe3\x81'
            b'\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3\xe3\x81\xaf\xe3'
            b'\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe'
            b'\xe3\x81\x9b\xe3\x82\x93\xe3\x80\x82\xe4\xb8\x80\xe9\x83'
            b'\xa8\xe3\x81\xaf\xe3\x83\x89\xe3\x82\xa4\xe3\x83\x84\xe8'
            b'\xaa\x9e\xe3\x81\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81'
            b'\xe3\x81\x82\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\xa7\xe3\x81'
            b'\x9f\xe3\x82\x89\xe3\x82\x81\xe3\x81\xa7\xe3\x81\x99\xe3'
            b'\x80\x82\xe5\xae\x9f\xe9\x9a\x9b\xe3\x81\xab\xe3\x81\xaf'
            b'\xe3\x80\x8cWenn ist das Nunstuck git und'
            )
        # UTF-8 specific decoding tests
        self.assertEqual(str(b'\xf0\xa3\x91\x96', 'utf-8'), '\U00023456' )
        self.assertEqual(str(b'\xf0\x90\x80\x82', 'utf-8'), '\U00010002' )
        self.assertEqual(str(b'\xe2\x82\xac', 'utf-8'), '\u20ac' )
        # Other possible utf-8 test cases:
        # * strict decoding testing for all of the
        #   UTF8_ERROR cases in PyUnicode_DecodeUTF8
    def test_utf8_decode_valid_sequences(self):
        """Decode boundary-value well-formed UTF-8 sequences of 1-4 bytes."""
        sequences = [
            # single byte
            (b'\x00', '\x00'), (b'a', 'a'), (b'\x7f', '\x7f'),
            # 2 bytes
            (b'\xc2\x80', '\x80'), (b'\xdf\xbf', '\u07ff'),
            # 3 bytes
            (b'\xe0\xa0\x80', '\u0800'), (b'\xed\x9f\xbf', '\ud7ff'),
            (b'\xee\x80\x80', '\uE000'), (b'\xef\xbf\xbf', '\uffff'),
            # 4 bytes
            (b'\xF0\x90\x80\x80', '\U00010000'),
            (b'\xf4\x8f\xbf\xbf', '\U0010FFFF')
        ]
        for seq, res in sequences:
            self.assertEqual(seq.decode('utf-8'), res)
    def test_utf8_decode_invalid_sequences(self):
        """Strict UTF-8 decoding rejects all classes of ill-formed sequences."""
        # continuation bytes in a sequence of 2, 3, or 4 bytes
        continuation_bytes = [bytes([x]) for x in range(0x80, 0xC0)]
        # start bytes of a 2-byte sequence equivalent to codepoints < 0x7F
        invalid_2B_seq_start_bytes = [bytes([x]) for x in range(0xC0, 0xC2)]
        # start bytes of a 4-byte sequence equivalent to codepoints > 0x10FFFF
        invalid_4B_seq_start_bytes = [bytes([x]) for x in range(0xF5, 0xF8)]
        invalid_start_bytes = (
            continuation_bytes + invalid_2B_seq_start_bytes +
            invalid_4B_seq_start_bytes + [bytes([x]) for x in range(0xF7, 0x100)]
        )
        for byte in invalid_start_bytes:
            self.assertRaises(UnicodeDecodeError, byte.decode, 'utf-8')
        # Overlong 2-byte sequences must be rejected.
        for sb in invalid_2B_seq_start_bytes:
            for cb in continuation_bytes:
                self.assertRaises(UnicodeDecodeError, (sb+cb).decode, 'utf-8')
        # 4-byte sequences that would encode beyond U+10FFFF.
        for sb in invalid_4B_seq_start_bytes:
            for cb1 in continuation_bytes[:3]:
                for cb3 in continuation_bytes[:3]:
                    self.assertRaises(UnicodeDecodeError,
                                      (sb+cb1+b'\x80'+cb3).decode, 'utf-8')
        # Overlong 3-byte sequences (E0 followed by 80..9F).
        for cb in [bytes([x]) for x in range(0x80, 0xA0)]:
            self.assertRaises(UnicodeDecodeError,
                              (b'\xE0'+cb+b'\x80').decode, 'utf-8')
            self.assertRaises(UnicodeDecodeError,
                              (b'\xE0'+cb+b'\xBF').decode, 'utf-8')
        # surrogates
        for cb in [bytes([x]) for x in range(0xA0, 0xC0)]:
            self.assertRaises(UnicodeDecodeError,
                              (b'\xED'+cb+b'\x80').decode, 'utf-8')
            self.assertRaises(UnicodeDecodeError,
                              (b'\xED'+cb+b'\xBF').decode, 'utf-8')
        # Overlong 4-byte sequences (F0 followed by 80..8F).
        for cb in [bytes([x]) for x in range(0x80, 0x90)]:
            self.assertRaises(UnicodeDecodeError,
                              (b'\xF0'+cb+b'\x80\x80').decode, 'utf-8')
            self.assertRaises(UnicodeDecodeError,
                              (b'\xF0'+cb+b'\xBF\xBF').decode, 'utf-8')
        # 4-byte sequences beyond U+10FFFF (F4 followed by 90..BF).
        for cb in [bytes([x]) for x in range(0x90, 0xC0)]:
            self.assertRaises(UnicodeDecodeError,
                              (b'\xF4'+cb+b'\x80\x80').decode, 'utf-8')
            self.assertRaises(UnicodeDecodeError,
                              (b'\xF4'+cb+b'\xBF\xBF').decode, 'utf-8')
    def test_issue8271(self):
        """U+FFFD substitution follows Unicode 5.2 'maximal subpart' rules."""
        # Issue #8271: during the decoding of an invalid UTF-8 byte sequence,
        # only the start byte and the continuation byte(s) are now considered
        # invalid, instead of the number of bytes specified by the start byte.
        # See http://www.unicode.org/versions/Unicode5.2.0/ch03.pdf (page 95,
        # table 3-8, Row 2) for more information about the algorithm used.
        FFFD = '\ufffd'
        sequences = [
            # invalid start bytes
            (b'\x80', FFFD), # continuation byte
            (b'\x80\x80', FFFD*2), # 2 continuation bytes
            (b'\xc0', FFFD),
            (b'\xc0\xc0', FFFD*2),
            (b'\xc1', FFFD),
            (b'\xc1\xc0', FFFD*2),
            (b'\xc0\xc1', FFFD*2),
            # with start byte of a 2-byte sequence
            (b'\xc2', FFFD), # only the start byte
            (b'\xc2\xc2', FFFD*2), # 2 start bytes
            (b'\xc2\xc2\xc2', FFFD*3), # 2 start bytes
            (b'\xc2\x41', FFFD+'A'), # invalid continuation byte
            # with start byte of a 3-byte sequence
            (b'\xe1', FFFD), # only the start byte
            (b'\xe1\xe1', FFFD*2), # 2 start bytes
            (b'\xe1\xe1\xe1', FFFD*3), # 3 start bytes
            (b'\xe1\xe1\xe1\xe1', FFFD*4), # 4 start bytes
            (b'\xe1\x80', FFFD), # only 1 continuation byte
            (b'\xe1\x41', FFFD+'A'), # invalid continuation byte
            (b'\xe1\x41\x80', FFFD+'A'+FFFD), # invalid cb followed by valid cb
            (b'\xe1\x41\x41', FFFD+'AA'), # 2 invalid continuation bytes
            (b'\xe1\x80\x41', FFFD+'A'), # only 1 valid continuation byte
            (b'\xe1\x80\xe1\x41', FFFD*2+'A'), # 1 valid and the other invalid
            (b'\xe1\x41\xe1\x80', FFFD+'A'+FFFD), # 1 invalid and the other valid
            # with start byte of a 4-byte sequence
            (b'\xf1', FFFD), # only the start byte
            (b'\xf1\xf1', FFFD*2), # 2 start bytes
            (b'\xf1\xf1\xf1', FFFD*3), # 3 start bytes
            (b'\xf1\xf1\xf1\xf1', FFFD*4), # 4 start bytes
            (b'\xf1\xf1\xf1\xf1\xf1', FFFD*5), # 5 start bytes
            (b'\xf1\x80', FFFD), # only 1 continuation bytes
            (b'\xf1\x80\x80', FFFD), # only 2 continuation bytes
            (b'\xf1\x80\x41', FFFD+'A'), # 1 valid cb and 1 invalid
            (b'\xf1\x80\x41\x41', FFFD+'AA'), # 1 valid cb and 1 invalid
            (b'\xf1\x80\x80\x41', FFFD+'A'), # 2 valid cb and 1 invalid
            (b'\xf1\x41\x80', FFFD+'A'+FFFD), # 1 invalid cv and 1 valid
            (b'\xf1\x41\x80\x80', FFFD+'A'+FFFD*2), # 1 invalid cb and 2 invalid
            (b'\xf1\x41\x80\x41', FFFD+'A'+FFFD+'A'), # 2 invalid cb and 1 invalid
            (b'\xf1\x41\x41\x80', FFFD+'AA'+FFFD), # 1 valid cb and 1 invalid
            (b'\xf1\x41\xf1\x80', FFFD+'A'+FFFD),
            (b'\xf1\x41\x80\xf1', FFFD+'A'+FFFD*2),
            (b'\xf1\xf1\x80\x41', FFFD*2+'A'),
            (b'\xf1\x41\xf1\xf1', FFFD+'A'+FFFD*2),
            # with invalid start byte of a 4-byte sequence (rfc2279)
            (b'\xf5', FFFD), # only the start byte
            (b'\xf5\xf5', FFFD*2), # 2 start bytes
            (b'\xf5\x80', FFFD*2), # only 1 continuation byte
            (b'\xf5\x80\x80', FFFD*3), # only 2 continuation byte
            (b'\xf5\x80\x80\x80', FFFD*4), # 3 continuation bytes
            (b'\xf5\x80\x41', FFFD*2+'A'), #  1 valid cb and 1 invalid
            (b'\xf5\x80\x41\xf5', FFFD*2+'A'+FFFD),
            (b'\xf5\x41\x80\x80\x41', FFFD+'A'+FFFD*2+'A'),
            # with invalid start byte of a 5-byte sequence (rfc2279)
            (b'\xf8', FFFD), # only the start byte
            (b'\xf8\xf8', FFFD*2), # 2 start bytes
            (b'\xf8\x80', FFFD*2), # only one continuation byte
            (b'\xf8\x80\x41', FFFD*2 + 'A'), # 1 valid cb and 1 invalid
            (b'\xf8\x80\x80\x80\x80', FFFD*5), # invalid 5 bytes seq with 5 bytes
            # with invalid start byte of a 6-byte sequence (rfc2279)
            (b'\xfc', FFFD), # only the start byte
            (b'\xfc\xfc', FFFD*2), # 2 start bytes
            (b'\xfc\x80\x80', FFFD*3), # only 2 continuation bytes
            (b'\xfc\x80\x80\x80\x80\x80', FFFD*6), # 6 continuation bytes
            # invalid start byte
            (b'\xfe', FFFD),
            (b'\xfe\x80\x80', FFFD*3),
            # other sequences
            (b'\xf1\x80\x41\x42\x43', '\ufffd\x41\x42\x43'),
            (b'\xf1\x80\xff\x42\x43', '\ufffd\ufffd\x42\x43'),
            (b'\xf1\x80\xc2\x81\x43', '\ufffd\x81\x43'),
            (b'\x61\xF1\x80\x80\xE1\x80\xC2\x62\x80\x63\x80\xBF\x64',
             '\x61\uFFFD\uFFFD\uFFFD\x62\uFFFD\x63\uFFFD\uFFFD\x64'),
        ]
        # Each vector is checked under 'strict', 'replace' and 'ignore',
        # and again with a trailing valid byte to exercise resynchronization.
        for n, (seq, res) in enumerate(sequences):
            self.assertRaises(UnicodeDecodeError, seq.decode, 'utf-8', 'strict')
            self.assertEqual(seq.decode('utf-8', 'replace'), res)
            self.assertEqual((seq+b'b').decode('utf-8', 'replace'), res+'b')
            self.assertEqual(seq.decode('utf-8', 'ignore'),
                             res.replace('\uFFFD', ''))
def test_codecs_idna(self):
# Test whether trailing dot is preserved
self.assertEqual("www.python.org.".encode("idna"), b"www.python.org.")
def test_codecs_errors(self):
# Exercise the 'strict'/'ignore'/'replace' error handlers on both the
# encode and decode paths, plus assorted other error cases.
# Error handling (encoding)
self.assertRaises(UnicodeError, 'Andr\202 x'.encode, 'ascii')
self.assertRaises(UnicodeError, 'Andr\202 x'.encode, 'ascii','strict')
self.assertEqual('Andr\202 x'.encode('ascii','ignore'), b"Andr x")
self.assertEqual('Andr\202 x'.encode('ascii','replace'), b"Andr? x")
# 'errors'/'encoding' given positionally and as keywords must agree.
self.assertEqual('Andr\202 x'.encode('ascii', 'replace'),
'Andr\202 x'.encode('ascii', errors='replace'))
self.assertEqual('Andr\202 x'.encode('ascii', 'ignore'),
'Andr\202 x'.encode(encoding='ascii', errors='ignore'))
# Error handling (decoding)
self.assertRaises(UnicodeError, str, b'Andr\202 x', 'ascii')
self.assertRaises(UnicodeError, str, b'Andr\202 x', 'ascii', 'strict')
self.assertEqual(str(b'Andr\202 x', 'ascii', 'ignore'), "Andr x")
self.assertEqual(str(b'Andr\202 x', 'ascii', 'replace'), 'Andr\uFFFD x')
# Error handling (unknown character names)
self.assertEqual(b"\\N{foo}xx".decode("unicode-escape", "ignore"), "xx")
# Error handling (truncated escape sequence)
self.assertRaises(UnicodeError, b"\\".decode, "unicode-escape")
# NOTE(review): 'test.unicode1'/'test.unicode2' presumably refer to
# deliberately-broken codecs registered elsewhere in the test suite;
# verify against the surrounding test infrastructure.
self.assertRaises(TypeError, b"hello".decode, "test.unicode1")
self.assertRaises(TypeError, str, b"hello", "test.unicode2")
self.assertRaises(TypeError, "hello".encode, "test.unicode1")
self.assertRaises(TypeError, "hello".encode, "test.unicode2")
# executes PyUnicode_Encode()
# NOTE(review): the 'imp' module is deprecated (removed in 3.12) --
# confirm the target interpreter version for this file.
import imp
self.assertRaises(
ImportError,
imp.find_module,
"non-existing module",
["non-existing dir"]
)
# Error handling (wrong arguments)
self.assertRaises(TypeError, "hello".encode, 42, 42, 42)
# Error handling (lone surrogate in PyUnicode_TransformDecimalToASCII())
self.assertRaises(UnicodeError, int, "\ud800")
self.assertRaises(UnicodeError, int, "\udf00")
self.assertRaises(UnicodeError, float, "\ud800")
self.assertRaises(UnicodeError, float, "\udf00")
self.assertRaises(UnicodeError, complex, "\ud800")
self.assertRaises(UnicodeError, complex, "\udf00")
def test_codecs(self):
# Smoke-test encoding, then encode/decode round-trips for a spread of
# codecs over increasing code-point ranges.
# NOTE(review): the 'unicode_internal' codec was removed in Python 3.x;
# confirm the interpreter version this test file targets.
# Encoding
self.assertEqual('hello'.encode('ascii'), b'hello')
self.assertEqual('hello'.encode('utf-7'), b'hello')
self.assertEqual('hello'.encode('utf-8'), b'hello')
self.assertEqual('hello'.encode('utf8'), b'hello')
self.assertEqual('hello'.encode('utf-16-le'), b'h\000e\000l\000l\000o\000')
self.assertEqual('hello'.encode('utf-16-be'), b'\000h\000e\000l\000l\000o')
self.assertEqual('hello'.encode('latin-1'), b'hello')
# Roundtrip safety for BMP (just the first 1024 chars)
for c in range(1024):
u = chr(c)
for encoding in ('utf-7', 'utf-8', 'utf-16', 'utf-16-le',
'utf-16-be', 'raw_unicode_escape',
'unicode_escape', 'unicode_internal'):
self.assertEqual(str(u.encode(encoding),encoding), u)
# Roundtrip safety for BMP (just the first 256 chars)
for c in range(256):
u = chr(c)
for encoding in ('latin-1',):
self.assertEqual(str(u.encode(encoding),encoding), u)
# Roundtrip safety for BMP (just the first 128 chars)
for c in range(128):
u = chr(c)
for encoding in ('ascii',):
self.assertEqual(str(u.encode(encoding),encoding), u)
# Roundtrip safety for non-BMP (just a few chars)
u = '\U00010001\U00020002\U00030003\U00040004\U00050005'
for encoding in ('utf-8', 'utf-16', 'utf-16-le', 'utf-16-be',
#'raw_unicode_escape',
'unicode_escape', 'unicode_internal'):
self.assertEqual(str(u.encode(encoding),encoding), u)
# UTF-8 must be roundtrip safe for all UCS-2 code points
# This excludes surrogates: in the full range, there would be
# a surrogate pair (\udbff\udc00), which gets converted back
# to a non-BMP character (\U0010fc00)
u = ''.join(map(chr, list(range(0,0xd800)) +
list(range(0xe000,0x10000))))
for encoding in ('utf-8',):
self.assertEqual(str(u.encode(encoding),encoding), u)
def test_codecs_charmap(self):
# 0-127
s = bytes(range(128))
for encoding in (
'cp037', 'cp1026',
'cp437', 'cp500', 'cp720', 'cp737', 'cp775', 'cp850',
'cp852', 'cp855', 'cp858', 'cp860', 'cp861', 'cp862',
'cp863', 'cp865', 'cp866',
'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6',
'iso8859_7', 'iso8859_9', 'koi8_r', 'latin_1',
'mac_cyrillic', 'mac_latin2',
'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
'cp1256', 'cp1257', 'cp1258',
'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
'cp1006', 'iso8859_8',
### These have undefined mappings:
#'cp424',
### These fail the round-trip:
#'cp875'
):
self.assertEqual(str(s, encoding).encode(encoding), s)
# 128-255
s = bytes(range(128, 256))
for encoding in (
'cp037', 'cp1026',
'cp437', 'cp500', 'cp720', 'cp737', 'cp775', 'cp850',
'cp852', 'cp855', 'cp858', 'cp860', 'cp861', 'cp862',
'cp863', 'cp865', 'cp866',
'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
'iso8859_2', 'iso8859_4', 'iso8859_5',
'iso8859_9', 'koi8_r', 'latin_1',
'mac_cyrillic', 'mac_latin2',
### These have undefined mappings:
#'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
#'cp1256', 'cp1257', 'cp1258',
#'cp424', 'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
#'iso8859_3', 'iso8859_6', 'iso8859_7',
#'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
### These fail the round-trip:
#'cp1006', 'cp875', 'iso8859_8',
):
self.assertEqual(str(s, encoding).encode(encoding), s)
def test_concatenation(self):
self.assertEqual(("abc" "def"), "abcdef")
self.assertEqual(("abc" "def"), "abcdef")
self.assertEqual(("abc" "def"), "abcdef")
self.assertEqual(("abc" "def" "ghi"), "abcdefghi")
self.assertEqual(("abc" "def" "ghi"), "abcdefghi")
def test_printing(self):
class BitBucket:
def write(self, text):
pass
out = BitBucket()
print('abc', file=out)
print('abc', 'def', file=out)
print('abc', 'def', file=out)
print('abc', 'def', file=out)
print('abc\n', file=out)
print('abc\n', end=' ', file=out)
print('abc\n', end=' ', file=out)
print('def\n', file=out)
print('def\n', file=out)
def test_ucs4(self):
x = '\U00100000'
y = x.encode("raw-unicode-escape").decode("raw-unicode-escape")
self.assertEqual(x, y)
y = br'\U00100000'
x = y.decode("raw-unicode-escape").encode("raw-unicode-escape")
self.assertEqual(x, y)
y = br'\U00010000'
x = y.decode("raw-unicode-escape").encode("raw-unicode-escape")
self.assertEqual(x, y)
try:
br'\U11111111'.decode("raw-unicode-escape")
except UnicodeDecodeError as e:
self.assertEqual(e.start, 0)
self.assertEqual(e.end, 10)
else:
self.fail("Should have raised UnicodeDecodeError")
def test_conversion(self):
# Make sure __unicode__() works properly
# NOTE(review): this looks converted from Python 2, where these classes
# presumably defined __str__/__unicode__ pairs. Foo6 and Foo7 define
# __str__ twice; only the second definition ("foou") survives, and the
# assertions below depend on that shadowing -- verify against upstream.
class Foo0:
def __str__(self):
return "foo"
class Foo1:
def __str__(self):
return "foo"
class Foo2(object):
def __str__(self):
return "foo"
class Foo3(object):
def __str__(self):
return "foo"
class Foo4(str):
def __str__(self):
return "foo"
class Foo5(str):
def __str__(self):
return "foo"
class Foo6(str):
def __str__(self):
return "foos"
def __str__(self):
return "foou"
class Foo7(str):
def __str__(self):
return "foos"
def __str__(self):
return "foou"
class Foo8(str):
# __new__ doubles the content; __str__ returns the instance itself,
# so str(Foo8("foo")) yields the doubled payload "foofoo".
def __new__(cls, content=""):
return str.__new__(cls, 2*content)
def __str__(self):
return self
class Foo9(str):
def __str__(self):
return "not unicode"
self.assertEqual(str(Foo0()), "foo")
self.assertEqual(str(Foo1()), "foo")
self.assertEqual(str(Foo2()), "foo")
self.assertEqual(str(Foo3()), "foo")
self.assertEqual(str(Foo4("bar")), "foo")
self.assertEqual(str(Foo5("bar")), "foo")
self.assertEqual(str(Foo6("bar")), "foou")
self.assertEqual(str(Foo7("bar")), "foou")
self.assertEqual(str(Foo8("foo")), "foofoo")
self.assertEqual(str(Foo9("foo")), "not unicode")
def test_unicode_repr(self):
class s1:
def __repr__(self):
return '\\n'
class s2:
def __repr__(self):
return '\\n'
self.assertEqual(repr(s1()), '\\n')
self.assertEqual(repr(s2()), '\\n')
def test_printable_repr(self):
self.assertEqual(repr('\U00010000'), "'%c'" % (0x10000,)) # printable
self.assertEqual(repr('\U00014000'), "'\\U00014000'") # nonprintable
def test_expandtabs_overflows_gracefully(self):
# This test only affects 32-bit platforms because expandtabs can only take
# an int as the max value, not a 64-bit C long. If expandtabs is changed
# to take a 64-bit long, this test should apply to all platforms.
if sys.maxsize > (1 << 32) or struct.calcsize('P') != 4:
return
self.assertRaises(OverflowError, 't\tt\t'.expandtabs, sys.maxsize)
def test_raiseMemError(self):
# Ensure that the freelist contains a consistent object, even
# when a string allocation fails with a MemoryError.
# This used to crash the interpreter,
# or leak references when the number was smaller.
# Per-character storage cost for this interpreter build (UCS-4 vs UCS-2).
charwidth = 4 if sys.maxunicode >= 0x10000 else 2
# Note: sys.maxsize is half of the actual max allocation because of
# the signedness of Py_ssize_t.
# Requesting sys.maxsize // charwidth * 2 characters is guaranteed to
# exceed the allocator's limit; doing it twice checks the freelist
# survived the first failure intact.
alloc = lambda: "a" * (sys.maxsize // charwidth * 2)
self.assertRaises(MemoryError, alloc)
self.assertRaises(MemoryError, alloc)
def test_format_subclass(self):
class S(str):
def __str__(self):
return '__str__ overridden'
s = S('xxx')
self.assertEqual("%s" % s, '__str__ overridden')
self.assertEqual("{}".format(s), '__str__ overridden')
# Test PyUnicode_FromFormat()
def test_from_format(self):
# Drives the C-level PyUnicode_FromFormat() through ctypes and checks
# the %U/%c/%A/%V conversions and the ASCII-format-string requirement.
support.import_module('ctypes')
from ctypes import pythonapi, py_object, c_int
# NOTE(review): the UCS2/UCS4-suffixed symbol names predate PEP 393
# (flexible string representation, 3.3) -- confirm the target version.
if sys.maxunicode == 65535:
name = "PyUnicodeUCS2_FromFormat"
else:
name = "PyUnicodeUCS4_FromFormat"
_PyUnicode_FromFormat = getattr(pythonapi, name)
_PyUnicode_FromFormat.restype = py_object
def PyUnicode_FromFormat(format, *args):
# str arguments must be wrapped as py_object for the varargs call.
cargs = tuple(
py_object(arg) if isinstance(arg, str) else arg
for arg in args)
return _PyUnicode_FromFormat(format, *cargs)
# ascii format, non-ascii argument
text = PyUnicode_FromFormat(b'ascii\x7f=%U', 'unicode\xe9')
self.assertEqual(text, 'ascii\x7f=unicode\xe9')
# non-ascii format, ascii argument: ensure that PyUnicode_FromFormatV()
# raises an error
# NOTE(review): the regex below uses \( in a non-raw string literal;
# newer Pythons warn about such escapes -- consider an r'' string.
self.assertRaisesRegex(ValueError,
'^PyUnicode_FromFormatV\(\) expects an ASCII-encoded format '
'string, got a non-ASCII byte: 0xe9$',
PyUnicode_FromFormat, b'unicode\xe9=%s', 'ascii')
self.assertEqual(PyUnicode_FromFormat(b'%c', c_int(0xabcd)), '\uabcd')
self.assertEqual(PyUnicode_FromFormat(b'%c', c_int(0x10ffff)), '\U0010ffff')
# other tests
text = PyUnicode_FromFormat(b'%%A:%A', 'abc\xe9\uabcd\U0010ffff')
self.assertEqual(text, r"%A:'abc\xe9\uabcd\U0010ffff'")
text = PyUnicode_FromFormat(b'repr=%V', 'abc', b'xyz')
self.assertEqual(text, 'repr=abc')
# Test string decode from parameter of %s using utf-8.
# b'\xe4\xba\xba\xe6\xb0\x91' is utf-8 encoded byte sequence of
# '\u4eba\u6c11'
text = PyUnicode_FromFormat(b'repr=%V', None, b'\xe4\xba\xba\xe6\xb0\x91')
self.assertEqual(text, 'repr=\u4eba\u6c11')
#Test replace error handler.
text = PyUnicode_FromFormat(b'repr=%V', None, b'abc\xff')
self.assertEqual(text, 'repr=abc\ufffd')
# Test PyUnicode_AsWideChar()
def test_aswidechar(self):
# Checks the (buffer, size) results of the _testcapi wrapper around
# PyUnicode_AsWideChar for various buffer lengths.
from _testcapi import unicode_aswidechar
support.import_module('ctypes')
from ctypes import c_wchar, sizeof
# Buffer smaller than the string: result is truncated, no NUL appended.
wchar, size = unicode_aswidechar('abcdef', 2)
self.assertEqual(size, 2)
self.assertEqual(wchar, 'ab')
# Buffer exactly the string length: no room for a trailing NUL.
wchar, size = unicode_aswidechar('abc', 3)
self.assertEqual(size, 3)
self.assertEqual(wchar, 'abc')
# Buffer larger than the string: a trailing NUL is appended but not
# counted in the returned size.
wchar, size = unicode_aswidechar('abc', 4)
self.assertEqual(size, 3)
self.assertEqual(wchar, 'abc\0')
wchar, size = unicode_aswidechar('abc', 10)
self.assertEqual(size, 3)
self.assertEqual(wchar, 'abc\0')
# Embedded NULs are preserved; size counts them.
wchar, size = unicode_aswidechar('abc\0def', 20)
self.assertEqual(size, 7)
self.assertEqual(wchar, 'abc\0def\0')
# Non-BMP char: needs a surrogate pair when wchar_t is 2 bytes.
nonbmp = chr(0x10ffff)
if sizeof(c_wchar) == 2:
buflen = 3
nchar = 2
else: # sizeof(c_wchar) == 4
buflen = 2
nchar = 1
wchar, size = unicode_aswidechar(nonbmp, buflen)
self.assertEqual(size, nchar)
self.assertEqual(wchar, nonbmp + '\0')
# Test PyUnicode_AsWideCharString()
def test_aswidecharstring(self):
# Unlike AsWideChar, AsWideCharString always NUL-terminates and
# returns the size excluding that terminator.
from _testcapi import unicode_aswidecharstring
support.import_module('ctypes')
from ctypes import c_wchar, sizeof
wchar, size = unicode_aswidecharstring('abc')
self.assertEqual(size, 3)
self.assertEqual(wchar, 'abc\0')
# Embedded NULs are kept and counted in the size.
wchar, size = unicode_aswidecharstring('abc\0def')
self.assertEqual(size, 7)
self.assertEqual(wchar, 'abc\0def\0')
# Non-BMP char occupies two wchar_t units (surrogate pair) when
# wchar_t is 2 bytes, one unit when it is 4 bytes.
nonbmp = chr(0x10ffff)
if sizeof(c_wchar) == 2:
nchar = 2
else: # sizeof(c_wchar) == 4
nchar = 1
wchar, size = unicode_aswidecharstring(nonbmp)
self.assertEqual(size, nchar)
self.assertEqual(wchar, nonbmp + '\0')
class StringModuleTest(unittest.TestCase):
    """Tests for the private _string helper module behind str.format()."""

    def test_formatter_parser(self):
        def parse(format):
            return list(_string.formatter_parser(format))

        # Each case: format string -> expected
        # (literal, field_name, format_spec, conversion) tuples.
        cases = [
            ("prefix {2!s}xxx{0:^+10.3f}{obj.attr!s} {z[0]!s:10}", [
                ('prefix ', '2', '', 's'),
                ('xxx', '0', '^+10.3f', None),
                ('', 'obj.attr', '', 's'),
                (' ', 'z[0]', '10', 's'),
            ]),
            ("prefix {} suffix", [
                ('prefix ', '', '', None),
                (' suffix', None, None, None),
            ]),
            ("str", [
                ('str', None, None, None),
            ]),
            ("", []),
            ("{0}", [
                ('', '0', '', None),
            ]),
        ]
        for fmt, expected in cases:
            self.assertEqual(parse(fmt), expected)
        # Non-string input is rejected outright.
        self.assertRaises(TypeError, _string.formatter_parser, 1)

    def test_formatter_field_name_split(self):
        def split(name):
            # The second element is an iterator of (is_attr, name/index)
            # pairs; materialize it for comparison.
            first, rest = _string.formatter_field_name_split(name)
            return [first, list(rest)]

        self.assertEqual(split("obj"), ["obj", []])
        self.assertEqual(split("obj.arg"), ["obj", [(True, 'arg')]])
        self.assertEqual(split("obj[key]"), ["obj", [(False, 'key')]])
        self.assertEqual(split("obj.arg[key1][key2]"), [
            "obj",
            [(True, 'arg'),
             (False, 'key1'),
             (False, 'key2'),
             ]])
        self.assertRaises(TypeError, _string.formatter_field_name_split, 1)
def test_main():
# Run every test class in this module through the shared
# test.support harness.
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| apache-2.0 |
riteshshrv/django | django/utils/inspect.py | 146 | 2383 | from __future__ import absolute_import
import inspect
from django.utils import six
def getargspec(func):
    """getargspec() shim: native on Python 2, rebuilt from
    inspect.signature() on Python 3.

    Returns a 4-tuple (args, varargs, varkw, defaults) shaped like
    inspect.getargspec's result.
    """
    if six.PY2:
        return inspect.getargspec(func)

    params = inspect.signature(func).parameters.values()
    positional_kind = inspect.Parameter.POSITIONAL_OR_KEYWORD
    args = [param.name for param in params if param.kind == positional_kind]
    star_args = [
        param.name for param in params
        if param.kind == inspect.Parameter.VAR_POSITIONAL
    ]
    star_kwargs = [
        param.name for param in params
        if param.kind == inspect.Parameter.VAR_KEYWORD
    ]
    # Defaults only exist for positional-or-keyword parameters here.
    defaults = [
        param.default for param in params
        if param.kind == positional_kind and param.default is not param.empty
    ]
    return (
        args,
        star_args[0] if star_args else None,
        star_kwargs[0] if star_kwargs else None,
        defaults or None,
    )
def get_func_args(func):
    """Return func's positional-or-keyword parameter names.

    On Python 2 the first argument is skipped (assumed to be 'self').
    """
    if six.PY2:
        return inspect.getargspec(func).args[1:]  # drop 'self'
    return [
        name for name, param in inspect.signature(func).parameters.items()
        if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
    ]
def func_accepts_kwargs(func):
    """Return True if func (or its __call__) accepts a **kwargs catch-all."""
    if not six.PY2:
        return any(
            param for param in inspect.signature(func).parameters.values()
            if param.kind == param.VAR_KEYWORD
        )
    # Python 2: not all callables are inspectable with getargspec, so try
    # the callable itself, then its __call__; if neither can be inspected,
    # assume it does accept **kwargs -- we don't want to prevent
    # registration of valid but weird callables.
    for target in (func, getattr(func, '__call__', None)):
        try:
            argspec = inspect.getargspec(target)
        except TypeError:
            continue
        return argspec[2] is not None
    return True
def func_has_no_args(func):
    """Return True if func takes a single positional argument (i.e. only
    'self' for a method).

    On Python 3 only parameters without defaults are counted.
    """
    if six.PY2:
        positional = inspect.getargspec(func)[0]
    else:
        positional = [
            param for param in inspect.signature(func).parameters.values()
            if param.kind == param.POSITIONAL_OR_KEYWORD
            and param.default is param.empty
        ]
    return len(positional) == 1
def func_supports_parameter(func, parameter):
    """Return True if func declares a parameter with the given name."""
    if six.PY2:
        # getargspec()[0] is the list of named positional arguments.
        return parameter in inspect.getargspec(func)[0]
    return parameter in inspect.signature(func).parameters
| bsd-3-clause |
omni5cience/django-inlineformfield | .tox/py27/lib/python2.7/site-packages/django/core/serializers/python.py | 66 | 6259 | """
A Python "serializer". Doesn't do much serializing per se -- just converts to
and from basic Python data types (lists, dicts, strings, etc.). Useful as a basis for
other serializers.
"""
from __future__ import unicode_literals
from django.apps import apps
from django.conf import settings
from django.core.serializers import base
from django.db import models, DEFAULT_DB_ALIAS
from django.utils.encoding import smart_text, is_protected_type
from django.utils import six
class Serializer(base.Serializer):
"""
Serializes a QuerySet to basic Python objects.
"""
internal_use_only = True
def start_serialization(self):
# Reset per-run state: the dict for the object being built and the
# accumulated list of serialized objects.
self._current = None
self.objects = []
def end_serialization(self):
pass
def start_object(self, obj):
# Fresh field dict for the object about to be serialized.
self._current = {}
def end_object(self, obj):
self.objects.append(self.get_dump_object(obj))
self._current = None
def get_dump_object(self, obj):
# Base payload: model label plus the accumulated field values.
data = {
"model": smart_text(obj._meta),
"fields": self._current,
}
# The pk is omitted when natural primary keys were requested and the
# model can produce a natural key.
if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'):
data["pk"] = smart_text(obj._get_pk_val(), strings_only=True)
return data
def handle_field(self, obj, field):
value = field._get_val_from_obj(obj)
# Protected types (i.e., primitives like None, numbers, dates,
# and Decimals) are passed through as is. All other values are
# converted to string first.
if is_protected_type(value):
self._current[field.name] = value
else:
self._current[field.name] = field.value_to_string(obj)
def handle_fk_field(self, obj, field):
# Prefer the related object's natural key when requested and
# available; otherwise store the raw foreign-key attribute value.
if self.use_natural_foreign_keys and hasattr(field.rel.to, 'natural_key'):
related = getattr(obj, field.name)
if related:
value = related.natural_key()
else:
value = None
else:
value = getattr(obj, field.get_attname())
self._current[field.name] = value
def handle_m2m_field(self, obj, field):
# Only auto-created through-tables are serialized; explicit through
# models are handled as their own objects.
if field.rel.through._meta.auto_created:
if self.use_natural_foreign_keys and hasattr(field.rel.to, 'natural_key'):
m2m_value = lambda value: value.natural_key()
else:
m2m_value = lambda value: smart_text(value._get_pk_val(), strings_only=True)
self._current[field.name] = [m2m_value(related)
for related in getattr(obj, field.name).iterator()]
def getvalue(self):
return self.objects
def Deserializer(object_list, **options):
"""
Deserialize simple Python objects back into Django ORM instances.
It's expected that you pass the Python objects themselves (instead of a
stream or a string) to the constructor
"""
db = options.pop('using', DEFAULT_DB_ALIAS)
ignore = options.pop('ignorenonexistent', False)
for d in object_list:
# Look up the model and start building a dict of data for it.
Model = _get_model(d["model"])
data = {}
if 'pk' in d:
data[Model._meta.pk.attname] = Model._meta.pk.to_python(d.get("pk", None))
# Many-to-many values are collected separately; they can only be set
# after the instance itself has been saved.
m2m_data = {}
model_fields = Model._meta.get_all_field_names()
# Handle each field
for (field_name, field_value) in six.iteritems(d["fields"]):
if ignore and field_name not in model_fields:
# skip fields no longer on model
continue
if isinstance(field_value, str):
field_value = smart_text(field_value, options.get("encoding", settings.DEFAULT_CHARSET), strings_only=True)
field = Model._meta.get_field(field_name)
# Handle M2M relations
if field.rel and isinstance(field.rel, models.ManyToManyRel):
if hasattr(field.rel.to._default_manager, 'get_by_natural_key'):
def m2m_convert(value):
# An iterable non-string value is treated as a natural key.
if hasattr(value, '__iter__') and not isinstance(value, six.text_type):
return field.rel.to._default_manager.db_manager(db).get_by_natural_key(*value).pk
else:
return smart_text(field.rel.to._meta.pk.to_python(value))
else:
m2m_convert = lambda v: smart_text(field.rel.to._meta.pk.to_python(v))
m2m_data[field.name] = [m2m_convert(pk) for pk in field_value]
# Handle FK fields
elif field.rel and isinstance(field.rel, models.ManyToOneRel):
if field_value is not None:
if hasattr(field.rel.to._default_manager, 'get_by_natural_key'):
if hasattr(field_value, '__iter__') and not isinstance(field_value, six.text_type):
obj = field.rel.to._default_manager.db_manager(db).get_by_natural_key(*field_value)
value = getattr(obj, field.rel.field_name)
# If this is a natural foreign key to an object that
# has a FK/O2O as the foreign key, use the FK value
if field.rel.to._meta.pk.rel:
value = value.pk
else:
value = field.rel.to._meta.get_field(field.rel.field_name).to_python(field_value)
data[field.attname] = value
else:
data[field.attname] = field.rel.to._meta.get_field(field.rel.field_name).to_python(field_value)
else:
data[field.attname] = None
# Handle all other fields
else:
data[field.name] = field.to_python(field_value)
obj = base.build_instance(Model, data, db)
yield base.DeserializedObject(obj, m2m_data)
def _get_model(model_identifier):
    """
    Helper to look up a model from an "app_label.model_name" string.

    Raises DeserializationError when the identifier cannot be resolved.
    """
    try:
        model = apps.get_model(model_identifier)
    except (LookupError, TypeError):
        raise base.DeserializationError(
            "Invalid model identifier: '%s'" % model_identifier)
    return model
| mit |
ganboing/malwarecookbook | 8/vmauto.py | 2 | 10452 | #!/usr/bin/python
# Copyright (C) 2010 Michael Ligh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# [NOTES] -----------------------------------------------------------
# 1) If you're running VirtualBox on Windows, you'll need win32com, which is
# included in the Python Extensions for Windows package
#--------------------------------------------------------------------
import sys, os, time, glob, shutil
from optparse import OptionParser
import subprocess
# -----------------------------------------------------------------------
# Map of known vmrun locations to the '-T' host-type string vmrun expects.
vm_paths = {
# the standard path on Mac OS X
'/Library/Application Support/VMware Fusion/vmrun': 'fusion',
# the standard path on Linux
'/usr/bin/vmrun': 'ws',
# the standard path on Windows
'C:\\Program Files\\VMware\\VMware Workstation\\vmrun.exe': 'ws',
}
def pinfo(msg):
# Print an informational message (Python 2 print statement).
print "[INFO] ", msg
def perror(msg):
# Print an error message (Python 2 print statement).
print "[ERROR] ", msg
# -----------------------------------------------------------------------
class VBoxAuto:
# Drives a VirtualBox VM through the vboxapi bindings: start/stop,
# snapshot revert, and in-guest process execution.
def __init__(self, machine):
# 'machine' is a VM name or uuid; resolved to an IMachine in check().
self.machine = machine
self.ctx = {}
self.mach = None
def get_mach(self):
return self.ctx['global'].getArray(self.ctx['vb'], 'machines')
def check(self):
# Initialize the vboxapi context and resolve self.machine.
# Returns False (after logging) on any failure.
try:
from vboxapi import VirtualBoxManager
except ImportError:
perror('You need to install VirtualBox!')
return False
vbm = VirtualBoxManager(None, None)
self.ctx = {'global':vbm,
'const' :vbm.constants,
'vb' :vbm.vbox,
'mgr' :vbm.mgr}
# the machine name or id must be valid
for m in self.get_mach():
if m.name == self.machine or m.id == self.machine:
self.mach = m
break
if self.mach == None:
perror('Cannot find the machine: %s' % self.machine)
return False
pinfo('Using %s (uuid: %s)' % (self.mach.name, self.mach.id))
pinfo('Session state: %s' % self.get_const(
"SessionState", self.mach.sessionState))
pinfo('Machine state: %s' % self.get_const(
"MachineState", self.mach.state))
return True
def get_const(self, enum, elem):
# Map a numeric enum value back to its constant name.
# this lookup fails on Python2.6 - if that happens
# then just return the element number
# NOTE(review): if the lookup succeeds but finds no match, this
# falls off the end and returns None -- confirm that is intended.
try:
all = self.ctx['const'].all_values(enum)
for e in all.keys():
if str(elem) == str(all[e]):
return e
except:
return '%d' % elem
def list(self):
# Print name/id/state for every registered machine.
try:
for m in self.get_mach():
print "%-12s %s (state:%s/%s)" %(m.name, m.id,
self.get_const("MachineState", m.state),
self.get_const("SessionState", m.sessionState))
except:
perror('No machines. Did you call check() first?')
def start(self, nsecwait=20):
# Power on the VM with a GUI session, then wait nsecwait seconds
# for the guest OS to boot.
vb = self.ctx['vb']
session = self.ctx['mgr'].getSessionObject(vb)
p = vb.openRemoteSession(session, self.mach.id, 'gui', '')
while not p.completed:
p.waitForCompletion(1000)
self.ctx['global'].waitForEvents(0)
if int(p.resultCode) == 0:
session.close()
else:
perror('Cannot start machine!')
pinfo('Waiting %d seconds to boot...' % nsecwait)
time.sleep(nsecwait)
def opensession(self):
# Open a machine session; returns (session, machine) pair.
session = self.ctx['global'].openMachineSession(self.mach.id)
mach = session.machine
return (session, mach)
def closesession(self, session):
self.ctx['global'].closeMachineSession(session)
time.sleep(5)
def stop(self):
(session, mach) = self.opensession()
pinfo('Powering down the system')
try:
session.console.powerDown()
time.sleep(5)
self.closesession(session)
except Exception, e:
pinfo(e)
def revert(self, snapname):
# Revert a VM to the specified snapshot
(session, mach) = self.opensession()
pinfo("Reverting to snapshot '%s'" % snapname)
try:
snap = mach.findSnapshot(snapname)
session.console.restoreSnapshot(snap)
time.sleep(5)
self.closesession(session)
except Exception, e:
pinfo(e)
def winexec(self, user, passwd, args):
# Execute args[0] inside the guest with args[1:] as its arguments,
# authenticating as user/passwd; logs the resulting process id.
(session, mach) = self.opensession()
try:
argstr = ' '.join(args[1:])
except:
argstr = ''
pinfo("Executing '%s' with args '%s'" % (args[0], argstr))
pinfo("If this set fails, set up autologin for your user.")
env = []
ret = session.console.guest.executeProcess(
args[0],
0,
args,
env,
user, passwd, 0)
# on Windows, executeProcess returns an IProgress instance
if os.name == "nt":
pid = ret[3]
else:
pid = ret[1]
pinfo('Process ID: %d' % pid)
# -----------------------------------------------------------------------
class VMwareAuto:
# Thin wrapper around the 'vmrun' command-line tool for controlling a
# VMware VM described by a .vmx file.
def __init__(self, vmx):
self.vmx = vmx
self.vmrun = None
self.vmtype = None
# NOTE(review): raising a plain string is invalid (TypeError on
# Python >= 2.6); these should raise an Exception subclass.
if not os.path.isfile(vmx):
raise 'Cannot find vmx file in ' + vmx
# Probe the known vmrun install locations (see vm_paths above).
for (path,type) in vm_paths.items():
if os.path.isfile(path):
self.vmrun = path
self.vmtype = type
break
if self.vmrun == None:
raise 'Cannot find vmrun in ' + ','.join(vm_paths.keys())
else:
print 'Found vmrun (running on %s)' % self.vmtype
def setuser(self, user, passwd):
'''
Sets the credentials on the guest machine to
use when copying files to/from the guest and
when executing programs in the guest
'''
self.user = user
self.passwd = passwd
def run_cmd(self, cmd, args=[], guest=False):
'''
Execute a command through vmrun. Additional
parameters for commands can be set with args[]

Returns the combined stdout/stderr of vmrun.
'''
# NOTE(review): mutable default 'args=[]' is shared across calls;
# harmless here only because it is never mutated.
print 'Executing ' + cmd + ' please wait...'
pargs = [self.vmrun, '-T', self.vmtype]
if guest:
# Guest operations need the credentials set via setuser().
pargs.extend(['-gu', self.user])
pargs.extend(['-gp', self.passwd])
pargs.append(cmd)
pargs.append(self.vmx)
pargs.extend(args)
proc = subprocess.Popen(
pargs,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
return proc.communicate()[0]
def list(self):
'''
List the running virtual machines
'''
pargs = [self.vmrun, 'list']
print pargs
proc = subprocess.Popen(
pargs,
stdout=subprocess.PIPE
)
return proc.communicate()[0]
def start(self):
'''
Start the virtual machine specified by self.vmx
'''
return self.run_cmd('start')
def stop(self):
'''
Stop the virtual machine specified by self.vmx
'''
return self.run_cmd('stop')
def revert(self, snapname):
'''
Revert the virtual machine specified by self.vmx
to the given snapshot
'''
return self.run_cmd('revertToSnapshot', [snapname])
def suspend(self):
'''
Suspend the virtual machine specified by self.vmx.
This is usually done after executing malware in order
freeze the machine's state and obtain its physical
memory sample
'''
return self.run_cmd('suspend')
def scrshot(self, outfile):
'''
Take a screen shot of the guest's desktop and
save it to the file specified by outfile
'''
return self.run_cmd('captureScreen', [outfile], guest=True)
def copytovm(self, src, dst):
'''
Copy the src file (src is a path on the host) to
dst (dst is a path on the guest).
'''
if not os.path.isfile(src):
perror('Cannot locate source file ' + src)
return
return self.run_cmd(
'copyFileFromHostToGuest', [src, dst], guest=True)
def copytohost(self, src, dst):
'''
Copy the src file (src is a path on the guest) to
dst (dst is a path on the host).
'''
return self.run_cmd(
'copyFileFromGuestToHost', [src, dst], guest=True)
def winexec(self, file, args=''):
'''
Execute a command in the guest with supplied arguments.
You can use this to execute malware or existing programs
on the guest machine such as monitoring tools or whatever.
'''
return self.run_cmd(
'runProgramInGuest',
[
'-noWait',
'-interactive',
'-activeWindow',
file, args
],
guest=True)
def findmem(self):
'''
Find the file on the host machine's file system that
represents the guest's physical memory. This is usually
only available when the guest is suspended
'''
path = os.path.dirname(self.vmx)
# Skip snapshot memory files; return '' when nothing is found.
mems = glob.glob('%s/*.vmem' % (path))
mems = [m for m in mems if "Snapshot" not in m]
return mems[0] if len(mems) else ''
def main(argv):
# Placeholder entry point: this module is meant to be imported, not run.
print 'Nothing to do. Import me!'
return 0
if __name__ == '__main__':
main(sys.argv)
| gpl-3.0 |
beezee/GAE-Django-site | django/utils/tzinfo.py | 313 | 2511 | "Implementation of tzinfo classes for use with datetime.datetime."
import time
from datetime import timedelta, tzinfo
from django.utils.encoding import smart_unicode, smart_str, DEFAULT_LOCALE_ENCODING
class FixedOffset(tzinfo):
    """Fixed offset in minutes east from UTC.

    Accepts either an int number of minutes or a timedelta.
    """

    def __init__(self, offset):
        if isinstance(offset, timedelta):
            self.__offset = offset
            # Derive the minute count back from the stored delta.
            offset = self.__offset.seconds // 60
        else:
            self.__offset = timedelta(minutes=offset)
        sign = '-' if offset < 0 else '+'
        hours = abs(offset) / 60.
        minutes = abs(offset) % 60
        self.__name = u"%s%02d%02d" % (sign, hours, minutes)

    def __repr__(self):
        return self.__name

    def utcoffset(self, dt):
        return self.__offset

    def tzname(self, dt):
        return self.__name

    def dst(self, dt):
        # A fixed offset never observes daylight saving time.
        return timedelta(0)
class LocalTimezone(tzinfo):
"Proxy timezone information from time module."
def __init__(self, dt):
tzinfo.__init__(self)
# Capture the tz name for the supplied datetime up front.
self._tzname = self.tzname(dt)
def __repr__(self):
return smart_str(self._tzname)
def utcoffset(self, dt):
# time.timezone/altzone are seconds WEST of UTC, hence the negation.
if self._isdst(dt):
return timedelta(seconds=-time.altzone)
else:
return timedelta(seconds=-time.timezone)
def dst(self, dt):
# DST adjustment = (DST offset) - (standard offset); zero when not in DST.
if self._isdst(dt):
return timedelta(seconds=-time.altzone) - timedelta(seconds=-time.timezone)
else:
return timedelta(0)
def tzname(self, dt):
# Returns None when the platform tz name can't be decoded.
try:
return smart_unicode(time.tzname[self._isdst(dt)],
DEFAULT_LOCALE_ENCODING)
except UnicodeDecodeError:
return None
def _isdst(self, dt):
# Build a struct_time-like tuple; the trailing -1 asks mktime to
# determine the DST flag itself.
tt = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.weekday(), 0, -1)
try:
stamp = time.mktime(tt)
except (OverflowError, ValueError):
# 32 bit systems can't handle dates after Jan 2038, and certain
# systems can't handle dates before ~1901-12-01:
#
# >>> time.mktime((1900, 1, 13, 0, 0, 0, 0, 0, 0))
# OverflowError: mktime argument out of range
# >>> time.mktime((1850, 1, 13, 0, 0, 0, 0, 0, 0))
# ValueError: year out of range
#
# In this case, we fake the date, because we only care about the
# DST flag.
tt = (2037,) + tt[1:]
stamp = time.mktime(tt)
tt = time.localtime(stamp)
return tt.tm_isdst > 0
| bsd-3-clause |
HybridF5/jacket | jacket/tests/storage/unit/test_infortrend_common.py | 1 | 77414 | # Copyright (c) 2015 Infortrend Technology, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from jacket.storage import exception
from jacket.storage import test
from jacket.tests.storage.unit import test_infortrend_cli
from jacket.tests.storage.unit import utils
from jacket.storage.volume import configuration
from jacket.storage.volume.drivers.infortrend.eonstor_ds_cli import common_cli
# Canonical (return-code, output) tuples used by the fake CLI executor below.
SUCCEED = (0, '')
FAKE_ERROR_RETURN = (-1, '')
class InfortrendTestCass(test.TestCase):
# Shared base for the Infortrend driver tests: wires a mocked
# configuration and a fake CLI command executor into the driver.
# NOTE(review): the class name looks like a typo for "TestCase", but
# renaming would break subclasses elsewhere.
def __init__(self, *args, **kwargs):
super(InfortrendTestCass, self).__init__(*args, **kwargs)
def setUp(self):
super(InfortrendTestCass, self).setUp()
self.cli_data = test_infortrend_cli.InfortrendCLITestData()
self.configuration = configuration.Configuration(None)
self.configuration.append_config_values = mock.Mock(return_value=0)
self.configuration.safe_get = self._fake_safe_get
def _fake_safe_get(self, key):
# safe_get stand-in: read the attribute straight off the config mock.
return getattr(self.configuration, key)
def _driver_setup(self, mock_commands, configuration=None):
# Build the driver under test and replace its CLI execution with a
# mock driven by the given command->response mapping.
if configuration is None:
configuration = self.configuration
self.driver = self._get_driver(configuration)
mock_commands_execute = self._mock_command_execute(mock_commands)
mock_cli = mock.Mock(side_effect=mock_commands_execute)
self.driver._execute_command = mock_cli
def _get_driver(self, conf):
# Subclasses must return the concrete driver instance to test.
raise NotImplementedError
def _mock_command_execute(self, mock_commands):
# Returns a fake executor. Per command, the mapped value may be:
# a list (responses consumed one per call), a tuple (fixed
# response), or a callable (invoked with the CLI args).
def fake_execute_command(cli_type, *args, **kwargs):
if cli_type in mock_commands.keys():
if isinstance(mock_commands[cli_type], list):
ret = mock_commands[cli_type][0]
del mock_commands[cli_type][0]
return ret
elif isinstance(mock_commands[cli_type], tuple):
return mock_commands[cli_type]
else:
return mock_commands[cli_type](*args, **kwargs)
return FAKE_ERROR_RETURN
return fake_execute_command
def _mock_show_lv_for_migrate(self, *args, **kwargs):
# Canned 'show lv' output for migration tests ('tier' variant aware).
if 'tier' in args:
return self.cli_data.get_test_show_lv_tier_for_migration()
return self.cli_data.get_test_show_lv()
def _mock_show_lv(self, *args, **kwargs):
# Canned 'show lv' output ('tier' variant aware).
if 'tier' in args:
return self.cli_data.get_test_show_lv_tier()
return self.cli_data.get_test_show_lv()
def _assert_cli_has_calls(self, expect_cli_cmd):
self.driver._execute_command.assert_has_calls(expect_cli_cmd)
class InfortrendFCCommonTestCase(InfortrendTestCass):
    """Unit tests for common_cli.InfortrendCommon in FC mode."""

    def __init__(self, *args, **kwargs):
        super(InfortrendFCCommonTestCase, self).__init__(*args, **kwargs)

    def setUp(self):
        # FC configuration maps over channels 0 and 5 on both slots.
        super(InfortrendFCCommonTestCase, self).setUp()
        self.configuration.volume_backend_name = 'infortrend_backend_1'
        self.configuration.san_ip = self.cli_data.fake_manage_port_ip[0]
        self.configuration.san_password = '111111'
        self.configuration.infortrend_provisioning = 'full'
        self.configuration.infortrend_tiering = '0'
        self.configuration.infortrend_pools_name = 'LV-1, LV-2'
        self.configuration.infortrend_slots_a_channels_id = '0,5'
        self.configuration.infortrend_slots_b_channels_id = '0,5'
        self.configuration.infortrend_cli_timeout = 30

    def _get_driver(self, conf):
        return common_cli.InfortrendCommon('FC', configuration=conf)

    def test_normal_channel(self):
        # Single-controller model: only slot_a entries get populated.
        test_map_dict = {
            'slot_a': {'0': [], '5': []},
            'slot_b': {},
        }
        test_target_dict = {
            'slot_a': {'0': '112', '5': '112'},
            'slot_b': {},
        }
        mock_commands = {
            'ShowChannel': self.cli_data.get_test_show_channel(),
        }
        self._driver_setup(mock_commands)

        self.driver._init_map_info(True)

        self.assertDictMatch(test_map_dict, self.driver.map_dict)
        self.assertDictMatch(test_target_dict, self.driver.target_dict)

    def test_normal_channel_with_r_model(self):
        # Dual-controller (R model): both slots carry channel maps.
        test_map_dict = {
            'slot_a': {'0': [], '5': []},
            'slot_b': {'0': [], '5': []},
        }
        test_target_dict = {
            'slot_a': {'0': '112', '5': '112'},
            'slot_b': {'0': '113', '5': '113'},
        }
        mock_commands = {
            'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
        }
        self._driver_setup(mock_commands)

        self.driver._init_map_info(True)

        self.assertDictMatch(test_map_dict, self.driver.map_dict)
        self.assertDictMatch(test_target_dict, self.driver.target_dict)

    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
    def test_initialize_connection(self):
        """Attach succeeds and returns the expected FC properties."""
        test_volume = self.cli_data.test_volume
        test_connector = self.cli_data.test_connector_fc

        mock_commands = {
            'ShowChannel': self.cli_data.get_test_show_channel_without_mcs(),
            'ShowMap': self.cli_data.get_test_show_map(),
            'CreateMap': SUCCEED,
            'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
        }
        self._driver_setup(mock_commands)

        properties = self.driver.initialize_connection(
            test_volume, test_connector)

        self.assertDictMatch(self.cli_data.test_fc_properties, properties)

    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
    def test_initialize_connection_specific_channel(self):
        """Attach honors a restricted slot_a channel list ('5' only)."""
        test_volume = self.cli_data.test_volume
        test_connector = self.cli_data.test_connector_fc
        configuration = copy.copy(self.configuration)
        configuration.infortrend_slots_a_channels_id = '5'

        mock_commands = {
            'ShowChannel': self.cli_data.get_test_show_channel(),
            'ShowMap': self.cli_data.get_test_show_map(),
            'CreateMap': SUCCEED,
            'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
        }
        self._driver_setup(mock_commands, configuration)

        properties = self.driver.initialize_connection(
            test_volume, test_connector)

        self.assertDictMatch(
            self.cli_data.test_fc_properties_with_specific_channel, properties)

    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
    def test_initialize_connection_with_diff_target_id(self):
        # Channels reporting non-default target ids must be used as-is
        # in the CreateMap call ('48' here instead of '112').
        test_volume = self.cli_data.test_volume
        test_connector = self.cli_data.test_connector_fc
        test_initiator_wwpns = test_connector['wwpns']
        test_partition_id = self.cli_data.fake_partition_id[0]
        configuration = copy.copy(self.configuration)
        configuration.infortrend_slots_a_channels_id = '5'

        mock_commands = {
            'ShowChannel':
                self.cli_data.get_test_show_channel_with_diff_target_id(),
            'ShowMap': self.cli_data.get_test_show_map(),
            'CreateMap': SUCCEED,
            'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
        }
        self._driver_setup(mock_commands, configuration)

        properties = self.driver.initialize_connection(
            test_volume, test_connector)

        expect_cli_cmd = [
            mock.call('ShowChannel'),
            mock.call('ShowMap'),
            mock.call('ShowWWN'),
            mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0',
                      'wwn=%s' % test_initiator_wwpns[0]),
        ]
        self._assert_cli_has_calls(expect_cli_cmd)

        self.assertDictMatch(
            self.cli_data.test_fc_properties_with_specific_channel, properties)

    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
    def test_initialize_connection_multipath_with_r_model(self):
        test_volume = self.cli_data.test_volume
        test_connector = copy.deepcopy(self.cli_data.test_connector_fc)

        mock_commands = {
            'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
            'ShowMap': self.cli_data.get_test_show_map(),
            'CreateMap': SUCCEED,
            'ShowWWN': self.cli_data.get_test_show_wwn(),
        }
        self._driver_setup(mock_commands)

        properties = self.driver.initialize_connection(
            test_volume, test_connector)

        self.assertDictMatch(
            self.cli_data.test_fc_properties_multipath_r_model, properties)

    def test_initialize_connection_with_get_wwn_fail(self):
        # A failing ShowWWN must surface as InfortrendCliException.
        test_volume = self.cli_data.test_volume
        test_connector = self.cli_data.test_connector_fc

        mock_commands = {
            'ShowChannel': self.cli_data.get_test_show_channel(),
            'ShowMap': self.cli_data.get_test_show_map(),
            'CreateMap': SUCCEED,
            'ShowWWN': FAKE_ERROR_RETURN,
        }
        self._driver_setup(mock_commands)

        self.assertRaises(
            exception.InfortrendCliException,
            self.driver.initialize_connection,
            test_volume,
            test_connector)

    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
    def test_initialize_connection_with_zoning(self):
        """With an FC lookup service, each initiator WWPN is mapped on

        every configured channel and the zoning map is consulted.
        """
        test_volume = self.cli_data.test_volume
        test_connector = self.cli_data.test_connector_fc
        test_initiator_wwpns = test_connector['wwpns']
        test_partition_id = self.cli_data.fake_partition_id[0]
        test_all_target_wwpns = self.cli_data.fake_target_wwpns[0:2]
        test_lookup_map = self.cli_data.fake_lookup_map

        mock_commands = {
            'ShowChannel': self.cli_data.get_test_show_channel(),
            'ShowMap': self.cli_data.get_test_show_map(),
            'CreateMap': SUCCEED,
            'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
        }
        self._driver_setup(mock_commands)
        self.driver.fc_lookup_service = mock.Mock()
        get_device_mapping_from_network = (
            self.driver.fc_lookup_service.get_device_mapping_from_network
        )
        get_device_mapping_from_network.return_value = test_lookup_map

        properties = self.driver.initialize_connection(
            test_volume, test_connector)

        get_device_mapping_from_network.assert_has_calls(
            [mock.call(test_connector['wwpns'], test_all_target_wwpns)])

        expect_cli_cmd = [
            mock.call('ShowChannel'),
            mock.call('ShowMap'),
            mock.call('ShowWWN'),
            mock.call('CreateMap', 'part', test_partition_id, '0', '112', '0',
                      'wwn=%s' % test_initiator_wwpns[0]),
            mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
                      'wwn=%s' % test_initiator_wwpns[0]),
            mock.call('CreateMap', 'part', test_partition_id, '0', '112', '0',
                      'wwn=%s' % test_initiator_wwpns[1]),
            mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
                      'wwn=%s' % test_initiator_wwpns[1]),
        ]
        self._assert_cli_has_calls(expect_cli_cmd)

        self.assertDictMatch(
            self.cli_data.test_fc_properties_zoning, properties)

    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
    def test_initialize_connection_with_zoning_r_model(self):
        test_volume = self.cli_data.test_volume
        test_connector = self.cli_data.test_connector_fc
        test_initiator_wwpns = test_connector['wwpns']
        test_partition_id = self.cli_data.fake_partition_id[0]
        # Reorder the fixture WWPNs to match the R-model channel layout.
        test_all_target_wwpns = self.cli_data.fake_target_wwpns[:]
        test_all_target_wwpns[1] = self.cli_data.fake_target_wwpns[2]
        test_all_target_wwpns[2] = self.cli_data.fake_target_wwpns[1]
        test_lookup_map = self.cli_data.fake_lookup_map_r_model

        mock_commands = {
            'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
            'ShowMap': self.cli_data.get_test_show_map(),
            'CreateMap': SUCCEED,
            'ShowWWN': self.cli_data.get_test_show_wwn(),
        }
        self._driver_setup(mock_commands)
        self.driver.fc_lookup_service = mock.Mock()
        get_device_mapping_from_network = (
            self.driver.fc_lookup_service.get_device_mapping_from_network
        )
        get_device_mapping_from_network.return_value = test_lookup_map

        properties = self.driver.initialize_connection(
            test_volume, test_connector)

        get_device_mapping_from_network.assert_has_calls(
            [mock.call(test_connector['wwpns'], test_all_target_wwpns)])

        expect_cli_cmd = [
            mock.call('ShowChannel'),
            mock.call('ShowMap'),
            mock.call('ShowWWN'),
            mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
                      'wwn=%s' % test_initiator_wwpns[0]),
            mock.call('CreateMap', 'part', test_partition_id, '0', '113', '0',
                      'wwn=%s' % test_initiator_wwpns[0]),
            mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0',
                      'wwn=%s' % test_initiator_wwpns[1]),
            mock.call('CreateMap', 'part', test_partition_id, '0', '113', '0',
                      'wwn=%s' % test_initiator_wwpns[1]),
        ]
        self._assert_cli_has_calls(expect_cli_cmd)

        self.assertDictMatch(
            self.cli_data.test_fc_properties_zoning_r_model, properties)

    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
    def test_initialize_connection_with_zoning_r_model_diff_target_id(self):
        test_volume = self.cli_data.test_volume
        test_connector = self.cli_data.test_connector_fc
        test_initiator_wwpns = test_connector['wwpns']
        test_partition_id = self.cli_data.fake_partition_id[0]
        test_all_target_wwpns = self.cli_data.fake_target_wwpns[:]
        test_all_target_wwpns[1] = self.cli_data.fake_target_wwpns[2]
        test_all_target_wwpns[2] = self.cli_data.fake_target_wwpns[1]
        test_lookup_map = self.cli_data.fake_lookup_map_r_model

        mock_commands = {
            'ShowChannel':
                self.cli_data.get_test_show_channel_r_model_diff_target_id(),
            'ShowMap': self.cli_data.get_test_show_map(),
            'CreateMap': SUCCEED,
            'ShowWWN': self.cli_data.get_test_show_wwn_with_diff_target_id(),
        }
        self._driver_setup(mock_commands)
        self.driver.fc_lookup_service = mock.Mock()
        get_device_mapping_from_network = (
            self.driver.fc_lookup_service.get_device_mapping_from_network
        )
        get_device_mapping_from_network.return_value = test_lookup_map

        properties = self.driver.initialize_connection(
            test_volume, test_connector)

        get_device_mapping_from_network.assert_has_calls(
            [mock.call(test_connector['wwpns'], test_all_target_wwpns)])

        expect_cli_cmd = [
            mock.call('ShowChannel'),
            mock.call('ShowMap'),
            mock.call('ShowWWN'),
            mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0',
                      'wwn=%s' % test_initiator_wwpns[0]),
            mock.call('CreateMap', 'part', test_partition_id, '0', '33', '0',
                      'wwn=%s' % test_initiator_wwpns[0]),
            mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0',
                      'wwn=%s' % test_initiator_wwpns[1]),
            mock.call('CreateMap', 'part', test_partition_id, '0', '33', '0',
                      'wwn=%s' % test_initiator_wwpns[1]),
        ]
        self._assert_cli_has_calls(expect_cli_cmd)

        self.assertDictMatch(
            self.cli_data.test_fc_properties_zoning_r_model, properties)

    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
    def test_terminate_connection(self):
        """Detach deletes the partition's map and refreshes the map view."""
        test_volume = self.cli_data.test_volume
        test_partition_id = self.cli_data.fake_partition_id[0]
        test_connector = self.cli_data.test_connector_fc

        mock_commands = {
            'DeleteMap': SUCCEED,
            'ShowMap': self.cli_data.get_test_show_map(),
        }
        self._driver_setup(mock_commands)

        self.driver.terminate_connection(test_volume, test_connector)

        expect_cli_cmd = [
            mock.call('DeleteMap', 'part', test_partition_id, '-y'),
            mock.call('ShowMap'),
        ]
        self._assert_cli_has_calls(expect_cli_cmd)

    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
    def test_terminate_connection_with_zoning(self):
        # With zoning, detach additionally queries WWNs and returns
        # connection info for the zone manager to tear down.
        test_volume = self.cli_data.test_volume
        test_partition_id = self.cli_data.fake_partition_id[0]
        test_connector = self.cli_data.test_connector_fc
        test_all_target_wwpns = self.cli_data.fake_target_wwpns[0:2]
        test_lookup_map = self.cli_data.fake_lookup_map

        mock_commands = {
            'DeleteMap': SUCCEED,
            'ShowMap': self.cli_data.get_test_show_map(),
            'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(),
        }
        self._driver_setup(mock_commands)
        self.driver.map_dict = {
            'slot_a': {'0': [], '5': []},
            'slot_b': {},
        }
        self.driver.fc_lookup_service = mock.Mock()
        get_device_mapping_from_network = (
            self.driver.fc_lookup_service.get_device_mapping_from_network
        )
        get_device_mapping_from_network.return_value = test_lookup_map

        conn_info = self.driver.terminate_connection(
            test_volume, test_connector)

        get_device_mapping_from_network.assert_has_calls(
            [mock.call(test_connector['wwpns'], test_all_target_wwpns)])

        expect_cli_cmd = [
            mock.call('DeleteMap', 'part', test_partition_id, '-y'),
            mock.call('ShowMap'),
            mock.call('ShowWWN'),
        ]
        self._assert_cli_has_calls(expect_cli_cmd)

        self.assertDictMatch(
            self.cli_data.test_fc_terminate_conn_info, conn_info)

    @mock.patch.object(common_cli.LOG, 'info', mock.Mock())
    def test_terminate_connection_with_zoning_and_lun_map_exist(self):
        # If other LUN maps remain on the zone, no zoning teardown info
        # is returned (conn_info is None).
        test_volume = self.cli_data.test_volume
        test_partition_id = self.cli_data.fake_partition_id[0]
        test_connector = self.cli_data.test_connector_fc

        mock_commands = {
            'DeleteMap': SUCCEED,
            'ShowMap': self.cli_data.get_show_map_with_lun_map_on_zoning(),
        }
        self._driver_setup(mock_commands)
        self.driver.map_dict = {
            'slot_a': {'0': [], '5': []},
            'slot_b': {},
        }
        self.driver.target_dict = {
            'slot_a': {'0': '112', '5': '112'},
            'slot_b': {},
        }
        self.driver.fc_lookup_service = mock.Mock()

        conn_info = self.driver.terminate_connection(
            test_volume, test_connector)

        expect_cli_cmd = [
            mock.call('DeleteMap', 'part', test_partition_id, '-y'),
            mock.call('ShowMap'),
        ]
        self._assert_cli_has_calls(expect_cli_cmd)

        self.assertIsNone(conn_info)
class InfortrendiSCSICommonTestCase(InfortrendTestCass):
def __init__(self, *args, **kwargs):
super(InfortrendiSCSICommonTestCase, self).__init__(*args, **kwargs)
def setUp(self):
super(InfortrendiSCSICommonTestCase, self).setUp()
self.configuration.volume_backend_name = 'infortrend_backend_1'
self.configuration.san_ip = self.cli_data.fake_manage_port_ip[0]
self.configuration.san_password = '111111'
self.configuration.infortrend_provisioning = 'full'
self.configuration.infortrend_tiering = '0'
self.configuration.infortrend_pools_name = 'LV-1, LV-2'
self.configuration.infortrend_slots_a_channels_id = '1,2,4'
self.configuration.infortrend_slots_b_channels_id = '1,2,4'
def _get_driver(self, conf):
return common_cli.InfortrendCommon('iSCSI', configuration=conf)
@mock.patch.object(common_cli.LOG, 'warning')
def test_create_map_warning_return_code(self, log_warning):
FAKE_RETURN_CODE = (20, '')
mock_commands = {
'CreateMap': FAKE_RETURN_CODE,
}
self._driver_setup(mock_commands)
self.driver._execute('CreateMap')
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'warning')
def test_delete_map_warning_return_code(self, log_warning):
FAKE_RETURN_CODE = (11, '')
mock_commands = {
'DeleteMap': FAKE_RETURN_CODE,
}
self._driver_setup(mock_commands)
self.driver._execute('DeleteMap')
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'warning')
def test_create_iqn_warning_return_code(self, log_warning):
FAKE_RETURN_CODE = (20, '')
mock_commands = {
'CreateIQN': FAKE_RETURN_CODE,
}
self._driver_setup(mock_commands)
self.driver._execute('CreateIQN')
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'warning')
def test_delete_iqn_warning_return_code_has_map(self, log_warning):
FAKE_RETURN_CODE = (20, '')
mock_commands = {
'DeleteIQN': FAKE_RETURN_CODE,
}
self._driver_setup(mock_commands)
self.driver._execute('DeleteIQN')
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'warning')
def test_delete_iqn_warning_return_code_no_such_name(self, log_warning):
FAKE_RETURN_CODE = (11, '')
mock_commands = {
'DeleteIQN': FAKE_RETURN_CODE,
}
self._driver_setup(mock_commands)
self.driver._execute('DeleteIQN')
self.assertEqual(1, log_warning.call_count)
def test_normal_channel(self):
test_map_dict = {
'slot_a': {'1': [], '2': [], '4': []},
'slot_b': {},
}
test_target_dict = {
'slot_a': {'1': '0', '2': '0', '4': '0'},
'slot_b': {},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
}
self._driver_setup(mock_commands)
self.driver._init_map_info()
self.assertDictMatch(test_map_dict, self.driver.map_dict)
self.assertDictMatch(test_target_dict, self.driver.target_dict)
def test_normal_channel_with_multipath(self):
test_map_dict = {
'slot_a': {'1': [], '2': [], '4': []},
'slot_b': {'1': [], '2': [], '4': []},
}
test_target_dict = {
'slot_a': {'1': '0', '2': '0', '4': '0'},
'slot_b': {'1': '1', '2': '1', '4': '1'},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
}
self._driver_setup(mock_commands)
self.driver._init_map_info(multipath=True)
self.assertDictMatch(test_map_dict, self.driver.map_dict)
self.assertDictMatch(test_target_dict, self.driver.target_dict)
def test_specific_channel(self):
configuration = copy.copy(self.configuration)
configuration.infortrend_slots_a_channels_id = '2, 4'
test_map_dict = {
'slot_a': {'2': [], '4': []},
'slot_b': {},
}
test_target_dict = {
'slot_a': {'2': '0', '4': '0'},
'slot_b': {},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
}
self._driver_setup(mock_commands, configuration)
self.driver._init_map_info()
self.assertDictMatch(test_map_dict, self.driver.map_dict)
self.assertDictMatch(test_target_dict, self.driver.target_dict)
def test_update_mcs_dict(self):
configuration = copy.copy(self.configuration)
configuration.use_multipath_for_image_xfer = True
test_mcs_dict = {
'slot_a': {'1': ['1', '2'], '2': ['4']},
'slot_b': {},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_with_mcs(),
}
self._driver_setup(mock_commands, configuration)
self.driver._init_map_info()
self.assertDictMatch(test_mcs_dict, self.driver.mcs_dict)
def test_mapping_info_with_mcs(self):
configuration = copy.copy(self.configuration)
configuration.use_multipath_for_image_xfer = True
fake_mcs_dict = {
'slot_a': {'0': ['1', '2'], '2': ['4']},
'slot_b': {},
}
lun_list = list(range(0, 127))
fake_map_dict = {
'slot_a': {'1': lun_list[2:], '2': lun_list[:], '4': lun_list[1:]},
'slot_b': {},
}
test_map_chl = {
'slot_a': ['1', '2'],
}
test_map_lun = ['2']
test_mcs_id = '0'
self.driver = self._get_driver(configuration)
self.driver.mcs_dict = fake_mcs_dict
self.driver.map_dict = fake_map_dict
map_chl, map_lun, mcs_id = self.driver._get_mapping_info_with_mcs()
self.assertDictMatch(test_map_chl, map_chl)
self.assertEqual(test_map_lun, map_lun)
self.assertEqual(test_mcs_id, mcs_id)
def test_mapping_info_with_mcs_multi_group(self):
configuration = copy.copy(self.configuration)
configuration.use_multipath_for_image_xfer = True
fake_mcs_dict = {
'slot_a': {'0': ['1', '2'], '1': ['3', '4'], '2': ['5']},
'slot_b': {},
}
lun_list = list(range(0, 127))
fake_map_dict = {
'slot_a': {
'1': lun_list[2:],
'2': lun_list[:],
'3': lun_list[:],
'4': lun_list[1:],
'5': lun_list[:],
},
'slot_b': {},
}
test_map_chl = {
'slot_a': ['3', '4'],
}
test_map_lun = ['1']
test_mcs_id = '1'
self.driver = self._get_driver(configuration)
self.driver.mcs_dict = fake_mcs_dict
self.driver.map_dict = fake_map_dict
map_chl, map_lun, mcs_id = self.driver._get_mapping_info_with_mcs()
self.assertDictMatch(test_map_chl, map_chl)
self.assertEqual(test_map_lun, map_lun)
self.assertEqual(test_mcs_id, mcs_id)
def test_specific_channel_with_multipath(self):
configuration = copy.copy(self.configuration)
configuration.infortrend_slots_a_channels_id = '1,2'
test_map_dict = {
'slot_a': {'1': [], '2': []},
'slot_b': {},
}
test_target_dict = {
'slot_a': {'1': '0', '2': '0'},
'slot_b': {},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel(),
}
self._driver_setup(mock_commands, configuration)
self.driver._init_map_info(multipath=True)
self.assertDictMatch(test_map_dict, self.driver.map_dict)
self.assertDictMatch(test_target_dict, self.driver.target_dict)
def test_specific_channel_with_multipath_r_model(self):
configuration = copy.copy(self.configuration)
configuration.infortrend_slots_a_channels_id = '1,2'
configuration.infortrend_slots_b_channels_id = '1'
test_map_dict = {
'slot_a': {'1': [], '2': []},
'slot_b': {'1': []},
}
test_target_dict = {
'slot_a': {'1': '0', '2': '0'},
'slot_b': {'1': '1'},
}
mock_commands = {
'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
}
self._driver_setup(mock_commands, configuration)
self.driver._init_map_info(multipath=True)
self.assertDictMatch(test_map_dict, self.driver.map_dict)
self.assertDictMatch(test_target_dict, self.driver.target_dict)
@mock.patch.object(common_cli.LOG, 'info')
def test_create_volume(self, log_info):
test_volume = self.cli_data.test_volume
test_model_update = {
'provider_location': 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16),
self.cli_data.fake_partition_id[0]),
}
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'ShowLV': self._mock_show_lv,
}
self._driver_setup(mock_commands)
model_update = self.driver.create_volume(test_volume)
self.assertDictMatch(test_model_update, model_update)
self.assertEqual(1, log_info.call_count)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_create_volume_with_create_fail(self):
test_volume = self.cli_data.test_volume
mock_commands = {
'CreatePartition': FAKE_ERROR_RETURN,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'ShowLV': self._mock_show_lv,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.create_volume,
test_volume)
@mock.patch.object(common_cli.LOG, 'info')
def test_delete_volume(self, log_info):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
test_snapshot_id = self.cli_data.fake_snapshot_id
test_pair_id = self.cli_data.fake_pair_id
mock_commands = {
'ShowPartition':
self.cli_data.get_test_show_partition_detail_for_map(
test_partition_id),
'ShowReplica': self.cli_data.get_test_show_replica_detail(),
'DeleteReplica': SUCCEED,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(),
'DeleteSnapshot': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
'DeleteMap': SUCCEED,
'DeletePartition': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.delete_volume(test_volume)
expect_cli_cmd = [
mock.call('ShowPartition', '-l'),
mock.call('ShowReplica', '-l'),
mock.call('DeleteReplica', test_pair_id[0], '-y'),
mock.call('ShowSnapshot', 'part=%s' % test_partition_id),
mock.call('DeleteSnapshot', test_snapshot_id[0], '-y'),
mock.call('DeleteSnapshot', test_snapshot_id[1], '-y'),
mock.call('ShowMap', 'part=%s' % test_partition_id),
mock.call('DeleteMap', 'part', test_partition_id, '-y'),
mock.call('DeletePartition', test_partition_id, '-y'),
]
self._assert_cli_has_calls(expect_cli_cmd)
self.assertEqual(1, log_info.call_count)
@mock.patch.object(common_cli.LOG, 'warning', mock.Mock())
def test_delete_volume_with_sync_pair(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
mock_commands = {
'ShowPartition':
self.cli_data.get_test_show_partition_detail_for_map(
test_partition_id),
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_sync_pair(),
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.VolumeDriverException,
self.driver.delete_volume,
test_volume)
def test_delete_volume_with_delete_fail(self):
test_volume = self.cli_data.test_volume
test_partition_id = self.cli_data.fake_partition_id[0]
mock_commands = {
'ShowPartition':
self.cli_data.get_test_show_partition_detail_for_map(
test_partition_id),
'ShowReplica': self.cli_data.get_test_show_replica_detail(),
'DeleteReplica': SUCCEED,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(),
'DeleteSnapshot': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
'DeleteMap': SUCCEED,
'DeletePartition': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.delete_volume,
test_volume)
@mock.patch.object(common_cli.LOG, 'warning')
def test_delete_volume_with_partiton_not_found(self, log_warning):
test_volume = self.cli_data.test_volume
mock_commands = {
'ShowPartition': self.cli_data.get_test_show_empty_list(),
}
self._driver_setup(mock_commands)
self.driver.delete_volume(test_volume)
self.assertEqual(1, log_warning.call_count)
@mock.patch.object(common_cli.LOG, 'info')
def test_delete_volume_without_provider(self, log_info):
test_system_id = self.cli_data.fake_system_id[0]
test_volume = copy.deepcopy(self.cli_data.test_volume)
test_volume['provider_location'] = 'system_id^%s@partition_id^%s' % (
int(test_system_id, 16), 'None')
test_partition_id = self.cli_data.fake_partition_id[0]
mock_commands = {
'ShowPartition':
self.cli_data.get_test_show_partition_detail_for_map(
test_partition_id),
'ShowReplica': self.cli_data.get_test_show_replica_detail(),
'DeleteReplica': SUCCEED,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(),
'DeleteSnapshot': SUCCEED,
'ShowMap': self.cli_data.get_test_show_map(),
'DeleteMap': SUCCEED,
'DeletePartition': SUCCEED,
}
self._driver_setup(mock_commands)
self.driver.delete_volume(test_volume)
self.assertEqual(1, log_info.call_count)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
@mock.patch.object(common_cli.LOG, 'info')
def test_create_cloned_volume(self, log_info):
fake_partition_id = self.cli_data.fake_partition_id[0]
test_dst_volume = self.cli_data.test_dst_volume
test_dst_volume_id = test_dst_volume['id'].replace('-', '')
test_src_volume = self.cli_data.test_volume
test_dst_part_id = self.cli_data.fake_partition_id[1]
test_model_update = {
'provider_location': 'system_id^%s@partition_id^%s' % (
int(self.cli_data.fake_system_id[0], 16),
self.cli_data.fake_partition_id[1]),
}
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'CreateReplica': SUCCEED,
'ShowLV': self._mock_show_lv,
'ShowReplica':
self.cli_data.get_test_show_replica_detail_for_migrate(
fake_partition_id, test_dst_part_id, test_dst_volume_id),
'DeleteReplica': SUCCEED,
}
self._driver_setup(mock_commands)
model_update = self.driver.create_cloned_volume(
test_dst_volume, test_src_volume)
self.assertDictMatch(test_model_update, model_update)
self.assertEqual(1, log_info.call_count)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_create_cloned_volume_with_create_replica_fail(self):
test_dst_volume = self.cli_data.test_dst_volume
test_src_volume = self.cli_data.test_volume
mock_commands = {
'CreatePartition': SUCCEED,
'ShowPartition': self.cli_data.get_test_show_partition(),
'ShowDevice': self.cli_data.get_test_show_device(),
'CreateReplica': FAKE_ERROR_RETURN,
'ShowLV': self._mock_show_lv,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.create_cloned_volume,
test_dst_volume,
test_src_volume)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_create_export(self):
test_volume = self.cli_data.test_volume
test_model_update = {
'provider_location': test_volume['provider_location'],
}
self.driver = self._get_driver(self.configuration)
model_update = self.driver.create_export(None, test_volume)
self.assertDictMatch(test_model_update, model_update)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_get_volume_stats(self):
test_volume_states = self.cli_data.test_volume_states
mock_commands = {
'ShowLicense': self.cli_data.get_test_show_license(),
'ShowLV': self.cli_data.get_test_show_lv(),
'ShowPartition': self.cli_data.get_test_show_partition_detail(),
}
self._driver_setup(mock_commands)
self.driver.VERSION = '99.99'
volume_states = self.driver.get_volume_stats(True)
self.assertDictMatch(test_volume_states, volume_states)
def test_get_volume_stats_fail(self):
mock_commands = {
'ShowLicense': self.cli_data.get_test_show_license(),
'ShowLV': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.get_volume_stats)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_create_snapshot(self):
fake_partition_id = self.cli_data.fake_partition_id[0]
fake_snapshot_id = self.cli_data.fake_snapshot_id[0]
mock_commands = {
'CreateSnapshot': SUCCEED,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(
partition_id=fake_partition_id,
snapshot_id=fake_snapshot_id),
'ShowPartition': self.cli_data.get_test_show_partition(),
}
self._driver_setup(mock_commands)
model_update = self.driver.create_snapshot(self.cli_data.test_snapshot)
self.assertEqual(fake_snapshot_id, model_update['provider_location'])
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_create_snapshot_without_partition_id(self):
fake_partition_id = self.cli_data.fake_partition_id[0]
fake_snapshot_id = self.cli_data.fake_snapshot_id[0]
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'CreateSnapshot': SUCCEED,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(
partition_id=fake_partition_id,
snapshot_id=fake_snapshot_id),
'ShowPartition': FAKE_ERROR_RETURN,
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.create_snapshot,
test_snapshot)
def test_create_snapshot_with_create_fail(self):
fake_partition_id = self.cli_data.fake_partition_id[0]
fake_snapshot_id = self.cli_data.fake_snapshot_id[0]
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'CreateSnapshot': FAKE_ERROR_RETURN,
'ShowSnapshot': self.cli_data.get_test_show_snapshot(
partition_id=fake_partition_id,
snapshot_id=fake_snapshot_id),
'ShowPartition': self.cli_data.get_test_show_partition(),
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.create_snapshot,
test_snapshot)
def test_create_snapshot_with_show_fail(self):
test_snapshot = self.cli_data.test_snapshot
mock_commands = {
'CreateSnapshot': SUCCEED,
'ShowSnapshot': FAKE_ERROR_RETURN,
'ShowPartition': self.cli_data.get_test_show_partition(),
}
self._driver_setup(mock_commands)
self.assertRaises(
exception.InfortrendCliException,
self.driver.create_snapshot,
test_snapshot)
@mock.patch.object(common_cli.LOG, 'info')
def test_delete_snapshot(self, log_info):
    """Successful snapshot delete logs exactly one info message."""
    test_snapshot = self.cli_data.test_snapshot
    mock_commands = {
        'ShowReplica': self.cli_data.get_test_show_replica_detail(),
        'DeleteSnapshot': SUCCEED,
    }
    self._driver_setup(mock_commands)
    self.driver.delete_snapshot(test_snapshot)
    self.assertEqual(1, log_info.call_count)

def test_delete_snapshot_without_provider_location(self):
    """A missing raid snapshot id maps to VolumeBackendAPIException."""
    test_snapshot = self.cli_data.test_snapshot
    self.driver = self._get_driver(self.configuration)
    # Stub out id lookup so the driver sees no provider location.
    self.driver._get_raid_snapshot_id = mock.Mock(return_value=None)
    self.assertRaises(
        exception.VolumeBackendAPIException,
        self.driver.delete_snapshot,
        test_snapshot)

def test_delete_snapshot_with_fail(self):
    """A DeleteSnapshot CLI error surfaces as InfortrendCliException."""
    test_snapshot = self.cli_data.test_snapshot
    mock_commands = {
        'ShowReplica': self.cli_data.get_test_show_replica_detail(),
        'DeleteSnapshot': FAKE_ERROR_RETURN,
    }
    self._driver_setup(mock_commands)
    self.assertRaises(
        exception.InfortrendCliException,
        self.driver.delete_snapshot,
        test_snapshot)

@mock.patch.object(common_cli.LOG, 'warning', mock.Mock())
def test_delete_snapshot_with_sync_pair(self):
    """Deleting a snapshot that is part of a SI sync pair is refused."""
    test_snapshot = self.cli_data.test_snapshot
    mock_commands = {
        'ShowReplica':
            self.cli_data.get_test_show_replica_detail_for_si_sync_pair(),
        'DeleteSnapshot': FAKE_ERROR_RETURN,
    }
    self._driver_setup(mock_commands)
    self.assertRaises(
        exception.VolumeDriverException,
        self.driver.delete_snapshot,
        test_snapshot)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
            new=utils.ZeroIntervalLoopingCall)
@mock.patch.object(common_cli.LOG, 'info')
def test_create_volume_from_snapshot(self, log_info):
    """Clone from a filled-block snapshot and verify the model update."""
    test_snapshot = self.cli_data.test_snapshot
    test_snapshot_id = self.cli_data.fake_snapshot_id[0]
    test_dst_volume = self.cli_data.test_dst_volume
    test_dst_volume_id = test_dst_volume['id'].replace('-', '')
    test_dst_part_id = self.cli_data.fake_partition_id[1]
    # provider_location encodes the decimal system id and new partition.
    test_model_update = {
        'provider_location': 'system_id^%s@partition_id^%s' % (
            int(self.cli_data.fake_system_id[0], 16),
            self.cli_data.fake_partition_id[1]),
    }
    mock_commands = {
        'ShowSnapshot':
            self.cli_data.get_test_show_snapshot_detail_filled_block(),
        'CreatePartition': SUCCEED,
        'ShowPartition': self.cli_data.get_test_show_partition(),
        'ShowDevice': self.cli_data.get_test_show_device(),
        'CreateReplica': SUCCEED,
        'ShowLV': self._mock_show_lv,
        'ShowReplica':
            self.cli_data.get_test_show_replica_detail_for_migrate(
                test_snapshot_id, test_dst_part_id, test_dst_volume_id),
        'DeleteReplica': SUCCEED,
    }
    self._driver_setup(mock_commands)
    model_update = self.driver.create_volume_from_snapshot(
        test_dst_volume, test_snapshot)
    self.assertDictMatch(test_model_update, model_update)
    self.assertEqual(1, log_info.call_count)

@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
            new=utils.ZeroIntervalLoopingCall)
@mock.patch.object(common_cli.LOG, 'info')
def test_create_volume_from_snapshot_without_filled_block(self, log_info):
    """Clone path when the snapshot has no filled block.

    ShowReplica is answered twice (list of side effects): first for the
    source partition, then for the snapshot pair.
    """
    test_snapshot = self.cli_data.test_snapshot
    test_snapshot_id = self.cli_data.fake_snapshot_id[0]
    test_dst_volume = self.cli_data.test_dst_volume
    test_dst_volume_id = test_dst_volume['id'].replace('-', '')
    test_dst_part_id = self.cli_data.fake_partition_id[1]
    test_src_part_id = self.cli_data.fake_partition_id[0]
    test_model_update = {
        'provider_location': 'system_id^%s@partition_id^%s' % (
            int(self.cli_data.fake_system_id[0], 16),
            self.cli_data.fake_partition_id[1]),
    }
    mock_commands = {
        'ShowSnapshot': self.cli_data.get_test_show_snapshot_detail(),
        'CreatePartition': SUCCEED,
        'ShowPartition': self.cli_data.get_test_show_partition(),
        'ShowDevice': self.cli_data.get_test_show_device(),
        'CreateReplica': SUCCEED,
        'ShowLV': self._mock_show_lv,
        'ShowReplica': [
            self.cli_data.get_test_show_replica_detail_for_migrate(
                test_src_part_id, test_dst_part_id, test_dst_volume_id),
            self.cli_data.get_test_show_replica_detail_for_migrate(
                test_snapshot_id, test_dst_part_id, test_dst_volume_id),
        ],
        'DeleteReplica': SUCCEED,
    }
    self._driver_setup(mock_commands)
    model_update = self.driver.create_volume_from_snapshot(
        test_dst_volume, test_snapshot)
    self.assertDictMatch(test_model_update, model_update)
    self.assertEqual(1, log_info.call_count)

def test_create_volume_from_snapshot_without_provider_location(
        self):
    """A missing raid snapshot id maps to VolumeBackendAPIException."""
    test_snapshot = self.cli_data.test_snapshot
    test_dst_volume = self.cli_data.test_dst_volume
    self.driver = self._get_driver(self.configuration)
    self.driver._get_raid_snapshot_id = mock.Mock(return_value=None)
    self.assertRaises(
        exception.VolumeBackendAPIException,
        self.driver.create_volume_from_snapshot,
        test_dst_volume,
        test_snapshot)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection(self):
    """Single-path iSCSI attach builds the expected CreateMap call."""
    test_volume = self.cli_data.test_volume
    test_partition_id = self.cli_data.fake_partition_id[0]
    test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi)
    test_iscsi_properties = self.cli_data.test_iscsi_properties
    # NOTE: 'protal' is a long-standing typo for 'portal' in this suite.
    test_target_protal = [test_iscsi_properties['data']['target_portal']]
    test_target_iqn = [test_iscsi_properties['data']['target_iqn']]
    test_connector['multipath'] = False
    mock_commands = {
        'ShowChannel': self.cli_data.get_test_show_channel(),
        'ShowMap': self.cli_data.get_test_show_map(),
        'ShowIQN': self.cli_data.get_test_show_iqn(),
        'CreateMap': SUCCEED,
        'ShowNet': self.cli_data.get_test_show_net(),
        'ExecuteCommand': self.cli_data.get_fake_discovery(
            test_target_iqn, test_target_protal),
    }
    self._driver_setup(mock_commands)
    properties = self.driver.initialize_connection(
        test_volume, test_connector)
    self.assertDictMatch(test_iscsi_properties, properties)
    expect_cli_cmd = [
        mock.call('CreateMap', 'part', test_partition_id, '2', '0', '0',
                  'iqn=%s' % test_connector['initiator']),
    ]
    self._assert_cli_has_calls(expect_cli_cmd)

@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_iqn_not_exist(self):
    """An unknown initiator IQN is registered via CreateIQN first."""
    test_volume = self.cli_data.test_volume
    test_partition_id = self.cli_data.fake_partition_id[0]
    test_initiator = copy.deepcopy(self.cli_data.fake_initiator_iqn[1])
    test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi)
    test_iscsi_properties = self.cli_data.test_iscsi_properties
    test_target_protal = [test_iscsi_properties['data']['target_portal']]
    test_target_iqn = [test_iscsi_properties['data']['target_iqn']]
    test_connector['multipath'] = False
    test_connector['initiator'] = test_initiator
    mock_commands = {
        'ShowChannel': self.cli_data.get_test_show_channel(),
        'ShowMap': self.cli_data.get_test_show_map(),
        'ShowIQN': self.cli_data.get_test_show_iqn(),
        'CreateIQN': SUCCEED,
        'CreateMap': SUCCEED,
        'ShowNet': self.cli_data.get_test_show_net(),
        'ExecuteCommand': self.cli_data.get_fake_discovery(
            test_target_iqn, test_target_protal),
    }
    self._driver_setup(mock_commands)
    properties = self.driver.initialize_connection(
        test_volume, test_connector)
    self.assertDictMatch(test_iscsi_properties, properties)
    expect_cli_cmd = [
        # The alias is the last 16 chars of the IQN -- presumably the
        # firmware's alias length limit; TODO confirm in the driver.
        mock.call('CreateIQN', test_initiator, test_initiator[-16:]),
        mock.call('CreateMap', 'part', test_partition_id, '2', '0', '0',
                  'iqn=%s' % test_connector['initiator']),
    ]
    self._assert_cli_has_calls(expect_cli_cmd)

@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_empty_map(self):
    """Attach works when the array reports no existing mappings."""
    test_volume = self.cli_data.test_volume
    test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi)
    test_iscsi_properties = self.cli_data.test_iscsi_properties_empty_map
    test_target_protal = [test_iscsi_properties['data']['target_portal']]
    test_target_iqn = [test_iscsi_properties['data']['target_iqn']]
    test_connector['multipath'] = False
    mock_commands = {
        'ShowChannel': self.cli_data.get_test_show_channel(),
        'ShowMap': self.cli_data.get_test_show_empty_list(),
        'ShowIQN': self.cli_data.get_test_show_iqn(),
        'CreateMap': SUCCEED,
        'ShowNet': self.cli_data.get_test_show_net(),
        'ExecuteCommand': self.cli_data.get_fake_discovery(
            test_target_iqn, test_target_protal),
    }
    self._driver_setup(mock_commands)
    properties = self.driver.initialize_connection(
        test_volume, test_connector)
    self.assertDictMatch(
        self.cli_data.test_iscsi_properties_empty_map, properties)

def test_initialize_connection_with_create_map_fail(self):
    """A CreateMap CLI error surfaces as InfortrendCliException."""
    test_volume = self.cli_data.test_volume
    test_connector = self.cli_data.test_connector_iscsi
    mock_commands = {
        'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
        'ShowMap': self.cli_data.get_test_show_map(),
        'ShowIQN': self.cli_data.get_test_show_iqn(),
        'CreateMap': FAKE_ERROR_RETURN,
        'ShowNet': SUCCEED,
    }
    self._driver_setup(mock_commands)
    self.assertRaises(
        exception.InfortrendCliException,
        self.driver.initialize_connection,
        test_volume,
        test_connector)

def test_initialize_connection_with_get_ip_fail(self):
    """A ShowNet CLI error surfaces as InfortrendCliException."""
    test_volume = self.cli_data.test_volume
    test_connector = self.cli_data.test_connector_iscsi
    mock_commands = {
        'ShowChannel': self.cli_data.get_test_show_channel(),
        'ShowMap': self.cli_data.get_test_show_map(),
        'ShowIQN': self.cli_data.get_test_show_iqn(),
        'CreateMap': SUCCEED,
        'ShowNet': FAKE_ERROR_RETURN,
    }
    self._driver_setup(mock_commands)
    self.assertRaises(
        exception.InfortrendCliException,
        self.driver.initialize_connection,
        test_volume,
        test_connector)

@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_initialize_connection_with_mcs(self):
    """With MCS channels enabled the map targets a different channel."""
    configuration = copy.copy(self.configuration)
    configuration.use_multipath_for_image_xfer = True
    test_volume = self.cli_data.test_volume
    test_partition_id = self.cli_data.fake_partition_id[0]
    test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi)
    test_iscsi_properties = self.cli_data.test_iscsi_properties_with_mcs
    test_target_protal = [test_iscsi_properties['data']['target_portal']]
    test_target_iqn = [test_iscsi_properties['data']['target_iqn']]
    test_connector['multipath'] = False
    mock_commands = {
        'ShowChannel': self.cli_data.get_test_show_channel_with_mcs(),
        'ShowMap': self.cli_data.get_test_show_map(),
        'ShowIQN': self.cli_data.get_test_show_iqn(),
        'CreateMap': SUCCEED,
        'ShowNet': self.cli_data.get_test_show_net(),
        'ExecuteCommand': self.cli_data.get_fake_discovery(
            test_target_iqn, test_target_protal),
    }
    self._driver_setup(mock_commands, configuration)
    properties = self.driver.initialize_connection(
        test_volume, test_connector)
    self.assertDictMatch(test_iscsi_properties, properties)
    expect_cli_cmd = [
        mock.call('CreateMap', 'part', test_partition_id, '1', '0', '2',
                  'iqn=%s' % test_connector['initiator']),
    ]
    self._assert_cli_has_calls(expect_cli_cmd)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_extend_volume(self):
    """Whole-GB growth issues SetPartition expand with a GB size."""
    test_volume = self.cli_data.test_volume
    test_partition_id = self.cli_data.fake_partition_id[0]
    test_new_size = 10
    test_expand_size = test_new_size - test_volume['size']
    mock_commands = {
        'SetPartition': SUCCEED,
    }
    self._driver_setup(mock_commands)
    self.driver.extend_volume(test_volume, test_new_size)
    expect_cli_cmd = [
        mock.call('SetPartition', 'expand', test_partition_id,
                  'size=%sGB' % test_expand_size),
    ]
    self._assert_cli_has_calls(expect_cli_cmd)

@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_extend_volume_mb(self):
    """Fractional-GB growth falls back to an MB-denominated expand."""
    test_volume = self.cli_data.test_volume
    test_partition_id = self.cli_data.fake_partition_id[0]
    test_new_size = 5.5
    # Expected delta converted to MB and rounded like the driver does.
    test_expand_size = round((test_new_size - test_volume['size']) * 1024)
    mock_commands = {
        'SetPartition': SUCCEED,
    }
    self._driver_setup(mock_commands)
    self.driver.extend_volume(test_volume, test_new_size)
    expect_cli_cmd = [
        mock.call('SetPartition', 'expand', test_partition_id,
                  'size=%sMB' % test_expand_size),
    ]
    self._assert_cli_has_calls(expect_cli_cmd)

def test_extend_volume_fail(self):
    """A SetPartition CLI error surfaces as InfortrendCliException."""
    test_volume = self.cli_data.test_volume
    test_new_size = 10
    mock_commands = {
        'SetPartition': FAKE_ERROR_RETURN,
    }
    self._driver_setup(mock_commands)
    self.assertRaises(
        exception.InfortrendCliException,
        self.driver.extend_volume,
        test_volume,
        test_new_size)
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_terminate_connection(self):
    """Detach unmaps the partition, removes the IQN and re-reads maps."""
    test_volume = self.cli_data.test_volume
    test_partition_id = self.cli_data.fake_partition_id[0]
    test_connector = self.cli_data.test_connector_iscsi
    mock_commands = {
        'DeleteMap': SUCCEED,
        'DeleteIQN': SUCCEED,
        'ShowMap': self.cli_data.get_test_show_map(),
    }
    self._driver_setup(mock_commands)
    self.driver.terminate_connection(test_volume, test_connector)
    expect_cli_cmd = [
        mock.call('DeleteMap', 'part', test_partition_id, '-y'),
        # IQN alias is its last 16 characters (see CreateIQN above).
        mock.call('DeleteIQN', test_connector['initiator'][-16:]),
        mock.call('ShowMap'),
    ]
    self._assert_cli_has_calls(expect_cli_cmd)

def test_terminate_connection_fail(self):
    """A DeleteMap CLI error surfaces as InfortrendCliException."""
    test_volume = self.cli_data.test_volume
    test_connector = self.cli_data.test_connector_iscsi
    mock_commands = {
        'DeleteMap': FAKE_ERROR_RETURN,
    }
    self._driver_setup(mock_commands)
    self.assertRaises(
        exception.InfortrendCliException,
        self.driver.terminate_connection,
        test_volume,
        test_connector)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
            new=utils.ZeroIntervalLoopingCall)
def test_migrate_volume(self):
    """Happy-path migrate: create dst, mirror, delete pair and src."""
    test_host = copy.deepcopy(self.cli_data.test_migrate_host)
    fake_pool = copy.deepcopy(self.cli_data.fake_pool)
    test_volume = self.cli_data.test_volume
    test_volume_id = test_volume['id'].replace('-', '')
    test_src_part_id = self.cli_data.fake_partition_id[0]
    test_dst_part_id = self.cli_data.fake_partition_id[2]
    test_pair_id = self.cli_data.fake_pair_id[0]
    test_model_update = {
        'provider_location': 'system_id^%s@partition_id^%s' % (
            int(self.cli_data.fake_system_id[0], 16),
            test_dst_part_id),
    }
    mock_commands = {
        'CreatePartition': SUCCEED,
        'ShowPartition': self.cli_data.get_test_show_partition(
            test_volume_id, fake_pool['pool_id']),
        'CreateReplica': SUCCEED,
        'ShowLV': self._mock_show_lv_for_migrate,
        'ShowReplica':
            self.cli_data.get_test_show_replica_detail_for_migrate(
                test_src_part_id, test_dst_part_id, test_volume_id),
        'DeleteReplica': SUCCEED,
        'DeleteMap': SUCCEED,
        'DeletePartition': SUCCEED,
    }
    self._driver_setup(mock_commands)
    rc, model_update = self.driver.migrate_volume(test_volume, test_host)
    expect_cli_cmd = [
        mock.call('CreatePartition',
                  fake_pool['pool_id'],
                  test_volume['id'].replace('-', ''),
                  'size=%s' % (test_volume['size'] * 1024),
                  ''),
        mock.call('ShowPartition'),
        mock.call('CreateReplica',
                  'Cinder-Migrate',
                  'part', test_src_part_id,
                  'part', test_dst_part_id,
                  'type=mirror'),
        mock.call('ShowReplica', '-l'),
        mock.call('DeleteReplica', test_pair_id, '-y'),
        mock.call('DeleteMap', 'part', test_src_part_id, '-y'),
        mock.call('DeletePartition', test_src_part_id, '-y'),
    ]
    self._assert_cli_has_calls(expect_cli_cmd)
    self.assertTrue(rc)
    self.assertDictMatch(test_model_update, model_update)

@mock.patch.object(common_cli.LOG, 'warning')
def test_migrate_volume_with_invalid_storage(self, log_warning):
    """An unsuitable destination host yields (False, None) + warning."""
    fake_host = self.cli_data.fake_host
    test_volume = self.cli_data.test_volume
    mock_commands = {
        'ShowLV': self._mock_show_lv_for_migrate,
    }
    self._driver_setup(mock_commands)
    rc, model_update = self.driver.migrate_volume(test_volume, fake_host)
    self.assertFalse(rc)
    self.assertTrue(model_update is None)
    self.assertEqual(1, log_warning.call_count)

def test_migrate_volume_with_get_part_id_fail(self):
    """Failure to resolve the new partition id raises VolumeDriverException."""
    test_host = copy.deepcopy(self.cli_data.test_migrate_host)
    test_volume = self.cli_data.test_volume
    mock_commands = {
        'CreatePartition': SUCCEED,
        'ShowPartition': self.cli_data.get_test_show_partition(),
        'DeleteMap': SUCCEED,
        'CreateReplica': SUCCEED,
        'CreateMap': SUCCEED,
        'ShowLV': self._mock_show_lv_for_migrate,
    }
    self._driver_setup(mock_commands)
    self.assertRaises(
        exception.VolumeDriverException,
        self.driver.migrate_volume,
        test_volume,
        test_host)

def test_migrate_volume_with_create_replica_fail(self):
    """A CreateReplica CLI error surfaces as InfortrendCliException."""
    test_host = copy.deepcopy(self.cli_data.test_migrate_host)
    fake_pool = copy.deepcopy(self.cli_data.fake_pool)
    test_volume = self.cli_data.test_volume
    mock_commands = {
        'CreatePartition': SUCCEED,
        'ShowPartition': self.cli_data.get_test_show_partition(
            test_volume['id'].replace('-', ''), fake_pool['pool_id']),
        'DeleteMap': SUCCEED,
        'CreateReplica': FAKE_ERROR_RETURN,
        'CreateMap': SUCCEED,
        'ShowLV': self._mock_show_lv_for_migrate,
    }
    self._driver_setup(mock_commands)
    self.assertRaises(
        exception.InfortrendCliException,
        self.driver.migrate_volume,
        test_volume,
        test_host)

@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
            new=utils.ZeroIntervalLoopingCall)
def test_migrate_volume_timeout(self):
    """A replica stuck in 'Copy' past the CLI timeout aborts the migrate."""
    test_host = copy.deepcopy(self.cli_data.test_migrate_host)
    fake_pool = copy.deepcopy(self.cli_data.fake_pool)
    test_volume = self.cli_data.test_volume
    test_volume_id = test_volume['id'].replace('-', '')
    test_src_part_id = self.cli_data.fake_partition_id[0]
    test_dst_part_id = self.cli_data.fake_partition_id[2]
    configuration = copy.copy(self.configuration)
    # Zero timeout forces the wait loop to give up immediately.
    configuration.infortrend_cli_timeout = 0
    mock_commands = {
        'CreatePartition': SUCCEED,
        'ShowPartition': self.cli_data.get_test_show_partition(
            test_volume_id, fake_pool['pool_id']),
        'CreateReplica': SUCCEED,
        'ShowLV': self._mock_show_lv_for_migrate,
        'ShowReplica':
            self.cli_data.get_test_show_replica_detail_for_migrate(
                test_src_part_id, test_dst_part_id, test_volume_id,
                'Copy'),
    }
    self._driver_setup(mock_commands, configuration)
    self.assertRaises(
        exception.VolumeDriverException,
        self.driver.migrate_volume,
        test_volume,
        test_host)
def test_manage_existing_get_size(self):
    """Size lookup by source-id checks the map and returns GB size."""
    test_volume = self.cli_data.test_volume
    test_ref_volume = self.cli_data.test_ref_volume
    test_pool = self.cli_data.fake_lv_id[0]
    test_partition_id = self.cli_data.fake_partition_id[2]
    test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
    mock_commands = {
        # '[:-17]' truncation mirrors the driver's unmanaged-name
        # scheme (see test_unmanage below).
        'ShowPartition': self.cli_data.get_test_show_partition_detail(
            'storage-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
        'ShowMap': SUCCEED,
    }
    self._driver_setup(mock_commands)
    size = self.driver.manage_existing_get_size(
        test_volume, test_ref_volume)
    expect_cli_cmd = [
        mock.call('ShowMap', 'part=%s' % test_partition_id),
    ]
    self._assert_cli_has_calls(expect_cli_cmd)
    self.assertEqual(1, size)

def test_manage_existing_get_size_with_import(self):
    """Size lookup also works when the reference uses source-name."""
    test_volume = self.cli_data.test_volume
    test_ref_volume = self.cli_data.test_ref_volume_with_import
    test_pool = self.cli_data.fake_lv_id[0]
    test_partition_id = self.cli_data.fake_partition_id[2]
    mock_commands = {
        'ShowPartition': self.cli_data.get_test_show_partition_detail(
            test_ref_volume['source-name'], test_pool),
        'ShowMap': SUCCEED,
    }
    self._driver_setup(mock_commands)
    size = self.driver.manage_existing_get_size(
        test_volume, test_ref_volume)
    expect_cli_cmd = [
        mock.call('ShowMap', 'part=%s' % test_partition_id),
    ]
    self._assert_cli_has_calls(expect_cli_cmd)
    self.assertEqual(1, size)

def test_manage_existing_get_size_in_use(self):
    """A partition that is still mapped cannot be managed."""
    test_volume = self.cli_data.test_volume
    test_ref_volume = self.cli_data.test_ref_volume
    test_pool = self.cli_data.fake_lv_id[0]
    test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
    mock_commands = {
        'ShowPartition': self.cli_data.get_test_show_partition_detail(
            'storage-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
        'ShowMap': self.cli_data.get_test_show_map(),
    }
    self._driver_setup(mock_commands)
    self.assertRaises(
        exception.VolumeBackendAPIException,
        self.driver.manage_existing_get_size,
        test_volume,
        test_ref_volume)

def test_manage_existing_get_size_no_source_id(self):
    """A reference lacking source-id/source-name is rejected."""
    test_volume = self.cli_data.test_volume
    test_ref_volume = self.cli_data.test_dst_volume
    self.driver = self._get_driver(self.configuration)
    self.assertRaises(
        exception.ManageExistingInvalidReference,
        self.driver.manage_existing_get_size,
        test_volume,
        test_ref_volume)

def test_manage_existing_get_size_show_part_fail(self):
    """A ShowPartition CLI error surfaces as InfortrendCliException."""
    test_volume = self.cli_data.test_volume
    test_ref_volume = self.cli_data.test_ref_volume
    mock_commands = {
        'ShowPartition': FAKE_ERROR_RETURN,
        'ShowMap': SUCCEED,
    }
    self._driver_setup(mock_commands)
    self.assertRaises(
        exception.InfortrendCliException,
        self.driver.manage_existing_get_size,
        test_volume,
        test_ref_volume)

def test_manage_existing_get_size_show_map_fail(self):
    """A ShowMap CLI error surfaces as InfortrendCliException."""
    test_volume = self.cli_data.test_volume
    test_ref_volume = self.cli_data.test_ref_volume
    test_pool = self.cli_data.fake_lv_id[0]
    test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
    mock_commands = {
        'ShowPartition': self.cli_data.get_test_show_partition_detail(
            'storage-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
        'ShowMap': FAKE_ERROR_RETURN,
    }
    self._driver_setup(mock_commands)
    self.assertRaises(
        exception.InfortrendCliException,
        self.driver.manage_existing_get_size,
        test_volume,
        test_ref_volume)
@mock.patch.object(common_cli.LOG, 'info')
def test_manage_existing(self, log_info):
    """Managing by source-id renames the partition to the volume id."""
    test_volume = self.cli_data.test_volume
    test_ref_volume = self.cli_data.test_ref_volume
    test_pool = self.cli_data.fake_lv_id[0]
    test_partition_id = self.cli_data.fake_partition_id[2]
    test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
    test_model_update = {
        'provider_location': 'system_id^%s@partition_id^%s' % (
            int(self.cli_data.fake_system_id[0], 16),
            test_partition_id),
    }
    mock_commands = {
        'ShowPartition': self.cli_data.get_test_show_partition_detail(
            'storage-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
        'SetPartition': SUCCEED,
        'ShowDevice': self.cli_data.get_test_show_device(),
    }
    self._driver_setup(mock_commands)
    model_update = self.driver.manage_existing(
        test_volume, test_ref_volume)
    expect_cli_cmd = [
        mock.call('SetPartition', test_partition_id,
                  'name=%s' % test_volume['id'].replace('-', '')),
    ]
    self._assert_cli_has_calls(expect_cli_cmd)
    self.assertEqual(1, log_info.call_count)
    self.assertDictMatch(test_model_update, model_update)

def test_manage_existing_rename_fail(self):
    """A SetPartition rename error surfaces as InfortrendCliException."""
    test_volume = self.cli_data.test_volume
    test_ref_volume = self.cli_data.test_ref_volume
    test_pool = self.cli_data.fake_lv_id[0]
    test_ref_volume_id = test_ref_volume['source-id'].replace('-', '')
    mock_commands = {
        'ShowPartition': self.cli_data.get_test_show_partition_detail(
            'storage-unmanaged-%s' % test_ref_volume_id[:-17], test_pool),
        'SetPartition': FAKE_ERROR_RETURN,
    }
    self._driver_setup(mock_commands)
    self.assertRaises(
        exception.InfortrendCliException,
        self.driver.manage_existing,
        test_volume,
        test_ref_volume)

def test_manage_existing_with_part_not_found(self):
    """An unknown referenced partition is an invalid reference."""
    test_volume = self.cli_data.test_volume
    test_ref_volume = self.cli_data.test_ref_volume
    mock_commands = {
        'ShowPartition':
            self.cli_data.get_test_show_partition_detail(),
        'SetPartition': SUCCEED,
    }
    self._driver_setup(mock_commands)
    self.assertRaises(
        exception.ManageExistingInvalidReference,
        self.driver.manage_existing,
        test_volume,
        test_ref_volume)

@mock.patch.object(common_cli.LOG, 'info')
def test_manage_existing_with_import(self, log_info):
    """Managing by source-name follows the same rename path."""
    test_volume = self.cli_data.test_volume
    test_ref_volume = self.cli_data.test_ref_volume_with_import
    test_pool = self.cli_data.fake_lv_id[0]
    test_partition_id = self.cli_data.fake_partition_id[2]
    test_model_update = {
        'provider_location': 'system_id^%s@partition_id^%s' % (
            int(self.cli_data.fake_system_id[0], 16),
            test_partition_id),
    }
    mock_commands = {
        'ShowPartition': self.cli_data.get_test_show_partition_detail(
            test_ref_volume['source-name'], test_pool),
        'SetPartition': SUCCEED,
        'ShowDevice': self.cli_data.get_test_show_device(),
    }
    self._driver_setup(mock_commands)
    model_update = self.driver.manage_existing(
        test_volume, test_ref_volume)
    expect_cli_cmd = [
        mock.call('SetPartition', test_partition_id,
                  'name=%s' % test_volume['id'].replace('-', '')),
    ]
    self._assert_cli_has_calls(expect_cli_cmd)
    self.assertEqual(1, log_info.call_count)
    self.assertDictMatch(test_model_update, model_update)

@mock.patch.object(common_cli.LOG, 'info')
def test_unmanage(self, log_info):
    """Unmanage renames the partition to storage-unmanaged-<prefix>."""
    test_volume = self.cli_data.test_volume
    test_volume_id = test_volume['id'].replace('-', '')
    test_partition_id = self.cli_data.fake_partition_id[0]
    mock_commands = {
        'SetPartition': SUCCEED,
    }
    self._driver_setup(mock_commands)
    self.driver.unmanage(test_volume)
    expect_cli_cmd = [
        mock.call(
            'SetPartition',
            test_partition_id,
            # '[:-17]' keeps the name within the firmware's length
            # limit -- presumably; TODO confirm in common_cli.
            'name=storage-unmanaged-%s' % test_volume_id[:-17]),
    ]
    self._assert_cli_has_calls(expect_cli_cmd)
    self.assertEqual(1, log_info.call_count)
@mock.patch.object(common_cli.LOG, 'info')
def test_retype_without_change(self, log_info):
    """Retype with empty extra-spec diff is a no-op returning True."""
    test_volume = self.cli_data.test_volume
    test_new_type = self.cli_data.test_new_type
    test_diff = {'extra_specs': {}}
    test_host = self.cli_data.test_migrate_host_2
    self.driver = self._get_driver(self.configuration)
    rc = self.driver.retype(
        None, test_volume, test_new_type, test_diff, test_host)
    self.assertTrue(rc)
    self.assertEqual(1, log_info.call_count)

@mock.patch.object(common_cli.LOG, 'warning')
def test_retype_with_change_provision(self, log_warning):
    """A provisioning change cannot be retyped in place; returns False."""
    test_volume = self.cli_data.test_volume
    test_new_type = self.cli_data.test_new_type
    test_diff = self.cli_data.test_diff
    test_host = self.cli_data.test_migrate_host_2
    self.driver = self._get_driver(self.configuration)
    rc = self.driver.retype(
        None, test_volume, test_new_type, test_diff, test_host)
    self.assertFalse(rc)
    self.assertEqual(1, log_warning.call_count)

@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_retype_with_migrate(self):
    """Retype to another pool triggers the full migrate sequence."""
    fake_pool = copy.deepcopy(self.cli_data.fake_pool)
    test_host = copy.deepcopy(self.cli_data.test_migrate_host)
    test_volume = self.cli_data.test_volume
    test_volume_id = test_volume['id'].replace('-', '')
    test_new_type = self.cli_data.test_new_type
    test_diff = self.cli_data.test_diff
    test_src_part_id = self.cli_data.fake_partition_id[0]
    test_dst_part_id = self.cli_data.fake_partition_id[2]
    test_pair_id = self.cli_data.fake_pair_id[0]
    test_model_update = {
        'provider_location': 'system_id^%s@partition_id^%s' % (
            int(self.cli_data.fake_system_id[0], 16),
            test_dst_part_id),
    }
    mock_commands = {
        'ShowSnapshot': SUCCEED,
        'CreatePartition': SUCCEED,
        'ShowPartition': self.cli_data.get_test_show_partition(
            test_volume_id, fake_pool['pool_id']),
        'CreateReplica': SUCCEED,
        'ShowLV': self._mock_show_lv_for_migrate,
        'ShowReplica':
            self.cli_data.get_test_show_replica_detail_for_migrate(
                test_src_part_id, test_dst_part_id, test_volume_id),
        'DeleteReplica': SUCCEED,
        'DeleteMap': SUCCEED,
        'DeletePartition': SUCCEED,
    }
    self._driver_setup(mock_commands)
    rc, model_update = self.driver.retype(
        None, test_volume, test_new_type, test_diff, test_host)
    # Thin-provisioning create params: min reserve is 20% of the size.
    min_size = int(test_volume['size'] * 1024 * 0.2)
    create_params = {'init': 'disable', 'min': '%sMB' % min_size}
    create_params = ' '.join('%s=%s' % (key, value)
                             for key, value in create_params.items())
    expect_cli_cmd = [
        mock.call('ShowSnapshot', 'part=%s' % test_src_part_id),
        mock.call(
            'CreatePartition',
            fake_pool['pool_id'],
            test_volume['id'].replace('-', ''),
            'size=%s' % (test_volume['size'] * 1024),
            create_params,
        ),
        mock.call('ShowPartition'),
        mock.call(
            'CreateReplica',
            'Cinder-Migrate',
            'part', test_src_part_id,
            'part', test_dst_part_id,
            'type=mirror'
        ),
        mock.call('ShowReplica', '-l'),
        mock.call('DeleteReplica', test_pair_id, '-y'),
        mock.call('DeleteMap', 'part', test_src_part_id, '-y'),
        mock.call('DeletePartition', test_src_part_id, '-y'),
    ]
    self._assert_cli_has_calls(expect_cli_cmd)
    self.assertTrue(rc)
    self.assertDictMatch(test_model_update, model_update)
@mock.patch.object(common_cli.LOG, 'debug', mock.Mock())
@mock.patch.object(common_cli.LOG, 'info', mock.Mock())
def test_update_migrated_volume(self):
    """A finished migration renames the new partition to the source id."""
    src_volume = self.cli_data.test_volume
    # Deep copy: this test mutates the destination fixture below.
    dst_volume = copy.deepcopy(self.cli_data.test_dst_volume)
    test_dst_part_id = self.cli_data.fake_partition_id[1]
    dst_volume['provider_location'] = 'system_id^%s@partition_id^%s' % (
        int(self.cli_data.fake_system_id[0], 16), test_dst_part_id)
    test_model_update = {
        '_name_id': None,
        'provider_location': dst_volume['provider_location'],
    }
    mock_commands = {
        'SetPartition': SUCCEED,
    }
    self._driver_setup(mock_commands)
    model_update = self.driver.update_migrated_volume(
        None, src_volume, dst_volume, 'available')
    expect_cli_cmd = [
        mock.call('SetPartition', test_dst_part_id,
                  'name=%s' % src_volume['id'].replace('-', '')),
    ]
    self._assert_cli_has_calls(expect_cli_cmd)
    self.assertDictMatch(test_model_update, model_update)
@mock.patch.object(common_cli.LOG, 'debug', mock.Mock())
def test_update_migrated_volume_rename_fail(self):
    """A SetPartition failure keeps the dst volume's _name_id.

    Fix: deep-copy the shared ``cli_data.test_dst_volume`` fixture
    before mutating it, exactly as test_update_migrated_volume does.
    The previous version wrote '_name_id' and 'provider_location'
    straight onto the shared fixture object, leaking state into every
    later test that reads it.
    """
    src_volume = self.cli_data.test_volume
    dst_volume = copy.deepcopy(self.cli_data.test_dst_volume)
    dst_volume['_name_id'] = 'fake_name_id'
    test_dst_part_id = self.cli_data.fake_partition_id[1]
    dst_volume['provider_location'] = 'system_id^%s@partition_id^%s' % (
        int(self.cli_data.fake_system_id[0], 16), test_dst_part_id)
    mock_commands = {
        'SetPartition': FAKE_ERROR_RETURN
    }
    self._driver_setup(mock_commands)
    model_update = self.driver.update_migrated_volume(
        None, src_volume, dst_volume, 'available')
    self.assertEqual({'_name_id': 'fake_name_id'}, model_update)
| apache-2.0 |
beni55/augmented-traffic-control | tests/vagrant.py | 18 | 1676 | #
# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
#
from subprocess import Popen, PIPE
import paramiko
from host import Host
# For use with the 'with' python feature.
class _sshGroup(object):
@classmethod
def closeAll(cls, clients):
if len(clients) == 0:
return
try:
clients[0].close()
finally:
cls.closeAll(clients[1:])
def __init__(self):
self.clients = []
def append(self, client):
self.clients.append(client)
def __enter__(self):
return tuple(self.clients)
def __exit__(self, type, value, tb):
_sshGroup.closeAll(self.clients)
return False
class _vagrant(object):
    """Thin helper around the local ``vagrant`` command for tests.

    Use the module-level ``Vagrant`` singleton rather than instantiating
    this class directly.
    """

    # Presumably a registry of VM names; nothing in this file populates
    # or reads it -- TODO confirm against callers.
    vms = []

    def ssh(self, *names):
        """Open a Host (SSH) connection to each named vagrant VM.

        Returns an _sshGroup so callers can use ``with`` to guarantee
        every connection gets closed.
        """
        clients = _sshGroup()
        for name in names:
            ssh_config = self.sshConfig(name)
            clients.append(Host(ssh_config))
        return clients

    def sshConfig(self, name):
        """Return the parsed ``vagrant ssh-config`` entry for *name*.

        Runs ``vagrant ssh-config <name>`` in the tests/ directory and
        feeds its stdout to paramiko's SSHConfig parser.

        Raises:
            RuntimeError: if the vagrant command exits non-zero.
        """
        p = Popen(['vagrant', 'ssh-config', name],
                  stdout=PIPE,
                  stderr=None,
                  stdin=None,
                  cwd='tests/',
                  )
        p.wait()
        if p.returncode != 0:
            raise RuntimeError('Could not get ssh-config for ' + repr(name))
        ssh_config = paramiko.SSHConfig()
        ssh_config.parse(p.stdout)
        p.stdout.close()
        return ssh_config.lookup(name)


# Module-level singleton used by the test suite.
Vagrant = _vagrant()
| bsd-3-clause |
szibis/Diamond | test.py | 16 | 10774 | #!/usr/bin/env python
# coding=utf-8
###############################################################################
import os
import sys
import inspect
import traceback
import optparse
import logging
import configobj
try:
# python 2.6
import unittest2 as unittest
except ImportError:
import unittest
try:
import cPickle as pickle
except ImportError:
import pickle as pickle
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
from setproctitle import setproctitle
except ImportError:
setproctitle = None
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'src')))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'src', 'collectors')))
def run_only(func, predicate):
    """Return *func* when predicate() is truthy, else a no-op stand-in.

    Used to skip test helpers when an optional dependency is missing.

    Fix: the previous no-op stub accepted exactly one positional
    argument, so substituting it for a callable with any other arity
    raised TypeError.  The stand-in now accepts any signature and
    returns None.
    """
    if predicate():
        return func
    else:
        def f(*args, **kwargs):
            return None
        return f
def get_collector_config(key, value):
    """Build a minimal ConfigObj suitable for driving one collector.

    The returned tree mirrors Diamond's runtime configuration layout:
    a 'server' section, a 'collectors/default' section with the
    hostname method fixed for reproducible tests, and the given
    collector section installed under *key*.
    """
    cfg = configobj.ConfigObj()
    cfg['server'] = {'collectors_config_path': ''}
    cfg['collectors'] = {'default': {'hostname_method': "uname_short"}}
    cfg['collectors'][key] = value
    return cfg
class CollectorTestCase(unittest.TestCase):
def setDocExample(self, collector, metrics, defaultpath=None):
    """Inject example metric lines into docs/collectors/<collector>.md.

    Replaces the literal '__EXAMPLESHERE__' marker line with one
    'servers.hostname.<defaultpath>.<metric> <value>' line per metric
    (sorted by metric name).  Returns True on success, False when there
    is nothing to write or the doc file is missing/unreadable/unwritable.
    """
    if not len(metrics):
        return False
    filePath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            'docs', 'collectors', collector + '.md')
    if not os.path.exists(filePath):
        return False
    if not os.access(filePath, os.W_OK):
        return False
    if not os.access(filePath, os.R_OK):
        return False
    try:
        # 'Ur' is the Python 2 universal-newline read mode.
        with open(filePath, 'Ur') as fp:
            content = fp.readlines()
        with open(filePath, 'w') as fp:
            for line in content:
                if line.strip() == '__EXAMPLESHERE__':
                    for metric in sorted(metrics.iterkeys()):
                        metricPath = 'servers.hostname.'
                        if defaultpath:
                            metricPath += defaultpath + '.'
                        metricPath += metric
                        # Collapse accidental '..' from empty segments.
                        metricPath = metricPath.replace('..', '.')
                        fp.write('%s %s\n' % (metricPath, metrics[metric]))
                else:
                    fp.write(line)
    except IOError:
        return False
    return True
def getFixtureDirPath(self):
    """Return the fixtures/ directory sitting next to this test's file."""
    class_file = inspect.getfile(self.__class__)
    return os.path.join(os.path.dirname(class_file), 'fixtures')
def getFixturePath(self, fixture_name):
    """Return the full path of *fixture_name* inside the fixtures dir.

    A warning is printed when the fixture is missing or unreadable; the
    (possibly nonexistent) path is returned regardless so callers get a
    clear error at open() time.

    Fix: use the function-call form of print, which is valid on both
    Python 2 and Python 3; the old print statement made this module a
    SyntaxError under Python 3.
    """
    path = os.path.join(self.getFixtureDirPath(),
                        fixture_name)
    if not os.access(path, os.R_OK):
        print("Missing Fixture " + path)
    return path
def getFixture(self, fixture_name):
    """Load a fixture file and return its contents wrapped in StringIO."""
    with open(self.getFixturePath(fixture_name), 'r') as handle:
        contents = handle.read()
    return StringIO(contents)
def getFixtures(self):
    """Return the paths of all files under the fixtures directory.

    Fix: the previous implementation called
    ``os.path.join(root, dirnames, filenames)`` with the *lists*
    produced by ``os.walk()``, which raises as soon as any directory is
    walked -- it could never have returned usable file paths.  Join
    each filename with its root individually instead.
    """
    fixtures = []
    for root, dirnames, filenames in os.walk(self.getFixtureDirPath()):
        for filename in filenames:
            fixtures.append(os.path.join(root, filename))
    return fixtures
def getPickledResults(self, results_name):
    """Unpickle and return a stored results fixture."""
    with open(self.getFixturePath(results_name), 'r') as handle:
        return pickle.load(handle)

def setPickledResults(self, results_name, data):
    """Pickle *data* into the named results fixture, overwriting it."""
    with open(self.getFixturePath(results_name), 'w+b') as handle:
        pickle.dump(data, handle)
def assertUnpublished(self, mock, key, value, expected_value=0):
return self.assertPublished(mock, key, value, expected_value)
def assertPublished(self, mock, key, value, expected_value=1):
if type(mock) is list:
for m in mock:
calls = (filter(lambda x: x[0][0] == key, m.call_args_list))
if len(calls) > 0:
break
else:
calls = filter(lambda x: x[0][0] == key, mock.call_args_list)
actual_value = len(calls)
message = '%s: actual number of calls %d, expected %d' % (
key, actual_value, expected_value)
self.assertEqual(actual_value, expected_value, message)
if expected_value:
actual_value = calls[0][0][1]
expected_value = value
precision = 0
if isinstance(value, tuple):
expected_value, precision = expected_value
message = '%s: actual %r, expected %r' % (key,
actual_value,
expected_value)
if precision is not None:
self.assertAlmostEqual(float(actual_value),
float(expected_value),
places=precision,
msg=message)
else:
self.assertEqual(actual_value, expected_value, message)
def assertUnpublishedMany(self, mock, dict, expected_value=0):
return self.assertPublishedMany(mock, dict, expected_value)
def assertPublishedMany(self, mock, dict, expected_value=1):
for key, value in dict.iteritems():
self.assertPublished(mock, key, value, expected_value)
if type(mock) is list:
for m in mock:
m.reset_mock()
else:
mock.reset_mock()
def assertUnpublishedMetric(self, mock, key, value, expected_value=0):
return self.assertPublishedMetric(mock, key, value, expected_value)
def assertPublishedMetric(self, mock, key, value, expected_value=1):
calls = filter(lambda x: x[0][0].path.find(key) != -1,
mock.call_args_list)
actual_value = len(calls)
message = '%s: actual number of calls %d, expected %d' % (
key, actual_value, expected_value)
self.assertEqual(actual_value, expected_value, message)
if expected_value:
actual_value = calls[0][0][0].value
expected_value = value
precision = 0
if isinstance(value, tuple):
expected_value, precision = expected_value
message = '%s: actual %r, expected %r' % (key,
actual_value,
expected_value)
if precision is not None:
self.assertAlmostEqual(float(actual_value),
float(expected_value),
places=precision,
msg=message)
else:
self.assertEqual(actual_value, expected_value, message)
def assertUnpublishedMetricMany(self, mock, dict, expected_value=0):
return self.assertPublishedMetricMany(mock, dict, expected_value)
def assertPublishedMetricMany(self, mock, dict, expected_value=1):
for key, value in dict.iteritems():
self.assertPublishedMetric(mock, key, value, expected_value)
mock.reset_mock()
# Module name -> imported test module, filled in by getCollectorTests().
collectorTests = {}


def getCollectorTests(path):
    """Recursively import every test module (``test*.py``) under *path*.

    Imported modules are stored in the module-level ``collectorTests``
    dict keyed by module name.  Each module's directory (and its parent)
    is appended to sys.path so the module and its collector import
    cleanly.  Import failures are reported and skipped.
    """
    for f in os.listdir(path):
        cPath = os.path.abspath(os.path.join(path, f))
        if ((os.path.isfile(cPath) and
             len(f) > 3 and
             f[-3:] == '.py' and
             f[0:4] == 'test')):
            sys.path.append(os.path.dirname(cPath))
            sys.path.append(os.path.dirname(os.path.dirname(cPath)))
            modname = f[:-3]
            try:
                # Import the module
                collectorTests[modname] = __import__(modname,
                                                     globals(),
                                                     locals(),
                                                     ['*'])
            except Exception:
                # print() call form works on both Python 2 and Python 3.
                print("Failed to import module: %s. %s" % (
                    modname, traceback.format_exc()))
                continue
    # Second pass: recurse into subdirectories.
    for f in os.listdir(path):
        cPath = os.path.abspath(os.path.join(path, f))
        if os.path.isdir(cPath):
            getCollectorTests(cPath)
###############################################################################
# Test-runner entry point: discovers collector/diamond test modules, runs
# them with unittest, and exits non-zero on failures (1) or errors (2).
if __name__ == "__main__":
    if setproctitle:
        # Rename the process so it is identifiable in ps output.
        setproctitle('test.py')
    # Disable log output for the unit tests
    log = logging.getLogger("diamond")
    log.addHandler(logging.StreamHandler(sys.stderr))
    log.disabled = True
    # Initialize Options
    parser = optparse.OptionParser()
    parser.add_option("-c",
                      "--collector",
                      dest="collector",
                      default="",
                      help="Run a single collector's unit tests")
    parser.add_option("-v",
                      "--verbose",
                      dest="verbose",
                      default=1,
                      action="count",
                      help="verbose")
    # Parse Command Line Args
    (options, args) = parser.parse_args()
    # Collector tests live under src/collectors/<name>; with no -c option
    # the empty string makes cPath the whole collectors tree.
    cPath = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                         'src',
                                         'collectors',
                                         options.collector))
    dPath = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                         'src',
                                         'diamond'))
    getCollectorTests(cPath)
    if not options.collector:
        # Only pull in diamond tests when a specific collector
        # hasn't been specified
        getCollectorTests(dPath)
    # Collect every TestCase subclass from the imported test modules.
    loader = unittest.TestLoader()
    tests = []
    for test in collectorTests:
        for name, c in inspect.getmembers(collectorTests[test],
                                          inspect.isclass):
            if not issubclass(c, unittest.TestCase):
                continue
            tests.append(loader.loadTestsFromTestCase(c))
    suite = unittest.TestSuite(tests)
    results = unittest.TextTestRunner(verbosity=options.verbose).run(suite)
    # Parse the TextTestResult repr (e.g. "<... run=3 errors=0 failures=0>")
    # into a dict so the exit code can reflect failures/errors.
    # NOTE(review): this relies on the repr format of TextTestResult —
    # fragile, but kept as-is here.
    results = str(results)
    results = results.replace('>', '').split()[1:]
    resobj = {}
    for result in results:
        result = result.split('=')
        resobj[result[0]] = int(result[1])
    if resobj['failures'] > 0:
        sys.exit(1)
    if resobj['errors'] > 0:
        sys.exit(2)
    sys.exit(0)
| mit |
theshteves/tweet-the-wolf | pips/requests/packages/urllib3/util/retry.py | 67 | 9924 | import time
import logging
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
)
from ..packages import six
log = logging.getLogger(__name__)
class Retry(object):
    """ Retry configuration.

    Each retry attempt will create a new Retry object with updated values, so
    they can be safely reused.

    Retries can be defined as a default for a pool::

        retries = Retry(connect=5, read=2, redirect=5)
        http = PoolManager(retries=retries)
        response = http.request('GET', 'http://example.com/')

    Or per-request (which overrides the default for the pool)::

        response = http.request('GET', 'http://example.com/', retries=Retry(10))

    Retries can be disabled by passing ``False``::

        response = http.request('GET', 'http://example.com/', retries=False)

    Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
    retries are disabled, in which case the causing exception will be raised.

    :param int total:
        Total number of retries to allow. Takes precedence over other counts.

        Set to ``None`` to remove this constraint and fall back on other
        counts. It's a good idea to set this to some sensibly-high value to
        account for unexpected edge cases and avoid infinite retry loops.

        Set to ``0`` to fail on the first retry.

        Set to ``False`` to disable and imply ``raise_on_redirect=False``.

    :param int connect:
        How many connection-related errors to retry on.

        These are errors raised before the request is sent to the remote server,
        which we assume has not triggered the server to process the request.

        Set to ``0`` to fail on the first retry of this type.

    :param int read:
        How many times to retry on read errors.

        These errors are raised after the request was sent to the server, so the
        request may have side-effects.

        Set to ``0`` to fail on the first retry of this type.

    :param int redirect:
        How many redirects to perform. Limit this to avoid infinite redirect
        loops.

        A redirect is a HTTP response with a status code 301, 302, 303, 307 or
        308.

        Set to ``0`` to fail on the first retry of this type.

        Set to ``False`` to disable and imply ``raise_on_redirect=False``.

    :param iterable method_whitelist:
        Set of uppercased HTTP method verbs that we should retry on.

        By default, we only retry on methods which are considered to be
        indempotent (multiple requests with the same parameters end with the
        same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.

    :param iterable status_forcelist:
        A set of HTTP status codes that we should force a retry on.

        By default, this is disabled with ``None``.

    :param float backoff_factor:
        A backoff factor to apply between attempts. urllib3 will sleep for::

            {backoff factor} * (2 ^ ({number of total retries} - 1))

        seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
        for [0.1s, 0.2s, 0.4s, ...] between retries. It will never be longer
        than :attr:`Retry.BACKOFF_MAX`.

        By default, backoff is disabled (set to 0).

    :param bool raise_on_redirect: Whether, if the number of redirects is
        exhausted, to raise a MaxRetryError, or to return a response with a
        response code in the 3xx range.
    """
    DEFAULT_METHOD_WHITELIST = frozenset([
        'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
    #: Maximum backoff time.
    BACKOFF_MAX = 120
    def __init__(self, total=10, connect=None, read=None, redirect=None,
                 method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
                 backoff_factor=0, raise_on_redirect=True, _observed_errors=0):
        self.total = total
        self.connect = connect
        self.read = read
        # redirect=False (or total=False) disables redirect handling
        # entirely: no redirect retries and no MaxRetryError on exhaustion.
        if redirect is False or total is False:
            redirect = 0
            raise_on_redirect = False
        self.redirect = redirect
        self.status_forcelist = status_forcelist or set()
        self.method_whitelist = method_whitelist
        self.backoff_factor = backoff_factor
        self.raise_on_redirect = raise_on_redirect
        self._observed_errors = _observed_errors # TODO: use .history instead?
    def new(self, **kw):
        # Copy this configuration with selected fields overridden; used by
        # increment() so Retry instances stay effectively immutable.
        params = dict(
            total=self.total,
            connect=self.connect, read=self.read, redirect=self.redirect,
            method_whitelist=self.method_whitelist,
            status_forcelist=self.status_forcelist,
            backoff_factor=self.backoff_factor,
            raise_on_redirect=self.raise_on_redirect,
            _observed_errors=self._observed_errors,
        )
        params.update(kw)
        return type(self)(**params)
    @classmethod
    def from_int(cls, retries, redirect=True, default=None):
        """ Backwards-compatibility for the old retries format.

        Accepts an int, a Retry instance, None (use *default* or
        Retry.DEFAULT), or False/0 (disable retries).
        """
        if retries is None:
            retries = default if default is not None else cls.DEFAULT
        if isinstance(retries, Retry):
            return retries
        # A truthy redirect collapses to None (fall back on `total`); a
        # falsy one stays falsy and disables redirects in __init__.
        redirect = bool(redirect) and None
        new_retries = cls(retries, redirect=redirect)
        log.debug("Converted retries value: %r -> %r" % (retries, new_retries))
        return new_retries
    def get_backoff_time(self):
        """ Formula for computing the current backoff

        :rtype: float
        """
        # No backoff before the second consecutive error.
        if self._observed_errors <= 1:
            return 0
        # Exponential: backoff_factor * 2^(errors - 1), capped at BACKOFF_MAX.
        backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
        return min(self.BACKOFF_MAX, backoff_value)
    def sleep(self):
        """ Sleep between retry attempts using an exponential backoff.

        By default, the backoff factor is 0 and this method will return
        immediately.
        """
        backoff = self.get_backoff_time()
        if backoff <= 0:
            return
        time.sleep(backoff)
    def _is_connection_error(self, err):
        """ Errors when we're fairly sure that the server did not receive the
        request, so it should be safe to retry.
        """
        return isinstance(err, ConnectTimeoutError)
    def _is_read_error(self, err):
        """ Errors that occur after the request has been started, so we should
        assume that the server began processing it.
        """
        return isinstance(err, (ReadTimeoutError, ProtocolError))
    def is_forced_retry(self, method, status_code):
        """ Is this method/status code retryable? (Based on method/codes whitelists)
        """
        if self.method_whitelist and method.upper() not in self.method_whitelist:
            return False
        # NOTE: returns the truthiness of the forcelist check, not a strict
        # bool (an empty forcelist yields a falsy set()).
        return self.status_forcelist and status_code in self.status_forcelist
    def is_exhausted(self):
        """ Are we out of retries? """
        # filter(None, ...) drops None (unset) AND 0/False counters, so only
        # counters that went negative mark exhaustion.
        retry_counts = (self.total, self.connect, self.read, self.redirect)
        retry_counts = list(filter(None, retry_counts))
        if not retry_counts:
            return False
        return min(retry_counts) < 0
    def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None):
        """ Return a new Retry object with incremented retry counters.

        :param response: A response object, or None, if the server did not
            return a response.
        :type response: :class:`~urllib3.response.HTTPResponse`
        :param Exception error: An error encountered during the request, or
            None if the response was received successfully.

        :return: A new ``Retry`` object.
        """
        if self.total is False and error:
            # Disabled, indicate to re-raise the error.
            raise six.reraise(type(error), error, _stacktrace)
        total = self.total
        if total is not None:
            total -= 1
        _observed_errors = self._observed_errors
        connect = self.connect
        read = self.read
        redirect = self.redirect
        cause = 'unknown'
        # Exactly one of the branches below decrements the matching
        # type-specific counter (when it is tracked) and records the cause.
        if error and self._is_connection_error(error):
            # Connect retry?
            if connect is False:
                raise six.reraise(type(error), error, _stacktrace)
            elif connect is not None:
                connect -= 1
            _observed_errors += 1
        elif error and self._is_read_error(error):
            # Read retry?
            if read is False:
                raise six.reraise(type(error), error, _stacktrace)
            elif read is not None:
                read -= 1
            _observed_errors += 1
        elif response and response.get_redirect_location():
            # Redirect retry?
            # Note: redirects do not count as observed errors (no backoff).
            if redirect is not None:
                redirect -= 1
            cause = 'too many redirects'
        else:
            # Incrementing because of a server error like a 500 in
            # status_forcelist and a the given method is in the whitelist
            _observed_errors += 1
            cause = ResponseError.GENERIC_ERROR
            if response and response.status:
                cause = ResponseError.SPECIFIC_ERROR.format(
                    status_code=response.status)
        new_retry = self.new(
            total=total,
            connect=connect, read=read, redirect=redirect,
            _observed_errors=_observed_errors)
        if new_retry.is_exhausted():
            raise MaxRetryError(_pool, url, error or ResponseError(cause))
        log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
        return new_retry
    def __repr__(self):
        return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
                'read={self.read}, redirect={self.redirect})').format(
                    cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
# a module-level default of 3 total retries, used by Retry.from_int()
# when no retries value is supplied.
Retry.DEFAULT = Retry(3)
| mit |
40223150/2015cd_0505 | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_functiontestcase.py | 791 | 5478 | import unittest
from .support import LoggingResult
class Test_FunctionTestCase(unittest.TestCase):
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
test = unittest.FunctionTestCase(lambda: None)
self.assertEqual(test.countTestCases(), 1)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
raise RuntimeError('raised by setUp')
def test():
events.append('test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'addError', 'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
def test():
events.append('test')
raise RuntimeError('raised by test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
def test():
events.append('test')
self.fail('raised by test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addFailure', 'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
def test():
events.append('test')
def tearDown():
events.append('tearDown')
raise RuntimeError('raised by tearDown')
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-byte or unicode -- again, because the docs
# just say "string")
def test_id(self):
test = unittest.FunctionTestCase(lambda: None)
self.assertIsInstance(test.id(), str)
# "Returns a one-line description of the test, or None if no description
# has been provided. The default implementation of this method returns
# the first line of the test method's docstring, if available, or None."
def test_shortDescription__no_docstring(self):
test = unittest.FunctionTestCase(lambda: None)
self.assertEqual(test.shortDescription(), None)
# "Returns a one-line description of the test, or None if no description
# has been provided. The default implementation of this method returns
# the first line of the test method's docstring, if available, or None."
def test_shortDescription__singleline_docstring(self):
desc = "this tests foo"
test = unittest.FunctionTestCase(lambda: None, description=desc)
self.assertEqual(test.shortDescription(), "this tests foo")
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.