repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
BeATz-UnKNoWN/python-for-android | python-build/python-libs/gdata/tests/gdata_tests/base_test.py | 94 | 13455 | #!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder@gmail.com (Jeff Scudder)'
import unittest
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import gdata
from gdata import test_data
import gdata.base
class LabelTest(unittest.TestCase):
    """Round-trip (to-XML-and-back) tests for gdata.base.Label."""

    def setUp(self):
        self.label = gdata.base.Label()

    def testToAndFromString(self):
        self.label.text = 'test label'
        # assertEqual reports both values on failure, unlike the deprecated
        # boolean assert_(a == b) form.
        self.assertEqual(self.label.text, 'test label')
        new_label = gdata.base.LabelFromString(self.label.ToString())
        self.assertEqual(self.label.text, new_label.text)
class ItemTypeTest(unittest.TestCase):
    """Round-trip tests for gdata.base.ItemType text and type attributes."""

    def setUp(self):
        self.item_type = gdata.base.ItemType()

    def testToAndFromString(self):
        self.item_type.text = 'product'
        self.item_type.type = 'text'
        self.assertEqual(self.item_type.text, 'product')
        self.assertEqual(self.item_type.type, 'text')
        new_item_type = gdata.base.ItemTypeFromString(self.item_type.ToString())
        self.assertEqual(self.item_type.text, new_item_type.text)
        self.assertEqual(self.item_type.type, new_item_type.type)
class GBaseItemTest(unittest.TestCase):
    """Tests for gdata.base.GBaseItem serialization and the custom
    item-attribute helper methods (Add/Find/Set/Remove/GetItemAttributes)."""

    def setUp(self):
        self.item = gdata.base.GBaseItem()

    def testToAndFromString(self):
        self.item.label.append(gdata.base.Label(text='my label'))
        self.assertEqual(self.item.label[0].text, 'my label')
        self.item.item_type = gdata.base.ItemType(text='products')
        self.assertEqual(self.item.item_type.text, 'products')
        self.item.item_attributes.append(
            gdata.base.ItemAttribute('extra', text='foo'))
        self.assertEqual(self.item.item_attributes[0].text, 'foo')
        self.assertEqual(self.item.item_attributes[0].name, 'extra')
        new_item = gdata.base.GBaseItemFromString(self.item.ToString())
        self.assertEqual(self.item.label[0].text, new_item.label[0].text)
        self.assertEqual(self.item.item_type.text, new_item.item_type.text)
        self.assertEqual(self.item.item_attributes[0].text,
                         new_item.item_attributes[0].text)

    def testCustomItemAttributes(self):
        self.item.AddItemAttribute('test_attrib', 'foo')
        self.assertEqual(self.item.FindItemAttribute('test_attrib'), 'foo')
        self.item.SetItemAttribute('test_attrib', 'bar')
        self.assertEqual(self.item.FindItemAttribute('test_attrib'), 'bar')
        self.item.RemoveItemAttribute('test_attrib')
        self.assertTrue(self.item.FindItemAttribute('test_attrib') is None)

    def testConvertActualData(self):
        # Parse the canned GBASE_FEED fixture and spot-check known
        # attribute values of one specific entry.
        feed = gdata.base.GBaseSnippetFeedFromString(test_data.GBASE_FEED)
        for an_entry in feed.entry:
            if an_entry.author[0].email.text == 'anon-szot0wdsq0at@base.google.com':
                for attrib in an_entry.item_attributes:
                    if attrib.name == 'payment_notes':
                        self.assertEqual(
                            attrib.text,
                            'PayPal & Bill Me Later credit available online only.')
                    if attrib.name == 'condition':
                        self.assertEqual(attrib.text, 'new')

    def testModifyCustomItemAttributes(self):
        self.item.AddItemAttribute('test_attrib', 'foo', value_type='test1')
        self.item.AddItemAttribute('test_attrib', 'bar', value_type='test2')
        self.assertEqual(self.item.item_attributes[0].name, 'test_attrib')
        self.assertEqual(self.item.item_attributes[1].name, 'test_attrib')
        self.assertEqual(self.item.item_attributes[0].text, 'foo')
        self.assertEqual(self.item.item_attributes[1].text, 'bar')
        # Get one of the custom attributes from the item.
        attributes = self.item.GetItemAttributes('test_attrib')
        self.assertEqual(len(attributes), 2)
        self.assertEqual(attributes[0].text, 'foo')
        # Change the contents of the found item attribute.
        attributes[0].text = 'new foo'
        self.assertEqual(attributes[0].text, 'new foo')
        # Make sure that the change is reflected in the item.
        self.assertEqual(self.item.item_attributes[0].text, 'new foo')
class GBaseItemFeedTest(unittest.TestCase):
    """Tests for parsing a feed of GBaseItem entries and its link helpers."""

    def setUp(self):
        self.item_feed = gdata.base.GBaseItemFeedFromString(test_data.GBASE_FEED)

    def testToAndFromString(self):
        self.assertEqual(len(self.item_feed.entry), 3)
        for an_entry in self.item_feed.entry:
            self.assertTrue(isinstance(an_entry, gdata.base.GBaseItem))
        # Re-serialize and re-parse; entry types must survive the round trip.
        new_item_feed = gdata.base.GBaseItemFeedFromString(str(self.item_feed))
        for an_entry in new_item_feed.entry:
            self.assertTrue(isinstance(an_entry, gdata.base.GBaseItem))

    def testLinkFinderFindsHtmlLink(self):
        for entry in self.item_feed.entry:
            # All Base entries should have a self link
            self.assertTrue(entry.GetSelfLink() is not None)
            # All Base items should have an HTML link
            self.assertTrue(entry.GetHtmlLink() is not None)
            # None of the Base items should have an edit link
            self.assertTrue(entry.GetEditLink() is None)
class GBaseSnippetFeedTest(unittest.TestCase):
    """Tests for parsing a feed of GBaseSnippet entries."""

    def setUp(self):
        self.snippet_feed = gdata.base.GBaseSnippetFeedFromString(
            test_data.GBASE_FEED)

    def testToAndFromString(self):
        self.assertEqual(len(self.snippet_feed.entry), 3)
        for an_entry in self.snippet_feed.entry:
            self.assertTrue(isinstance(an_entry, gdata.base.GBaseSnippet))
        new_snippet_feed = gdata.base.GBaseSnippetFeedFromString(
            str(self.snippet_feed))
        for an_entry in new_snippet_feed.entry:
            self.assertTrue(isinstance(an_entry, gdata.base.GBaseSnippet))
class ItemAttributeTest(unittest.TestCase):
    """Tests for gdata.base.ItemAttribute serialization."""

    # Note: method name typo fixed (was testToAndFromStirng); unittest
    # discovers any method starting with 'test', so behavior is unchanged.
    def testToAndFromString(self):
        attrib = gdata.base.ItemAttribute('price')
        attrib.type = 'float'
        self.assertEqual(attrib.name, 'price')
        self.assertEqual(attrib.type, 'float')
        new_attrib = gdata.base.ItemAttributeFromString(str(attrib))
        self.assertEqual(new_attrib.name, attrib.name)
        self.assertEqual(new_attrib.type, attrib.type)

    def testClassConvertsActualData(self):
        attrib = gdata.base.ItemAttributeFromString(test_data.TEST_GBASE_ATTRIBUTE)
        self.assertEqual(attrib.name, 'brand')
        self.assertEqual(attrib.type, 'text')
        self.assertEqual(len(attrib.extension_elements), 0)
        # Test conversion to an ElementTree
        element = attrib._ToElementTree()
        self.assertEqual(element.tag, gdata.base.GBASE_TEMPLATE % 'brand')
class AttributeTest(unittest.TestCase):
    """Round-trip tests for gdata.base.Attribute with nested Value elements."""

    def testAttributeToAndFromString(self):
        attrib = gdata.base.Attribute()
        attrib.type = 'float'
        attrib.count = '44000'
        attrib.name = 'test attribute'
        attrib.value.append(gdata.base.Value(count='500', text='a value'))
        self.assertEqual(attrib.type, 'float')
        self.assertEqual(attrib.count, '44000')
        self.assertEqual(attrib.name, 'test attribute')
        self.assertEqual(attrib.value[0].count, '500')
        self.assertEqual(attrib.value[0].text, 'a value')
        new_attrib = gdata.base.AttributeFromString(str(attrib))
        self.assertEqual(attrib.type, new_attrib.type)
        self.assertEqual(attrib.count, new_attrib.count)
        self.assertEqual(attrib.value[0].count, new_attrib.value[0].count)
        self.assertEqual(attrib.value[0].text, new_attrib.value[0].text)
        self.assertEqual(attrib.name, new_attrib.name)
class ValueTest(unittest.TestCase):
    """Round-trip tests for gdata.base.Value."""

    def testValueToAndFromString(self):
        value = gdata.base.Value()
        value.count = '5123'
        value.text = 'super great'
        self.assertEqual(value.count, '5123')
        self.assertEqual(value.text, 'super great')
        new_value = gdata.base.ValueFromString(str(value))
        self.assertEqual(new_value.count, value.count)
        self.assertEqual(new_value.text, value.text)
class AttributeEntryTest(unittest.TestCase):
    """Round-trip tests for gdata.base.GBaseAttributeEntry."""

    def testAttributeEntryToAndFromString(self):
        value = gdata.base.Value(count='500', text='happy')
        attribute = gdata.base.Attribute(count='600', value=[value])
        a_entry = gdata.base.GBaseAttributeEntry(attribute=[attribute])
        self.assertEqual(a_entry.attribute[0].count, '600')
        self.assertEqual(a_entry.attribute[0].value[0].count, '500')
        self.assertEqual(a_entry.attribute[0].value[0].text, 'happy')
        new_entry = gdata.base.GBaseAttributeEntryFromString(str(a_entry))
        self.assertEqual(new_entry.attribute[0].count, '600')
        self.assertEqual(new_entry.attribute[0].value[0].count, '500')
        self.assertEqual(new_entry.attribute[0].value[0].text, 'happy')
class GBaseAttributeEntryTest(unittest.TestCase):
    """Tests GBaseAttributeEntry parsing against a canned fixture."""

    # Note: method name typo fixed (was testAttribteEntryFromExampleData);
    # unittest discovers any 'test*' method, so behavior is unchanged.
    def testAttributeEntryFromExampleData(self):
        entry = gdata.base.GBaseAttributeEntryFromString(
            test_data.GBASE_ATTRIBUTE_ENTRY)
        self.assertEqual(len(entry.attribute), 1)
        self.assertEqual(len(entry.attribute[0].value), 10)
        self.assertEqual(entry.attribute[0].name, 'job industry')
        for val in entry.attribute[0].value:
            if val.text == 'it internet':
                self.assertEqual(val.count, '380772')
            elif val.text == 'healthcare':
                self.assertEqual(val.count, '261565')
class GBaseAttributesFeedTest(unittest.TestCase):
    """Tests for gdata.base.GBaseAttributesFeed parsing and round-tripping."""

    def testAttributesFeedExampleData(self):
        feed = gdata.base.GBaseAttributesFeedFromString(
            test_data.GBASE_ATTRIBUTE_FEED)
        self.assertEqual(len(feed.entry), 1)
        self.assertTrue(isinstance(feed.entry[0], gdata.base.GBaseAttributeEntry))

    def testAttributesFeedToAndFromString(self):
        value = gdata.base.Value(count='500', text='happy')
        attribute = gdata.base.Attribute(count='600', value=[value])
        a_entry = gdata.base.GBaseAttributeEntry(attribute=[attribute])
        feed = gdata.base.GBaseAttributesFeed(entry=[a_entry])
        self.assertEqual(feed.entry[0].attribute[0].count, '600')
        self.assertEqual(feed.entry[0].attribute[0].value[0].count, '500')
        self.assertEqual(feed.entry[0].attribute[0].value[0].text, 'happy')
        new_feed = gdata.base.GBaseAttributesFeedFromString(str(feed))
        self.assertEqual(new_feed.entry[0].attribute[0].count, '600')
        self.assertEqual(new_feed.entry[0].attribute[0].value[0].count, '500')
        self.assertEqual(new_feed.entry[0].attribute[0].value[0].text, 'happy')
class GBaseLocalesFeedTest(unittest.TestCase):
    """Tests GBaseLocalesFeed parsing against a canned fixture."""

    # Note: method name typo fixed (was testLocatesFeedWithExampleData);
    # unittest discovers any 'test*' method, so behavior is unchanged.
    def testLocalesFeedWithExampleData(self):
        feed = gdata.base.GBaseLocalesFeedFromString(test_data.GBASE_LOCALES_FEED)
        self.assertEqual(len(feed.entry), 3)
        self.assertEqual(feed.GetSelfLink().href,
                         'http://www.google.com/base/feeds/locales/')
        for an_entry in feed.entry:
            if an_entry.title.text == 'en_US':
                self.assertEqual(an_entry.category[0].term, 'en_US')
            self.assertEqual(an_entry.title.text, an_entry.category[0].term)
class GBaseItemTypesFeedAndEntryTest(unittest.TestCase):
    """Round-trip tests for GBaseItemTypesFeed / GBaseItemTypeEntry."""

    def testItemTypesFeedToAndFromString(self):
        feed = gdata.base.GBaseItemTypesFeed()
        entry = gdata.base.GBaseItemTypeEntry()
        entry.attribute.append(gdata.base.Attribute(name='location',
                                                    attribute_type='location'))
        entry.item_type = gdata.base.ItemType(text='jobs')
        feed.entry.append(entry)
        self.assertEqual(len(feed.entry), 1)
        self.assertEqual(feed.entry[0].attribute[0].name, 'location')
        new_feed = gdata.base.GBaseItemTypesFeedFromString(str(feed))
        self.assertEqual(len(new_feed.entry), 1)
        self.assertEqual(new_feed.entry[0].attribute[0].name, 'location')
class GBaseImageLinkTest(unittest.TestCase):
    """Round-trip tests for gdata.base.ImageLink with a nested Thumbnail."""

    def testImageLinkToAndFromString(self):
        image_link = gdata.base.ImageLink()
        image_link.type = 'url'
        image_link.text = 'example.com'
        thumbnail = gdata.base.Thumbnail()
        thumbnail.width = '60'
        thumbnail.height = '80'
        thumbnail.text = 'example text'
        image_link.thumbnail.append(thumbnail)
        xml = image_link.ToString()
        parsed = gdata.base.ImageLinkFromString(xml)
        self.assertEqual(parsed.type, image_link.type)
        self.assertEqual(parsed.text, image_link.text)
        self.assertEqual(len(parsed.thumbnail), 1)
        self.assertEqual(parsed.thumbnail[0].width, thumbnail.width)
        self.assertEqual(parsed.thumbnail[0].height, thumbnail.height)
        self.assertEqual(parsed.thumbnail[0].text, thumbnail.text)
class GBaseItemAttributeAccessElement(unittest.TestCase):
    """Tests the 'access' attribute on custom item attributes."""

    def testItemAttributeAccessAttribute(self):
        item = gdata.base.GBaseItem()
        item.AddItemAttribute('test', '1', value_type='int', access='private')
        private_attribute = item.GetItemAttributes('test')[0]
        self.assertEqual(private_attribute.access, 'private')
        xml = item.ToString()
        new_item = gdata.base.GBaseItemFromString(xml)
        new_attributes = new_item.GetItemAttributes('test')
        self.assertEqual(len(new_attributes), 1)
        # NOTE(review): the original author disabled the assertion below,
        # which suggests 'access' may not survive the XML round trip —
        # confirm whether this is a known parser limitation.
        # self.assert_(new_attributes[0].access == 'private')
# Run every test case in this module when executed as a script.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
Stratos42/EveBot | plugins.disabled/bf.py | 1 | 2472 | '''brainfuck interpreter adapted from (public domain) code at
http://brainfuck.sourceforge.net/brain.py'''
import re
import random
from util import hook
BUFFER_SIZE = 5000   # initial memory tape size in cells; grown on demand by '>'
MAX_STEPS = 1000000  # hard cap on interpreter steps to stop runaway programs
@hook.command
def bf(inp):
    """.bf <prog> -- executes brainfuck program <prog>"""
    # Strip everything that is not a brainfuck instruction. Note the
    # character class: '+-.' is a range from '+' (43) to '.' (46), which
    # also covers ',' (44) and '-' (45).
    program = re.sub('[^][<>+-.,]', '', inp)

    # create a dict of bracket pairs, for speed later on
    brackets = {}
    open_brackets = []
    for pos in range(len(program)):
        if program[pos] == '[':
            open_brackets.append(pos)
        elif program[pos] == ']':
            if len(open_brackets) > 0:
                brackets[pos] = open_brackets[-1]
                brackets[open_brackets[-1]] = pos
                open_brackets.pop()
            else:
                return 'unbalanced brackets'
    if len(open_brackets) != 0:
        return 'unbalanced brackets'

    # now we can start interpreting
    ip = 0        # instruction pointer
    mp = 0        # memory pointer
    steps = 0
    memory = [0] * BUFFER_SIZE  # initial memory area
    rightmost = 0
    output = ""   # we'll save the output here

    # the main program loop:
    while ip < len(program):
        c = program[ip]
        if c == '+':
            # BUG FIX: '%' binds tighter than '+', so the original
            # `memory[mp] + 1 % 256` never wrapped the cell at 255.
            memory[mp] = (memory[mp] + 1) % 256
        elif c == '-':
            # BUG FIX: same precedence problem; the original let cells go
            # negative, which made chr() blow up on output.
            memory[mp] = (memory[mp] - 1) % 256
        elif c == '>':
            mp += 1
            if mp > rightmost:
                rightmost = mp
                if mp >= len(memory):
                    # no restriction on memory growth!
                    memory.extend([0] * BUFFER_SIZE)
        elif c == '<':
            # BUG FIX: parenthesized so '<' at cell 0 wraps to the last cell
            # instead of leaving mp negative.
            mp = (mp - 1) % len(memory)
        elif c == '.':
            output += chr(memory[mp])
            if len(output) > 500:
                break
        elif c == ',':
            # ',' normally reads stdin; this bot feeds random bytes instead.
            memory[mp] = random.randint(1, 255)
        elif c == '[':
            if memory[mp] == 0:
                ip = brackets[ip]
        elif c == ']':
            if memory[mp] != 0:
                ip = brackets[ip]
        ip += 1
        steps += 1
        if steps > MAX_STEPS:
            if output == '':
                output = '(no output)'
            output += '[exceeded %d iterations]' % MAX_STEPS
            break

    # drop unprintable control characters before replying
    stripped_output = re.sub(r'[\x00-\x1F]', '', output)
    if stripped_output == '':
        if output != '':
            return 'no printable output'
        return 'no output'
    # NOTE(review): str.decode is Python 2 only; under Python 3 this line
    # would need to be just `return stripped_output[:430]`.
    return stripped_output[:430].decode('utf8', 'ignore')
| gpl-3.0 |
koala-ai/tensorflow_nlp | nlp/chatbot/model.py | 1 | 10775 | import copy
import numpy as np
import tensorflow as tf
from nlp.chatbot.dataset import data_utils
class S2SModel(object):
    """Bucketed sequence-to-sequence chatbot model.

    Builds an embedding attention seq2seq graph (tf.contrib.legacy_seq2seq)
    with one set of outputs/losses per bucket, optional sampled-softmax
    output projection, and gradient-clipped Adam updates for training.
    """

    def __init__(self,
                 source_vocab_size,
                 target_vocab_size,
                 buckets,
                 size,
                 dropout,
                 num_layers,
                 max_gradient_norm,
                 batch_size,
                 learning_rate,
                 num_samples,
                 forward_only=False,
                 dtype=tf.float32):
        """Construct the graph.

        Args:
            source_vocab_size: size of the encoder vocabulary.
            target_vocab_size: size of the decoder vocabulary.
            buckets: list of (encoder_length, decoder_length) pairs,
                assumed sorted so the last bucket is the largest.
            size: number of units per LSTM layer (also embedding size).
            dropout: output keep probability for the LSTM cells.
            num_layers: number of stacked LSTM layers.
            max_gradient_norm: clip gradients to this global norm.
            batch_size: fixed batch size used by get_batch/step.
            learning_rate: Adam learning rate.
            num_samples: sampled-softmax sample count; enables the output
                projection when 0 < num_samples < target_vocab_size.
            forward_only: if True, build only the inference path (decoder
                feeds back its own previous output).
            dtype: graph dtype (tf.float32 or tf.float16).
        """
        # init member variables
        self.source_vocab_size = source_vocab_size
        self.target_vocab_size = target_vocab_size
        self.buckets = buckets
        self.batch_size = batch_size
        self.learning_rate = learning_rate

        # LSTM cells: base cell -> dropout wrapper -> stacked layers.
        cell = tf.contrib.rnn.BasicLSTMCell(size)
        cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=dropout)
        cell = tf.contrib.rnn.MultiRNNCell([cell] * num_layers)

        output_projection = None
        softmax_loss_function = None
        # Use sampled softmax only when it actually samples a subset of
        # the vocabulary.
        if num_samples > 0 and num_samples < self.target_vocab_size:
            print('开启投影:{}'.format(num_samples))
            w_t = tf.get_variable(
                "proj_w",
                [self.target_vocab_size, size],
                dtype=dtype
            )
            w = tf.transpose(w_t)
            b = tf.get_variable(
                "proj_b",
                [self.target_vocab_size],
                dtype=dtype
            )
            output_projection = (w, b)

            def sampled_loss(labels, logits):
                """Sampled-softmax loss; computed in float32 for stability."""
                labels = tf.reshape(labels, [-1, 1])
                # Because fp16 training may be selected, uniformly cast the
                # loss inputs to fp32 here.
                local_w_t = tf.cast(w_t, tf.float32)
                local_b = tf.cast(b, tf.float32)
                local_inputs = tf.cast(logits, tf.float32)
                return tf.cast(
                    tf.nn.sampled_softmax_loss(
                        weights=local_w_t,
                        biases=local_b,
                        labels=labels,
                        inputs=local_inputs,
                        num_sampled=num_samples,
                        num_classes=self.target_vocab_size
                    ),
                    dtype
                )
            softmax_loss_function = sampled_loss

        # seq2seq_f
        def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
            """Build one bucket's seq2seq subgraph.

            NOTE(review): deepcopy of the cell is a workaround so each
            bucket gets its own cell object — confirm it still behaves
            with the TF version in use.
            """
            tmp_cell = copy.deepcopy(cell)
            return tf.contrib.legacy_seq2seq.embedding_attention_seq2seq(
                encoder_inputs,
                decoder_inputs,
                tmp_cell,
                num_encoder_symbols=source_vocab_size,
                num_decoder_symbols=target_vocab_size,
                embedding_size=size,
                output_projection=output_projection,
                feed_previous=do_decode,
                dtype=dtype
            )

        # Placeholders sized for the largest bucket; smaller buckets use a
        # prefix of these lists.
        self.encoder_inputs = []
        self.decoder_inputs = []
        self.decoder_weights = []
        # The last entry of buckets (index -1) is the largest bucket.
        for i in range(buckets[-1][0]):
            self.encoder_inputs.append(tf.placeholder(
                tf.int32,
                shape=[None],
                name='encoder_input_{}'.format(i)
            ))
        # Decoder inputs are one longer than the bucket length so the
        # targets below can be the decoder inputs shifted left by one.
        for i in range(buckets[-1][1] + 1):
            self.decoder_inputs.append(tf.placeholder(
                tf.int32,
                shape=[None],
                name='decoder_input_{}'.format(i)
            ))
            self.decoder_weights.append(tf.placeholder(
                dtype,
                shape=[None],
                name='decoder_weight_{}'.format(i)
            ))
        targets = [
            self.decoder_inputs[i + 1] for i in range(buckets[-1][1])
        ]

        if forward_only:
            # Inference graph: decoder feeds back its own predictions.
            self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(
                self.encoder_inputs,
                self.decoder_inputs,
                targets,
                self.decoder_weights,
                buckets,
                lambda x, y: seq2seq_f(x, y, True),
                softmax_loss_function=softmax_loss_function
            )
            if output_projection is not None:
                # Project RNN outputs back to full-vocabulary logits.
                for b in range(len(buckets)):
                    self.outputs[b] = [
                        tf.matmul(
                            output,
                            output_projection[0]
                        ) + output_projection[1]
                        for output in self.outputs[b]
                    ]
        else:
            # Training graph: decoder is fed the ground-truth inputs.
            self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(
                self.encoder_inputs,
                self.decoder_inputs,
                targets,
                self.decoder_weights,
                buckets,
                lambda x, y: seq2seq_f(x, y, False),
                softmax_loss_function=softmax_loss_function
            )
        params = tf.trainable_variables()
        opt = tf.train.AdamOptimizer(
            learning_rate=learning_rate
        )
        if not forward_only:
            # Per-bucket clipped-gradient update ops.
            self.gradient_norms = []
            self.updates = []
            for output, loss in zip(self.outputs, self.losses):
                gradients = tf.gradients(loss, params)
                clipped_gradients, norm = tf.clip_by_global_norm(
                    gradients,
                    max_gradient_norm
                )
                self.gradient_norms.append(norm)
                self.updates.append(opt.apply_gradients(
                    zip(clipped_gradients, params)
                ))
        # self.saver = tf.train.Saver(tf.all_variables())
        self.saver = tf.train.Saver(
            tf.all_variables(),
            write_version=tf.train.SaverDef.V2
        )

    def step(
            self,
            session,
            encoder_inputs,
            decoder_inputs,
            decoder_weights,
            bucket_id,
            forward_only
    ):
        """Run one step of the model on a batch for the given bucket.

        Args:
            session: the tf.Session to run in.
            encoder_inputs/decoder_inputs/decoder_weights: time-major lists
                of numpy arrays as produced by get_batch.
            bucket_id: index into self.buckets the batch was built for.
            forward_only: if True, do not run the update op.

        Returns:
            (gradient_norm, loss, outputs) when training,
            (None, loss, outputs) when forward_only.

        Raises:
            ValueError: if any input list length does not match the bucket.
        """
        encoder_size, decoder_size = self.buckets[bucket_id]
        if len(encoder_inputs) != encoder_size:
            raise ValueError(
                "Encoder length must be equal to the one in bucket,"
                " %d != %d." % (len(encoder_inputs), encoder_size)
            )
        if len(decoder_inputs) != decoder_size:
            raise ValueError(
                "Decoder length must be equal to the one in bucket,"
                " %d != %d." % (len(decoder_inputs), decoder_size)
            )
        if len(decoder_weights) != decoder_size:
            raise ValueError(
                "Weights length must be equal to the one in bucket,"
                " %d != %d." % (len(decoder_weights), decoder_size)
            )

        # Feed each time step's placeholder.
        input_feed = {}
        for i in range(encoder_size):
            input_feed[self.encoder_inputs[i].name] = encoder_inputs[i]
        for i in range(decoder_size):
            input_feed[self.decoder_inputs[i].name] = decoder_inputs[i]
            input_feed[self.decoder_weights[i].name] = decoder_weights[i]
        # The extra decoder input (targets shift) is fed with zeros.
        last_target = self.decoder_inputs[decoder_size].name
        input_feed[last_target] = np.zeros([self.batch_size], dtype=np.int32)

        if not forward_only:
            output_feed = [
                self.updates[bucket_id],
                self.gradient_norms[bucket_id],
                self.losses[bucket_id]
            ]
            # NOTE(review): `i` here is left over from the feed loop above
            # (i == decoder_size - 1), so only the LAST decoder output is
            # fetched during training — confirm this is intentional and not
            # a missing loop.
            output_feed.append(self.outputs[bucket_id][i])
        else:
            output_feed = [self.losses[bucket_id]]
            for i in range(decoder_size):
                output_feed.append(self.outputs[bucket_id][i])

        outputs = session.run(output_feed, input_feed)
        if not forward_only:
            return outputs[1], outputs[2], outputs[3:]
        else:
            return None, outputs[0], outputs[1:]

    def get_batch_data(self, bucket_dbs, bucket_id):
        """Sample batch_size (ask, answer) pairs from a bucket database.

        Returns two lists: the pairs as sampled, and the same pairs with
        ask/answer swapped (for training the reverse direction).
        """
        data = []
        data_in = []
        bucket_db = bucket_dbs[bucket_id]
        for _ in range(self.batch_size):
            ask, answer = bucket_db.random()
            data.append((ask, answer))
            data_in.append((answer, ask))
        return data, data_in

    def get_batch(self, bucket_dbs, bucket_id, data):
        """Convert (ask, answer) sentence pairs into time-major model input.

        Encoder inputs are padded and reversed; decoder inputs are wrapped
        in GO/EOS and padded. Returns (batch_encoder_inputs,
        batch_decoder_inputs, batch_weights), each a list over time steps of
        arrays of shape [batch_size].
        """
        encoder_size, decoder_size = self.buckets[bucket_id]
        # bucket_db = bucket_dbs[bucket_id]
        encoder_inputs, decoder_inputs = [], []
        for encoder_input, decoder_input in data:
            # encoder_input, decoder_input = random.choice(data[bucket_id])
            # encoder_input, decoder_input = bucket_db.random()
            encoder_input = data_utils.sentence_indice(encoder_input)
            decoder_input = data_utils.sentence_indice(decoder_input)
            # Encoder: pad, then reverse (standard seq2seq trick).
            encoder_pad = [data_utils.PAD_ID] * (
                encoder_size - len(encoder_input)
            )
            encoder_inputs.append(list(reversed(encoder_input + encoder_pad)))
            # Decoder: GO + sentence + EOS + padding.
            decoder_pad_size = decoder_size - len(decoder_input) - 2
            decoder_inputs.append(
                [data_utils.GO_ID] + decoder_input +
                [data_utils.EOS_ID] +
                [data_utils.PAD_ID] * decoder_pad_size
            )
        batch_encoder_inputs, batch_decoder_inputs, batch_weights = [], [], []
        # batch encoder: transpose to time-major.
        for i in range(encoder_size):
            batch_encoder_inputs.append(np.array(
                [encoder_inputs[j][i] for j in range(self.batch_size)],
                dtype=np.int32
            ))
        # batch decoder: transpose to time-major and build loss weights.
        for i in range(decoder_size):
            batch_decoder_inputs.append(np.array(
                [decoder_inputs[j][i] for j in range(self.batch_size)],
                dtype=np.int32
            ))
            batch_weight = np.ones(self.batch_size, dtype=np.float32)
            for j in range(self.batch_size):
                # Zero the weight when the *target* (next decoder input) is
                # padding, or at the final step which has no target.
                if i < decoder_size - 1:
                    target = decoder_inputs[j][i + 1]
                if i == decoder_size - 1 or target == data_utils.PAD_ID:
                    batch_weight[j] = 0.0
            batch_weights.append(batch_weight)
        return batch_encoder_inputs, batch_decoder_inputs, batch_weights
def create_model(forward_only, args):
    """Build an S2SModel configured from parsed command-line arguments.

    Args:
        forward_only: build inference-only graph when True.
        args: namespace providing size, dropout, num_layers,
            max_gradient_norm, batch_size, learning_rate, num_samples
            and use_fp16.

    Returns:
        A freshly constructed S2SModel.
    """
    if args.use_fp16:
        dtype = tf.float16
    else:
        dtype = tf.float32
    return S2SModel(
        data_utils.dim,
        data_utils.dim,
        data_utils.buckets,
        args.size,
        args.dropout,
        args.num_layers,
        args.max_gradient_norm,
        args.batch_size,
        args.learning_rate,
        args.num_samples,
        forward_only,
        dtype,
    )
bernardokyotoku/skillplant | django/db/models/aggregates.py | 12 | 2168 | """
Classes to represent the definitions of aggregate functions.
"""
class Aggregate(object):
    """Generic definition of an aggregate function.

    Subclasses supply ``name``, which identifies both the aggregate and
    the backend implementation class looked up in
    ``query.aggregates_module``.
    """

    def __init__(self, lookup, **extra):
        """Instantiate a new aggregate.

        * lookup is the field on which the aggregate operates.
        * extra is a dictionary of additional data to provide for the
          aggregate definition.

        Also utilizes the class variables:
        * name, the identifier for this aggregate function.
        """
        self.lookup = lookup
        self.extra = extra

    def _default_alias(self):
        # e.g. lookup "price" with name "Avg" becomes "price__avg".
        return '%s__%s' % (self.lookup, self.name.lower())
    default_alias = property(_default_alias)

    def add_to_query(self, query, alias, col, source, is_summary):
        """Add the aggregate to the nominated query.

        Converts this generic definition into a backend-specific one by
        instantiating the class of the same name from
        ``query.aggregates_module``.

        * query is the backend-specific query instance to which the
          aggregate is to be added.
        * col is a column reference describing the subject field of the
          aggregate (an alias, or a (table, column) tuple).
        * source is the underlying field or aggregate definition for the
          column reference, used to determine the coerced output type.
        * is_summary is True if the aggregate is a summary value rather
          than an annotation.
        """
        backend_class = getattr(query.aggregates_module, self.name)
        backend_aggregate = backend_class(
            col, source=source, is_summary=is_summary, **self.extra)
        query.aggregates[alias] = backend_aggregate
# Concrete aggregate definitions. Each subclass only needs to supply
# ``name``; Aggregate.add_to_query resolves the matching backend class
# from query.aggregates_module by that name.
class Avg(Aggregate):
    name = 'Avg'

class Count(Aggregate):
    name = 'Count'

class Max(Aggregate):
    name = 'Max'

class Min(Aggregate):
    name = 'Min'

class StdDev(Aggregate):
    name = 'StdDev'

class Sum(Aggregate):
    name = 'Sum'

class Variance(Aggregate):
    name = 'Variance'
| bsd-3-clause |
Alwnikrotikz/pyglet | tools/genmpkg/bdist_mpkg_pyglet/pkg.py | 26 | 6256 | import os
import sys
from cStringIO import StringIO
from distutils.dir_util import mkpath
from distutils.file_util import copy_file
from bdist_mpkg_pyglet import tools, plists
from bdist_mpkg_pyglet.util import copy_tree
from bdist_mpkg_pyglet.templates import InstallationCheck
def write_template(template, dest, mkpath=mkpath):
    """Write an InstallationCheck script and its English strings file.

    template is a (script, strings) pair: the InstallationCheck shell
    script body and the localized strings content. Fix: the original used
    a Python-2-only tuple parameter and the old `0111` octal literal, both
    of which are hard syntax errors in Python 3; `0o111` and explicit
    unpacking behave identically on Python 2.6+.
    """
    script, strings = template
    spath = os.path.join(dest, 'InstallationCheck')
    f = open(spath, 'w')
    f.write(script.encode('utf8'))
    f.close()
    # Mark the script executable for user, group and other.
    os.chmod(spath, os.stat(spath)[0] | 0o111)
    lproj = os.path.join(dest, 'English.lproj')
    mkpath(lproj)
    spath = os.path.join(lproj, 'InstallationCheck.strings')
    f = open(spath, 'w')
    f.write(strings.encode('utf16'))
    f.close()
def write_sizes(count, size, compressed, pkgdir):
    """Write the Archive.sizes metadata file into the package resources.

    Records the file count, installed size, and compressed size in the
    simple key/value format the macOS Installer expects.
    """
    sizes_path = os.path.join(pkgdir, 'Contents', 'Resources', 'Archive.sizes')
    content = 'NumFiles %d\nInstalledSize %d\nCompressedSize %d' % (
        count, size, compressed)
    sizes_file = open(sizes_path, 'w')
    sizes_file.write(content)
    sizes_file.close()
# Documentation file extensions accepted by copy_doc: readable text formats
# and image formats (for the installer background).
TEXT_EXTS = '.rtfd', '.rtf', '.html', '.txt'
IMAGE_EXTS = '.tiff', '.png', '.jpg', '.pdf'
def write_pkginfo(pkgdir):
    """Write the standard Contents/PkgInfo marker file ('pmkrpkg1')."""
    pkginfo_path = os.path.join(pkgdir, 'Contents', 'PkgInfo')
    pkginfo_file = open(pkginfo_path, 'w')
    pkginfo_file.write('pmkrpkg1')
    pkginfo_file.close()
def try_exts(path, exts=TEXT_EXTS):
    """Return the first existing file sharing *path*'s base name but with
    one of the extensions in *exts*, or None when no candidate exists."""
    base = os.path.splitext(path)[0]
    for ext in exts:
        candidate = base + ext
        if os.path.exists(candidate):
            return candidate
    return None
def copy_doc(path, name, pkgdir, exts=TEXT_EXTS, language=None, dry_run=0,
        copy_tree=copy_tree, copy_file=copy_file, mkpath=mkpath):
    """Copy a documentation file (or in-memory text) into package resources.

    * path is a filesystem path, a StringIO-like object (anything with
      getvalue()), or None (no-op).
    * name is the destination base name; the source extension is kept.
    * exts is the allowed extension whitelist; anything else raises
      ValueError.
    * language, when given, places the file under <language>.lproj.
    """
    if path is None:
        return
    # StringIO-like objects expose getvalue(); treat them as raw text.
    is_string = hasattr(path, 'getvalue')
    if is_string:
        ext = '.txt'
    else:
        ext = os.path.splitext(path)[1].lower()
        if ext == '':
            ext = '.txt'
    if ext not in exts:
        raise ValueError('Invalid extension for %s' % (path,))
    destdir = os.path.join(pkgdir, 'Contents', 'Resources')
    if language is not None:
        destdir = os.path.join(destdir, language + '.lproj')
    mkpath(destdir)
    dest = os.path.join(destdir, name + ext)
    if is_string:
        if not dry_run:
            # NOTE: file() is the Python 2 builtin alias for open().
            f = file(dest, 'wb')
            f.write(path.getvalue())
            f.close()
    elif ext == '.rtfd':
        # .rtfd bundles are directories, so copy recursively.
        copy_tree(path, dest)
    else:
        copy_file(path, dest)
def make_metapackage(cmd, name, version, packages, pkgdir,
        info=(), description=None):
    """Assemble an mpkg metapackage directory referencing sub-packages.

    * cmd is the bdist_mpkg command object supplying options (license,
      readme, welcome, background, template, dry_run) and file helpers.
    * packages is the list of sub-packages recorded in Info.plist.
    * info is extra key/value data merged into Info.plist.
    """
    license = cmd.license
    readme = cmd.readme
    welcome = cmd.welcome
    background = cmd.background
    template = cmd.template
    dry_run = cmd.dry_run
    dist = cmd.distribution
    copy_tree = cmd.copy_tree
    copy_file = cmd.copy_file
    mkpath = cmd.mkpath
    # Fall back to the distribution description, then to "name version".
    if description is None:
        description = dist.get_description()
    if not description:
        description = u'%s %s' % (name, version)
    mkpath(os.path.join(pkgdir, 'Contents', 'Resources'))
    if not dry_run:
        write_pkginfo(pkgdir)
    ninfo = plists.mpkg_info(name, version, packages)
    ninfo.update(dict(info))
    if not dry_run:
        plists.write(ninfo, os.path.join(pkgdir, 'Contents', 'Info.plist'))
    desc = plists.common_description(name+' '+version, version)
    if description:
        desc['IFPkgDescriptionDescription'] = description
    if not dry_run:
        plists.write(
            desc,
            os.path.join(pkgdir, 'Contents', 'Resources', 'Description.plist')
        )
    # Use a built-in InstallationCheck template when the named template is
    # not a real path; otherwise copy the user-supplied template tree.
    template_dest = os.path.join(pkgdir, 'Contents', 'Resources')
    if not os.path.exists(template) and template in InstallationCheck:
        write_template(InstallationCheck[template], template_dest,
            mkpath=mkpath)
    else:
        copy_tree(template, template_dest)
    # Default the ReadMe to the distribution's long description.
    if readme is None:
        readme_text = dist.get_long_description()
        if readme_text:
            readme = StringIO(readme_text)
    def doc(path, name, exts=TEXT_EXTS):
        # Helper binding the common copy_doc arguments for this package.
        copy_doc(path, name, pkgdir, exts=exts, dry_run=dry_run,
            mkpath=mkpath, copy_tree=copy_tree, copy_file=copy_file,
            )
    doc(readme, 'ReadMe')
    doc(license, 'License')
    doc(welcome, 'Welcome')
    doc(background, 'background', exts=IMAGE_EXTS)
def make_package(cmd, name, version, files, common, prefix, pkgdir,
        info=(), description=None):
    """Assemble a single installer package directory with an archived payload.

    * files is the list of payload files (used for size accounting).
    * common is the payload root handed to mkbom/pax.
    * prefix is the install destination recorded in Info.plist.
    * info is extra key/value data merged into Info.plist.
    """
    license = cmd.license
    readme = cmd.readme
    welcome = cmd.welcome
    background = cmd.background
    template = cmd.template
    dry_run = cmd.dry_run
    dist = cmd.distribution
    copy_tree = cmd.copy_tree
    copy_file = cmd.copy_file
    mkpath = cmd.mkpath
    if description is None:
        description = dist.get_description()
    mkpath(os.path.join(pkgdir, 'Contents', 'Resources'))
    if not dry_run:
        write_pkginfo(pkgdir)
        tools.mkbom(common, pkgdir)
    count = len(files)
    # NOTE(review): the writability check was disabled and admin access is
    # always assumed — confirm tools.admin_writable(prefix) should be used.
    admin = True #tools.admin_writable(prefix)
    size = tools.reduce_size(files)
    compressed = tools.pax(common, pkgdir)
    if not dry_run:
        write_sizes(count, size, compressed, pkgdir)
    if admin:
        auth = u'AdminAuthorization'
    else:
        auth = u'RootAuthorization'
    ninfo = plists.pkg_info(name, version)
    ninfo.update(dict(
        IFPkgFlagAuthorizationAction=auth,
        IFPkgFlagDefaultLocation=tools.unicode_path(prefix),
    ))
    ninfo.update(dict(info))
    if not dry_run:
        plists.write(ninfo, os.path.join(pkgdir, 'Contents', 'Info.plist'))
    desc = plists.common_description(name, version)
    if description is not None:
        desc['IFPkgDescriptionDescription'] = description
    if not dry_run:
        plists.write(
            desc,
            os.path.join(pkgdir, 'Contents', 'Resources', 'Description.plist')
        )
    # Use a built-in InstallationCheck template when the named template is
    # not a real path; otherwise copy the user-supplied template tree.
    template_dest = os.path.join(pkgdir, 'Contents', 'Resources')
    if not os.path.exists(template) and template in InstallationCheck:
        write_template(InstallationCheck[template], template_dest,
            mkpath=mkpath)
    else:
        copy_tree(template, template_dest)
    def doc(path, name, exts=TEXT_EXTS):
        # Helper binding the common copy_doc arguments for this package.
        copy_doc(path, name, pkgdir, exts=exts, dry_run=dry_run,
            mkpath=mkpath, copy_tree=copy_tree, copy_file=copy_file,
            )
    doc(readme, 'ReadMe')
    doc(license, 'License')
    doc(welcome, 'Welcome')
    doc(background, 'background', exts=IMAGE_EXTS)
| bsd-3-clause |
laosiaudi/tensorflow | tensorflow/python/ops/functional_ops.py | 8 | 23708 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Functional operations.
## Higher Order Operators
TensorFlow provides several higher order operators to simplify the common
map-reduce programming patterns.
@@map_fn
@@foldl
@@foldr
@@scan
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_functional_ops import *
# pylint: enable=wildcard-import
# pylint: disable=unused-import
from tensorflow.python.ops.gen_functional_ops import _symbolic_gradient
# pylint: enable=unused-import
from tensorflow.python.util import nest
# TODO(yuanbyu, mrry): Handle stride to support sliding windows.
def foldl(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
          swap_memory=False, name=None):
  """foldl on the list of tensors unpacked from `elems` on dimension 0.
  This foldl operator repeatedly applies the callable `fn` to a sequence
  of elements from first to last. The elements are made of the tensors
  unpacked from `elems` on dimension 0. The callable fn takes two tensors as
  arguments. The first argument is the accumulated value computed from the
  preceding invocation of fn. If `initializer` is None, `elems` must contain
  at least one element, and its first element is used as the initializer.
  Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
  of the result tensor is `fn(initializer, values[0]).shape`.
  Args:
    fn: The callable to be performed.
    elems: A tensor to be unpacked on dimension 0.
    initializer: (optional) The initial value for the accumulator.
    parallel_iterations: (optional) The number of iterations allowed to run
      in parallel.
    back_prop: (optional) True enables support for back propagation.
    swap_memory: (optional) True enables GPU-CPU memory swapping.
    name: (optional) Name prefix for the returned tensors.
  Returns:
    A tensor resulting from applying `fn` consecutively to the list of tensors
    unpacked from `elems`, from first to last.
  Raises:
    TypeError: if `fn` is not callable.
  Example:
    ```python
    elems = [1, 2, 3, 4, 5, 6]
    sum = foldl(lambda a, x: a + x, elems)
    # sum == 21
    ```
  """
  if not callable(fn):
    raise TypeError("fn must be callable.")
  with ops.name_scope(name, "foldl", [elems]):
    # Any get_variable calls in fn will cache the first call locally
    # and not issue repeated network I/O requests for each iteration.
    varscope = vs.get_variable_scope()
    varscope_caching_device_was_none = False
    if varscope.caching_device is None:
      # TODO(ebrevdo): Change to using colocate_with here and in other methods.
      varscope.set_caching_device(lambda op: op.device)
      varscope_caching_device_was_none = True
    # Convert elems to tensor array so individual elements can be read
    # inside the while_loop body.
    elems = ops.convert_to_tensor(elems, name="elems")
    n = array_ops.shape(elems)[0]
    elems_ta = tensor_array_ops.TensorArray(dtype=elems.dtype, size=n,
                                            dynamic_size=False,
                                            infer_shape=True)
    elems_ta = elems_ta.unpack(elems)
    if initializer is None:
      # Seed the accumulator with the first element; the loop then starts
      # from index 1.
      a = elems_ta.read(0)
      i = constant_op.constant(1)
    else:
      a = ops.convert_to_tensor(initializer)
      i = constant_op.constant(0)
    def compute(i, a):
      # One fold step: combine accumulator `a` with element i, advance i.
      a = fn(a, elems_ta.read(i))
      return [i + 1, a]
    _, r_a = control_flow_ops.while_loop(
        lambda i, a: i < n, compute, [i, a],
        parallel_iterations=parallel_iterations,
        back_prop=back_prop,
        swap_memory=swap_memory)
    # Restore the caching device we overrode above so later graph
    # construction is unaffected.
    if varscope_caching_device_was_none:
      varscope.set_caching_device(None)
    return r_a
def foldr(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
          swap_memory=False, name=None):
  """foldr on the list of tensors unpacked from `elems` on dimension 0.
  This foldr operator repeatedly applies the callable `fn` to a sequence
  of elements from last to first. The elements are made of the tensors
  unpacked from `elems`. The callable fn takes two tensors as arguments.
  The first argument is the accumulated value computed from the preceding
  invocation of fn. If `initializer` is None, `elems` must contain at least
  one element, and its last element is used as the initializer.
  Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
  of the result tensor is `fn(initializer, values[0]).shape`.
  Args:
    fn: The callable to be performed.
    elems: A tensor that is unpacked into a sequence of tensors to apply `fn`.
    initializer: (optional) The initial value for the accumulator.
    parallel_iterations: (optional) The number of iterations allowed to run
      in parallel.
    back_prop: (optional) True enables support for back propagation.
    swap_memory: (optional) True enables GPU-CPU memory swapping.
    name: (optional) Name prefix for the returned tensors.
  Returns:
    A tensor resulting from applying `fn` consecutively to the list of tensors
    unpacked from `elems`, from last to first.
  Raises:
    TypeError: if `fn` is not callable.
  Example:
    ```python
    elems = [1, 2, 3, 4, 5, 6]
    sum = foldr(lambda a, x: a + x, elems)
    # sum == 21
    ```
  """
  if not callable(fn):
    raise TypeError("fn must be callable.")
  with ops.name_scope(name, "foldr", [elems]):
    # Any get_variable calls in fn will cache the first call locally
    # and not issue repeated network I/O requests for each iteration.
    varscope = vs.get_variable_scope()
    varscope_caching_device_was_none = False
    if varscope.caching_device is None:
      # TODO(ebrevdo): Change to using colocate_with here and in other methods.
      varscope.set_caching_device(lambda op: op.device)
      varscope_caching_device_was_none = True
    # Convert elems to tensor array.
    elems = ops.convert_to_tensor(elems, name="elems")
    n = array_ops.shape(elems)[0]
    elems_ta = tensor_array_ops.TensorArray(dtype=elems.dtype, size=n,
                                            dynamic_size=False,
                                            infer_shape=True)
    elems_ta = elems_ta.unpack(elems)
    if initializer is None:
      # Seed the accumulator with the LAST element (foldr walks backwards),
      # so the loop starts from index n - 1.
      i = n - 1
      a = elems_ta.read(i)
    else:
      i = n
      a = ops.convert_to_tensor(initializer)
    def compute(i, a):
      # One fold step: move backwards one index, then combine.
      i -= 1
      a = fn(a, elems_ta.read(i))
      return [i, a]
    _, r_a = control_flow_ops.while_loop(
        lambda i, a: i > 0, compute, [i, a],
        parallel_iterations=parallel_iterations,
        back_prop=back_prop,
        swap_memory=swap_memory)
    # Restore the caching device we overrode above.
    if varscope_caching_device_was_none:
      varscope.set_caching_device(None)
    return r_a
def map_fn(fn, elems, dtype=None, parallel_iterations=10, back_prop=True,
           swap_memory=False, infer_shape=True, name=None):
  """map on the list of tensors unpacked from `elems` on dimension 0.
  The simplest version of `map` repeatedly applies the callable `fn` to a
  sequence of elements from first to last. The elements are made of the
  tensors unpacked from `elems`. `dtype` is the data type of the return
  value of `fn`. Users must provide `dtype` if it is different from
  the data type of `elems`.
  Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
  of the result tensor is `[values.shape[0]] + fn(values[0]).shape`.
  This method also allows multi-arity `elems` and output of `fn`. If `elems`
  is a (possibly nested) list or tuple of tensors, then each of these tensors
  must have a matching first (unpack) dimension. The signature of `fn` may
  match the structure of `elems`. That is, if `elems` is
  `(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:
  `fn = lambda (t1, [t2, t3, [t4, t5]]):`.
  Furthermore, `fn` may emit a different structure than its input. For example,
  `fn` may look like: `fn = lambda t1: return (t1 + 1, t1 - 1)`. In this case,
  the `dtype` parameter is not optional: `dtype` must be a type or (possibly
  nested) tuple of types matching the output of `fn`.
  To apply a functional operation to the nonzero elements of a SparseTensor
  one of the following methods is recommended. First, if the function is
  expressible as TensorFlow ops, use
  ```python
    result = SparseTensor(input.indices, fn(input.values), input.shape)
  ```
  If, however, the function is not expressible as a TensorFlow op, then use
  ```python
  result = SparseTensor(input.indices, map_fn(fn, input.values), input.shape)
  ```
  instead.
  Args:
    fn: The callable to be performed. It accepts one argument, which will
      have the same (possibly nested) structure as `elems`. Its output
      must have the same structure as `dtype` if one is provided, otherwise
      it must have the same structure as `elems`.
    elems: A tensor or (possibly nested) sequence of tensors, each of which
      will be unpacked along their first dimension. The nested sequence
      of the resulting slices will be applied to `fn`.
    dtype: (optional) The output type(s) of `fn`. If `fn` returns a structure
      of Tensors differing from the structure of `elems`, then `dtype` is not
      optional and must have the same structure as the output of `fn`.
    parallel_iterations: (optional) The number of iterations allowed to run
      in parallel.
    back_prop: (optional) True enables support for back propagation.
    swap_memory: (optional) True enables GPU-CPU memory swapping.
    infer_shape: (optional) False disables tests for consistent output shapes.
    name: (optional) Name prefix for the returned tensors.
  Returns:
    A tensor or (possibly nested) sequence of tensors. Each tensor packs the
    results of applying `fn` to tensors unpacked from `elems` along the first
    dimension, from first to last.
  Raises:
    TypeError: if `fn` is not callable or the structure of the output of
      `fn` and `dtype` do not match, or if elems is a SparseTensor.
    ValueError: if the lengths of the output of `fn` and `dtype` do not match.
  Examples:
    ```python
    elems = np.array([1, 2, 3, 4, 5, 6])
    squares = map_fn(lambda x: x * x, elems)
    # squares == [1, 4, 9, 16, 25, 36]
    ```
    ```python
    elems = (np.array([1, 2, 3]), np.array([-1, 1, -1]))
    alternate = map_fn(lambda x: x[0] * x[1], elems, dtype=tf.int64)
    # alternate == [-1, 2, -3]
    ```
    ```python
    elems = np.array([1, 2, 3])
    alternates = map_fn(lambda x: (x, -x), elems, dtype=(tf.int64, tf.int64))
    # alternates[0] == [1, 2, 3]
    # alternates[1] == [-1, -2, -3]
    ```
  """
  if not callable(fn):
    raise TypeError("fn must be callable.")
  if isinstance(elems, sparse_tensor.SparseTensor):
    raise TypeError(
        "To perform a map on the values of a sparse tensor use either "
        " SparseTensor(input.indices, fn(input.values), input.shape) or "
        " SparseTensor(input.indices, map_fn(fn, input.values), input.shape)")
  # Helpers to move between the user-facing (possibly nested) structure and
  # the flat list representation used internally by the loop.
  input_is_sequence = nest.is_sequence(elems)
  input_flatten = lambda x: nest.flatten(x) if input_is_sequence else [x]
  def input_pack(x):
    return nest.pack_sequence_as(elems, x) if input_is_sequence else x[0]
  if dtype is None:
    # No explicit output types: fn's output is assumed to mirror its input.
    output_is_sequence = input_is_sequence
    output_flatten = input_flatten
    output_pack = input_pack
  else:
    output_is_sequence = nest.is_sequence(dtype)
    output_flatten = lambda x: nest.flatten(x) if output_is_sequence else [x]
    def output_pack(x):
      return (nest.pack_sequence_as(dtype, x)
              if output_is_sequence else x[0])
  elems_flat = input_flatten(elems)
  with ops.name_scope(name, "map", elems_flat):
    # Any get_variable calls in fn will cache the first call locally
    # and not issue repeated network I/O requests for each iteration.
    varscope = vs.get_variable_scope()
    varscope_caching_device_was_none = False
    if varscope.caching_device is None:
      # TODO(ebrevdo): Change to using colocate_with here and in other methods.
      varscope.set_caching_device(lambda op: op.device)
      varscope_caching_device_was_none = True
    elems_flat = [
        ops.convert_to_tensor(elem, name="elem") for elem in elems_flat]
    # If dtype was not given, derive it from the input element dtypes.
    dtype = dtype or input_pack([elem.dtype for elem in elems_flat])
    dtype_flat = output_flatten(dtype)
    # Convert elems to tensor array.
    n = array_ops.shape(elems_flat[0])[0]
    # TensorArrays are always flat
    elems_ta = [
        tensor_array_ops.TensorArray(dtype=elem.dtype, size=n,
                                     dynamic_size=False,
                                     infer_shape=True)
        for elem in elems_flat]
    # Unpack elements
    elems_ta = [
        elem_ta.unpack(elem) for elem_ta, elem in zip(elems_ta, elems_flat)]
    i = constant_op.constant(0)
    # One output accumulator TensorArray per flat output tensor.
    accs_ta = [
        tensor_array_ops.TensorArray(dtype=dt, size=n,
                                     dynamic_size=False,
                                     infer_shape=infer_shape)
        for dt in dtype_flat]
    def compute(i, tas):
      """The loop body of map_fn.
      Args:
        i: the loop counter
        tas: the flat TensorArray accumulator list
      Returns:
        (i + 1, tas): the updated counter + updated TensorArrays
      Raises:
        TypeError: if dtype and packed_fn_values structure do not match
        ValueType: if dtype and packed_fn_values lengths do not match
      """
      # Re-pack the flat slices into the user's structure, apply fn, then
      # flatten fn's output back for storage in the accumulators.
      packed_values = input_pack([elem_ta.read(i) for elem_ta in elems_ta])
      packed_fn_values = fn(packed_values)
      nest.assert_same_structure(dtype or elems, packed_fn_values)
      flat_fn_values = output_flatten(packed_fn_values)
      tas = [ta.write(i, value) for (ta, value) in zip(tas, flat_fn_values)]
      return (i + 1, tas)
    _, r_a = control_flow_ops.while_loop(
        lambda i, _: i < n, compute, (i, accs_ta),
        parallel_iterations=parallel_iterations,
        back_prop=back_prop,
        swap_memory=swap_memory)
    results_flat = [r.pack() for r in r_a]
    # Propagate any statically-known leading dimension onto the results.
    n_static = elems_flat[0].get_shape().with_rank_at_least(1)[0]
    for elem in elems_flat[1:]:
      n_static.merge_with(elem.get_shape().with_rank_at_least(1)[0])
    for r in results_flat:
      r.set_shape(tensor_shape.TensorShape(n_static).concatenate(
          r.get_shape()[1:]))
    # Restore the caching device we overrode above.
    if varscope_caching_device_was_none:
      varscope.set_caching_device(None)
    return output_pack(results_flat)
def scan(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
         swap_memory=False, infer_shape=True, name=None):
  """scan on the list of tensors unpacked from `elems` on dimension 0.
  The simplest version of `scan` repeatedly applies the callable `fn` to a
  sequence of elements from first to last. The elements are made of the tensors
  unpacked from `elems` on dimension 0. The callable fn takes two tensors as
  arguments. The first argument is the accumulated value computed from the
  preceding invocation of fn. If `initializer` is None, `elems` must contain
  at least one element, and its first element is used as the initializer.
  Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
  of the result tensor is `[len(values)] + fn(initializer, values[0]).shape`.
  This method also allows multi-arity `elems` and accumulator. If `elems`
  is a (possibly nested) list or tuple of tensors, then each of these tensors
  must have a matching first (unpack) dimension. The second argument of
  `fn` must match the structure of `elems`.
  If no `initializer` is provided, the output structure and dtypes of `fn`
  are assumed to be the same as its input; and in this case, the first
  argument of `fn` must match the structure of `elems`.
  If an `initializer` is provided, then the output of `fn` must have the same
  structure as `initializer`; and the first argument of `fn` must match
  this structure.
  For example, if `elems` is `(t1, [t2, t3])` and `initializer` is
  `[i1, i2]` then an appropriate signature for `fn` in `python2` is:
  `fn = lambda (acc_p1, acc_p2), (t1 [t2, t3]):` and `fn` must return a list,
  `[acc_n1, acc_n2]`. An alternative correct signature for `fn`, and the
  one that works in `python3`, is:
  `fn = lambda a, t:`, where `a` and `t` correspond to the input tuples.
  Args:
    fn: The callable to be performed. It accepts two arguments. The first
      will have the same structure as `initializer` if one is provided,
      otherwise it will have the same structure as `elems`. The second
      will have the same (possibly nested) structure as `elems`. Its output
      must have the same structure as `initializer` if one is provided,
      otherwise it must have the same structure as `elems`.
    elems: A tensor or (possibly nested) sequence of tensors, each of which
      will be unpacked along their first dimension. The nested sequence
      of the resulting slices will be the first argument to `fn`.
    initializer: (optional) A tensor or (possibly nested) sequence of tensors,
      initial value for the accumulator, and the expected output type of `fn`.
    parallel_iterations: (optional) The number of iterations allowed to run
      in parallel.
    back_prop: (optional) True enables support for back propagation.
    swap_memory: (optional) True enables GPU-CPU memory swapping.
    infer_shape: (optional) False disables tests for consistent output shapes.
    name: (optional) Name prefix for the returned tensors.
  Returns:
    A tensor or (possibly nested) sequence of tensors. Each tensor packs the
    results of applying `fn` to tensors unpacked from `elems` along the first
    dimension, and the previous accumulator value(s), from first to last.
  Raises:
    TypeError: if `fn` is not callable or the structure of the output of
      `fn` and `initializer` do not match.
    ValueError: if the lengths of the output of `fn` and `initializer`
      do not match.
  Examples:
    ```python
    elems = np.array([1, 2, 3, 4, 5, 6])
    sum = scan(lambda a, x: a + x, elems)
    # sum == [1, 3, 6, 10, 15, 21]
    ```
    ```python
    elems = np.array([1, 2, 3, 4, 5, 6])
    initializer = np.array(0)
    sum_one = scan(
        lambda a, x: x[0] - x[1] + a, (elems + 1, elems), initializer)
    # sum_one == [1, 2, 3, 4, 5, 6]
    ```
    ```python
    elems = np.array([1, 0, 0, 0, 0, 0])
    initializer = (np.array(0), np.array(1))
    fibonaccis = scan(lambda a, _: (a[1], a[0] + a[1]), elems, initializer)
    # fibonaccis == ([1, 1, 2, 3, 5, 8], [1, 2, 3, 5, 8, 13])
    ```
  """
  if not callable(fn):
    raise TypeError("fn must be callable.")
  # Helpers to move between the user-facing (possibly nested) structure and
  # the flat list representation used internally by the loop.
  input_is_sequence = nest.is_sequence(elems)
  input_flatten = lambda x: nest.flatten(x) if input_is_sequence else [x]
  def input_pack(x):
    return nest.pack_sequence_as(elems, x) if input_is_sequence else x[0]
  if initializer is None:
    # No initializer: the accumulator mirrors the input structure.
    output_is_sequence = input_is_sequence
    output_flatten = input_flatten
    output_pack = input_pack
  else:
    # With an initializer, the accumulator mirrors the initializer structure.
    output_is_sequence = nest.is_sequence(initializer)
    output_flatten = lambda x: nest.flatten(x) if output_is_sequence else [x]
    def output_pack(x):
      return (nest.pack_sequence_as(initializer, x)
              if output_is_sequence else x[0])
  elems_flat = input_flatten(elems)
  with ops.name_scope(name, "scan", elems_flat):
    # Any get_variable calls in fn will cache the first call locally
    # and not issue repeated network I/O requests for each iteration.
    varscope = vs.get_variable_scope()
    varscope_caching_device_was_none = False
    if varscope.caching_device is None:
      # TODO(ebrevdo): Change to using colocate_with here and in other methods.
      varscope.set_caching_device(lambda op: op.device)
      varscope_caching_device_was_none = True
    # Convert elems to tensor array.
    elems_flat = [
        ops.convert_to_tensor(elem, name="elem") for elem in elems_flat]
    n = array_ops.shape(elems_flat[0])[0]
    # TensorArrays are always flat
    elems_ta = [
        tensor_array_ops.TensorArray(dtype=elem.dtype, size=n,
                                     dynamic_size=False,
                                     infer_shape=True)
        for elem in elems_flat]
    # Unpack elements
    elems_ta = [
        elem_ta.unpack(elem) for elem_ta, elem in zip(elems_ta, elems_flat)]
    if initializer is None:
      # Seed the accumulator from the first element and start the loop at 1.
      a_flat = [elem.read(0) for elem in elems_ta]
      i = constant_op.constant(1)
    else:
      initializer_flat = output_flatten(initializer)
      a_flat = [ops.convert_to_tensor(init) for init in initializer_flat]
      i = constant_op.constant(0)
    # Create a tensor array to store the intermediate values.
    accs_ta = [
        tensor_array_ops.TensorArray(dtype=init.dtype, size=n,
                                     dynamic_size=False,
                                     infer_shape=infer_shape)
        for init in a_flat]
    if initializer is None:
      # The first element doubles as the first output row.
      accs_ta = [acc_ta.write(0, a) for (acc_ta, a) in zip(accs_ta, a_flat)]
    def compute(i, a_flat, tas):
      """The loop body of scan.
      Args:
        i: the loop counter.
        a_flat: the accumulator value(s), flattened.
        tas: the output accumulator TensorArray(s), flattened.
      Returns:
        [i + 1, a_flat, tas]: the updated counter + new accumulator values +
          updated TensorArrays
      Raises:
        TypeError: if initializer and fn() output structure do not match
        ValueType: if initializer and fn() output lengths do not match
      """
      packed_elems = input_pack([elem_ta.read(i) for elem_ta in elems_ta])
      packed_a = output_pack(a_flat)
      a_out = fn(packed_a, packed_elems)
      nest.assert_same_structure(
          elems if initializer is None else initializer, a_out)
      flat_a_out = output_flatten(a_out)
      tas = [ta.write(i, value) for (ta, value) in zip(tas, flat_a_out)]
      return (i + 1, flat_a_out, tas)
    _, _, r_a = control_flow_ops.while_loop(
        lambda i, _1, _2: i < n, compute, (i, a_flat, accs_ta),
        parallel_iterations=parallel_iterations,
        back_prop=back_prop, swap_memory=swap_memory)
    results_flat = [r.pack() for r in r_a]
    # Propagate any statically-known leading dimension onto the results.
    n_static = elems_flat[0].get_shape().with_rank_at_least(1)[0]
    for elem in elems_flat[1:]:
      n_static.merge_with(elem.get_shape().with_rank_at_least(1)[0])
    for r in results_flat:
      r.set_shape(tensor_shape.TensorShape(n_static).concatenate(
          r.get_shape()[1:]))
    # Restore the caching device we overrode above.
    if varscope_caching_device_was_none:
      varscope.set_caching_device(None)
    return output_pack(results_flat)
| apache-2.0 |
liosha2007/temporary-groupdocs-python-sdk | groupdocs/models/GetJobsDocumentsResponse.py | 1 | 1166 | #!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class GetJobsDocumentsResponse:
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually."""

    def __init__(self):
        # Maps each response attribute name to its swagger type name; used by
        # the API client when (de)serializing responses.
        self.swaggerTypes = {
            'result': 'GetJobsDocumentsResult',
            'status': 'str',
            'error_message': 'str',
            'composedOn': 'long'
        }
        # All payload attributes start out unset until the response is parsed.
        for attr_name in self.swaggerTypes:
            setattr(self, attr_name, None)
| apache-2.0 |
dbbhattacharya/kitsune | vendor/packages/translate-toolkit/translate/convert/symb2po.py | 7 | 3784 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert Symbian localisation files to Gettext PO localization files."""
from translate.storage import factory
from translate.storage.pypo import extractpoline
from translate.storage.symbian import *
def read_header_items(ps):
    """Read the key/value items out of the file's leading header comment.

    :param ps: the ParseState whose cursor is at (or before) the header.
    :return: dict mapping header item keys to their values; {} if the
        end-of-comment marker is hit before any item.
    """
    # Advance until a line matches either a header item or the end of the
    # header comment (assumes read_while leaves ps positioned on the
    # matching line -- TODO confirm against translate.storage.symbian).
    match = read_while(ps, header_item_or_end_re.match, lambda match: match is None)
    if match.groupdict()['end_comment'] is not None:
        return {}
    results = {}
    while match:
        match_chunks = match.groupdict()
        ps.read_line()
        results[match_chunks['key']] = match_chunks['value']
        match = header_item_re.match(ps.current_line)
    # Skip the remainder of the comment up to (and past) its closing '*/'.
    match = read_while(ps, identity, lambda line: not line.startswith('*/'))
    ps.read_line()
    return results
def parse(ps):
    """Parse a Symbian localisation file into header and translation units.

    :param ps: the ParseState to consume.
    :return: tuple of (header dict, list of (resource id, unescaped string)).
    """
    header = read_header_items(ps)
    units = []
    try:
        # Loop until the parse state raises StopIteration at end of input
        # (EAFP-style termination; presumably raised by ps.read_line() or
        # the helpers -- confirm against ParseState).
        while True:
            eat_whitespace(ps)
            skip_no_translate(ps)
            match = string_entry_re.match(ps.current_line)
            if match is not None:
                # extractpoline unescapes the quoted string value.
                units.append((match.groupdict()['id'], extractpoline(match.groupdict()['str'])))
            ps.read_line()
    except StopIteration:
        pass
    return header, units
def read_symbian(f):
    """Read and parse the Symbian localisation file *f*.

    :param f: an iterable of lines (e.g. an open file object).
    :return: tuple of (header dict, list of (resource id, string)).
    """
    all_lines = list(f)
    return parse(ParseState(iter(all_lines), read_charset(all_lines)))
def get_template_dict(template_file):
    """Load the translation template, if one was given.

    :param template_file: an open template file, or None.
    :return: tuple of (template header dict, id -> translated string dict);
        two empty dicts when no template file was supplied.
    """
    if template_file is None:
        return {}, {}
    template_header, template_units = read_symbian(template_file)
    return template_header, dict(template_units)
def build_output(units, template_header, template_dict):
    """Build a PO store from parsed units, pulling targets from the template.

    :param units: list of (resource id, source string) pairs.
    :param template_header: header dict from the template file.
    :param template_dict: id -> translated string mapping from the template.
    :return: the populated PO storage object.
    """
    store = factory.classes['po']()
    store.updateheader(add=True, **{
        'Last-Translator': template_header.get('Author', ''),
        'Language-Team': template_dict.get('r_string_languagegroup_name', ''),
        'Content-Transfer-Encoding': '8bit',
        'Content-Type': 'text/plain; charset=UTF-8',
    })
    # The language-group pseudo-entry only feeds the header; don't emit it.
    skipped = {'r_string_languagegroup_name'}
    for unit_id, source in units:
        if unit_id in skipped:
            continue
        po_unit = store.UnitClass(source)
        po_unit.target = template_dict.get(unit_id, '')
        po_unit.addlocation(unit_id)
        store.addunit(po_unit)
    return store
def convert_symbian(input_file, output_file, template_file, pot=False, duplicatestyle="msgctxt"):
    """Convert a single Symbian localisation file to a PO file.

    :param input_file: open Symbian source file.
    :param output_file: file object the PO output is written to.
    :param template_file: optional open template file, or None.
    :param pot: accepted for converter-framework compatibility (unused here).
    :param duplicatestyle: accepted for framework compatibility (unused here).
    :return: 1 if output was written, 0 if the resulting store was empty.
    """
    _unused_header, source_units = read_symbian(input_file)
    tpl_header, tpl_dict = get_template_dict(template_file)
    store = build_output(source_units, tpl_header, tpl_dict)
    if store.isempty():
        return 0
    output_file.write(str(store))
    return 1
def main(argv=None):
    """Command-line entry point: wire the .r01 -> .po conversion into the
    toolkit's standard converter option parser."""
    from translate.convert import convert
    option_parser = convert.ConvertOptionParser(
        {"r01": ("po", convert_symbian)},
        usetemplates=True, usepots=True, description=__doc__)
    option_parser.add_duplicates_option()
    option_parser.passthrough.append("pot")
    option_parser.run(argv)
# Allow running this converter directly as a script.
if __name__ == '__main__':
    main()
| bsd-3-clause |
krinart/AutobahnPython | autobahn/autobahn/wamp/protocol.py | 4 | 42459 | ###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
import inspect
import six
from autobahn.wamp.interfaces import ISession, \
IPublication, \
IPublisher, \
ISubscription, \
ISubscriber, \
ICaller, \
IRegistration, \
ICallee, \
ITransportHandler
from autobahn import util
from autobahn import wamp
from autobahn.wamp import uri
from autobahn.wamp import message
from autobahn.wamp import types
from autobahn.wamp import role
from autobahn.wamp import exception
from autobahn.wamp.exception import ProtocolError, SessionNotReady
from autobahn.wamp.types import SessionDetails
class Endpoint:
   """
   Internal record for a procedure endpoint registered via this session:
   the bound object (if any), the endpoint callable, the procedure URI
   and the registration options.
   """

   def __init__(self, obj, fn, procedure, options = None):
      """
      :param obj: Object instance the callable is bound to, or None.
      :param fn: The endpoint callable.
      :param procedure: The procedure URI the endpoint is registered under.
      :param options: Registration options, or None.
      """
      self.obj, self.fn = obj, fn
      self.procedure, self.options = procedure, options
class Handler:
   """
   Internal record for an event handler subscribed via this session:
   the bound object (if any), the handler callable, the topic URI and
   the (optional) keyword-argument name used to pass event details.
   """

   def __init__(self, obj, fn, topic, details_arg = None):
      """
      :param obj: Object instance the callable is bound to, or None.
      :param fn: The event handler callable.
      :param topic: The topic URI the handler is subscribed to.
      :param details_arg: Keyword name to deliver event details under, or None.
      """
      self.obj, self.fn = obj, fn
      self.topic, self.details_arg = topic, details_arg
class Publication:
   """
   Object representing a publication.
   This class implements :class:`autobahn.wamp.interfaces.IPublication`.
   """

   def __init__(self, publicationId):
      # WAMP publication ID assigned by the broker for this publication.
      self.id = publicationId
IPublication.register(Publication)
class Subscription:
   """
   Object representing a subscription.
   This class implements :class:`autobahn.wamp.interfaces.ISubscription`.
   """

   def __init__(self, session, subscriptionId):
      """
      :param session: The session this subscription was made on (used to
         route the unsubscribe request later).
      :param subscriptionId: The WAMP subscription ID assigned by the broker.
      """
      self.id = subscriptionId
      self.active = True
      self._session = session

   def unsubscribe(self):
      """
      Implements :func:`autobahn.wamp.interfaces.ISubscription.unsubscribe`
      Delegates to the owning session.
      """
      return self._session._unsubscribe(self)
ISubscription.register(Subscription)
class Registration:
   """
   Object representing a registration.
   This class implements :class:`autobahn.wamp.interfaces.IRegistration`.
   """

   def __init__(self, session, registrationId):
      """
      :param session: The session this registration was made on (used to
         route the unregister request later).
      :param registrationId: The WAMP registration ID assigned by the dealer.
      """
      self.id = registrationId
      self.active = True
      self._session = session

   def unregister(self):
      """
      Implements :func:`autobahn.wamp.interfaces.IRegistration.unregister`
      Delegates to the owning session.
      """
      return self._session._unregister(self)
IRegistration.register(Registration)
class BaseSession:
   """
   WAMP session base class.
   This class implements:
   * :class:`autobahn.wamp.interfaces.ISession`
   """

   def __init__(self):
      """
      Ctor.
      """
      self.debug = False
      # Maps user exception class -> list of URI patterns it was defined under.
      self._ecls_to_uri_pat = {}
      # Reverse map: error URI -> user exception class, used when converting
      # incoming ERROR messages back into exceptions.
      self._uri_to_ecls = {}

   def onConnect(self):
      """
      Implements :func:`autobahn.wamp.interfaces.ISession.onConnect`
      """

   def onJoin(self, details):
      """
      Implements :func:`autobahn.wamp.interfaces.ISession.onJoin`
      """

   def onLeave(self, details):
      """
      Implements :func:`autobahn.wamp.interfaces.ISession.onLeave`
      """

   def onDisconnect(self):
      """
      Implements :func:`autobahn.wamp.interfaces.ISession.onDisconnect`
      """

   def define(self, exception, error = None):
      """
      Implements :func:`autobahn.wamp.interfaces.ISession.define`
      :param exception: The exception class to map to/from WAMP error URIs.
      :param error: Error URI to associate with the class, or None when the
         class is already decorated with `_wampuris`.
      """
      if error is None:
         # Class was decorated (e.g. via @wamp.error) and carries its URIs.
         assert(hasattr(exception, '_wampuris'))
         self._ecls_to_uri_pat[exception] = exception._wampuris
         self._uri_to_ecls[exception._wampuris[0].uri()] = exception
      else:
         # Undecorated class: bind it to the explicitly given error URI.
         assert(not hasattr(exception, '_wampuris'))
         self._ecls_to_uri_pat[exception] = [uri.Pattern(six.u(error), uri.Pattern.URI_TARGET_HANDLER)]
         self._uri_to_ecls[six.u(error)] = exception

   def _message_from_exception(self, request_type, request, exc):
      """
      Create a WAMP error message from an exception.
      :param request_type: The type code of the WAMP message the error replies to.
      :type request_type: int
      :param request: The request ID this WAMP error message is for.
      :type request: int
      :param exc: The exception.
      :type exc: Instance of :class:`Exception` or subclass thereof.
      :returns: An :class:`autobahn.wamp.message.Error` carrying the error
         URI plus any positional/keyword payload found on the exception.
      """
      if isinstance(exc, exception.ApplicationError):
         msg = message.Error(request_type, request, six.u(exc.error), args = list(exc.args), kwargs = exc.kwargs)
      else:
         # Map a user-defined exception class to its registered URI, falling
         # back to the generic runtime error URI.
         if exc.__class__ in self._ecls_to_uri_pat:
            error = self._ecls_to_uri_pat[exc.__class__][0]._uri
         else:
            error = u"wamp.error.runtime_error"
         # Forward whatever payload the exception carries.
         if hasattr(exc, 'args'):
            if hasattr(exc, 'kwargs'):
               msg = message.Error(request_type, request, error, args = exc.args, kwargs = exc.kwargs)
            else:
               msg = message.Error(request_type, request, error, args = exc.args)
         else:
            msg = message.Error(request_type, request, error)
      return msg

   def _exception_from_message(self, msg):
      """
      Create a user (or generic) exception from a WAMP error message.
      :param msg: A WAMP error message.
      :type msg: Instance of :class:`autobahn.wamp.message.Error`
      :returns: An exception instance -- an instance of the user class
         registered for the error URI when possible, otherwise an
         :class:`autobahn.wamp.exception.ApplicationError`.
      """
      # FIXME:
      # 1. map to ecls based on error URI wildcard/prefix
      # 2. extract additional args/kwargs from error URI
      exc = None
      if msg.error in self._uri_to_ecls:
         ecls = self._uri_to_ecls[msg.error]
         try:
            ## the following might fail, eg. TypeError when
            ## signature of exception constructor is incompatible
            ## with args/kwargs or when the exception constructor raises
            if msg.kwargs:
               if msg.args:
                  exc = ecls(*msg.args, **msg.kwargs)
               else:
                  exc = ecls(**msg.kwargs)
            else:
               if msg.args:
                  exc = ecls(*msg.args)
               else:
                  exc = ecls()
         except Exception as e:
            ## FIXME: log e
            pass
      if not exc:
         ## the following ctor never fails ..
         if msg.kwargs:
            if msg.args:
               exc = exception.ApplicationError(msg.error, *msg.args, **msg.kwargs)
            else:
               exc = exception.ApplicationError(msg.error, **msg.kwargs)
         else:
            if msg.args:
               exc = exception.ApplicationError(msg.error, *msg.args)
            else:
               exc = exception.ApplicationError(msg.error)
      return exc
ISession.register(BaseSession)
class ApplicationSession(BaseSession):
"""
WAMP endpoint session.
This class implements:
* :class:`autobahn.wamp.interfaces.IPublisher`
* :class:`autobahn.wamp.interfaces.ISubscriber`
* :class:`autobahn.wamp.interfaces.ICaller`
* :class:`autobahn.wamp.interfaces.ICallee`
* :class:`autobahn.wamp.interfaces.ITransportHandler`
"""
   def __init__(self, config = types.ComponentConfig(u"anonymous")):
      """
      Constructor.
      :param config: Component configuration (provides the realm to join).
         NOTE(review): the default ComponentConfig instance is created once
         at class-definition time and shared across all calls that omit
         `config` -- confirm ComponentConfig is treated as immutable.
      """
      BaseSession.__init__(self)
      self.config = config
      self._transport = None
      self._session_id = None
      self._realm = None
      self._session_id = None   # NOTE(review): duplicate of the assignment above
      self._goodbye_sent = False
      self._transport_is_closing = False
      ## outstanding requests (request ID -> pending request state)
      self._publish_reqs = {}
      self._subscribe_reqs = {}
      self._unsubscribe_reqs = {}
      self._call_reqs = {}
      self._register_reqs = {}
      self._unregister_reqs = {}
      ## subscriptions in place (subscription ID -> Handler)
      self._subscriptions = {}
      ## registrations in place (registration ID -> Endpoint)
      self._registrations = {}
      ## incoming invocations
      self._invocations = {}
      self.debug_app = False
   def onOpen(self, transport):
      """
      Implements :func:`autobahn.wamp.interfaces.ITransportHandler.onOpen`
      :param transport: The WAMP transport that was just established.
      """
      # Remember the transport, then start the WAMP opening handshake.
      self._transport = transport
      self.onConnect()
   def onConnect(self):
      """
      Implements :func:`autobahn.wamp.interfaces.ISession.onConnect`
      Default behavior: immediately join the realm from the component config.
      """
      self.join(self.config.realm)
   def join(self, realm):
      """
      Implements :func:`autobahn.wamp.interfaces.ISession.join`
      Sends a HELLO announcing all four client roles.
      :param realm: The WAMP realm to join (byte strings are coerced to
         unicode on Python 2).
      :raises Exception: if the session has already joined a realm.
      """
      if six.PY2 and type(realm) == str:
         realm = six.u(realm)
      if self._session_id:
         raise Exception("already joined")
      self._goodbye_sent = False
      # Announce this client as publisher, subscriber, caller and callee.
      roles = [
         role.RolePublisherFeatures(),
         role.RoleSubscriberFeatures(),
         role.RoleCallerFeatures(),
         role.RoleCalleeFeatures()
      ]
      msg = message.Hello(realm, roles)
      self._realm = realm
      self._transport.send(msg)
def disconnect(self):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.disconnect`
"""
if self._transport:
self._transport.close()
else:
raise Exception("transport disconnected")
def onMessage(self, msg):
    """
    Implements :func:`autobahn.wamp.interfaces.ITransportHandler.onMessage`

    Dispatches one incoming WAMP message. Before the session is established,
    only WELCOME is accepted; afterwards each message type is routed to the
    matching pending request or handler.

    :param msg: The WAMP message received.
    :raises ProtocolError: on messages that violate the WAMP protocol flow.
    """
    if self._session_id is None:

        ## the first message MUST be WELCOME
        if isinstance(msg, message.Welcome):
            self._session_id = msg.session

            details = SessionDetails(self._realm, self._session_id, msg.authid, msg.authrole, msg.authmethod)
            self._as_future(self.onJoin, details)
        else:
            raise ProtocolError("Received {} message, and session is not yet established".format(msg.__class__))

    else:

        if isinstance(msg, message.Goodbye):
            if not self._goodbye_sent:
                ## the peer wants to close: send GOODBYE reply
                reply = message.Goodbye()
                self._transport.send(reply)

            self._session_id = None

            ## fire callback and close the transport
            self.onLeave(types.CloseDetails(msg.reason, msg.message))

        ## consumer messages
        ##
        elif isinstance(msg, message.Event):

            if msg.subscription in self._subscriptions:

                handler = self._subscriptions[msg.subscription]

                ## inject EventDetails if the handler asked for them
                if handler.details_arg:
                    if not msg.kwargs:
                        msg.kwargs = {}
                    msg.kwargs[handler.details_arg] = types.EventDetails(publication = msg.publication, publisher = msg.publisher)

                try:
                    if handler.obj:
                        if msg.kwargs:
                            if msg.args:
                                handler.fn(handler.obj, *msg.args, **msg.kwargs)
                            else:
                                handler.fn(handler.obj, **msg.kwargs)
                        else:
                            if msg.args:
                                handler.fn(handler.obj, *msg.args)
                            else:
                                handler.fn(handler.obj)
                    else:
                        if msg.kwargs:
                            if msg.args:
                                handler.fn(*msg.args, **msg.kwargs)
                            else:
                                handler.fn(**msg.kwargs)
                        else:
                            if msg.args:
                                handler.fn(*msg.args)
                            else:
                                handler.fn()
                except Exception as e:
                    if self.debug_app:
                        print("Failure while firing event handler {} subscribed under '{}' ({}):".format(handler.fn, handler.topic, msg.subscription))
                        ## FIX: the original printed the undefined name 'err'
                        ## (NameError); the caught exception is bound as 'e'.
                        print(e)

            else:
                raise ProtocolError("EVENT received for non-subscribed subscription ID {}".format(msg.subscription))

        elif isinstance(msg, message.Published):

            if msg.request in self._publish_reqs:
                d, opts = self._publish_reqs.pop(msg.request)
                p = Publication(msg.publication)
                self._resolve_future(d, p)
            else:
                raise ProtocolError("PUBLISHED received for non-pending request ID {}".format(msg.request))

        elif isinstance(msg, message.Subscribed):

            if msg.request in self._subscribe_reqs:
                d, obj, fn, topic, options = self._subscribe_reqs.pop(msg.request)
                if options:
                    self._subscriptions[msg.subscription] = Handler(obj, fn, topic, options.details_arg)
                else:
                    self._subscriptions[msg.subscription] = Handler(obj, fn, topic)
                s = Subscription(self, msg.subscription)
                self._resolve_future(d, s)
            else:
                raise ProtocolError("SUBSCRIBED received for non-pending request ID {}".format(msg.request))

        elif isinstance(msg, message.Unsubscribed):

            if msg.request in self._unsubscribe_reqs:
                d, subscription = self._unsubscribe_reqs.pop(msg.request)
                if subscription.id in self._subscriptions:
                    del self._subscriptions[subscription.id]
                subscription.active = False
                self._resolve_future(d, None)
            else:
                raise ProtocolError("UNSUBSCRIBED received for non-pending request ID {}".format(msg.request))

        elif isinstance(msg, message.Result):

            if msg.request in self._call_reqs:

                if msg.progress:

                    ## progressive result
                    ##
                    _, opts = self._call_reqs[msg.request]
                    if opts.onProgress:
                        try:
                            if msg.kwargs:
                                if msg.args:
                                    opts.onProgress(*msg.args, **msg.kwargs)
                                else:
                                    opts.onProgress(**msg.kwargs)
                            else:
                                if msg.args:
                                    opts.onProgress(*msg.args)
                                else:
                                    opts.onProgress()
                        except Exception as e:
                            ## silently drop exceptions raised in progressive results handlers
                            if self.debug:
                                print("Exception raised in progressive results handler: {}".format(e))
                    else:
                        ## silently ignore progressive results
                        pass

                else:

                    ## final result
                    ##
                    d, opts = self._call_reqs.pop(msg.request)
                    if msg.kwargs:
                        if msg.args:
                            res = types.CallResult(*msg.args, **msg.kwargs)
                        else:
                            res = types.CallResult(**msg.kwargs)
                        self._resolve_future(d, res)
                    else:
                        if msg.args:
                            if len(msg.args) > 1:
                                res = types.CallResult(*msg.args)
                                self._resolve_future(d, res)
                            else:
                                ## single positional result: unwrap it
                                self._resolve_future(d, msg.args[0])
                        else:
                            self._resolve_future(d, None)
            else:
                raise ProtocolError("RESULT received for non-pending request ID {}".format(msg.request))

        elif isinstance(msg, message.Invocation):

            if msg.request in self._invocations:
                raise ProtocolError("INVOCATION received for request ID {} already invoked".format(msg.request))
            else:
                if msg.registration not in self._registrations:
                    raise ProtocolError("INVOCATION received for non-registered registration ID {}".format(msg.registration))
                else:
                    endpoint = self._registrations[msg.registration]

                    ## inject CallDetails (with progress callback) if requested
                    if endpoint.options and endpoint.options.details_arg:
                        if not msg.kwargs:
                            msg.kwargs = {}

                        if msg.receive_progress:
                            def progress(*args, **kwargs):
                                progress_msg = message.Yield(msg.request, args = args, kwargs = kwargs, progress = True)
                                self._transport.send(progress_msg)
                        else:
                            progress = None

                        msg.kwargs[endpoint.options.details_arg] = types.CallDetails(progress, caller = msg.caller, authid = msg.authid, authrole = msg.authrole, authmethod = msg.authmethod)

                    if endpoint.obj:
                        if msg.kwargs:
                            if msg.args:
                                d = self._as_future(endpoint.fn, endpoint.obj, *msg.args, **msg.kwargs)
                            else:
                                d = self._as_future(endpoint.fn, endpoint.obj, **msg.kwargs)
                        else:
                            if msg.args:
                                d = self._as_future(endpoint.fn, endpoint.obj, *msg.args)
                            else:
                                d = self._as_future(endpoint.fn, endpoint.obj)
                    else:
                        if msg.kwargs:
                            if msg.args:
                                d = self._as_future(endpoint.fn, *msg.args, **msg.kwargs)
                            else:
                                d = self._as_future(endpoint.fn, **msg.kwargs)
                        else:
                            if msg.args:
                                d = self._as_future(endpoint.fn, *msg.args)
                            else:
                                d = self._as_future(endpoint.fn)

                    def success(res):
                        del self._invocations[msg.request]

                        if isinstance(res, types.CallResult):
                            reply = message.Yield(msg.request, args = res.results, kwargs = res.kwresults)
                        else:
                            reply = message.Yield(msg.request, args = [res])
                        self._transport.send(reply)

                    def error(err):
                        if self.debug_app:
                            print("Failure while invoking procedure {} registered under '{}' ({}):".format(endpoint.fn, endpoint.procedure, msg.registration))
                            print(err)

                        del self._invocations[msg.request]

                        ## unwrap Twisted-style Failure objects
                        if hasattr(err, 'value'):
                            exc = err.value
                        else:
                            exc = err
                        reply = self._message_from_exception(message.Invocation.MESSAGE_TYPE, msg.request, exc)
                        self._transport.send(reply)

                    self._invocations[msg.request] = d

                    self._add_future_callbacks(d, success, error)

        elif isinstance(msg, message.Interrupt):

            if msg.request not in self._invocations:
                raise ProtocolError("INTERRUPT received for non-pending invocation {}".format(msg.request))
            else:
                try:
                    self._invocations[msg.request].cancel()
                except Exception as e:
                    if self.debug:
                        print("could not cancel call {}".format(msg.request))
                finally:
                    del self._invocations[msg.request]

        elif isinstance(msg, message.Registered):

            if msg.request in self._register_reqs:
                d, obj, fn, procedure, options = self._register_reqs.pop(msg.request)
                self._registrations[msg.registration] = Endpoint(obj, fn, procedure, options)
                r = Registration(self, msg.registration)
                self._resolve_future(d, r)
            else:
                raise ProtocolError("REGISTERED received for non-pending request ID {}".format(msg.request))

        elif isinstance(msg, message.Unregistered):

            if msg.request in self._unregister_reqs:
                d, registration = self._unregister_reqs.pop(msg.request)
                if registration.id in self._registrations:
                    del self._registrations[registration.id]
                registration.active = False
                self._resolve_future(d, None)
            else:
                raise ProtocolError("UNREGISTERED received for non-pending request ID {}".format(msg.request))

        elif isinstance(msg, message.Error):

            d = None

            ## ERROR reply to PUBLISH
            ##
            if msg.request_type == message.Publish.MESSAGE_TYPE and msg.request in self._publish_reqs:
                d = self._publish_reqs.pop(msg.request)[0]

            ## ERROR reply to SUBSCRIBE
            ##
            elif msg.request_type == message.Subscribe.MESSAGE_TYPE and msg.request in self._subscribe_reqs:
                d = self._subscribe_reqs.pop(msg.request)[0]

            ## ERROR reply to UNSUBSCRIBE
            ##
            elif msg.request_type == message.Unsubscribe.MESSAGE_TYPE and msg.request in self._unsubscribe_reqs:
                d = self._unsubscribe_reqs.pop(msg.request)[0]

            ## ERROR reply to REGISTER
            ##
            elif msg.request_type == message.Register.MESSAGE_TYPE and msg.request in self._register_reqs:
                d = self._register_reqs.pop(msg.request)[0]

            ## ERROR reply to UNREGISTER
            ##
            elif msg.request_type == message.Unregister.MESSAGE_TYPE and msg.request in self._unregister_reqs:
                d = self._unregister_reqs.pop(msg.request)[0]

            ## ERROR reply to CALL
            ##
            elif msg.request_type == message.Call.MESSAGE_TYPE and msg.request in self._call_reqs:
                d = self._call_reqs.pop(msg.request)[0]

            if d:
                self._reject_future(d, self._exception_from_message(msg))
            else:
                raise ProtocolError("WampAppSession.onMessage(): ERROR received for non-pending request_type {} and request ID {}".format(msg.request_type, msg.request))

        elif isinstance(msg, message.Heartbeat):

            pass ## FIXME

        else:
            raise ProtocolError("Unexpected message {}".format(msg.__class__))
def onClose(self, wasClean):
    """
    Implements :func:`autobahn.wamp.interfaces.ITransportHandler.onClose`

    :param wasClean: True iff the transport was closed cleanly.
    """
    self._transport = None

    if self._session_id:
        ## transport was lost while the session was still established:
        ## fire onLeave ourselves, guarding against user-code errors
        try:
            self.onLeave(types.CloseDetails())
        except Exception as e:
            if self.debug:
                print("exception raised in onLeave callback: {}".format(e))
        self._session_id = None

    ## NOTE(review): indentation was lost in this copy -- onDisconnect() is
    ## assumed to run unconditionally (not only when a session existed); confirm.
    self.onDisconnect()
def onJoin(self, details):
    """
    Implements :func:`autobahn.wamp.interfaces.ISession.onJoin`

    Default implementation does nothing; subclasses override this to start
    their application logic once the session has been established.

    :param details: Session details (realm, session ID, auth info).
    :type details: instance of :class:`SessionDetails`
    """
def onLeave(self, details):
    """
    Implements :func:`autobahn.wamp.interfaces.ISession.onLeave`

    Default implementation simply disconnects the underlying transport.

    :param details: Close details (reason, message).
    :type details: instance of :class:`autobahn.wamp.types.CloseDetails`
    """
    self.disconnect()
def leave(self, reason = None, log_message = None):
    """
    Implements :func:`autobahn.wamp.interfaces.ISession.leave`

    Sends a GOODBYE to initiate session close.

    :param reason: Optional WAMP close URI (defaults to "wamp.close.normal").
    :param log_message: Optional human readable close message.
    :raises Exception: if no session is established.
    :raises SessionNotReady: if a GOODBYE was already sent.
    """
    if not self._session_id:
        raise Exception("not joined")

    if not self._goodbye_sent:
        if not reason:
            reason = u"wamp.close.normal"
        ## FIX: was ``wamp.message.Goodbye`` -- every other code path in this
        ## module uses the directly imported ``message`` module (the ``wamp``
        ## qualification is only needed where a parameter shadows ``message``).
        msg = message.Goodbye(reason = reason, message = log_message)
        self._transport.send(msg)
        self._goodbye_sent = True
    else:
        raise SessionNotReady("Already requested to close the session")
def publish(self, topic, *args, **kwargs):
    """
    Implements :func:`autobahn.wamp.interfaces.IPublisher.publish`

    Publishes an event. Returns a future only for acknowledged publishes
    (``options = PublishOptions(acknowledge = True)``), else None.

    :param topic: The URI of the topic to publish to.
    :raises exception.TransportLost: if the transport is gone.
    """
    if six.PY2 and type(topic) == str:
        topic = six.u(topic)
    assert(type(topic) == six.text_type)

    if not self._transport:
        raise exception.TransportLost()

    request = util.id()

    if 'options' in kwargs and isinstance(kwargs['options'], types.PublishOptions):
        opts = kwargs.pop('options')
        msg = message.Publish(request, topic, args = args, kwargs = kwargs, **opts.options)
    else:
        opts = None
        msg = message.Publish(request, topic, args = args, kwargs = kwargs)

    ## FIX: use .get() -- the original indexed opts.options['acknowledge'],
    ## which raises KeyError when PublishOptions was built without an
    ## 'acknowledge' entry; .get() yields None and falls through as before.
    if opts and opts.options.get('acknowledge') == True:
        ## acknowledged publish: resolve the returned future upon PUBLISHED
        d = self._create_future()
        self._publish_reqs[request] = d, opts
        self._transport.send(msg)
        return d
    else:
        ## fire-and-forget publish
        self._transport.send(msg)
        return
def subscribe(self, handler, topic = None, options = None):
    """
    Implements :func:`autobahn.wamp.interfaces.ISubscriber.subscribe`

    Subscribes either a single callable to a topic, or -- given an object --
    every method decorated with a handler URI pattern.
    """
    assert((callable(handler) and topic is not None) or hasattr(handler, '__class__'))
    if topic and six.PY2 and type(topic) == str:
        topic = six.u(topic)
    assert(topic is None or type(topic) == six.text_type)
    assert(options is None or isinstance(options, types.SubscribeOptions))

    if not self._transport:
        raise exception.TransportLost()

    def _do_subscribe(obj, fn, topic, options):
        ## track the pending SUBSCRIBE under a fresh request ID
        request = util.id()
        d = self._create_future()
        self._subscribe_reqs[request] = (d, obj, fn, topic, options)

        if options is None:
            sub_msg = message.Subscribe(request, topic)
        else:
            sub_msg = message.Subscribe(request, topic, **options.options)
        self._transport.send(sub_msg)
        return d

    if callable(handler):
        ## register a single handler
        ##
        return _do_subscribe(None, handler, topic, options)

    ## register all methods on an object decorated with "wamp.topic"
    ##
    def _is_func(member):
        return inspect.ismethod(member) or inspect.isfunction(member)

    futures = []
    for _, meth in inspect.getmembers(handler.__class__, _is_func):
        if "_wampuris" in meth.__dict__:
            pat = meth.__dict__["_wampuris"][0]
            if pat.is_handler():
                futures.append(_do_subscribe(handler, meth, pat.uri(), options))
    return self._gather_futures(futures, consume_exceptions = True)
def _unsubscribe(self, subscription):
    """
    Called from :meth:`autobahn.wamp.protocol.Subscription.unsubscribe`

    Sends an UNSUBSCRIBE for the given active subscription and returns a
    future resolved when the router confirms.
    """
    assert(isinstance(subscription, Subscription))
    assert(subscription.active)
    assert(subscription.id in self._subscriptions)

    if not self._transport:
        raise exception.TransportLost()

    request = util.id()
    on_reply = self._create_future()
    self._unsubscribe_reqs[request] = (on_reply, subscription)

    self._transport.send(message.Unsubscribe(request, subscription.id))
    return on_reply
def call(self, procedure, *args, **kwargs):
    """
    Implements :func:`autobahn.wamp.interfaces.ICaller.call`

    Issues a CALL for the given procedure URI; the returned future resolves
    with the call result (options may be passed via ``options = CallOptions(...)``).
    """
    if six.PY2 and type(procedure) == str:
        procedure = six.u(procedure)
    assert(isinstance(procedure, six.text_type))

    if not self._transport:
        raise exception.TransportLost()

    request = util.id()

    opts = None
    if 'options' in kwargs and isinstance(kwargs['options'], types.CallOptions):
        opts = kwargs.pop('options')
        call_msg = message.Call(request, procedure, args = args, kwargs = kwargs, **opts.options)
    else:
        call_msg = message.Call(request, procedure, args = args, kwargs = kwargs)

    ## FIXME: support call canceling (send message.Cancel when the returned
    ## future gets canceled)
    on_reply = self._create_future()
    self._call_reqs[request] = on_reply, opts
    self._transport.send(call_msg)
    return on_reply
def register(self, endpoint, procedure = None, options = None):
    """
    Implements :func:`autobahn.wamp.interfaces.ICallee.register`

    Registers either a single callable under a procedure URI, or -- given an
    object -- every method decorated with an endpoint URI pattern.
    """
    assert((callable(endpoint) and procedure is not None) or hasattr(endpoint, '__class__'))
    if procedure and six.PY2 and type(procedure) == str:
        procedure = six.u(procedure)
    assert(procedure is None or type(procedure) == six.text_type)
    assert(options is None or isinstance(options, types.RegisterOptions))

    if not self._transport:
        raise exception.TransportLost()

    def _do_register(obj, fn, procedure, options):
        ## track the pending REGISTER under a fresh request ID
        request = util.id()
        d = self._create_future()
        self._register_reqs[request] = (d, obj, fn, procedure, options)

        if options is None:
            reg_msg = message.Register(request, procedure)
        else:
            reg_msg = message.Register(request, procedure, **options.options)
        self._transport.send(reg_msg)
        return d

    if callable(endpoint):
        ## register a single callable
        ##
        return _do_register(None, endpoint, procedure, options)

    ## register all methods on an object decorated with "wamp.procedure"
    ##
    def _is_func(member):
        return inspect.ismethod(member) or inspect.isfunction(member)

    futures = []
    for _, meth in inspect.getmembers(endpoint.__class__, _is_func):
        if "_wampuris" in meth.__dict__:
            pat = meth.__dict__["_wampuris"][0]
            if pat.is_endpoint():
                futures.append(_do_register(endpoint, meth, pat.uri(), options))
    return self._gather_futures(futures, consume_exceptions = True)
def _unregister(self, registration):
    """
    Called from :meth:`autobahn.wamp.protocol.Registration.unregister`

    Sends an UNREGISTER for the given active registration and returns a
    future resolved when the router confirms.
    """
    assert(isinstance(registration, Registration))
    assert(registration.active)
    assert(registration.id in self._registrations)

    if not self._transport:
        raise exception.TransportLost()

    request = util.id()
    on_reply = self._create_future()
    self._unregister_reqs[request] = (on_reply, registration)

    self._transport.send(message.Unregister(request, registration.id))
    return on_reply
## register ApplicationSession as a virtual subclass of the WAMP role ABCs
## (abc registration -- no inheritance required)
IPublisher.register(ApplicationSession)
ISubscriber.register(ApplicationSession)
ICaller.register(ApplicationSession)
#ICallee.register(ApplicationSession) ## FIXME: ".register" collides with the ABC "register" method
ITransportHandler.register(ApplicationSession)
class ApplicationSessionFactory:
    """
    Factory for WAMP application sessions.

    Calling an instance produces a fresh session object of the class held in
    the ``session`` attribute, wired back to this factory.
    """

    ## WAMP application session class to be used in this factory.
    session = ApplicationSession

    def __init__(self, config = types.ComponentConfig(u"anonymous")):
        """
        :param config: The default component configuration.
        :type config: instance of :class:`autobahn.wamp.types.ComponentConfig`
        """
        self.config = config

    def __call__(self):
        """
        Creates a new WAMP application session.

        :returns: -- An instance of the WAMP application session class as
                     given by `self.session`.
        """
        new_session = self.session(self.config)
        new_session.factory = self
        return new_session
class RouterApplicationSession:
    """
    Wraps an application session to run directly attached to a WAMP router (broker+dealer).

    Acts as a fake in-process transport: messages the app session "sends" are
    routed straight into the router, and router output is delivered straight
    back via ``onMessage``.
    """

    def __init__(self, session, routerFactory):
        """
        Wrap an application session and add it to the given broker and dealer.

        :param session: Application session to wrap.
        :type session: An instance that implements :class:`autobahn.wamp.interfaces.ISession`
        :param routerFactory: The router factory to associate this session with.
        :type routerFactory: An instance that implements :class:`autobahn.wamp.interfaces.IRouterFactory`
        """
        ## remember router we are wrapping the app session for
        ##
        self._routerFactory = routerFactory
        self._router = None   # resolved lazily when HELLO names the realm

        ## remember wrapped app session
        ##
        self._session = session

        ## set fake transport on session ("pass-through transport")
        ##
        self._session._transport = self
        self._session.onConnect()

    def isOpen(self):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.isOpen`
        """

    def close(self):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.close`
        """
        if self._router:
            self._router.detach(self._session)

    def abort(self):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.abort`
        """

    def send(self, msg):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.send`

        Routes a message by direction: HELLO performs the (faked) opening
        handshake, app-to-router messages go into the router, and
        router-to-app messages are delivered back to the wrapped session.
        """
        if isinstance(msg, message.Hello):

            self._router = self._routerFactory.get(msg.realm)

            ## fake session ID assignment (normally done in WAMP opening handshake)
            self._session._session_id = util.id()

            ## add app session to router
            self._router.attach(self._session)

            ## fake app session open
            ##
            details = SessionDetails(self._session._realm, self._session._session_id)
            self._session._as_future(self._session.onJoin, details)
            #self._session.onJoin(details)

        ## app-to-router
        ##
        elif isinstance(msg, message.Publish) or \
             isinstance(msg, message.Subscribe) or \
             isinstance(msg, message.Unsubscribe) or \
             isinstance(msg, message.Call) or \
             isinstance(msg, message.Yield) or \
             isinstance(msg, message.Register) or \
             isinstance(msg, message.Unregister) or \
             isinstance(msg, message.Cancel) or \
             (isinstance(msg, message.Error) and
              msg.request_type == message.Invocation.MESSAGE_TYPE):

            ## deliver message to router
            ##
            self._router.process(self._session, msg)

        ## router-to-app
        ##
        elif isinstance(msg, message.Event) or \
             isinstance(msg, message.Invocation) or \
             isinstance(msg, message.Result) or \
             isinstance(msg, message.Published) or \
             isinstance(msg, message.Subscribed) or \
             isinstance(msg, message.Unsubscribed) or \
             isinstance(msg, message.Registered) or \
             isinstance(msg, message.Unregistered) or \
             (isinstance(msg, message.Error) and (
                 msg.request_type == message.Call.MESSAGE_TYPE or
                 msg.request_type == message.Cancel.MESSAGE_TYPE or
                 msg.request_type == message.Register.MESSAGE_TYPE or
                 msg.request_type == message.Unregister.MESSAGE_TYPE or
                 msg.request_type == message.Publish.MESSAGE_TYPE or
                 msg.request_type == message.Subscribe.MESSAGE_TYPE or
                 msg.request_type == message.Unsubscribe.MESSAGE_TYPE)):

            ## deliver message to app session
            ##
            self._session.onMessage(msg)

        else:
            ## should not arrive here
            ##
            raise Exception("RouterApplicationSession.send: unhandled message {}".format(msg))
class RouterSession(BaseSession):
    """
    WAMP router session.

    This class implements:

    * :class:`autobahn.wamp.interfaces.ITransportHandler`
    """

    def __init__(self, routerFactory):
        """
        Constructor.

        :param routerFactory: Factory used to look up the router for the
           realm requested in HELLO.
        """
        BaseSession.__init__(self)
        self._transport = None

        self._router_factory = routerFactory
        self._router = None
        self._realm = None

        self._goodbye_sent = False
        self._transport_is_closing = False

    def onOpen(self, transport):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransportHandler.onOpen`
        """
        self._transport = transport

        self._realm = None
        self._session_id = None

        ## session authentication information
        ##
        self._authid = None
        self._authrole = None
        self._authmethod = None

    def onHello(self, realm, details):
        ## authentication hook: default accepts any client anonymously
        return types.Accept()

    def onAuthenticate(self, signature, extra):
        ## challenge-response hook: default accepts any signature
        return types.Accept()

    def onMessage(self, msg):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransportHandler.onMessage`

        Before a session is established, only HELLO/AUTHENTICATE are handled
        (driving the authentication hooks); afterwards, GOODBYE and heartbeats
        are handled here and everything else is forwarded to the router.
        """
        if self._session_id is None:

            ## completes the opening handshake: attach to the router and send WELCOME
            def welcome(realm, authid = None, authrole = None, authmethod = None):
                self._session_id = util.id()
                self._goodbye_sent = False

                self._router = self._router_factory.get(realm)
                if not self._router:
                    raise Exception("no such realm")

                self._authid = authid
                self._authrole = authrole
                self._authmethod = authmethod

                roles = self._router.attach(self)

                msg = message.Welcome(self._session_id, roles, authid = authid, authrole = authrole, authmethod = authmethod)
                self._transport.send(msg)

                self.onJoin(SessionDetails(self._realm, self._session_id, self._authid, self._authrole, self._authmethod))

            ## the first message MUST be HELLO
            if isinstance(msg, message.Hello):

                self._realm = msg.realm

                details = types.HelloDetails(msg.roles, msg.authmethods)
                d = self._as_future(self.onHello, self._realm, details)

                def success(res):
                    msg = None
                    if isinstance(res, types.Accept):
                        welcome(self._realm, res.authid, res.authrole, res.authmethod)
                    elif isinstance(res, types.Challenge):
                        msg = message.Challenge(res.method, res.extra)
                    elif isinstance(res, types.Deny):
                        msg = message.Abort(res.reason, res.message)
                    else:
                        pass
                    if msg:
                        self._transport.send(msg)

                def failed(err):
                    print(err.value)

                self._add_future_callbacks(d, success, failed)

            elif isinstance(msg, message.Authenticate):

                d = self._as_future(self.onAuthenticate, msg.signature, {})

                def success(res):
                    msg = None
                    if isinstance(res, types.Accept):
                        welcome(self._realm, res.authid, res.authrole, res.authmethod)
                    elif isinstance(res, types.Deny):
                        msg = message.Abort(res.reason, res.message)
                    else:
                        pass
                    if msg:
                        self._transport.send(msg)

                def failed(err):
                    print(err.value)

                self._add_future_callbacks(d, success, failed)

            else:
                raise ProtocolError("Received {} message, and session is not yet established".format(msg.__class__))

        else:

            if isinstance(msg, message.Hello):
                raise ProtocolError("HELLO message received, while session is already established")

            elif isinstance(msg, message.Goodbye):
                if not self._goodbye_sent:
                    ## the peer wants to close: send GOODBYE reply
                    reply = message.Goodbye()
                    self._transport.send(reply)

                ## fire callback and close the transport
                self.onLeave(types.CloseDetails(msg.reason, msg.message))

                self._router.detach(self)
                self._session_id = None

                #self._transport.close()

            elif isinstance(msg, message.Heartbeat):

                pass ## FIXME

            else:
                ## any other in-session message is the router's business
                self._router.process(self, msg)

    def onClose(self, wasClean):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransportHandler.onClose`
        """
        self._transport = None

        if self._session_id:
            ## fire callback and close the transport
            try:
                self.onLeave(types.CloseDetails())
            except Exception as e:
                if self.debug:
                    print("exception raised in onLeave callback: {}".format(e))
            self._router.detach(self)
            self._session_id = None

        ## NOTE(review): indentation was lost in this copy -- the auth-info
        ## reset is assumed to run unconditionally, mirroring onOpen; confirm.
        self._authid = None
        self._authrole = None
        self._authmethod = None

    def onJoin(self, details):
        """
        Implements :func:`autobahn.wamp.interfaces.ISession.onJoin`
        """

    def onLeave(self, details):
        """
        Implements :func:`autobahn.wamp.interfaces.ISession.onLeave`
        """

    def leave(self, reason = None, message = None):
        """
        Implements :func:`autobahn.wamp.interfaces.ISession.leave`

        NOTE: the ``message`` parameter shadows the ``message`` module inside
        this method, hence the fully-qualified ``wamp.message.Goodbye`` below
        -- assumes ``wamp`` is imported at module level; confirm.
        """
        if not self._goodbye_sent:
            msg = wamp.message.Goodbye(reason = reason, message = message)
            self._transport.send(msg)
            self._goodbye_sent = True
        else:
            raise SessionNotReady("Already requested to close the session")


## register RouterSession as a virtual subclass of the transport-handler ABC
ITransportHandler.register(RouterSession)
class RouterSessionFactory:
    """
    WAMP router session factory.

    Creates router-side sessions (of the class held in ``session``) and can
    additionally host application sessions running embedded in the router.
    """

    session = RouterSession
    """
    WAMP router session class to be used in this factory.
    """

    def __init__(self, routerFactory):
        """
        Constructor.

        :param routerFactory: The router factory sessions created from this
           factory will be attached to.
        """
        self._routerFactory = routerFactory
        self._app_sessions = {}

    def add(self, session):
        """
        Adds a WAMP application session to run directly in this router.

        :param: session: A WAMP application session.
        :type session: A instance of a class that derives of :class:`autobahn.wamp.protocol.WampAppSession`
        """
        self._app_sessions[session] = RouterApplicationSession(session, self._routerFactory)

    def remove(self, session):
        """
        Removes a WAMP application session running directly in this router.
        """
        if session in self._app_sessions:
            self._app_sessions[session]._session.disconnect()
            del self._app_sessions[session]

    def __call__(self):
        """
        Creates a new WAMP router session.

        :returns: -- An instance of the WAMP router session class as
                     given by `self.session`.
        """
        session = self.session(self._routerFactory)
        ## FIX: the original assigned ``session.factory = session`` (session
        ## pointing at itself); sessions must reference the factory that
        ## created them, mirroring ApplicationSessionFactory.__call__.
        session.factory = self
        return session
| apache-2.0 |
Universal-Model-Converter/UMC3.0a | data/Python/x86/Lib/test/test_coding.py | 121 | 1212 |
import test.test_support, unittest
import os
class CodingTest(unittest.TestCase):
    """Tests that source files with a bogus 'coding' declaration fail cleanly."""

    def test_bad_coding(self):
        self.verify_bad_module('bad_coding')

    def test_bad_coding2(self):
        self.verify_bad_module('bad_coding2')

    def verify_bad_module(self, module_name):
        ## importing the broken module must raise SyntaxError ...
        self.assertRaises(SyntaxError, __import__, 'test.' + module_name)

        ## ... and so must compiling its raw source text directly
        source_path = os.path.join(os.path.dirname(__file__), module_name + '.py')
        with open(source_path) as fp:
            source = fp.read()
        self.assertRaises(SyntaxError, compile, source, source_path, 'exec')

    def test_error_from_string(self):
        # See http://bugs.python.org/issue6289
        encoded = u"# coding: ascii\n\N{SNOWMAN}".encode('utf-8')
        with self.assertRaises(SyntaxError) as ctx:
            compile(encoded, "<string>", "exec")
        expected = "'ascii' codec can't decode byte 0xe2 in position 16: " \
                   "ordinal not in range(128)"
        self.assertTrue(ctx.exception.args[0].startswith(expected))
def test_main():
    ## entry point used by CPython's regrtest framework
    test.test_support.run_unittest(CodingTest)

if __name__ == "__main__":
    test_main()
| mit |
nickweinberg/werewolf-slackbot | test_fixtures.py | 1 | 2584 | import copy
def get_empty_game_state():
    """Return a fresh, empty game-state dict (no players, no votes,
    game inactive, no round in progress)."""
    return {
        'players': {},
        'votes': {},
        'STATUS': 'INACTIVE',
        'ROUND': None,
    }
def all_vote_but_one_state():
    """Game-state fixture: a running day round in which everyone except the
    werewolf ('cd') has already voted for 'cd'.

    Returns a fresh dict on every call, so tests may mutate it freely.
    (The original wrapped the literal in copy.deepcopy, which is redundant:
    the literal is rebuilt from scratch on each call anyway.)
    """
    ## NOTE(review): player 'ab' uses key 'DM' while the others use 'dm' --
    ## preserved as-is since consumers may depend on it; confirm intended.
    return {
        'players': {
            'ab': {'name': 'nick', 'DM': 'dm channel', 'role': 'v', 'side': 'v', 'status': 'alive'},
            'cd': {'name': 'not_nick', 'dm': 'dm channel', 'role': 'w', 'side': 'w', 'status': 'alive'},
            'ef': {'name': 'maksym', 'dm': 'dm channel', 'role': 'v', 'side': 'v', 'status': 'alive'},
            'gh': {'name': 'who', 'dm': 'dm channel', 'role': 'v', 'side': 'v', 'status': 'alive'},
        },
        'votes': {
            'gh': 'cd',
            'ef': 'cd',
            'ab': 'cd',
        },
        'STATUS': 'RUNNING',
        'ROUND': 'day',
    }
def get_fake_game_state():
    """Game-state fixture: a running night round with four living players
    (one werewolf, 'cd') and no votes cast yet.

    Returns a fresh dict on every call, so tests may mutate it freely.
    (The original wrapped the literal in copy.deepcopy, which is redundant:
    the literal is rebuilt from scratch on each call anyway.)
    """
    ## NOTE(review): player 'ab' uses key 'DM' while the others use 'dm' --
    ## preserved as-is since consumers may depend on it; confirm intended.
    return {
        'players': {
            'ab': {'name': 'nick', 'DM': 'dm channel', 'role': 'v', 'side': 'v', 'status': 'alive'},
            'cd': {'name': 'not_nick', 'dm': 'dm channel', 'role': 'w', 'side': 'w', 'status': 'alive'},
            'ef': {'name': 'maksym', 'dm': 'dm channel', 'role': 'v', 'side': 'v', 'status': 'alive'},
            'gh': {'name': 'who', 'dm': 'dm channel', 'role': 'v', 'side': 'v', 'status': 'alive'},
        },
        'votes': {},
        'STATUS': 'RUNNING',
        'ROUND': 'night',
    }
| mit |
2014c2g4/c2g4 | wsgi/static/Brython2.1.0-20140419-113919/Lib/unittest/test/testmock/testmagicmethods.py | 737 | 12145 | import unittest
import inspect
import sys
from unittest.mock import Mock, MagicMock, _magics
class TestMockingMagicMethods(unittest.TestCase):
def test_deleting_magic_methods(self):
mock = Mock()
self.assertFalse(hasattr(mock, '__getitem__'))
mock.__getitem__ = Mock()
self.assertTrue(hasattr(mock, '__getitem__'))
del mock.__getitem__
self.assertFalse(hasattr(mock, '__getitem__'))
def test_magicmock_del(self):
mock = MagicMock()
# before using getitem
del mock.__getitem__
self.assertRaises(TypeError, lambda: mock['foo'])
mock = MagicMock()
# this time use it first
mock['foo']
del mock.__getitem__
self.assertRaises(TypeError, lambda: mock['foo'])
def test_magic_method_wrapping(self):
mock = Mock()
def f(self, name):
return self, 'fish'
mock.__getitem__ = f
self.assertFalse(mock.__getitem__ is f)
self.assertEqual(mock['foo'], (mock, 'fish'))
self.assertEqual(mock.__getitem__('foo'), (mock, 'fish'))
mock.__getitem__ = mock
self.assertTrue(mock.__getitem__ is mock)
def test_magic_methods_isolated_between_mocks(self):
mock1 = Mock()
mock2 = Mock()
mock1.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock1), [])
self.assertRaises(TypeError, lambda: list(mock2))
def test_repr(self):
mock = Mock()
self.assertEqual(repr(mock), "<Mock id='%s'>" % id(mock))
mock.__repr__ = lambda s: 'foo'
self.assertEqual(repr(mock), 'foo')
def test_str(self):
mock = Mock()
self.assertEqual(str(mock), object.__str__(mock))
mock.__str__ = lambda s: 'foo'
self.assertEqual(str(mock), 'foo')
def test_dict_methods(self):
mock = Mock()
self.assertRaises(TypeError, lambda: mock['foo'])
def _del():
del mock['foo']
def _set():
mock['foo'] = 3
self.assertRaises(TypeError, _del)
self.assertRaises(TypeError, _set)
_dict = {}
def getitem(s, name):
return _dict[name]
def setitem(s, name, value):
_dict[name] = value
def delitem(s, name):
del _dict[name]
mock.__setitem__ = setitem
mock.__getitem__ = getitem
mock.__delitem__ = delitem
self.assertRaises(KeyError, lambda: mock['foo'])
mock['foo'] = 'bar'
self.assertEqual(_dict, {'foo': 'bar'})
self.assertEqual(mock['foo'], 'bar')
del mock['foo']
self.assertEqual(_dict, {})
def test_numeric(self):
original = mock = Mock()
mock.value = 0
self.assertRaises(TypeError, lambda: mock + 3)
def add(self, other):
mock.value += other
return self
mock.__add__ = add
self.assertEqual(mock + 3, mock)
self.assertEqual(mock.value, 3)
del mock.__add__
def iadd(mock):
mock += 3
self.assertRaises(TypeError, iadd, mock)
mock.__iadd__ = add
mock += 6
self.assertEqual(mock, original)
self.assertEqual(mock.value, 9)
self.assertRaises(TypeError, lambda: 3 + mock)
mock.__radd__ = add
self.assertEqual(7 + mock, mock)
self.assertEqual(mock.value, 16)
def test_hash(self):
mock = Mock()
# test delegation
self.assertEqual(hash(mock), Mock.__hash__(mock))
def _hash(s):
return 3
mock.__hash__ = _hash
self.assertEqual(hash(mock), 3)
def test_nonzero(self):
    """Mocks are truthy by default; a __bool__ hook overrides that."""
    m = Mock()
    self.assertTrue(bool(m))

    m.__bool__ = lambda s: False
    self.assertFalse(bool(m))
def test_comparison(self):
    """Ordering comparisons: plain Mocks accept a user hook; MagicMocks
    (and objects) raise TypeError for unsupported orderings, as in Python 3."""
    mock = Mock()
    def comp(s, o):
        return True
    ## one hook serves all four ordering operators
    mock.__lt__ = mock.__gt__ = mock.__le__ = mock.__ge__ = comp
    self. assertTrue(mock < 3)
    self. assertTrue(mock > 3)
    self. assertTrue(mock <= 3)
    self. assertTrue(mock >= 3)

    ## without a hook, ordering a MagicMock against anything is a TypeError
    self.assertRaises(TypeError, lambda: MagicMock() < object())
    self.assertRaises(TypeError, lambda: object() < MagicMock())
    self.assertRaises(TypeError, lambda: MagicMock() < MagicMock())
    self.assertRaises(TypeError, lambda: MagicMock() > object())
    self.assertRaises(TypeError, lambda: object() > MagicMock())
    self.assertRaises(TypeError, lambda: MagicMock() > MagicMock())
    self.assertRaises(TypeError, lambda: MagicMock() <= object())
    self.assertRaises(TypeError, lambda: object() <= MagicMock())
    self.assertRaises(TypeError, lambda: MagicMock() <= MagicMock())
    self.assertRaises(TypeError, lambda: MagicMock() >= object())
    self.assertRaises(TypeError, lambda: object() >= MagicMock())
    self.assertRaises(TypeError, lambda: MagicMock() >= MagicMock())
def test_equality(self):
    """Default equality is identity-based and returns real bools; user
    hooks for __eq__/__ne__ take precedence."""
    for mock in Mock(), MagicMock():
        ## a mock equals only itself, and the results are genuine bools
        self.assertEqual(mock == mock, True)
        self.assertIsInstance(mock == mock, bool)
        self.assertEqual(mock != mock, False)
        self.assertIsInstance(mock != mock, bool)
        self.assertEqual(mock == object(), False)
        self.assertEqual(mock != object(), True)

        def eq(self, other):
            return other == 3
        mock.__eq__ = eq
        self.assertTrue(mock == 3)
        self.assertFalse(mock == 4)

        def ne(self, other):
            return other == 3
        mock.__ne__ = ne
        self.assertTrue(mock != 3)
        self.assertFalse(mock != 4)

    ## configured return values are coerced to bool for == / !=
    mock = MagicMock()
    mock.__eq__.return_value = True
    self.assertIsInstance(mock == 3, bool)
    self.assertEqual(mock == 3, True)

    mock.__ne__.return_value = False
    self.assertIsInstance(mock != 3, bool)
    self.assertEqual(mock != 3, False)
def test_len_contains_iter(self):
mock = Mock()
self.assertRaises(TypeError, len, mock)
self.assertRaises(TypeError, iter, mock)
self.assertRaises(TypeError, lambda: 'foo' in mock)
mock.__len__ = lambda s: 6
self.assertEqual(len(mock), 6)
mock.__contains__ = lambda s, o: o == 3
self.assertTrue(3 in mock)
self.assertFalse(6 in mock)
mock.__iter__ = lambda s: iter('foobarbaz')
self.assertEqual(list(mock), list('foobarbaz'))
def test_magicmock(self):
mock = MagicMock()
mock.__iter__.return_value = iter([1, 2, 3])
self.assertEqual(list(mock), [1, 2, 3])
getattr(mock, '__bool__').return_value = False
self.assertFalse(hasattr(mock, '__nonzero__'))
self.assertFalse(bool(mock))
for entry in _magics:
self.assertTrue(hasattr(mock, entry))
self.assertFalse(hasattr(mock, '__imaginery__'))
def test_magic_mock_equality(self):
mock = MagicMock()
self.assertIsInstance(mock == object(), bool)
self.assertIsInstance(mock != object(), bool)
self.assertEqual(mock == object(), False)
self.assertEqual(mock != object(), True)
self.assertEqual(mock == mock, True)
self.assertEqual(mock != mock, False)
    def test_magicmock_defaults(self):
        """Default return values of MagicMock's magic methods: numeric
        conversions yield 1/1j/1.0, container protocols act empty,
        truthiness is True, and hash/str fall back to the object
        implementations."""
        mock = MagicMock()
        self.assertEqual(int(mock), 1)
        self.assertEqual(complex(mock), 1j)
        self.assertEqual(float(mock), 1.0)
        self.assertNotIn(object(), mock)
        self.assertEqual(len(mock), 0)
        self.assertEqual(list(mock), [])
        self.assertEqual(hash(mock), object.__hash__(mock))
        self.assertEqual(str(mock), object.__str__(mock))
        self.assertTrue(bool(mock))
        # in Python 3 oct and hex use __index__
        # so these tests are for __index__ in py3k
        self.assertEqual(oct(mock), '0o1')
        self.assertEqual(hex(mock), '0x1')
        # how to test __sizeof__ ?
    def test_magic_methods_and_spec(self):
        """With spec=..., a magic method exists on the mock only if the
        spec class defines it; a spec'd Mock still allows assigning a
        spec'd magic afterwards, but a magic absent from the spec cannot
        be set at all."""
        class Iterable(object):
            def __iter__(self):
                pass
        mock = Mock(spec=Iterable)
        # not preconfigured on a plain Mock, but assignable (it's in the spec)
        self.assertRaises(AttributeError, lambda: mock.__iter__)
        mock.__iter__ = Mock(return_value=iter([]))
        self.assertEqual(list(mock), [])
        class NonIterable(object):
            pass
        mock = Mock(spec=NonIterable)
        self.assertRaises(AttributeError, lambda: mock.__iter__)
        # __int__ is not in either spec, so setting it is rejected
        def set_int():
            mock.__int__ = Mock(return_value=iter([]))
        self.assertRaises(AttributeError, set_int)
        # MagicMock preconfigures the spec'd magic methods
        mock = MagicMock(spec=Iterable)
        self.assertEqual(list(mock), [])
        self.assertRaises(AttributeError, set_int)
    def test_magic_methods_and_spec_set(self):
        """spec_set behaves like spec for magic methods: only magics the
        spec class defines are available/assignable."""
        class Iterable(object):
            def __iter__(self):
                pass
        mock = Mock(spec_set=Iterable)
        self.assertRaises(AttributeError, lambda: mock.__iter__)
        mock.__iter__ = Mock(return_value=iter([]))
        self.assertEqual(list(mock), [])
        class NonIterable(object):
            pass
        mock = Mock(spec_set=NonIterable)
        self.assertRaises(AttributeError, lambda: mock.__iter__)
        # __int__ is not in either spec_set, so setting it is rejected
        def set_int():
            mock.__int__ = Mock(return_value=iter([]))
        self.assertRaises(AttributeError, set_int)
        mock = MagicMock(spec_set=Iterable)
        self.assertEqual(list(mock), [])
        self.assertRaises(AttributeError, set_int)
def test_setting_unsupported_magic_method(self):
mock = MagicMock()
def set_setattr():
mock.__setattr__ = lambda self, name: None
self.assertRaisesRegex(AttributeError,
"Attempting to set unsupported magic method '__setattr__'.",
set_setattr
)
    def test_attributes_and_return_value(self):
        """Child attributes and call results of a MagicMock are themselves
        MagicMocks (checked via the mro, since each mock instance gets its
        own generated subclass as its type)."""
        mock = MagicMock()
        attr = mock.foo
        def _get_type(obj):
            # the type of every mock (or magicmock) is a custom subclass
            # so the real type is the second in the mro
            return type(obj).__mro__[1]
        self.assertEqual(_get_type(attr), MagicMock)
        returned = mock()
        self.assertEqual(_get_type(returned), MagicMock)
def test_magic_methods_are_magic_mocks(self):
mock = MagicMock()
self.assertIsInstance(mock.__getitem__, MagicMock)
mock[1][2].__getitem__.return_value = 3
self.assertEqual(mock[1][2][3], 3)
def test_magic_method_reset_mock(self):
mock = MagicMock()
str(mock)
self.assertTrue(mock.__str__.called)
mock.reset_mock()
self.assertFalse(mock.__str__.called)
    def test_dir(self):
        """dir() honours a user-supplied __dir__ override on both Mock and
        MagicMock."""
        # overriding the default implementation
        for mock in Mock(), MagicMock():
            def _dir(self):
                return ['foo']
            mock.__dir__ = _dir
            self.assertEqual(dir(mock), ['foo'])
    @unittest.skipIf('PyPy' in sys.version, "This fails differently on pypy")
    def test_bound_methods(self):
        """Assigning a bound method (here list.__iter__) as a magic method
        on a plain Mock does not make the mock iterable."""
        m = Mock()
        # XXXX should this be an expected failure instead?
        # this seems like it should work, but is hard to do without introducing
        # other api inconsistencies. Failure message could be better though.
        m.__iter__ = [3].__iter__
        self.assertRaises(TypeError, iter, m)
def test_magic_method_type(self):
class Foo(MagicMock):
pass
foo = Foo()
self.assertIsInstance(foo.__int__, Foo)
def test_descriptor_from_class(self):
m = MagicMock()
type(m).__str__.return_value = 'foo'
self.assertEqual(str(m), 'foo')
    def test_iterable_as_iter_return_value(self):
        """A non-iterator iterable set as __iter__.return_value can be
        iterated repeatedly; an iterator is exhausted after one pass."""
        m = MagicMock()
        m.__iter__.return_value = [1, 2, 3]
        self.assertEqual(list(m), [1, 2, 3])
        self.assertEqual(list(m), [1, 2, 3])
        m.__iter__.return_value = iter([4, 5, 6])
        self.assertEqual(list(m), [4, 5, 6])
        self.assertEqual(list(m), [])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
filip-be/ImageComparer | exiv2/exiv2/doc/templates/gen.py | 8 | 1538 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Settings
# Directory searched for the __*__ placeholder files.
vardir = "."
# strftime() format used for the __last_modified__ substitution.
date_format = "%d-%b-%Y"
# ----------------------------------------------------------------------
# functions
def usage():
    """Print a short usage message describing the script."""
    print("Usage: gen.py file.in [...]\n"
          "Substitute placeholders in input files with content\n")
def gen_html(file):
    """Return *file*'s text with every placeholder variable substituted
    and the __last_modified__ marker replaced by the current date."""
    text = open(file).read()
    for var in vars:
        replacement = open(vardir + "/" + var).read()
        text = text.replace(var, replacement)
    return last_modified(text)
def last_modified(text):
    """Substitute the __last_modified__ placeholder with today's date."""
    today = time.strftime(date_format, time.localtime())
    return text.replace("__last_modified__", today)
# ----------------------------------------------------------------------
# main
import sys
import os
import re
import time
# Check command line arguments
if len(sys.argv) == 1:
    usage()
    sys.exit()
# The input files from the command line
input = sys.argv[1:]
# Get a list of all variables (files in the form __*__) from vardir
vars = os.listdir(vardir)
# Iterate backwards so del does not shift the indices still to be visited;
# entries that do not look like __name__ are dropped.
for i in range(len(vars)-1, -1, -1):
    if re.match("^__.*__$", vars[i]): continue
    del vars[i]
vars.sort()
# Substitute variables in all input files
print("Substituting variables {0}".format(vars))
for file in input:
    print("Processing {0}...".format(file))
    text = gen_html(file)
    # Output name drops ".in". NOTE(review): .replace removes every
    # occurrence of ".in", not just a trailing suffix — confirm intent.
    file = file.replace(".in", "")
    open(file, 'w').write(text)
| gpl-3.0 |
marco-lancini/Showcase | django/contrib/localflavor/ch/forms.py | 55 | 3882 | """
Swiss-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
import re
id_re = re.compile(r"^(?P<idnumber>\w{8})(?P<pos9>(\d{1}|<))(?P<checksum>\d{1})$")
phone_digits_re = re.compile(r'^0([1-9]{1})\d{8}$')
class CHZipCodeField(RegexField):
    """A form field that validates a four-digit Swiss postal code."""
    default_error_messages = {
        'invalid': _('Enter a zip code in the format XXXX.'),
    }

    def __init__(self, *args, **kwargs):
        # Swiss zip codes are exactly four digits; no length limits needed
        # beyond the regex itself.
        super(CHZipCodeField, self).__init__(
            r'^\d{4}$', max_length=None, min_length=None, *args, **kwargs)
class CHPhoneNumberField(Field):
    """
    Validate local Swiss phone number (not international ones)
    The correct format is '0XX XXX XX XX'.
    '0XX.XXX.XX.XX' and '0XXXXXXXXX' validate but are corrected to
    '0XX XXX XX XX'.
    """
    default_error_messages = {
        # Wrapped in ugettext_lazy (_) for i18n, consistent with every
        # other field in this module.
        'invalid': _('Phone numbers must be in 0XX XXX XX XX format.'),
    }

    def clean(self, value):
        """Normalise *value* to '0XX XXX XX XX' or raise ValidationError.

        Empty values are returned as u'' (the field-level required check
        is handled by the superclass).
        """
        super(CHPhoneNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # Strip the usual separator characters ('.', whitespace, '/', '-')
        # before validating the remaining digits.
        value = re.sub(r'[.\s/-]', '', smart_unicode(value))
        m = phone_digits_re.search(value)
        if m:
            return u'%s %s %s %s' % (value[0:3], value[3:6],
                                     value[6:8], value[8:10])
        raise ValidationError(self.error_messages['invalid'])
class CHStateSelect(Select):
    """
    A Select widget that uses a list of CH states as its choices.
    """
    def __init__(self, attrs=None):
        # Choices are loaded lazily from the sibling ch_states module.
        from ch_states import STATE_CHOICES # relative import
        super(CHStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
class CHIdentityCardNumberField(Field):
    """
    A Swiss identity card number.

    Checks the following rules to determine whether the number is valid:

        * Conforms to the X1234567<0 or 1234567890 format.
        * Included checksums match calculated checksums
    """
    default_error_messages = {
        'invalid': _('Enter a valid Swiss identity or passport card number in X1234567<0 or 1234567890 format.'),
    }

    def has_valid_checksum(self, number):
        """Verify the trailing check digit of *number* (9 chars + digit)."""
        payload, check_digit = number[:-1], number[-1]
        # A leading letter A-I is mapped to its alphabet position 0-8;
        # anything past 'I' is invalid.
        head = str(number[:1])
        if head.isalpha():
            mapped = ord(head.upper()) - 65
            if not 0 <= mapped <= 8:
                return False
            payload = str(mapped) + payload[1:]
        # Position 9 is always treated as '0' for the checksum computation.
        payload = payload[:8] + '0'
        if not payload.isdigit():
            return False
        # Weights cycle 7, 3, 1 over the nine payload digits; the check
        # digit is the last decimal digit of the weighted sum.
        total = 0
        for pos in range(len(payload)):
            total += int(payload[pos]) * (7, 3, 1)[pos % 3]
        return str(total)[-1] == check_digit

    def clean(self, value):
        """Validate and return a normalised identity card number."""
        super(CHIdentityCardNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        match = re.match(id_re, value)
        if not match:
            raise ValidationError(self.error_messages['invalid'])
        groups = match.groupdict()
        idnumber = groups['idnumber']
        pos9 = groups['pos9']
        checksum = groups['checksum']
        # All-zero numbers are syntactically valid but not real cards.
        if idnumber == '00000000' or \
           idnumber == 'A0000000':
            raise ValidationError(self.error_messages['invalid'])
        all_digits = "%s%s%s" % (idnumber, pos9, checksum)
        if not self.has_valid_checksum(all_digits):
            raise ValidationError(self.error_messages['invalid'])
        return u'%s%s%s' % (idnumber, pos9, checksum)
| mit |
manashmndl/kivy | kivy/uix/colorpicker.py | 20 | 15505 | '''
Color Picker
============
.. versionadded:: 1.7.0
.. warning::
This widget is experimental. Its use and API can change at any time until
this warning is removed.
The ColorPicker widget allows a user to select a color from a chromatic
wheel where pinch and zoom can be used to change the wheel's saturation.
Sliders and TextInputs are also provided for entering the RGBA/HSV/HEX values
directly.
Usage::
clr_picker = ColorPicker()
parent.add_widget(clr_picker)
# To monitor changes, we can bind to color property changes
def on_color(instance, value):
print "RGBA = ", str(value) # or instance.color
print "HSV = ", str(instance.hsv)
print "HEX = ", str(instance.hex_color)
clr_picker.bind(color=on_color)
'''
__all__ = ('ColorPicker', 'ColorWheel')
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.widget import Widget
from kivy.properties import (NumericProperty, BoundedNumericProperty,
ListProperty, ObjectProperty,
ReferenceListProperty, StringProperty,
AliasProperty)
from kivy.clock import Clock
from kivy.graphics import Mesh, InstructionGroup, Color
from kivy.utils import get_color_from_hex, get_hex_from_color
from kivy.logger import Logger
from math import cos, sin, pi, sqrt, atan, atan2
from colorsys import rgb_to_hsv, hsv_to_rgb
def distance(pt1, pt2):
    '''Return the Euclidean distance between two 2-D points.'''
    dx = pt1[0] - pt2[0]
    dy = pt1[1] - pt2[1]
    return sqrt(dx ** 2. + dy ** 2.)
def polar_to_rect(origin, r, theta):
    '''Convert polar coordinates (r, theta) about *origin* to (x, y).'''
    return (origin[0] + r * cos(theta),
            origin[1] + r * sin(theta))
def rect_to_polar(origin, x, y):
    '''Convert the point (x, y) to polar coordinates about *origin*.

    Returns an (r, theta) pair with theta normalised to [0, 2*pi).
    The origin itself maps to (0, 0).
    '''
    dx = x - origin[0]
    dy = y - origin[1]
    if dx == 0 and dy == 0:
        return (0, 0)
    # atan2 handles all four quadrants (and dx == 0) directly, avoiding
    # the manual quadrant corrections and the division-by-zero hazard of
    # atan(dy / dx).
    theta = atan2(dy, dx)
    if theta < 0:
        theta += 2 * pi
    return (sqrt(dx * dx + dy * dy), theta)
class ColorWheel(Widget):
    '''Chromatic wheel for the ColorPicker.

    .. versionchanged:: 1.7.1
        `font_size`, `font_name` and `foreground_color` have been removed. The
        sizing is now the same as others widget, based on 'sp'. Orientation is
        also automatically determined according to the width/height ratio.
    '''

    r = BoundedNumericProperty(0, min=0, max=1)
    '''The Red value of the color currently selected.

    :attr:`r` is a :class:`~kivy.properties.BoundedNumericProperty` and
    can be a value from 0 to 1. It defaults to 0.
    '''

    g = BoundedNumericProperty(0, min=0, max=1)
    '''The Green value of the color currently selected.

    :attr:`g` is a :class:`~kivy.properties.BoundedNumericProperty`
    and can be a value from 0 to 1.
    '''

    b = BoundedNumericProperty(0, min=0, max=1)
    '''The Blue value of the color currently selected.

    :attr:`b` is a :class:`~kivy.properties.BoundedNumericProperty` and
    can be a value from 0 to 1.
    '''

    a = BoundedNumericProperty(0, min=0, max=1)
    '''The Alpha value of the color currently selected.

    :attr:`a` is a :class:`~kivy.properties.BoundedNumericProperty` and
    can be a value from 0 to 1.
    '''

    color = ReferenceListProperty(r, g, b, a)
    '''Holds the color currently selected.

    :attr:`color` is a :class:`~kivy.properties.ReferenceListProperty` and
    contains a list of `r`, `g`, `b`, `a` values.
    '''

    # Internal wheel geometry: centre, radius, number of saturation/value
    # rings and number of hue slices per ring.
    _origin = ListProperty((100, 100))
    _radius = NumericProperty(100)
    _piece_divisions = NumericProperty(10)
    _pieces_of_pie = NumericProperty(16)

    # Inertial zoom tuning and touch-tracking state.
    _inertia_slowdown = 1.25
    _inertia_cutoff = .25
    _num_touches = 0
    _pinch_flag = False
    _hsv = ListProperty([1, 1, 1, 0])

    def __init__(self, **kwargs):
        '''Precompute the (saturation, value) ramp ``sv_s`` traversed when
        zooming: saturation rises 0->1 at full value, then value falls
        1->0 at full saturation.'''
        super(ColorWheel, self).__init__(**kwargs)
        pdv = self._piece_divisions
        self.sv_s = [(float(x) / pdv, 1) for x in range(pdv)] + [
            (1, float(y) / pdv) for y in reversed(range(pdv))]

    def on__origin(self, instance, value):
        '''Rebuild the wheel when its centre changes.'''
        self.init_wheel(None)

    def on__radius(self, instance, value):
        '''Rebuild the wheel when its radius changes.'''
        self.init_wheel(None)

    def init_wheel(self, dt):
        '''(Re)build the wheel: one _ColorArc mesh per (ring, slice) cell.'''
        # initialize list to hold all meshes
        self.canvas.clear()
        self.arcs = []
        self.sv_idx = 0
        pdv = self._piece_divisions
        ppie = self._pieces_of_pie
        for r in range(pdv):
            for t in range(ppie):
                self.arcs.append(
                    _ColorArc(
                        self._radius * (float(r) / float(pdv)),
                        self._radius * (float(r + 1) / float(pdv)),
                        2 * pi * (float(t) / float(ppie)),
                        2 * pi * (float(t + 1) / float(ppie)),
                        origin=self._origin,
                        color=(float(t) / ppie,
                               self.sv_s[self.sv_idx + r][0],
                               self.sv_s[self.sv_idx + r][1],
                               1)))
                self.canvas.add(self.arcs[-1])

    def recolor_wheel(self):
        '''Re-apply the saturation/value ramp to every arc after a zoom
        step (sv_idx offsets the ring into the sv_s ramp).'''
        ppie = self._pieces_of_pie
        for idx, segment in enumerate(self.arcs):
            segment.change_color(
                sv=self.sv_s[int(self.sv_idx + idx / ppie)])

    def change_alpha(self, val):
        '''Set the alpha channel on every arc of the wheel.'''
        for idx, segment in enumerate(self.arcs):
            segment.change_color(a=val)

    def inertial_incr_sv_idx(self, dt):
        '''One inertial zoom-out step; reschedules itself with a growing
        delay until the cutoff is reached or the ramp end is hit.'''
        # if its already zoomed all the way out, cancel the inertial zoom
        if self.sv_idx == len(self.sv_s) - self._piece_divisions:
            return False
        self.sv_idx += 1
        self.recolor_wheel()
        if dt * self._inertia_slowdown > self._inertia_cutoff:
            return False
        else:
            Clock.schedule_once(self.inertial_incr_sv_idx,
                                dt * self._inertia_slowdown)

    def inertial_decr_sv_idx(self, dt):
        '''One inertial zoom-in step; mirror of inertial_incr_sv_idx.'''
        # if its already zoomed all the way in, cancel the inertial zoom
        if self.sv_idx == 0:
            return False
        self.sv_idx -= 1
        self.recolor_wheel()
        if dt * self._inertia_slowdown > self._inertia_cutoff:
            return False
        else:
            Clock.schedule_once(self.inertial_decr_sv_idx,
                                dt * self._inertia_slowdown)

    def on_touch_down(self, touch):
        '''Grab a touch inside the wheel and remember its anchor radius /
        zoom level for a possible pinch gesture.'''
        r = self._get_touch_r(touch.pos)
        if r > self._radius:
            return False

        # code is still set up to allow pinch to zoom, but this is
        # disabled for now since it was fiddly with small wheels.
        # Comment out these lines and adjust on_touch_move to reenable
        # this.
        if self._num_touches != 0:
            return False

        touch.grab(self)
        self._num_touches += 1
        touch.ud['anchor_r'] = r
        touch.ud['orig_sv_idx'] = self.sv_idx
        touch.ud['orig_time'] = Clock.get_time()

    def on_touch_move(self, touch):
        '''Treat radial movement of a grabbed touch as a pinch-zoom: each
        ring-width of travel shifts the s/v ramp by one step.'''
        if touch.grab_current is not self:
            return
        r = self._get_touch_r(touch.pos)
        goal_sv_idx = (touch.ud['orig_sv_idx']
                       - int((r - touch.ud['anchor_r'])
                             / (float(self._radius) / self._piece_divisions)))
        if (
            goal_sv_idx != self.sv_idx and
            goal_sv_idx >= 0 and
            goal_sv_idx <= len(self.sv_s) - self._piece_divisions
        ):
            # this is a pinch to zoom
            self._pinch_flag = True
            self.sv_idx = goal_sv_idx
            self.recolor_wheel()

    def on_touch_up(self, touch):
        '''On release: either finish a pinch (starting inertial zoom) or
        treat the touch as a colour selection.'''
        if touch.grab_current is not self:
            return
        touch.ungrab(self)
        self._num_touches -= 1
        if self._pinch_flag:
            if self._num_touches == 0:
                # user was pinching, and now both fingers are up. Return
                # to normal
                if self.sv_idx > touch.ud['orig_sv_idx']:
                    Clock.schedule_once(
                        self.inertial_incr_sv_idx,
                        (Clock.get_time() - touch.ud['orig_time'])
                        / (self.sv_idx - touch.ud['orig_sv_idx']))

                if self.sv_idx < touch.ud['orig_sv_idx']:
                    Clock.schedule_once(
                        self.inertial_decr_sv_idx,
                        (Clock.get_time() - touch.ud['orig_time'])
                        / (self.sv_idx - touch.ud['orig_sv_idx']))

                self._pinch_flag = False
                return
            else:
                # user was pinching, and at least one finger remains. We
                # don't want to treat the remaining fingers as touches
                return
        else:
            r, theta = rect_to_polar(self._origin, *touch.pos)
            # if touch up is outside the wheel, ignore
            if r >= self._radius:
                return
            # compute which ColorArc is being touched (they aren't
            # widgets so we don't get collide_point) and set
            # _hsv based on the selected ColorArc
            piece = int((theta / (2 * pi)) * self._pieces_of_pie)
            division = int((r / self._radius) * self._piece_divisions)
            self._hsv = \
                self.arcs[self._pieces_of_pie * division + piece].color

    def on__hsv(self, instance, value):
        '''Propagate the picked HSV value to the r/g/b/a properties.'''
        c_hsv = Color(*value, mode='hsv')
        self.r = c_hsv.r
        self.g = c_hsv.g
        self.b = c_hsv.b
        self.a = c_hsv.a
        # NOTE(review): `rgba` is a plain attribute here, not a declared
        # property on this class — confirm whether anything observes it.
        self.rgba = (self.r, self.g, self.b, self.a)

    def _get_touch_r(self, pos):
        '''Distance from the wheel origin to *pos*.'''
        return distance(pos, self._origin)
class _ColorArc(InstructionGroup):
    '''One annular segment of the colour wheel: a Color instruction (HSV
    mode) plus a triangle-strip Mesh spanning the region
    [r_min, r_max] x [theta_min, theta_max] around *origin*.'''
    def __init__(self, r_min, r_max, theta_min, theta_max,
                 color=(0, 0, 1, 1), origin = (0, 0), **kwargs):
        super(_ColorArc, self).__init__(**kwargs)
        self.origin = origin
        self.r_min = r_min
        self.r_max = r_max
        self.theta_min = theta_min
        self.theta_max = theta_max
        self.color = color
        self.color_instr = Color(*color, mode='hsv')
        self.add(self.color_instr)
        self.mesh = self.get_mesh()
        self.add(self.mesh)

    def __str__(self):
        return "r_min: %s r_max: %s theta_min: %s theta_max: %s color: %s" % (
            self.r_min, self.r_max, self.theta_min, self.theta_max, self.color
        )

    def get_mesh(self):
        '''Build the triangle-strip Mesh approximating this arc segment.

        Segments touching the centre (r_min == 0) become a fan of
        triangles from the origin; all others are a strip alternating
        between the inner and outer edge.
        '''
        v = []
        # first calculate the distance between endpoints of the inner
        # arc, so we know how many steps to use when calculating
        # vertices
        end_point_inner = polar_to_rect(
            self.origin, self.r_min, self.theta_max)

        d_inner = d_outer = 3.
        theta_step_inner = (self.theta_max - self.theta_min) / d_inner

        end_point_outer = polar_to_rect(
            self.origin, self.r_max, self.theta_max)

        if self.r_min == 0:
            theta_step_outer = (self.theta_max - self.theta_min) / d_outer
            for x in range(int(d_outer)):
                v += (polar_to_rect(self.origin, 0, 0) * 2)
                v += (polar_to_rect(
                    self.origin, self.r_max,
                    self.theta_min + x * theta_step_outer) * 2)
        else:
            for x in range(int(d_inner + 2)):
                v += (polar_to_rect(
                    self.origin, self.r_min - 1,
                    self.theta_min + x * theta_step_inner) * 2)
                v += (polar_to_rect(
                    self.origin, self.r_max + 1,
                    self.theta_min + x * theta_step_inner) * 2)

        v += (end_point_inner * 2)
        v += (end_point_outer * 2)

        return Mesh(vertices=v, indices=range(int(len(v) / 4)),
                    mode='triangle_strip')

    def change_color(self, color=None, color_delta=None, sv=None, a=None):
        '''Swap in a new Color instruction.

        Exactly one of *color* (absolute hsva), *color_delta* (per-channel
        offset), *sv* (saturation/value pair) or *a* (alpha) is applied,
        checked in that order.
        '''
        self.remove(self.color_instr)
        if color is not None:
            self.color = color
        elif color_delta is not None:
            self.color = [self.color[i] + color_delta[i] for i in range(4)]
        elif sv is not None:
            self.color = (self.color[0], sv[0], sv[1], self.color[3])
        elif a is not None:
            self.color = (self.color[0], self.color[1], self.color[2], a)
        self.color_instr = Color(*self.color, mode='hsv')
        self.insert(0, self.color_instr)
class ColorPicker(RelativeLayout):
    '''
    See module documentation.
    '''

    font_name = StringProperty('data/fonts/RobotoMono-Regular.ttf')
    '''Specifies the font used on the ColorPicker.

    :attr:`font_name` is a :class:`~kivy.properties.StringProperty` and
    defaults to 'data/fonts/RobotoMono-Regular.ttf'.
    '''

    color = ListProperty((1, 1, 1, 1))
    '''The :attr:`color` holds the color currently selected in rgba format.

    :attr:`color` is a :class:`~kivy.properties.ListProperty` and defaults to
    (1, 1, 1, 1).
    '''

    hsv = ListProperty((1, 1, 1))
    '''The :attr:`hsv` holds the color currently selected in hsv format.

    :attr:`hsv` is a :class:`~kivy.properties.ListProperty` and defaults to
    (1, 1, 1).
    '''

    def _get_hex(self):
        # Render the rgba color property as a '#rrggbbaa' string.
        return get_hex_from_color(self.color)

    def _set_hex(self, value):
        # Parse a hex string; at most 4 channels (r, g, b, a) are kept.
        self.color = get_color_from_hex(value)[:4]

    hex_color = AliasProperty(_get_hex, _set_hex, bind=('color', ))
    '''The :attr:`hex_color` holds the currently selected color in hex.

    :attr:`hex_color` is an :class:`~kivy.properties.AliasProperty` and
    defaults to `#ffffffff`.
    '''

    wheel = ObjectProperty(None)
    '''The :attr:`wheel` holds the color wheel.

    :attr:`wheel` is an :class:`~kivy.properties.ObjectProperty` and
    defaults to None.
    '''

    # now used only internally.
    foreground_color = ListProperty((1, 1, 1, 1))

    def on_color(self, instance, value):
        '''Keep hsv in sync with color; the _updating_clr guard stops the
        two observers from recursing into each other.'''
        if not self._updating_clr:
            self._updating_clr = True
            self.hsv = rgb_to_hsv(*value[:3])
            self._updating_clr = False

    def on_hsv(self, instance, value):
        '''Keep color in sync with hsv (alpha is left untouched).'''
        if not self._updating_clr:
            self._updating_clr = True
            self.color[:3] = hsv_to_rgb(*value)
            self._updating_clr = False

    def _trigger_update_clr(self, mode, clr_idx, text):
        '''Debounce a channel text edit: remember it and (re)schedule the
        actual update for the next frame.'''
        self._upd_clr_list = mode, clr_idx, text
        Clock.unschedule(self._update_clr)
        Clock.schedule_once(self._update_clr)

    def _update_clr(self, dt):
        '''Apply a pending channel edit, clamping the value to 0..255 and
        scaling it into the 0..1 property range.'''
        mode, clr_idx, text = self._upd_clr_list
        try:
            text = min(255, max(0, float(text)))
            if mode == 'rgb':
                self.color[clr_idx] = float(text) / 255.
            else:
                self.hsv[clr_idx] = float(text) / 255.
        except ValueError:
            Logger.warning('ColorPicker: invalid value : {}'.format(text))

    def _update_hex(self, dt):
        '''Apply a pending hex edit once it is a full 9-character
        '#rrggbbaa' string; shorter input is ignored.'''
        if len(self._upd_hex_list) != 9:
            return
        self.hex_color = self._upd_hex_list

    def _trigger_update_hex(self, text):
        '''Debounce a hex text edit, mirroring _trigger_update_clr.'''
        self._upd_hex_list = text
        Clock.unschedule(self._update_hex)
        Clock.schedule_once(self._update_hex)

    def __init__(self, **kwargs):
        # Set the re-entrancy guard before super().__init__, which may
        # fire on_color/on_hsv while applying constructor kwargs.
        self._updating_clr = False
        super(ColorPicker, self).__init__(**kwargs)
# Demo: run a standalone, centred ColorPicker when executed directly
# (or under the legacy '__android__' entry point).
if __name__ in ('__android__', '__main__'):
    from kivy.app import App

    class ColorPickerApp(App):
        def build(self):
            cp = ColorPicker(pos_hint={'center_x': .5, 'center_y': .5},
                             size_hint=(1, 1))
            return cp
    ColorPickerApp().run()
| mit |
SamaraCardoso27/eMakeup | backend/venv/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.py | 164 | 27956 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import base64
import codecs
import datetime
import distutils.util
from email import message_from_file
import hashlib
import imp
import json
import logging
import os
import posixpath
import re
import shutil
import sys
import tempfile
import zipfile
from . import __version__, DistlibException
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
from .database import InstalledDistribution
from .metadata import Metadata, METADATA_FILENAME
from .util import (FileOperator, convert_path, CSVReader, CSVWriter,
cached_property, get_cache_base, read_exports)
logger = logging.getLogger(__name__)
# Implementation tag prefix for wheel compatibility tags (PEP 425):
# 'pp' PyPy, 'jy' Jython, 'ip' IronPython, 'cp' CPython.
if hasattr(sys, 'pypy_version_info'):
    IMP_PREFIX = 'pp'
elif sys.platform.startswith('java'):
    IMP_PREFIX = 'jy'
elif sys.platform == 'cli':
    IMP_PREFIX = 'ip'
else:
    IMP_PREFIX = 'cp'

# Dotless Python version, e.g. '27' or '34'.
VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
if not VER_SUFFIX: # pragma: no cover
    VER_SUFFIX = '%s%s' % sys.version_info[:2]

# Default tags for the running interpreter: python tag, implementation
# tag, platform tag and ABI tag.
PYVER = 'py' + VER_SUFFIX
IMPVER = IMP_PREFIX + VER_SUFFIX

ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_')

ABI = sysconfig.get_config_var('SOABI')
if ABI and ABI.startswith('cpython-'):
    ABI = ABI.replace('cpython-', 'cp')
else:
    ABI = 'none'

# Full wheel filename: name-version(-build)?-pytag-abitag-archtag.whl
FILENAME_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?
-(?P<py>\w+\d+(\.\w+\d+)*)
-(?P<bi>\w+)
-(?P<ar>\w+)
\.whl$
''', re.IGNORECASE | re.VERBOSE)

# Just name-version(-build), used when constructing a Wheel from a
# distribution name rather than a full filename.
NAME_VERSION_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?$
''', re.IGNORECASE | re.VERBOSE)

# Script shebang line (bytes pattern, tolerant of leading whitespace).
SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')

# Convert native paths to the '/'-separated form used inside archives.
if os.sep == '/':
    to_posix = lambda o: o
else:
    to_posix = lambda o: o.replace(os.sep, '/')
class Mounter(object):
    """
    PEP 302 import hook mapping fully-qualified module names to extension
    module file paths, tracked per wheel path so a wheel's extensions can
    be removed as a group.
    """
    def __init__(self):
        # wheel pathname -> iterable of (module name, extension path)
        self.impure_wheels = {}
        # module name -> extension path (flattened across all wheels)
        self.libs = {}

    def add(self, pathname, extensions):
        """Register the extension modules contributed by one wheel."""
        self.impure_wheels[pathname] = extensions
        self.libs.update(extensions)

    def remove(self, pathname):
        """Forget the extension modules contributed by one wheel."""
        extensions = self.impure_wheels.pop(pathname)
        for k, v in extensions:
            if k in self.libs:
                del self.libs[k]

    def find_module(self, fullname, path=None):
        """PEP 302 finder: claim *fullname* if it is a known extension."""
        if fullname in self.libs:
            result = self
        else:
            result = None
        return result

    def load_module(self, fullname):
        """PEP 302 loader: import the registered extension module.

        Returns a cached module from sys.modules if present; otherwise
        loads it via imp.load_dynamic and sets __loader__/__package__.
        NOTE(review): the ``imp`` module is deprecated on Python 3.4+.
        """
        if fullname in sys.modules:
            result = sys.modules[fullname]
        else:
            if fullname not in self.libs:
                raise ImportError('unable to find extension for %s' % fullname)
            result = imp.load_dynamic(fullname, self.libs[fullname])
            result.__loader__ = self
            parts = fullname.rsplit('.', 1)
            if len(parts) > 1:
                result.__package__ = parts[0]
        return result

# Single module-level Mounter shared by all mounted wheels.
_hook = Mounter()
class Wheel(object):
"""
Class to build and install from Wheel files (PEP 427).
"""
wheel_version = (1, 1)
hash_kind = 'sha256'
def __init__(self, filename=None, sign=False, verify=False):
"""
Initialise an instance using a (valid) filename.
"""
self.sign = sign
self.verify = verify
self.buildver = ''
self.pyver = [PYVER]
self.abi = ['none']
self.arch = ['any']
self.dirname = os.getcwd()
if filename is None:
self.name = 'dummy'
self.version = '0.1'
self._filename = self.filename
else:
m = NAME_VERSION_RE.match(filename)
if m:
info = m.groupdict('')
self.name = info['nm']
self.version = info['vn']
self.buildver = info['bn']
self._filename = self.filename
else:
dirname, filename = os.path.split(filename)
m = FILENAME_RE.match(filename)
if not m:
raise DistlibException('Invalid name or '
'filename: %r' % filename)
if dirname:
self.dirname = os.path.abspath(dirname)
self._filename = filename
info = m.groupdict('')
self.name = info['nm']
self.version = info['vn']
self.buildver = info['bn']
self.pyver = info['py'].split('.')
self.abi = info['bi'].split('.')
self.arch = info['ar'].split('.')
@property
def filename(self):
"""
Build and return a filename from the various components.
"""
if self.buildver:
buildver = '-' + self.buildver
else:
buildver = ''
pyver = '.'.join(self.pyver)
abi = '.'.join(self.abi)
arch = '.'.join(self.arch)
return '%s-%s%s-%s-%s-%s.whl' % (self.name, self.version, buildver,
pyver, abi, arch)
@property
def tags(self):
for pyver in self.pyver:
for abi in self.abi:
for arch in self.arch:
yield pyver, abi, arch
@cached_property
def metadata(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
wrapper = codecs.getreader('utf-8')
metadata_filename = posixpath.join(info_dir, METADATA_FILENAME)
with ZipFile(pathname, 'r') as zf:
try:
with zf.open(metadata_filename) as bf:
wf = wrapper(bf)
result = Metadata(fileobj=wf)
except KeyError:
raise ValueError('Invalid wheel, because %s is '
'missing' % METADATA_FILENAME)
return result
@cached_property
def info(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
metadata_filename = posixpath.join(info_dir, 'WHEEL')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(metadata_filename) as bf:
wf = wrapper(bf)
message = message_from_file(wf)
result = dict(message)
return result
def process_shebang(self, data):
m = SHEBANG_RE.match(data)
if m:
data = b'#!python' + data[m.end():]
else:
cr = data.find(b'\r')
lf = data.find(b'\n')
if cr < 0 or cr > lf:
term = b'\n'
else:
if data[cr:cr + 2] == b'\r\n':
term = b'\r\n'
else:
term = b'\r'
data = b'#!python' + term + data
return data
def get_hash(self, data, hash_kind=None):
if hash_kind is None:
hash_kind = self.hash_kind
try:
hasher = getattr(hashlib, hash_kind)
except AttributeError:
raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
result = hasher(data).digest()
result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
return hash_kind, result
def write_record(self, records, record_path, base):
with CSVWriter(record_path) as writer:
for row in records:
writer.writerow(row)
p = to_posix(os.path.relpath(record_path, base))
writer.writerow((p, '', ''))
def build(self, paths, tags=None, wheel_version=None):
"""
Build a wheel from files in specified paths, and use any specified tags
when determining the name of the wheel.
"""
if tags is None:
tags = {}
libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
if libkey == 'platlib':
is_pure = 'false'
default_pyver = [IMPVER]
default_abi = [ABI]
default_arch = [ARCH]
else:
is_pure = 'true'
default_pyver = [PYVER]
default_abi = ['none']
default_arch = ['any']
self.pyver = tags.get('pyver', default_pyver)
self.abi = tags.get('abi', default_abi)
self.arch = tags.get('arch', default_arch)
libdir = paths[libkey]
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
archive_paths = []
# First, stuff which is not in site-packages
for key in ('data', 'headers', 'scripts'):
if key not in paths:
continue
path = paths[key]
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for fn in files:
p = fsdecode(os.path.join(root, fn))
rp = os.path.relpath(p, path)
ap = to_posix(os.path.join(data_dir, key, rp))
archive_paths.append((ap, p))
if key == 'scripts' and not p.endswith('.exe'):
with open(p, 'rb') as f:
data = f.read()
data = self.process_shebang(data)
with open(p, 'wb') as f:
f.write(data)
# Now, stuff which is in site-packages, other than the
# distinfo stuff.
path = libdir
distinfo = None
for root, dirs, files in os.walk(path):
if root == path:
# At the top level only, save distinfo for later
# and skip it for now
for i, dn in enumerate(dirs):
dn = fsdecode(dn)
if dn.endswith('.dist-info'):
distinfo = os.path.join(root, dn)
del dirs[i]
break
assert distinfo, '.dist-info directory expected, not found'
for fn in files:
# comment out next suite to leave .pyc files in
if fsdecode(fn).endswith(('.pyc', '.pyo')):
continue
p = os.path.join(root, fn)
rp = to_posix(os.path.relpath(p, path))
archive_paths.append((rp, p))
# Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
files = os.listdir(distinfo)
for fn in files:
if fn not in ('RECORD', 'INSTALLER', 'SHARED'):
p = fsdecode(os.path.join(distinfo, fn))
ap = to_posix(os.path.join(info_dir, fn))
archive_paths.append((ap, p))
wheel_metadata = [
'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
'Generator: distlib %s' % __version__,
'Root-Is-Purelib: %s' % is_pure,
]
for pyver, abi, arch in self.tags:
wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
p = os.path.join(distinfo, 'WHEEL')
with open(p, 'w') as f:
f.write('\n'.join(wheel_metadata))
ap = to_posix(os.path.join(info_dir, 'WHEEL'))
archive_paths.append((ap, p))
# Now, at last, RECORD.
# Paths in here are archive paths - nothing else makes sense.
records = []
hasher = getattr(hashlib, self.hash_kind)
for ap, p in archive_paths:
with open(p, 'rb') as f:
data = f.read()
digest = '%s=%s' % self.get_hash(data)
size = os.path.getsize(p)
records.append((ap, digest, size))
p = os.path.join(distinfo, 'RECORD')
self.write_record(records, p, libdir)
ap = to_posix(os.path.join(info_dir, 'RECORD'))
archive_paths.append((ap, p))
# Now, ready to build the zip file
pathname = os.path.join(self.dirname, self.filename)
with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
for ap, p in archive_paths:
logger.debug('Wrote %s to %s in wheel', p, ap)
zf.write(p, ap)
return pathname
def install(self, paths, maker, **kwargs):
    """
    Install a wheel to the specified paths. If kwarg ``warner`` is
    specified, it should be a callable, which will be called with two
    tuples indicating the wheel version of this software and the wheel
    version in the file, if there is a discrepancy in the versions.
    This can be used to issue any warnings or raise any exceptions.
    If kwarg ``lib_only`` is True, only the purelib/platlib files are
    installed, and the headers, scripts, data and dist-info metadata are
    not written.

    The return value is a :class:`InstalledDistribution` instance unless
    ``options.lib_only`` is True, in which case the return value is ``None``.
    """
    dry_run = maker.dry_run
    warner = kwargs.get('warner')
    lib_only = kwargs.get('lib_only', False)

    pathname = os.path.join(self.dirname, self.filename)
    name_ver = '%s-%s' % (self.name, self.version)
    data_dir = '%s.data' % name_ver
    info_dir = '%s.dist-info' % name_ver

    metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
    wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
    record_name = posixpath.join(info_dir, 'RECORD')

    wrapper = codecs.getreader('utf-8')

    with ZipFile(pathname, 'r') as zf:
        # Read the WHEEL metadata and warn on a format-version mismatch.
        with zf.open(wheel_metadata_name) as bwf:
            wf = wrapper(bwf)
            message = message_from_file(wf)
        wv = message['Wheel-Version'].split('.', 1)
        file_version = tuple([int(i) for i in wv])
        if (file_version != self.wheel_version) and warner:
            warner(self.wheel_version, file_version)

        if message['Root-Is-Purelib'] == 'true':
            libdir = paths['purelib']
        else:
            libdir = paths['platlib']

        # RECORD maps archive paths to (path, digest, size) rows; used
        # below to verify sizes and digests of extracted members.
        records = {}
        with zf.open(record_name) as bf:
            with CSVReader(stream=bf) as reader:
                for row in reader:
                    p = row[0]
                    records[p] = row

        data_pfx = posixpath.join(data_dir, '')
        info_pfx = posixpath.join(info_dir, '')
        script_pfx = posixpath.join(data_dir, 'scripts', '')

        # make a new instance rather than a copy of maker's,
        # as we mutate it
        fileop = FileOperator(dry_run=dry_run)
        fileop.record = True  # so we can rollback if needed

        bc = not sys.dont_write_bytecode  # Double negatives. Lovely!

        outfiles = []  # for RECORD writing

        # for script copying/shebang processing
        workdir = tempfile.mkdtemp()
        # set target dir later
        # we default add_launchers to False, as the
        # Python Launcher should be used instead
        maker.source_dir = workdir
        maker.target_dir = None
        try:
            for zinfo in zf.infolist():
                arcname = zinfo.filename
                if isinstance(arcname, text_type):
                    u_arcname = arcname
                else:
                    u_arcname = arcname.decode('utf-8')
                # The signature file won't be in RECORD,
                # and we don't currently do anything with it
                if u_arcname.endswith('/RECORD.jws'):
                    continue
                row = records[u_arcname]
                # Check the recorded size against the archive entry.
                if row[2] and str(zinfo.file_size) != row[2]:
                    raise DistlibException('size mismatch for '
                                           '%s' % u_arcname)
                if row[1]:
                    # Check the recorded digest against the member data.
                    kind, value = row[1].split('=', 1)
                    with zf.open(arcname) as bf:
                        data = bf.read()
                    _, digest = self.get_hash(data, kind)
                    if digest != value:
                        raise DistlibException('digest mismatch for '
                                               '%s' % arcname)

                if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
                    logger.debug('lib_only: skipping %s', u_arcname)
                    continue
                is_script = (u_arcname.startswith(script_pfx)
                             and not u_arcname.endswith('.exe'))

                if u_arcname.startswith(data_pfx):
                    # Member lives under the .data tree: install it in the
                    # corresponding scheme directory (headers, scripts...).
                    _, where, rp = u_arcname.split('/', 2)
                    outfile = os.path.join(paths[where], convert_path(rp))
                else:
                    # meant for site-packages.
                    if u_arcname in (wheel_metadata_name, record_name):
                        continue
                    outfile = os.path.join(libdir, convert_path(u_arcname))

                if not is_script:
                    with zf.open(arcname) as bf:
                        fileop.copy_stream(bf, outfile)
                    outfiles.append(outfile)
                    # Double check the digest of the written file
                    if not dry_run and row[1]:
                        with open(outfile, 'rb') as bf:
                            data = bf.read()
                            _, newdigest = self.get_hash(data, kind)
                            if newdigest != digest:
                                raise DistlibException('digest mismatch '
                                                       'on write for '
                                                       '%s' % outfile)
                    if bc and outfile.endswith('.py'):
                        try:
                            pyc = fileop.byte_compile(outfile)
                            outfiles.append(pyc)
                        except Exception:
                            # Don't give up if byte-compilation fails,
                            # but log it and perhaps warn the user
                            logger.warning('Byte-compilation failed',
                                           exc_info=True)
                else:
                    # Scripts are staged in workdir so the maker can
                    # rewrite shebangs before placing them.
                    fn = os.path.basename(convert_path(arcname))
                    workname = os.path.join(workdir, fn)
                    with zf.open(arcname) as bf:
                        fileop.copy_stream(bf, workname)

                    dn, fn = os.path.split(outfile)
                    maker.target_dir = dn
                    filenames = maker.make(fn)
                    fileop.set_executable_mode(filenames)
                    outfiles.extend(filenames)

            if lib_only:
                logger.debug('lib_only: returning None')
                dist = None
            else:
                # Generate scripts

                # Try to get pydist.json so we can see if there are
                # any commands to generate. If this fails (e.g. because
                # of a legacy wheel), log a warning but don't give up.
                commands = None
                file_version = self.info['Wheel-Version']
                if file_version == '1.0':
                    # Use legacy info
                    ep = posixpath.join(info_dir, 'entry_points.txt')
                    try:
                        with zf.open(ep) as bwf:
                            epdata = read_exports(bwf)
                        commands = {}
                        for key in ('console', 'gui'):
                            k = '%s_scripts' % key
                            if k in epdata:
                                commands['wrap_%s' % key] = d = {}
                                for v in epdata[k].values():
                                    s = '%s:%s' % (v.prefix, v.suffix)
                                    if v.flags:
                                        s += ' %s' % v.flags
                                    d[v.name] = s
                    except Exception:
                        logger.warning('Unable to read legacy script '
                                       'metadata, so cannot generate '
                                       'scripts')
                else:
                    try:
                        with zf.open(metadata_name) as bwf:
                            wf = wrapper(bwf)
                            commands = json.load(wf).get('commands')
                    except Exception:
                        logger.warning('Unable to read JSON metadata, so '
                                       'cannot generate scripts')
                if commands:
                    console_scripts = commands.get('wrap_console', {})
                    gui_scripts = commands.get('wrap_gui', {})
                    if console_scripts or gui_scripts:
                        script_dir = paths.get('scripts', '')
                        if not os.path.isdir(script_dir):
                            raise ValueError('Valid script path not '
                                             'specified')
                        maker.target_dir = script_dir
                        for k, v in console_scripts.items():
                            script = '%s = %s' % (k, v)
                            filenames = maker.make(script)
                            fileop.set_executable_mode(filenames)

                        if gui_scripts:
                            options = {'gui': True }
                            for k, v in gui_scripts.items():
                                script = '%s = %s' % (k, v)
                                filenames = maker.make(script, options)
                                fileop.set_executable_mode(filenames)

                p = os.path.join(libdir, info_dir)
                dist = InstalledDistribution(p)

                # Write SHARED
                paths = dict(paths)  # don't change passed in dict
                del paths['purelib']
                del paths['platlib']
                paths['lib'] = libdir
                p = dist.write_shared_locations(paths, dry_run)
                if p:
                    outfiles.append(p)

                # Write RECORD
                dist.write_installed_files(outfiles, paths['prefix'],
                                           dry_run)
            return dist
        except Exception:  # pragma: no cover
            logger.exception('installation failed.')
            fileop.rollback()
            raise
        finally:
            # Always remove the script staging directory, success or not.
            shutil.rmtree(workdir)
def _get_dylib_cache(self):
    """
    Return (creating it if necessary) the per-interpreter-version cache
    directory used to hold extracted extension modules.
    """
    # Use native string to avoid issues on 2.x: see Python #20140.
    # NOTE: the previous code used sys.version[:3], which is ambiguous
    # from Python 3.10 onwards ('3.1' for both 3.1 and 3.10); derive the
    # version from sys.version_info instead.
    version = str('%s.%s' % sys.version_info[:2])
    result = os.path.join(get_cache_base(), str('dylib-cache'), version)
    if not os.path.isdir(result):
        os.makedirs(result)
    return result
def _get_extensions(self):
    """
    Return a list of ``(name, cached_path)`` pairs for the C extensions
    listed in this wheel's EXTENSIONS metadata, extracting each one into
    the dylib cache when the cached copy is missing or older than the
    archive member.
    """
    wheel_path = os.path.join(self.dirname, self.filename)
    info_dir = '%s.dist-info' % ('%s-%s' % (self.name, self.version))
    metadata_arcname = posixpath.join(info_dir, 'EXTENSIONS')
    reader_factory = codecs.getreader('utf-8')
    found = []
    with ZipFile(wheel_path, 'r') as zf:
        try:
            with zf.open(metadata_arcname) as bf:
                extensions = json.load(reader_factory(bf))
            cache_base = self._get_dylib_cache()
            for ext_name, relpath in extensions.items():
                dest = os.path.join(cache_base, convert_path(relpath))
                if not os.path.exists(dest):
                    needs_extract = True
                else:
                    # Re-extract only when the archive member is newer
                    # than the cached copy on disk.
                    cached_mtime = datetime.datetime.fromtimestamp(
                        os.stat(dest).st_mtime)
                    archived_time = datetime.datetime(
                        *zf.getinfo(relpath).date_time)
                    needs_extract = archived_time > cached_mtime
                if needs_extract:
                    zf.extract(relpath, cache_base)
                found.append((ext_name, dest))
        except KeyError:
            # No EXTENSIONS metadata in this wheel.
            pass
    return found
def mount(self, append=False):
    """
    Add this wheel file to ``sys.path`` so its contents can be imported,
    registering an import hook for any bundled C extensions.

    :param append: If True, add at the end of ``sys.path``; otherwise at
                   the front.
    :raises DistlibException: If the wheel is not compatible with the
                              running interpreter.
    """
    pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
    if not is_compatible(self):
        msg = 'Wheel %s not mountable in this Python.' % pathname
        raise DistlibException(msg)
    if pathname in sys.path:
        logger.debug('%s already in path', pathname)
        return
    position = len(sys.path) if append else 0
    sys.path.insert(position, pathname)
    extensions = self._get_extensions()
    if extensions:
        # Only install the meta-path hook once, then register this
        # wheel's extensions with it.
        if _hook not in sys.meta_path:
            sys.meta_path.append(_hook)
        _hook.add(pathname, extensions)
def unmount(self):
    """
    Remove this wheel file from ``sys.path`` and unregister its
    extensions from the import hook, removing the hook itself once no
    mounted wheel needs it any more.
    """
    pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
    if pathname not in sys.path:
        logger.debug('%s not in path', pathname)
        return
    sys.path.remove(pathname)
    if pathname in _hook.impure_wheels:
        _hook.remove(pathname)
    if not _hook.impure_wheels and _hook in sys.meta_path:
        # Last impure wheel gone; the hook is no longer needed.
        sys.meta_path.remove(_hook)
def compatible_tags():
    """
    Return (pyver, abi, arch) tuples compatible with this Python.
    """
    # Current version first, then every earlier minor of the same major.
    versions = [VER_SUFFIX]
    major = VER_SUFFIX[0]
    for minor in range(sys.version_info[1] - 1, -1, -1):
        versions.append(major + str(minor))

    # Collect ABI tags advertised by the interpreter's import suffixes.
    abis = []
    for suffix, _, _ in imp.get_suffixes():
        if suffix.startswith('.abi'):
            abis.append(suffix.split('.', 2)[1])
    abis.sort()
    if ABI != 'none':
        abis.insert(0, ABI)
    abis.append('none')

    # Most specific - our Python version, ABI and arch
    result = [(IMP_PREFIX + versions[0], abi, ARCH) for abi in abis]

    # where no ABI / arch dependency, but IMP_PREFIX dependency
    for position, version in enumerate(versions):
        result.append((IMP_PREFIX + version, 'none', 'any'))
        if position == 0:
            # Major-only tag (e.g. 'cp3') for the current version.
            result.append((IMP_PREFIX + version[0], 'none', 'any'))

    # no IMP_PREFIX, ABI or arch dependency
    for position, version in enumerate(versions):
        result.append(('py' + version, 'none', 'any'))
        if position == 0:
            result.append(('py' + version[0], 'none', 'any'))
    return result
# Compute the interpreter's compatibility tags once at import time, then
# drop the helper so only the cached constant remains.
COMPATIBLE_TAGS = compatible_tags()

del compatible_tags
def is_compatible(wheel, tags=None):
    """
    Return True if *wheel* (a :class:`Wheel` or a wheel filename) matches
    any of the given compatibility *tags*; with ``tags=None`` the running
    interpreter's tags are used.
    """
    if not isinstance(wheel, Wheel):
        wheel = Wheel(wheel)  # assume it's a filename
    candidate_tags = COMPATIBLE_TAGS if tags is None else tags
    for ver, abi, arch in candidate_tags:
        if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch:
            return True
    return False
| mit |
FeodorFitsner/coala | coalib/processes/BearRunning.py | 2 | 24835 | import queue
import traceback
from collections import Iterable
from coalib.bears.BEAR_KIND import BEAR_KIND
from coalib.bears.GlobalBear import GlobalBear
from coalib.bears.LocalBear import LocalBear
from coalib.misc.Constants import Constants
from coalib.processes.CONTROL_ELEMENT import CONTROL_ELEMENT
from coalib.processes.communication.LogMessage import LogMessage, LOG_LEVEL
from coalib.misc.i18n import _
from coalib.results.Result import Result
def send_msg(message_queue, timeout, log_level, *args, delimiter=' ', end=''):
    """
    Assemble a log message from *args* and enqueue it for a LogPrinter.

    :param message_queue: The queue the LogPrinter reads from.
    :param timeout:       Maximum seconds to block on the put; afterwards
                          a queue.Full exception is raised.
    :param log_level:     The log level (error/debug/warning) forwarded to
                          the LogPrinter.
    :param args:          The pieces making up the message.
    :param delimiter:     Placed between each piece (default: a space).
    :param end:           Appended after the last piece.
    """
    pieces = [str(piece) for piece in args]
    text = str(delimiter).join(pieces) + str(end)
    message_queue.put(LogMessage(log_level, text), timeout=timeout)
def validate_results(message_queue, timeout, result_list, name, args, kwargs):
    """
    Validate that *result_list* is a proper iterable of Result objects.

    Invalid entries are reported via *message_queue* and filtered out;
    each result_list belongs to one execution of a bear.

    :param message_queue: Queue for errors/warnings/debug messages to be
                          printed in the log.
    :param timeout:       Maximum seconds to block on a queue put.
    :param result_list:   The list of results to validate.
    :param name:          The name of the bear executed.
    :param args:          The args with which the bear was executed.
    :param kwargs:        The kwargs with which the bear was executed.
    :return:              None if result_list is None or not iterable,
                          otherwise the list of valid Result objects.
    """
    if result_list is None:
        return None

    if not isinstance(result_list, Iterable):
        send_msg(message_queue,
                 timeout,
                 LOG_LEVEL.ERROR,
                 _("The results from the bear {bear} couldn't be processed "
                   "with arguments {arglist}, {kwarglist}.")
                 .format(bear=name, arglist=args, kwarglist=kwargs))
        send_msg(message_queue,
                 timeout,
                 LOG_LEVEL.DEBUG,
                 _("The return value of the {bear} is an instance of {ret}"
                   " but should be an instance of list.")
                 .format(bear=name, ret=result_list.__class__))
        return None

    # Build a new list instead of removing entries from the list while
    # iterating it: list.remove() during iteration skips the element
    # following each removed one, so invalid results could slip through.
    valid_results = []
    for result in result_list:
        if isinstance(result, Result):
            valid_results.append(result)
            continue
        send_msg(message_queue,
                 timeout,
                 LOG_LEVEL.ERROR,
                 _("The results from the bear {bear} could only be "
                   "partially processed with arguments {arglist}, "
                   "{kwarglist}")
                 .format(bear=name, arglist=args, kwarglist=kwargs))
        send_msg(message_queue,
                 timeout,
                 LOG_LEVEL.DEBUG,
                 _("One of the results in the list for the bear {bear} is "
                   "an instance of {ret} but it should be an instance of "
                   "Result")
                 .format(bear=name, ret=result.__class__))

    return valid_results
def run_bear(message_queue, timeout, bear_instance, *args, **kwargs):
    """
    Execute one bear instance, reporting and swallowing any error it
    raises, and validate whatever it returns.

    :param message_queue: Queue for errors/warnings/debug messages to be
                          printed in the log.
    :param timeout:       Maximum seconds to block on a queue put.
    :param bear_instance: The instance of the bear to be executed.
    :param args:          The arguments that are to be passed to the bear.
    :param kwargs:        The keyword arguments that are to be passed to
                          the bear.
    :return:              A validated list of Result objects if the bear
                          executed successfully, None otherwise.
    """
    if kwargs.get("dependency_results", True) is None:
        # Bears without dependencies must not receive the keyword at all.
        del kwargs["dependency_results"]

    name = bear_instance.__class__.__name__

    try:
        result_list = bear_instance.execute(*args,
                                            **kwargs)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt and thus made the process hard to interrupt;
        # catching Exception keeps those propagating.
        send_msg(message_queue,
                 timeout,
                 LOG_LEVEL.ERROR,
                 _("The bear {bear} failed to run with the arguments "
                   "{arglist}, {kwarglist}. Skipping bear...")
                 .format(bear=name, arglist=args, kwarglist=kwargs))
        send_msg(message_queue,
                 timeout,
                 LOG_LEVEL.DEBUG,
                 _("Traceback for error in bear {bear}:")
                 .format(bear=name),
                 traceback.format_exc(),
                 delimiter="\n")
        return None

    return validate_results(message_queue,
                            timeout,
                            result_list,
                            name,
                            args,
                            kwargs)
def get_local_dependency_results(local_result_list, bear_instance):
    """
    Collect the results originating from the dependencies of a local
    bear instance.

    :param local_result_list: The results to pick dependency results from.
    :param bear_instance:     The local bear whose dependencies are
                              looked up.
    :return:                  None if the bear has no dependencies, else a
                              dict mapping dependency names to their
                              result lists.
    """
    dependencies = bear_instance.get_dependencies()
    if dependencies == []:
        return None

    wanted_origins = [dependency.__name__ for dependency in dependencies]
    gathered = {}
    for result in local_result_list:
        if result.origin in wanted_origins:
            gathered.setdefault(result.origin, []).append(result)

    return gathered
def run_local_bear(message_queue,
                   timeout,
                   local_result_list,
                   file_dict,
                   bear_instance,
                   filename):
    """
    Run one local bear on one file after checking it really is a
    LocalBear.

    :param message_queue:     Queue for errors/warnings/debug messages to
                              be printed in the log.
    :param timeout:           Maximum seconds to block on a queue put.
    :param local_result_list: The results of all local bears run so far,
                              used to resolve dependency results.
    :param file_dict:         Dictionary containing contents of files.
    :param bear_instance:     Instance of LocalBear to run.
    :param filename:          Name of the file to run it on.
    :return:                  The list of results the bear produced, or
                              None for an invalid bear.
    """
    is_valid_local_bear = (isinstance(bear_instance, LocalBear)
                           and bear_instance.kind() == BEAR_KIND.LOCAL)
    if not is_valid_local_bear:
        send_msg(message_queue,
                 timeout,
                 LOG_LEVEL.WARNING,
                 _("A given local bear ({}) is not valid. Leaving "
                   "it out...").format(bear_instance.__class__.__name__),
                 Constants.THIS_IS_A_BUG)
        return None

    dependency_results = get_local_dependency_results(local_result_list,
                                                      bear_instance)
    return run_bear(message_queue,
                    timeout,
                    bear_instance,
                    filename,
                    file_dict[filename],
                    dependency_results=dependency_results)
def run_global_bear(message_queue,
                    timeout,
                    global_bear_instance,
                    dependency_results):
    """
    Run one global bear after checking it really is a GlobalBear.

    :param message_queue:        Queue for errors/warnings/debug messages
                                 to be printed in the log.
    :param timeout:              Maximum seconds to block on a queue put.
    :param global_bear_instance: Instance of GlobalBear to run.
    :param dependency_results:   Results of all bears this bear depends
                                 on.
    :return:                     The list of results the bear produced,
                                 or None for an invalid bear.
    """
    is_valid_global_bear = (isinstance(global_bear_instance, GlobalBear)
                            and global_bear_instance.kind() ==
                            BEAR_KIND.GLOBAL)
    if not is_valid_global_bear:
        send_msg(message_queue,
                 timeout,
                 LOG_LEVEL.WARNING,
                 _("A given global bear ({}) is not valid. Leaving it "
                   "out...")
                 .format(global_bear_instance.__class__.__name__),
                 Constants.THIS_IS_A_BUG)
        return None

    return run_bear(message_queue,
                    timeout,
                    global_bear_instance,
                    dependency_results=dependency_results)
def run_local_bears_on_file(message_queue,
                            timeout,
                            file_dict,
                            local_bear_list,
                            local_result_dict,
                            control_queue,
                            filename):
    """
    Run every local bear on one file, store the combined results and
    notify the control queue.

    :param message_queue:     Queue for errors/warnings/debug messages to
                              be printed in the log.
    :param timeout:           Maximum seconds to block on a queue put.
    :param file_dict:         Dictionary that contains contents of files.
    :param local_bear_list:   List of local bears to run on the file.
    :param local_result_dict: Manager.dict storing local bear results,
                              keyed by filename.
    :param control_queue:     Receives a (CONTROL_ELEMENT.LOCAL, filename)
                              tuple once the results are stored.
    :param filename:          The name of the file on which to run the
                              bears.
    """
    if filename not in file_dict:
        send_msg(message_queue,
                 timeout,
                 LOG_LEVEL.ERROR,
                 _("An internal error occurred."),
                 Constants.THIS_IS_A_BUG)
        send_msg(message_queue,
                 timeout,
                 LOG_LEVEL.DEBUG,
                 _("The given file through the queue is not in the file "
                   "dictionary."))
        return

    collected_results = []
    for bear_instance in local_bear_list:
        bear_results = run_local_bear(message_queue,
                                      timeout,
                                      collected_results,
                                      file_dict,
                                      bear_instance,
                                      filename)
        if bear_results is not None:
            collected_results.extend(bear_results)

    local_result_dict[filename] = collected_results
    control_queue.put((CONTROL_ELEMENT.LOCAL, filename))
def get_global_dependency_results(global_result_dict, bear_instance):
    """
    Collect the results originating from the dependencies of a global
    bear instance.

    :param global_result_dict: Dict of results to pick dependency results
                               from, keyed by bear name.
    :return:                   None if the bear has no dependencies,
                               False if dependencies are not yet met, the
                               dependency dict otherwise.
    """
    try:
        dependencies = bear_instance.get_dependencies()
        if dependencies == []:
            return None
    except AttributeError:
        # When this occurs we have an invalid bear and a warning will be
        # emitted later.
        return None

    collected = {}
    for dependency in dependencies:
        dependency_name = dependency.__name__
        if dependency_name not in global_result_dict:
            return False
        collected[dependency_name] = global_result_dict[dependency_name]
    return collected
def get_next_global_bear(timeout,
                         global_bear_queue,
                         global_bear_list,
                         global_result_dict):
    """
    Retrieve the next global bear whose dependencies are satisfied.

    :param timeout:            Maximum seconds to block on a queue
                               get/put.
    :param global_bear_queue:  Queue (read, write) of indexes of global
                               bear instances in global_bear_list.
    :param global_bear_list:   All global bears to be executed.
    :param global_result_dict: Manager.dict of global results, keyed by
                               bear name.
    :return:                   (bear, bearname, dependency_results)
    """
    while True:
        bear_index = global_bear_queue.get(timeout=timeout)
        candidate = global_bear_list[bear_index]
        dependency_results = get_global_dependency_results(
            global_result_dict, candidate)
        if dependency_results is not False:
            break
        # Dependencies not yet complete: requeue and try another bear.
        global_bear_queue.put(bear_index)

    return candidate, candidate.__class__.__name__, dependency_results
def task_done(obj):
    """
    Invoke obj.task_done() if the object provides that operation;
    otherwise pass silently.

    :param obj: Any object.
    """
    if not hasattr(obj, "task_done"):
        return
    obj.task_done()
def run_local_bears(filename_queue,
                    message_queue,
                    timeout,
                    file_dict,
                    local_bear_list,
                    local_result_dict,
                    control_queue):
    """
    Run the local bears on every file name read from filename_queue,
    until the queue is exhausted.

    :param filename_queue:    Queue (read) of file names to check with
                              local bears.
    :param message_queue:     Queue for errors/warnings/debug messages to
                              be printed in the log.
    :param timeout:           Maximum seconds to block on a queue
                              get/put.
    :param file_dict:         Dictionary that contains contents of files.
    :param local_bear_list:   List of local bears to run.
    :param local_result_dict: Manager.dict storing local bear results,
                              keyed by filename.
    :param control_queue:     Receives a (CONTROL_ELEMENT.LOCAL, filename)
                              tuple for every processed file.
    """
    try:
        while True:
            next_filename = filename_queue.get(timeout=timeout)
            run_local_bears_on_file(message_queue,
                                    timeout,
                                    file_dict,
                                    local_bear_list,
                                    local_result_dict,
                                    control_queue,
                                    next_filename)
            task_done(filename_queue)
    except queue.Empty:
        # No more file names queued: we are done.
        return
def run_global_bears(message_queue,
                     timeout,
                     global_bear_queue,
                     global_bear_list,
                     global_result_dict,
                     control_queue):
    """
    Run all queued global bears, storing each bear's results and
    notifying the control queue, until the bear queue is exhausted.

    :param message_queue:      Queue for errors/warnings/debug messages
                               to be printed in the log.
    :param timeout:            Maximum seconds to block on a queue
                               get/put.
    :param global_bear_queue:  Queue (read, write) of indexes of global
                               bear instances in global_bear_list.
    :param global_bear_list:   List of global bear instances.
    :param global_result_dict: Manager.dict storing global results, keyed
                               by bear name.
    :param control_queue:      Receives a (CONTROL_ELEMENT.GLOBAL,
                               bearname) tuple for every bear that
                               produced results.
    """
    try:
        while True:
            bear, bear_name, dependency_results = (
                get_next_global_bear(timeout,
                                     global_bear_queue,
                                     global_bear_list,
                                     global_result_dict))
            bear_results = run_global_bear(message_queue, timeout, bear,
                                           dependency_results)
            if not bear_results:
                global_result_dict[bear_name] = None
            else:
                global_result_dict[bear_name] = bear_results
                control_queue.put((CONTROL_ELEMENT.GLOBAL, bear_name))
            task_done(global_bear_queue)
    except queue.Empty:
        # No more global bears queued: we are done.
        return
def run(file_name_queue,
        local_bear_list,
        global_bear_list,
        global_bear_queue,
        file_dict,
        local_result_dict,
        global_result_dict,
        message_queue,
        control_queue,
        timeout=0):
    """
    The entry point actually executed by worker processes: run all local
    bears over the queued files, then all global bears.

    Queue parameters marked 'read' must provide get(timeout=TIMEOUT) and
    raise queue.Empty when nothing arrives within the timeout; if they
    also offer task_done(), it is invoked after each processed item.
    Queue parameters marked 'write' must provide
    put(object, timeout=TIMEOUT). Any other exception raised by a queue
    surfaces to the user as an 'unknown error', so beware of that.

    :param file_name_queue:    Queue (read) of file names to check with
                               local bears; may be empty. (Repeat until
                               queue empty.)
    :param local_bear_list:    List of local bear instances.
    :param global_bear_list:   List of global bear instances.
    :param global_bear_queue:  Queue (read, write) of indexes of global
                               bear instances in global_bear_list.
    :param file_dict:          Dict of all files as {filename: file},
                               file as in file.readlines().
    :param local_result_dict:  Manager.dict storing local results, keyed
                               by filename.
    :param global_result_dict: Manager.dict storing global results, keyed
                               by bear name.
    :param message_queue:      Queue (write) for debug/warning/error
                               messages (type LogMessage).
    :param control_queue:      Queue (write). For every stored result a
                               (CONTROL_ELEMENT, name) tuple is put here;
                               (CONTROL_ELEMENT.LOCAL_FINISHED, None) is
                               put once all local bears are done and
                               (CONTROL_ELEMENT.GLOBAL_FINISHED, None)
                               once all global ones are.
    :param timeout:            Maximum seconds to block on a queue
                               get/put.
    """
    run_local_bears(file_name_queue,
                    message_queue,
                    timeout,
                    file_dict,
                    local_bear_list,
                    local_result_dict,
                    control_queue)
    control_queue.put((CONTROL_ELEMENT.LOCAL_FINISHED, None))

    run_global_bears(message_queue,
                     timeout,
                     global_bear_queue,
                     global_bear_list,
                     global_result_dict,
                     control_queue)
    control_queue.put((CONTROL_ELEMENT.GLOBAL_FINISHED, None))
| agpl-3.0 |
rbuffat/pyepw | tests/test_typical_or_extreme_periods.py | 1 | 2262 | import os
import tempfile
import unittest
from pyepw.epw import TypicalOrExtremePeriods, TypicalOrExtremePeriod, EPW
class TestTypicalOrExtremePeriods(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_typical_or_extreme_periods(self):
obj = TypicalOrExtremePeriods()
typical_or_extreme_period_obj = TypicalOrExtremePeriod()
var_typical_or_extreme_period_typical_or_extreme_period_name = "typical_or_extreme_period_name"
typical_or_extreme_period_obj.typical_or_extreme_period_name = var_typical_or_extreme_period_typical_or_extreme_period_name
var_typical_or_extreme_period_typical_or_extreme_period_type = "typical_or_extreme_period_type"
typical_or_extreme_period_obj.typical_or_extreme_period_type = var_typical_or_extreme_period_typical_or_extreme_period_type
var_typical_or_extreme_period_period_start_day = "period_start_day"
typical_or_extreme_period_obj.period_start_day = var_typical_or_extreme_period_period_start_day
var_typical_or_extreme_period_period_end_day = "period_end_day"
typical_or_extreme_period_obj.period_end_day = var_typical_or_extreme_period_period_end_day
obj.add_typical_or_extreme_period(typical_or_extreme_period_obj)
epw = EPW(typical_or_extreme_periods=obj)
epw.save(self.path, check=False)
epw2 = EPW()
epw2.read(self.path)
self.assertEqual(
epw2.typical_or_extreme_periods.typical_or_extreme_periods[0].typical_or_extreme_period_name,
var_typical_or_extreme_period_typical_or_extreme_period_name)
self.assertEqual(
epw2.typical_or_extreme_periods.typical_or_extreme_periods[0].typical_or_extreme_period_type,
var_typical_or_extreme_period_typical_or_extreme_period_type)
self.assertEqual(
epw2.typical_or_extreme_periods.typical_or_extreme_periods[0].period_start_day,
var_typical_or_extreme_period_period_start_day)
self.assertEqual(
epw2.typical_or_extreme_periods.typical_or_extreme_periods[0].period_end_day,
var_typical_or_extreme_period_period_end_day)
| apache-2.0 |
alxgu/ansible | lib/ansible/modules/cloud/google/gcp_sql_database_facts.py | 9 | 4766 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_sql_database_facts
description:
- Gather facts for GCP Database
short_description: Gather facts for GCP Database
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
instance:
description:
- The name of the Cloud SQL instance. This does not include the project ID.
- 'This field represents a link to a Instance resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''name'' and value of
your resource''s name Alternatively, you can add `register: name-of-resource`
to a gcp_sql_instance task and then set this instance field to "{{ name-of-resource
}}"'
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: " a database facts"
gcp_sql_database_facts:
instance: "{{ instance }}"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: facts
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
charset:
description:
- The MySQL charset value.
returned: success
type: str
collation:
description:
- The MySQL collation value.
returned: success
type: str
name:
description:
- The name of the database in the Cloud SQL instance.
- This does not include the project ID or instance name.
returned: success
type: str
instance:
description:
- The name of the Cloud SQL instance. This does not include the project ID.
returned: success
type: dict
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
################################################################################
# Main
################################################################################
def main():
    """Entry point: list the databases of a Cloud SQL instance and exit
    with an ``items`` fact.
    """
    module = GcpModule(argument_spec=dict(instance=dict(required=True, type='dict')))

    # Default to the narrowest scope needed for the SQL Admin API.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/sqlservice.admin']

    response = fetch_list(module, collection(module))
    # Bug fix: fetch_list()/return_if_object() return None on HTTP 404/204;
    # the previous code called .get() on that None and crashed. Treat a
    # missing collection or a response without 'items' as an empty list.
    items = (response or {}).get('items') or []
    module.exit_json(items=items)
def collection(module):
    """Build the databases-collection URL for the configured project/instance."""
    instance = replace_resource_dict(module.params['instance'], 'name')
    template = "https://www.googleapis.com/sql/v1beta4/projects/{project}/instances/{instance}/databases"
    return template.format(project=module.params['project'], instance=instance)
def fetch_list(module, link):
    """GET the collection URL with an authenticated session and decode it."""
    session = GcpSession(module, 'sql')
    return return_if_object(module, session.get(link))
def return_if_object(module, response):
    """Decode a JSON API response, failing the module on API errors.

    Returns None for HTTP 404 (not found) and 204 (no content), otherwise
    the decoded JSON document.
    """
    # "Not found" and "no content" both mean there is nothing to decode.
    if response.status_code in (404, 204):
        return None
    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)
    # Surface API-level errors embedded in an otherwise-successful response.
    errors = navigate_hash(result, ['error', 'errors'])
    if errors:
        module.fail_json(msg=errors)
    return result
if __name__ == "__main__":
    # Standard Ansible module entry point.
    main()
| gpl-3.0 |
3dfxsoftware/cbss-addons | project_phase_description/model/project_phase.py | 1 | 1240 | # -*- encoding: utf-8 -*-
#
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2013 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo (info@vauxoo.com)
#
# Coded by: Jorge Angel Naranjo (jorge_nr@vauxoo.com)
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from openerp.osv import osv, fields
from openerp.tools.translate import _
class project_phase(osv.Model):
    """Extend project.phase with a free-text description field."""
    _inherit = 'project.phase'
    _columns = {
        'description': fields.text('Description'),
    }
class project_task(osv.Model):
    # Plain inheritance hook for project.task (no new columns added here).
    _inherit = 'project.task'
| gpl-2.0 |
crosswalk-project/blink-crosswalk | Tools/Scripts/webkitpy/layout_tests/port/builders.py | 6 | 5428 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
from webkitpy.common.memoized import memoized
# In this dictionary, each item stores:
# * port_name -- a fully qualified port name
# * rebaseline_override_dir -- (optional) directory to put baselines in instead of where you would normally put them.
# This is useful when we don't have bots that cover particular configurations; so, e.g., you might
# support mac-mountainlion but not have a mac-mountainlion bot yet, so you'd want to put the mac-lion
# results into platform/mac temporarily.
# * specifiers -- TestExpectation specifiers for that config. Valid values are found in
# TestExpectationsParser._configuration_tokens_list
_exact_matches = {
    # Builder name -> bot configuration; see the comment block above for the
    # meaning of each key.
    "WebKit XP": {"port_name": "win-xp", "specifiers": ['XP', 'Release']},
    "WebKit Win7": {"port_name": "win-win7", "specifiers": ['Win7', 'Release']},
    "WebKit Win7 (dbg)": {"port_name": "win-win7", "specifiers": ['Win7', 'Debug']},
    "WebKit Linux": {"port_name": "linux-x86_64", "specifiers": ['Linux', 'Release']},
    "WebKit Linux 32": {"port_name": "linux-x86", "specifiers": ['Linux', 'Release']},
    "WebKit Linux (dbg)": {"port_name": "linux-x86_64", "specifiers": ['Linux', 'Debug']},
    "WebKit Mac10.6": {"port_name": "mac-snowleopard", "specifiers": ['SnowLeopard', 'Release']},
    "WebKit Mac10.6 (dbg)": {"port_name": "mac-snowleopard", "specifiers": ['SnowLeopard', 'Debug']},
    "WebKit Mac10.7": {"port_name": "mac-lion", "specifiers": ['Lion', 'Release']},
    "WebKit Mac10.7 (dbg)": {"port_name": "mac-lion", "specifiers": ['Lion', 'Debug']},
    "WebKit Mac10.8": {"port_name": "mac-mountainlion", "specifiers": ['MountainLion', 'Release']},
    "WebKit Mac10.9 (retina)": {"port_name": "mac-retina", "specifiers": ['Retina', 'Release']},
    "WebKit Mac10.9": {"port_name": "mac-mavericks", "specifiers": ['Mavericks', 'Release']},
    "WebKit Mac10.10": {"port_name": "mac-yosemite", "specifiers": ['Yosemite', 'Release']},
    "WebKit Android (Nexus4)": {"port_name": "android", "specifiers": ['Android', 'Release']},
}

# Mapping from port name to the deps builder of the same os:
_deps_builders = {
    "linux-x86": "WebKit Linux (deps)",
    "linux-x86_64": "WebKit Linux (deps)",
    "win-xp": "WebKit XP (deps)",
    # NOTE(review): win-win7 maps to the XP deps builder — presumably because
    # there is no Win7 deps bot; confirm this is intentional.
    "win-win7": "WebKit XP (deps)",
    "mac-snowleopard": "WebKit Mac10.6 (deps)",
    # The following port names are mapped to 10.6 bot since we don't have bots
    # for these ports.
    "mac-lion": "WebKit Mac10.6 (deps)",
    "mac-mountainlion": "WebKit Mac10.6 (deps)",
    "mac-mavericks": "WebKit Mac10.6 (deps)",
    "mac-retina": "WebKit Mac10.6 (deps)",
    "mac-yosemite": "WebKit Mac10.6 (deps)",
}

# Ports that exist but currently have no builder covering them.
_ports_without_builders = [
]
def builder_path_from_name(builder_name):
    """Return *builder_name* with spaces, parens, and dots replaced by '_'."""
    return re.sub(r"[\s().]", "_", builder_name)
def all_builder_names():
    """Return all known builder names, sorted alphabetically."""
    # Dict keys are already unique, so the previous set(...) wrap was
    # redundant; iterating the dict yields its keys directly.
    return sorted(_exact_matches)
def all_port_names():
    """Return the sorted set of all known port names, with or without bots."""
    # A list comprehension (rather than map()) keeps this working on both
    # Python 2 and Python 3; on Python 3 map() returns an iterator that
    # cannot be concatenated to a list with '+'.
    builder_ports = [config["port_name"] for config in _exact_matches.values()]
    return sorted(set(builder_ports + _ports_without_builders))
def rebaseline_override_dir(builder_name):
    """Return the builder's baseline override directory, or None if unset."""
    return _exact_matches[builder_name].get("rebaseline_override_dir")
def port_name_for_builder_name(builder_name):
    """Return the fully qualified port name for the given builder."""
    config = _exact_matches[builder_name]
    return config["port_name"]
def specifiers_for_builder(builder_name):
    """Return the TestExpectation specifiers configured for the builder."""
    config = _exact_matches[builder_name]
    return config["specifiers"]
def builder_name_for_port_name(target_port_name):
    """Return a release builder for the port if one exists, otherwise a
    debug builder, otherwise None."""
    fallback_debug_builder = None
    for name, info in _exact_matches.items():
        if info['port_name'] != target_port_name:
            continue
        if 'dbg' not in name:
            # Prefer the first release builder found.
            return name
        fallback_debug_builder = name
    return fallback_debug_builder
def builder_path_for_port_name(port_name):
    """Return the filesystem-safe builder path for the given port name.

    Bug fix: the original computed the path but never returned it, so every
    call yielded None.
    """
    return builder_path_from_name(builder_name_for_port_name(port_name))
def deps_builder_name_for_port_name(target_port_name):
    """Return the deps builder covering this port, or None if there is none."""
    return _deps_builders.get(target_port_name)
| bsd-3-clause |
wonder-sk/QGIS | python/ext-libs/pygments/console.py | 49 | 1850 | # -*- coding: utf-8 -*-
"""
pygments.console
~~~~~~~~~~~~~~~~
Format colored console output.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
esc = "\x1b["  # ANSI CSI (Control Sequence Introducer) prefix

# Mapping from symbolic name to ANSI escape sequence.
codes = {}
codes[""] = ""
codes["reset"] = esc + "39;49;00m"

codes["bold"] = esc + "01m"
codes["faint"] = esc + "02m"
codes["standout"] = esc + "03m"
codes["underline"] = esc + "04m"
codes["blink"] = esc + "05m"
codes["overline"] = esc + "06m"

dark_colors = ["black", "darkred", "darkgreen", "brown", "darkblue",
               "purple", "teal", "lightgray"]
light_colors = ["darkgray", "red", "green", "yellow", "blue",
                "fuchsia", "turquoise", "white"]

# ANSI foreground colors occupy codes 30-37; each light variant is the same
# code with the bold (01) attribute added.
x = 30
for d, l in zip(dark_colors, light_colors):
    codes[d] = esc + "%im" % x
    codes[l] = esc + "%i;01m" % x
    x += 1

del d, l, x

# Convenience aliases.
codes["darkteal"] = codes["turquoise"]
codes["darkyellow"] = codes["brown"]
codes["fuscia"] = codes["fuchsia"]  # alias for the misspelling of "fuchsia"
codes["white"] = codes["bold"]  # "white" is rendered as plain bold
def reset_color():
    """Return the ANSI sequence that resets color and attributes."""
    return codes["reset"]
def colorize(color_key, text):
    """Wrap *text* in the escape codes for *color_key*, resetting afterwards."""
    return "".join((codes[color_key], text, codes["reset"]))
def ansiformat(attr, text):
    """
    Format ``text`` with a color and/or some attributes::

        color       normal color
        *color*     bold color
        _color_     underlined color
        +color+     blinking color
    """
    pieces = []
    # Strip each recognized marker pair off the color name, accumulating the
    # corresponding attribute code. Order matters: blink, then bold, then
    # underline, matching the original sequential checks.
    for marker, code_name in (("+", "blink"), ("*", "bold"), ("_", "underline")):
        if attr[:1] == attr[-1:] == marker:
            pieces.append(codes[code_name])
            attr = attr[1:-1]
    pieces.append(codes[attr])
    pieces.append(text)
    pieces.append(codes["reset"])
    return "".join(pieces)
| gpl-2.0 |
prov-suite/interop-test-harness | prov_interop/provman/converter.py | 1 | 5717 | """Manages invocation of ProvScala `provmanagement` script.
"""
# Copyright (c) 2015 University of Southampton
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os.path
import subprocess
from prov_interop.component import CommandLineComponent
from prov_interop.component import ConfigError
from prov_interop.converter import ConversionError
from prov_interop.converter import Converter
class ProvManConverter(Converter, CommandLineComponent):
    """Manages invocation of ProvScala `provmanagement` script."""

    # Command-line placeholder tokens, replaced at convert() time.
    INPUT = "INPUT"        # input file
    OUTPUT = "OUTPUT"      # output file
    INFORMAT = "INFORMAT"  # input format
    OUTFORMAT = "OUTFORMAT"  # output format

    def __init__(self):
        """Create converter."""
        super(ProvManConverter, self).__init__()

    def configure(self, config):
        """Configure converter. The configuration must hold:

        - :class:`prov_interop.converter.Converter` configuration
        - :class:`prov_interop.component.CommandLineComponent` configuration

        ``arguments`` must have tokens ``INPUT``, ``OUTPUT``, ``INFORMAT``,
        ``OUTFORMAT`` which are place-holders for the input/output files and
        formats.

        A valid configuration is::

            {
              "executable": "/home/user/provman/bin/provmanagement"
              "arguments": "translate --infile INPUT --outfile OUTPUT --inputFormat INFORMAT --outformat OUTFORMAT"
              "input-formats": ["provn", "ttl", "trig", "provx", "json"]
              "output-formats": ["provn", "ttl", "trig", "provx", "json"]
            }

        :param config: Configuration
        :type config: dict
        :raises ConfigError: if `config` does not hold the above entries
        """
        super(ProvManConverter, self).configure(config)
        # All four placeholders must be present, otherwise convert() could
        # never pass the file names/formats to the script.
        for token in [ProvManConverter.INPUT, ProvManConverter.OUTPUT,
                      ProvManConverter.INFORMAT, ProvManConverter.OUTFORMAT]:
            if token not in self._arguments:
                raise ConfigError("Missing token " + token)

    def convert(self, in_file, out_file):
        """Convert input file into output file.

        - Input and output formats are derived from `in_file` and
          `out_file` file extensions.
        - A check is done to see that `in_file` exists and that the input
          and output format are in ``input-formats`` and
          ``output-formats`` respectively.
        - ``executable`` and ``arguments`` are used to create a
          command-line invocation, with ``INPUT``, ``OUTPUT``,
          ``INFORMAT``, ``OUTFORMAT`` replaced by the actual values.

        An example command-line invocation is::

            /home/user/provman/bin/provmanagement translate --infile testcase1.json --outfile testcase1.provx --inputFormat json --outformat provx

        :param in_file: Input file
        :type in_file: str or unicode
        :param out_file: Output file
        :type out_file: str or unicode
        :raises ConversionError: if the input file cannot be found, or
          the exit code of ``provmanagement`` is non-zero
        :raises OSError: if there are problems invoking the converter
          e.g. the script is not found
        """
        super(ProvManConverter, self).convert(in_file, out_file)
        in_format = os.path.splitext(in_file)[1][1:]
        out_format = os.path.splitext(out_file)[1][1:]
        super(ProvManConverter, self).check_formats(in_format, out_format)
        # Substitute all placeholder tokens in a single pass. (The previous
        # four sequential list comprehensions could re-substitute a value
        # that happened to equal a later token.)
        substitutions = {
            ProvManConverter.INPUT: in_file,
            ProvManConverter.OUTPUT: out_file,
            ProvManConverter.INFORMAT: in_format,
            ProvManConverter.OUTFORMAT: out_format,
        }
        command_line = list(self._executable)
        command_line.extend(self._arguments)
        command_line = [substitutions.get(token, token)
                        for token in command_line]
        print(" ".join(command_line))
        return_code = subprocess.call(command_line)
        if return_code != 0:
            raise ConversionError(" ".join(command_line) +
                                  " returned " + str(return_code))
        if not os.path.isfile(out_file):
            raise ConversionError("Output file not found: " + out_file)
| mit |
mermi/bedrock | bedrock/settings/__init__.py | 5 | 2988 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import sys
from .base import * # noqa
# Select the settings overlay based on the execution environment (CI first,
# then Cloud9, then an optional local override).
if os.getenv('TRAVIS', False):
    from .travis import *  # noqa
elif os.getenv('JENKINS_HOME', False):
    from .jenkins import *  # noqa
else:
    if os.getenv('C9_USER'):
        from .c9 import *  # noqa
    try:
        from .local import *  # noqa
    except ImportError as exc:
        # NOTE(review): the bare string below is a no-op statement used as an
        # inline explanation; the ImportError is intentionally ignored.
        'local.py is supported, but no longer necessary'

if DEV:
    ALLOWED_HOSTS = ['*']
else:
    MIDDLEWARE_CLASSES += ('commonware.middleware.FrameOptionsHeader',)

# waffle flags, switches, and samples should default to True in DEV mode
WAFFLE_FLAG_DEFAULT = WAFFLE_SWITCH_DEFAULT = WAFFLE_SAMPLE_DEFAULT = DEV

# Any databases configured other than "default" should be
# read-only slaves, which our default router
# should use with this setting.
if 'manage.py' not in sys.argv:
    SLAVE_DATABASES = [db for db in DATABASES if db != 'default']

if CACHES['default']['BACKEND'] == 'django_pylibmc.memcached.PyLibMCCache':
    CACHES['default']['BINARY'] = True
    CACHES['default']['OPTIONS'] = {  # Maps to pylibmc "behaviors"
        'tcp_nodelay': True,
        'ketama': True,
    }

# cache for lang files
CACHES['l10n'] = {
    'BACKEND': 'bedrock.base.cache.SimpleDictCache',
    'LOCATION': 'l10n',
    'TIMEOUT': DOTLANG_CACHE,
    'OPTIONS': {
        'MAX_ENTRIES': 5000,
        'CULL_FREQUENCY': 4,  # 1/4 entries deleted if max reached
    }
}

# cache for product details
CACHES['product-details'] = {
    'BACKEND': 'bedrock.base.cache.SimpleDictCache',
    'LOCATION': 'product-details',
    'OPTIONS': {
        'MAX_ENTRIES': 200,  # currently 104 json files
        'CULL_FREQUENCY': 4,  # 1/4 entries deleted if max reached
    }
}

# cache for externalfiles
CACHES['externalfiles'] = {
    'BACKEND': 'bedrock.base.cache.SimpleDictCache',
    'LOCATION': 'externalfiles',
    'OPTIONS': {
        'MAX_ENTRIES': 10,  # currently 2 files
        'CULL_FREQUENCY': 4,  # 1/4 entries deleted if max reached
    }
}

# Serve media/static assets from the CDN.
MEDIA_URL = CDN_BASE_URL + MEDIA_URL
STATIC_URL = CDN_BASE_URL + STATIC_URL

# Test-run overrides (both `manage.py test` and py.test invocations).
if (len(sys.argv) > 1 and sys.argv[1] == 'test') or sys.argv[0].endswith('py.test'):
    # Using the CachedStaticFilesStorage for tests breaks all the things.
    STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
    # Turn off less compilation in tests
    # NOTE(review): the value is True although the comment above says "turn
    # off" — confirm which is intended.
    PIPELINE_ENABLED = True
    # TEMPLATE_DEBUG has to be True for jingo to call the template_rendered
    # signal which Django's test client uses to save away the contexts for your
    # test to look at later.
    TEMPLATE_DEBUG = True
    # don't cache product-details
    CACHES['product-details']['BACKEND'] = 'django.core.cache.backends.dummy.DummyCache'
    # use default product-details data
    PROD_DETAILS_STORAGE = 'product_details.storage.PDFileStorage'
| mpl-2.0 |
UnrememberMe/pants | contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/pyflakes.py | 6 | 2011 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pyflakes.checker import Checker as FlakesChecker
from pants.contrib.python.checks.tasks.checkstyle.common import CheckstylePlugin, Nit
class FlakeError(Nit):
  """A checkstyle Nit built from a single pyflakes message."""
  # TODO(wickman) There is overlap between this and Flake8 -- consider integrating
  # checkstyle plug-ins into the PEP8 tool directly so that this can be inherited
  # by flake8.
  # Code reference is here: https://flake8.readthedocs.io/en/latest/warnings.html

  # Mapping from pyflakes message class name to its flake8-style error code.
  CLASS_ERRORS = {
    'DuplicateArgument': 'F831',
    'ImportShadowedByLoopVar': 'F402',
    'ImportStarUsed': 'F403',
    'LateFutureImport': 'F404',
    'Redefined': 'F810',
    'RedefinedInListComp': 'F812',
    'RedefinedWhileUnused': 'F811',
    'UndefinedExport': 'F822',
    'UndefinedLocal': 'F823',
    'UndefinedName': 'F821',
    'UnusedImport': 'F401',
    'UnusedVariable': 'F841',
  }

  def __init__(self, python_file, flake_message):
    """Build an ERROR-severity Nit anchored to the lines the message names.

    :param python_file: the checked file (provides line ranges and contents).
    :param flake_message: a pyflakes message instance.
    """
    line_range = python_file.line_range(flake_message.lineno)
    super(FlakeError, self).__init__(
        self.get_error_code(flake_message),
        Nit.ERROR,
        python_file.filename,
        flake_message.message % flake_message.message_args,
        line_range,
        python_file.lines[line_range])

  @classmethod
  def get_error_code(cls, message):
    """Map a pyflakes message to its error code; F999 for unknown classes."""
    return cls.CLASS_ERRORS.get(message.__class__.__name__, 'F999')
class PyflakesChecker(CheckstylePlugin):
  """Detect common coding errors via the pyflakes package."""

  def nits(self):
    """Yield a FlakeError for each pyflakes message not in the ignore list."""
    flakes = FlakesChecker(self.python_file.tree, self.python_file.filename)
    messages = sorted(flakes.messages, key=lambda message: message.lineno)
    for message in messages:
      if FlakeError.get_error_code(message) in self.options.ignore:
        continue
      yield FlakeError(self.python_file, message)
| apache-2.0 |
cwisecarver/osf.io | addons/base/logger.py | 39 | 2120 | import abc
class AddonNodeLogger(object):
    """Helper class for adding correctly-formatted addon logs to nodes.

    :param Node node: The node to add logs to
    :param Auth auth: Authorization of the person who did the action.
    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractproperty
    def addon_short_name(self):
        # Subclasses supply the addon's short name; it is used to look up the
        # node settings and to prefix every log action.
        pass

    def _log_params(self):
        # deleted=True: presumably so log params can still be built after the
        # addon has been removed from the node — confirm against callers.
        node_settings = self.node.get_addon(self.addon_short_name, deleted=True)
        return {
            'project': self.node.parent_id,
            'node': self.node._primary_key,
            'folder_id': node_settings.folder_id,
            'folder_name': node_settings.folder_name,
            'folder': node_settings.folder_path
        }

    def __init__(self, node, auth, path=None):
        self.node = node
        self.auth = auth
        self.path = path  # optional file path; when set, file URLs are logged

    def log(self, action, extra=None, save=False):
        """Log an event. Wraps the Node#add_log method, automatically adding
        relevant parameters and prefixing log events with addon_short_name.

        :param str action: Log action. Should be a class constant from NodeLog.
        :param dict extra: Extra parameters to add to the ``params`` dict of the
            new NodeLog.
        """
        params = self._log_params()
        # If logging a file-related action, add the file's view and download URLs
        if self.path:
            params.update({
                'urls': {
                    'view': self.node.web_url_for('addon_view_or_download_file', path=self.path, provider=self.addon_short_name),
                    'download': self.node.web_url_for(
                        'addon_view_or_download_file',
                        path=self.path,
                        provider=self.addon_short_name
                    )
                },
                'path': self.path,
            })
        if extra:
            params.update(extra)

        self.node.add_log(
            action='{0}_{1}'.format(self.addon_short_name, action),
            params=params,
            auth=self.auth
        )
        if save:
            self.node.save()
| apache-2.0 |
evanma92/routeh | flask/lib/python2.7/site-packages/whoosh/query/spans.py | 30 | 28812 | # Copyright 2010 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""
This module contains Query objects that deal with "spans".
Span queries allow for positional constraints on matching documents. For
example, the :class:`whoosh.spans.SpanNear` query matches documents where one
term occurs near another. Because you can nest span queries, and wrap them
around almost any non-span query, you can create very complex constraints.
For example, to find documents containing "whoosh" at most 5 positions before
"library" in the "text" field::
from whoosh import query, spans
t1 = query.Term("text", "whoosh")
t2 = query.Term("text", "library")
q = spans.SpanNear(t1, t2, slop=5)
"""
from whoosh.matching import mcore, wrappers, binary
from whoosh.query import Query, And, AndMaybe, Or, Term
from whoosh.util import make_binary_tree
# Span class
class Span(object):
    """An inclusive range of token positions within a document, with
    optional character offsets and a scoring boost.
    """

    __slots__ = ("start", "end", "startchar", "endchar", "boost")

    def __init__(self, start, end=None, startchar=None, endchar=None,
                 boost=1.0):
        """
        :param start: first token position covered by this span.
        :param end: last token position (inclusive); defaults to ``start``
            for a single-position span.
        :param startchar: optional character offset of the span's start.
        :param endchar: optional character offset of the span's end.
        :param boost: scoring boost for this span.
        """
        if end is None:
            end = start
        assert start <= end
        self.start = start
        self.end = end
        self.startchar = startchar
        self.endchar = endchar
        self.boost = boost

    def __repr__(self):
        if self.startchar is not None or self.endchar is not None:
            return "<%d-%d %d:%d>" % (self.start, self.end, self.startchar,
                                      self.endchar)
        else:
            return "<%d-%d>" % (self.start, self.end)

    def __eq__(self, span):
        # Note: boost is not part of equality, matching __hash__ (which only
        # uses start/end; equal spans therefore always hash equal).
        return (self.start == span.start
                and self.end == span.end
                and self.startchar == span.startchar
                and self.endchar == span.endchar)

    def __ne__(self, span):
        # Bug fix: the original compared only start/end here, so two spans
        # with the same positions but different character offsets were
        # neither == nor != each other. Inequality is now the exact
        # negation of __eq__.
        return not self.__eq__(span)

    def __lt__(self, span):
        # Ordering considers only the start position (used for sorting).
        return self.start < span.start

    def __gt__(self, span):
        return self.start > span.start

    def __hash__(self):
        return hash((self.start, self.end))

    @classmethod
    def merge(cls, spans):
        """Merges overlapping and touches spans in the given list of spans.
        Note that this modifies the original list.

        >>> spans = [Span(1,2), Span(3)]
        >>> Span.merge(spans)
        >>> spans
        [<1-3>]
        """
        i = 0
        while i < len(spans) - 1:
            here = spans[i]
            j = i + 1
            while j < len(spans):
                there = spans[j]
                if there.start > here.end + 1:
                    # Spans are ordered by start, so nothing further can merge.
                    break
                if here.touches(there) or here.overlaps(there):
                    here = here.to(there)
                    spans[i] = here
                    del spans[j]
                else:
                    j += 1
            i += 1
        return spans

    def to(self, span):
        """Return a new span covering both this span and ``span``, combining
        character offsets where available."""
        if self.startchar is None:
            minchar = span.startchar
        elif span.startchar is None:
            minchar = self.startchar
        else:
            minchar = min(self.startchar, span.startchar)
        if self.endchar is None:
            maxchar = span.endchar
        elif span.endchar is None:
            maxchar = self.endchar
        else:
            maxchar = max(self.endchar, span.endchar)

        minpos = min(self.start, span.start)
        maxpos = max(self.end, span.end)
        return self.__class__(minpos, maxpos, minchar, maxchar)

    def overlaps(self, span):
        """True if this span and ``span`` share at least one position."""
        return ((self.start >= span.start and self.start <= span.end)
                or (self.end >= span.start and self.end <= span.end)
                or (span.start >= self.start and span.start <= self.end)
                or (span.end >= self.start and span.end <= self.end))

    def surrounds(self, span):
        """True if ``span`` lies strictly inside this span."""
        return self.start < span.start and self.end > span.end

    def is_within(self, span):
        """True if this span lies entirely inside ``span``."""
        return self.start >= span.start and self.end <= span.end

    def is_before(self, span):
        return self.end < span.start

    def is_after(self, span):
        return self.start > span.end

    def touches(self, span):
        """True if the spans are directly adjacent (no gap, no overlap)."""
        return self.start == span.end + 1 or self.end == span.start - 1

    def distance_to(self, span):
        """Number of positions between the spans; 0 if they overlap."""
        if self.overlaps(span):
            return 0
        elif self.is_before(span):
            return span.start - self.end
        else:
            return self.start - span.end
def bisect_spans(spans, start):
    """Binary-search ``spans`` (sorted by ``.start``) for the leftmost
    insertion point of a span beginning at ``start``."""
    low, high = 0, len(spans)
    while low < high:
        middle = (low + high) // 2
        if spans[middle].start < start:
            low = middle + 1
        else:
            high = middle
    return low
# Base matchers
class SpanWrappingMatcher(wrappers.WrappingMatcher):
    """An abstract matcher class that wraps a "regular" matcher. This matcher
    uses the sub-matcher's matching logic, but only matches documents that have
    matching spans, i.e. where ``_get_spans()`` returns a non-empty list.

    Subclasses must implement the ``_get_spans()`` method, which returns a list
    of valid spans for the current document.
    """

    def __init__(self, child):
        super(SpanWrappingMatcher, self).__init__(child)
        self._spans = None  # spans for the current document, set by _find_next()
        if self.is_active():
            self._find_next()

    def copy(self):
        m = self.__class__(self.child.copy())
        m._spans = self._spans
        return m

    def _replacement(self, newchild):
        return self.__class__(newchild)

    def _find_next(self):
        # Advance the child matcher until it sits on a document with at least
        # one matching span, or becomes inactive.
        if not self.is_active():
            return

        child = self.child
        # r accumulates the child's next() return value — presumably the
        # "replacement may help" hint from WrappingMatcher; confirm against
        # the whoosh matching module.
        r = False

        spans = self._get_spans()
        while child.is_active() and not spans:
            r = child.next() or r
            if not child.is_active():
                # Child exhausted while searching for a spanned document.
                return True
            spans = self._get_spans()
        self._spans = spans

        return r

    def spans(self):
        # Spans of the current document (computed by _find_next()).
        return self._spans

    def next(self):
        self.child.next()
        self._find_next()

    def skip_to(self, id):
        self.child.skip_to(id)
        self._find_next()

    def all_ids(self):
        # Only yield documents that actually have matching spans.
        while self.is_active():
            if self.spans():
                yield self.id()
            self.next()
class SpanBiMatcher(SpanWrappingMatcher):
    # Base for span matchers that wrap two sub-matchers; subclasses are
    # expected to set ``self.a`` and ``self.b`` before these methods run.
    def copy(self):
        return self.__class__(self.a.copy(), self.b.copy())

    def depth(self):
        return 1 + max(self.a.depth(), self.b.depth())

    def replace(self, minquality=0):
        # TODO: fix this
        if not self.is_active():
            return mcore.NullMatcher()
        return self
# Queries
class SpanQuery(Query):
    """Abstract base class for span-based queries. Each span query type wraps
    a "regular" query that implements the basic document-matching functionality
    (for example, SpanNear wraps an And query, because SpanNear requires that
    the two sub-queries occur in the same documents. The wrapped query is
    stored in the ``q`` attribute.

    Subclasses usually only need to implement the initializer to set the
    wrapped query, and ``matcher()`` to return a span-aware matcher object.
    """

    def _subm(self, s, context=None):
        # Create a matcher from the wrapped ("regular") query.
        return self.q.matcher(s, context)

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self.q)

    def __eq__(self, other):
        return (other and self.__class__ is other.__class__
                and self.q == other.q)

    def __hash__(self):
        return hash(self.__class__.__name__) ^ hash(self.q)

    def field(self):
        # Span queries do not report a single field at this level.
        return None

    def needs_spans(self):
        # Matchers for span queries must support spans().
        return True
class WrappingSpan(SpanQuery):
    # Base for span queries wrapping a single sub-query (``self.q``) plus a
    # ``limit`` attribute, which apply() forwards to the copied query.
    def is_leaf(self):
        return False

    def apply(self, fn):
        return self.__class__(fn(self.q), limit=self.limit)

    def field(self):
        return self.q.field()
class SpanFirst(WrappingSpan):
    """Matches spans that end within the first N positions. This lets you
    for example only match terms near the beginning of the document.
    """

    def __init__(self, q, limit=0):
        """
        :param q: the query to match.
        :param limit: the query must match within this position at the start
            of a document. The default is ``0``, which means the query must
            match at the first position.
        """

        self.q = q
        self.limit = limit

    def __eq__(self, other):
        return (other and self.__class__ is other.__class__
                and self.q == other.q and self.limit == other.limit)

    def __hash__(self):
        return hash(self.q) ^ hash(self.limit)

    def matcher(self, searcher, context=None):
        m = self._subm(searcher, context)
        return SpanFirst.SpanFirstMatcher(m, limit=self.limit)

    class SpanFirstMatcher(SpanWrappingMatcher):
        # Wraps the sub-query's matcher and filters its spans by position.
        def __init__(self, child, limit=0):
            self.limit = limit
            super(SpanFirst.SpanFirstMatcher, self).__init__(child)

        def copy(self):
            return self.__class__(self.child.copy(), limit=self.limit)

        def _replacement(self, newchild):
            return self.__class__(newchild, limit=self.limit)

        def _get_spans(self):
            # Keep only spans that end at or before the position limit.
            return [span for span in self.child.spans()
                    if span.end <= self.limit]
class SpanNear(SpanQuery):
    """
    Note: for new code, use :class:`SpanNear2` instead of this class. SpanNear2
    takes a list of sub-queries instead of requiring you to create a binary
    tree of query objects.

    Matches queries that occur near each other. By default, only matches
    queries that occur right next to each other (slop=1) and in order
    (ordered=True).

    For example, to find documents where "whoosh" occurs next to "library"
    in the "text" field::

        from whoosh import query, spans
        t1 = query.Term("text", "whoosh")
        t2 = query.Term("text", "library")
        q = spans.SpanNear(t1, t2)

    To find documents where "whoosh" occurs at most 5 positions before
    "library"::

        q = spans.SpanNear(t1, t2, slop=5)

    To find documents where "whoosh" occurs at most 5 positions before or
    after "library"::

        q = spans.SpanNear(t1, t2, slop=5, ordered=False)

    You can use the ``phrase()`` class method to create a tree of SpanNear
    queries to match a list of terms::

        q = spans.SpanNear.phrase("text", ["whoosh", "search", "library"],
                                  slop=2)
    """

    def __init__(self, a, b, slop=1, ordered=True, mindist=1):
        """
        :param a: the first query to match.
        :param b: the second query that must occur within "slop" positions of
            the first query.
        :param slop: the number of positions within which the queries must
            occur. Default is 1, meaning the queries must occur right next
            to each other.
        :param ordered: whether a must occur before b. Default is True.
        :param mindist: the minimum distance allowed between the queries.
        """

        # Document-level matching is delegated to a plain And query; the
        # positional (span) filtering happens in SpanNearMatcher below.
        self.q = And([a, b])
        self.a = a
        self.b = b
        self.slop = slop
        self.ordered = ordered
        self.mindist = mindist

    def __repr__(self):
        return ("%s(%r, slop=%d, ordered=%s, mindist=%d)"
                % (self.__class__.__name__, self.q, self.slop, self.ordered,
                   self.mindist))

    def __eq__(self, other):
        return (other and self.__class__ == other.__class__
                and self.q == other.q and self.slop == other.slop
                and self.ordered == other.ordered
                and self.mindist == other.mindist)

    def __hash__(self):
        return (hash(self.a) ^ hash(self.b) ^ hash(self.slop)
                ^ hash(self.ordered) ^ hash(self.mindist))

    def is_leaf(self):
        # Composite query: it always wraps two sub-queries.
        return False

    def apply(self, fn):
        # Rebuild the query with fn applied to each sub-query, keeping the
        # positional options unchanged.
        return self.__class__(fn(self.a), fn(self.b), slop=self.slop,
                              ordered=self.ordered, mindist=self.mindist)

    def matcher(self, searcher, context=None):
        ma = self.a.matcher(searcher, context)
        mb = self.b.matcher(searcher, context)
        return SpanNear.SpanNearMatcher(ma, mb, slop=self.slop,
                                        ordered=self.ordered,
                                        mindist=self.mindist)

    @classmethod
    def phrase(cls, fieldname, words, slop=1, ordered=True):
        """Returns a tree of SpanNear queries to match a list of terms.

        This class method is a convenience for constructing a phrase query
        using a binary tree of SpanNear queries::

            SpanNear.phrase("content", ["alfa", "bravo", "charlie", "delta"])

        :param fieldname: the name of the field to search in.
        :param words: a sequence of texts to search for.
        :param slop: the number of positions within which the terms must
            occur. Default is 1, meaning the terms must occur right next
            to each other.
        :param ordered: whether the terms must occur in order. Default is
            True.
        """

        terms = [Term(fieldname, word) for word in words]
        return make_binary_tree(cls, terms, slop=slop, ordered=ordered)

    class SpanNearMatcher(SpanWrappingMatcher):
        # Matcher that intersects the two sub-matchers at the document
        # level and filters their spans by proximity in _get_spans().
        def __init__(self, a, b, slop=1, ordered=True, mindist=1):
            self.a = a
            self.b = b
            self.slop = slop
            self.ordered = ordered
            self.mindist = mindist
            # Only documents matched by *both* sub-matchers can contain a
            # near pair, so wrap an intersection of the two.
            isect = binary.IntersectionMatcher(a, b)
            super(SpanNear.SpanNearMatcher, self).__init__(isect)

        def copy(self):
            return self.__class__(self.a.copy(), self.b.copy(), slop=self.slop,
                                  ordered=self.ordered, mindist=self.mindist)

        def replace(self, minquality=0):
            # TODO: fix this
            if not self.is_active():
                return mcore.NullMatcher()
            return self

        def _get_spans(self):
            # Hoist attribute lookups out of the double loop.
            slop = self.slop
            mindist = self.mindist
            ordered = self.ordered
            spans = set()

            bspans = self.b.spans()
            for aspan in self.a.spans():
                for bspan in bspans:
                    if (bspan.end < aspan.start - slop
                        or (ordered and aspan.start > bspan.start)):
                        # B is too far in front of A, or B is in front of A
                        # *at all* when ordered is True
                        continue
                    if bspan.start > aspan.end + slop:
                        # B is too far from A. Since spans are listed in
                        # start position order, we know that all spans after
                        # this one will also be too far.
                        break

                    # Check the distance between the spans; only pairs within
                    # [mindist, slop] produce a combined span.
                    dist = aspan.distance_to(bspan)
                    if mindist <= dist <= slop:
                        spans.add(aspan.to(bspan))

            return sorted(spans)
class SpanNear2(SpanQuery):
    """
    Matches queries that occur near each other. By default, only matches
    queries that occur right next to each other (slop=1) and in order
    (ordered=True).

    New code should use this query type instead of :class:`SpanNear`.

    (Unlike :class:`SpanNear`, this query takes a list of subqueries instead
    of requiring you to build a binary tree of query objects. This query
    should also be slightly faster due to less overhead.)

    For example, to find documents where "whoosh" occurs next to "library"
    in the "text" field::

        from whoosh import query, spans
        t1 = query.Term("text", "whoosh")
        t2 = query.Term("text", "library")
        q = spans.SpanNear2([t1, t2])

    To find documents where "whoosh" occurs at most 5 positions before
    "library"::

        q = spans.SpanNear2([t1, t2], slop=5)

    To find documents where "whoosh" occurs at most 5 positions before or
    after "library"::

        q = spans.SpanNear2([t1, t2], slop=5, ordered=False)
    """

    def __init__(self, qs, slop=1, ordered=True, mindist=1):
        """
        :param qs: a sequence of sub-queries to match.
        :param slop: the number of positions within which the queries must
            occur. Default is 1, meaning the queries must occur right next
            to each other.
        :param ordered: whether the queries must occur in order. Default is
            True.
        :param mindist: the minimum distance allowed between the queries.
        """

        self.qs = qs
        self.slop = slop
        self.ordered = ordered
        self.mindist = mindist

    def __repr__(self):
        return ("%s(%r, slop=%d, ordered=%s, mindist=%d)"
                % (self.__class__.__name__, self.qs, self.slop, self.ordered,
                   self.mindist))

    def __eq__(self, other):
        return (other and self.__class__ == other.__class__
                and self.qs == other.qs and self.slop == other.slop
                and self.ordered == other.ordered
                and self.mindist == other.mindist)

    def __hash__(self):
        # Fold each sub-query's hash into the option hashes.
        h = hash(self.slop) ^ hash(self.ordered) ^ hash(self.mindist)
        for q in self.qs:
            h ^= hash(q)
        return h

    def is_leaf(self):
        return False

    def children(self):
        return self.qs

    def apply(self, fn):
        return self.__class__([fn(q) for q in self.qs], slop=self.slop,
                              ordered=self.ordered, mindist=self.mindist)

    def matcher(self, searcher, context=None):
        ms = [q.matcher(searcher, context) for q in self.qs]
        return self.SpanNear2Matcher(ms, slop=self.slop, ordered=self.ordered,
                                     mindist=self.mindist)

    class SpanNear2Matcher(SpanWrappingMatcher):
        # Matcher that intersects all sub-matchers at the document level
        # and chains their spans pairwise in _get_spans().
        def __init__(self, ms, slop=1, ordered=True, mindist=1):
            self.ms = ms
            self.slop = slop
            self.ordered = ordered
            self.mindist = mindist
            # Only documents matched by *every* sub-matcher can contain the
            # sequence, so wrap a balanced tree of intersection matchers.
            isect = make_binary_tree(binary.IntersectionMatcher, ms)
            super(SpanNear2.SpanNear2Matcher, self).__init__(isect)

        def copy(self):
            return self.__class__([m.copy() for m in self.ms], slop=self.slop,
                                  ordered=self.ordered, mindist=self.mindist)

        def replace(self, minquality=0):
            # TODO: fix this
            if not self.is_active():
                return mcore.NullMatcher()
            return self

        def _get_spans(self):
            # Hoist attribute lookups out of the loops.
            slop = self.slop
            mindist = self.mindist
            ordered = self.ordered
            ms = self.ms

            # Combine the spans of each successive matcher with the spans
            # accumulated so far; stop early if the accumulator goes empty.
            aspans = ms[0].spans()
            i = 1
            while i < len(ms) and aspans:
                bspans = ms[i].spans()
                spans = set()
                for aspan in aspans:
                    # Use a binary search to find the first position we should
                    # start looking for possible matches
                    if ordered:
                        start = aspan.start
                    else:
                        start = max(0, aspan.start - slop)
                    j = bisect_spans(bspans, start)

                    while j < len(bspans):
                        bspan = bspans[j]
                        j += 1

                        if (bspan.end < aspan.start - slop
                            or (ordered and aspan.start > bspan.start)):
                            # B is too far in front of A, or B is in front of A
                            # *at all* when ordered is True
                            continue
                        if bspan.start > aspan.end + slop:
                            # B is too far from A. Since spans are listed in
                            # start position order, we know that all spans
                            # after this one will also be too far.
                            break

                        # Check the distance between the spans
                        dist = aspan.distance_to(bspan)
                        if mindist <= dist <= slop:
                            spans.add(aspan.to(bspan))
                aspans = sorted(spans)
                i += 1

            # i only reaches len(ms) if every pairwise combination produced
            # at least one span; otherwise the chain broke early.
            if i == len(ms):
                return aspans
            else:
                return []
class SpanOr(SpanQuery):
    """Matches documents that match any of a list of sub-queries. Unlike
    query.Or, this class merges together matching spans from the different
    sub-queries when they overlap.
    """

    def __init__(self, subqs):
        """
        :param subqs: a list of queries to match.
        """

        self.q = Or(subqs)
        self.subqs = subqs

    def is_leaf(self):
        return False

    def apply(self, fn):
        return self.__class__([fn(sq) for sq in self.subqs])

    def matcher(self, searcher, context=None):
        matchers = [q.matcher(searcher, context) for q in self.subqs]
        # Fold the sub-matchers into a balanced tree of pairwise union
        # matchers.
        return make_binary_tree(SpanOr.SpanOrMatcher, matchers)

    class SpanOrMatcher(SpanBiMatcher):
        def __init__(self, a, b):
            self.a = a
            self.b = b
            um = binary.UnionMatcher(a, b)
            super(SpanOr.SpanOrMatcher, self).__init__(um)

        def _get_spans(self):
            a_active = self.a.is_active()
            b_active = self.b.is_active()

            if a_active:
                a_id = self.a.id()
                if b_active:
                    b_id = self.b.id()
                    if a_id == b_id:
                        # Both sub-matchers are on the same document:
                        # take the de-duplicated union of their spans.
                        spans = sorted(set(self.a.spans())
                                       | set(self.b.spans()))
                    elif a_id < b_id:
                        # The union is currently positioned on a's document.
                        spans = self.a.spans()
                    else:
                        # The union is currently positioned on b's document.
                        spans = self.b.spans()
                else:
                    spans = self.a.spans()
            else:
                spans = self.b.spans()

            # Merge overlapping spans before returning the list.
            Span.merge(spans)
            return spans
class SpanBiQuery(SpanQuery):
    # Intermediate base class for span query types built from an "a"/"b"
    # pair of sub-queries (SpanNot, SpanContains, SpanBefore, ...).

    def is_leaf(self):
        # Composite query: always has two children.
        return False

    def apply(self, fn):
        cls = self.__class__
        return cls(fn(self.a), fn(self.b))

    def matcher(self, searcher, context=None):
        # Build matchers for both halves and hand them to the subclass's
        # nested _Matcher implementation.
        left = self.a.matcher(searcher, context)
        right = self.b.matcher(searcher, context)
        return self._Matcher(left, right)
class SpanNot(SpanBiQuery):
    """Matches spans from the first query only if they don't overlap with
    spans from the second query. If there are no non-overlapping spans, the
    document does not match.

    For example, to match documents that contain "bear" at most 2 places after
    "apple" in the "text" field but don't have "cute" between them::

        from whoosh import query, spans
        t1 = query.Term("text", "apple")
        t2 = query.Term("text", "bear")
        near = spans.SpanNear(t1, t2, slop=2)
        q = spans.SpanNot(near, query.Term("text", "cute"))
    """

    def __init__(self, a, b):
        """
        :param a: the query to match.
        :param b: do not match any spans that overlap with spans from this
            query.
        """

        self.q = AndMaybe(a, b)
        self.a = a
        self.b = b

    class _Matcher(SpanBiMatcher):
        def __init__(self, a, b):
            self.a = a
            self.b = b
            amm = binary.AndMaybeMatcher(a, b)
            super(SpanNot._Matcher, self).__init__(amm)

        def _get_spans(self):
            if self.a.id() == self.b.id():
                # Both sub-matchers are on the same document: keep only
                # the "a" spans that touch no "b" span at all.
                bspans = self.b.spans()
                return [aspan for aspan in self.a.spans()
                        if not any(aspan.overlaps(bspan)
                                   for bspan in bspans)]
            else:
                # "b" did not match this document, so nothing can overlap;
                # every "a" span is acceptable.
                return self.a.spans()
class SpanContains(SpanBiQuery):
    """Matches documents where the spans of the first query contain any spans
    of the second query.

    For example, to match documents where "apple" occurs at most 10 places
    before "bear" in the "text" field and "cute" is between them::

        from whoosh import query, spans
        t1 = query.Term("text", "apple")
        t2 = query.Term("text", "bear")
        near = spans.SpanNear(t1, t2, slop=10)
        q = spans.SpanContains(near, query.Term("text", "cute"))
    """

    def __init__(self, a, b):
        """
        :param a: the query to match.
        :param b: the query whose spans must occur within the matching spans
            of the first query.
        """

        self.q = And([a, b])
        self.a = a
        self.b = b

    class _Matcher(SpanBiMatcher):
        def __init__(self, a, b):
            self.a = a
            self.b = b
            im = binary.IntersectionMatcher(a, b)
            super(SpanContains._Matcher, self).__init__(im)

        def _get_spans(self):
            matched = []
            inner_spans = self.b.spans()
            for outer in self.a.spans():
                for inner in inner_spans:
                    if outer.start > inner.end:
                        # This inner span ends before the outer span even
                        # starts; try the next inner span.
                        continue
                    if outer.end < inner.start:
                        # Inner spans are in start-position order, so none
                        # of the remaining ones can fall inside this outer
                        # span.
                        break
                    if inner.is_within(outer):
                        # One contained inner span is enough to keep the
                        # outer span.
                        matched.append(outer)
                        break
            return matched
class SpanBefore(SpanBiQuery):
    """Matches documents where the spans of the first query occur before any
    spans of the second query.

    For example, to match documents where "apple" occurs anywhere before
    "bear"::

        from whoosh import query, spans
        t1 = query.Term("text", "apple")
        t2 = query.Term("text", "bear")
        q = spans.SpanBefore(t1, t2)
    """

    def __init__(self, a, b):
        """
        :param a: the query that must occur before the second.
        :param b: the query that must occur after the first.
        """

        self.a = a
        self.b = b
        self.q = And([a, b])

    class _Matcher(SpanBiMatcher):
        def __init__(self, a, b):
            self.a = a
            self.b = b
            im = binary.IntersectionMatcher(a, b)
            super(SpanBefore._Matcher, self).__init__(im)

        def _get_spans(self):
            # An "a" span qualifies when it finishes before the earliest
            # "b" span begins.
            earliest_b_start = min(bspan.start for bspan in self.b.spans())
            return [aspan for aspan in self.a.spans()
                    if aspan.end < earliest_b_start]
class SpanCondition(SpanBiQuery):
    """Matches documents that satisfy both subqueries, but only uses the spans
    from the first subquery.

    This is useful when you want to place conditions on matches but not have
    those conditions affect the spans returned.

    For example, to get spans for the term ``alfa`` in documents that also
    must contain the term ``bravo``::

        SpanCondition(Term("text", u"alfa"), Term("text", u"bravo"))
    """

    def __init__(self, a, b):
        """
        :param a: the query to match; its spans are the ones reported.
        :param b: the condition query; it must also match the document,
            but its spans are ignored.
        """

        self.a = a
        self.b = b
        self.q = And([a, b])

    class _Matcher(SpanBiMatcher):
        def __init__(self, a, b):
            self.a = a
            # Also keep the condition matcher, for consistency with the
            # other SpanBiQuery matchers (e.g. SpanNot._Matcher), which all
            # store both sub-matchers; previously self.b was never set here,
            # so any inherited code touching .b would raise AttributeError.
            self.b = b
            im = binary.IntersectionMatcher(a, b)
            super(SpanCondition._Matcher, self).__init__(im)

        def _get_spans(self):
            # Only the spans of the first ("a") sub-matcher are reported.
            return self.a.spans()
| bsd-3-clause |
eRestin/Mezz | mhnweb/settings.py | 1 | 12829 | ALLOWED_HOSTS = ['*']
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for convenient
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost", "prefooter.SitewideContent",
# "generic.ThreadedComment", ("Media Library", "fb_browse"),)),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
#)
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, "Top navigation bar", "pages/menus/dropdown.html"),
# (2, "Left-hand tree", "pages/menus/tree.html"),
# (3, "Footer", "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.models.db.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# ("Image",),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# ("Another name",),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the south application will be automatically added to the
# INSTALLED_APPS setting. (South provides schema migrations for the
# Django versions this project targets.)
USE_SOUTH = True

########################
# MAIN DJANGO SETTINGS #
########################

# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'),
# ('Full Name', 'anotheremail@example.com'))
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

# Managers receive the same notifications as ADMINS by default.
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "Europe/London"

# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"

# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
# SECURITY NOTE(review): DEBUG defaults to True here; make sure production
# deployments override it in local_settings.py.
DEBUG = True

# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False

# Make this unique, and don't share it with anybody.
# SECURITY NOTE(review): this SECRET_KEY is committed to version control;
# it should be rotated and moved into local_settings.py or an environment
# variable.
SECRET_KEY = "733be3b1-37d8-47ca-8915-e31a151a6846e2e59e1b-5393-407d-8542-ee1e12114827eddfb52e-3143-4f8c-80ec-b198f42cc3eb"

# Tuple of IP addresses, as strings, that:
#   * See debug comments, when DEBUG is true
#   * Receive x-headers
INTERNAL_IPS = ("127.0.0.1",)

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    "django.template.loaders.filesystem.Loader",
    "django.template.loaders.app_directories.Loader",
)

# Mezzanine-provided authentication backend.
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    "django.contrib.staticfiles.finders.FileSystemFinder",
    "django.contrib.staticfiles.finders.AppDirectoriesFinder",
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
#############
# DATABASES #
#############
# Address used as the site contact / sender.
CONTACT_EMAIL = 'email4django@gmail.com'

# Outgoing mail is sent through Gmail's SMTP server over TLS.
# SECURITY NOTE(review): the mailbox password is hard-coded and committed
# here; it should be moved to local_settings.py or an environment variable
# and the credential rotated.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'email4django@gmail.com'
EMAIL_HOST_PASSWORD = 'trudnehaslo'
EMAIL_PORT = 587

DATABASES = {
    "default": {
        # Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
        "ENGINE": 'django.db.backends.sqlite3',  # "django.db.backends.",
        # DB name or path to database file if using sqlite3.
        "NAME": "mhnweb.db",
        # Not used with sqlite3.
        "USER": "",
        # Not used with sqlite3.
        "PASSWORD": "",
        # Set to empty string for localhost. Not used with sqlite3.
        "HOST": "",
        # Set to empty string for default. Not used with sqlite3.
        "PORT": "",
    }
}
#########
# PATHS #
#########
import os

# Full filesystem path to the project.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))

# Name of the directory for the project.
PROJECT_DIRNAME = PROJECT_ROOT.split(os.sep)[-1]

# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_DIRNAME

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
#STATIC_ROOT = ''
#STATICFILES_DIRS = (os.path.join('static'),)

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
# The URL is split into segments so os.path.join builds a platform-correct
# filesystem path.
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))

# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_DIRNAME

# Put strings here, like "/home/html/django_templates"
# or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, "templates"),)
################
# APPLICATIONS #
################
# Django contrib apps first, then the Mezzanine apps that build on them.
INSTALLED_APPS = (
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.redirects",
    "django.contrib.sessions",
    "django.contrib.sites",
    "django.contrib.sitemaps",
    "django.contrib.staticfiles",
    "mezzanine.boot",
    "mezzanine.conf",
    "mezzanine.core",
    "mezzanine.generic",
    "mezzanine.blog",
    "mezzanine.forms",
    "mezzanine.pages",
    "mezzanine.galleries",
    "mezzanine.twitter",
    #"mezzanine.accounts",
    #"mezzanine.mobile",
)

# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.contrib.messages.context_processors.messages",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.static",
    "django.core.context_processors.media",
    "django.core.context_processors.request",
    "django.core.context_processors.tz",
    "mezzanine.conf.context_processors.settings",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
# Order matters: cache-update middleware must run first on the response
# side and cache-fetch last, which is why they bracket this list.
MIDDLEWARE_CLASSES = (
    "mezzanine.core.middleware.UpdateCacheMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "mezzanine.core.request.CurrentRequestMiddleware",
    "mezzanine.core.middleware.RedirectFallbackMiddleware",
    "mezzanine.core.middleware.TemplateForDeviceMiddleware",
    "mezzanine.core.middleware.TemplateForHostMiddleware",
    "mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
    "mezzanine.core.middleware.SitePermissionMiddleware",
    # Uncomment the following if using any of the SSL settings:
    # "mezzanine.core.middleware.SSLRedirectMiddleware",
    "mezzanine.pages.middleware.PageMiddleware",
    "mezzanine.core.middleware.FetchFromCacheMiddleware",
)

# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"

#########################
# OPTIONAL APPLICATIONS #
#########################

# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
    "debug_toolbar",
    "django_extensions",
    "compressor",
    PACKAGE_NAME_FILEBROWSER,
    PACKAGE_NAME_GRAPPELLI,
)

# Let debug-toolbar's redirect pages through instead of intercepting them.
DEBUG_TOOLBAR_CONFIG = {"INTERCEPT_REDIRECTS": False}
###################
# DEPLOY SETTINGS #
###################
# These settings are used by the default fabfile.py provided.
# Check fabfile.py for defaults.
# FABRIC = {
# "SSH_USER": "", # SSH username
# "SSH_PASS": "", # SSH password (consider key-based authentication)
# "SSH_KEY_PATH": "", # Local path to SSH key file, for key-based auth
# "HOSTS": [], # List of hosts to deploy to
# "VIRTUALENV_HOME": "", # Absolute remote path for virtualenvs
# "PROJECT_NAME": "", # Unique identifier for project
# "REQUIREMENTS_PATH": "", # Path to pip requirements, relative to project
# "GUNICORN_PORT": 8000, # Port gunicorn will listen on
# "LOCALE": "en_US.UTF-8", # Should end with ".UTF-8"
# "LIVE_HOSTNAME": "www.example.com", # Host for public site.
# "REPO_URL": "", # Git or Mercurial remote repo URL for the project
# "DB_PASS": "", # Live database password
# "ADMIN_PASS": "", # Live admin user password
# }
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
# The wildcard import deliberately lets per-machine values in
# local_settings.py override anything defined above.
try:
    from local_settings import *
except ImportError:
    pass

####################
# DYNAMIC SETTINGS #
####################

# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
    from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
    pass
else:
    set_dynamic_settings(globals())
| bsd-2-clause |
ldirer/scikit-learn | examples/classification/plot_lda_qda.py | 32 | 5381 | """
====================================================================
Linear and Quadratic Discriminant Analysis with covariance ellipsoid
====================================================================
This example plots the covariance ellipsoids of each class and
decision boundary learned by LDA and QDA. The ellipsoids display
the double standard deviation for each class. With LDA, the
standard deviation is the same for all the classes, while each
class has its own standard deviation with QDA.
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
###############################################################################
# colormap
# Custom red/blue colormap: class 0 shades toward red, class 1 toward
# blue, meeting at a neutral grey in the middle (used for the
# predict_proba background in plot_data).
cmap = colors.LinearSegmentedColormap(
    'red_blue_classes',
    {'red': [(0, 1, 1), (1, 0.7, 0.7)],
     'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
     'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
# Register under its name so plots can refer to cmap='red_blue_classes'.
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
    """Generate 2 Gaussians samples with the same covariance matrix."""
    n_samples, n_features = 300, 2
    np.random.seed(0)
    # Both classes share the same covariance factor C; the second class is
    # shifted by (1, 1).
    C = np.array([[0., -0.23], [0.83, .23]])
    class0 = np.dot(np.random.randn(n_samples, n_features), C)
    class1 = np.dot(np.random.randn(n_samples, n_features), C) + np.array([1, 1])
    X = np.concatenate((class0, class1))
    y = np.hstack((np.zeros(n_samples), np.ones(n_samples)))
    return X, y
def dataset_cov():
    """Generate 2 Gaussians samples with different covariance matrices."""
    n_samples, n_features = 300, 2
    np.random.seed(0)
    # The two classes use C and its transpose respectively, giving them
    # different covariances; the second class is shifted by (1, 4).
    C = np.array([[0., -1.], [2.5, .7]]) * 2.
    class0 = np.dot(np.random.randn(n_samples, n_features), C)
    class1 = np.dot(np.random.randn(n_samples, n_features), C.T) + np.array([1, 4])
    X = np.concatenate((class0, class1))
    y = np.hstack((np.zeros(n_samples), np.ones(n_samples)))
    return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
    """Plot one dataset/classifier pair into a 2x2 subplot grid.

    Correctly classified points are drawn as dots and misclassified points
    as stars in a darker shade; the classifier's probability field, its
    decision boundary (the P=0.5 contour) and the class means are overlaid.
    Returns the subplot axes.
    """
    splot = plt.subplot(2, 2, fig_index)
    # Titles label the columns (LDA vs QDA); y-labels label the rows
    # (fixed vs varying covariance datasets).
    if fig_index == 1:
        plt.title('Linear Discriminant Analysis')
        plt.ylabel('Data with fixed covariance')
    elif fig_index == 2:
        plt.title('Quadratic Discriminant Analysis')
    elif fig_index == 3:
        plt.ylabel('Data with varying covariances')

    # Split each class into correctly and incorrectly predicted points.
    tp = (y == y_pred)  # True Positive
    tp0, tp1 = tp[y == 0], tp[y == 1]
    X0, X1 = X[y == 0], X[y == 1]
    X0_tp, X0_fp = X0[tp0], X0[~tp0]
    X1_tp, X1_fp = X1[tp1], X1[~tp1]
    alpha = 0.5

    # class 0: dots
    plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', alpha=alpha,
             color='red')
    plt.plot(X0_fp[:, 0], X0_fp[:, 1], '*', alpha=alpha,
             color='#990000')  # dark red

    # class 1: dots
    plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', alpha=alpha,
             color='blue')
    plt.plot(X1_fp[:, 0], X1_fp[:, 1], '*', alpha=alpha,
             color='#000099')  # dark blue

    # class 0 and 1 : areas
    # Evaluate P(class 1 | x) on a grid covering the current axis limits,
    # shade it with the custom red/blue colormap, and draw the 0.5
    # iso-contour (the decision boundary).
    nx, ny = 200, 100
    x_min, x_max = plt.xlim()
    y_min, y_max = plt.ylim()
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
                         np.linspace(y_min, y_max, ny))
    Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
    Z = Z[:, 1].reshape(xx.shape)
    plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
                   norm=colors.Normalize(0., 1.))
    plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')

    # means
    plt.plot(lda.means_[0][0], lda.means_[0][1],
             'o', color='black', markersize=10)
    plt.plot(lda.means_[1][0], lda.means_[1][1],
             'o', color='black', markersize=10)

    return splot
def plot_ellipse(splot, mean, cov, color):
    """Draw a Gaussian's two-standard-deviation ellipse onto *splot*."""
    eigvals, eigvecs = linalg.eigh(cov)
    # Orientation of the first eigenvector, converted to degrees.
    direction = eigvecs[0] / linalg.norm(eigvecs[0])
    angle = 180 * np.arctan(direction[1] / direction[0]) / np.pi
    # Filled Gaussian at 2 standard deviations.
    width = 2 * eigvals[0] ** 0.5
    height = 2 * eigvals[1] ** 0.5
    ell = mpl.patches.Ellipse(mean, width, height, 180 + angle,
                              facecolor=color, edgecolor='yellow',
                              linewidth=2, zorder=2)
    ell.set_clip_box(splot.bbox)
    ell.set_alpha(0.5)
    splot.add_artist(ell)
    splot.set_xticks(())
    splot.set_yticks(())
def plot_lda_cov(lda, splot):
    """Draw the single shared LDA covariance ellipse for both class means."""
    shared_cov = lda.covariance_
    plot_ellipse(splot, lda.means_[0], shared_cov, 'red')
    plot_ellipse(splot, lda.means_[1], shared_cov, 'blue')
def plot_qda_cov(qda, splot):
    """Draw each class's own QDA covariance ellipse."""
    for class_idx, class_color in ((0, 'red'), (1, 'blue')):
        plot_ellipse(splot, qda.means_[class_idx],
                     qda.covariances_[class_idx], class_color)
###############################################################################
# Row i=0: dataset with a shared covariance; row i=1: dataset with
# per-class covariances. Each row shows LDA on the left, QDA on the right.
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
    # Linear Discriminant Analysis
    lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
    y_pred = lda.fit(X, y).predict(X)
    splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
    plot_lda_cov(lda, splot)
    plt.axis('tight')

    # Quadratic Discriminant Analysis
    qda = QuadraticDiscriminantAnalysis(store_covariances=True)
    y_pred = qda.fit(X, y).predict(X)
    splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
    plot_qda_cov(qda, splot)
    plt.axis('tight')

plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis')
plt.show()
| bsd-3-clause |
wwj718/edx-platform | openedx/core/djangoapps/programs/models.py | 1 | 3607 | """Models providing Programs support for the LMS and Studio."""
from collections import namedtuple
from urlparse import urljoin
from django.utils.translation import ugettext_lazy as _
from django.db import models
from config_models.models import ConfigurationModel
AuthoringAppConfig = namedtuple('AuthoringAppConfig', ['js_url', 'css_url'])
class ProgramsApiConfig(ConfigurationModel):
    """
    Manages configuration for connecting to the Programs service and using its
    API.
    """
    # Identifiers used by client code elsewhere: the OAuth2 client name
    # registered for this service, the cache key under which API responses
    # are stored, and the API's name.
    OAUTH2_CLIENT_NAME = 'programs'
    CACHE_KEY = 'programs.api.data'
    API_NAME = 'programs'

    # Version segment used when building the /api/vN/ URLs below.
    api_version_number = models.IntegerField(verbose_name=_("API Version"))

    # Base URL used for server-to-server calls.
    internal_service_url = models.URLField(verbose_name=_("Internal Service URL"))
    # Base URL exposed to browsers (e.g. for the authoring app assets).
    public_service_url = models.URLField(verbose_name=_("Public Service URL"))

    authoring_app_js_path = models.CharField(
        verbose_name=_("Path to authoring app's JS"),
        max_length=255,
        blank=True,
        help_text=_(
            "This value is required in order to enable the Studio authoring interface."
        )
    )

    authoring_app_css_path = models.CharField(
        verbose_name=_("Path to authoring app's CSS"),
        max_length=255,
        blank=True,
        help_text=_(
            "This value is required in order to enable the Studio authoring interface."
        )
    )

    cache_ttl = models.PositiveIntegerField(
        verbose_name=_("Cache Time To Live"),
        default=0,
        help_text=_(
            "Specified in seconds. Enable caching by setting this to a value greater than 0."
        )
    )

    enable_student_dashboard = models.BooleanField(
        verbose_name=_("Enable Student Dashboard Displays"),
        default=False
    )

    enable_studio_tab = models.BooleanField(
        verbose_name=_("Enable Studio Authoring Interface"),
        default=False
    )

    @property
    def internal_api_url(self):
        """
        Generate a URL based on internal service URL and API version number.

        Note: because the path starts with "/", urljoin replaces any path
        component of the service URL rather than appending to it.
        """
        return urljoin(self.internal_service_url, '/api/v{}/'.format(self.api_version_number))

    @property
    def public_api_url(self):
        """
        Generate a URL based on public service URL and API version number.
        """
        return urljoin(self.public_service_url, '/api/v{}/'.format(self.api_version_number))

    @property
    def authoring_app_config(self):
        """
        Returns a named tuple containing information required for working with the Programs
        authoring app, a Backbone app hosted by the Programs service.
        """
        js_url = urljoin(self.public_service_url, self.authoring_app_js_path)
        css_url = urljoin(self.public_service_url, self.authoring_app_css_path)

        return AuthoringAppConfig(js_url=js_url, css_url=css_url)

    @property
    def is_cache_enabled(self):
        """Whether responses from the Programs API will be cached."""
        return self.cache_ttl > 0

    @property
    def is_student_dashboard_enabled(self):
        """
        Indicates whether LMS dashboard functionality related to Programs should
        be enabled or not.
        """
        return self.enabled and self.enable_student_dashboard

    @property
    def is_studio_tab_enabled(self):
        """
        Indicates whether Studio functionality related to Programs should
        be enabled or not.

        Both asset paths must be configured, since the Studio tab cannot
        load the authoring app without them.
        """
        return (
            self.enabled and
            self.enable_studio_tab and
            bool(self.authoring_app_js_path) and
            bool(self.authoring_app_css_path)
        )
| agpl-3.0 |
quaddra/engage-utils | engage_utils/test_wakeable_queue.py | 1 | 5086 | """Test the wakeable_queue.Queue class
"""
import unittest
import os
import sys
import random
import logging
import threading
from time import sleep
logger = logging.getLogger(__name__)
from wakeable_queue import Queue, AbortRequested, WakeableQueueWorker, WorkerStatus
# Seed the PRNG once at import time; the tests below pick random poison-pill slots.
random.seed()
STOP_MSG = []  # sentinel batch that tells workers to shut down cleanly
class Worker(WakeableQueueWorker):
    """Test worker that recirculates or consumes batches.

    A batch whose first element is >= 0 is a normal message; -1 makes the
    worker request a queue-wide abort; -2 makes it raise an exception.
    Both poison pills are used to exercise error handling in the master.
    """

    def __init__(self, worker_id, queue, consume_messages=False):
        WakeableQueueWorker.__init__(self, worker_id, queue, STOP_MSG, logger)
        self.consume_messages = consume_messages
        if consume_messages:
            logger.info("Worker %s will consume messages" % worker_id)

    def process_batch(self, data):
        marker = data[0]
        if marker >= 0:
            # Normal message: simulate some work, then either swallow the
            # batch or push it back for another worker to pick up.
            sleep(0.1)
            if self.consume_messages:
                logger.info("worker %s consuming message %s" % (self.worker_id, marker))
            else:
                self.queue.put(data)
        elif marker == -1:
            # Poison pill: request an abort of the whole queue.
            logger.info("Worker %s requesting an abort" % self.worker_id)
            self.status = WorkerStatus.ABORT_REQUESTED
            self.queue.abort_request()
        elif marker == -2:
            # Poison pill: blow up inside batch processing.
            raise Exception("Expected error")
        else:
            assert 0
NUM_WORKERS = 4  # default worker-pool size used by the test fixtures below
class TestWakeableQueue(unittest.TestCase):
    """Exercises Queue/WakeableQueueWorker under normal, abort and error paths.

    Fixes: the "Runing" typo in the per-test log messages, and factors the
    two nearly-identical abort-style scenarios into a shared helper.
    """

    def setUp(self):
        self.queue = Queue()
        self.workers = [Worker(i, self.queue, False) for i in range(NUM_WORKERS)]

    def _assert_all_stopped(self):
        """Every worker must have reached a clean STOPPED state."""
        for w in self.workers:
            self.assertTrue(w.status==WorkerStatus.STOPPED,
                            "Worker %s not stopped, status was %s" %
                            (w.worker_id, w.status))

    def _run_expecting_abort(self, poison_msg):
        """Queue 100 batches with one poison pill at a random slot, run the
        workers, and verify the master sees AbortRequested and every worker
        ends up aborted (or with an abort request pending)."""
        poison_pill = random.randint(25, 80)
        for i in range(100):
            if i == poison_pill:
                logger.info("poison pill is at %d" % poison_pill)
                self.queue.put(poison_msg)
            else:
                self.queue.put([1, 2, 3, 4, 5])
        try:
            WakeableQueueWorker.run_workers(self.workers, self.queue, STOP_MSG,
                                            logger)
            self.assertTrue(False, "Did not get the expected abort request")
        except AbortRequested:
            logger.info("Master got AbortRequested, as expected")
        for w in self.workers:
            self.assertTrue(w.status==WorkerStatus.ABORTED or
                            w.status==WorkerStatus.ABORT_REQUESTED,
                            "Worker %s has status %s, expecting an abort" % (w.worker_id, w.status))
        logger.info("All workers aborted as expected")

    def test_normal_processing(self):
        logger.info("Running test_normal_processing")
        # Add one consuming worker so recirculated messages eventually drain.
        self.workers.append(Worker(NUM_WORKERS, self.queue, consume_messages=True))
        for i in range(100):
            self.queue.put([i+1, 2, 3, 4, 5])
        WakeableQueueWorker.run_workers(self.workers, self.queue, STOP_MSG,
                                        logger)
        self._assert_all_stopped()

    def test_abort_processing(self):
        logger.info("Running test_abort_processing")
        self._run_expecting_abort([-1])  # -1 makes a worker request an abort

    def test_exception_processing(self):
        logger.info("Running test_exception_processing")
        self._run_expecting_abort([-2])  # -2 makes a worker raise an exception

    def test_single_batch(self):
        """Test case where we have two workers, both competing for a single
        batch.
        """
        logger.info("Running test_single_batch")
        self.workers = [Worker(i, self.queue, True) for i in range(2)]
        self.queue.put([1, 2, 3, 4, 5])
        WakeableQueueWorker.run_workers(self.workers, self.queue, STOP_MSG,
                                        logger)
        self._assert_all_stopped()
if __name__ == '__main__':
    # Run at DEBUG so the workers' progress messages are visible.
    logging.basicConfig(level=logging.DEBUG)
    unittest.main(argv=sys.argv)
| apache-2.0 |
hopeall/odoo | addons/l10n_us/__init__.py | 893 | 1045 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tavallaie/RoboDynamixel | dxl/dxlregisters.py | 1 | 1221 | #!/usr/bin/env python
# Dynamixel library for MX28 and MX64
# WINDOWS WARNING: For best performance, parameters of the COM Port should be set to maximum baud rate, and 1ms delay (Device Manager, COM Ports, properties, advanced)
class DxlRegister():
    """Descriptor for one Dynamixel control-table register.

    Captures the register's location and layout (address, size in bytes),
    its access mode and storage class (RAM vs EEPROM), converter callables
    between raw Dynamixel byte lists and values (fromdxl/todxl), converters
    between device units and SI units (fromsi/tosi), and an optional
    valid-value range.
    """

    def __init__(self, address, size, mode='r', eeprom=False,
                 fromdxl=lambda x: x, todxl=lambda x: x,
                 fromsi=lambda x: x, tosi=lambda x: x, range=None):
        # Location and layout on the device.
        self.address = address
        self.size = size
        self.mode = mode
        self.eeprom = eeprom
        # Raw byte-list <-> value converters.
        self.fromdxl = fromdxl
        self.todxl = todxl
        # Device units <-> SI units converters.
        self.fromsi = fromsi
        self.tosi = tosi
        # Optional (min, max) of accepted values; None means unconstrained.
        self.range = range
class DxlRegisterByte(DxlRegister):
    """One-byte register: the raw value is a single-element byte list."""

    def __init__(self, address, mode='r', eeprom=False,
                 fromsi=lambda x: x, tosi=lambda x: x, range=None):
        DxlRegister.__init__(
            self, address, 1, mode, eeprom,
            fromdxl=lambda x: x[0],  # unwrap the single byte
            todxl=lambda x: [x],     # wrap the value into a byte list
            range=range, fromsi=fromsi, tosi=tosi)
class DxlRegisterWord(DxlRegister):
    """Two-byte register: the raw value is a little-endian pair of bytes."""

    def __init__(self, address, mode='r', eeprom=False,
                 fromsi=lambda x: x, tosi=lambda x: x, range=None):
        DxlRegister.__init__(
            self, address, 2, mode, eeprom,
            # little-endian: low byte first
            fromdxl=lambda x: x[0] + (x[1] << 8),
            todxl=lambda x: [int(x) & 0xFF, (int(x) >> 8) & 0xFF],
            range=range, fromsi=fromsi, tosi=tosi)
| mit |
trmznt/genaf | genaf/views/utils/plot.py | 1 | 3274 |
# general plot / graphics utility using matplotlib
from genaf.views.tools import *
from matplotlib import pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import pandas
import io, base64
@roles( PUBLIC )
def index(request):
    """Entry point for the plotting utility.

    Without an executing method in the query string, render the data-entry
    form; otherwise parse the submitted data and render the requested plot.
    """
    method = request.GET.get('_method', None)
    if method not in ['_exec', '_dfexec']:
        # No plot requested yet: show the input form.
        pform, jscode = create_form( request )
        return render_to_response('genaf:templates/utils/index.mako',
            { 'title': 'Plotting Utility',
                'html': pform,
                'code': jscode,
            }, request = request )

    # BUG FIX: this branch previously tested request.GET.get('method'),
    # which is never set (the gate above uses '_method'), so the dataframe
    # path was unreachable.
    if method == '_dfexec':
        df = parse_df(request.GET.get('dfdata'))
    else:
        df = parse_textdata(request.GET.get('textdata'))

    plot_type = request.GET.get('plot_type')
    if plot_type == 'B':
        html, jscode = column_chart(df)
    elif plot_type == 'S':
        return error_page(request, 'Scatter plot not implemented yet')
    elif plot_type == 'P':
        html, jscode = pie_chart(df)
    else:
        # Previously an unknown plot type fell through and crashed with an
        # unbound 'html'; report it explicitly instead.
        return error_page(request, 'Unknown plot type: %s' % plot_type)

    return render_to_response('genaf:templates/utils/index.mako',
        { 'title': 'Plot',
            'html': html,
            'code': jscode,
        }, request = request )
def create_form(request):
    """Build the data-entry form for the plotting utility.

    Returns a (html_form, javascript_code) pair; no JS is needed here.
    """
    plot_choices = [
        ('B', 'Bar (vertical) / column chart'),
        ('S', 'Scatter x,y plot'),
        ('P', 'Pie chart'),
    ]
    pform = form(name='plotform', action='#')
    pform.add(
        fieldset(name='data')[
            input_textarea('textdata', label='Data'),
        ],
        fieldset(name='options')[
            input_select(name='plot_type', label='Plot type', value='B',
                options = plot_choices ),
        ],
        fieldset()[ submit_bar('Create plot', '_exec')]
    )
    return (pform, '')
def parse_textdata(textdata):
    """Parse pasted text into a pandas DataFrame.

    The first line holds '|'-separated column names; the remaining
    (tab-separated) lines are data rows.  Header-only input yields an
    empty DataFrame instead of crashing.
    """
    # partition() tolerates input without a newline; split('\n', 1) raised
    # ValueError when only a header line was supplied.
    header, _, content = textdata.partition('\n')
    columns = [x.strip() for x in header.split('|')]
    if not content.strip():
        # No data rows: read_table would raise EmptyDataError here.
        return pandas.DataFrame(columns=columns)
    buff = io.StringIO(content)
    return pandas.read_table(buff, header=None, names=columns)
def save_figure(canvas):
    """Render a matplotlib canvas into an inline HTML <img> tag.

    Returns (html, javascript) where the image is embedded as a
    base64-encoded PNG data URI and no JS is required.
    """
    png_buffer = io.BytesIO()
    canvas.print_figure(png_buffer)
    encoded = base64.b64encode(png_buffer.getvalue()).decode('ASCII')
    fig_html = literal('<img src="data:image/png;base64,%s" >' % encoded)
    return fig_html, ''
def column_chart(df):
    """Render a vertical bar chart: first column = labels, second = heights."""
    figure = Figure()
    canvas = FigureCanvas(figure)
    axes = figure.add_subplot(111)
    axes.bar(df.index, df.iloc[:, 1], align='center')
    axes.set_xlabel(df.columns[0])
    # Use row positions as tick locations and the first column as labels.
    axes.set_xticks(df.index)
    axes.set_xticklabels(df.iloc[:, 0], rotation='vertical')
    axes.set_ylabel(df.columns[1])
    figure.tight_layout()
    return save_figure(canvas)
def pie_chart(df):
    """Render a pie chart: first column = labels, second = wedge sizes."""
    figure = Figure()
    canvas = FigureCanvas(figure)
    # aspect=1 keeps the pie circular.
    axes = figure.add_subplot(111, aspect=1)
    axes.pie(df.iloc[:, 1], labels=df.iloc[:, 0], counterclock=False, startangle=90)
    axes.set_xlabel(df.columns[0])
    figure.tight_layout()
    return save_figure(canvas)
| lgpl-3.0 |
yufengg/tensorflow | tensorflow/contrib/labeled_tensor/__init__.py | 144 | 4001 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Labels for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.labeled_tensor.python.ops import core as _core
from tensorflow.contrib.labeled_tensor.python.ops import io_ops as _io_ops
from tensorflow.contrib.labeled_tensor.python.ops import nn
from tensorflow.contrib.labeled_tensor.python.ops import ops as _ops
from tensorflow.contrib.labeled_tensor.python.ops import sugar as _sugar
# pylint: disable=invalid-name

# Core types.
Axis = _core.Axis
Axes = _core.Axes
LabeledTensor = _core.LabeledTensor

as_axis = _core.as_axis
convert_to_labeled_tensor = _core.convert_to_labeled_tensor

identity = _core.identity
slice = _core.slice_function  # pylint: disable=redefined-builtin
transpose = _core.transpose
expand_dims = _core.expand_dims
align = _core.align

axis_order_scope = _core.axis_order_scope
check_axis_order = _core.check_axis_order
impose_axis_order = _core.impose_axis_order
AxisOrderError = _core.AxisOrderError

# Helpers for wrapping additional TF ops so they accept/return LabeledTensors.
define_unary_op = _core.define_unary_op
define_binary_op = _core.define_binary_op
define_reduce_op = _ops.define_reduce_op

# Unary elementwise math ops.
abs = _core.abs_function  # pylint: disable=redefined-builtin
neg = _core.neg
sign = _core.sign
reciprocal = _core.reciprocal
square = _core.square
round = _core.round_function  # pylint: disable=redefined-builtin
sqrt = _core.sqrt
rsqrt = _core.rsqrt
exp = _core.exp
log = _core.log
ceil = _core.ceil
floor = _core.floor
cos = _core.cos
sin = _core.sin
tan = _core.tan
acos = _core.acos
asin = _core.asin
atan = _core.atan
lgamma = _core.lgamma
digamma = _core.digamma
erf = _core.erf
erfc = _core.erfc
logical_not = _core.logical_not
tanh = _core.tanh
sigmoid = _core.sigmoid

# Binary elementwise ops and comparisons.
add = _core.add
sub = _core.sub
mul = _core.mul
div = _core.div
mod = _core.mod
pow = _core.pow_function  # pylint: disable=redefined-builtin
equal = _core.equal
greater = _core.greater
greater_equal = _core.greater_equal
not_equal = _core.not_equal
less = _core.less
less_equal = _core.less_equal
logical_and = _core.logical_and
logical_or = _core.logical_or
logical_xor = _core.logical_xor
maximum = _core.maximum
minimum = _core.minimum
squared_difference = _core.squared_difference
igamma = _core.igamma
igammac = _core.igammac
zeta = _core.zeta
polygamma = _core.polygamma

# Shape and structure manipulation.
select = _ops.select
concat = _ops.concat
pack = _ops.pack
unpack = _ops.unpack
reshape = _ops.reshape
rename_axis = _ops.rename_axis
random_crop = _ops.random_crop
map_fn = _ops.map_fn
foldl = _ops.foldl
squeeze = _ops.squeeze
matmul = _ops.matmul
tile = _ops.tile
pad = _ops.pad
constant = _ops.constant
zeros_like = _ops.zeros_like
ones_like = _ops.ones_like
cast = _ops.cast
verify_tensor_all_finite = _ops.verify_tensor_all_finite
boolean_mask = _ops.boolean_mask
where = _ops.where

# Reductions.
reduce_all = _ops.reduce_all
reduce_any = _ops.reduce_any
reduce_logsumexp = _ops.reduce_logsumexp
reduce_max = _ops.reduce_max
reduce_mean = _ops.reduce_mean
reduce_min = _ops.reduce_min
reduce_prod = _ops.reduce_prod
reduce_sum = _ops.reduce_sum

# Input pipeline and parsing ops.
batch = _ops.batch
shuffle_batch = _ops.shuffle_batch
FixedLenFeature = _io_ops.FixedLenFeature
parse_example = _io_ops.parse_example
parse_single_example = _io_ops.parse_single_example
placeholder = _io_ops.placeholder

ReshapeCoder = _sugar.ReshapeCoder
Azure/azure-sdk-for-python | sdk/confidentialledger/azure-mgmt-confidentialledger/azure/mgmt/confidentialledger/aio/_configuration.py | 1 | 3287 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ConfidentialLedgerConfiguration(Configuration):
    """Configuration for ConfidentialLedger.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The Azure subscription ID. This is a GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000).
    :type subscription_id: str
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        **kwargs: Any
    ) -> None:
        # Fail fast on the two mandatory parameters, in declaration order.
        for name, value in (("credential", credential), ("subscription_id", subscription_id)):
            if value is None:
                raise ValueError("Parameter '{}' must not be None.".format(name))
        super(ConfidentialLedgerConfiguration, self).__init__(**kwargs)

        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = "2020-12-01-preview"
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-confidentialledger/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs: Any
    ) -> None:
        # Honor caller-supplied policies; otherwise build the defaults.
        supplied = kwargs.get
        self.user_agent_policy = supplied('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = supplied('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = supplied('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = supplied('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = supplied('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = supplied('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = supplied('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = supplied('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
        # Authentication has no default unless a credential is available.
        self.authentication_policy = supplied('authentication_policy')
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| mit |
mzupan/Diamond | src/collectors/filestat/test/testfilestat.py | 31 | 1836 | #!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from diamond.collector import Collector
from filestat import FilestatCollector
##########################################################################
class TestFilestatCollector(CollectorTestCase):
    """Unit tests for FilestatCollector (reads /proc/sys/fs/file-nr)."""

    def setUp(self):
        settings = {'interval': 10}
        config = get_collector_config('FilestatCollector', settings)
        self.collector = FilestatCollector(config, None)

    def test_import(self):
        self.assertTrue(FilestatCollector)

    @patch('__builtin__.open')
    @patch('os.access', Mock(return_value=True))
    @patch.object(Collector, 'publish')
    def test_should_open_proc_sys_fs_file_nr(self, publish_mock, open_mock):
        # The collector must read the kernel's file-handle statistics file.
        open_mock.return_value = StringIO('')
        self.collector.collect()
        open_mock.assert_called_once_with('/proc/sys/fs/file-nr')

    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
        FilestatCollector.PROC = self.getFixturePath('proc_sys_fs_file-nr')
        self.collector.collect()

        expected = {
            'assigned': 576,
            'unused': 0,
            'max': 4835852,
        }

        self.setDocExample(collector=self.collector.__class__.__name__,
                           metrics=expected,
                           defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, expected)
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| mit |
RO-ny9/python-for-android | python3-alpha/python3-src/Lib/distutils/tests/test_build_ext.py | 46 | 17712 | import sys
import os
from io import StringIO
import textwrap
from distutils.core import Distribution
from distutils.command.build_ext import build_ext
from distutils import sysconfig
from distutils.tests.support import (TempdirManager, LoggingSilencer,
copy_xxmodule_c, fixup_build_ext)
from distutils.extension import Extension
from distutils.errors import (
CompileError, DistutilsPlatformError, DistutilsSetupError,
UnknownFileError)
import unittest
from test import support
# http://bugs.python.org/issue4373
# Don't load the xx module more than once.
ALREADY_TESTED = False
class BuildExtTestCase(TempdirManager,
                       LoggingSilencer,
                       unittest.TestCase):
    """Tests for the distutils 'build_ext' command.

    Several tests invoke a real compiler via xxmodule.c; setUp/tearDown
    sandbox sys.path and site.USER_BASE so builds happen in temp dirs.
    """

    def setUp(self):
        # Create a simple test environment
        # Note that we're making changes to sys.path
        super(BuildExtTestCase, self).setUp()
        self.tmp_dir = self.mkdtemp()
        # Keep both the original list object and a copy of its contents so
        # tearDown can restore sys.path in place.
        self.sys_path = sys.path, sys.path[:]
        sys.path.append(self.tmp_dir)
        if sys.version > "2.6":
            import site
            self.old_user_base = site.USER_BASE
            site.USER_BASE = self.mkdtemp()
            from distutils.command import build_ext
            build_ext.USER_BASE = site.USER_BASE

    def test_build_ext(self):
        # Build the real xxmodule.c sample extension and exercise it.
        global ALREADY_TESTED
        copy_xxmodule_c(self.tmp_dir)
        xx_c = os.path.join(self.tmp_dir, 'xxmodule.c')
        xx_ext = Extension('xx', [xx_c])
        dist = Distribution({'name': 'xx', 'ext_modules': [xx_ext]})
        dist.package_dir = self.tmp_dir
        cmd = build_ext(dist)
        fixup_build_ext(cmd)
        cmd.build_lib = self.tmp_dir
        cmd.build_temp = self.tmp_dir

        old_stdout = sys.stdout
        if not support.verbose:
            # silence compiler output
            sys.stdout = StringIO()
        try:
            cmd.ensure_finalized()
            cmd.run()
        finally:
            sys.stdout = old_stdout

        # The xx module can only be imported/checked once per process.
        if ALREADY_TESTED:
            return
        else:
            ALREADY_TESTED = True

        import xx

        for attr in ('error', 'foo', 'new', 'roj'):
            self.assertTrue(hasattr(xx, attr))

        self.assertEqual(xx.foo(2, 5), 7)
        self.assertEqual(xx.foo(13,15), 28)
        self.assertEqual(xx.new().demo(), None)
        doc = 'This is a template module just for instruction.'
        self.assertEqual(xx.__doc__, doc)
        self.assertTrue(isinstance(xx.Null(), xx.Null))
        self.assertTrue(isinstance(xx.Str(), xx.Str))

    def tearDown(self):
        # Get everything back to normal
        support.unload('xx')
        sys.path = self.sys_path[0]
        sys.path[:] = self.sys_path[1]
        if sys.version > "2.6":
            import site
            site.USER_BASE = self.old_user_base
            from distutils.command import build_ext
            build_ext.USER_BASE = self.old_user_base
        super(BuildExtTestCase, self).tearDown()

    def test_solaris_enable_shared(self):
        dist = Distribution({'name': 'xx'})
        cmd = build_ext(dist)
        old = sys.platform

        sys.platform = 'sunos' # fooling finalize_options
        from distutils.sysconfig import _config_vars
        old_var = _config_vars.get('Py_ENABLE_SHARED')
        _config_vars['Py_ENABLE_SHARED'] = 1
        try:
            cmd.ensure_finalized()
        finally:
            # Restore the patched globals regardless of outcome.
            sys.platform = old
            if old_var is None:
                del _config_vars['Py_ENABLE_SHARED']
            else:
                _config_vars['Py_ENABLE_SHARED'] = old_var

        # make sure we get some library dirs under solaris
        self.assertTrue(len(cmd.library_dirs) > 0)

    def test_user_site(self):
        # site.USER_SITE was introduced in 2.6
        if sys.version < '2.6':
            return

        import site
        dist = Distribution({'name': 'xx'})
        cmd = build_ext(dist)

        # making sure the user option is there
        options = [name for name, short, lable in
                   cmd.user_options]
        self.assertTrue('user' in options)

        # setting a value
        cmd.user = 1

        # setting user based lib and include
        lib = os.path.join(site.USER_BASE, 'lib')
        incl = os.path.join(site.USER_BASE, 'include')
        os.mkdir(lib)
        os.mkdir(incl)

        # let's run finalize
        cmd.ensure_finalized()

        # see if include_dirs and library_dirs
        # were set
        self.assertIn(lib, cmd.library_dirs)
        self.assertIn(lib, cmd.rpath)
        self.assertIn(incl, cmd.include_dirs)

    def test_optional_extension(self):
        # this extension will fail, but let's ignore this failure
        # with the optional argument.
        modules = [Extension('foo', ['xxx'], optional=False)]
        dist = Distribution({'name': 'xx', 'ext_modules': modules})
        cmd = build_ext(dist)
        cmd.ensure_finalized()
        self.assertRaises((UnknownFileError, CompileError),
                          cmd.run)  # should raise an error

        modules = [Extension('foo', ['xxx'], optional=True)]
        dist = Distribution({'name': 'xx', 'ext_modules': modules})
        cmd = build_ext(dist)
        cmd.ensure_finalized()
        cmd.run()  # should pass

    def test_finalize_options(self):
        # Make sure Python's include directories (for Python.h, pyconfig.h,
        # etc.) are in the include search path.
        modules = [Extension('foo', ['xxx'], optional=False)]
        dist = Distribution({'name': 'xx', 'ext_modules': modules})
        cmd = build_ext(dist)
        cmd.finalize_options()

        from distutils import sysconfig
        py_include = sysconfig.get_python_inc()
        self.assertTrue(py_include in cmd.include_dirs)

        plat_py_include = sysconfig.get_python_inc(plat_specific=1)
        self.assertTrue(plat_py_include in cmd.include_dirs)

        # make sure cmd.libraries is turned into a list
        # if it's a string
        cmd = build_ext(dist)
        cmd.libraries = 'my_lib'
        cmd.finalize_options()
        self.assertEqual(cmd.libraries, ['my_lib'])

        # make sure cmd.library_dirs is turned into a list
        # if it's a string
        cmd = build_ext(dist)
        cmd.library_dirs = 'my_lib_dir'
        cmd.finalize_options()
        self.assertTrue('my_lib_dir' in cmd.library_dirs)

        # make sure rpath is turned into a list
        # if it's a list of os.pathsep's paths
        cmd = build_ext(dist)
        cmd.rpath = os.pathsep.join(['one', 'two'])
        cmd.finalize_options()
        self.assertEqual(cmd.rpath, ['one', 'two'])

        # XXX more tests to perform for win32

        # make sure define is turned into 2-tuples
        # strings if they are ','-separated strings
        cmd = build_ext(dist)
        cmd.define = 'one,two'
        cmd.finalize_options()
        self.assertEqual(cmd.define, [('one', '1'), ('two', '1')])

        # make sure undef is turned into a list of
        # strings if they are ','-separated strings
        cmd = build_ext(dist)
        cmd.undef = 'one,two'
        cmd.finalize_options()
        self.assertEqual(cmd.undef, ['one', 'two'])

        # make sure swig_opts is turned into a list
        cmd = build_ext(dist)
        cmd.swig_opts = None
        cmd.finalize_options()
        self.assertEqual(cmd.swig_opts, [])

        cmd = build_ext(dist)
        cmd.swig_opts = '1 2'
        cmd.finalize_options()
        self.assertEqual(cmd.swig_opts, ['1', '2'])

    def test_check_extensions_list(self):
        dist = Distribution()
        cmd = build_ext(dist)
        cmd.finalize_options()

        #'extensions' option must be a list of Extension instances
        self.assertRaises(DistutilsSetupError,
                          cmd.check_extensions_list, 'foo')

        # each element of 'ext_modules' option must be an
        # Extension instance or 2-tuple
        exts = [('bar', 'foo', 'bar'), 'foo']
        self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)

        # first element of each tuple in 'ext_modules'
        # must be the extension name (a string) and match
        # a python dotted-separated name
        exts = [('foo-bar', '')]
        self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)

        # second element of each tuple in 'ext_modules'
        # must be a ary (build info)
        exts = [('foo.bar', '')]
        self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)

        # ok this one should pass
        exts = [('foo.bar', {'sources': [''], 'libraries': 'foo',
                             'some': 'bar'})]
        cmd.check_extensions_list(exts)
        ext = exts[0]
        self.assertTrue(isinstance(ext, Extension))

        # check_extensions_list adds in ext the values passed
        # when they are in ('include_dirs', 'library_dirs', 'libraries'
        # 'extra_objects', 'extra_compile_args', 'extra_link_args')
        self.assertEqual(ext.libraries, 'foo')
        self.assertTrue(not hasattr(ext, 'some'))

        # 'macros' element of build info dict must be 1- or 2-tuple
        exts = [('foo.bar', {'sources': [''], 'libraries': 'foo',
                'some': 'bar', 'macros': [('1', '2', '3'), 'foo']})]
        self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)

        exts[0][1]['macros'] = [('1', '2'), ('3',)]
        cmd.check_extensions_list(exts)
        self.assertEqual(exts[0].undef_macros, ['3'])
        self.assertEqual(exts[0].define_macros, [('1', '2')])

    def test_get_source_files(self):
        modules = [Extension('foo', ['xxx'], optional=False)]
        dist = Distribution({'name': 'xx', 'ext_modules': modules})
        cmd = build_ext(dist)
        cmd.ensure_finalized()
        self.assertEqual(cmd.get_source_files(), ['xxx'])

    def test_compiler_option(self):
        # cmd.compiler is an option and
        # should not be overriden by a compiler instance
        # when the command is run
        dist = Distribution()
        cmd = build_ext(dist)
        cmd.compiler = 'unix'
        cmd.ensure_finalized()
        cmd.run()
        self.assertEqual(cmd.compiler, 'unix')

    def test_get_outputs(self):
        tmp_dir = self.mkdtemp()
        c_file = os.path.join(tmp_dir, 'foo.c')
        self.write_file(c_file, 'void PyInit_foo(void) {}\n')
        ext = Extension('foo', [c_file], optional=False)
        dist = Distribution({'name': 'xx',
                             'ext_modules': [ext]})
        cmd = build_ext(dist)
        fixup_build_ext(cmd)
        cmd.ensure_finalized()
        self.assertEqual(len(cmd.get_outputs()), 1)

        cmd.build_lib = os.path.join(self.tmp_dir, 'build')
        cmd.build_temp = os.path.join(self.tmp_dir, 'tempt')

        # issue #5977 : distutils build_ext.get_outputs
        # returns wrong result with --inplace
        other_tmp_dir = os.path.realpath(self.mkdtemp())
        old_wd = os.getcwd()
        os.chdir(other_tmp_dir)
        try:
            cmd.inplace = 1
            cmd.run()
            so_file = cmd.get_outputs()[0]
        finally:
            os.chdir(old_wd)
        self.assertTrue(os.path.exists(so_file))
        so_ext = sysconfig.get_config_var('SO')
        self.assertTrue(so_file.endswith(so_ext))
        so_dir = os.path.dirname(so_file)
        self.assertEqual(so_dir, other_tmp_dir)

        cmd.inplace = 0
        cmd.compiler = None
        cmd.run()
        so_file = cmd.get_outputs()[0]
        self.assertTrue(os.path.exists(so_file))
        self.assertTrue(so_file.endswith(so_ext))
        so_dir = os.path.dirname(so_file)
        self.assertEqual(so_dir, cmd.build_lib)

        # inplace = 0, cmd.package = 'bar'
        build_py = cmd.get_finalized_command('build_py')
        build_py.package_dir = {'': 'bar'}
        path = cmd.get_ext_fullpath('foo')
        # checking that the last directory is the build_dir
        path = os.path.split(path)[0]
        self.assertEqual(path, cmd.build_lib)

        # inplace = 1, cmd.package = 'bar'
        cmd.inplace = 1
        other_tmp_dir = os.path.realpath(self.mkdtemp())
        old_wd = os.getcwd()
        os.chdir(other_tmp_dir)
        try:
            path = cmd.get_ext_fullpath('foo')
        finally:
            os.chdir(old_wd)
        # checking that the last directory is bar
        path = os.path.split(path)[0]
        lastdir = os.path.split(path)[-1]
        self.assertEqual(lastdir, 'bar')

    def test_ext_fullpath(self):
        ext = sysconfig.get_config_vars()['SO']
        # building lxml.etree inplace
        #etree_c = os.path.join(self.tmp_dir, 'lxml.etree.c')
        #etree_ext = Extension('lxml.etree', [etree_c])
        #dist = Distribution({'name': 'lxml', 'ext_modules': [etree_ext]})
        dist = Distribution()
        cmd = build_ext(dist)
        cmd.inplace = 1
        cmd.distribution.package_dir = {'': 'src'}
        cmd.distribution.packages = ['lxml', 'lxml.html']
        curdir = os.getcwd()
        wanted = os.path.join(curdir, 'src', 'lxml', 'etree' + ext)
        path = cmd.get_ext_fullpath('lxml.etree')
        self.assertEqual(wanted, path)

        # building lxml.etree not inplace
        cmd.inplace = 0
        cmd.build_lib = os.path.join(curdir, 'tmpdir')
        wanted = os.path.join(curdir, 'tmpdir', 'lxml', 'etree' + ext)
        path = cmd.get_ext_fullpath('lxml.etree')
        self.assertEqual(wanted, path)

        # building twisted.runner.portmap not inplace
        build_py = cmd.get_finalized_command('build_py')
        build_py.package_dir = {}
        cmd.distribution.packages = ['twisted', 'twisted.runner.portmap']
        path = cmd.get_ext_fullpath('twisted.runner.portmap')
        wanted = os.path.join(curdir, 'tmpdir', 'twisted', 'runner',
                              'portmap' + ext)
        self.assertEqual(wanted, path)

        # building twisted.runner.portmap inplace
        cmd.inplace = 1
        path = cmd.get_ext_fullpath('twisted.runner.portmap')
        wanted = os.path.join(curdir, 'twisted', 'runner', 'portmap' + ext)
        self.assertEqual(wanted, path)

    @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX')
    def test_deployment_target_default(self):
        # Issue 9516: Test that, in the absence of the environment variable,
        # an extension module is compiled with the same deployment target as
        #  the interpreter.
        self._try_compile_deployment_target('==', None)

    @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX')
    def test_deployment_target_too_low(self):
        # Issue 9516: Test that an extension module is not allowed to be
        # compiled with a deployment target less than that of the interpreter.
        self.assertRaises(DistutilsPlatformError,
            self._try_compile_deployment_target, '>', '10.1')

    @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX')
    def test_deployment_target_higher_ok(self):
        # Issue 9516: Test that an extension module can be compiled with a
        # deployment target higher than that of the interpreter: the ext
        # module may depend on some newer OS feature.
        deptarget = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
        if deptarget:
            # increment the minor version number (i.e. 10.6 -> 10.7)
            deptarget = [int(x) for x in deptarget.split('.')]
            deptarget[-1] += 1
            deptarget = '.'.join(str(i) for i in deptarget)
            self._try_compile_deployment_target('<', deptarget)

    def _try_compile_deployment_target(self, operator, target):
        # Helper: compile a tiny module whose preprocessor test compares
        # TARGET against MAC_OS_X_VERSION_MIN_REQUIRED using `operator`.
        orig_environ = os.environ
        os.environ = orig_environ.copy()
        self.addCleanup(setattr, os, 'environ', orig_environ)

        if target is None:
            if os.environ.get('MACOSX_DEPLOYMENT_TARGET'):
                del os.environ['MACOSX_DEPLOYMENT_TARGET']
        else:
            os.environ['MACOSX_DEPLOYMENT_TARGET'] = target

        deptarget_c = os.path.join(self.tmp_dir, 'deptargetmodule.c')

        with open(deptarget_c, 'w') as fp:
            fp.write(textwrap.dedent('''\
                #include <AvailabilityMacros.h>

                int dummy;

                #if TARGET %s MAC_OS_X_VERSION_MIN_REQUIRED
                #else
                #error "Unexpected target"
                #endif

            ''' % operator))

        # get the deployment target that the interpreter was built with
        target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
        target = tuple(map(int, target.split('.')))
        target = '%02d%01d0' % target
        deptarget_ext = Extension(
            'deptarget',
            [deptarget_c],
            extra_compile_args=['-DTARGET=%s'%(target,)],
        )
        dist = Distribution({
            'name': 'deptarget',
            'ext_modules': [deptarget_ext]
        })
        dist.package_dir = self.tmp_dir
        cmd = build_ext(dist)
        cmd.build_lib = self.tmp_dir
        cmd.build_temp = self.tmp_dir

        try:
            old_stdout = sys.stdout
            if not support.verbose:
                # silence compiler output
                sys.stdout = StringIO()
            try:
                cmd.ensure_finalized()
                cmd.run()
            finally:
                sys.stdout = old_stdout

        except CompileError:
            self.fail("Wrong deployment target during compilation")
def test_suite():
    """Build the unittest suite for this module's build_ext tests."""
    suite = unittest.makeSuite(BuildExtTestCase)
    return suite
if __name__ == '__main__':
    # Run the suite through test.support's runner when invoked directly.
    support.run_unittest(test_suite())
| apache-2.0 |
Osndok/zim-desktop-wiki | zim/plugins/tasklist/__init__.py | 1 | 8109 | # -*- coding: utf-8 -*-
# Copyright 2009-2017 Jaap Karssenberg <jaap.karssenberg@gmail.com>
# TODO: allow more complex queries for filter, in particular (NOT tag AND tag)
# allow multiple tabs in dialog / side pane with configurable query
#
# TODO: add an interface for this plugin in the WWW frontend
#
# TODO: commandline option
# - open dialog
# - output to stdout with configurable format
# - force update, initialization
#
# TODO: store parser settings in notebook, not in preferences
# in dialog make it clear what is per notebook and what is user prefs
# tab in properties, link to open that from plugin prefs ?
# TODO: test coverage for the start date label (and due with "<")
# TODO: test coverage for start / due date from calendar page
# TODO: test coverage for sorting in list_open_tasks
# TODO: test coverage include / exclude sections
# TODO: update manual
from __future__ import with_statement
from zim.plugins import PluginClass, extends, ObjectExtension, WindowExtension
from zim.actions import action
from zim.config import StringAllowEmpty
from zim.signals import DelayedCallback
from zim.gui.widgets import RIGHT_PANE, PANE_POSITIONS
from .indexer import TasksIndexer, TasksView
from .gui import TaskListDialog, TaskListWidget
class TaskListPlugin(PluginClass):
    """Plugin that collects open tasks (open checkboxes and label markers
    like "TODO"/"FIXME") from the notebook and shows them in a dialog or
    an embedded side-pane widget."""

    plugin_info = {
        'name': _('Task List'), # T: plugin name
        'description': _('''\
This plugin adds a dialog showing all open tasks in
this notebook. Open tasks can be either open checkboxes
or items marked with tags like "TODO" or "FIXME".
This is a core plugin shipping with zim.
'''), # T: plugin description
        'author': 'Jaap Karssenberg',
        'help': 'Plugins:Task List'
    }

    # Preferences that affect how pages are *parsed* into tasks; a change
    # to any of these triggers a full re-index (see
    # NotebookExtension.on_preferences_changed / _get_parser_key).
    # Each item is (key, type, label, default[, constraint/choices]).
    parser_preferences = (
        # key, type, label, default
        ('all_checkboxes', 'bool', _('Consider all checkboxes as tasks'), True),
            # T: label for plugin preferences dialog
        ('labels', 'string', _('Labels marking tasks'), 'FIXME, TODO', StringAllowEmpty),
            # T: label for plugin preferences dialog - labels are e.g. "FIXME", "TODO"
        ('integrate_with_journal', 'choice', _('Use date from journal pages'), 'start', (
            ('none', _('do not use')), # T: choice for "Use date from journal pages"
            ('start', _('as start date for tasks')), # T: choice for "Use date from journal pages"
            ('due', _('as due date for tasks')) # T: choice for "Use date from journal pages"
        )),
        ('included_subtrees', 'string', _('Section(s) to index'), '', StringAllowEmpty),
            # T: Notebook sections to search for tasks - default is the whole tree (empty string means everything)
        ('excluded_subtrees', 'string', _('Section(s) to ignore'), '', StringAllowEmpty),
            # T: Notebook sections to exclude when searching for tasks - default is none
    )

    # Full preference list: display options + parser options + legacy
    # options (the latter are hidden via hide_preferences below).
    plugin_preferences = (
        # key, type, label, default
        ('embedded', 'bool', _('Show tasklist in sidepane'), False),
            # T: preferences option
        ('pane', 'choice', _('Position in the window'), RIGHT_PANE, PANE_POSITIONS),
            # T: preferences option
    ) + parser_preferences + (
        ('nonactionable_tags', 'string', _('Tags for non-actionable tasks'), '', StringAllowEmpty),
            # T: label for plugin preferences dialog
        ('tag_by_page', 'bool', _('Turn page name into tags for task items'), False),
            # T: label for plugin preferences dialog
        ('use_workweek', 'bool', _('Flag tasks due on Monday or Tuesday before the weekend'), False),
            # T: label for plugin preferences dialog
    )

    hide_preferences = ('nonactionable_tags', 'tag_by_page', 'use_workweek')
        # These are deprecated, but I don't dare to remove them yet
        # so hide them in the configuration dialog instead
@extends('Notebook')
class NotebookExtension(ObjectExtension):
    """Keeps the tasks index for one notebook up to date and forwards the
    indexer's 'tasklist-changed' signal on behalf of the notebook."""

    __signals__ = {
        'tasklist-changed': (None, None, ()),
    }

    def __init__(self, plugin, notebook):
        ObjectExtension.__init__(self, plugin, notebook)
        self.notebook = notebook
        self._parser_key = self._get_parser_key()

        self.index = notebook.index
        if self.index.get_property(TasksIndexer.PLUGIN_NAME) != TasksIndexer.PLUGIN_DB_FORMAT:
            # Stored tasklist db format differs from the current one:
            # drop the plugin's tables and schedule a full re-index.
            self.index._db.executescript(TasksIndexer.TEARDOWN_SCRIPT) # XXX
            self.index.flag_reindex()

        self.indexer = None
        self._setup_indexer(self.index, self.index.update_iter)
        self.connectto(self.index, 'new-update-iter', self._setup_indexer)

        self.connectto(plugin.preferences, 'changed', self.on_preferences_changed)

    def _setup_indexer(self, index, update_iter):
        """(Re)create the TasksIndexer and hook it into the index update
        iterator; called at init and whenever the index gets a new
        update iterator."""
        if self.indexer is not None:
            self.disconnect_from(self.indexer)
            self.indexer.disconnect_all()

        self.indexer = TasksIndexer.new_from_index(index, self.plugin.preferences)
        update_iter.add_indexer(self.indexer)
        self.connectto(self.indexer, 'tasklist-changed')

    def on_preferences_changed(self, preferences):
        """Rebuild the indexer and re-index the notebook, but only when a
        parser-related preference actually changed (display-only
        preferences don't require re-indexing)."""
        # Need to construct new parser, re-index pages
        if self._parser_key != self._get_parser_key():
            self._parser_key = self._get_parser_key()

            self.disconnect_from(self.indexer)
            self.indexer.disconnect_all()
            self.indexer = TasksIndexer.new_from_index(self.index, preferences)
            self.index.flag_reindex()
            self.connectto(self.indexer, 'tasklist-changed')

    def on_tasklist_changed(self, indexer):
        # Re-emit the indexer's signal as a notebook-level signal.
        self.emit('tasklist-changed')

    def _get_parser_key(self):
        """Tuple of the current parser-preference values; compared before
        and after a preference change to detect when re-indexing is
        needed."""
        return tuple(
            self.plugin.preferences[t[0]]
            for t in self.plugin.parser_preferences
        )

    def teardown(self):
        """Detach the indexer and remove the plugin's tables and version
        property from the index."""
        self.indexer.disconnect_all()
        self.notebook.index.update_iter.remove_indexer(self.indexer)
        self.index._db.executescript(TasksIndexer.TEARDOWN_SCRIPT) # XXX
        self.index.set_property(TasksIndexer.PLUGIN_NAME, None)
@extends('MainWindow')
class MainWindowExtension(WindowExtension):
    """Adds the "Task List" menu and toolbar items to the main window and,
    when the 'embedded' preference is set, a tasklist widget in a side
    pane."""

    uimanager_xml = '''
<ui>
<menubar name='menubar'>
<menu action='view_menu'>
<placeholder name="plugin_items">
<menuitem action="show_task_list" />
</placeholder>
</menu>
</menubar>
<toolbar name='toolbar'>
<placeholder name='tools'>
<toolitem action='show_task_list'/>
</placeholder>
</toolbar>
</ui>
'''

    def __init__(self, plugin, window):
        WindowExtension.__init__(self, plugin, window)
        self._widget = None
        # Apply current preferences immediately, then track changes.
        self.on_preferences_changed(plugin.preferences)
        self.connectto(plugin.preferences, 'changed', self.on_preferences_changed)

    @action(_('Task List'), stock='zim-task-list', readonly=True) # T: menu item
    def show_task_list(self):
        """Open the stand-alone task list dialog."""
        # TODO: add check + dialog for index probably_up_to_date
        index = self.window.ui.notebook.index # XXX
        tasksview = TasksView.new_from_index(index)
        dialog = TaskListDialog.unique(self, self.window, tasksview, self.plugin.preferences)
        dialog.present()

    def on_preferences_changed(self, preferences):
        """Create, refresh/re-attach, or remove the embedded side-pane
        widget to match the 'embedded' and 'pane' preferences."""
        if preferences['embedded']:
            if self._widget is None:
                self._init_widget()
            else:
                self._widget.task_list.refresh()
                try:
                    self.window.remove(self._widget)
                except ValueError:
                    # Widget was not attached to the window; nothing to detach
                    pass
            self.window.add_tab(_('Tasks'), self._widget, preferences['pane'])
                # T: tab label for side pane
            self._widget.show_all()
        else:
            if self._widget:
                self.window.remove(self._widget)
                self._widget = None

    def _init_widget(self):
        """Create the side-pane TaskListWidget and connect it to the
        notebook's tasks indexer so it refreshes when tasks change."""
        index = self.window.ui.notebook.index # XXX
        tasksview = TasksView.new_from_index(index)
        opener = self.window.get_resource_opener()
        uistate = self.window.ui.uistate['TaskListSidePane']
        self._widget = TaskListWidget(tasksview, opener, self.plugin.preferences, uistate)

        def on_tasklist_changed(o):
            self._widget.task_list.refresh()

        callback = DelayedCallback(10, on_tasklist_changed)
            # Don't really care about the delay, but want to
            # make it less blocking - now it is at least on idle

        ### XXX HACK to get dependency to connect to
        ### -- no access to plugin, so can't use get_extension()
        ## -- duplicate of this snippet in TaskListDialog
        for e in self.window.ui.notebook.__zim_extension_objects__:
            if hasattr(e, 'indexer') and e.indexer.__class__.__name__ == 'TasksIndexer':
                self.connectto(e, 'tasklist-changed', callback)
                break
        else:
            raise AssertionError('Could not find tasklist notebook extension')

    def teardown(self):
        # Detach the embedded widget (if any) when the extension is removed.
        if self._widget:
            self.window.remove(self._widget)
            self._widget = None
| gpl-2.0 |
dchen1107/test-infra | kettle/make_json.py | 3 | 7347 | #!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import hashlib
import logging
import json
import os
import sqlite3
import subprocess
import sys
import time
try:
import defusedxml.ElementTree as ET
except ImportError:
import xml.etree.cElementTree as ET
import model
def parse_junit(xml):
    """Generate test results as a series of dicts. Ignore skipped tests.

    Accepts either a <testsuite> or a <testsuites> root element; for the
    latter, test names are prefixed with their suite name.

    Fix/cleanup: the per-testcase parsing was duplicated for the two root
    shapes and make_result carried an unused 'skipped' parameter; both
    paths now share one helper with identical behavior.
    """
    # NOTE: this is modified from gubernator/view_build.py
    tree = ET.fromstring(xml)

    def make_result(name, time, failure_text):
        """Build the result dict for a single test case."""
        if failure_text:
            if time is None:
                return {'name': name, 'failed': True, 'failure_text': failure_text}
            return {'name': name, 'time': time, 'failed': True, 'failure_text': failure_text}
        if time is None:
            return {'name': name}
        return {'name': name, 'time': time}

    def parse_testcase(child, name):
        """Parse one <testcase> element; return its result dict, or None
        when the test was skipped."""
        time = float(child.attrib['time'] or 0)
        failure_text = None
        for param in child.findall('failure'):
            failure_text = param.text
        # Note: skipped tests are ignored because they make rows too large
        # for BigQuery. Knowing that a given build could have ran a test but
        # didn't for some reason isn't very interesting.
        if child.findall('skipped'):
            return None
        return make_result(name, time, failure_text)

    if tree.tag == 'testsuite':
        for child in tree:
            result = parse_testcase(child, child.attrib['name'])
            if result is not None:
                yield result
    elif tree.tag == 'testsuites':
        for testsuite in tree:
            suite_name = testsuite.attrib['name']
            for child in testsuite.findall('testcase'):
                result = parse_testcase(
                    child, '%s %s' % (suite_name, child.attrib['name']))
                if result is not None:
                    yield result
    else:
        logging.error('unable to find failures, unexpected tag %s', tree.tag)
# pypy compatibility hack
# Load the bucket -> job-prefix mapping from ../buckets.yaml by shelling out
# to CPython ('python2') and round-tripping the YAML through JSON, so this
# script does not need an importable yaml module itself.
BUCKETS = json.loads(subprocess.check_output(
    ['python2', '-c', 'import json,yaml; print json.dumps(yaml.load(open("../buckets.yaml")))'],
    cwd=os.path.dirname(os.path.abspath(__file__))))
def path_to_job_and_number(path):
    """Map a GCS build path to its (job, build-number) pair.

    The job name is prefixed according to the bucket the path lives in
    (falling back to 'pr:' for PR logs); the number is None when the last
    path component is not numeric. Raises ValueError for unknown buckets.
    """
    assert not path.endswith('/')
    prefix = None
    for bucket, meta in BUCKETS.iteritems():
        if path.startswith(bucket):
            prefix = meta['prefix']
            break
    if prefix is None:
        if path.startswith('gs://kubernetes-jenkins/pr-logs'):
            prefix = 'pr:'
        else:
            raise ValueError('unknown build path')
    build = os.path.basename(path)
    job = prefix + os.path.basename(os.path.dirname(path))
    try:
        number = int(build)
    except ValueError:
        number = None
    return job, number
def row_for_build(path, started, finished, results):
    """Assemble one BigQuery row (a dict) describing a single build.

    path:     gs:// path of the build directory
    started:  parsed started.json dict, or None
    finished: parsed finished.json dict, or None
    results:  iterable of junit XML strings for the build
    """
    tests = []
    for result in results:
        tests.extend(parse_junit(result))
    build = {
        'path': path,
        'test': tests,
        'tests_run': len(tests),
        'tests_failed': sum(t.get('failed', 0) for t in tests)
    }
    job, number = path_to_job_and_number(path)
    build['job'] = job
    if number:
        build['number'] = number

    if started:
        build['started'] = int(started['timestamp'])
        # 'node' wins over 'jenkins-node' when both keys are present
        # (later assignment overwrites).
        if 'jenkins-node' in started:
            build['executor'] = started['jenkins-node']
        if 'node' in started:
            build['executor'] = started['node']
    if finished:
        build['finished'] = int(finished['timestamp'])
        build['result'] = finished['result']
        if 'version' in finished:
            build['version'] = finished['version']

    def get_metadata():
        """Collect metadata from finished.json (preferred) or started.json,
        drop empty/duplicated fields, and convert the rest to the
        [{'key': ..., 'value': ...}] list the schema expects; returns None
        when nothing is left."""
        metadata = None
        if finished and 'metadata' in finished:
            metadata = finished['metadata']
        elif started:
            metadata = started.get('metadata')
        if metadata:
            # clean useless/duplicated metadata fields
            if 'repo' in metadata and not metadata['repo']:
                metadata.pop('repo')
            build_version = build.get('version', 'N/A')
            if metadata.get('job-version') == build_version:
                metadata.pop('job-version')
            if metadata.get('version') == build_version:
                metadata.pop('version')
            for k, v in metadata.items():
                if not isinstance(v, basestring):
                    # the schema specifies a string value. force it!
                    metadata[k] = json.dumps(v)
        if not metadata:
            return None
        return [{'key': k, 'value': v} for k, v in sorted(metadata.items())]

    metadata = get_metadata()
    if metadata:
        build['metadata'] = metadata
    if started and finished:
        build['elapsed'] = build['finished'] - build['started']
    return build
def parse_args(args):
    """Parse the command-line options for the incremental JSON dump."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--day', action='store_true',
        help='Grab data for builds in the last day')
    parser.add_argument(
        '--days', type=float,
        help='Grab data for builds within N days')
    parser.add_argument(
        '--reset-emitted', action='store_true',
        help='Clear list of already-emitted builds.')
    return parser.parse_args(args)
def main(db, opts, outfile):
    """Write one JSON row per not-yet-emitted build to *outfile*, then
    record the emitted row ids in an incremental table so later runs can
    skip them."""
    incremental_table = 'build_emitted'

    min_started = None
    if opts.day or opts.days:
        min_started = time.time() - (opts.days or 1) * 24 * 60 * 60
        # Separate emitted-tracking table per time window (dots in the
        # day count are not valid in table names).
        incremental_table = ('build_emitted_%g' % (opts.days or 1)).replace('.', '_')

    if opts.reset_emitted:
        db.reset_emitted(incremental_table)

    builds = db.get_builds(min_started=min_started, incremental_table=incremental_table)

    rows_emitted = set()
    for rowid, path, started, finished in builds:
        try:
            results = db.test_results_for_build(path)
            row = row_for_build(path, started, finished, results)
            json.dump(row, outfile, sort_keys=True)
            outfile.write('\n')
            rows_emitted.add(rowid)
        except IOError:
            # Output stream closed (e.g. broken pipe): stop quietly.
            return
        except:  # NOTE(review): prefer 'except Exception' - bare except also catches SystemExit
            logging.exception('error on %s', path)

    if rows_emitted:
        gen = db.insert_emitted(rows_emitted, incremental_table=incremental_table)
        print >>sys.stderr, 'incremental progress gen #%d' % gen
    else:
        print >>sys.stderr, 'no rows emitted'
if __name__ == '__main__':
    # Incrementally dump rows from the local build.db to stdout.
    db = model.Database('build.db')
    opts = parse_args(sys.argv[1:])
    main(db, opts, sys.stdout)
| apache-2.0 |
jxzhxch/androguard | androdd.py | 37 | 9441 | #!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (C) 2012/2013, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
import shutil
import sys
import os
import re
from optparse import OptionParser
from androguard.core.androgen import Androguard
from androguard.core import androconf
from androguard.core.analysis import analysis
from androguard.core.bytecodes import dvm
from androguard.core.bytecode import method2dot, method2format
from androguard.decompiler import decompiler
# Command-line option descriptors for the optparse parser built in the
# __main__ block below: each dict's 'name' entry holds the flag strings,
# the remaining keys are passed through as add_option() keyword arguments.
option_0 = { 'name' : ('-i', '--input'), 'help' : 'file : use this filename', 'nargs' : 1 }
option_1 = { 'name' : ('-o', '--output'), 'help' : 'base directory to output all files', 'nargs' : 1 }
option_2 = { 'name' : ('-d', '--decompiler'), 'help' : 'choose a decompiler', 'nargs' : 1 }
option_3 = { 'name' : ('-j', '--jar'), 'help' : 'output jar file', 'action' : 'count' }
option_4 = { 'name' : ('-f', '--format'), 'help' : 'write the method in specific format (png, ...)', 'nargs' : 1 }
option_5 = { 'name' : ('-l', '--limit'), 'help' : 'limit analysis to specific methods/classes by using a regexp', 'nargs' : 1}
option_6 = { 'name' : ('-v', '--version'), 'help' : 'version of the API', 'action' : 'count' }

options = [option_0, option_1, option_2, option_3, option_4, option_5, option_6]
def valid_class_name(class_name):
    """Convert a Dalvik class descriptor to a plain, path-friendly name.

    Dalvik class names look like "Lcom/example/Foo;"; strip the leading
    type marker and the trailing ';'. Any other name is returned
    unchanged.

    Fix: guard against the empty string, which previously raised
    IndexError on class_name[-1].
    """
    if class_name and class_name[-1] == ";":
        return class_name[1:-1]
    return class_name
def create_directory(class_name, output):
    """Ensure the directory <output>/<class_name> exists.

    Creation errors are deliberately swallowed (best-effort behaviour).
    """
    base = output
    if base[-1] != "/":
        base = base + "/"
    pathdir = base + class_name
    try:
        if not os.path.exists(pathdir):
            os.makedirs(pathdir)
    except OSError:
        # FIXME
        pass
def export_apps_to_format(filename, a, output, methods_filter=None, jar=None, decompiler_type=None, format=None):
    """Decompile *a* (an Androguard session for *filename*) and dump, per
    method, the decompiled source, the bytecode (.ag) and optionally a
    rendered graph under the *output* directory.

    methods_filter:  regexp restricting which class/method signatures dump
    jar:             see NOTE below - the module-level 'options.jar' is
                     checked instead of this parameter
    decompiler_type: None (default DAD), 'dex2jad', 'dex2winejad', 'ded'
                     or 'dex2fernflower'
    format:          if set, render each method graph in this format
                     (png, ...)
    """
    print "Dump information %s in %s" % (filename, output)

    # Always start with an empty output directory.
    if not os.path.exists(output):
        print "Create directory %s" % output
        os.makedirs(output)
    else:
        print "Clean directory %s" % output
        androconf.rrmdir(output)
        os.makedirs(output)

    methods_filter_expr = None
    if methods_filter:
        methods_filter_expr = re.compile(methods_filter)

    output_name = output
    if output_name[-1] != "/":
        output_name = output_name + "/"

    dump_classes = []
    for vm in a.get_vms():
        print "Analysis ...",
        sys.stdout.flush()
        vmx = analysis.VMAnalysis(vm)
        print "End"

        print "Decompilation ...",
        sys.stdout.flush()
        # Attach the requested decompiler backend to the VM.
        if not decompiler_type:
            vm.set_decompiler(decompiler.DecompilerDAD(vm, vmx))
        elif decompiler_type == "dex2jad":
            vm.set_decompiler(decompiler.DecompilerDex2Jad(vm,
                androconf.CONF["PATH_DEX2JAR"],
                androconf.CONF["BIN_DEX2JAR"],
                androconf.CONF["PATH_JAD"],
                androconf.CONF["BIN_JAD"],
                androconf.CONF["TMP_DIRECTORY"]))
        elif decompiler_type == "dex2winejad":
            vm.set_decompiler(decompiler.DecompilerDex2WineJad(vm,
                androconf.CONF["PATH_DEX2JAR"],
                androconf.CONF["BIN_DEX2JAR"],
                androconf.CONF["PATH_JAD"],
                androconf.CONF["BIN_WINEJAD"],
                androconf.CONF["TMP_DIRECTORY"]))
        elif decompiler_type == "ded":
            vm.set_decompiler(decompiler.DecompilerDed(vm,
                androconf.CONF["PATH_DED"],
                androconf.CONF["BIN_DED"],
                androconf.CONF["TMP_DIRECTORY"]))
        elif decompiler_type == "dex2fernflower":
            vm.set_decompiler(decompiler.DecompilerDex2Fernflower(vm,
                androconf.CONF["PATH_DEX2JAR"],
                androconf.CONF["BIN_DEX2JAR"],
                androconf.CONF["PATH_FERNFLOWER"],
                androconf.CONF["BIN_FERNFLOWER"],
                androconf.CONF["OPTIONS_FERNFLOWER"],
                androconf.CONF["TMP_DIRECTORY"]))
        else:
            # NOTE(review): raising a plain string is a TypeError at
            # runtime; should be e.g. raise ValueError("invalid decompiler !")
            raise("invalid decompiler !")
        print "End"

        # NOTE(review): this reads the module-level 'options' object instead
        # of the 'jar' parameter, which is otherwise unused - confirm intent.
        if options.jar:
            print "jar ...",
            filenamejar = decompiler.Dex2Jar(vm,
                androconf.CONF["PATH_DEX2JAR"],
                androconf.CONF["BIN_DEX2JAR"],
                androconf.CONF["TMP_DIRECTORY"]).get_jar()
            shutil.move(filenamejar, output + "classes.jar")
            print "End"

        for method in vm.get_methods():
            # Apply the optional signature filter (class + name + descriptor).
            if methods_filter_expr:
                msig = "%s%s%s" % (method.get_class_name(),
                                   method.get_name(),
                                   method.get_descriptor())
                if not methods_filter_expr.search(msig):
                    continue

            filename_class = valid_class_name(method.get_class_name())
            create_directory(filename_class, output)

            print "Dump %s %s %s ..." % (method.get_class_name(),
                                         method.get_name(),
                                         method.get_descriptor()),

            filename_class = output_name + filename_class
            if filename_class[-1] != "/":
                filename_class = filename_class + "/"

            # Build a filesystem-safe file name from the method descriptor.
            descriptor = method.get_descriptor()
            descriptor = descriptor.replace(";", "")
            descriptor = descriptor.replace(" ", "")
            descriptor = descriptor.replace("(", "-")
            descriptor = descriptor.replace(")", "-")
            descriptor = descriptor.replace("/", "_")

            filename = filename_class + method.get_name() + descriptor
            if len(method.get_name() + descriptor) > 250:
                # Name too long for the filesystem: fall back to an index
                # among the same-named overloads of this class.
                all_identical_name_methods = vm.get_methods_descriptor(method.get_class_name(), method.get_name())
                pos = 0
                for i in all_identical_name_methods:
                    if i.get_descriptor() == method.get_descriptor():
                        break
                    pos += 1
                filename = filename_class + method.get_name() + "_%d" % pos

            buff = method2dot(vmx.get_method(method))

            # Optionally render the method graph (png, ...).
            if format:
                print "%s ..." % format,
                method2format(filename + "." + format, format, None, buff)

            # Write the decompiled source once per class.
            if method.get_class_name() not in dump_classes:
                print "source codes ...",
                current_class = vm.get_class(method.get_class_name())
                current_filename_class = valid_class_name(current_class.get_name())
                create_directory(filename_class, output)

                current_filename_class = output_name + current_filename_class + ".java"
                fd = open(current_filename_class, "w")
                fd.write(current_class.get_source())
                fd.close()

                dump_classes.append(method.get_class_name())

            print "bytecodes ...",
            bytecode_buff = dvm.get_bytecodes_method(vm, vmx, method)
            fd = open(filename + ".ag", "w")
            fd.write(bytecode_buff)
            fd.close()

            print
def main(options, arguments):
    """Entry point: dump the input file into the output directory, print
    the version, or print a usage hint when arguments are missing."""
    if options.input != None and options.output != None:
        a = Androguard([options.input])
        export_apps_to_format(options.input, a, options.output, options.limit, options.jar, options.decompiler, options.format)
    elif options.version != None:
        print "Androdd version %s" % androconf.ANDROGUARD_VERSION
    else:
        print "Please, specify an input file and an output directory"
if __name__ == "__main__":
    # Build the optparse parser from the option descriptors above; each
    # dict's 'name' entry holds the flags, the rest become add_option kwargs.
    parser = OptionParser()
    for option in options:
        param = option['name']
        del option['name']
        parser.add_option(*param, **option)

    # Note: 'options' is rebound here from the descriptor list to the
    # parsed optparse values object.
    options, arguments = parser.parse_args()
    sys.argv[:] = arguments
    main(options, arguments)
| apache-2.0 |
psolstice/zcoin | qa/rpc-tests/getblocktemplate_proposals.py | 101 | 5726 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from binascii import a2b_hex, b2a_hex
from hashlib import sha256
from struct import pack
def b2x(b):
    """Return the lowercase hex string for the bytes-like object *b*."""
    hexed = b2a_hex(b)
    return hexed.decode('ascii')
# NOTE: This does not work for signed numbers (set the high bit) or zero (use b'\0')
def encodeUNum(n):
    """Encode the positive integer *n* as a length-prefixed little-endian
    byte string (used below to push the block height into the coinbase)."""
    out = bytearray(b'\1')
    while n > 127:
        out[0] += 1
        n, low_byte = divmod(n, 256)
        out.append(low_byte)
    out.append(n)
    return bytes(out)
def varlenEncode(n):
    """Encode *n* as a Bitcoin variable-length integer (CompactSize)."""
    if n < 0xfd:
        return pack('<B', n)
    elif n <= 0xffff:
        return b'\xfd' + pack('<H', n)
    elif n <= 0xffffffff:
        return b'\xfe' + pack('<L', n)
    else:
        return b'\xff' + pack('<Q', n)
def dblsha(b):
    """Return SHA256(SHA256(b)) - the double-SHA256 digest used by Bitcoin."""
    inner = sha256(b).digest()
    return sha256(inner).digest()
def genmrklroot(leaflist):
    """Compute the merkle root of *leaflist* (a list of 32-byte hashes).

    A level with an odd number of nodes pairs its last hash with itself,
    as in Bitcoin's merkle tree construction.

    Fix: operate on a copy of the input list; previously the caller's
    list was mutated (the duplicated last hash was appended to it) when
    the first level had an odd length.
    """
    cur = list(leaflist)
    while len(cur) > 1:
        if len(cur) & 1:
            cur.append(cur[-1])
        nxt = []
        for i in range(0, len(cur), 2):
            nxt.append(dblsha(cur[i] + cur[i+1]))
        cur = nxt
    return cur[0]
def template_to_bytearray(tmpl, txlist):
    """Serialize a getblocktemplate dict plus raw transactions into a
    full block (header + tx count + transactions) as a bytearray."""
    version = pack('<L', tmpl['version'])
    mrklroot = genmrklroot(list(dblsha(tx) for tx in txlist))
    timestamp = pack('<L', tmpl['curtime'])
    nonce = b'\0\0\0\0'
    header = (version
              + a2b_hex(tmpl['previousblockhash'])[::-1]
              + mrklroot
              + timestamp
              + a2b_hex(tmpl['bits'])[::-1]
              + nonce)
    body = varlenEncode(len(txlist))
    for tx in txlist:
        body += tx
    return bytearray(header + body)
def template_to_hex(tmpl, txlist):
    """Hex-encode the serialized block proposal."""
    raw = template_to_bytearray(tmpl, txlist)
    return b2x(raw)
def assert_template(node, tmpl, txlist, expect):
    """Submit the block as a getblocktemplate proposal and check that the
    node's verdict matches *expect* (None means acceptance)."""
    proposal = {'data': template_to_hex(tmpl, txlist), 'mode': 'proposal'}
    rsp = node.getblocktemplate(proposal)
    if rsp != expect:
        raise AssertionError('unexpected: %s' % (rsp,))
class GetBlockTemplateProposalTest(BitcoinTestFramework):
    '''
    Test block proposals with getblocktemplate.

    Submits a series of deliberately broken block proposals and checks
    that the node rejects each with the expected reason, plus one valid
    block and one orphan.
    '''

    def __init__(self):
        super().__init__()
        self.num_nodes = 2
        self.setup_clean_chain = False

    def setup_network(self):
        # Two connected nodes on an existing (non-clean) chain.
        self.nodes = self.setup_nodes()
        connect_nodes_bi(self.nodes, 0, 1)

    def run_test(self):
        """Exercise 'mode': 'proposal' handling of getblocktemplate."""
        node = self.nodes[0]
        node.generate(1) # Mine a block to leave initial block download
        tmpl = node.getblocktemplate()
        if 'coinbasetxn' not in tmpl:
            # Template did not supply a coinbase: build a minimal one
            # ourselves (height push + 1-byte arbitrary data, single
            # output paying the full coinbase value).
            rawcoinbase = encodeUNum(tmpl['height'])
            rawcoinbase += b'\x01-'
            hexcoinbase = b2x(rawcoinbase)
            hexoutval = b2x(pack('<Q', tmpl['coinbasevalue']))
            tmpl['coinbasetxn'] = {'data': '01000000' + '01' + '0000000000000000000000000000000000000000000000000000000000000000ffffffff' + ('%02x' % (len(rawcoinbase),)) + hexcoinbase + 'fffffffe' + '01' + hexoutval + '00' + '00000000'}
        txlist = list(bytearray(a2b_hex(a['data'])) for a in (tmpl['coinbasetxn'],) + tuple(tmpl['transactions']))

        # Test 0: Capability advertised
        assert('proposal' in tmpl['capabilities'])

        # NOTE: This test currently FAILS (regtest mode doesn't enforce block height in coinbase)
        ## Test 1: Bad height in coinbase
        #txlist[0][4+1+36+1+1] += 1
        #assert_template(node, tmpl, txlist, 'FIXME')
        #txlist[0][4+1+36+1+1] -= 1

        # Test 2: Bad input hash for gen tx
        txlist[0][4+1] += 1
        assert_template(node, tmpl, txlist, 'bad-cb-missing')
        txlist[0][4+1] -= 1

        # Test 3: Truncated final tx
        lastbyte = txlist[-1].pop()
        assert_raises(JSONRPCException, assert_template, node, tmpl, txlist, 'n/a')
        txlist[-1].append(lastbyte)

        # Test 4: Add an invalid tx to the end (duplicate of gen tx)
        txlist.append(txlist[0])
        assert_template(node, tmpl, txlist, 'bad-txns-duplicate')
        txlist.pop()

        # Test 5: Add an invalid tx to the end (non-duplicate)
        txlist.append(bytearray(txlist[0]))
        txlist[-1][4+1] = 0xff
        assert_template(node, tmpl, txlist, 'bad-txns-inputs-missingorspent')
        txlist.pop()

        # Test 6: Future tx lock time
        txlist[0][-4:] = b'\xff\xff\xff\xff'
        assert_template(node, tmpl, txlist, 'bad-txns-nonfinal')
        txlist[0][-4:] = b'\0\0\0\0'

        # Test 7: Bad tx count
        txlist.append(b'')
        assert_raises(JSONRPCException, assert_template, node, tmpl, txlist, 'n/a')
        txlist.pop()

        # Test 8: Bad bits
        realbits = tmpl['bits']
        tmpl['bits'] = '1c0000ff' # impossible in the real world
        assert_template(node, tmpl, txlist, 'bad-diffbits')
        tmpl['bits'] = realbits

        # Test 9: Bad merkle root
        rawtmpl = template_to_bytearray(tmpl, txlist)
        rawtmpl[4+32] = (rawtmpl[4+32] + 1) % 0x100
        rsp = node.getblocktemplate({'data':b2x(rawtmpl),'mode':'proposal'})
        if rsp != 'bad-txnmrklroot':
            raise AssertionError('unexpected: %s' % (rsp,))

        # Test 10: Bad timestamps
        realtime = tmpl['curtime']
        tmpl['curtime'] = 0x7fffffff
        assert_template(node, tmpl, txlist, 'time-too-new')
        tmpl['curtime'] = 0
        assert_template(node, tmpl, txlist, 'time-too-old')
        tmpl['curtime'] = realtime

        # Test 11: Valid block
        assert_template(node, tmpl, txlist, None)

        # Test 12: Orphan block
        tmpl['previousblockhash'] = 'ff00' * 16
        assert_template(node, tmpl, txlist, 'inconclusive-not-best-prevblk')
if __name__ == '__main__':
    # Run the functional test directly.
    GetBlockTemplateProposalTest().main()
| mit |
turdusmerula/kipartman | kipartbase/swagger_server/controllers/controller_upload_file.py | 1 | 1796 | import connexion
from swagger_server.models.upload_file import UploadFile
from swagger_server.models.upload_file_data import UploadFileData
from swagger_server.models.error import Error
from datetime import date, datetime
from typing import List, Dict
from six import iteritems
from ..util import deserialize_date, deserialize_datetime
import api.models
import api.file_storage
from os.path import expanduser
# NOTE(review): 'home' appears unused in this module - confirm before removing.
home = expanduser("~")
def serialize_UploadFileData(fupload_file, upload_file=None):
    """Copy the file-data fields from the model object *fupload_file* onto
    *upload_file* (a fresh UploadFileData is created when none is given)
    and return it."""
    target = upload_file if upload_file is not None else UploadFileData()
    target.source_name = fupload_file.source_name
    target.storage_path = fupload_file.storage_path
    return target
def serialize_UploadFile(fupload_file, upload_file=None):
    """Serialize a file model object into an UploadFile API object,
    filling in the id plus the common file-data fields."""
    result = UploadFile() if upload_file is None else upload_file
    result.id = fupload_file.id
    serialize_UploadFileData(fupload_file, result)
    return result
def add_upload_file(upfile=None, description=None):
    """
    add_upload_file
    Upload a file and register it in the file storage.

    :param upfile: The file to upload.
    :type upfile: werkzeug.datastructures.FileStorage
    :param description: A description of the uploaded file (currently unused).
    :type description: str

    :rtype: UploadFile
    """
    storage = api.file_storage.FileStorage()
    fupload_file = storage.add_file(upfile)

    return serialize_UploadFile(fupload_file)
def find_upload_file(upload_file_id):
    """
    find_upload_file
    Return a file by id, or an (Error, 403) tuple when the lookup fails.

    :param upload_file_id: File id
    :type upload_file_id: int

    :rtype: UploadFile
    """
    try:
        fupload_file = api.models.File.objects.get(id=upload_file_id)
    except Exception:
        # Fix: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; still broad to preserve the old "any lookup
        # failure -> 403" behaviour (e.g. File.DoesNotExist).
        return Error(code=1000, message='File %d does not exists'%upload_file_id), 403

    return serialize_UploadFile(fupload_file)
| gpl-3.0 |
frreiss/tensorflow-fred | tensorflow/lite/testing/op_tests/neg.py | 17 | 1918 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for neg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_neg_tests(options):
    """Make a set of tests to do neg."""

    # One parameter set: float32/int32 inputs over 4-D, 1-D and scalar shapes.
    test_parameters = [{
        "input_dtype": [tf.float32, tf.int32],
        "input_shape": [[1, 3, 4, 3], [5], []],
    }]

    def build_graph(parameters):
        """Build the neg op testing graph."""
        input_tensor = tf.compat.v1.placeholder(
            dtype=parameters["input_dtype"],
            name="input",
            shape=parameters["input_shape"])
        out = tf.negative(input_tensor)
        return [input_tensor], [out]

    def build_inputs(parameters, sess, inputs, outputs):
        # Feed random data of the requested dtype/shape and capture outputs.
        values = create_tensor_data(parameters["input_dtype"],
                                    parameters["input_shape"])
        return [values], sess.run(outputs, feed_dict=dict(zip(inputs, [values])))

    make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| apache-2.0 |
mikelolasagasti/revelation | src/lib/ui.py | 1 | 48073 | #
# Revelation - a password manager for GNOME 2
# http://oss.codepoet.no/revelation/
# $Id$
#
# Module for UI functionality
#
#
# Copyright (c) 2003-2006 Erik Grinaker
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from . import config, data, dialog, entry, io, util
import gettext
import time
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import GObject, Gtk, Gdk, Gio, Pango # noqa: E402
_ = gettext.gettext
# Stock identifiers for buttons and actions.  Several historical
# "revelation-*" stock ids have been replaced by translated button labels
# or by freedesktop icon names; the old id is kept in a trailing comment.
STOCK_CONTINUE = _("_Continue") # "revelation-continue"
STOCK_DISCARD = "revelation-discard"
STOCK_EDIT = "revelation-edit"
STOCK_EXPORT = _("_Export") # "revelation-export"
STOCK_FOLDER = "revelation-folder"
STOCK_GENERATE = _("_Generate") # "revelation-generate"
STOCK_IMPORT = _("_Import") # "revelation-import"
STOCK_GOTO = "revelation-goto"
STOCK_LOCK = "revelation-lock"
STOCK_NEW_ENTRY = _("_Add Entry") # "revelation-new-entry"
STOCK_NEW_FOLDER = _("_Add Folder") # "revelation-new-folder"
STOCK_NEXT = "go-down" # "revelation-next"
STOCK_PASSWORD_CHANGE = _("_Change") # "revelation-password-change"
STOCK_PASSWORD_CHECK = "revelation-password-check" # nosec
STOCK_PASSWORD_STRONG = "security-high" # nosec "revelation-password-strong"
STOCK_PASSWORD_WEAK = "security-low" # nosec "revelation-password-weak"
STOCK_PREVIOUS = "go-up" # "revelation-previous"
STOCK_RELOAD = _("_Reload") # "revelation-reload"
STOCK_REMOVE = "revelation-remove"
STOCK_REPLACE = _("_Replace") # "revelation-replace"
STOCK_UNKNOWN = "dialog-question" # "revelation-unknown"
STOCK_UNLOCK = _("_Unlock") # "revelation-unlock"
STOCK_UPDATE = _("_Update") # "revelation-update"
# Icon names for the different account (entry) types shown in the tree.
STOCK_ENTRY_FOLDER = "folder" # "revelation-account-folder"
STOCK_ENTRY_FOLDER_OPEN = "folder-open" # "revelation-account-folder-open"
STOCK_ENTRY_CREDITCARD = "x-office-contact" # "revelation-account-creditcard"
STOCK_ENTRY_CRYPTOKEY = "dialog-password" # "revelation-account-cryptokey"
STOCK_ENTRY_DATABASE = "server-database" # "revelation-account-database"
STOCK_ENTRY_DOOR = "changes-allow" # "revelation-account-door"
STOCK_ENTRY_EMAIL = "emblem-mail" # "revelation-account-email"
STOCK_ENTRY_FTP = "system-file-manager" # "revelation-account-ftp"
STOCK_ENTRY_GENERIC = "document-new" # "revelation-account-generic"
STOCK_ENTRY_PHONE = "phone" # "revelation-account-phone"
STOCK_ENTRY_SHELL = "utilities-terminal" # "revelation-account-shell"
STOCK_ENTRY_REMOTEDESKTOP = "preferences-desktop-remote-desktop" # "revelation-account-remotedesktop"
STOCK_ENTRY_WEBSITE = "web-browser" # "revelation-account-website"
# Icon sizes used in the various UI contexts below.
ICON_SIZE_APPLET = Gtk.IconSize.LARGE_TOOLBAR
ICON_SIZE_DATAVIEW = Gtk.IconSize.LARGE_TOOLBAR
ICON_SIZE_DROPDOWN = Gtk.IconSize.SMALL_TOOLBAR
ICON_SIZE_ENTRY = Gtk.IconSize.MENU
ICON_SIZE_FALLBACK = Gtk.IconSize.LARGE_TOOLBAR
ICON_SIZE_HEADLINE = Gtk.IconSize.LARGE_TOOLBAR
ICON_SIZE_LABEL = Gtk.IconSize.MENU
ICON_SIZE_LOGO = Gtk.IconSize.DND
ICON_SIZE_TREEVIEW = Gtk.IconSize.MENU
# (stock id, icon name, icon sizes) triples for the entry-type icons.
STOCK_ICONS = (
    (STOCK_ENTRY_CREDITCARD, "contact-new", (ICON_SIZE_DATAVIEW, ICON_SIZE_DROPDOWN, ICON_SIZE_ENTRY, ICON_SIZE_TREEVIEW)),
    (STOCK_ENTRY_CRYPTOKEY, "dialog-password", (ICON_SIZE_DATAVIEW, ICON_SIZE_DROPDOWN, ICON_SIZE_ENTRY, ICON_SIZE_TREEVIEW)),
    (STOCK_ENTRY_DATABASE, "package_system", (ICON_SIZE_DATAVIEW, ICON_SIZE_DROPDOWN, ICON_SIZE_ENTRY, ICON_SIZE_TREEVIEW)),
    (STOCK_ENTRY_DOOR, "changes-allow", (ICON_SIZE_DATAVIEW, ICON_SIZE_DROPDOWN, ICON_SIZE_ENTRY, ICON_SIZE_TREEVIEW)),
    (STOCK_ENTRY_EMAIL, "emblem-mail", (ICON_SIZE_DATAVIEW, ICON_SIZE_DROPDOWN, ICON_SIZE_ENTRY, ICON_SIZE_TREEVIEW)),
    (STOCK_ENTRY_FTP, "system-file-manager", (ICON_SIZE_DATAVIEW, ICON_SIZE_DROPDOWN, ICON_SIZE_ENTRY, ICON_SIZE_TREEVIEW)),
    (STOCK_ENTRY_GENERIC, "document-new", (ICON_SIZE_DATAVIEW, ICON_SIZE_DROPDOWN, ICON_SIZE_ENTRY, ICON_SIZE_TREEVIEW)),
    (STOCK_ENTRY_PHONE, "phone", (ICON_SIZE_DATAVIEW, ICON_SIZE_DROPDOWN, ICON_SIZE_ENTRY, ICON_SIZE_TREEVIEW)),
    (STOCK_ENTRY_SHELL, "utilities-terminal", (ICON_SIZE_DATAVIEW, ICON_SIZE_DROPDOWN, ICON_SIZE_ENTRY, ICON_SIZE_TREEVIEW)),
    (STOCK_ENTRY_REMOTEDESKTOP, "preferences-desktop-remote-desktop", (ICON_SIZE_DATAVIEW, ICON_SIZE_DROPDOWN, ICON_SIZE_ENTRY, ICON_SIZE_TREEVIEW)),
    (STOCK_ENTRY_WEBSITE, "web-browser", (ICON_SIZE_DATAVIEW, ICON_SIZE_DROPDOWN, ICON_SIZE_ENTRY, ICON_SIZE_TREEVIEW)),
    (STOCK_ENTRY_FOLDER, "folder", (ICON_SIZE_DATAVIEW, ICON_SIZE_DROPDOWN, ICON_SIZE_ENTRY, ICON_SIZE_TREEVIEW)),
    (STOCK_ENTRY_FOLDER_OPEN, "folder-open", (ICON_SIZE_DATAVIEW, ICON_SIZE_DROPDOWN, ICON_SIZE_ENTRY, ICON_SIZE_TREEVIEW)),
)
# (stock id, label, icon name) triples for the custom action items.
STOCK_ITEMS = (
    (STOCK_CONTINUE, _('_Continue'), "stock_test-mode"),
    (STOCK_DISCARD, _('_Discard'), Gtk.STOCK_DELETE),
    (STOCK_EDIT, _('_Edit'), Gtk.STOCK_EDIT),
    (STOCK_EXPORT, _('_Export'), Gtk.STOCK_EXECUTE),
    (STOCK_FOLDER, '', "stock_folder"),
    (STOCK_GENERATE, _('_Generate'), Gtk.STOCK_EXECUTE),
    (STOCK_GOTO, _('_Go to'), Gtk.STOCK_JUMP_TO),
    (STOCK_IMPORT, _('_Import'), Gtk.STOCK_CONVERT),
    (STOCK_LOCK, _('_Lock'), "stock_lock"),
    (STOCK_NEW_ENTRY, _('_Add Entry'), Gtk.STOCK_ADD),
    (STOCK_NEW_FOLDER, _('_Add Folder'), "stock_folder"),
    (STOCK_NEXT, _('Next'), Gtk.STOCK_GO_DOWN),
    (STOCK_PASSWORD_CHANGE, _('_Change'), "stock_lock-ok"),
    (STOCK_PASSWORD_CHECK, _('_Check'), "stock_lock-ok"),
    (STOCK_PASSWORD_STRONG, '', "stock_lock-ok"),
    (STOCK_PASSWORD_WEAK, '', "stock_lock-broken"),
    (STOCK_PREVIOUS, _('Previous'), Gtk.STOCK_GO_UP),
    (STOCK_RELOAD, _('_Reload'), Gtk.STOCK_REFRESH),
    (STOCK_REMOVE, _('Re_move'), Gtk.STOCK_DELETE),
    (STOCK_REPLACE, _('_Replace'), Gtk.STOCK_SAVE_AS),
    (STOCK_UNKNOWN, _('Unknown'), "dialog-question"),
    (STOCK_UNLOCK, _('_Unlock'), "stock_lock-open"),
    (STOCK_UPDATE, _('_Update'), "stock_edit"),
)
# EXCEPTIONS #
class DataError(Exception):
    "Raised when the user or a caller supplies invalid data"
    pass
# FUNCTIONS #
def generate_field_display_widget(field, cfg = None, userdata = None):
    "Generates a widget for displaying a field value"
    dtype = field.datatype
    if dtype == entry.DATATYPE_EMAIL:
        # email fields become clickable mailto: links
        return LinkButton("mailto:%s" % field.value, util.escape_markup(field.value))
    if dtype == entry.DATATYPE_PASSWORD:
        # passwords follow the "view passwords" preference
        return PasswordLabel(util.escape_markup(field.value), cfg, userdata)
    if dtype == entry.DATATYPE_URL:
        return LinkButton(field.value, util.escape_markup(field.value))
    # everything else is shown as a plain, selectable label
    shown = Label(util.escape_markup(field.value))
    shown.set_selectable(True)
    return shown
def generate_field_edit_widget(field, cfg = None, userdata = None):
    "Generates a widget for editing a field"
    field_type = type(field)
    # specific field classes take precedence over the generic datatype
    if field_type == entry.PasswordField:
        editor = PasswordEntryGenerate(None, cfg, userdata)
    elif field_type == entry.UsernameField:
        editor = Gtk.ComboBox.new_with_entry()
        setup_comboboxentry(editor, userdata)
    elif field.datatype == entry.DATATYPE_FILE:
        editor = FileEntry()
    elif field.datatype == entry.DATATYPE_PASSWORD:
        editor = PasswordEntry(None, cfg, userdata)
    else:
        editor = Entry()
    editor.set_text(field.value or "")
    return editor
def setup_comboboxentry(widget, userdata=None):
    "Configures a ComboBox-with-entry: text access, completion and a value list"
    # expose the inner entry and proxy its text accessors, so the combo
    # can be used like a plain Entry by callers
    widget.entry = widget.get_child()
    widget.entry.set_activates_default(True)
    widget.set_text = widget.entry.set_text
    widget.get_text = widget.entry.get_text
    # a single-column model backs both the dropdown and the completion popup
    widget.model = Gtk.ListStore(GObject.TYPE_STRING)
    widget.set_model(widget.model)
    widget.set_entry_text_column(0)
    widget.completion = Gtk.EntryCompletion()
    widget.completion.set_model(widget.model)
    widget.completion.set_text_column(0)
    widget.completion.set_minimum_key_length(1)
    widget.entry.set_completion(widget.completion)
    def set_values(vlist):
        "Sets the values for the dropdown"
        widget.model.clear()
        for item in vlist:
            widget.model.append((item,))
    widget.set_values = set_values
    if userdata is not None:
        widget.set_values(userdata)
# CONTAINERS #
class HBox(Gtk.HBox):
    "A horizontal box that packs its children with standard spacing"

    def __init__(self, *args):
        Gtk.HBox.__init__(self)
        self.set_spacing(6)
        self.set_border_width(0)
        for child in args:
            self.pack_start(child, True, True, 0)
class HButtonBox(Gtk.HButtonBox):
    "A horizontal button box using the spread layout"

    def __init__(self, *args):
        Gtk.HButtonBox.__init__(self)
        self.set_layout(Gtk.ButtonBoxStyle.SPREAD)
        self.set_spacing(12)
        for btn in args:
            self.pack_start(btn, True, True, 0)
class VBox(Gtk.VBox):
    "A vertical box that packs its children with standard spacing"

    def __init__(self, *args):
        Gtk.VBox.__init__(self)
        self.set_spacing(6)
        self.set_border_width(0)
        for child in args:
            self.pack_start(child, True, True, 0)
class Notebook(Gtk.Notebook):
    "A notebook (tabbed view)"

    def __init__(self):
        Gtk.Notebook.__init__(self)

    def create_page(self, title):
        "Appends a new page labelled with the given title and returns it"
        newpage = NotebookPage()
        self.append_page(newpage, Label(title))
        return newpage
class NotebookPage(VBox):
    "A notebook page holding labelled input sections"

    def __init__(self):
        VBox.__init__(self)
        # one sizegroup shared by all sections, so labels line up across them
        self.sizegroup = Gtk.SizeGroup(mode=Gtk.SizeGroupMode.HORIZONTAL)
        self.set_border_width(12)
        self.set_spacing(18)

    def add_section(self, title, description = None):
        "Creates an InputSection, packs it into the page and returns it"
        newsection = InputSection(title, description, self.sizegroup)
        self.pack_start(newsection, False, False, 0)
        return newsection
class ScrolledWindow(Gtk.ScrolledWindow):
    "A scrolled window for partially displaying a child widget"

    def __init__(self, contents = None):
        Gtk.ScrolledWindow.__init__(self)
        # show scrollbars only when needed, in either direction
        self.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        if contents is not None:
            self.add(contents)
class Toolbar(Gtk.Toolbar):
    "A Gtk.Toolbar with convenience helpers for appending items"

    def append_space(self):
        "Appends an invisible separator acting as spacing"
        spacer = Gtk.SeparatorToolItem()
        spacer.set_draw(False)
        self.insert(spacer, -1)

    def append_widget(self, widget, tooltip = None):
        "Wraps a widget in a ToolItem and appends it to the toolbar"
        item = Gtk.ToolItem()
        item.add(widget)
        if tooltip is not None:
            item.set_tooltip_text(tooltip)
        self.insert(item, -1)
class InputSection(VBox):
    "A titled section of labelled input fields"

    def __init__(self, title = None, description = None, sizegroup = None):
        VBox.__init__(self)
        self.title = None
        self.desc = None
        self.sizegroup = sizegroup
        if title is not None:
            self.title = Label('<span weight="bold">%s</span>' % util.escape_markup(title))
            self.pack_start(self.title, False, True, 0)
        if description is not None:
            self.desc = Label(util.escape_markup(description))
            self.pack_start(self.desc, False, True, 0)
        # fall back to a private sizegroup when none is shared with us
        if sizegroup is None:
            self.sizegroup = Gtk.SizeGroup(mode=Gtk.SizeGroupMode.HORIZONTAL)

    def append_widget(self, title, widget, indent = True):
        "Adds a labelled widget row to the section"
        row = HBox()
        row.set_spacing(12)
        self.pack_start(row, False, False, 0)
        # indent rows under a section title with an empty leading label
        if self.title is not None and indent == True:
            row.pack_start(Label(""), False, False, 0)
        if title is not None:
            rowlabel = Label("%s:" % util.escape_markup(title))
            self.sizegroup.add_widget(rowlabel)
            row.pack_start(rowlabel, False, False, 0)
        row.pack_start(widget, True, True, 0)

    def clear(self):
        "Removes all widgets except the title and description"
        for child in self.get_children():
            if child not in (self.title, self.desc):
                child.destroy()
# DISPLAY WIDGETS #
class EventBox(Gtk.EventBox):
    "Wraps a widget so that it receives events (tooltips etc.)"

    def __init__(self, widget = None):
        Gtk.EventBox.__init__(self)
        if widget is not None:
            self.add(widget)
class Image(Gtk.Image):
    "A widget for displaying an icon-name based image"

    def __init__(self, stock = None, size = None):
        Gtk.Image.__init__(self)
        if stock is not None:
            self.set_from_icon_name(stock, size)
class ImageLabel(HBox):
    "A label with an image to its left"

    def __init__(self, text = None, stock = None, size = ICON_SIZE_LABEL):
        HBox.__init__(self)
        self.image = Image()
        self.label = Label(text)
        self.pack_start(self.image, False, True, 0)
        self.pack_start(self.label, True, True, 0)
        if text is not None:
            self.set_text(text)
        if stock is not None:
            self.set_stock(stock, size)

    def set_ellipsize(self, ellipsize):
        "Sets ellipsization on the inner label"
        self.label.set_ellipsize(ellipsize)

    def set_stock(self, stock, size):
        "Sets the image from an icon name"
        self.image.set_from_icon_name(stock, size)

    def set_text(self, text):
        "Sets the label text"
        self.label.set_text(text)
class Label(Gtk.Label):
    "A markup-enabled, line-wrapping text label"

    def __init__(self, text = None, justify = Gtk.Justification.LEFT):
        Gtk.Label.__init__(self)
        self.set_text(text)
        self.set_justify(justify)
        self.set_use_markup(True)
        self.set_line_wrap(True)
        self.set_valign(Gtk.Align.CENTER)
        # align the widget itself consistently with its text justification;
        # other justifications (FILL) leave the default alignment untouched
        halign = {
            Gtk.Justification.LEFT:   Gtk.Align.START,
            Gtk.Justification.CENTER: Gtk.Align.CENTER,
            Gtk.Justification.RIGHT:  Gtk.Align.END,
        }.get(justify)
        if halign is not None:
            self.set_halign(halign)

    def set_text(self, text):
        "Sets the label text as markup; None clears the label"
        if text is None:
            Gtk.Label.set_text(self, "")
        else:
            Gtk.Label.set_markup(self, text)
class PasswordLabel(EventBox):
    "A label for displaying passwords, honouring the view-passwords setting"
    def __init__(self, password = "", cfg = None, clipboard = None, justify = Gtk.Justification.LEFT): # nosec
        EventBox.__init__(self)
        self.password = util.unescape_markup(password)
        self.config = cfg
        self.clipboard = clipboard
        self.label = Label(util.escape_markup(self.password), justify)
        self.label.set_selectable(True)
        self.add(self.label)
        # initial visibility follows the "view-passwords" setting, and the
        # label is updated whenever that setting changes
        self.show_password(cfg.get_boolean("view-passwords"))
        self.config.connect('changed::view-passwords', lambda w, k: self.show_password(w.get_boolean(k)))
        self.connect("button-press-event", self.__cb_button_press)
        self.connect("drag-data-get", self.__cb_drag_data_get)
    def __cb_drag_data_get(self, widget, context, selection, info, timestamp, data = None):
        "Provides the real (unmasked) password for a drag operation"
        selection.set_text(self.password, -1)
    def __cb_button_press(self, widget, data = None):
        "Shows a copy-password popup on right-click while the password is hidden"
        # when the password is shown the label is selectable, so normal
        # selection/copy behaviour applies and we don't intercept clicks
        if self.label.get_selectable() == True:
            return False
        elif data.button == 3:
            menu = Menu()
            menuitem = ImageMenuItem(Gtk.STOCK_COPY, _('Copy password'))
            menuitem.connect("activate", lambda w: self.clipboard.set([self.password], True))
            menu.append(menuitem)
            menu.show_all()
            menu.popup_at_pointer(data)
            return True
    def set_ellipsize(self, ellipsize):
        "Sets ellipsize for the label"
        self.label.set_ellipsize(ellipsize)
    def show_password(self, show = True):
        "Shows the password, or masks it with invisible characters"
        if show == True:
            self.label.set_text(util.escape_markup(self.password))
            self.label.set_selectable(True)
            self.drag_source_unset()
        else:
            # fixed-width mask: always six invisible chars, regardless of length
            self.label.set_text(Gtk.Entry().get_invisible_char()*6)
            self.label.set_selectable(False)
            # while masked, allow dragging the real password as text
            self.drag_source_set(
                Gdk.ModifierType.BUTTON1_MASK,
                [
                    Gtk.TargetEntry.new("text/plain", 0, 0),
                    Gtk.TargetEntry.new("TEXT", 0, 1),
                    Gtk.TargetEntry.new("STRING", 0, 2),
                    Gtk.TargetEntry.new("COMPOUND TEXT", 0, 3),
                    Gtk.TargetEntry.new("UTF8_STRING", 0, 4)
                ],
                Gdk.DragAction.COPY
            )
class EditableTextView(Gtk.ScrolledWindow):
    "An editable, scrollable text view"

    def __init__(self, buffer = None, text = None):
        Gtk.ScrolledWindow.__init__(self)
        self.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        self.set_shadow_type(Gtk.ShadowType.ETCHED_OUT)
        self.textview = Gtk.TextView(buffer=buffer)
        self.textbuffer = self.textview.get_buffer()
        self.add(self.textview)
        if text is not None:
            self.textview.get_buffer().set_text(text)

    def set_text(self, text):
        "Sets the text contents; None clears the buffer"
        # BUGFIX: the None branch previously fell through and also called
        # set_text(None) on the buffer, which raises TypeError
        if text is None:
            self.textbuffer.set_text("")
        else:
            self.textbuffer.set_text(text)

    def get_text(self):
        "Returns the full text of the buffer"
        return self.textbuffer.get_text(self.textbuffer.get_start_iter(), self.textbuffer.get_end_iter(), False)
class TextView(Gtk.TextView):
    "A read-only monospace text view"

    def __init__(self, buffer = None, text = None):
        Gtk.TextView.__init__(self)
        self.set_buffer(buffer)
        self.set_editable(False)
        self.set_wrap_mode(Gtk.WrapMode.NONE)
        self.set_cursor_visible(False)
        # fixed-width font, suitable for previews of structured data
        self.modify_font(Pango.FontDescription("Monospace"))
        if text is not None:
            self.get_buffer().set_text(text)
# TEXT ENTRIES #
class Entry(Gtk.Entry):
    "A text entry which activates the dialog default on Enter"

    def __init__(self, text = None):
        Gtk.Entry.__init__(self)
        self.set_activates_default(True)
        self.set_text(text)

    def set_text(self, text):
        "Sets the entry contents, treating None as the empty string"
        Gtk.Entry.set_text(self, "" if text is None else text)
class FileEntry(HBox):
    "A file entry - a text entry with a Browse button opening a file selector"

    def __init__(self, title = None, file = None, type = Gtk.FileChooserAction.OPEN):
        HBox.__init__(self)
        # BUGFIX: the old "title is not None and title or default" idiom
        # returned the default for an empty-string title; use a real
        # conditional expression instead
        self.title = title if title is not None else _('Select File')
        self.type = type
        self.entry = Entry()
        # re-emit the entry's change notification as this widget's own
        # "changed" signal (registered below) so it emulates a plain Entry
        self.entry.connect("changed", lambda w: self.emit("changed"))
        self.pack_start(self.entry, True, True, 0)
        self.button = Button(_('Browse...'), self.__cb_filesel)
        self.pack_start(self.button, False, False, 0)
        if file is not None:
            self.set_filename(file)

    def __cb_filesel(self, widget, data = None):
        "Displays a file selector when Browse is pressed"
        try:
            fsel = dialog.FileSelector(None, self.title, self.type)
            current = self.get_filename()
            if current is not None:
                fsel.set_filename(current)
            self.set_filename(fsel.run())
        except dialog.CancelError:
            # the user dismissed the selector; keep the current filename
            pass

    def get_filename(self):
        "Returns the normalized filename from the entry"
        return io.file_normpath(self.entry.get_text())

    def get_text(self):
        "Wrapper to emulate Entry"
        return self.entry.get_text()

    def set_filename(self, filename):
        "Sets the current filename and moves the cursor to its end"
        self.entry.set_text(io.file_normpath(filename))
        self.entry.set_position(-1)

    def set_text(self, text):
        "Wrapper to emulate Entry"
        self.entry.set_text(text)
# Register FileEntry as a GObject type and add a boolean "changed" action
# signal, so handlers can connect to it as they would to a Gtk.Entry.
GObject.type_register(FileEntry)
GObject.signal_new("changed", FileEntry, GObject.SignalFlags.ACTION,
                   GObject.TYPE_BOOLEAN, ())
class PasswordEntry(Gtk.Entry):
    "An entry for editing a password (follows the 'show passwords' preference)"
    def __init__(self, password = None, cfg = None, clipboard = None):
        Gtk.Entry.__init__(self)
        self.set_visibility(False)
        if password:
            self.set_text(password)
        # when True, password strength is re-checked on every edit
        self.autocheck = True
        self.config = cfg
        self.clipboard = clipboard
        self.connect("changed", self.__cb_check_password)
        self.connect("populate-popup", self.__cb_popup)
        if cfg != None:
            # keep visibility in sync with the "view-passwords" setting
            self.config.bind('view-passwords', self, "visibility", Gio.SettingsBindFlags.DEFAULT)
    def __cb_check_password(self, widget, data = None):
        "Callback for changed; re-evaluates password strength"
        if self.autocheck == False:
            return
        password = self.get_text()
        if len(password) == 0:
            # no password, no strength indicator
            self.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, None)
        else:
            try:
                util.check_password(password)
            except ValueError as reason:
                self.set_password_strong(False, _('The password %s') % str(reason))
            else:
                self.set_password_strong(True, _('The password seems good'))
    def __cb_popup(self, widget, menu):
        "Adds a copy-password item to the context menu when a clipboard is set"
        if self.clipboard != None:
            menuitem = ImageMenuItem(Gtk.STOCK_COPY, _('Copy password'))
            menuitem.connect("activate", lambda w: self.clipboard.set([self.get_text()], True))
            menu.insert(menuitem, 2)
            menu.show_all()
    def set_password_strong(self, strong, reason = ""):
        "Shows a strong/weak icon with an explanatory tooltip"
        self.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, strong and STOCK_PASSWORD_STRONG or STOCK_PASSWORD_WEAK)
        self.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, reason)
class PasswordEntryGenerate(HBox):
    "A password entry paired with a Generate button"

    def __init__(self, password = None, cfg = None, clipboard = None):
        HBox.__init__(self)
        self.config = cfg
        self.pwentry = PasswordEntry(password, cfg, clipboard)
        self.button = Button(_('Generate'), lambda w: self.generate())
        self.pack_start(self.pwentry, True, True, 0)
        self.pack_start(self.button, False, False, 0)
        # alias for callers which expect a plain .entry attribute
        self.entry = self.pwentry

    def generate(self):
        "Fills the entry with a generated password, using the configured length/punctuation"
        length = self.config.get_int("passwordgen-length")
        punctuation = self.config.get_boolean("passwordgen-punctuation")
        self.pwentry.set_text(util.generate_password(length, punctuation))

    def get_text(self):
        "Wrapper for the entry"
        return self.pwentry.get_text()

    def set_text(self, text):
        "Wrapper for the entry"
        self.pwentry.set_text(text)
class SpinEntry(Gtk.SpinButton):
    "A numeric spin entry"

    def __init__(self, adjustment = None, climb_rate = 0.0, digits = 0):
        Gtk.SpinButton.__init__(self)
        self.configure(adjustment, climb_rate, digits)
        # step by 1, page by 5, within 0..100000, digits only
        self.set_increments(1, 5)
        self.set_range(0, 100000)
        self.set_numeric(True)
# BUTTONS #
class Button(Gtk.Button):
    "A push button with an optional clicked handler"

    def __init__(self, label, callback = None):
        Gtk.Button.__init__(self, label=label)
        if callback is not None:
            self.connect("clicked", callback)
class CheckButton(Gtk.CheckButton):
    "A checkbutton"
    # trivial subclass kept so this module's widget set is self-contained
    def __init__(self, label = None):
        Gtk.CheckButton.__init__(self, label=label)
class DropDown(Gtk.ComboBox):
    "A dropdown button backed by a (text, icon-name, payload) list store"
    def __init__(self, icons = False):
        Gtk.ComboBox.__init__(self)
        # columns: 0 = display text, 1 = icon name, 2 = arbitrary payload
        self.model = Gtk.ListStore(GObject.TYPE_STRING, GObject.TYPE_STRING, GObject.TYPE_PYOBJECT)
        self.set_model(self.model)
        if icons == True:
            cr = Gtk.CellRendererPixbuf()
            # reserve a fixed width so rows align whether or not icons differ
            cr.set_fixed_size(Gtk.icon_size_lookup(ICON_SIZE_DROPDOWN)[1] + 5, -1)
            self.pack_start(cr, False)
            self.add_attribute(cr, "icon-name", 1)
        cr = Gtk.CellRendererText()
        self.pack_start(cr, True)
        self.add_attribute(cr, "text", 0)
        self.connect("realize", self.__cb_show)
    def __cb_show(self, widget, data = None):
        "Selects the first item by default when the widget is realized"
        if self.get_active() == -1:
            self.set_active(0)
    def append_item(self, text, stock = None, data = None):
        "Appends an item to the dropdown"
        self.model.append((text, stock, data))
    def delete_item(self, index):
        "Removes an item from the dropdown, ignoring out-of-range indexes"
        if self.model.iter_n_children(None) > index:
            iter = self.model.iter_nth_child(None, index)
            self.model.remove(iter)
    def get_active_item(self):
        "Returns the (text, icon, payload) tuple for the current item"
        iter = self.model.iter_nth_child(None, self.get_active())
        return self.model.get(iter, 0, 1, 2)
    def get_item(self, index):
        "Returns the (text, icon, payload) tuple for an item"
        return self.model.get(self.model.iter_nth_child(None, index), 0, 1, 2)
    def get_num_items(self):
        "Returns the number of items in the dropdown"
        return self.model.iter_n_children(None)
    def insert_item(self, index, text, stock = None, data = None):
        "Inserts an item in the dropdown at the given index"
        self.model.insert(index, (text, stock, data))
class EntryDropDown(DropDown):
    "A dropdown listing the available entry types (excluding folders)"

    def __init__(self):
        DropDown.__init__(self, True)
        for etype in entry.ENTRYLIST:
            if etype != entry.FolderEntry:
                self.append_item(etype().typename, etype().icon, etype)

    def get_active_type(self):
        "Returns the currently selected entry type, if any"
        selected = self.get_active_item()
        if selected is not None:
            return selected[2]

    def set_active_type(self, entrytype):
        "Selects the item whose payload matches the given entry type"
        for index in range(self.model.iter_n_children(None)):
            row = self.model.iter_nth_child(None, index)
            if self.model.get_value(row, 2) == entrytype:
                self.set_active(index)
class FileButton(Gtk.FileChooserButton):
    "A file chooser button"
    def __init__(self, title = None, file = None, type = Gtk.FileChooserAction.OPEN):
        Gtk.FileChooserButton.__init__(self, title)
        self.set_action(type)
        self.set_local_only(False)
        if file != None:
            self.set_filename(file)
    def get_filename(self):
        "Returns the normalized filename derived from the chooser URI"
        # NOTE(review): get_uri() returns None when nothing is selected;
        # confirm io.file_normpath handles a None argument
        return io.file_normpath(self.get_uri())
    def set_filename(self, filename):
        "Sets the filename, avoiding a redundant chooser update"
        filename = io.file_normpath(filename)
        # only push the change down to GTK when it actually differs
        if filename != io.file_normpath(self.get_filename()):
            Gtk.FileChooserButton.set_filename(self, filename)
class LinkButton(Gtk.LinkButton):
    "A link button, truncating over-long labels for display"

    def __init__(self, url, label):
        Gtk.LinkButton.__init__(self, uri=url, label=label)
        self.set_halign(Gtk.Align.START)
        self.label = self.get_children()[0]
        # BUGFIX: this note used to be a bare string expression (a no-op
        # statement) rather than a comment.
        # If the label (usually the URI) is too long, shorten the visible
        # text; the full URI is still used as the link target.
        if len(label) > 60:
            self.label.set_text(label[0:59] + " (...)")

    def set_ellipsize(self, ellipsize):
        "Sets ellipsize for label"
        self.label.set_ellipsize(ellipsize)

    def set_justify(self, justify):
        "Sets justify for label"
        self.label.set_justify(justify)
class RadioButton(Gtk.RadioButton):
    "A radio button"
    def __init__(self, group, label):
        # NOTE(review): passes group/label positionally to the GTK3
        # constructor; confirm this matches the PyGObject signature in use
        Gtk.RadioButton.__init__(self, group, label)
# MENUS AND MENU ITEMS #
class ImageMenuItem(Gtk.ImageMenuItem):
    "A menu item carrying an icon and a text label"

    def __init__(self, stock, text = None):
        Gtk.ImageMenuItem.__init__(self, stock)
        # keep handles on the inner label and image for later updates
        self.label = self.get_children()[0]
        self.image = self.get_image()
        if text is not None:
            self.set_text(text)

    def set_stock(self, stock):
        "Sets the icon from an icon name"
        self.image.set_from_icon_name(stock, Gtk.IconSize.MENU)

    def set_text(self, text):
        "Sets the item text"
        self.label.set_text(text)
class Menu(Gtk.Menu):
    "A menu"
    # trivial subclass used for the popup menus built in this module
    def __init__(self):
        Gtk.Menu.__init__(self)
# MISCELLANEOUS WIDGETS #
class TreeView(Gtk.TreeView):
    "A tree display with multi-select, popup, doubleclick and multi-row drag support"
    def __init__(self, model):
        Gtk.TreeView.__init__(self, model=model)
        self.set_headers_visible(False)
        self.model = model
        # handler ids for the temporary drag-check callbacks (see
        # __cb_buttonpress / __drag_check_end)
        self.__cbid_drag_motion = None
        self.__cbid_drag_end = None
        self.selection = self.get_selection()
        self.selection.set_mode(Gtk.SelectionMode.MULTIPLE)
        self.connect("button-press-event", self.__cb_buttonpress)
        self.connect("key-press-event", self.__cb_keypress)
    def __cb_buttonpress(self, widget, data):
        "Callback for handling mouse clicks"
        path = self.get_path_at_pos(int(data.x), int(data.y))
        # handle click outside entry
        if path is None:
            self.unselect_all()
        # handle doubleclick
        if data.button == 1 and data.type == Gdk.EventType._2BUTTON_PRESS and path != None:
            iter = self.model.get_iter(path[0])
            self.toggle_expanded(iter)
            if iter != None:
                self.emit("doubleclick", iter)
        # display popup on right-click
        elif data.button == 3:
            # right-clicking an unselected row moves the cursor there first
            if path != None and self.selection.iter_is_selected(self.model.get_iter(path[0])) == False:
                self.set_cursor(path[0], path[1], False)
            self.emit("popup", data)
            return True
        # handle drag-and-drop of multiple rows: defer the drag decision
        # until the pointer moves far enough (so a plain click still works)
        elif self.__cbid_drag_motion is None and data.button in (1, 2) and data.type == Gdk.EventType.BUTTON_PRESS and path != None and self.selection.iter_is_selected(self.model.get_iter(path[0])) == True and len(self.get_selected()) > 1:
            self.__cbid_drag_motion = self.connect("motion-notify-event", self.__cb_drag_motion, data.copy())
            self.__cbid_drag_end = self.connect("button-release-event", self.__cb_button_release, data.copy())
            return True
    def __cb_button_release(self, widget, data, userdata = None):
        "Ends a drag check; replays the original press as a normal click"
        self.emit("button-press-event", userdata)
        self.__drag_check_end()
    def __cb_drag_motion(self, widget, data, userdata = None):
        "Starts the actual drag once the pointer has moved past the threshold"
        if self.drag_check_threshold(int(userdata.x), int(userdata.y), int(data.x), int(data.y)) == True:
            self.__drag_check_end()
            uritarget = Gtk.TargetEntry.new("revelation/treerow", Gtk.TargetFlags.SAME_APP | Gtk.TargetFlags.SAME_WIDGET, 0)
            # NOTE(review): Gtk.TargetList is normally built via
            # Gtk.TargetList.new([...]) in PyGObject; confirm this call works
            self.drag_begin_with_coordinates(Gtk.TargetList([uritarget]), Gdk.DragAction.MOVE, userdata.button.button, userdata, userdata.x, userdata.y)
    def __cb_keypress(self, widget, data = None):
        "Callback for handling key presses"
        # expand/collapse node on space
        if data.keyval == Gdk.KEY_space:
            self.toggle_expanded(self.get_active())
    def __drag_check_end(self):
        "Disconnects the temporary drag-check handlers"
        self.disconnect(self.__cbid_drag_motion)
        self.disconnect(self.__cbid_drag_end)
        self.__cbid_drag_motion = None
        self.__cbid_drag_end = None
    def collapse_row(self, iter):
        "Collapse a tree row"
        Gtk.TreeView.collapse_row(self, self.model.get_path(iter))
    def expand_row(self, iter):
        "Expand a tree row (no-op for leaf rows)"
        if iter is not None and self.model.iter_n_children(iter) > 0:
            Gtk.TreeView.expand_row(self, self.model.get_path(iter), False)
    def expand_to_iter(self, iter):
        "Expand all items up to and including a given iter"
        path = self.model.get_path(iter)
        for i in range(len(path)):
            iter = self.model.get_iter(path[0:i])
            self.expand_row(iter)
    def get_active(self):
        "Returns the cursor row iter, or None if it is not selected"
        if self.model is None:
            return None
        iter = self.model.get_iter(self.get_cursor()[0])
        if iter is None or self.selection.iter_is_selected(iter) == False:
            return None
        return iter
    def get_selected(self):
        "Get a list of currently selected row iters"
        list = []
        self.selection.selected_foreach(lambda model, path, iter: list.append(iter))
        return list
    def select(self, iter):
        "Select a particular row (None clears the selection)"
        if iter is None:
            self.unselect_all()
        else:
            self.expand_to_iter(iter)
            self.set_cursor(self.model.get_path(iter))
    def select_all(self):
        "Select all rows in the tree"
        self.selection.select_all()
        # emit change notifications manually so listeners update
        self.selection.emit("changed")
        self.emit("cursor_changed")
    def set_model(self, model):
        "Change the tree model which is being displayed"
        Gtk.TreeView.set_model(self, model)
        self.model = model
    def toggle_expanded(self, iter):
        "Toggle the expanded state of a row"
        if iter is None:
            return
        elif self.row_expanded(self.model.get_path(iter)):
            self.collapse_row(iter)
        else:
            self.expand_row(iter)
    def unselect_all(self):
        "Unselect all rows in the tree"
        self.selection.unselect_all()
        self.selection.emit("changed")
        self.emit("cursor_changed")
        self.emit("unselect_all")
# Register custom action signals emitted by TreeView: "doubleclick" carries
# the activated row iter, "popup" carries the triggering button event.
GObject.signal_new("doubleclick", TreeView, GObject.SignalFlags.ACTION,
                   GObject.TYPE_BOOLEAN, (GObject.TYPE_PYOBJECT, ))
GObject.signal_new("popup", TreeView, GObject.SignalFlags.ACTION,
                   GObject.TYPE_BOOLEAN, (GObject.TYPE_PYOBJECT, ))
class EntryTree(TreeView):
    "A TreeView displaying an entry store with icon and name columns"
    def __init__(self, entrystore):
        TreeView.__init__(self, entrystore)
        # single column with an icon renderer followed by a text renderer
        column = Gtk.TreeViewColumn()
        self.append_column(column)
        cr = Gtk.CellRendererPixbuf()
        column.pack_start(cr, False)
        column.add_attribute(cr, "icon-name", data.COLUMN_ICON)
        cr.set_property("stock-size", ICON_SIZE_TREEVIEW)
        cr = Gtk.CellRendererText()
        column.pack_start(cr, True)
        column.add_attribute(cr, "text", data.COLUMN_NAME)
        self.connect("doubleclick", self.__cb_doubleclick)
        self.connect("row-expanded", self.__cb_row_expanded)
        self.connect("row-collapsed", self.__cb_row_collapsed)
    def __cb_doubleclick(self, widget, iter):
        "Stop doubleclick emission on folder"
        if type(self.model.get_entry(iter)) == entry.FolderEntry:
            self.stop_emission("doubleclick")
    def __cb_row_collapsed(self, object, iter, extra):
        "Updates folder icons when collapsed"
        self.model.folder_expanded(iter, False)
    def __cb_row_expanded(self, object, iter, extra):
        "Updates folder icons when expanded"
        # make sure all children are collapsed (some may have lingering expand icons)
        for i in range(self.model.iter_n_children(iter)):
            child = self.model.iter_nth_child(iter, i)
            if self.row_expanded(self.model.get_path(child)) == False:
                self.model.folder_expanded(child, False)
        self.model.folder_expanded(iter, True)
    def set_model(self, model):
        "Sets the model displayed by the tree view"
        TreeView.set_model(self, model)
        if model is None:
            return
        # reset top-level folder icons to their collapsed state
        for i in range(model.iter_n_children(None)):
            model.folder_expanded(model.iter_nth_child(None, i), False)
class Statusbar(Gtk.Statusbar):
    "The application statusbar"

    def __init__(self):
        Gtk.Statusbar.__init__(self)
        # all messages share one context, so push/pop pair up cleanly
        self.contextid = self.get_context_id("statusbar")

    def clear(self):
        "Removes the current statusbar message"
        self.pop(self.contextid)

    def set_status(self, text):
        "Replaces the statusbar message with the given text"
        self.clear()
        self.push(self.contextid, text or "")
# ACTION HANDLING #
class Action(Gtk.Action):
    "UI Manager Action"

    def __init__(self, name, label = None, tooltip = None, stock = "", important = False):
        Gtk.Action.__init__(self, name, label, tooltip, stock)
        # important actions show their label in the toolbar
        if important == True:
            self.set_property("is-important", True)
class ActionGroup(Gtk.ActionGroup):
    "UI Manager Actiongroup"

    def add_action(self, action, accel = None):
        "Adds an action, optionally binding it to an accelerator"
        if accel is not None:
            self.add_action_with_accel(action, accel)
        else:
            Gtk.ActionGroup.add_action(self, action)
class ToggleAction(Gtk.ToggleAction):
    "A toggle action item"
    # thin subclass; exists so the module exposes a uniform action API
    def __init__(self, name, label, tooltip = None, stock = None):
        Gtk.ToggleAction.__init__(self, name, label, tooltip, stock)
class UIManager(Gtk.UIManager):
    "UI item manager"
    def __init__(self):
        Gtk.UIManager.__init__(self)
        self.connect("connect-proxy", self.__cb_connect_proxy)
    def __cb_connect_proxy(self, uimanager, action, widget):
        "Callback for connecting proxies to an action"
        if type(widget) in (Gtk.MenuItem, Gtk.ImageMenuItem, Gtk.CheckMenuItem):
            # menu items carry the action tooltip for statusbar display
            widget.tooltip = action.get_property("tooltip")
        else:
            # toolbar buttons drop the trailing "..." from their labels
            widget.set_property("label", widget.get_property("label").replace("...", ""))
    def add_ui_from_file(self, file):
        "Loads ui from a file, mapping load failures to IOError"
        try:
            Gtk.UIManager.add_ui_from_file(self, file)
        except GObject.GError:
            raise IOError
    def append_action_group(self, actiongroup):
        "Appends an action group after all existing groups"
        Gtk.UIManager.insert_action_group(self, actiongroup, len(self.get_action_groups()))
    def get_action(self, name):
        "Looks up an action in the managers actiongroups"
        for actiongroup in self.get_action_groups():
            action = actiongroup.get_action(name)
            if action is not None:
                return action
    def get_action_group(self, name):
        "Returns the named action group, or None if not found"
        for actiongroup in self.get_action_groups():
            if actiongroup.get_name() == name:
                return actiongroup
# APPLICATION COMPONENTS #
class AppWindow(Gtk.ApplicationWindow):
    "An application window"
    # thin subclass; all arguments are forwarded to Gtk.ApplicationWindow
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
class App(Gtk.Application):
"An application"
    def __init__(self, appname):
        # NOTE(review): appname is accepted but unused; the application id
        # is hard-coded below
        Gtk.Application.__init__(self,
                                 application_id='info.olasagasti.revelation')
        # maps toolbar name -> toolbar widget, see add_toolbar()
        self.toolbars = {}
    def __connect_menu_statusbar(self, menu):
        "Connects a menus items to the statusbar"
        for item in menu.get_children():
            if isinstance(item, Gtk.MenuItem) == True:
                # show the item description while it is highlighted
                item.connect("select", self.cb_menudesc, True)
                item.connect("deselect", self.cb_menudesc, False)
def cb_menudesc(self, item, show):
"Displays menu descriptions in the statusbar"
if show == True:
self.statusbar.set_status(item.get_label())
else:
self.statusbar.clear()
def __cb_toolbar_hide(self, widget, name):
"Hides the toolbar dock when the toolbar is hidden"
if name in self.toolbars:
self.toolbars[name].hide()
def __cb_toolbar_show(self, widget, name):
"Shows the toolbar dock when the toolbar is shown"
if name in self.toolbars:
self.toolbars[name].show()
def add_toolbar(self, toolbar, name, band):
"Adds a toolbar"
self.toolbars[name] = toolbar
self.main_vbox.pack_start(toolbar, False, True, 0)
toolbar.connect("show", self.__cb_toolbar_show, name)
toolbar.connect("hide", self.__cb_toolbar_hide, name)
toolbar.show_all()
def get_title(self):
"Returns the app title"
title = Gtk.Window.get_title(self.window)
return title.replace(" - " + config.APPNAME, "")
def popup(self, menu, button, time):
"Displays a popup menu"
# get Gtk.Menu
gmenu = Gtk.Menu.new_from_model(menu)
gmenu.attach_to_widget(self.window, None)
# transfer the tooltips from Gio.Menu to Gtk.Menu
menu_item_index = 0
menu_items = gmenu.get_children()
for sect in range(menu.get_n_items()):
for item in range(menu.get_item_link(sect, 'section').get_n_items()):
tooltip_text = menu.get_item_link(sect, 'section').get_item_attribute_value(item, 'tooltip', None)
if tooltip_text:
tooltip_text = tooltip_text.unpack()
menu_items[menu_item_index].set_tooltip_text(tooltip_text)
menu_item_index += 1
# skip section separator
menu_item_index += 1
self.__connect_menu_statusbar(gmenu)
gmenu.popup_at_pointer()
def set_menus(self, menubar):
"Sets the menubar for the application"
for item in menubar.get_children():
self.__connect_menu_statusbar(item.get_submenu())
self.main_vbox.pack_start(menubar, False, True, 0)
def set_title(self, title):
"Sets the window title"
Gtk.Window.set_title(self.window, title + " - " + config.APPNAME)
def set_toolbar(self, toolbar):
"Sets the application toolbar"
self.main_vbox.pack_start(toolbar, False, True, 0)
toolbar.connect("show", self.__cb_toolbar_show, "Toolbar")
toolbar.connect("hide", self.__cb_toolbar_hide, "Toolbar")
def set_contents(self, widget):
self.main_vbox.pack_start(widget, True, True, 0)
class EntryView(VBox):
    "A component for displaying an entry"

    def __init__(self, cfg = None, clipboard = None):
        VBox.__init__(self)
        self.set_spacing(12)
        self.set_border_width(12)
        self.config = cfg
        self.clipboard = clipboard
        # The entry currently on display (None when cleared).
        self.entry = None

    def clear(self, force = False):
        "Clears the data view"
        # `force` is kept for interface compatibility with callers.
        self.entry = None
        for child in self.get_children():
            child.destroy()

    def display_entry(self, e):
        "Displays info about an entry; passing None just clears the view"
        self.clear()
        self.entry = e
        if self.entry is None:
            return
        # set up metadata display (icon, name and typed description)
        metabox = VBox()
        self.pack_start(metabox)
        label = ImageLabel(
            "<span size=\"large\" weight=\"bold\">%s</span>" % util.escape_markup(e.name),
            e.icon, ICON_SIZE_DATAVIEW
        )
        label.set_halign(Gtk.Align.CENTER)
        label.set_valign(Gtk.Align.CENTER)
        metabox.pack_start(label, True, True, 0)
        label = Label("<span weight=\"bold\">%s</span>%s" % (e.typename + (e.description != "" and ": " or ""), util.escape_markup(e.description)), Gtk.Justification.CENTER)
        metabox.pack_start(label, True, True, 0)
        # set up field list (only fields that actually carry a value)
        fields = [field for field in e.fields if field.value != ""]
        if fields:
            table = Gtk.Grid()
            self.pack_start(table)
            table.set_column_spacing(10)
            table.set_row_spacing(5)
            # enumerate() replaces the original zip(range(len(...)), ...) —
            # same pairs of (row index, field), written idiomatically.
            for rowindex, field in enumerate(fields):
                label = Label("<span weight=\"bold\">%s: </span>" % util.escape_markup(field.name))
                label.set_hexpand(True)
                table.attach(label, 0, rowindex, 1, 1)
                widget = generate_field_display_widget(field, self.config, self.clipboard)
                widget.set_hexpand(True)
                table.attach(widget, 1, rowindex, 1, 1)
        # notes
        label = Label("<span weight=\"bold\">%s</span>%s" % ((e.notes != "" and _("Notes: ") or ""),
                                                             util.escape_markup(e.notes)), Gtk.Justification.LEFT)
        self.pack_start(label)
        # display updatetime (exact type check: folders have no update time)
        if type(e) != entry.FolderEntry:
            label = Label((_('Updated %s ago') + "\n%s") % (util.time_period_rough(e.updated, time.time()), time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(e.updated))), Gtk.Justification.CENTER)
            self.pack_start(label)
        self.show_all()

    def pack_start(self, widget):
        "Adds a widget to the data view, centered and non-expanding"
        widget.set_halign(Gtk.Align.CENTER)
        widget.set_valign(Gtk.Align.CENTER)
        VBox.pack_start(self, widget, False, False, 0)
class Searchbar(Toolbar):
    "A toolbar for easy searching"

    def __init__(self):
        Toolbar.__init__(self)
        self.entry = Gtk.SearchEntry()
        self.entry.set_tooltip_text(_('Text to search for'))
        self.dropdown = EntryDropDown()
        self.dropdown.insert_item(0, _('Any type'), "help-about")
        box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 0)
        # The "linked" style class visually joins entry and buttons.
        Gtk.StyleContext.add_class(box.get_style_context(), "linked")
        self.button_prev = Gtk.Button.new_from_icon_name(STOCK_PREVIOUS,
                                                         Gtk.IconSize.BUTTON)
        self.button_prev.set_tooltip_text(_('Find the previous match'))
        self.button_next = Gtk.Button.new_from_icon_name(STOCK_NEXT,
                                                         Gtk.IconSize.BUTTON)
        self.button_next.set_tooltip_text(_('Find the next match'))
        box.add(self.entry)
        box.add(self.button_prev)
        box.add(self.button_next)
        box.add(self.dropdown)
        self.append_widget(box)
        self.connect("show", self.__cb_show)
        self.entry.connect("changed", self.__cb_entry_changed)
        self.entry.connect("key-press-event", self.__cb_key_press)
        # Navigation stays disabled until there is search text.
        self.button_next.set_sensitive(False)
        self.button_prev.set_sensitive(False)

    def __cb_entry_changed(self, widget, data = None):
        "Callback for entry changes"
        # Enable prev/next only when the entry is non-empty.
        s = self.entry.get_text() != ""
        self.button_next.set_sensitive(s)
        self.button_prev.set_sensitive(s)

    def __cb_key_press(self, widget, data = None):
        "Callback for key presses"
        # return
        if data.keyval == Gdk.KEY_Return and widget.get_text() != "":
            # Shift+Return searches backwards, plain Return forwards.
            if (data.state & Gdk.ModifierType.SHIFT_MASK) == Gdk.ModifierType.SHIFT_MASK:
                self.button_prev.activate()
            else:
                self.button_next.activate()
            # True stops further handling of the key event.
            return True

    def __cb_show(self, widget, data = None):
        "Callback for widget display"
        self.entry.select_region(0, -1)
        self.entry.grab_focus()
| gpl-2.0 |
ric2b/Vivaldi-browser | chromium/third_party/blink/web_tests/external/wpt/mathml/tools/utils/misc.py | 7 | 1200 | from __future__ import print_function
import os
import progressbar
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
UnicodeXMLURL = "https://mathml-refresh.github.io/xml-entities/unicode.xml"
InlineAxisOperatorsURL = "https://mathml-refresh.github.io/mathml-core/tables/inline-axis-operators.txt"
MathMLAssociationCopyright = "Copyright (c) 2016 MathML Association"
def downloadWithProgressBar(url, outputDirectory="./", forceDownload=False):
    """Download `url` into `outputDirectory` with a console progress bar.

    Returns the path to the downloaded file. If the target file already
    exists, the download is skipped unless `forceDownload` is true.
    """
    baseName = os.path.basename(url)
    fileName = os.path.join(outputDirectory, baseName)
    if not forceDownload and os.path.exists(fileName):
        return fileName
    request = urlopen(url)
    # BUG FIX: request.info() returns an email.message.Message on Python 3
    # (rfc822.Message on Python 2); Message.get() exists on both, while the
    # original .getheader() is Python-2-only and crashed under the py3
    # urllib.request import path above.
    totalSize = int(request.info().get('Content-Length').strip())
    bar = progressbar.ProgressBar(maxval=totalSize).start()
    chunkSize = 16 * 1024
    downloaded = 0
    print("Downloading %s" % url)
    os.umask(0o002)
    with open(fileName, 'wb') as fp:
        while True:
            chunk = request.read(chunkSize)
            downloaded += len(chunk)
            bar.update(downloaded)
            if not chunk:
                break
            fp.write(chunk)
    bar.finish()
    return fileName
| bsd-3-clause |
aachik/flask-blog-abdulmx | env/lib/python2.7/site-packages/flask/_compat.py | 783 | 2164 | # -*- coding: utf-8 -*-
"""
flask._compat
~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
# True on CPython 2.x; everything below branches on this once at import time.
PY2 = sys.version_info[0] == 2

# Identity function, used where Python 3 needs no wrapper (see
# implements_to_string below).
_identity = lambda x: x

if not PY2:
    text_type = str
    string_types = (str,)
    integer_types = (int, )
    iterkeys = lambda d: iter(d.keys())
    itervalues = lambda d: iter(d.values())
    iteritems = lambda d: iter(d.items())
    from io import StringIO

    def reraise(tp, value, tb=None):
        # Re-raise `value` with traceback `tb`, preserving an existing one.
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    implements_to_string = _identity
else:
    text_type = unicode
    string_types = (str, unicode)
    integer_types = (int, long)
    iterkeys = lambda d: d.iterkeys()
    itervalues = lambda d: d.itervalues()
    iteritems = lambda d: d.iteritems()
    from cStringIO import StringIO

    # The three-argument Python 2 raise syntax is a SyntaxError on Python 3,
    # so it must be hidden inside exec().
    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')

    def implements_to_string(cls):
        # Class decorator: route __str__ through __unicode__, utf-8 encoded.
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
        return cls
def with_metaclass(meta, *bases):
    """Create a base class with metaclass `meta`, portable to py2 and py3."""
    # This requires a bit of explanation: the basic idea is to make a
    # dummy metaclass for one level of class instantiation that replaces
    # itself with the actual metaclass. Because of internal type checks
    # we also need to make sure that we downgrade the custom metaclass
    # for one level to something closer to type (that's why __call__ and
    # __init__ comes back from type etc.).
    #
    # This has the advantage over six.with_metaclass in that it does not
    # introduce dummy classes into the final MRO.
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__

        def __new__(cls, name, this_bases, d):
            # this_bases is None only for the temporary_class call below;
            # real subclass creation delegates to the actual metaclass.
            if this_bases is None:
                return type.__new__(cls, name, (), d)
            return meta(name, bases, d)
    return metaclass('temporary_class', None, {})
| mit |
c0710204/edx-platform | cms/djangoapps/contentstore/features/video.py | 16 | 8655 | # pylint: disable=C0111
from lettuce import world, step
from selenium.webdriver.common.keys import Keys
from xmodule.modulestore.django import modulestore
# CSS selectors for the named video player buttons used by the steps below.
VIDEO_BUTTONS = {
    'CC': '.hide-subtitles',
    'volume': '.volume',
    'play': '.video_control.play',
    'pause': '.video_control.pause',
    'handout': '.video-handout.video-download-button a',
}

# CSS selectors for non-button parts of the player UI.
SELECTORS = {
    'spinner': '.video-wrapper .spinner',
    'controls': 'section.video-controls',
}

# We should wait 300 ms for event handler invocation + 200ms for safety.
DELAY = 0.5
@step('youtube stub server (.*) YouTube API')
def configure_youtube_api(_step, action):
    "Configure whether the stub server proxies or blocks the YouTube API."
    # Map the step wording directly onto the stub-server flag.
    blocked_for_action = {'proxies': False, 'blocks': True}
    action = action.strip()
    if action not in blocked_for_action:
        raise ValueError('Parameter `action` should be one of "proxies" or "blocks".')
    world.youtube.config['youtube_api_blocked'] = blocked_for_action[action]
@step('I have created a Video component$')
def i_created_a_video_component(step):
    "Create a bare Video component in a new unit and wait for it to render."
    step.given('I am in Studio editing a new unit')
    world.create_component_instance(
        step=step,
        category='video',
    )
    world.wait_for_xmodule()
    world.disable_jquery_animations()
    world.wait_for_present('.is-initialized')
    world.wait(DELAY)
    world.wait_for_invisible(SELECTORS['spinner'])
    # The player controls never appear when the YouTube API is blocked,
    # so only wait for them when the stub server is proxying.
    if not world.youtube.config.get('youtube_api_blocked'):
        world.wait_for_visible(SELECTORS['controls'])

@step('I have created a Video component with subtitles$')
def i_created_a_video_with_subs(_step):
    "Shortcut: create a video using the default subtitle track id."
    _step.given('I have created a Video component with subtitles "OEoXaMPEzfM"')

@step('I have created a Video component with subtitles "([^"]*)"$')
def i_created_a_video_with_subs_with_name(_step, sub_id):
    "Create a video component and attach the named subtitle track to it."
    _step.given('I have created a Video component')
    # Store the current URL so we can return here
    video_url = world.browser.url
    # Upload subtitles for the video using the upload interface
    _step.given('I have uploaded subtitles "{}"'.format(sub_id))
    # Return to the video
    world.visit(video_url)
    world.wait_for_xmodule()
    # update .sub filed with proper subs name (which mimics real Studio/XML behavior)
    # this is needed only for that videos which are created in acceptance tests.
    _step.given('I edit the component')
    world.wait_for_ajax_complete()
    _step.given('I save changes')
    world.disable_jquery_animations()
    world.wait_for_present('.is-initialized')
    world.wait_for_invisible(SELECTORS['spinner'])

@step('I have uploaded subtitles "([^"]*)"$')
def i_have_uploaded_subtitles(_step, sub_id):
    "Upload the sjson subtitle fixture matching the given subtitle id."
    _step.given('I go to the files and uploads page')
    _step.given('I upload the test file "subs_{}.srt.sjson"'.format(sub_id.strip()))
@step('when I view the (.*) it does not have autoplay enabled$')
def does_not_autoplay(_step, video_type):
    "Assert the rendered player has autoplay disabled and starts paused."
    world.wait(DELAY)
    world.wait_for_ajax_complete()
    # data-autoplay may be serialized several ways; accept all falsy spellings.
    actual = world.css_find('.%s' % video_type)[0]['data-autoplay']
    expected = [u'False', u'false', False]
    assert actual in expected
    assert world.css_has_class('.video_control', 'play')

@step('creating a video takes a single click$')
def video_takes_a_single_click(_step):
    "Assert that one click on the video button creates the component."
    component_css = '.xmodule_VideoModule'
    assert world.is_css_not_present(component_css)
    world.css_click("a[data-category='video']")
    assert world.is_css_present(component_css)

@step('I edit the component$')
def i_edit_the_component(_step):
    "Open the component editor."
    world.edit_component()

@step('I have (hidden|toggled) captions$')
def hide_or_show_captions(step, shown):
    "Hide captions once, or toggle them off and back on."
    button_css = 'a.hide-subtitles'
    if shown == 'hidden':
        world.css_click(button_css)
    if shown == 'toggled':
        world.css_click(button_css)
        # When we click the first time, a tooltip shows up. We want to
        # click the button rather than the tooltip, so move the mouse
        # away to make it disappear.
        button = world.css_find(button_css)
        # mouse_out is not implemented on firefox with selenium
        if not world.is_firefox:
            button.mouse_out()
        world.css_click(button_css)
@step('I have created a video with only XML data$')
def xml_only_video(step):
    "Create a Video component directly in the modulestore, XML only."
    # Create a new video *without* metadata. This requires a certain
    # amount of rummaging to make sure all the correct data is present
    step.given('I have clicked the new unit button')
    # Wait for the new unit to be created and to load the page
    world.wait(1)
    course = world.scenario_dict['COURSE']
    store = modulestore()
    parent_location = store.get_items(course.id, qualifiers={'category': 'vertical'})[0].location
    youtube_id = 'ABCDEFG'
    # Remember the id so later steps can assert against it.
    world.scenario_dict['YOUTUBE_ID'] = youtube_id
    # Create a new Video component, but ensure that it doesn't have
    # metadata. This allows us to test that we are correctly parsing
    # out XML
    world.ItemFactory.create(
        parent_location=parent_location,
        category='video',
        data='<video youtube="1.00:%s"></video>' % youtube_id,
        modulestore=store,
        user_id=world.scenario_dict["USER"].id
    )

@step('The correct Youtube video is shown$')
def the_youtube_video_is_shown(_step):
    "Assert the player streams the YouTube id stored by the scenario."
    ele = world.css_find('.video').first
    # data-streams is formatted as "<speed>:<youtube_id>".
    assert ele['data-streams'].split(':')[1] == world.scenario_dict['YOUTUBE_ID']
@step('Make sure captions are (.+)$')
def set_captions_visibility_state(_step, captions_state):
    "Force the captions panel into the requested open/closed state."
    SELECTOR = '.closed .subtitles'
    world.wait_for_visible('.hide-subtitles')
    # Only click the toggle when the panel is not already in the wanted state.
    if captions_state == 'closed':
        if world.is_css_not_present(SELECTOR):
            world.css_find('.hide-subtitles').click()
    else:
        if world.is_css_present(SELECTOR):
            world.css_find('.hide-subtitles').click()

@step('I hover over button "([^"]*)"$')
def hover_over_button(_step, button):
    "Move the mouse over the named player button."
    world.css_find(VIDEO_BUTTONS[button.strip()]).mouse_over()

@step('Captions (?:are|become) "([^"]*)"$')
def check_captions_visibility_state(_step, visibility_state):
    "Assert the captions visibility matches the expected state."
    if visibility_state == 'visible':
        assert world.css_visible('.subtitles')
    else:
        assert not world.css_visible('.subtitles')

def find_caption_line_by_data_index(index):
    "Return the caption <li> element with the given data-index."
    SELECTOR = ".subtitles > li[data-index='{index}']".format(index=index)
    return world.css_find(SELECTOR).first

@step('I focus on caption line with data-index "([^"]*)"$')
def focus_on_caption_line(_step, index):
    "Tab-focus the caption line with the given data-index."
    world.wait_for_present('.video.is-captions-rendered')
    world.wait_for(lambda _: world.css_text('.subtitles'), timeout=30)
    find_caption_line_by_data_index(int(index.strip()))._element.send_keys(Keys.TAB)

@step('I press "enter" button on caption line with data-index "([^"]*)"$')
def click_on_the_caption(_step, index):
    "Activate (send Enter to) the caption line with the given data-index."
    world.wait_for_present('.video.is-captions-rendered')
    world.wait_for(lambda _: world.css_text('.subtitles'), timeout=30)
    find_caption_line_by_data_index(int(index.strip()))._element.send_keys(Keys.ENTER)

@step('I see caption line with data-index "([^"]*)" has class "([^"]*)"$')
def caption_line_has_class(_step, index, className):
    "Assert the caption line carries the given CSS class."
    SELECTOR = ".subtitles > li[data-index='{index}']".format(index=int(index.strip()))
    assert world.css_has_class(SELECTOR, className.strip())

@step('I see a range on slider$')
def see_a_range_slider_with_proper_range(_step):
    "Assert the playback slider shows a highlighted range while playing."
    world.wait_for_visible(VIDEO_BUTTONS['pause'])
    assert world.css_visible(".slider-range")
@step('I (.*) see video button "([^"]*)"$')
def do_not_see_or_not_button_video(_step, action, button_type):
    "Assert visibility ('can') or absence ('do not') of a player button."
    world.wait(DELAY)
    world.wait_for_ajax_complete()
    action = action.strip()
    button = button_type.strip()
    if action == 'do not':
        assert not world.is_css_present(VIDEO_BUTTONS[button])
    elif action == 'can':
        assert world.css_visible(VIDEO_BUTTONS[button])
    else:
        raise ValueError('Parameter `action` should be one of "do not" or "can".')

@step('I click video button "([^"]*)"$')
def click_button_video(_step, button_type):
    "Click the named player button."
    world.wait(DELAY)
    world.wait_for_ajax_complete()
    button = button_type.strip()
    world.css_click(VIDEO_BUTTONS[button])

@step('I seek video to "([^"]*)" seconds$')
def seek_video_to_n_seconds(_step, seconds):
    "Seek the player to the given time via the JS player API."
    time = float(seconds.strip())
    jsCode = "$('.video').data('video-player-state').videoPlayer.onSlideSeek({{time: {0:f}}})".format(time)
    world.browser.execute_script(jsCode)

@step('I see video starts playing from "([^"]*)" position$')
def start_playing_video_from_n_seconds(_step, position):
    "Assert playback begins at the given mm:ss position (first 4 chars)."
    world.wait_for(
        func=lambda _: world.css_html('.vidtime')[:4] == position.strip(),
        timeout=5
    )
| agpl-3.0 |
nanolearningllc/edx-platform-cypress | openedx/core/djangoapps/credit/api/provider.py | 21 | 15424 | """
API for initiating and tracking requests for credit from a provider.
"""
import datetime
import logging
import pytz
import uuid
from django.db import transaction
from lms.djangoapps.django_comment_client.utils import JsonResponse
from openedx.core.djangoapps.credit.exceptions import (
UserIsNotEligible,
CreditProviderNotConfigured,
RequestAlreadyCompleted,
CreditRequestNotFound,
InvalidCreditStatus,
)
from openedx.core.djangoapps.credit.models import (
CreditProvider,
CreditRequirementStatus,
CreditRequest,
CreditEligibility,
)
from openedx.core.djangoapps.credit.signature import signature, get_shared_secret_key
from student.models import User
from util.date_utils import to_timestamp
log = logging.getLogger(__name__)
def get_credit_providers(providers_list=None):
    """Retrieve all available credit providers or filter on given providers_list.

    Arguments:
        providers_list (list of strings or None): contains list of ids of credit providers
        or None.

    Returns:
        list of credit providers represented as dictionaries

    Response Values:
        >>> get_credit_providers(['hogwarts'])
        [
            {
                "id": "hogwarts",
                "name": "Hogwarts School of Witchcraft and Wizardry",
                "url": "https://credit.example.com/",
                "status_url": "https://credit.example.com/status/",
                "description: "A new model for the Witchcraft and Wizardry School System.",
                "enable_integration": false,
                "fulfillment_instructions": "
                <p>In order to fulfill credit, Hogwarts School of Witchcraft and Wizardry requires learners to:</p>
                <ul>
                <li>Sample instruction abc</li>
                <li>Sample instruction xyz</li>
                </ul>",
            },
            ...
        ]
    """
    # Query and serialization are encapsulated on the model class.
    return CreditProvider.get_credit_providers(providers_list=providers_list)
def get_credit_provider_info(request, provider_id):  # pylint: disable=unused-argument
    """Return a JsonResponse describing the given credit provider.

    The response body carries the provider's display and configuration
    fields, or an empty object when no provider matches `provider_id`.

    Args:
        request: The HTTP request (unused; present for URL dispatch).
        provider_id (str): The identifier for the credit provider.

    Returns:
        JsonResponse wrapping a dictionary such as:

        {
            "provider_id": "hogwarts",
            "display_name": "Hogwarts School of Witchcraft and Wizardry",
            "provider_url": "https://credit.example.com/",
            "provider_status_url": "https://credit.example.com/status/",
            "provider_description": "A new model for the Witchcraft and Wizardry School System.",
            "enable_integration": False,
            "fulfillment_instructions": "...",
            "thumbnail_url": "https://credit.example.com/logo.png"
        }
    """
    provider = CreditProvider.get_credit_provider(provider_id=provider_id)
    if not provider:
        # Unknown provider: respond with an empty JSON object.
        return JsonResponse({})
    return JsonResponse({
        "provider_id": provider.provider_id,
        "display_name": provider.display_name,
        "provider_url": provider.provider_url,
        "provider_status_url": provider.provider_status_url,
        "provider_description": provider.provider_description,
        "enable_integration": provider.enable_integration,
        "fulfillment_instructions": provider.fulfillment_instructions,
        "thumbnail_url": provider.thumbnail_url,
    })
@transaction.commit_on_success
def create_credit_request(course_key, provider_id, username):
    """
    Initiate a request for credit from a credit provider.

    This will return the parameters that the user's browser will need to POST
    to the credit provider. It does NOT calculate the signature.

    Only users who are eligible for credit (have satisfied all credit requirements) are allowed to make requests.

    A provider can be configured either with *integration enabled* or not.
    If automatic integration is disabled, this method will simply return
    a URL to the credit provider and method set to "GET", so the student can
    visit the URL and request credit directly. No database record will be created
    to track these requests.

    If automatic integration *is* enabled, then this will also return the parameters
    that the user's browser will need to POST to the credit provider.
    These parameters will be digitally signed using a secret key shared with the credit provider.

    A database record will be created to track the request with a 32-character UUID.
    The returned dictionary can be used by the user's browser to send a POST request to the credit provider.

    If a pending request already exists, this function should return a request description with the same UUID.
    (Other parameters, such as the user's full name may be different than the original request).

    If a completed request (either accepted or rejected) already exists, this function will
    raise an exception. Users are not allowed to make additional requests once a request
    has been completed.

    Arguments:
        course_key (CourseKey): The identifier for the course.
        provider_id (str): The identifier of the credit provider.
        username (str): The user initiating the request.

    Returns: dict

    Raises:
        UserIsNotEligible: The user has not satisfied eligibility requirements for credit.
        CreditProviderNotConfigured: The credit provider has not been configured for this course.
        RequestAlreadyCompleted: The user has already submitted a request and received a response
            from the credit provider.

    Example Usage:
        >>> create_credit_request(course.id, "hogwarts", "ron")
        {
            "url": "https://credit.example.com/request",
            "method": "POST",
            "parameters": {
                "request_uuid": "557168d0f7664fe59097106c67c3f847",
                "timestamp": 1434631630,
                "course_org": "HogwartsX",
                "course_num": "Potions101",
                "course_run": "1T2015",
                "final_grade": 0.95,
                "user_username": "ron",
                "user_email": "ron@example.com",
                "user_full_name": "Ron Weasley",
                "user_mailing_address": "",
                "user_country": "US",
                "signature": "cRCNjkE4IzY+erIjRwOQCpRILgOvXx4q2qvx141BCqI="
            }
        }
    """
    try:
        # Eligibility also resolves the CreditCourse row for the request.
        user_eligibility = CreditEligibility.objects.select_related('course').get(
            username=username,
            course__course_key=course_key
        )
        credit_course = user_eligibility.course
        credit_provider = CreditProvider.objects.get(provider_id=provider_id)
    except CreditEligibility.DoesNotExist:
        log.warning(
            u'User "%s" tried to initiate a request for credit in course "%s", '
            u'but the user is not eligible for credit',
            username, course_key
        )
        raise UserIsNotEligible
    except CreditProvider.DoesNotExist:
        log.error(u'Credit provider with ID "%s" has not been configured.', provider_id)
        raise CreditProviderNotConfigured

    # Check if we've enabled automatic integration with the credit
    # provider. If not, we'll show the user a link to a URL
    # where the user can request credit directly from the provider.
    # Note that we do NOT track these requests in our database,
    # since the state would always be "pending" (we never hear back).
    if not credit_provider.enable_integration:
        return {
            "url": credit_provider.provider_url,
            "method": "GET",
            "parameters": {}
        }
    else:
        # If automatic credit integration is enabled, then try
        # to retrieve the shared signature *before* creating the request.
        # That way, if there's a misconfiguration, we won't have requests
        # in our system that we know weren't sent to the provider.
        shared_secret_key = get_shared_secret_key(credit_provider.provider_id)
        if shared_secret_key is None:
            msg = u'Credit provider with ID "{provider_id}" does not have a secret key configured.'.format(
                provider_id=credit_provider.provider_id
            )
            log.error(msg)
            raise CreditProviderNotConfigured(msg)

        # Initiate a new request if one has not already been created
        credit_request, created = CreditRequest.objects.get_or_create(
            course=credit_course,
            provider=credit_provider,
            username=username,
        )

        # Check whether we've already gotten a response for a request,
        # If so, we're not allowed to issue any further requests.
        # Skip checking the status if we know that we just created this record.
        if not created and credit_request.status != "pending":
            log.warning(
                (
                    u'Cannot initiate credit request because the request with UUID "%s" '
                    u'exists with status "%s"'
                ), credit_request.uuid, credit_request.status
            )
            raise RequestAlreadyCompleted

        if created:
            # New requests get a fresh 32-character hex UUID.
            credit_request.uuid = uuid.uuid4().hex

        # Retrieve user account and profile info
        user = User.objects.select_related('profile').get(username=username)

        # Retrieve the final grade from the eligibility table
        try:
            final_grade = CreditRequirementStatus.objects.get(
                username=username,
                requirement__namespace="grade",
                requirement__name="grade",
                requirement__course__course_key=course_key,
                status="satisfied"
            ).reason["final_grade"]
        except (CreditRequirementStatus.DoesNotExist, TypeError, KeyError):
            # TypeError/KeyError cover a missing or malformed `reason` JSON blob.
            log.exception(
                "Could not retrieve final grade from the credit eligibility table "
                "for user %s in course %s.",
                user.id, course_key
            )
            raise UserIsNotEligible

        parameters = {
            "request_uuid": credit_request.uuid,
            "timestamp": to_timestamp(datetime.datetime.now(pytz.UTC)),
            "course_org": course_key.org,
            "course_num": course_key.course,
            "course_run": course_key.run,
            "final_grade": final_grade,
            "user_username": user.username,
            "user_email": user.email,
            "user_full_name": user.profile.name,
            "user_mailing_address": (
                user.profile.mailing_address
                if user.profile.mailing_address is not None
                else ""
            ),
            "user_country": (
                user.profile.country.code
                if user.profile.country.code is not None
                else ""
            ),
        }

        credit_request.parameters = parameters
        credit_request.save()

        if created:
            log.info(u'Created new request for credit with UUID "%s"', credit_request.uuid)
        else:
            log.info(
                u'Updated request for credit with UUID "%s" so the user can re-issue the request',
                credit_request.uuid
            )

        # Sign the parameters using a secret key we share with the credit provider.
        # The signature is added after save() so it is never persisted locally.
        parameters["signature"] = signature(parameters, shared_secret_key)

        return {
            "url": credit_provider.provider_url,
            "method": "POST",
            "parameters": parameters
        }
def update_credit_request_status(request_uuid, provider_id, status):
    """
    Update the status of a credit request.

    Approve or reject a request for a student to receive credit in a course
    from a particular credit provider.

    This function does NOT check that the status update is authorized.
    The caller needs to handle authentication and authorization (checking the signature
    of the message received from the credit provider)

    The function is idempotent; if the request has already been updated to the status,
    the function does nothing.

    Arguments:
        request_uuid (str): The unique identifier for the credit request.
        provider_id (str): Identifier for the credit provider.
        status (str): Either "approved" or "rejected"

    Returns: None

    Raises:
        CreditRequestNotFound: No request exists that is associated with the given provider.
        InvalidCreditStatus: The status is not either "approved" or "rejected".
    """
    # Validate the status before touching the database.
    if status not in [CreditRequest.REQUEST_STATUS_APPROVED, CreditRequest.REQUEST_STATUS_REJECTED]:
        raise InvalidCreditStatus
    try:
        # The provider filter ensures one provider cannot update another's request.
        request = CreditRequest.objects.get(uuid=request_uuid, provider__provider_id=provider_id)
        old_status = request.status
        request.status = status
        request.save()
        log.info(
            u'Updated request with UUID "%s" from status "%s" to "%s" for provider with ID "%s".',
            request_uuid, old_status, status, provider_id
        )
    except CreditRequest.DoesNotExist:
        msg = (
            u'Credit provider with ID "{provider_id}" attempted to '
            u'update request with UUID "{request_uuid}", but no request '
            u'with this UUID is associated with the provider.'
        ).format(provider_id=provider_id, request_uuid=request_uuid)
        log.warning(msg)
        raise CreditRequestNotFound(msg)
def get_credit_requests_for_user(username):
    """
    Retrieve the status of a credit request.

    Returns either "pending", "approved", or "rejected"

    Arguments:
        username (unicode): The username of the user who initiated the requests.

    Returns: list

    Example Usage:
        >>> get_credit_request_status_for_user("bob")
        [
            {
                "uuid": "557168d0f7664fe59097106c67c3f847",
                "timestamp": 1434631630,
                "course_key": "course-v1:HogwartsX+Potions101+1T2015",
                "provider": {
                    "id": "HogwartsX",
                    "display_name": "Hogwarts School of Witchcraft and Wizardry",
                },
                "status": "pending" # or "approved" or "rejected"
            }
        ]
    """
    # Query and serialization are encapsulated on the model class.
    return CreditRequest.credit_requests_for_user(username)
def get_credit_request_status(username, course_key):
    """Return the user's latest credit request for the given course.

    The request may have been made to any credit provider; the valid
    statuses are 'pending', 'approved' and 'rejected'.

    Args:
        username (str): The username of user
        course_key (CourseKey): The course locator key

    Returns:
        dict: details of the user's credit request, or {} when the user
        has made no request for this course.
    """
    outstanding_request = CreditRequest.get_user_request_status(username, course_key)
    if not outstanding_request:
        return {}
    provider = outstanding_request.provider
    return {
        "uuid": outstanding_request.uuid,
        "timestamp": outstanding_request.modified,
        "course_key": outstanding_request.course.course_key,
        "provider": {
            "id": provider.provider_id,
            "display_name": provider.display_name,
        },
        "status": outstanding_request.status,
    }
| agpl-3.0 |
batxes/4Cin | Six_zebra_models/Six_zebra_models_final_output_0.1_-0.1_13000/mtx1_models/Six_zebra_models33593.py | 4 | 13932 | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((-2231.91, 1873.04, 4948.61), (0.7, 0.7, 0.7), 507.685)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((-2547.73, 1221.59, 5626.14), (0.7, 0.7, 0.7), 479.978)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((-1448.44, 2810.11, 5700.04), (0.7, 0.7, 0.7), 681.834)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((-152.843, 4750.61, 5786.52), (0.7, 0.7, 0.7), 522.532)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((294.827, 5357.78, 5835.43), (0, 1, 0), 751.925)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((-639.633, 5644.51, 3949.03), (0.7, 0.7, 0.7), 437.001)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((822.007, 6873.82, 3838.46), (0.7, 0.7, 0.7), 710.767)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((673.65, 8167.01, 2698.07), (0.7, 0.7, 0.7), 762.077)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((1276.89, 9627.72, 2796.25), (0.7, 0.7, 0.7), 726.799)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((2404.76, 11074.2, 2734.25), (0.7, 0.7, 0.7), 885.508)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((2376.07, 12060.6, 4148.68), (0.7, 0.7, 0.7), 778.489)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((1346.22, 13902.6, 3911.1), (0.7, 0.7, 0.7), 790.333)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((332.215, 15676, 3590.22), (0.7, 0.7, 0.7), 707.721)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((-526.5, 14317.8, 3676.01), (0.7, 0.7, 0.7), 651.166)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((1003.37, 14708.6, 3102.11), (0.7, 0.7, 0.7), 708.61)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((2352.72, 13874.2, 2834.11), (0.7, 0.7, 0.7), 490.595)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((2700.56, 12458, 2672.82), (0.7, 0.7, 0.7), 591.565)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((3295.12, 10963.4, 2579.79), (0.7, 0.7, 0.7), 581.287)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((2293.56, 9952.17, 1486.56), (0.7, 0.7, 0.7), 789.529)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((3157.78, 8766.41, 1025.09), (0.7, 0.7, 0.7), 623.587)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((4562.39, 8235.54, 108.495), (0.7, 0.7, 0.7), 1083.56)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((5514.28, 8224.34, -1305.16), (0.7, 0.7, 0.7), 504.258)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((5457.73, 8158.74, 143.26), (0.7, 0.7, 0.7), 805.519)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((5788.89, 8839.54, 2115.57), (0.7, 0.7, 0.7), 631.708)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((6511.14, 10101, 3697.02), (0.7, 0.7, 0.7), 805.942)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((6907.61, 10777.7, 4429.22), (1, 0.7, 0), 672.697)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((7305.6, 8604.85, 6046.13), (0.7, 0.7, 0.7), 797.863)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((8357.81, 7698.28, 7279.06), (1, 0.7, 0), 735.682)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((8585.97, 6606.02, 6706.21), (0.7, 0.7, 0.7), 602.14)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((9595.28, 5004.78, 5356.35), (0.7, 0.7, 0.7), 954.796)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((8369.43, 5045.35, 5921.9), (0.7, 0.7, 0.7), 1021.88)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((9588.8, 4783.53, 7167.7), (0.7, 0.7, 0.7), 909.323)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((10272.2, 2960.81, 8340.71), (0.7, 0.7, 0.7), 621.049)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((9225.08, 2486.81, 9256.55), (0.7, 0.7, 0.7), 525.154)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((7841.53, 1883.43, 9512.22), (0.7, 0.7, 0.7), 890.246)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((6851.36, 411.84, 9779.54), (0.7, 0.7, 0.7), 671.216)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((6204.37, -875.278, 8889.85), (0.7, 0.7, 0.7), 662.672)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((6955.38, -298.396, 7572.44), (0.7, 0.7, 0.7), 646.682)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((8388.18, -52.4982, 8107.93), (0.7, 0.7, 0.7), 769.945)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((8483.37, 1844.38, 8742.74), (0.7, 0.7, 0.7), 606.92)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((8658.38, 1683.06, 9980.16), (0.7, 0.7, 0.7), 622.571)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((8128.79, 2530.3, 9040.23), (0.7, 0.7, 0.7), 466.865)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((7318.05, 2245.56, 8799.73), (0.7, 0.7, 0.7), 682.933)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((8069.96, 2344.36, 9108.75), (0.7, 0.7, 0.7), 809.326)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((9248.73, 3604.42, 8487.28), (0.7, 0.7, 0.7), 796.72)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((8389.61, 6365.29, 8520.05), (0.7, 0.7, 0.7), 870.026)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((7792.35, 7641.52, 9764.97), (0.7, 0.7, 0.7), 909.577)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((7034.02, 7852.89, 10606.8), (0, 1, 0), 500.536)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((7044.29, 7350.49, 12541.7), (0.7, 0.7, 0.7), 725.276)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((7940.32, 6939.68, 15007.1), (0.7, 0.7, 0.7), 570.331)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((8698.78, 5524.17, 14489.3), (0.7, 0.7, 0.7), 492.203)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((10008.7, 6586.57, 12112.9), (0, 1, 0), 547.7)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((9420.97, 6105.8, 11896), (0.7, 0.7, 0.7), 581.921)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((9157.96, 4211.28, 12079.1), (0.7, 0.7, 0.7), 555.314)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((8279.46, 2949.88, 11962.2), (0.7, 0.7, 0.7), 404.219)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((6854.04, 3222.06, 10818.4), (0.7, 0.7, 0.7), 764.234)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| gpl-3.0 |
callmealien/wazimap_zambia | censusreporter/apps/census/models.py | 9 | 5579 | from django.db import models
class Table(models.Model):
    """A census data table as published in a particular data release."""
    table_id = models.CharField(max_length=8)
    table_name = models.CharField(max_length=255)
    # The population the table describes (its "universe").
    table_universe = models.CharField(max_length=128)
    table_size = models.IntegerField()
    subject_area = models.CharField(max_length=32)
    # Free-form topic tags; presumably comma-separated -- confirm with the loader.
    topics = models.CharField(max_length=255, blank=True)
    # Identifier of the data release this table belongs to.
    release = models.CharField(max_length=16)
    class Meta:
        ordering = ('release','table_id')
    def __unicode__(self):
        return '%s' % self.table_name
class Column(models.Model):
    """A single column (row line) within a census Table."""
    table = models.ForeignKey(Table)
    parent_table_id = models.CharField(max_length=8)
    column_id = models.CharField(max_length=16)
    # One decimal place of ordering -- presumably to slot columns between
    # whole-numbered positions; confirm against the source data.
    column_order = models.DecimalField(max_digits=4, decimal_places=1)
    column_name = models.CharField(max_length=255)
    # Visual indent level of the column within the table hierarchy.
    indent_value = models.IntegerField(blank=True, null=True)
    parent_column_id = models.CharField(max_length=255)
    # NOTE(review): BooleanField without an explicit default -- Django >= 1.6
    # warns about this; confirm the intended default before adding one.
    has_children = models.BooleanField()
    class Meta:
        ordering = ('table__table_id','table__release','column_id')
    def __unicode__(self):
        return '%s' % self.column_name
class SummaryLevel(models.Model):
    """A census summary level (a level of the geographic hierarchy).

    Summary levels form a tree: ``parent`` points to the immediate parent
    level, while ``ancestors`` holds the full chain above this level.
    """
    # Three-character summary-level code, e.g. '040'.
    summary_level = models.CharField(max_length=3)
    name = models.CharField(max_length=128)
    slug = models.SlugField()
    short_name = models.CharField(max_length=128, blank=True)
    plural_name = models.CharField(max_length=128, blank=True)
    description = models.TextField(blank=True)
    census_description = models.TextField(blank=True)
    census_code_description = models.TextField(blank=True)
    census_notes = models.TextField(blank=True)
    source = models.CharField(max_length=64, blank=True)
    # Relationships
    parent = models.ForeignKey('self', related_name='children', blank=True, null=True)
    # ``null`` has no effect on ManyToManyField (Django system check
    # fields.W340), so it is deliberately omitted here.
    ancestors = models.ManyToManyField('self', related_name='descendants', symmetrical=False, blank=True)
    class Meta:
        ordering = ('summary_level',)
    def __unicode__(self):
        return '%s' % self.name
    @property
    def display_name(self):
        """Prefer the short name for display when one is set."""
        return self.short_name or self.name
    def pretty_ancestor_list(self):
        """Return a comma-separated string of all ancestor names."""
        return ', '.join([ancestor.name for ancestor in self.ancestors.all()])
    def pretty_ancestor_options(self):
        """Return the ancestor names with ' or ' before the final entry."""
        _list = self.pretty_ancestor_list()
        return ' or '.join(_list.rsplit(',',1))
    def pretty_ancestor_sumlev_list(self):
        """Return a comma-separated string of ancestor summary-level codes."""
        return ','.join([ancestor.summary_level for ancestor in self.ancestors.all()])
    def ancestor_sumlev_list(self):
        """Return the ancestor summary-level codes as a list."""
        return [ancestor.summary_level for ancestor in self.ancestors.all()]
class SubjectConcept(models.Model):
    """A census subject/topic concept and its descriptive text.

    The defaults for ``census_category`` and ``source`` reflect the most
    common case and can be overridden per record.
    """
    name = models.CharField(max_length=128)
    slug = models.SlugField()
    census_category = models.CharField(max_length=128, blank=True, default="Population")
    census_description = models.TextField(blank=True)
    census_history = models.TextField(blank=True)
    census_comparability = models.TextField(blank=True)
    census_notes = models.TextField(blank=True)
    description = models.TextField(blank=True)
    source = models.CharField(max_length=64, blank=True, default="American Community Survey Subject Definitions")
    class Meta:
        ordering = ('name',)
    def __unicode__(self):
        return '%s' % self.name
class Geography(models.Model):
    """A single geographic area, keyed by its fully qualified geoid.

    The field names mirror census geographic identifier columns; nearly all
    are optional because different summary levels populate different
    subsets of them.
    """
    full_geoid = models.CharField(max_length=16)
    full_name = models.CharField(max_length=128)
    # Three-character summary-level code for this geography.
    sumlev = models.CharField(max_length=3)
    geo_type = models.CharField(max_length=24)
    # Region / division hierarchy: codes plus display names.
    region = models.CharField(max_length=2, blank=True, null=True)
    region_name = models.CharField(max_length=24, blank=True)
    division = models.CharField(max_length=2, blank=True, null=True)
    division_name = models.CharField(max_length=24, blank=True)
    statefp = models.CharField(max_length=2, blank=True, null=True)
    geoid = models.CharField(max_length=24, blank=True, null=True)
    # Congressional district identifiers.
    cd112fp = models.CharField(max_length=12, blank=True, null=True)
    cdsessn = models.CharField(max_length=12, blank=True, null=True)
    countyfp = models.CharField(max_length=12, blank=True, null=True)
    placefp = models.CharField(max_length=12, blank=True, null=True)
    classfp = models.CharField(max_length=12, blank=True)
    # Legislative and school-district identifiers.
    sldlst = models.CharField(max_length=12, blank=True, null=True)
    sldust = models.CharField(max_length=12, blank=True, null=True)
    elsdlea = models.CharField(max_length=12, blank=True, null=True)
    scsdlea = models.CharField(max_length=12, blank=True, null=True)
    unsdlea = models.CharField(max_length=12, blank=True, null=True)
    pcicbsa = models.CharField(max_length=1, blank=True, null=True)
    pcinecta = models.CharField(max_length=1, blank=True, null=True)
    # Metropolitan/micropolitan area identifiers.
    csafp = models.CharField(max_length=12, blank=True, null=True)
    cbsafp = models.CharField(max_length=12, blank=True, null=True)
    metdivfp = models.CharField(max_length=12, blank=True, null=True)
    zcta5ce10 = models.CharField(max_length=12, blank=True, null=True)
    name = models.CharField(max_length=128, blank=True)
    namelsad = models.CharField(max_length=128, blank=True)
    lsad = models.CharField(max_length=4, blank=True, null=True)
    # Land area and internal point coordinates, stored as strings.
    aland = models.CharField(max_length=24, blank=True, null=True)
    intptlat = models.CharField(max_length=16, blank=True)
    intptlon = models.CharField(max_length=16, blank=True)
    class Meta:
        ordering = ('full_geoid',)
        verbose_name_plural = "Geographies"
    def __unicode__(self):
        return '%s' % self.full_name
| mit |
conklinbd/MovementAnalysis | TemplateInstall/PortalDeploy/arcrest/ags/featureservice.py | 1 | 15911 | """
Contains information regarding an ArcGIS Server Feature Server
"""
from re import search
from .._abstract.abstract import BaseAGSServer, BaseSecurityHandler
from ..security import security
import layer
import json
from ..common.geometry import SpatialReference
from ..common.general import FeatureSet
from ..common.filters import LayerDefinitionFilter, GeometryFilter, TimeFilter
########################################################################
class FeatureService(BaseAGSServer):
    """ contains information about a feature service

    Property values are populated lazily: the first property access calls
    __init(), which fetches the service's JSON description and caches each
    recognized field on the matching underscore-prefixed attribute below.
    """
    # Cached fields of the service's JSON description (None until loaded).
    _url = None
    _currentVersion = None
    _serviceDescription = None
    _hasVersionedData = None
    _supportsDisconnectedEditing = None
    _hasStaticData = None
    _maxRecordCount = None
    _supportedQueryFormats = None
    _capabilities = None
    _description = None
    _copyrightText = None
    _spatialReference = None
    _initialExtent = None
    _fullExtent = None
    _allowGeometryUpdates = None
    _units = None
    _syncEnabled = None
    _syncCapabilities = None
    _editorTrackingInfo = None
    _documentInfo = None
    _layers = None
    _tables = None
    _enableZDefaults = None
    _zDefault = None
    _proxy_url = None
    _proxy_port = None
    _securityHandler = None
    _json = None
    _json_dict = None
    #----------------------------------------------------------------------
    def __init__(self, url, securityHandler=None,
                 initialize=False, proxy_url=None, proxy_port=None):
        """Constructor

        Inputs:
           url - URL of the feature service endpoint
           securityHandler - optional handler for secured services
           initialize - when True, fetch the service metadata now rather
                        than on first property access
           proxy_url / proxy_port - optional HTTP proxy settings
        """
        self._proxy_url = proxy_url
        self._proxy_port = proxy_port
        self._url = url
        if securityHandler is not None:
            self._securityHandler = securityHandler
        elif securityHandler is None:
            pass
        else:
            raise AttributeError("Invalid Security Handler")
        if not securityHandler is None and \
           hasattr(securityHandler, 'referer_url'):
            self._referer_url = securityHandler.referer_url
        if initialize:
            self.__init()
    #----------------------------------------------------------------------
    def __init(self):
        """ loads the data into the class """
        # Fetch the service's JSON description and copy every recognized
        # key onto the matching "_<key>" cache attribute.
        params = {"f": "json"}
        json_dict = self._do_get(self._url, params,
                                 securityHandler=self._securityHandler,
                                 proxy_port=self._proxy_port,
                                 proxy_url=self._proxy_url)
        self._json_dict = json_dict
        self._json = json.dumps(self._json_dict)
        # The public (non-underscore) attribute names double as the
        # whitelist of JSON keys this class knows how to store.
        attributes = [attr for attr in dir(self)
                      if not attr.startswith('__') and \
                      not attr.startswith('_')]
        for k,v in json_dict.iteritems():
            if k in attributes:
                setattr(self, "_"+ k, v)
            else:
                # Unknown keys are reported but not stored.
                print k, " - attribute not implemented for Feature Service."
    #----------------------------------------------------------------------
    @property
    def administration(self):
        """returns the service admin object (if accessible)"""
        from ..manageags._services import AGSService
        url = self._url
        # Rebuild the REST URL as <server>/admin/<service>.MapServer.
        # NOTE(review): '/featureserver' is mapped to '.mapserver' --
        # presumably because a feature service is administered through its
        # parent map service endpoint; confirm against the admin API.
        res = search("/rest/", url).span()
        addText = "/admin/"
        part1 = url[:res[1]].lower().replace('/rest/', '')
        part2 = url[res[1]:].lower().replace('/featureserver', ".mapserver")
        adminURL = "%s%s%s" % (part1, addText, part2)
        return AGSService(url=adminURL,
                          securityHandler=self._securityHandler,
                          proxy_url=self._proxy_url,
                          proxy_port=self._proxy_port,
                          initialize=False)
    #----------------------------------------------------------------------
    @property
    def itemInfo(self):
        """gets the item's info"""
        params = {"f" : "json"}
        url = self._url + "/info/iteminfo"
        return self._do_get(url=url, param_dict=params,
                            securityHandler=self._securityHandler,
                            proxy_url=self._proxy_url,
                            proxy_port=self._proxy_port)
    #----------------------------------------------------------------------
    def downloadThumbnail(self, outPath):
        """downloads the items's thumbnail to the given directory"""
        url = self._url + "/info/thumbnail"
        params = {}
        return self._download_file(url=url,
                                   save_path=outPath,
                                   securityHandler=self._securityHandler,
                                   file_name=None,
                                   param_dict=params,
                                   proxy_url=self._proxy_url,
                                   proxy_port=self._proxy_port)
    #----------------------------------------------------------------------
    def downloadMetadataFile(self, outPath):
        """downloads the metadata file (metadata.xml) to a given path"""
        fileName = "metadata.xml"
        url = self._url + "/info/metadata"
        params = {}
        return self._download_file(url=url,
                                   save_path=outPath,
                                   file_name=fileName,
                                   param_dict=params,
                                   securityHandler=self._securityHandler,
                                   proxy_url=self._proxy_url,
                                   proxy_port=self._proxy_port)
    #----------------------------------------------------------------------
    def __str__(self):
        """returns object as a string (the raw JSON description)"""
        if self._json is None:
            self.__init()
        return self._json
    #----------------------------------------------------------------------
    def __iter__(self):
        """returns the JSON response in key/value pairs"""
        if self._json_dict is None:
            self.__init()
        for k,v in self._json_dict.iteritems():
            yield [k,v]
    #----------------------------------------------------------------------
    @property
    def securityHandler(self):
        """ gets the security handler """
        return self._securityHandler
    #----------------------------------------------------------------------
    @securityHandler.setter
    def securityHandler(self, value):
        """ sets the security handler

        Only AGS token handlers are accepted; other handler types are
        silently ignored.  None clears the handler and cached token.
        """
        if isinstance(value, BaseSecurityHandler):
            if isinstance(value, security.AGSTokenSecurityHandler):
                self._securityHandler = value
            else:
                pass
        elif value is None:
            self._securityHandler = None
            self._token = None
    #----------------------------------------------------------------------
    @property
    def maxRecordCount(self):
        """returns the max record count"""
        if self._maxRecordCount is None:
            self.__init()
        return self._maxRecordCount
    #----------------------------------------------------------------------
    @property
    def supportedQueryFormats(self):
        """returns the supported query formats"""
        if self._supportedQueryFormats is None:
            self.__init()
        return self._supportedQueryFormats
    #----------------------------------------------------------------------
    @property
    def capabilities(self):
        """ returns a list of capabilities """
        if self._capabilities is None:
            self.__init()
        return self._capabilities
    #----------------------------------------------------------------------
    @property
    def description(self):
        """ returns the service description """
        if self._description is None:
            self.__init()
        return self._description
    #----------------------------------------------------------------------
    @property
    def copyrightText(self):
        """ returns the copyright text """
        if self._copyrightText is None:
            self.__init()
        return self._copyrightText
    #----------------------------------------------------------------------
    @property
    def spatialReference(self):
        """ returns the spatial reference """
        if self._spatialReference is None:
            self.__init()
        return self._spatialReference
    #----------------------------------------------------------------------
    @property
    def initialExtent(self):
        """ returns the initial extent of the feature service """
        if self._initialExtent is None:
            self.__init()
        return self._initialExtent
    #----------------------------------------------------------------------
    @property
    def fullExtent(self):
        """ returns the full extent of the feature service """
        if self._fullExtent is None:
            self.__init()
        return self._fullExtent
    #----------------------------------------------------------------------
    @property
    def allowGeometryUpdates(self):
        """ informs the user if the data allows geometry updates """
        if self._allowGeometryUpdates is None:
            self.__init()
        return self._allowGeometryUpdates
    #----------------------------------------------------------------------
    @property
    def units(self):
        """ returns the measurement unit """
        if self._units is None:
            self.__init()
        return self._units
    #----------------------------------------------------------------------
    @property
    def syncEnabled(self):
        """ informs the user if sync of data can be performed """
        if self._syncEnabled is None:
            self.__init()
        return self._syncEnabled
    #----------------------------------------------------------------------
    @property
    def syncCapabilities(self):
        """ type of sync that can be performed """
        if self._syncCapabilities is None:
            self.__init()
        return self._syncCapabilities
    #----------------------------------------------------------------------
    @property
    def editorTrackingInfo(self):
        """returns the editor tracking information"""
        if self._editorTrackingInfo is None:
            self.__init()
        return self._editorTrackingInfo
    #----------------------------------------------------------------------
    @property
    def documentInfo(self):
        """returns the document information"""
        if self._documentInfo is None:
            self.__init()
        return self._documentInfo
    #----------------------------------------------------------------------
    @property
    def layers(self):
        """ gets the layers for the feature service """
        if self._layers is None:
            self.__init()
            self._getLayers()
        return self._layers
    #----------------------------------------------------------------------
    def _getLayers(self):
        """ gets layers for the feature service

        Builds one FeatureLayer object per entry in the service's
        'layers' list, keyed by layer id.
        """
        params = {"f": "json"}
        json_dict = self._do_get(self._url, params,
                                 securityHandler=self._securityHandler,
                                 proxy_url=self._proxy_url,
                                 proxy_port=self._proxy_port)
        self._layers = []
        if json_dict.has_key("layers"):
            for l in json_dict["layers"]:
                self._layers.append(
                    layer.FeatureLayer(url=self._url + "/%s" % l['id'],
                                       securityHandler=self._securityHandler,
                                       proxy_port=self._proxy_port,
                                       proxy_url=self._proxy_url)
                )
    #----------------------------------------------------------------------
    @property
    def tables(self):
        """lists the tables on the feature service"""
        if self._tables is None:
            self.__init()
        return self._tables
    #----------------------------------------------------------------------
    @property
    def enableZDefaults(self):
        """returns whether Z defaults are enabled"""
        if self._enableZDefaults is None:
            self.__init()
        return self._enableZDefaults
    #----------------------------------------------------------------------
    @property
    def zDefault(self):
        """returns the default Z value"""
        if self._zDefault is None:
            self.__init()
        return self._zDefault
    #----------------------------------------------------------------------
    @property
    def hasStaticData(self):
        """returns whether the service has static data"""
        if self._hasStaticData is None:
            self.__init()
        return self._hasStaticData
    #----------------------------------------------------------------------
    @property
    def currentVersion(self):
        """ returns the map service current version """
        if self._currentVersion is None:
            self.__init()
        return self._currentVersion
    #----------------------------------------------------------------------
    @property
    def serviceDescription(self):
        """ returns the serviceDescription of the map service """
        if self._serviceDescription is None:
            self.__init()
        return self._serviceDescription
    #----------------------------------------------------------------------
    @property
    def hasVersionedData(self):
        """ returns boolean for versioned data """
        if self._hasVersionedData is None:
            self.__init()
        return self._hasVersionedData
    #----------------------------------------------------------------------
    @property
    def supportsDisconnectedEditing(self):
        """ returns boolean is disconnecting editted supported """
        if self._supportsDisconnectedEditing is None:
            self.__init()
        return self._supportsDisconnectedEditing
    #----------------------------------------------------------------------
    def query(self,
              layerDefsFilter=None,
              geometryFilter=None,
              timeFilter=None,
              returnGeometry=True,
              returnIdsOnly=False,
              returnCountOnly=False,
              returnZ=False,
              returnM=False,
              outSR=None
              ):
        """
        The Query operation is performed on a feature service resource

        Inputs:
           layerDefsFilter - LayerDefinitionFilter restricting each layer
           geometryFilter - GeometryFilter applied spatially
           timeFilter - TimeFilter restricting results by time
           returnGeometry - include geometry in the results
           returnIdsOnly - return object IDs only
           returnCountOnly - return only the count of matching features
           returnZ / returnM - include Z / M values
           outSR - SpatialReference for output geometries
        Output:
           list of FeatureSet objects (one per layer), unless
           returnIdsOnly or returnCountOnly is True, in which case the
           raw service response is returned.
        """
        qurl = self._url + "/query"
        params = {"f": "json",
                  "returnGeometry": returnGeometry,
                  "returnIdsOnly": returnIdsOnly,
                  "returnCountOnly": returnCountOnly,
                  "returnZ": returnZ,
                  "returnM" : returnM}
        if not layerDefsFilter is None and \
           isinstance(layerDefsFilter, LayerDefinitionFilter):
            params['layerDefs'] = layerDefsFilter.filter
        if not geometryFilter is None and \
           isinstance(geometryFilter, GeometryFilter):
            gf = geometryFilter.filter
            params['geometryType'] = gf['geometryType']
            params['spatialRel'] = gf['spatialRel']
            params['geometry'] = gf['geometry']
            params['inSR'] = gf['inSR']
        if not outSR is None and \
           isinstance(outSR, SpatialReference):
            params['outSR'] = outSR.asDictionary
        if not timeFilter is None and \
           isinstance(timeFilter, TimeFilter):
            params['time'] = timeFilter.filter
        res = self._do_get(url=qurl,
                           param_dict=params,
                           securityHandler=self._securityHandler,
                           proxy_url=self._proxy_url,
                           proxy_port=self._proxy_port)
        # For full queries, convert each layer's JSON into a FeatureSet;
        # ID-only and count-only responses are returned untouched.
        if returnIdsOnly == False and returnCountOnly == False:
            if isinstance(res, str):
                jd = json.loads(res)
                return [FeatureSet.fromJSON(json.dumps(lyr)) for lyr in jd['layers']]
            elif isinstance(res, dict):
                return [FeatureSet.fromJSON(json.dumps(lyr)) for lyr in res['layers']]
            else:
                return res
        return res
| apache-2.0 |
popazerty/blackhole-vuplus | lib/python/Components/ScrollLabel.py | 21 | 5642 | import skin
from HTMLComponent import HTMLComponent
from GUIComponent import GUIComponent
from enigma import eLabel, eWidget, eSlider, fontRenderClass, ePoint, eSize
class ScrollLabel(HTMLComponent, GUIComponent):
	"""Multi-line text label with an attached scrollbar.

	The text can be scrolled page-wise with pageUp()/pageDown().  An
	optional two-column "split" mode (enabled from the skin) renders the
	part of each line before ``splitchar`` in the main label and the
	remainder in a second label positioned at x offset ``column``.
	"""
	def __init__(self, text=""):
		GUIComponent.__init__(self)
		self.message = text
		self.instance = None	# container eWidget, created in GUIcreate
		self.long_text = None	# main (left column) eLabel
		self.right_text = None	# right column eLabel, split mode only
		self.scrollbar = None	# eSlider used as scroll position indicator
		self.pages = None	# page count, None while all text fits
		self.total = None	# total scrollable height in pixels
		self.split = False	# two-column mode (skin attribute "split")
		self.splitchar = "|"	# column delimiter (skin attribute "dividechar")
		self.column = 0		# right column x offset (skin "colposition")

	def applySkin(self, desktop, parent):
		"""Apply skin attributes, size all sub-widgets and render the text."""
		ret = False
		if self.skinAttributes is not None:
			widget_attribs = [ ]
			scrollbar_attribs = [ ]
			for (attrib, value) in self.skinAttributes:
				# border attributes only affect the scrollbar; transparency
				# and background are shared by the container widgets
				if "borderColor" in attrib or "borderWidth" in attrib:
					scrollbar_attribs.append((attrib, value))
				if "transparent" in attrib or "backgroundColor" in attrib:
					widget_attribs.append((attrib, value))
				if "split" in attrib:
					self.split = int(value)
					if self.split:
						self.right_text = eLabel(self.instance)
				if "colposition" in attrib:
					self.column = int(value)
				if "dividechar" in attrib:
					self.splitchar = value
			if self.split:
				skin.applyAllAttributes(self.long_text, desktop, self.skinAttributes + [("halign", "left")], parent.scale)
				# BUGFIX: the original '"left" and self.column or "right"'
				# always evaluated to self.column (an int) once a column was
				# configured; align left when a column offset is set, right
				# otherwise.
				skin.applyAllAttributes(self.right_text, desktop, self.skinAttributes + [("transparent", "1"), ("halign", self.column and "left" or "right")], parent.scale)
			else:
				skin.applyAllAttributes(self.long_text, desktop, self.skinAttributes, parent.scale)
			skin.applyAllAttributes(self.instance, desktop, widget_attribs, parent.scale)
			skin.applyAllAttributes(self.scrollbar, desktop, scrollbar_attribs + widget_attribs, parent.scale)
			ret = True
		s = self.long_text.size()
		self.instance.move(self.long_text.position())
		lineheight = fontRenderClass.getInstance().getLineHeight(self.long_text.getFont())
		if not lineheight:
			lineheight = 30 # assume a random lineheight if nothing is visible
		lines = (int)(s.height() / lineheight)
		self.pageHeight = (int)(lines * lineheight)
		self.instance.resize(eSize(s.width(), self.pageHeight + (int)(lineheight / 6)))
		self.scrollbar.move(ePoint(s.width() - 20, 0))
		self.scrollbar.resize(eSize(20, self.pageHeight + (int)(lineheight / 6)))
		self.scrollbar.setOrientation(eSlider.orVertical)
		self.scrollbar.setRange(0, 100)
		self.scrollbar.setBorderWidth(1)
		self.long_text.move(ePoint(0, 0))
		# 16 pages worth of label is enough for any practical text
		self.long_text.resize(eSize(s.width() - 30, self.pageHeight * 16))
		if self.split:
			self.right_text.move(ePoint(self.column, 0))
			self.right_text.resize(eSize(s.width() - self.column - 30, self.pageHeight * 16))
		self.setText(self.message)
		return ret

	def _updatePageCount(self):
		"""Recalculate self.total/self.pages from the rendered text height
		and show or hide the scrollbar accordingly.

		Shared by setText() and appendText(); previously duplicated.
		"""
		text_height = self.long_text.calculateSize().height()
		total = self.pageHeight
		pages = 1
		while total < text_height:
			total = total + self.pageHeight
			pages = pages + 1
		if pages > 1:
			self.scrollbar.show()
			self.total = total
			self.pages = pages
			self.updateScrollbar()
		else:
			self.scrollbar.hide()
			self.total = None
			self.pages = None

	def setText(self, text):
		"""Replace the whole text and reset the scroll position."""
		self.message = text
		if self.long_text is not None and self.pageHeight:
			self.long_text.move(ePoint(0, 0))
			if self.split:
				# BUGFIX: also reset the right column; it must scroll in
				# sync with the left one.
				self.right_text.move(ePoint(self.column, 0))
				left = []
				right = []
				for line in self.message.split("\n"):
					line = line.split(self.splitchar, 1)
					if len(line) == 1:
						line.append("")
					left.append(line[0])
					right.append(line[1].lstrip(' '))
				self.long_text.setText("\n".join(left))
				self.right_text.setText("\n".join(right))
			else:
				self.long_text.setText(self.message)
			self._updatePageCount()

	def appendText(self, text):
		"""Append text, keeping the current scroll position.

		NOTE(review): as in the original code, split mode is ignored here
		and the raw message is written to the left label only.
		"""
		if self.getText():
			self.message += text
		else:
			self.message = text
		if self.long_text is not None:
			self.long_text.setText(self.message)
			self._updatePageCount()

	def updateScrollbar(self):
		"""Sync the slider with the current text offset."""
		start = -self.long_text.position().y() * 100 / self.total
		vis = self.pageHeight * 100 / self.total
		self.scrollbar.setStartEnd(start, start + vis)

	def getText(self):
		"""Return the full (unsplit) text."""
		return self.message

	def GUIcreate(self, parent):
		self.instance = eWidget(parent)
		self.scrollbar = eSlider(self.instance)
		self.long_text = eLabel(self.instance)

	def GUIdelete(self):
		self.long_text = None
		self.scrollbar = None
		self.instance = None
		self.right_text = None

	def pageUp(self):
		"""Scroll one page towards the top, if not already at the top."""
		if self.total is not None:
			curPos = self.long_text.position()
			if curPos.y() < 0:
				self.long_text.move(ePoint(curPos.x(), curPos.y() + self.pageHeight))
				if self.split:
					# BUGFIX: keep the right column at its own x offset;
					# the original reused long_text's x and snapped the
					# right column to x=0 on the first page turn.
					self.right_text.move(ePoint(self.column, curPos.y() + self.pageHeight))
				self.updateScrollbar()

	def pageDown(self):
		"""Scroll one page towards the bottom, if there is more text."""
		if self.total is not None:
			curPos = self.long_text.position()
			if self.total - self.pageHeight >= abs(curPos.y() - self.pageHeight):
				self.long_text.move(ePoint(curPos.x(), curPos.y() - self.pageHeight))
				if self.split:
					# BUGFIX: see pageUp()
					self.right_text.move(ePoint(self.column, curPos.y() - self.pageHeight))
				self.updateScrollbar()

	def lastPage(self):
		"""Jump to the last page."""
		if self.pages is not None:
			i = 1
			while i < self.pages:
				self.pageDown()
				i += 1

	def isAtLastPage(self):
		"""Return True when no further pageDown() is possible."""
		if self.total is not None:
			curPos = self.long_text.position()
			return self.total - self.pageHeight < abs(curPos.y() - self.pageHeight)
		else:
			return True

	def produceHTML(self):
		return self.getText()
| gpl-2.0 |
cctaylor/googleads-python-lib | examples/dfp/v201505/user_service/get_all_users.py | 3 | 1658 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all users. To create users, run create_users.py."""
# Authors of this example script.
__author__ = ('Nicholas Chen',
              'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
user_service = client.GetService('UserService', version='v201505')
# Create a filter statement.
statement = dfp.FilterStatement()
# Get users by statement.
while True:
response = user_service.getUsersByStatement(statement.ToStatement())
if 'results' in response:
# Display results.
for user in response['results']:
print ('User with id \'%s\', email \'%s\', and role \'%s\' was found.'
% (user['id'], user['email'], user['roleName']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
  # Initialize client object.
  # Credentials are read from the googleads.yaml storage file.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client)
| apache-2.0 |
2014cdbg17/2015cd_midterm2 | static/Brython3.1.0-20150301-090019/Lib/multiprocessing/__init__.py | 693 | 6866 | #
# Package analogous to 'threading.py' but using processes
#
# multiprocessing/__init__.py
#
# This package is intended to duplicate the functionality (and much of
# the API) of threading.py but uses processes instead of threads. A
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
# Try calling `multiprocessing.doc.main()` to read the html
# documentation in a webbrowser.
#
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
# Package version of this multiprocessing copy bundled with Brython.
__version__ = '0.70a1'

# Names re-exported as the public API of the package.  Note that 'Pipe'
# and 'allow_connection_pickling' are listed but their factories are
# commented out below ("brython fix me").
__all__ = [
    'Process', 'current_process', 'active_children', 'freeze_support',
    'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
    'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
    'Event', 'Barrier', 'Queue', 'SimpleQueue', 'JoinableQueue', 'Pool',
    'Value', 'Array', 'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING',
    ]

__author__ = 'R. Oudkerk (r.m.oudkerk@gmail.com)'
#
# Imports
#
import os
import sys
from multiprocessing.process import Process, current_process, active_children
from multiprocessing.util import SUBDEBUG, SUBWARNING
#
# Exceptions
#
class ProcessError(Exception):
    """Base exception for all errors raised by this package."""
    pass
class BufferTooShort(ProcessError):
    """Raised when a supplied buffer is too small for a received message."""
    pass
class TimeoutError(ProcessError):
    """Raised when a timed operation exceeds its deadline."""
    pass
class AuthenticationError(ProcessError):
    """Raised when connection authentication (digest handshake) fails."""
    pass
import _multiprocessing
#
# Definitions not depending on native semaphores
#
def Manager():
    """Return a manager attached to a freshly started server process.

    The manager's methods such as ``Lock()``, ``Condition()`` and
    ``Queue()`` can be used to create shared objects.
    """
    from multiprocessing.managers import SyncManager
    manager = SyncManager()
    manager.start()
    return manager
#brython fix me
#def Pipe(duplex=True):
# '''
# Returns two connection object connected by a pipe
# '''
# from multiprocessing.connection import Pipe
# return Pipe(duplex)
def cpu_count():
    """Return the number of CPUs in the system.

    Raises NotImplementedError when the count cannot be determined.
    """
    num = 0
    if sys.platform == 'win32':
        # Windows exposes the count through the environment.
        try:
            num = int(os.environ['NUMBER_OF_PROCESSORS'])
        except (ValueError, KeyError):
            num = 0
    elif 'bsd' in sys.platform or sys.platform == 'darwin':
        # BSD-like systems query sysctl; on macOS the tool lives in /usr/sbin.
        comm = '/sbin/sysctl -n hw.ncpu'
        if sys.platform == 'darwin':
            comm = '/usr' + comm
        try:
            with os.popen(comm) as p:
                num = int(p.read())
        except ValueError:
            num = 0
    else:
        # POSIX: ask sysconf for the number of online processors.
        try:
            num = os.sysconf('SC_NPROCESSORS_ONLN')
        except (ValueError, OSError, AttributeError):
            num = 0

    if num < 1:
        raise NotImplementedError('cannot determine number of cpus')
    return num
def freeze_support():
    """Check whether this is a fake forked process in a frozen executable.

    If so then run code specified by commandline and exit; otherwise this
    is a no-op.
    """
    # Only relevant for frozen Windows executables (e.g. py2exe builds).
    if sys.platform != 'win32' or not getattr(sys, 'frozen', False):
        return
    from multiprocessing.forking import freeze_support as _freeze_support
    _freeze_support()
def get_logger():
    """Return the package logger, creating it on first use."""
    from multiprocessing.util import get_logger as _get_logger
    return _get_logger()
def log_to_stderr(level=None):
    """Turn on logging and attach a handler that prints to stderr."""
    from multiprocessing.util import log_to_stderr as _log_to_stderr
    return _log_to_stderr(level)
#brython fix me
#def allow_connection_pickling():
# '''
# Install support for sending connections and sockets between processes
# '''
# # This is undocumented. In previous versions of multiprocessing
# # its only effect was to make socket objects inheritable on Windows.
# import multiprocessing.connection
#
# Definitions depending on native semaphores
#
def Lock():
    """Create and return a non-recursive lock object."""
    from multiprocessing.synchronize import Lock as _Lock
    return _Lock()
def RLock():
    """Create and return a recursive lock object."""
    from multiprocessing.synchronize import RLock as _RLock
    return _RLock()
def Condition(lock=None):
    """Create and return a condition variable, optionally sharing *lock*."""
    from multiprocessing.synchronize import Condition as _Condition
    return _Condition(lock)
def Semaphore(value=1):
    """Create and return a semaphore with initial counter *value*."""
    from multiprocessing.synchronize import Semaphore as _Semaphore
    return _Semaphore(value)
def BoundedSemaphore(value=1):
    """Create and return a bounded semaphore with initial counter *value*."""
    from multiprocessing.synchronize import BoundedSemaphore as _BoundedSemaphore
    return _BoundedSemaphore(value)
def Event():
    """Create and return an event object."""
    from multiprocessing.synchronize import Event as _Event
    return _Event()
def Barrier(parties, action=None, timeout=None):
    """Create and return a barrier for *parties* participants."""
    from multiprocessing.synchronize import Barrier as _Barrier
    return _Barrier(parties, action, timeout)
def Queue(maxsize=0):
    """Create and return a shared FIFO queue (0 means unbounded)."""
    from multiprocessing.queues import Queue as _Queue
    return _Queue(maxsize)
def JoinableQueue(maxsize=0):
    """Create and return a queue supporting task_done()/join()."""
    from multiprocessing.queues import JoinableQueue as _JoinableQueue
    return _JoinableQueue(maxsize)
def SimpleQueue():
    """Create and return a simplified, unbounded shared queue."""
    from multiprocessing.queues import SimpleQueue as _SimpleQueue
    return _SimpleQueue()
def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
    """Create and return a process pool object."""
    from multiprocessing.pool import Pool as _Pool
    return _Pool(processes, initializer, initargs, maxtasksperchild)
def RawValue(typecode_or_type, *args):
    """Create and return an unsynchronized shared ctypes object."""
    from multiprocessing.sharedctypes import RawValue as _RawValue
    return _RawValue(typecode_or_type, *args)
def RawArray(typecode_or_type, size_or_initializer):
    """Create and return an unsynchronized shared ctypes array."""
    from multiprocessing.sharedctypes import RawArray as _RawArray
    return _RawArray(typecode_or_type, size_or_initializer)
def Value(typecode_or_type, *args, lock=True):
    """Create and return a synchronized shared ctypes object."""
    from multiprocessing.sharedctypes import Value as _Value
    return _Value(typecode_or_type, *args, lock=lock)
def Array(typecode_or_type, size_or_initializer, *, lock=True):
    """Create and return a synchronized shared ctypes array."""
    from multiprocessing.sharedctypes import Array as _Array
    return _Array(typecode_or_type, size_or_initializer, lock=lock)
#
#
#
# Windows has no fork(); child processes are started by launching a new
# interpreter, so the executable used for children can be overridden.
if sys.platform == 'win32':
    def set_executable(executable):
        '''
        Sets the path to a python.exe or pythonw.exe binary used to run
        child processes on Windows instead of sys.executable.
        Useful for people embedding Python.
        '''
        from multiprocessing.forking import set_executable
        set_executable(executable)
    # Only exported on Windows, where the setting has an effect.
    __all__ += ['set_executable']
| gpl-3.0 |
turbokongen/home-assistant | tests/components/pilight/test_init.py | 3 | 14678 | """The tests for the pilight component."""
from datetime import timedelta
import logging
import socket
from unittest.mock import patch
from voluptuous import MultipleInvalid
from homeassistant.components import pilight
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
from tests.common import assert_setup_component, async_fire_time_changed
_LOGGER = logging.getLogger(__name__)
class PilightDaemonSim:
    """Class to fake the interface of the pilight python package.

    Is used in an asyncio loop, thus the mock cannot be accessed to
    determine if methods where called?!
    This is solved here in a hackish way by printing errors
    that can be checked using logging.error mocks.
    """

    # Callback registered by the component via set_callback().
    callback = None
    # Guards against delivering the fake received message more than once.
    called = None

    # Message injected once on start(); the receive tests assert that its
    # parts end up on the Home Assistant event bus.
    test_message = {
        "protocol": "kaku_switch",
        "uuid": "1-2-3-4",
        "message": {"id": 0, "unit": 0, "off": 1},
    }

    def __init__(self, host, port):
        """Init pilight client, ignore parameters."""

    def send_code(self, call):  # pylint: disable=no-self-use
        """Handle pilight.send service callback."""
        # Logged at error level so tests can observe the payload via mocks.
        _LOGGER.error("PilightDaemonSim payload: %s", call)

    def start(self):
        """Handle homeassistant.start callback.

        Also sends one test message after start up
        """
        _LOGGER.error("PilightDaemonSim start")
        # Fake one code receive after daemon started
        if not self.called:
            self.callback(self.test_message)
            self.called = True

    def stop(self):  # pylint: disable=no-self-use
        """Handle homeassistant.stop callback."""
        _LOGGER.error("PilightDaemonSim stop")

    def set_callback(self, function):
        """Handle pilight.pilight_received event callback."""
        self.callback = function
        _LOGGER.error("PilightDaemonSim callback: %s", function)
@patch("homeassistant.components.pilight._LOGGER.error")
async def test_connection_failed_error(mock_error, hass):
"""Try to connect at 127.0.0.1:5001 with socket error."""
with assert_setup_component(4):
with patch("pilight.pilight.Client", side_effect=socket.error) as mock_client:
assert not await async_setup_component(
hass, pilight.DOMAIN, {pilight.DOMAIN: {}}
)
mock_client.assert_called_once_with(
host=pilight.DEFAULT_HOST, port=pilight.DEFAULT_PORT
)
assert mock_error.call_count == 1
@patch("homeassistant.components.pilight._LOGGER.error")
async def test_connection_timeout_error(mock_error, hass):
"""Try to connect at 127.0.0.1:5001 with socket timeout."""
with assert_setup_component(4):
with patch("pilight.pilight.Client", side_effect=socket.timeout) as mock_client:
assert not await async_setup_component(
hass, pilight.DOMAIN, {pilight.DOMAIN: {}}
)
mock_client.assert_called_once_with(
host=pilight.DEFAULT_HOST, port=pilight.DEFAULT_PORT
)
assert mock_error.call_count == 1
@patch("pilight.pilight.Client", PilightDaemonSim)
async def test_send_code_no_protocol(hass):
"""Try to send data without protocol information, should give error."""
with assert_setup_component(4):
assert await async_setup_component(hass, pilight.DOMAIN, {pilight.DOMAIN: {}})
# Call without protocol info, should raise an error
try:
await hass.services.async_call(
pilight.DOMAIN,
pilight.SERVICE_NAME,
service_data={"noprotocol": "test", "value": 42},
blocking=True,
)
await hass.async_block_till_done()
except MultipleInvalid as error:
assert "required key not provided @ data['protocol']" in str(error)
@patch("homeassistant.components.pilight._LOGGER.error")
@patch("homeassistant.components.pilight._LOGGER", _LOGGER)
@patch("pilight.pilight.Client", PilightDaemonSim)
async def test_send_code(mock_pilight_error, hass):
"""Try to send proper data."""
with assert_setup_component(4):
assert await async_setup_component(hass, pilight.DOMAIN, {pilight.DOMAIN: {}})
# Call with protocol info, should not give error
service_data = {"protocol": "test", "value": 42}
await hass.services.async_call(
pilight.DOMAIN,
pilight.SERVICE_NAME,
service_data=service_data,
blocking=True,
)
await hass.async_block_till_done()
error_log_call = mock_pilight_error.call_args_list[-1]
service_data["protocol"] = [service_data["protocol"]]
assert str(service_data) in str(error_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.components.pilight._LOGGER.error")
async def test_send_code_fail(mock_pilight_error, hass):
"""Check IOError exception error message."""
with assert_setup_component(4):
with patch("pilight.pilight.Client.send_code", side_effect=IOError):
assert await async_setup_component(
hass, pilight.DOMAIN, {pilight.DOMAIN: {}}
)
# Call with protocol info, should not give error
service_data = {"protocol": "test", "value": 42}
await hass.services.async_call(
pilight.DOMAIN,
pilight.SERVICE_NAME,
service_data=service_data,
blocking=True,
)
await hass.async_block_till_done()
error_log_call = mock_pilight_error.call_args_list[-1]
assert "Pilight send failed" in str(error_log_call)
@patch("homeassistant.components.pilight._LOGGER.error")
@patch("homeassistant.components.pilight._LOGGER", _LOGGER)
@patch("pilight.pilight.Client", PilightDaemonSim)
async def test_send_code_delay(mock_pilight_error, hass):
"""Try to send proper data with delay afterwards."""
with assert_setup_component(4):
assert await async_setup_component(
hass,
pilight.DOMAIN,
{pilight.DOMAIN: {pilight.CONF_SEND_DELAY: 5.0}},
)
# Call with protocol info, should not give error
service_data1 = {"protocol": "test11", "value": 42}
service_data2 = {"protocol": "test22", "value": 42}
await hass.services.async_call(
pilight.DOMAIN,
pilight.SERVICE_NAME,
service_data=service_data1,
blocking=True,
)
await hass.services.async_call(
pilight.DOMAIN,
pilight.SERVICE_NAME,
service_data=service_data2,
blocking=True,
)
service_data1["protocol"] = [service_data1["protocol"]]
service_data2["protocol"] = [service_data2["protocol"]]
async_fire_time_changed(hass, dt_util.utcnow())
await hass.async_block_till_done()
error_log_call = mock_pilight_error.call_args_list[-1]
assert str(service_data1) in str(error_log_call)
new_time = dt_util.utcnow() + timedelta(seconds=5)
async_fire_time_changed(hass, new_time)
await hass.async_block_till_done()
error_log_call = mock_pilight_error.call_args_list[-1]
assert str(service_data2) in str(error_log_call)
@patch("homeassistant.components.pilight._LOGGER.error")
@patch("homeassistant.components.pilight._LOGGER", _LOGGER)
@patch("pilight.pilight.Client", PilightDaemonSim)
async def test_start_stop(mock_pilight_error, hass):
"""Check correct startup and stop of pilight daemon."""
with assert_setup_component(4):
assert await async_setup_component(hass, pilight.DOMAIN, {pilight.DOMAIN: {}})
# Test startup
await hass.async_start()
await hass.async_block_till_done()
error_log_call = mock_pilight_error.call_args_list[-2]
assert "PilightDaemonSim callback" in str(error_log_call)
error_log_call = mock_pilight_error.call_args_list[-1]
assert "PilightDaemonSim start" in str(error_log_call)
# Test stop
with patch.object(hass.loop, "stop"):
await hass.async_stop()
error_log_call = mock_pilight_error.call_args_list[-1]
assert "PilightDaemonSim stop" in str(error_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.core._LOGGER.debug")
async def test_receive_code(mock_debug, hass):
"""Check if code receiving via pilight daemon works."""
with assert_setup_component(4):
assert await async_setup_component(hass, pilight.DOMAIN, {pilight.DOMAIN: {}})
# Test startup
await hass.async_start()
await hass.async_block_till_done()
expected_message = dict(
{
"protocol": PilightDaemonSim.test_message["protocol"],
"uuid": PilightDaemonSim.test_message["uuid"],
},
**PilightDaemonSim.test_message["message"],
)
debug_log_call = mock_debug.call_args_list[-3]
# Check if all message parts are put on event bus
for key, value in expected_message.items():
assert str(key) in str(debug_log_call)
assert str(value) in str(debug_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.core._LOGGER.debug")
async def test_whitelist_exact_match(mock_debug, hass):
"""Check whitelist filter with matched data."""
with assert_setup_component(4):
whitelist = {
"protocol": [PilightDaemonSim.test_message["protocol"]],
"uuid": [PilightDaemonSim.test_message["uuid"]],
"id": [PilightDaemonSim.test_message["message"]["id"]],
"unit": [PilightDaemonSim.test_message["message"]["unit"]],
}
assert await async_setup_component(
hass, pilight.DOMAIN, {pilight.DOMAIN: {"whitelist": whitelist}}
)
await hass.async_start()
await hass.async_block_till_done()
expected_message = dict(
{
"protocol": PilightDaemonSim.test_message["protocol"],
"uuid": PilightDaemonSim.test_message["uuid"],
},
**PilightDaemonSim.test_message["message"],
)
debug_log_call = mock_debug.call_args_list[-3]
# Check if all message parts are put on event bus
for key, value in expected_message.items():
assert str(key) in str(debug_log_call)
assert str(value) in str(debug_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.core._LOGGER.debug")
async def test_whitelist_partial_match(mock_debug, hass):
"""Check whitelist filter with partially matched data, should work."""
with assert_setup_component(4):
whitelist = {
"protocol": [PilightDaemonSim.test_message["protocol"]],
"id": [PilightDaemonSim.test_message["message"]["id"]],
}
assert await async_setup_component(
hass, pilight.DOMAIN, {pilight.DOMAIN: {"whitelist": whitelist}}
)
await hass.async_start()
await hass.async_block_till_done()
expected_message = dict(
{
"protocol": PilightDaemonSim.test_message["protocol"],
"uuid": PilightDaemonSim.test_message["uuid"],
},
**PilightDaemonSim.test_message["message"],
)
debug_log_call = mock_debug.call_args_list[-3]
# Check if all message parts are put on event bus
for key, value in expected_message.items():
assert str(key) in str(debug_log_call)
assert str(value) in str(debug_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.core._LOGGER.debug")
async def test_whitelist_or_match(mock_debug, hass):
"""Check whitelist filter with several subsection, should work."""
with assert_setup_component(4):
whitelist = {
"protocol": [
PilightDaemonSim.test_message["protocol"],
"other_protocol",
],
"id": [PilightDaemonSim.test_message["message"]["id"]],
}
assert await async_setup_component(
hass, pilight.DOMAIN, {pilight.DOMAIN: {"whitelist": whitelist}}
)
await hass.async_start()
await hass.async_block_till_done()
expected_message = dict(
{
"protocol": PilightDaemonSim.test_message["protocol"],
"uuid": PilightDaemonSim.test_message["uuid"],
},
**PilightDaemonSim.test_message["message"],
)
debug_log_call = mock_debug.call_args_list[-3]
# Check if all message parts are put on event bus
for key, value in expected_message.items():
assert str(key) in str(debug_log_call)
assert str(value) in str(debug_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.core._LOGGER.debug")
async def test_whitelist_no_match(mock_debug, hass):
"""Check whitelist filter with unmatched data, should not work."""
with assert_setup_component(4):
whitelist = {
"protocol": ["wrong_protocol"],
"id": [PilightDaemonSim.test_message["message"]["id"]],
}
assert await async_setup_component(
hass, pilight.DOMAIN, {pilight.DOMAIN: {"whitelist": whitelist}}
)
await hass.async_start()
await hass.async_block_till_done()
debug_log_call = mock_debug.call_args_list[-3]
assert not ("Event pilight_received" in debug_log_call)
async def test_call_rate_delay_throttle_enabled(hass):
    """Test that throttling actually work."""
    runs = []
    delay = 5.0

    limit = pilight.CallRateDelayThrottle(hass, delay)
    action = limit.limited(lambda x: runs.append(x))

    for i in range(3):
        await hass.async_add_executor_job(action, i)

    await hass.async_block_till_done()
    # Only the first call executes immediately; the rest are queued.
    assert runs == [0]

    exp = []
    now = dt_util.utcnow()
    for i in range(3):
        exp.append(i)
        # Each advance of slightly more than `delay` releases one queued call.
        shifted_time = now + (timedelta(seconds=delay + 0.1) * i)
        async_fire_time_changed(hass, shifted_time)
        await hass.async_block_till_done()
        assert runs == exp
def test_call_rate_delay_throttle_disabled(hass):
    """Test that the limiter is a noop if no delay set."""
    calls = []

    throttle = pilight.CallRateDelayThrottle(hass, 0.0)
    wrapped = throttle.limited(lambda value: calls.append(value))

    # With a zero delay every call must go through immediately, in order.
    for value in range(3):
        wrapped(value)

    assert calls == [0, 1, 2]
| apache-2.0 |
bclau/nova | nova/tests/cells/test_cells_manager.py | 9 | 37403 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For CellsManager
"""
import copy
import datetime
from oslo.config import cfg
from nova.cells import messaging
from nova.cells import utils as cells_utils
from nova import context
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
from nova import test
from nova.tests.cells import fakes
from nova.tests import fake_instance_actions
# Global config handle; compute_topic is declared in nova.compute.rpcapi
# and must be imported before tests can read it from CONF.
CONF = cfg.CONF
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')

# Canned DB-style fixtures shared by the service/compute-node tests below.
FAKE_COMPUTE_NODES = [dict(id=1), dict(id=2)]
# Services 1 and 2 each carry one compute node; service 3 has none.
FAKE_SERVICES = [dict(id=1, host='host1',
                      compute_node=[FAKE_COMPUTE_NODES[0]]),
                 dict(id=2, host='host2',
                      compute_node=[FAKE_COMPUTE_NODES[1]]),
                 dict(id=3, host='host3', compute_node=[])]
FAKE_TASK_LOGS = [dict(id=1, host='host1'),
                  dict(id=2, host='host2')]
class CellsManagerClassTestCase(test.NoDBTestCase):
"""Test case for CellsManager class."""
def setUp(self):
super(CellsManagerClassTestCase, self).setUp()
fakes.init(self)
# pick a child cell to use for tests.
self.our_cell = 'grandchild-cell1'
self.cells_manager = fakes.get_cells_manager(self.our_cell)
self.msg_runner = self.cells_manager.msg_runner
self.state_manager = fakes.get_state_manager(self.our_cell)
self.driver = self.cells_manager.driver
self.ctxt = 'fake_context'
def _get_fake_response(self, raw_response=None, exc=False):
if exc:
return messaging.Response('fake', test.TestingException(),
True)
if raw_response is None:
raw_response = 'fake-response'
return messaging.Response('fake', raw_response, False)
def test_get_cell_info_for_neighbors(self):
self.mox.StubOutWithMock(self.cells_manager.state_manager,
'get_cell_info_for_neighbors')
self.cells_manager.state_manager.get_cell_info_for_neighbors()
self.mox.ReplayAll()
self.cells_manager.get_cell_info_for_neighbors(self.ctxt)
def test_post_start_hook_child_cell(self):
self.mox.StubOutWithMock(self.driver, 'start_consumers')
self.mox.StubOutWithMock(context, 'get_admin_context')
self.mox.StubOutWithMock(self.cells_manager, '_update_our_parents')
self.driver.start_consumers(self.msg_runner)
context.get_admin_context().AndReturn(self.ctxt)
self.cells_manager._update_our_parents(self.ctxt)
self.mox.ReplayAll()
self.cells_manager.post_start_hook()
def test_post_start_hook_middle_cell(self):
cells_manager = fakes.get_cells_manager('child-cell2')
msg_runner = cells_manager.msg_runner
driver = cells_manager.driver
self.mox.StubOutWithMock(driver, 'start_consumers')
self.mox.StubOutWithMock(context, 'get_admin_context')
self.mox.StubOutWithMock(msg_runner,
'ask_children_for_capabilities')
self.mox.StubOutWithMock(msg_runner,
'ask_children_for_capacities')
driver.start_consumers(msg_runner)
context.get_admin_context().AndReturn(self.ctxt)
msg_runner.ask_children_for_capabilities(self.ctxt)
msg_runner.ask_children_for_capacities(self.ctxt)
self.mox.ReplayAll()
cells_manager.post_start_hook()
def test_update_our_parents(self):
self.mox.StubOutWithMock(self.msg_runner,
'tell_parents_our_capabilities')
self.mox.StubOutWithMock(self.msg_runner,
'tell_parents_our_capacities')
self.msg_runner.tell_parents_our_capabilities(self.ctxt)
self.msg_runner.tell_parents_our_capacities(self.ctxt)
self.mox.ReplayAll()
self.cells_manager._update_our_parents(self.ctxt)
def test_schedule_run_instance(self):
host_sched_kwargs = 'fake_host_sched_kwargs_silently_passed'
self.mox.StubOutWithMock(self.msg_runner, 'schedule_run_instance')
our_cell = self.msg_runner.state_manager.get_my_state()
self.msg_runner.schedule_run_instance(self.ctxt, our_cell,
host_sched_kwargs)
self.mox.ReplayAll()
self.cells_manager.schedule_run_instance(self.ctxt,
host_sched_kwargs=host_sched_kwargs)
def test_build_instances(self):
build_inst_kwargs = {'instances': [1, 2]}
self.mox.StubOutWithMock(self.msg_runner, 'build_instances')
our_cell = self.msg_runner.state_manager.get_my_state()
self.msg_runner.build_instances(self.ctxt, our_cell, build_inst_kwargs)
self.mox.ReplayAll()
self.cells_manager.build_instances(self.ctxt,
build_inst_kwargs=build_inst_kwargs)
def test_run_compute_api_method(self):
# Args should just be silently passed through
cell_name = 'fake-cell-name'
method_info = 'fake-method-info'
self.mox.StubOutWithMock(self.msg_runner,
'run_compute_api_method')
fake_response = self._get_fake_response()
self.msg_runner.run_compute_api_method(self.ctxt,
cell_name,
method_info,
True).AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.run_compute_api_method(
self.ctxt, cell_name=cell_name, method_info=method_info,
call=True)
self.assertEqual('fake-response', response)
def test_instance_update_at_top(self):
self.mox.StubOutWithMock(self.msg_runner, 'instance_update_at_top')
self.msg_runner.instance_update_at_top(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.instance_update_at_top(self.ctxt,
instance='fake-instance')
def test_instance_destroy_at_top(self):
self.mox.StubOutWithMock(self.msg_runner, 'instance_destroy_at_top')
self.msg_runner.instance_destroy_at_top(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.instance_destroy_at_top(self.ctxt,
instance='fake-instance')
def test_instance_delete_everywhere(self):
self.mox.StubOutWithMock(self.msg_runner,
'instance_delete_everywhere')
self.msg_runner.instance_delete_everywhere(self.ctxt,
'fake-instance',
'fake-type')
self.mox.ReplayAll()
self.cells_manager.instance_delete_everywhere(
self.ctxt, instance='fake-instance',
delete_type='fake-type')
def test_instance_fault_create_at_top(self):
self.mox.StubOutWithMock(self.msg_runner,
'instance_fault_create_at_top')
self.msg_runner.instance_fault_create_at_top(self.ctxt,
'fake-fault')
self.mox.ReplayAll()
self.cells_manager.instance_fault_create_at_top(
self.ctxt, instance_fault='fake-fault')
def test_bw_usage_update_at_top(self):
self.mox.StubOutWithMock(self.msg_runner,
'bw_usage_update_at_top')
self.msg_runner.bw_usage_update_at_top(self.ctxt,
'fake-bw-info')
self.mox.ReplayAll()
self.cells_manager.bw_usage_update_at_top(
self.ctxt, bw_update_info='fake-bw-info')
    def test_heal_instances(self):
        """_heal_instances syncs a bounded batch and resumes where it left off.

        With instance_update_num_instances=2 and 3 stale instances, the
        first pass must sync instances 1-2 and the second pass must wrap
        around, syncing instance 3 and then instance 1 again.
        """
        self.flags(instance_updated_at_threshold=1000,
                   instance_update_num_instances=2,
                   group='cells')
        fake_context = context.RequestContext('fake', 'fake')
        stalled_time = timeutils.utcnow()
        # 'updated_since' passed to the sync helper should be "now" minus
        # the configured threshold; utcnow is frozen below to make it exact.
        updated_since = stalled_time - datetime.timedelta(seconds=1000)
        def utcnow():
            return stalled_time
        call_info = {'get_instances': 0, 'sync_instances': []}
        instances = ['instance1', 'instance2', 'instance3']
        def get_instances_to_sync(context, **kwargs):
            self.assertEqual(context, fake_context)
            call_info['shuffle'] = kwargs.get('shuffle')
            call_info['project_id'] = kwargs.get('project_id')
            call_info['updated_since'] = kwargs.get('updated_since')
            call_info['get_instances'] += 1
            return iter(instances)
        def instance_get_by_uuid(context, uuid):
            # Map the fake uuid's trailing digit back to the fixture list.
            return instances[int(uuid[-1]) - 1]
        def sync_instance(context, instance):
            self.assertEqual(context, fake_context)
            call_info['sync_instances'].append(instance)
        self.stubs.Set(cells_utils, 'get_instances_to_sync',
                get_instances_to_sync)
        self.stubs.Set(self.cells_manager.db, 'instance_get_by_uuid',
                instance_get_by_uuid)
        self.stubs.Set(self.cells_manager, '_sync_instance',
                sync_instance)
        self.stubs.Set(timeutils, 'utcnow', utcnow)
        self.cells_manager._heal_instances(fake_context)
        self.assertEqual(call_info['shuffle'], True)
        self.assertEqual(call_info['project_id'], None)
        self.assertEqual(call_info['updated_since'], updated_since)
        self.assertEqual(call_info['get_instances'], 1)
        # Only first 2
        self.assertEqual(call_info['sync_instances'],
                instances[:2])
        call_info['sync_instances'] = []
        self.cells_manager._heal_instances(fake_context)
        self.assertEqual(call_info['shuffle'], True)
        self.assertEqual(call_info['project_id'], None)
        self.assertEqual(call_info['updated_since'], updated_since)
        self.assertEqual(call_info['get_instances'], 2)
        # Now the last 1 and the first 1
        self.assertEqual(call_info['sync_instances'],
                [instances[-1], instances[0]])
    def test_sync_instances(self):
        """sync_instances forwards project/updated_since/deleted filters."""
        self.mox.StubOutWithMock(self.msg_runner,
                                 'sync_instances')
        self.msg_runner.sync_instances(self.ctxt, 'fake-project',
                'fake-time', 'fake-deleted')
        self.mox.ReplayAll()
        self.cells_manager.sync_instances(self.ctxt,
                project_id='fake-project',
                updated_since='fake-time',
                deleted='fake-deleted')
    def test_service_get_all(self):
        """service_get_all flattens per-cell responses into one tagged list."""
        responses = []
        expected_response = []
        # 3 cells... so 3 responses.  Each response is a list of services.
        # Manager should turn these into a single list of responses.
        for i in xrange(3):
            cell_name = 'path!to!cell%i' % i
            services = []
            for service in FAKE_SERVICES:
                services.append(copy.deepcopy(service))
                # Each expected service carries its cell name folded in.
                expected_service = copy.deepcopy(service)
                cells_utils.add_cell_to_service(expected_service, cell_name)
                expected_response.append(expected_service)
            response = messaging.Response(cell_name, services, False)
            responses.append(response)
        self.mox.StubOutWithMock(self.msg_runner,
                                 'service_get_all')
        self.msg_runner.service_get_all(self.ctxt,
                'fake-filters').AndReturn(responses)
        self.mox.ReplayAll()
        response = self.cells_manager.service_get_all(self.ctxt,
                filters='fake-filters')
        self.assertEqual(expected_response, response)
    def test_service_get_by_compute_host(self):
        """The 'cell!host' name is split and the result is cell-tagged."""
        self.mox.StubOutWithMock(self.msg_runner,
                                 'service_get_by_compute_host')
        fake_cell = 'fake-cell'
        fake_response = messaging.Response(fake_cell, FAKE_SERVICES[0],
                                           False)
        expected_response = copy.deepcopy(FAKE_SERVICES[0])
        cells_utils.add_cell_to_service(expected_response, fake_cell)
        cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
        self.msg_runner.service_get_by_compute_host(self.ctxt,
                fake_cell, 'fake-host').AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.service_get_by_compute_host(self.ctxt,
                host_name=cell_and_host)
        self.assertEqual(expected_response, response)
    def test_get_host_uptime(self):
        """get_host_uptime splits 'cell!host' and returns the raw value."""
        fake_cell = 'parent!fake-cell'
        fake_host = 'fake-host'
        fake_cell_and_host = cells_utils.cell_with_item(fake_cell, fake_host)
        host_uptime = (" 08:32:11 up 93 days, 18:25, 12 users,  load average:"
                       " 0.20, 0.12, 0.14")
        fake_response = messaging.Response(fake_cell, host_uptime, False)
        self.mox.StubOutWithMock(self.msg_runner,
                                 'get_host_uptime')
        self.msg_runner.get_host_uptime(self.ctxt, fake_cell, fake_host).\
            AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.get_host_uptime(self.ctxt,
                                                      fake_cell_and_host)
        self.assertEqual(host_uptime, response)
    def test_service_update(self):
        """service_update forwards cell/host/binary/params; result tagged."""
        fake_cell = 'fake-cell'
        fake_response = messaging.Response(
            fake_cell, FAKE_SERVICES[0], False)
        expected_response = copy.deepcopy(FAKE_SERVICES[0])
        cells_utils.add_cell_to_service(expected_response, fake_cell)
        cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
        params_to_update = {'disabled': True}
        self.mox.StubOutWithMock(self.msg_runner, 'service_update')
        self.msg_runner.service_update(self.ctxt,
                fake_cell, 'fake-host', 'nova-api',
                params_to_update).AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.service_update(
            self.ctxt, host_name=cell_and_host, binary='nova-api',
            params_to_update=params_to_update)
        self.assertEqual(expected_response, response)
    def test_proxy_rpc_to_manager(self):
        """proxy_rpc_to_manager derives cell/host from the topic and proxies."""
        self.mox.StubOutWithMock(self.msg_runner,
                                 'proxy_rpc_to_manager')
        fake_response = self._get_fake_response()
        cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
        topic = rpc.queue_get_for(self.ctxt, CONF.compute_topic,
                                  cell_and_host)
        self.msg_runner.proxy_rpc_to_manager(self.ctxt, 'fake-cell',
                'fake-host', topic, 'fake-rpc-msg',
                True, -1).AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.proxy_rpc_to_manager(self.ctxt,
                topic=topic, rpc_message='fake-rpc-msg', call=True,
                timeout=-1)
        self.assertEqual('fake-response', response)
def _build_task_log_responses(self, num):
responses = []
expected_response = []
# 3 cells... so 3 responses. Each response is a list of task log
# entries. Manager should turn these into a single list of
# task log entries.
for i in xrange(num):
cell_name = 'path!to!cell%i' % i
task_logs = []
for task_log in FAKE_TASK_LOGS:
task_logs.append(copy.deepcopy(task_log))
expected_task_log = copy.deepcopy(task_log)
cells_utils.add_cell_to_task_log(expected_task_log,
cell_name)
expected_response.append(expected_task_log)
response = messaging.Response(cell_name, task_logs, False)
responses.append(response)
return expected_response, responses
    def test_task_log_get_all(self):
        """With no host filter, cell/host/state default to None."""
        expected_response, responses = self._build_task_log_responses(3)
        self.mox.StubOutWithMock(self.msg_runner,
                                 'task_log_get_all')
        self.msg_runner.task_log_get_all(self.ctxt, None,
                'fake-name', 'fake-begin',
                'fake-end', host=None, state=None).AndReturn(responses)
        self.mox.ReplayAll()
        response = self.cells_manager.task_log_get_all(self.ctxt,
                task_name='fake-name',
                period_beginning='fake-begin', period_ending='fake-end')
        self.assertEqual(expected_response, response)
    def test_task_log_get_all_with_filters(self):
        """A 'cell!host' filter is split into separate cell and host args."""
        expected_response, responses = self._build_task_log_responses(1)
        cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
        self.mox.StubOutWithMock(self.msg_runner,
                                 'task_log_get_all')
        self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell',
                'fake-name', 'fake-begin', 'fake-end', host='fake-host',
                state='fake-state').AndReturn(responses)
        self.mox.ReplayAll()
        response = self.cells_manager.task_log_get_all(self.ctxt,
                task_name='fake-name',
                period_beginning='fake-begin', period_ending='fake-end',
                host=cell_and_host, state='fake-state')
        self.assertEqual(expected_response, response)
    def test_task_log_get_all_with_cell_but_no_host_filters(self):
        """A filter with only a cell name yields host=None downstream."""
        expected_response, responses = self._build_task_log_responses(1)
        # Host filter only has cell name.
        cell_and_host = 'fake-cell'
        self.mox.StubOutWithMock(self.msg_runner,
                                 'task_log_get_all')
        self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell',
                'fake-name', 'fake-begin', 'fake-end', host=None,
                state='fake-state').AndReturn(responses)
        self.mox.ReplayAll()
        response = self.cells_manager.task_log_get_all(self.ctxt,
                task_name='fake-name',
                period_beginning='fake-begin', period_ending='fake-end',
                host=cell_and_host, state='fake-state')
        self.assertEqual(expected_response, response)
    def test_compute_node_get_all(self):
        """compute_node_get_all flattens per-cell compute-node responses."""
        responses = []
        expected_response = []
        # 3 cells... so 3 responses.  Each response is a list of computes.
        # Manager should turn these into a single list of responses.
        for i in xrange(3):
            cell_name = 'path!to!cell%i' % i
            compute_nodes = []
            for compute_node in FAKE_COMPUTE_NODES:
                compute_nodes.append(copy.deepcopy(compute_node))
                # Expected entries are copies tagged with their cell name.
                expected_compute_node = copy.deepcopy(compute_node)
                cells_utils.add_cell_to_compute_node(expected_compute_node,
                                                     cell_name)
                expected_response.append(expected_compute_node)
            response = messaging.Response(cell_name, compute_nodes, False)
            responses.append(response)
        self.mox.StubOutWithMock(self.msg_runner,
                                 'compute_node_get_all')
        self.msg_runner.compute_node_get_all(self.ctxt,
                hypervisor_match='fake-match').AndReturn(responses)
        self.mox.ReplayAll()
        response = self.cells_manager.compute_node_get_all(self.ctxt,
                hypervisor_match='fake-match')
        self.assertEqual(expected_response, response)
def test_compute_node_stats(self):
raw_resp1 = {'key1': 1, 'key2': 2}
raw_resp2 = {'key2': 1, 'key3': 2}
raw_resp3 = {'key3': 1, 'key4': 2}
responses = [messaging.Response('cell1', raw_resp1, False),
messaging.Response('cell2', raw_resp2, False),
messaging.Response('cell2', raw_resp3, False)]
expected_resp = {'key1': 1, 'key2': 3, 'key3': 3, 'key4': 2}
self.mox.StubOutWithMock(self.msg_runner,
'compute_node_stats')
self.msg_runner.compute_node_stats(self.ctxt).AndReturn(responses)
self.mox.ReplayAll()
response = self.cells_manager.compute_node_stats(self.ctxt)
self.assertEqual(expected_resp, response)
    def test_compute_node_get(self):
        """compute_node_get splits 'cell!id' and tags the returned node."""
        fake_cell = 'fake-cell'
        fake_response = messaging.Response(fake_cell,
                                           FAKE_COMPUTE_NODES[0],
                                           False)
        expected_response = copy.deepcopy(FAKE_COMPUTE_NODES[0])
        cells_utils.add_cell_to_compute_node(expected_response, fake_cell)
        cell_and_id = cells_utils.cell_with_item(fake_cell, 'fake-id')
        self.mox.StubOutWithMock(self.msg_runner,
                                 'compute_node_get')
        self.msg_runner.compute_node_get(self.ctxt,
                'fake-cell', 'fake-id').AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.compute_node_get(self.ctxt,
                compute_id=cell_and_id)
        self.assertEqual(expected_response, response)
    def test_actions_get(self):
        """actions_get unwraps the Response and returns the action list."""
        fake_uuid = fake_instance_actions.FAKE_UUID
        fake_req_id = fake_instance_actions.FAKE_REQUEST_ID1
        fake_act = fake_instance_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
        fake_response = messaging.Response('fake-cell', [fake_act], False)
        expected_response = [fake_act]
        self.mox.StubOutWithMock(self.msg_runner, 'actions_get')
        self.msg_runner.actions_get(self.ctxt, 'fake-cell',
                                    'fake-uuid').AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.actions_get(self.ctxt, 'fake-cell',
                                                  'fake-uuid')
        self.assertEqual(expected_response, response)
    def test_action_get_by_request_id(self):
        """action_get_by_request_id unwraps the single-action Response."""
        fake_uuid = fake_instance_actions.FAKE_UUID
        fake_req_id = fake_instance_actions.FAKE_REQUEST_ID1
        fake_act = fake_instance_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
        fake_response = messaging.Response('fake-cell', fake_act, False)
        expected_response = fake_act
        self.mox.StubOutWithMock(self.msg_runner, 'action_get_by_request_id')
        self.msg_runner.action_get_by_request_id(self.ctxt, 'fake-cell',
                'fake-uuid', 'req-fake').AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.action_get_by_request_id(self.ctxt,
                                                               'fake-cell',
                                                               'fake-uuid',
                                                               'req-fake')
        self.assertEqual(expected_response, response)
    def test_action_events_get(self):
        """action_events_get unwraps the Response and returns the events."""
        fake_action_id = fake_instance_actions.FAKE_ACTION_ID1
        fake_events = fake_instance_actions.FAKE_EVENTS[fake_action_id]
        fake_response = messaging.Response('fake-cell', fake_events, False)
        expected_response = fake_events
        self.mox.StubOutWithMock(self.msg_runner, 'action_events_get')
        self.msg_runner.action_events_get(self.ctxt, 'fake-cell',
                                          'fake-action').AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.action_events_get(self.ctxt, 'fake-cell',
                                                        'fake-action')
        self.assertEqual(expected_response, response)
    def test_consoleauth_delete_tokens(self):
        """consoleauth_delete_tokens forwards the instance uuid unchanged."""
        instance_uuid = 'fake-instance-uuid'
        self.mox.StubOutWithMock(self.msg_runner,
                                 'consoleauth_delete_tokens')
        self.msg_runner.consoleauth_delete_tokens(self.ctxt, instance_uuid)
        self.mox.ReplayAll()
        self.cells_manager.consoleauth_delete_tokens(self.ctxt,
                instance_uuid=instance_uuid)
    def test_get_capacities(self):
        """get_capacities is answered by the state manager, not msg_runner."""
        cell_name = 'cell_name'
        response = {"ram_free":
                   {"units_by_mb": {"64": 20, "128": 10}, "total_mb": 1491}}
        self.mox.StubOutWithMock(self.state_manager,
                                 'get_capacities')
        self.state_manager.get_capacities(cell_name).AndReturn(response)
        self.mox.ReplayAll()
        self.assertEqual(response,
                self.cells_manager.get_capacities(self.ctxt, cell_name))
    def test_validate_console_port(self):
        """The instance's cell_name is looked up in the DB before proxying."""
        instance_uuid = 'fake-instance-uuid'
        cell_name = 'fake-cell-name'
        instance = {'cell_name': cell_name}
        console_port = 'fake-console-port'
        console_type = 'fake-console-type'
        self.mox.StubOutWithMock(self.msg_runner,
                                 'validate_console_port')
        self.mox.StubOutWithMock(self.cells_manager.db,
                                 'instance_get_by_uuid')
        fake_response = self._get_fake_response()
        # DB lookup supplies the cell routing info for the RPC below.
        self.cells_manager.db.instance_get_by_uuid(self.ctxt,
                instance_uuid).AndReturn(instance)
        self.msg_runner.validate_console_port(self.ctxt, cell_name,
                instance_uuid, console_port,
                console_type).AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.validate_console_port(self.ctxt,
                instance_uuid=instance_uuid, console_port=console_port,
                console_type=console_type)
        self.assertEqual('fake-response', response)
    def test_bdm_update_or_create_at_top(self):
        """bdm_update_or_create_at_top forwards the BDM and create flag."""
        self.mox.StubOutWithMock(self.msg_runner,
                                 'bdm_update_or_create_at_top')
        self.msg_runner.bdm_update_or_create_at_top(self.ctxt,
                                                    'fake-bdm',
                                                    create='foo')
        self.mox.ReplayAll()
        self.cells_manager.bdm_update_or_create_at_top(self.ctxt,
                                                       'fake-bdm',
                                                       create='foo')
    def test_bdm_destroy_at_top(self):
        """bdm_destroy_at_top forwards uuid plus device/volume selectors."""
        self.mox.StubOutWithMock(self.msg_runner, 'bdm_destroy_at_top')
        self.msg_runner.bdm_destroy_at_top(self.ctxt,
                                           'fake_instance_uuid',
                                           device_name='fake_device_name',
                                           volume_id='fake_volume_id')
        self.mox.ReplayAll()
        self.cells_manager.bdm_destroy_at_top(self.ctxt,
                                              'fake_instance_uuid',
                                              device_name='fake_device_name',
                                              volume_id='fake_volume_id')
    def test_get_migrations(self):
        """With no cell filter, migrations from all cells are concatenated."""
        filters = {'status': 'confirmed'}
        cell1_migrations = [{'id': 123}]
        cell2_migrations = [{'id': 456}]
        fake_responses = [self._get_fake_response(cell1_migrations),
                          self._get_fake_response(cell2_migrations)]
        self.mox.StubOutWithMock(self.msg_runner,
                                 'get_migrations')
        self.msg_runner.get_migrations(self.ctxt, None, False, filters).\
            AndReturn(fake_responses)
        self.mox.ReplayAll()
        response = self.cells_manager.get_migrations(self.ctxt, filters)
        self.assertEqual([cell1_migrations[0], cell2_migrations[0]], response)
    def test_get_migrations_for_a_given_cell(self):
        """A cell_name filter is expanded to '<this-cell>!<child>' routing."""
        filters = {'status': 'confirmed', 'cell_name': 'ChildCell1'}
        target_cell = '%s%s%s' % (CONF.cells.name, '!', filters['cell_name'])
        migrations = [{'id': 123}]
        fake_responses = [self._get_fake_response(migrations)]
        self.mox.StubOutWithMock(self.msg_runner,
                                 'get_migrations')
        self.msg_runner.get_migrations(self.ctxt, target_cell, False,
                                       filters).AndReturn(fake_responses)
        self.mox.ReplayAll()
        response = self.cells_manager.get_migrations(self.ctxt, filters)
        self.assertEqual(migrations, response)
    def test_instance_update_from_api(self):
        """instance_update_from_api forwards expected states and reset flag."""
        self.mox.StubOutWithMock(self.msg_runner,
                                 'instance_update_from_api')
        self.msg_runner.instance_update_from_api(self.ctxt,
                                                 'fake-instance',
                                                 'exp_vm', 'exp_task',
                                                 'admin_reset')
        self.mox.ReplayAll()
        self.cells_manager.instance_update_from_api(
                self.ctxt, instance='fake-instance',
                expected_vm_state='exp_vm',
                expected_task_state='exp_task',
                admin_state_reset='admin_reset')
    def test_start_instance(self):
        """start_instance is proxied straight to msg_runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'start_instance')
        self.msg_runner.start_instance(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.start_instance(self.ctxt, instance='fake-instance')
    def test_stop_instance(self):
        """stop_instance forwards the do_cast flag along with the instance."""
        self.mox.StubOutWithMock(self.msg_runner, 'stop_instance')
        self.msg_runner.stop_instance(self.ctxt, 'fake-instance',
                                      do_cast='meow')
        self.mox.ReplayAll()
        self.cells_manager.stop_instance(self.ctxt,
                                         instance='fake-instance',
                                         do_cast='meow')
    def test_cell_create(self):
        """cell_create delegates to the state manager and returns its result."""
        values = 'values'
        response = 'created_cell'
        self.mox.StubOutWithMock(self.state_manager,
                                 'cell_create')
        self.state_manager.cell_create(self.ctxt, values).\
            AndReturn(response)
        self.mox.ReplayAll()
        self.assertEqual(response,
                         self.cells_manager.cell_create(self.ctxt, values))
    def test_cell_update(self):
        """cell_update delegates to the state manager and returns its result."""
        cell_name = 'cell_name'
        values = 'values'
        response = 'updated_cell'
        self.mox.StubOutWithMock(self.state_manager,
                                 'cell_update')
        self.state_manager.cell_update(self.ctxt, cell_name, values).\
            AndReturn(response)
        self.mox.ReplayAll()
        self.assertEqual(response,
                         self.cells_manager.cell_update(self.ctxt, cell_name,
                                                        values))
    def test_cell_delete(self):
        """cell_delete delegates to the state manager and returns its count."""
        cell_name = 'cell_name'
        response = 1
        self.mox.StubOutWithMock(self.state_manager,
                                 'cell_delete')
        self.state_manager.cell_delete(self.ctxt, cell_name).\
            AndReturn(response)
        self.mox.ReplayAll()
        self.assertEqual(response,
                         self.cells_manager.cell_delete(self.ctxt, cell_name))
    def test_cell_get(self):
        """cell_get delegates to the state manager and returns its result."""
        cell_name = 'cell_name'
        response = 'cell_info'
        self.mox.StubOutWithMock(self.state_manager,
                                 'cell_get')
        self.state_manager.cell_get(self.ctxt, cell_name).\
            AndReturn(response)
        self.mox.ReplayAll()
        self.assertEqual(response,
                         self.cells_manager.cell_get(self.ctxt, cell_name))
    def test_reboot_instance(self):
        """reboot_instance forwards the reboot_type with the instance."""
        self.mox.StubOutWithMock(self.msg_runner, 'reboot_instance')
        self.msg_runner.reboot_instance(self.ctxt, 'fake-instance',
                                        'HARD')
        self.mox.ReplayAll()
        self.cells_manager.reboot_instance(self.ctxt,
                                           instance='fake-instance',
                                           reboot_type='HARD')
    def test_suspend_instance(self):
        """suspend_instance is proxied straight to msg_runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'suspend_instance')
        self.msg_runner.suspend_instance(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.suspend_instance(self.ctxt,
                                            instance='fake-instance')
    def test_resume_instance(self):
        """resume_instance is proxied straight to msg_runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'resume_instance')
        self.msg_runner.resume_instance(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.resume_instance(self.ctxt,
                                           instance='fake-instance')
    def test_terminate_instance(self):
        """terminate_instance is proxied straight to msg_runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'terminate_instance')
        self.msg_runner.terminate_instance(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.terminate_instance(self.ctxt,
                                              instance='fake-instance')
    def test_soft_delete_instance(self):
        """soft_delete_instance is proxied straight to msg_runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'soft_delete_instance')
        self.msg_runner.soft_delete_instance(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.soft_delete_instance(self.ctxt,
                                                instance='fake-instance')
    def test_resize_instance(self):
        """resize_instance forwards flavor and extra update dict."""
        self.mox.StubOutWithMock(self.msg_runner, 'resize_instance')
        self.msg_runner.resize_instance(self.ctxt, 'fake-instance',
                                        'fake-flavor', 'fake-updates')
        self.mox.ReplayAll()
        self.cells_manager.resize_instance(
                self.ctxt, instance='fake-instance', flavor='fake-flavor',
                extra_instance_updates='fake-updates')
    def test_live_migrate_instance(self):
        """live_migrate_instance forwards block/commit flags and target host."""
        self.mox.StubOutWithMock(self.msg_runner, 'live_migrate_instance')
        self.msg_runner.live_migrate_instance(self.ctxt, 'fake-instance',
                                              'fake-block', 'fake-commit',
                                              'fake-host')
        self.mox.ReplayAll()
        self.cells_manager.live_migrate_instance(
                self.ctxt, instance='fake-instance',
                block_migration='fake-block', disk_over_commit='fake-commit',
                host_name='fake-host')
    def test_revert_resize(self):
        """revert_resize is proxied straight to msg_runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'revert_resize')
        self.msg_runner.revert_resize(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.revert_resize(self.ctxt, instance='fake-instance')
    def test_confirm_resize(self):
        """confirm_resize is proxied straight to msg_runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'confirm_resize')
        self.msg_runner.confirm_resize(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.confirm_resize(self.ctxt, instance='fake-instance')
    def test_reset_network(self):
        """reset_network is proxied straight to msg_runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'reset_network')
        self.msg_runner.reset_network(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.reset_network(self.ctxt, instance='fake-instance')
    def test_inject_network_info(self):
        """inject_network_info is proxied straight to msg_runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'inject_network_info')
        self.msg_runner.inject_network_info(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.inject_network_info(self.ctxt,
                                               instance='fake-instance')
    def test_snapshot_instance(self):
        """snapshot_instance forwards the target image id."""
        self.mox.StubOutWithMock(self.msg_runner, 'snapshot_instance')
        self.msg_runner.snapshot_instance(self.ctxt, 'fake-instance',
                                          'fake-id')
        self.mox.ReplayAll()
        self.cells_manager.snapshot_instance(self.ctxt,
                                             instance='fake-instance',
                                             image_id='fake-id')
    def test_backup_instance(self):
        """backup_instance forwards image id, backup type and rotation."""
        self.mox.StubOutWithMock(self.msg_runner, 'backup_instance')
        self.msg_runner.backup_instance(self.ctxt, 'fake-instance',
                                        'fake-id', 'backup-type',
                                        'rotation')
        self.mox.ReplayAll()
        self.cells_manager.backup_instance(self.ctxt,
                                           instance='fake-instance',
                                           image_id='fake-id',
                                           backup_type='backup-type',
                                           rotation='rotation')
| apache-2.0 |
Lekanich/intellij-community | python/lib/Lib/encodings/cp437.py | 593 | 34820 | """ Python Character Mapping Codec cp437 generated from 'VENDORS/MICSFT/PC/CP437.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp437 codec built on the C-level charmap helpers."""

    def encode(self, input, errors='strict'):
        """Return (bytes, length-consumed) for *input* under cp437."""
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self, input, errors='strict'):
        """Return (text, length-consumed) for cp437 *input*."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental cp437 encoder; charmap encoding keeps no state."""

    def encode(self, input, final=False):
        """Encode *input*, dropping the consumed-length half of the pair."""
        encoded, _consumed = codecs.charmap_encode(input, self.errors,
                                                   encoding_map)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental cp437 decoder; charmap decoding keeps no state."""

    def decode(self, input, final=False):
        """Decode *input*, dropping the consumed-length half of the pair."""
        decoded, _consumed = codecs.charmap_decode(input, self.errors,
                                                   decoding_table)
        return decoded
class StreamWriter(Codec,codecs.StreamWriter):
    # Combines Codec.encode with the generic stream-writer machinery;
    # no extra behavior is needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Combines Codec.decode with the generic stream-reader machinery;
    # no extra behavior is needed.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo used to register 'cp437' with the codecs
    machinery (consumed by the encodings package's search function)."""
    return codecs.CodecInfo(
        name='cp437',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
# Byte -> Unicode code point map for cp437.  Start from the identity map
# (bytes 0x00-0x7F coincide with ASCII) and override only the high half,
# where cp437 diverges from Latin-1.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
    0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
    0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
    0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
    0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
    0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
    0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
    0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
    0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
    0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
    0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
    0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
    0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
    0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
    0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
    0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
    0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
    0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
    0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
    0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
    0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
    0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
    0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
    0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
    0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
    0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
    0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
    0x009b: 0x00a2, # CENT SIGN
    0x009c: 0x00a3, # POUND SIGN
    0x009d: 0x00a5, # YEN SIGN
    0x009e: 0x20a7, # PESETA SIGN
    0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
    0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
    0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
    0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
    0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
    0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
    0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
    0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
    0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
    0x00a8: 0x00bf, # INVERTED QUESTION MARK
    0x00a9: 0x2310, # REVERSED NOT SIGN
    0x00aa: 0x00ac, # NOT SIGN
    0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
    0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
    0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
    0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00b0: 0x2591, # LIGHT SHADE
    0x00b1: 0x2592, # MEDIUM SHADE
    0x00b2: 0x2593, # DARK SHADE
    0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
    0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
    0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
    0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
    0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
    0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
    0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
    0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x00db: 0x2588, # FULL BLOCK
    0x00dc: 0x2584, # LOWER HALF BLOCK
    0x00dd: 0x258c, # LEFT HALF BLOCK
    0x00de: 0x2590, # RIGHT HALF BLOCK
    0x00df: 0x2580, # UPPER HALF BLOCK
    0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
    0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
    0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
    0x00e3: 0x03c0, # GREEK SMALL LETTER PI
    0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
    0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
    0x00e6: 0x00b5, # MICRO SIGN
    0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
    0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
    0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
    0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
    0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
    0x00ec: 0x221e, # INFINITY
    0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
    0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
    0x00ef: 0x2229, # INTERSECTION
    0x00f0: 0x2261, # IDENTICAL TO
    0x00f1: 0x00b1, # PLUS-MINUS SIGN
    0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
    0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
    0x00f4: 0x2320, # TOP HALF INTEGRAL
    0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
    0x00f6: 0x00f7, # DIVISION SIGN
    0x00f7: 0x2248, # ALMOST EQUAL TO
    0x00f8: 0x00b0, # DEGREE SIGN
    0x00f9: 0x2219, # BULLET OPERATOR
    0x00fa: 0x00b7, # MIDDLE DOT
    0x00fb: 0x221a, # SQUARE ROOT
    0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
    0x00fd: 0x00b2, # SUPERSCRIPT TWO
    0x00fe: 0x25a0, # BLACK SQUARE
    0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xa2' # 0x009b -> CENT SIGN
u'\xa3' # 0x009c -> POUND SIGN
u'\xa5' # 0x009d -> YEN SIGN
u'\u20a7' # 0x009e -> PESETA SIGN
u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\u2310' # 0x00a9 -> REVERSED NOT SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
u'\u221e' # 0x00ec -> INFINITY
u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
u'\u2229' # 0x00ef -> INTERSECTION
u'\u2261' # 0x00f0 -> IDENTICAL TO
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
# Inverse of the decoding table above: maps each supported Unicode code
# point to the single byte that encodes it in this code page.  The ASCII
# range (0x00-0x7f) is an identity mapping and is generated below; every
# remaining code point is listed explicitly.  Code points absent from this
# dict cannot be encoded in this charset.
encoding_map = {codepoint: codepoint for codepoint in range(0x80)}
encoding_map.update({
    0x00a0: 0x00ff,  # NO-BREAK SPACE
    0x00a1: 0x00ad,  # INVERTED EXCLAMATION MARK
    0x00a2: 0x009b,  # CENT SIGN
    0x00a3: 0x009c,  # POUND SIGN
    0x00a5: 0x009d,  # YEN SIGN
    0x00aa: 0x00a6,  # FEMININE ORDINAL INDICATOR
    0x00ab: 0x00ae,  # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00ac: 0x00aa,  # NOT SIGN
    0x00b0: 0x00f8,  # DEGREE SIGN
    0x00b1: 0x00f1,  # PLUS-MINUS SIGN
    0x00b2: 0x00fd,  # SUPERSCRIPT TWO
    0x00b5: 0x00e6,  # MICRO SIGN
    0x00b7: 0x00fa,  # MIDDLE DOT
    0x00ba: 0x00a7,  # MASCULINE ORDINAL INDICATOR
    0x00bb: 0x00af,  # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00bc: 0x00ac,  # VULGAR FRACTION ONE QUARTER
    0x00bd: 0x00ab,  # VULGAR FRACTION ONE HALF
    0x00bf: 0x00a8,  # INVERTED QUESTION MARK
    0x00c4: 0x008e,  # LATIN CAPITAL LETTER A WITH DIAERESIS
    0x00c5: 0x008f,  # LATIN CAPITAL LETTER A WITH RING ABOVE
    0x00c6: 0x0092,  # LATIN CAPITAL LIGATURE AE
    0x00c7: 0x0080,  # LATIN CAPITAL LETTER C WITH CEDILLA
    0x00c9: 0x0090,  # LATIN CAPITAL LETTER E WITH ACUTE
    0x00d1: 0x00a5,  # LATIN CAPITAL LETTER N WITH TILDE
    0x00d6: 0x0099,  # LATIN CAPITAL LETTER O WITH DIAERESIS
    0x00dc: 0x009a,  # LATIN CAPITAL LETTER U WITH DIAERESIS
    0x00df: 0x00e1,  # LATIN SMALL LETTER SHARP S
    0x00e0: 0x0085,  # LATIN SMALL LETTER A WITH GRAVE
    0x00e1: 0x00a0,  # LATIN SMALL LETTER A WITH ACUTE
    0x00e2: 0x0083,  # LATIN SMALL LETTER A WITH CIRCUMFLEX
    0x00e4: 0x0084,  # LATIN SMALL LETTER A WITH DIAERESIS
    0x00e5: 0x0086,  # LATIN SMALL LETTER A WITH RING ABOVE
    0x00e6: 0x0091,  # LATIN SMALL LIGATURE AE
    0x00e7: 0x0087,  # LATIN SMALL LETTER C WITH CEDILLA
    0x00e8: 0x008a,  # LATIN SMALL LETTER E WITH GRAVE
    0x00e9: 0x0082,  # LATIN SMALL LETTER E WITH ACUTE
    0x00ea: 0x0088,  # LATIN SMALL LETTER E WITH CIRCUMFLEX
    0x00eb: 0x0089,  # LATIN SMALL LETTER E WITH DIAERESIS
    0x00ec: 0x008d,  # LATIN SMALL LETTER I WITH GRAVE
    0x00ed: 0x00a1,  # LATIN SMALL LETTER I WITH ACUTE
    0x00ee: 0x008c,  # LATIN SMALL LETTER I WITH CIRCUMFLEX
    0x00ef: 0x008b,  # LATIN SMALL LETTER I WITH DIAERESIS
    0x00f1: 0x00a4,  # LATIN SMALL LETTER N WITH TILDE
    0x00f2: 0x0095,  # LATIN SMALL LETTER O WITH GRAVE
    0x00f3: 0x00a2,  # LATIN SMALL LETTER O WITH ACUTE
    0x00f4: 0x0093,  # LATIN SMALL LETTER O WITH CIRCUMFLEX
    0x00f6: 0x0094,  # LATIN SMALL LETTER O WITH DIAERESIS
    0x00f7: 0x00f6,  # DIVISION SIGN
    0x00f9: 0x0097,  # LATIN SMALL LETTER U WITH GRAVE
    0x00fa: 0x00a3,  # LATIN SMALL LETTER U WITH ACUTE
    0x00fb: 0x0096,  # LATIN SMALL LETTER U WITH CIRCUMFLEX
    0x00fc: 0x0081,  # LATIN SMALL LETTER U WITH DIAERESIS
    0x00ff: 0x0098,  # LATIN SMALL LETTER Y WITH DIAERESIS
    0x0192: 0x009f,  # LATIN SMALL LETTER F WITH HOOK
    0x0393: 0x00e2,  # GREEK CAPITAL LETTER GAMMA
    0x0398: 0x00e9,  # GREEK CAPITAL LETTER THETA
    0x03a3: 0x00e4,  # GREEK CAPITAL LETTER SIGMA
    0x03a6: 0x00e8,  # GREEK CAPITAL LETTER PHI
    0x03a9: 0x00ea,  # GREEK CAPITAL LETTER OMEGA
    0x03b1: 0x00e0,  # GREEK SMALL LETTER ALPHA
    0x03b4: 0x00eb,  # GREEK SMALL LETTER DELTA
    0x03b5: 0x00ee,  # GREEK SMALL LETTER EPSILON
    0x03c0: 0x00e3,  # GREEK SMALL LETTER PI
    0x03c3: 0x00e5,  # GREEK SMALL LETTER SIGMA
    0x03c4: 0x00e7,  # GREEK SMALL LETTER TAU
    0x03c6: 0x00ed,  # GREEK SMALL LETTER PHI
    0x207f: 0x00fc,  # SUPERSCRIPT LATIN SMALL LETTER N
    0x20a7: 0x009e,  # PESETA SIGN
    0x2219: 0x00f9,  # BULLET OPERATOR
    0x221a: 0x00fb,  # SQUARE ROOT
    0x221e: 0x00ec,  # INFINITY
    0x2229: 0x00ef,  # INTERSECTION
    0x2248: 0x00f7,  # ALMOST EQUAL TO
    0x2261: 0x00f0,  # IDENTICAL TO
    0x2264: 0x00f3,  # LESS-THAN OR EQUAL TO
    0x2265: 0x00f2,  # GREATER-THAN OR EQUAL TO
    0x2310: 0x00a9,  # REVERSED NOT SIGN
    0x2320: 0x00f4,  # TOP HALF INTEGRAL
    0x2321: 0x00f5,  # BOTTOM HALF INTEGRAL
    0x2500: 0x00c4,  # BOX DRAWINGS LIGHT HORIZONTAL
    0x2502: 0x00b3,  # BOX DRAWINGS LIGHT VERTICAL
    0x250c: 0x00da,  # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x2510: 0x00bf,  # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x2514: 0x00c0,  # BOX DRAWINGS LIGHT UP AND RIGHT
    0x2518: 0x00d9,  # BOX DRAWINGS LIGHT UP AND LEFT
    0x251c: 0x00c3,  # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x2524: 0x00b4,  # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x252c: 0x00c2,  # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x2534: 0x00c1,  # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x253c: 0x00c5,  # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x2550: 0x00cd,  # BOX DRAWINGS DOUBLE HORIZONTAL
    0x2551: 0x00ba,  # BOX DRAWINGS DOUBLE VERTICAL
    0x2552: 0x00d5,  # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    0x2553: 0x00d6,  # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    0x2554: 0x00c9,  # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x2555: 0x00b8,  # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    0x2556: 0x00b7,  # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    0x2557: 0x00bb,  # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x2558: 0x00d4,  # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    0x2559: 0x00d3,  # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    0x255a: 0x00c8,  # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x255b: 0x00be,  # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    0x255c: 0x00bd,  # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    0x255d: 0x00bc,  # BOX DRAWINGS DOUBLE UP AND LEFT
    0x255e: 0x00c6,  # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    0x255f: 0x00c7,  # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    0x2560: 0x00cc,  # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x2561: 0x00b5,  # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    0x2562: 0x00b6,  # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    0x2563: 0x00b9,  # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x2564: 0x00d1,  # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    0x2565: 0x00d2,  # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    0x2566: 0x00cb,  # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x2567: 0x00cf,  # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    0x2568: 0x00d0,  # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    0x2569: 0x00ca,  # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x256a: 0x00d8,  # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    0x256b: 0x00d7,  # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    0x256c: 0x00ce,  # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x2580: 0x00df,  # UPPER HALF BLOCK
    0x2584: 0x00dc,  # LOWER HALF BLOCK
    0x2588: 0x00db,  # FULL BLOCK
    0x258c: 0x00dd,  # LEFT HALF BLOCK
    0x2590: 0x00de,  # RIGHT HALF BLOCK
    0x2591: 0x00b0,  # LIGHT SHADE
    0x2592: 0x00b1,  # MEDIUM SHADE
    0x2593: 0x00b2,  # DARK SHADE
    0x25a0: 0x00fe,  # BLACK SQUARE
})
| apache-2.0 |
wbonnet/lffs | toolkit/dft/build_firmware_update.py | 1 | 8151 | #
# The contents of this file are subject to the Apache 2.0 license you may not
# use this file except in compliance with the License.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
#
# Copyright 2016 DFT project (http://www.debianfirmwaretoolkit.org).
# All rights reserved. Use is subject to license terms.
#
# Debian Firmware Toolkit is the new name of Linux Firmware From Scratch
# Copyright 2014 LFFS project (http://www.linuxfirmwarefromscratch.org).
#
#
# Contributors list :
#
# William Bonnet wllmbnnt@gmail.com, wbonnet@theitmakers.com
#
#
""" This modules implements the functionnalities used to create the initramfs in charge of
setting up the firmware in memory at system boot.
"""
import logging
import os
import tarfile
from dft.cli_command import CliCommand
from dft.enumkey import Key
#
# Class BuildFirmwareUpdate
#
class BuildFirmwareUpdate(CliCommand):
  """This class implements the methods needed to create the archives containing
  the firmware update, and all the scripts needed at deployment.
  """

  # -------------------------------------------------------------------------
  #
  # __init__
  #
  # -------------------------------------------------------------------------
  def __init__(self, dft, project):
    """Default constructor
    """

    # Initialize ancestor
    CliCommand.__init__(self, dft, project)

  # -------------------------------------------------------------------------
  #
  # build_update_archive
  #
  # -------------------------------------------------------------------------
  def build_update_archive(self):
    """This method generates the final archive containing the elements of the
    firmware. The main steps :
      . Create a manifest describing the content items (hash values)
      . Create a tar file containing all the data from the content subdirectory
      . Create a detached signature using either gnupg or openssl

    The two generated files are stored under firmware (same level as content).
    """

    # Check that there is a firmware configuration file first
    if self.project.firmware is None:
      self.project.logging.critical("The firmware configuration file is not defined in \
project file")
      exit(1)

    # Check that the target files and directories exist
    if not os.path.isdir(self.project.get_firmware_content_directory()):
      self.project.logging.critical("The firmware directory does not exist. Did you forget to run \
assemble_firmware command before ? Expected directory is " + \
                                    self.project.get_firmware_content_directory())
      exit(1)

    # Create the tar archive
    self.create_main_archive()

    # Sign the main archive
    self.sign_main_archive()

    # And we are done
    return

  # -------------------------------------------------------------------------
  #
  # create_main_archive
  #
  # -------------------------------------------------------------------------
  def create_main_archive(self):
    """This method creates the manifest of the archive (a file listing all the
    files with their checksums). Then it creates the archive to be signed.

    All the files are stored under the firmware directory. In the end only two
    files should be produced: the archive, created by this method, and the
    detached signature, coded in the next method.
    """

    # Output current task to logs
    logging.info("Creating the main archive")

    # Creating the manifest

    # Creating the archive. Its path is the firmware output directory plus the
    # filename defined in the firmware configuration.
    dest_archive = self.project.get_firmware_output_directory()
    dest_archive += "/" + self.project.firmware[Key.CONFIGURATION.value][Key.FILENAME.value]

    # Create the tar itself
    tar = tarfile.open(name=dest_archive, mode='w')

    # Iterate firmware content directory
    for name in os.listdir(self.project.get_firmware_content_directory()):
      # And add each and every file
      filename = self.project.get_firmware_content_directory() + "/" + name
      tar.add(filename, name, recursive=True)

    # Let's close the tar to flush it
    tar.close()
    logging.debug("Archive " + dest_archive + " has been created")

  # -------------------------------------------------------------------------
  #
  # sign_main_archive
  #
  # -------------------------------------------------------------------------
  def sign_main_archive(self):
    """This method does a digital signature of the archive, or a hash (should
    not be used). Depending on configuration, it can use either a hash function
    such as sha1sum, or a signature software such as gnupg or openssl.
    """

    # Output current task to logs
    logging.info("Signing the main archive")

    # Check if signature is activated
    if Key.SECURITY.value in self.project.firmware:
      if Key.SIGNATURE.value in self.project.firmware[Key.SECURITY.value]:
        # Retrieve the signature tool to use
        signing_tool = self.project.firmware[Key.SECURITY.value][Key.SIGNATURE.value]

        # Generate the path to the archive and detached signature file
        dest_archive = self.project.get_firmware_output_directory()
        dest_archive += "/" + self.project.firmware[Key.CONFIGURATION.value][Key.FILENAME.value]
        dest_sign = dest_archive + ".sig"

        # Remove any existing signature
        if os.path.isfile(dest_sign):
          os.remove(dest_sign)
          # Log the file actually removed (the signature, not the archive)
          self.project.logging.info("Existing " + dest_sign + " has been removed")

        # Expected values are empty (means deactivated), gpg2 (or gnupg2), or openssl
        if len(signing_tool) == 0:
          self.project.logging.info("Signature is not activated in the security section of the \
firmware definition file")
        # Are we using a known tool
        elif signing_tool not in [Key.GPG.value, Key.GPG2.value, Key.OPENSSL.value]:
          self.project.logging.critical("Unknown signing tool : " + signing_tool)
          self.project.logging.critical("Valid values are gpg, gpg2, openssl or empty string to \
deactivate signature")
          exit(1)

        # Signing tool is valid, now let's generate the command to do it
        # First case, are we using GnuPG 1 or 2
        if signing_tool == Key.GPG.value or signing_tool == Key.GPG2.value:
          # Now let's prepare the signing command
          command = signing_tool

          # Are we using armor format export ?
          if Key.GPG_ARMOR_SIGNATURE.value in self.project.firmware[Key.SECURITY.value] and \
             self.project.firmware[Key.SECURITY.value][Key.GPG_ARMOR_SIGNATURE.value]:
            # Yes, let's append --armor to the command
            command += " --armor"

          command += " --output " + dest_sign + " --detach-sig " + dest_archive
          self.execute_command(command)
          self.project.logging.info(dest_archive + " has been created and signed successfully")

          # Update archive has been signed, let's verify signature before finishing
          command = signing_tool + " --verify " + dest_sign + " " + dest_archive
          self.execute_command(command)
          #TODO : add test case
          self.project.logging.info(dest_sign + " has been verified successfully")

        # Or is it OpenSSL ?
        elif signing_tool == Key.OPENSSL.value:
          # TODO OpenSSL support
          self.project.logging.critical("OpenSSL is not yet supported for firmware signature")
          self.project.logging.critical("Please use GnuPG until support is available")
          exit(1)
      else:
        self.project.logging.info("Signature is not activated in the security section of the \
firmware definition file")
    else:
      self.project.logging.error("The firmware definition file does not include a security section")
      self.project.logging.error("Unable to create signature file. You should add security.")
| apache-2.0 |
slevenhagen/odoo | openerp/addons/base/ir/ir_translation.py | 117 | 24047 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp import tools
import openerp.modules
from openerp.osv import fields, osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
# Selection values for ir.translation's ``type`` column: pairs of
# (technical value, human-readable label) describing where a translated
# term originates (model field, view, report, source code, ...).
TRANSLATION_TYPE = [
    ('field', 'Field'),
    ('model', 'Object'),
    ('rml', 'RML (deprecated - use Report)'), # Pending deprecation - to be replaced by report!
    ('report', 'Report/Template'),
    ('selection', 'Selection'),
    ('view', 'View'),
    ('wizard_button', 'Wizard Button'),
    ('wizard_field', 'Wizard Field'),
    ('wizard_view', 'Wizard View'),
    ('xsl', 'XSL'),
    ('help', 'Help'),
    ('code', 'Code'),
    ('constraint', 'Constraint'),
    ('sql_constraint', 'SQL Constraint')
]
class ir_translation_import_cursor(object):
    """Temporary cursor for optimizing mass insert into ir.translation

    Open it (attached to a sql cursor), feed it with translation data via
    push() and finish() it in order to insert multiple translations in a
    single batch.
    """
    # Name of the TEMP staging table; created in __init__ and dropped in finish().
    _table_name = 'tmp_ir_translation_import'
    def __init__(self, cr, uid, parent, context):
        """ Initializer

        Store some values, and also create a temporary SQL table to accept
        the data.
        @param parent an instance of ir.translation ORM model
        """
        self._cr = cr
        self._uid = uid
        self._context = context
        # When truthy (taken from context['overwrite']), finish() also updates
        # existing matching translations instead of only inserting new ones.
        self._overwrite = context.get('overwrite', False)
        self._debug = False
        self._parent_table = parent._table
        # Note that Postgres will NOT inherit the constraints or indexes
        # of ir_translation, so this copy will be much faster.
        # The two extra columns hold the ir.model.data reference used by
        # finish() to resolve res_id values.
        cr.execute('''CREATE TEMP TABLE %s(
            imd_model VARCHAR(64),
            imd_name VARCHAR(128)
            ) INHERITS (%s) ''' % (self._table_name, self._parent_table))
    def push(self, trans_dict):
        """Feed a translation, as a dictionary, into the cursor

        The row is written into the TEMP table only; nothing reaches
        ir.translation until finish() is called.
        """
        params = dict(trans_dict, state="translated" if trans_dict['value'] else "to_translate")
        if params['type'] == 'view':
            # ugly hack for QWeb views - pending refactoring of translations in master
            if params['imd_model'] == 'website':
                params['imd_model'] = "ir.ui.view"
            # non-QWeb views do not need a matching res_id -> force to 0 to avoid dropping them
            elif params['res_id'] is None:
                params['res_id'] = 0
        self._cr.execute("""INSERT INTO %s (name, lang, res_id, src, type, imd_model, module, imd_name, value, state, comments)
                            VALUES (%%(name)s, %%(lang)s, %%(res_id)s, %%(src)s, %%(type)s, %%(imd_model)s, %%(module)s,
                                    %%(imd_name)s, %%(value)s, %%(state)s, %%(comments)s)""" % self._table_name,
                         params)
    def finish(self):
        """ Transfer the data from the temp table to ir.translation

        Resolves ir.model.data references, optionally overwrites existing
        matching translations, inserts the new ones and drops the TEMP table.
        """
        cr = self._cr
        if self._debug:
            cr.execute("SELECT count(*) FROM %s" % self._table_name)
            c = cr.fetchone()[0]
            _logger.debug("ir.translation.cursor: We have %d entries to process", c)
        # Step 1: resolve ir.model.data references to res_ids
        cr.execute("""UPDATE %s AS ti
            SET res_id = imd.res_id
            FROM ir_model_data AS imd
            WHERE ti.res_id IS NULL
                AND ti.module IS NOT NULL AND ti.imd_name IS NOT NULL
                AND ti.module = imd.module AND ti.imd_name = imd.name
                AND ti.imd_model = imd.model; """ % self._table_name)
        if self._debug:
            cr.execute("SELECT module, imd_name, imd_model FROM %s " \
                "WHERE res_id IS NULL AND module IS NOT NULL" % self._table_name)
            for row in cr.fetchall():
                _logger.info("ir.translation.cursor: missing res_id for %s.%s <%s> ", *row)
        # Records w/o res_id must _not_ be inserted into our db, because they are
        # referencing non-existent data.
        cr.execute("DELETE FROM %s WHERE res_id IS NULL AND module IS NOT NULL" % self._table_name)
        # Match condition shared by the UPDATE (step 2) and the INSERT (step 3):
        # a temp row "matches" an existing translation on lang/type/module/name,
        # plus src for non field/help terms, plus res_id rules per type.
        find_expr = """
                irt.lang = ti.lang
            AND irt.type = ti.type
            AND irt.module = ti.module
            AND irt.name = ti.name
            AND (ti.type IN ('field', 'help') OR irt.src = ti.src)
            AND (    ti.type NOT IN ('model', 'view')
                  OR (ti.type = 'model' AND ti.res_id = irt.res_id)
                  OR (ti.type = 'view' AND (irt.res_id IS NULL OR ti.res_id = irt.res_id))
                )
        """
        # Step 2: update existing (matching) translations
        if self._overwrite:
            cr.execute("""UPDATE ONLY %s AS irt
                SET value = ti.value,
                src = ti.src,
                state = 'translated'
                FROM %s AS ti
                WHERE %s AND ti.value IS NOT NULL AND ti.value != ''
                """ % (self._parent_table, self._table_name, find_expr))
        # Step 3: insert new translations
        cr.execute("""INSERT INTO %s(name, lang, res_id, src, type, value, module, state, comments)
            SELECT name, lang, res_id, src, type, value, module, state, comments
            FROM %s AS ti
            WHERE NOT EXISTS(SELECT 1 FROM ONLY %s AS irt WHERE %s);
            """ % (self._parent_table, self._table_name, self._parent_table, find_expr))
        if self._debug:
            cr.execute('SELECT COUNT(*) FROM ONLY %s' % self._parent_table)
            c1 = cr.fetchone()[0]
            cr.execute('SELECT COUNT(*) FROM ONLY %s AS irt, %s AS ti WHERE %s' % \
                (self._parent_table, self._table_name, find_expr))
            c = cr.fetchone()[0]
            _logger.debug("ir.translation.cursor: %d entries now in ir.translation, %d common entries with tmp", c1, c)
        # Step 4: cleanup
        cr.execute("DROP TABLE %s" % self._table_name)
        return True
class ir_translation(osv.osv):
_name = "ir.translation"
_log_access = False
def _get_language(self, cr, uid, context):
lang_model = self.pool.get('res.lang')
lang_ids = lang_model.search(cr, uid, [('translatable', '=', True)], context=context)
lang_data = lang_model.read(cr, uid, lang_ids, ['code', 'name'], context=context)
return [(d['code'], d['name']) for d in lang_data]
def _get_src(self, cr, uid, ids, name, arg, context=None):
''' Get source name for the translation. If object type is model then
return the value store in db. Otherwise return value store in src field
'''
if context is None:
context = {}
res = dict.fromkeys(ids, False)
for record in self.browse(cr, uid, ids, context=context):
if record.type != 'model':
res[record.id] = record.src
else:
model_name, field = record.name.split(',')
model = self.pool.get(model_name)
if model is not None:
# Pass context without lang, need to read real stored field, not translation
context_no_lang = dict(context, lang=None)
result = model.read(cr, uid, [record.res_id], [field], context=context_no_lang)
res[record.id] = result[0][field] if result else False
return res
    def _set_src(self, cr, uid, id, name, value, args, context=None):
        ''' When changing source term of a translation, change its value in db for
        the associated object, and the src field
        '''
        if context is None:
            context = {}
        record = self.browse(cr, uid, id, context=context)
        if record.type == 'model':
            model_name, field = record.name.split(',')
            model = self.pool.get(model_name)
            # Write with the language stripped from the context: we want to
            # update the value stored in the DB, not the one associated with
            # the current language. Keeping 'lang' in the context would also
            # trigger an error when the language differs.
            context_wo_lang = context.copy()
            context_wo_lang.pop('lang', None)
            model.write(cr, uid, [record.res_id], {field: value}, context=context_wo_lang)
        # Always keep the 'src' column in sync as well.
        return self.write(cr, uid, id, {'src': value}, context=context)
_columns = {
'name': fields.char('Translated field', required=True),
'res_id': fields.integer('Record ID', select=True),
'lang': fields.selection(_get_language, string='Language'),
'type': fields.selection(TRANSLATION_TYPE, string='Type', select=True),
'src': fields.text('Old source'),
'source': fields.function(_get_src, fnct_inv=_set_src, type='text', string='Source'),
'value': fields.text('Translation Value'),
'module': fields.char('Module', help="Module this term belongs to", select=True),
'state': fields.selection(
[('to_translate','To Translate'),
('inprogress','Translation in Progress'),
('translated','Translated')],
string="Status",
help="Automatically set to let administators find new terms that might need to be translated"),
# aka gettext extracted-comments - we use them to flag openerp-web translation
# cfr: http://www.gnu.org/savannah-checkouts/gnu/gettext/manual/html_node/PO-Files.html
'comments': fields.text('Translation comments', select=True),
}
_defaults = {
'state': 'to_translate',
}
_sql_constraints = [ ('lang_fkey_res_lang', 'FOREIGN KEY(lang) REFERENCES res_lang(code)',
'Language code of translation item must be among known languages' ), ]
    def _auto_init(self, cr, context=None):
        """Create/upgrade the table schema, then manage the custom indexes
        on ir_translation (drop obsolete btree indexes, add hash/btree ones).
        """
        super(ir_translation, self)._auto_init(cr, context)
        # FIXME: there is a size limit on btree indexed values so we can't index src column with normal btree.
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_translation_ltns',))
        if cr.fetchone():
            #temporarily removed: cr.execute('CREATE INDEX ir_translation_ltns ON ir_translation (name, lang, type, src)')
            cr.execute('DROP INDEX ir_translation_ltns')
            cr.commit()
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_translation_lts',))
        if cr.fetchone():
            #temporarily removed: cr.execute('CREATE INDEX ir_translation_lts ON ir_translation (lang, type, src)')
            cr.execute('DROP INDEX ir_translation_lts')
            cr.commit()
        # add separate hash index on src (no size limit on values), as postgres 8.1+ is able to combine separate indexes
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_translation_src_hash_idx',))
        if not cr.fetchone():
            cr.execute('CREATE INDEX ir_translation_src_hash_idx ON ir_translation using hash (src)')
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_translation_ltn',))
        if not cr.fetchone():
            cr.execute('CREATE INDEX ir_translation_ltn ON ir_translation (name, lang, type)')
            cr.commit()
def _check_selection_field_value(self, cr, uid, field, value, context=None):
if field == 'lang':
return
return super(ir_translation, self)._check_selection_field_value(cr, uid, field, value, context=context)
    @tools.ormcache_multi(skiparg=3, multi=6)
    def _get_ids(self, cr, uid, name, tt, lang, ids):
        """Return {res_id: value or False} translations for (name, tt, lang).
        Cached per-id via ormcache_multi."""
        translations = dict.fromkeys(ids, False)
        if ids:
            cr.execute('select res_id,value '
                       'from ir_translation '
                       'where lang=%s '
                       'and type=%s '
                       'and name=%s '
                       'and res_id IN %s',
                       (lang,tt,name,tuple(ids)))
            for res_id, value in cr.fetchall():
                translations[res_id] = value
        return translations
    def _set_ids(self, cr, uid, name, tt, lang, ids, value, src=None):
        """Store translation `value` for (name, tt, lang) on every id in
        `ids`, creating the rows that do not exist yet. Returns len(ids)."""
        # Invalidate both lookup caches before touching the table.
        self._get_ids.clear_cache(self)
        self.__get_source.clear_cache(self)
        # Update existing translation records in one statement.
        cr.execute('update ir_translation '
                   'set value=%s '
                   ', src=%s '
                   ', state=%s '
                   'where lang=%s '
                   'and type=%s '
                   'and name=%s '
                   'and res_id IN %s '
                   'returning res_id',
                   (value,src,'translated',lang,tt,name,tuple(ids),))
        existing_ids = [x[0] for x in cr.fetchall()]
        # Create the records that were not touched by the UPDATE.
        for id in list(set(ids) - set(existing_ids)):
            self.create(cr, uid, {
                'lang':lang,
                'type':tt,
                'name':name,
                'res_id':id,
                'value':value,
                'src':src,
                'state':'translated'
            })
        return len(ids)
    def _get_source_query(self, cr, uid, name, types, lang, source, res_id):
        """Build the (query, params) pair used by __get_source."""
        if source:
            # Look up by source term; name/res_id narrow the match further.
            query = """SELECT value
                       FROM ir_translation
                       WHERE lang=%s
                       AND type in %s
                       AND src=%s"""
            params = (lang or '', types, tools.ustr(source))
            if res_id:
                query += " AND res_id in %s"
                params += (res_id,)
            if name:
                query += " AND name=%s"
                params += (tools.ustr(name),)
        else:
            # No source term: look up purely by (lang, type, name).
            query = """SELECT value
                       FROM ir_translation
                       WHERE lang=%s
                       AND type in %s
                       AND name=%s"""
            params = (lang or '', types, tools.ustr(name))
        return (query, params)
    @tools.ormcache(skiparg=3)
    def __get_source(self, cr, uid, name, types, lang, source, res_id):
        """Cached translation lookup; see _get_source for the public API."""
        # res_id is a tuple or None, otherwise ormcache cannot cache it!
        query, params = self._get_source_query(cr, uid, name, types, lang, source, res_id)
        cr.execute(query, params)
        res = cr.fetchone()
        trad = res and res[0] or u''
        if source and not trad:
            # No translation found: fall back to the source term itself.
            return tools.ustr(source)
        return trad
    def _get_source(self, cr, uid, name, types, lang, source=None, res_id=None):
        """
        Returns the translation for the given combination of name, type, language
        and source. All values passed to this method should be unicode (not byte strings),
        especially ``source``.

        :param name: identification of the term to translate, such as field name (optional if source is passed)
        :param types: single string defining type of term to translate (see ``type`` field on ir.translation), or sequence of allowed types (strings)
        :param lang: language code of the desired translation
        :param source: optional source term to translate (should be unicode)
        :param res_id: optional resource id or a list of ids to translate (if used, ``source`` should be set)
        :rtype: unicode
        :return: the request translation, or an empty unicode string if no translation was
                 found and `source` was not passed
        """
        # FIXME: should assert that `source` is unicode and fix all callers to always pass unicode
        # so we can remove the string encoding/decoding.
        if not lang:
            return tools.ustr(source or '')
        if isinstance(types, basestring):
            types = (types,)
        if res_id:
            # Normalize to a tuple: __get_source's ormcache needs hashable args.
            if isinstance(res_id, (int, long)):
                res_id = (res_id,)
            else:
                res_id = tuple(res_id)
        return self.__get_source(cr, uid, name, types, lang, source, res_id)
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
ids = super(ir_translation, self).create(cr, uid, vals, context=context)
self.__get_source.clear_cache(self)
self._get_ids.clear_cache(self)
self.pool['ir.ui.view'].clear_cache()
return ids
    def write(self, cursor, user, ids, vals, context=None):
        """Write translations, keeping 'state' in sync with src/value changes,
        then flush translation caches."""
        if context is None:
            context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        # Changing the source, or clearing the value, re-opens the term...
        if vals.get('src') or ('value' in vals and not(vals.get('value'))):
            vals.update({'state':'to_translate'})
        # ...while setting a non-empty value marks it translated (this wins
        # when both src and value are written together).
        if vals.get('value'):
            vals.update({'state':'translated'})
        result = super(ir_translation, self).write(cursor, user, ids, vals, context=context)
        self.__get_source.clear_cache(self)
        self._get_ids.clear_cache(self)
        self.pool['ir.ui.view'].clear_cache()
        return result
def unlink(self, cursor, user, ids, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
self.__get_source.clear_cache(self)
self._get_ids.clear_cache(self)
result = super(ir_translation, self).unlink(cursor, user, ids, context=context)
return result
def translate_fields(self, cr, uid, model, id, field=None, context=None):
trans_model = self.pool[model]
domain = ['&', ('res_id', '=', id), ('name', '=like', model + ',%')]
langs_ids = self.pool.get('res.lang').search(cr, uid, [('code', '!=', 'en_US')], context=context)
if not langs_ids:
raise osv.except_osv(_('Error'), _("Translation features are unavailable until you install an extra OpenERP translation."))
langs = [lg.code for lg in self.pool.get('res.lang').browse(cr, uid, langs_ids, context=context)]
main_lang = 'en_US'
translatable_fields = []
for k, f in trans_model._fields.items():
if getattr(f, 'translate', False):
if f.inherited:
parent_id = trans_model.read(cr, uid, [id], [f.related[0]], context=context)[0][f.related[0]][0]
translatable_fields.append({'name': k, 'id': parent_id, 'model': f.base_field.model_name})
domain.insert(0, '|')
domain.extend(['&', ('res_id', '=', parent_id), ('name', '=', "%s,%s" % (f.base_field.model_name, k))])
else:
translatable_fields.append({'name': k, 'id': id, 'model': model })
if len(langs):
fields = [f.get('name') for f in translatable_fields]
record = trans_model.read(cr, uid, [id], fields, context={ 'lang': main_lang })[0]
for lg in langs:
for f in translatable_fields:
# Check if record exists, else create it (at once)
sql = """INSERT INTO ir_translation (lang, src, name, type, res_id, value)
SELECT %s, %s, %s, 'model', %s, %s WHERE NOT EXISTS
(SELECT 1 FROM ir_translation WHERE lang=%s AND name=%s AND res_id=%s AND type='model');
UPDATE ir_translation SET src = %s WHERE lang=%s AND name=%s AND res_id=%s AND type='model';
"""
src = record[f['name']] or None
name = "%s,%s" % (f['model'], f['name'])
cr.execute(sql, (lg, src , name, f['id'], src, lg, name, f['id'], src, lg, name, id))
action = {
'name': 'Translate',
'res_model': 'ir.translation',
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'tree,form',
'domain': domain,
}
if field:
f = trans_model._fields[field]
action['context'] = {
'search_default_name': "%s,%s" % (f.base_field.model_name, field)
}
return action
    def _get_import_cursor(self, cr, uid, context=None):
        """ Return a cursor-like object for fast inserting translations
        """
        # Thin factory around the import-cursor helper class defined above.
        return ir_translation_import_cursor(cr, uid, self, context=context)
    def load_module_terms(self, cr, modules, langs, context=None):
        """Load the PO translation files of every module in `modules` for
        every language in `langs` into the database."""
        context = dict(context or {}) # local copy
        for module_name in modules:
            modpath = openerp.modules.get_module_path(module_name)
            if not modpath:
                # Module not found on disk: nothing to load.
                continue
            for lang in langs:
                lang_code = tools.get_iso_codes(lang)
                base_lang_code = None
                if '_' in lang_code:
                    base_lang_code = lang_code.split('_')[0]
                # Step 1: for sub-languages, load base language first (e.g. es_CL.po is loaded over es.po)
                if base_lang_code:
                    base_trans_file = openerp.modules.get_module_resource(module_name, 'i18n', base_lang_code + '.po')
                    if base_trans_file:
                        _logger.info('module %s: loading base translation file %s for language %s', module_name, base_lang_code, lang)
                        tools.trans_load(cr, base_trans_file, lang, verbose=False, module_name=module_name, context=context)
                        context['overwrite'] = True # make sure the requested translation will override the base terms later
                    # i18n_extra folder is for additional translations handle manually (eg: for l10n_be)
                    base_trans_extra_file = openerp.modules.get_module_resource(module_name, 'i18n_extra', base_lang_code + '.po')
                    if base_trans_extra_file:
                        _logger.info('module %s: loading extra base translation file %s for language %s', module_name, base_lang_code, lang)
                        tools.trans_load(cr, base_trans_extra_file, lang, verbose=False, module_name=module_name, context=context)
                        context['overwrite'] = True # make sure the requested translation will override the base terms later
                # Step 2: then load the main translation file, possibly overriding the terms coming from the base language
                trans_file = openerp.modules.get_module_resource(module_name, 'i18n', lang_code + '.po')
                if trans_file:
                    _logger.info('module %s: loading translation file (%s) for language %s', module_name, lang_code, lang)
                    tools.trans_load(cr, trans_file, lang, verbose=False, module_name=module_name, context=context)
                elif lang_code != 'en_US':
                    _logger.warning('module %s: no translation for language %s', module_name, lang_code)
                trans_extra_file = openerp.modules.get_module_resource(module_name, 'i18n_extra', lang_code + '.po')
                if trans_extra_file:
                    _logger.info('module %s: loading extra translation file (%s) for language %s', module_name, lang_code, lang)
                    tools.trans_load(cr, trans_extra_file, lang, verbose=False, module_name=module_name, context=context)
        return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fiji-flo/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/treeadapters/sax.py | 1835 | 1661 | from __future__ import absolute_import, division, unicode_literals
from xml.sax.xmlreader import AttributesNSImpl
from ..constants import adjustForeignAttributes, unadjustForeignAttributes
# Map each foreign-attribute prefix to its namespace URI so the prefixes can
# be registered with the SAX handler up front (localName is irrelevant here).
prefix_mapping = {prefix: namespace
                  for prefix, localName, namespace
                  in adjustForeignAttributes.values()
                  if prefix is not None}
def to_sax(walker, handler):
    """Call SAX-like content handler based on treewalker walker"""
    handler.startDocument()
    for prefix, namespace in prefix_mapping.items():
        handler.startPrefixMapping(prefix, namespace)

    for token in walker:
        token_type = token["type"]
        if token_type == "Doctype":
            # ContentHandler has no doctype event; skip the token.
            continue
        if token_type in ("StartTag", "EmptyTag"):
            qname = (token["namespace"], token["name"])
            attrs = AttributesNSImpl(token["data"], unadjustForeignAttributes)
            handler.startElementNS(qname, token["name"], attrs)
            if token_type == "EmptyTag":
                # Empty elements fire start and end back-to-back.
                handler.endElementNS(qname, token["name"])
        elif token_type == "EndTag":
            handler.endElementNS((token["namespace"], token["name"]),
                                 token["name"])
        elif token_type in ("Characters", "SpaceCharacters"):
            handler.characters(token["data"])
        elif token_type == "Comment":
            pass
        else:
            assert False, "Unknown token type"

    for prefix in prefix_mapping:
        handler.endPrefixMapping(prefix)
    handler.endDocument()
| mpl-2.0 |
vthirumalai/cinder-violin-driver-juno | cinder/tests/fake_vmem_xgtools_client.py | 2 | 1899 | # Copyright 2014 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fake VMEM XG-Tools client for testing drivers. Inspired by
cinder/tests/fake_hp_3par_client.py.
"""
import mock
import sys
# The following gymnastics to fake an exception class globally is done because
# we want to globally model and make available certain exceptions. If we do
# not do this, then the real-driver's import will not see our fakes.
class NoMatchingObjectIdError(Exception):
    """Fake exception exposed as vmemclient.core.error.NoMatchingObjectIdError
    by the module-level mock wiring below, so real-driver imports see it."""
    pass
# Build a fake 'vmemclient' package: a 'core' submodule exposing the fake
# error class, then register the whole thing in sys.modules so that any
# later `import vmemclient` resolves to this mock.
error = mock.Mock()
error.NoMatchingObjectIdError = NoMatchingObjectIdError
core = mock.Mock()
core.attach_mock(error, 'error')
vmemclient = mock.Mock()
vmemclient.__version__ = "unknown"
vmemclient.attach_mock(core, 'core')
sys.modules['vmemclient'] = vmemclient
# Attribute/method paths of the vmemclient API surface touched by the driver.
# NOTE(review): presumably consumed by the driver tests to configure mock
# client objects — confirm against the corresponding test modules.
mock_client_conf = [
    'basic',
    'basic.login',
    'basic.get_node_values',
    'basic.save_config',
    'lun',
    'lun.export_lun',
    'lun.unexport_lun',
    'snapshot',
    'snapshot.export_lun_snapshot',
    'snapshot.unexport_lun_snapshot',
    'iscsi',
    'iscsi.bind_ip_to_target',
    'iscsi.create_iscsi_target',
    'iscsi.delete_iscsi_target',
    'igroup',
    'client',
    'client.get_client_info',
    'adapter',
    'adapter.get_fc_info',
    'pool',
    'pool.select_storage_pool',
    'pool.get_storage_pools',
    'utility',
    'utility.is_external_head',
]
| apache-2.0 |
badlogicmanpreet/nupic | examples/opf/clients/hotgym/anomaly/one_gym/nupic_anomaly_output.py | 49 | 9450 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Provides two classes with the same signature for writing data out of NuPIC
models.
(This is a component of the One Hot Gym Anomaly Tutorial.)
"""
import csv
from collections import deque
from abc import ABCMeta, abstractmethod
from nupic.algorithms import anomaly_likelihood
# Try to import matplotlib, but we don't have to.
try:
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.dates import date2num, DateFormatter
except ImportError:
pass
# Number of most-recent data points kept in the plot's sliding window.
WINDOW = 300
# Transparency of the highlight bands drawn over the charts.
HIGHLIGHT_ALPHA = 0.3
ANOMALY_HIGHLIGHT_COLOR = 'red'
WEEKEND_HIGHLIGHT_COLOR = 'yellow'
# Anomaly likelihood at or above this value is highlighted as anomalous.
ANOMALY_THRESHOLD = 0.9
class NuPICOutput(object):
  """Abstract base for model-output sinks; subclasses implement write/close."""

  __metaclass__ = ABCMeta

  def __init__(self, name):
    self.name = name
    # Converts raw anomaly scores into anomaly likelihoods over time.
    self.anomalyLikelihoodHelper = anomaly_likelihood.AnomalyLikelihood()

  @abstractmethod
  def write(self, timestamp, value, predicted, anomalyScore):
    pass

  @abstractmethod
  def close(self):
    pass
class NuPICFileOutput(NuPICOutput):
  """Writes each model output row (plus anomaly likelihood) to a CSV file
  named '<name>_out.csv'."""

  def __init__(self, *args, **kwargs):
    super(NuPICFileOutput, self).__init__(*args, **kwargs)
    self.outputFiles = []
    self.outputWriters = []
    self.lineCount = 0
    headerRow = [
      'timestamp', 'kw_energy_consumption', 'prediction',
      'anomaly_score', 'anomaly_likelihood'
    ]
    outputFileName = "%s_out.csv" % self.name
    print "Preparing to output %s data to %s" % (self.name, outputFileName)
    self.outputFile = open(outputFileName, "w")
    self.outputWriter = csv.writer(self.outputFile)
    self.outputWriter.writerow(headerRow)

  def write(self, timestamp, value, predicted, anomalyScore):
    # Rows without a timestamp are skipped entirely.
    if timestamp is not None:
      anomalyLikelihood = self.anomalyLikelihoodHelper.anomalyProbability(
        value, anomalyScore, timestamp
      )
      outputRow = [timestamp, value, predicted, anomalyScore, anomalyLikelihood]
      self.outputWriter.writerow(outputRow)
      self.lineCount += 1

  def close(self):
    self.outputFile.close()
    print "Done. Wrote %i data lines to %s." % (self.lineCount, self.name)
def extractWeekendHighlights(dates):
  """Return (startIndex, stopIndex, color, alpha) spans covering the runs of
  weekend dates in `dates` (stopIndex is the first non-weekend index, except
  for a trailing run, which is capped at len(dates) - 1)."""
  highlights = []
  runStart = None
  for index, day in enumerate(dates):
    isWeekend = day.weekday() in (5, 6)
    if isWeekend and runStart is None:
      # Entering a weekend run.
      runStart = index
    elif not isWeekend and runStart is not None:
      # Leaving a weekend run.
      highlights.append(
        (runStart, index, WEEKEND_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA))
      runStart = None
  if runStart is not None:
    # Still inside a weekend at the end of the data.
    highlights.append(
      (runStart, len(dates) - 1, WEEKEND_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA))
  return highlights
def extractAnomalyIndices(anomalyLikelihood):
  """Return (startIndex, stopIndex, color, alpha) spans covering runs where
  the likelihood is >= ANOMALY_THRESHOLD (a trailing run is capped at the
  last index)."""
  spans = []
  runStart = None
  for index, likelihood in enumerate(anomalyLikelihood):
    isAnomalous = likelihood >= ANOMALY_THRESHOLD
    if isAnomalous and runStart is None:
      # Entering an anomalous run.
      runStart = index
    elif not isAnomalous and runStart is not None:
      # Leaving an anomalous run.
      spans.append(
        (runStart, index, ANOMALY_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA))
      runStart = None
  if runStart is not None:
    # Still anomalous at the end of the data.
    spans.append(
      (runStart, len(anomalyLikelihood) - 1,
       ANOMALY_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA))
  return spans
class NuPICPlotOutput(NuPICOutput):
  """Streams model output into a live matplotlib window: a main chart
  (actual vs. predicted) and an anomaly chart (score and likelihood), both
  over a sliding window of WINDOW points, with weekend/anomaly highlights."""

  def __init__(self, *args, **kwargs):
    super(NuPICPlotOutput, self).__init__(*args, **kwargs)
    # Turn matplotlib interactive mode on.
    plt.ion()
    self.dates = []
    self.convertedDates = []
    self.value = []
    self.allValues = []          # full history, used for y-axis scaling
    self.predicted = []
    self.anomalyScore = []
    self.anomalyLikelihood = []
    self.actualLine = None
    self.predictedLine = None
    self.anomalyScoreLine = None
    self.anomalyLikelihoodLine = None
    self.linesInitialized = False  # deferred until the first timestamp arrives
    self._chartHighlights = []
    fig = plt.figure(figsize=(16, 10))
    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
    self._mainGraph = fig.add_subplot(gs[0, 0])
    plt.title(self.name)
    plt.ylabel('KW Energy Consumption')
    plt.xlabel('Date')
    self._anomalyGraph = fig.add_subplot(gs[1])
    plt.ylabel('Percentage')
    plt.xlabel('Date')
    # Maximizes window
    mng = plt.get_current_fig_manager()
    mng.resize(*mng.window.maxsize())
    plt.tight_layout()

  def initializeLines(self, timestamp):
    # Build the line artists once the first timestamp is known, so the
    # x-axis starts at the right value.
    print "initializing %s" % self.name
    anomalyRange = (0.0, 1.0)
    self.dates = deque([timestamp] * WINDOW, maxlen=WINDOW)
    self.convertedDates = deque(
      [date2num(date) for date in self.dates], maxlen=WINDOW
    )
    self.value = deque([0.0] * WINDOW, maxlen=WINDOW)
    self.predicted = deque([0.0] * WINDOW, maxlen=WINDOW)
    self.anomalyScore = deque([0.0] * WINDOW, maxlen=WINDOW)
    self.anomalyLikelihood = deque([0.0] * WINDOW, maxlen=WINDOW)
    actualPlot, = self._mainGraph.plot(self.dates, self.value)
    self.actualLine = actualPlot
    predictedPlot, = self._mainGraph.plot(self.dates, self.predicted)
    self.predictedLine = predictedPlot
    self._mainGraph.legend(tuple(['actual', 'predicted']), loc=3)
    anomalyScorePlot, = self._anomalyGraph.plot(
      self.dates, self.anomalyScore, 'm'
    )
    anomalyScorePlot.axes.set_ylim(anomalyRange)
    self.anomalyScoreLine = anomalyScorePlot
    # NOTE(review): likelihood line is seeded with anomalyScore data; real
    # likelihood values replace it on every write() — confirm intentional.
    anomalyLikelihoodPlot, = self._anomalyGraph.plot(
      self.dates, self.anomalyScore, 'r'
    )
    anomalyLikelihoodPlot.axes.set_ylim(anomalyRange)
    self.anomalyLikelihoodLine = anomalyLikelihoodPlot
    self._anomalyGraph.legend(
      tuple(['anomaly score', 'anomaly likelihood']), loc=3
    )
    dateFormatter = DateFormatter('%m/%d %H:%M')
    self._mainGraph.xaxis.set_major_formatter(dateFormatter)
    self._anomalyGraph.xaxis.set_major_formatter(dateFormatter)
    self._mainGraph.relim()
    self._mainGraph.autoscale_view(True, True, True)
    self.linesInitialized = True

  def highlightChart(self, highlights, chart):
    # Draw translucent vertical bands over the given chart.
    for highlight in highlights:
      # Each highlight contains [start-index, stop-index, color, alpha]
      self._chartHighlights.append(chart.axvspan(
        self.convertedDates[highlight[0]], self.convertedDates[highlight[1]],
        color=highlight[2], alpha=highlight[3]
      ))

  def write(self, timestamp, value, predicted, anomalyScore):
    # We need the first timestamp to initialize the lines at the right X value,
    # so do that check first.
    if not self.linesInitialized:
      self.initializeLines(timestamp)
    anomalyLikelihood = self.anomalyLikelihoodHelper.anomalyProbability(
      value, anomalyScore, timestamp
    )
    self.dates.append(timestamp)
    self.convertedDates.append(date2num(timestamp))
    self.value.append(value)
    self.allValues.append(value)
    self.predicted.append(predicted)
    self.anomalyScore.append(anomalyScore)
    self.anomalyLikelihood.append(anomalyLikelihood)
    # Update main chart data
    self.actualLine.set_xdata(self.convertedDates)
    self.actualLine.set_ydata(self.value)
    self.predictedLine.set_xdata(self.convertedDates)
    self.predictedLine.set_ydata(self.predicted)
    # Update anomaly chart data
    self.anomalyScoreLine.set_xdata(self.convertedDates)
    self.anomalyScoreLine.set_ydata(self.anomalyScore)
    self.anomalyLikelihoodLine.set_xdata(self.convertedDates)
    self.anomalyLikelihoodLine.set_ydata(self.anomalyLikelihood)
    # Remove previous highlighted regions
    for poly in self._chartHighlights:
      poly.remove()
    self._chartHighlights = []
    weekends = extractWeekendHighlights(self.dates)
    anomalies = extractAnomalyIndices(self.anomalyLikelihood)
    # Highlight weekends in main chart
    self.highlightChart(weekends, self._mainGraph)
    # Highlight anomalies in anomaly chart
    self.highlightChart(anomalies, self._anomalyGraph)
    # Keep a 2% headroom above the largest value ever seen.
    maxValue = max(self.allValues)
    self._mainGraph.relim()
    self._mainGraph.axes.set_ylim(0, maxValue + (maxValue * 0.02))
    self._mainGraph.relim()
    self._mainGraph.autoscale_view(True, scaley=False)
    self._anomalyGraph.relim()
    self._anomalyGraph.autoscale_view(True, True, True)
    plt.draw()

  def close(self):
    # Leave interactive mode and block on the final figure.
    plt.ioff()
    plt.show()
# Register the concrete outputs with the ABC (they already subclass
# NuPICOutput, so this is a redundant but harmless safeguard).
NuPICOutput.register(NuPICFileOutput)
NuPICOutput.register(NuPICPlotOutput)
| agpl-3.0 |
wd5/jangr | django/db/backends/creation.py | 47 | 22033 | import sys
import time
from django.conf import settings
# The prefix to put on the default database name when creating
# the test database.
TEST_DATABASE_PREFIX = 'test_'
class BaseDatabaseCreation(object):
    """
    This class encapsulates all backend-specific differences that pertain to
    database *creation*, such as the column types to use for particular Django
    Fields, the SQL used to create and destroy tables, and the creation and
    destruction of test databases.
    """
    # Mapping of Django Field internal types to backend column types;
    # populated by each concrete backend.
    data_types = {}

    def __init__(self, connection):
        # The DatabaseWrapper this creation object generates SQL for.
        self.connection = connection
    def _digest(self, *args):
        """
        Generates a 32-bit digest of a set of arguments that can be used to
        shorten identifying names.
        """
        # NOTE(review): depends on Python 2 hash() of the args tuple, so the
        # digest is only stable within a given interpreter configuration.
        return '%x' % (abs(hash(args)) % 4294967296L) # 2**32
def sql_create_model(self, model, style, known_models=set()):
"""
Returns the SQL required to create a single model, as a tuple of:
(list_of_sql, pending_references_dict)
"""
opts = model._meta
if not opts.managed or opts.proxy:
return [], {}
final_output = []
table_output = []
pending_references = {}
qn = self.connection.ops.quote_name
for f in opts.local_fields:
col_type = f.db_type(connection=self.connection)
tablespace = f.db_tablespace or opts.db_tablespace
if col_type is None:
# Skip ManyToManyFields, because they're not represented as
# database columns in this table.
continue
# Make the definition (e.g. 'foo VARCHAR(30)') for this field.
field_output = [style.SQL_FIELD(qn(f.column)),
style.SQL_COLTYPE(col_type)]
if not f.null:
field_output.append(style.SQL_KEYWORD('NOT NULL'))
if f.primary_key:
field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
elif f.unique:
field_output.append(style.SQL_KEYWORD('UNIQUE'))
if tablespace and f.unique:
# We must specify the index tablespace inline, because we
# won't be generating a CREATE INDEX statement for this field.
field_output.append(self.connection.ops.tablespace_sql(tablespace, inline=True))
if f.rel:
ref_output, pending = self.sql_for_inline_foreign_key_references(f, known_models, style)
if pending:
pr = pending_references.setdefault(f.rel.to, []).append((model, f))
else:
field_output.extend(ref_output)
table_output.append(' '.join(field_output))
for field_constraints in opts.unique_together:
table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' % \
", ".join([style.SQL_FIELD(qn(opts.get_field(f).column)) for f in field_constraints]))
full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' + style.SQL_TABLE(qn(opts.db_table)) + ' (']
for i, line in enumerate(table_output): # Combine and add commas.
full_statement.append(' %s%s' % (line, i < len(table_output)-1 and ',' or ''))
full_statement.append(')')
if opts.db_tablespace:
full_statement.append(self.connection.ops.tablespace_sql(opts.db_tablespace))
full_statement.append(';')
final_output.append('\n'.join(full_statement))
if opts.has_auto_field:
# Add any extra SQL needed to support auto-incrementing primary keys.
auto_column = opts.auto_field.db_column or opts.auto_field.name
autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table, auto_column)
if autoinc_sql:
for stmt in autoinc_sql:
final_output.append(stmt)
return final_output, pending_references
    def sql_for_inline_foreign_key_references(self, field, known_models, style):
        "Return the SQL snippet defining the foreign key reference for a field"
        # Returns (snippet_list, pending): pending is True when the target
        # table hasn't been created yet and the FK must be added afterwards.
        qn = self.connection.ops.quote_name
        if field.rel.to in known_models:
            output = [style.SQL_KEYWORD('REFERENCES') + ' ' + \
                style.SQL_TABLE(qn(field.rel.to._meta.db_table)) + ' (' + \
                style.SQL_FIELD(qn(field.rel.to._meta.get_field(field.rel.field_name).column)) + ')' +
                self.connection.ops.deferrable_sql()
            ]
            pending = False
        else:
            # We haven't yet created the table to which this field
            # is related, so save it for later.
            output = []
            pending = True
        return output, pending
    def sql_for_pending_references(self, model, style, pending_references):
        "Returns any ALTER TABLE statements to add constraints after the fact."
        from django.db.backends.util import truncate_name

        if not model._meta.managed or model._meta.proxy:
            return []
        qn = self.connection.ops.quote_name
        final_output = []
        opts = model._meta
        if model in pending_references:
            for rel_class, f in pending_references[model]:
                rel_opts = rel_class._meta
                r_table = rel_opts.db_table
                r_col = f.column
                table = opts.db_table
                col = opts.get_field(f.rel.field_name).column
                # For MySQL, r_name must be unique in the first 64 characters.
                # So we are careful with character usage here.
                r_name = '%s_refs_%s_%s' % (r_col, col, self._digest(r_table, table))
                final_output.append(style.SQL_KEYWORD('ALTER TABLE') + ' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' % \
                    (qn(r_table), qn(truncate_name(r_name, self.connection.ops.max_name_length())),
                    qn(r_col), qn(table), qn(col),
                    self.connection.ops.deferrable_sql()))
            # Constraints emitted: drop the entry so it isn't emitted twice.
            del pending_references[model]
        return final_output
def sql_for_many_to_many(self, model, style):
"Return the CREATE TABLE statments for all the many-to-many tables defined on a model"
import warnings
warnings.warn(
'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
DeprecationWarning
)
output = []
for f in model._meta.local_many_to_many:
if model._meta.managed or f.rel.to._meta.managed:
output.extend(self.sql_for_many_to_many_field(model, f, style))
return output
    def sql_for_many_to_many_field(self, model, f, style):
        "Return the CREATE TABLE statements for a single m2m field"
        import warnings
        warnings.warn(
            'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
            DeprecationWarning
        )
        from django.db import models
        from django.db.backends.util import truncate_name

        output = []
        # Only auto-created intermediary tables are generated here; explicit
        # 'through' models produce their own table elsewhere.
        if f.auto_created:
            opts = model._meta
            qn = self.connection.ops.quote_name
            tablespace = f.db_tablespace or opts.db_tablespace
            if tablespace:
                sql = self.connection.ops.tablespace_sql(tablespace, inline=True)
                if sql:
                    tablespace_sql = ' ' + sql
                else:
                    tablespace_sql = ''
            else:
                tablespace_sql = ''
            table_output = [style.SQL_KEYWORD('CREATE TABLE') + ' ' + \
                style.SQL_TABLE(qn(f.m2m_db_table())) + ' (']
            table_output.append('    %s %s %s%s,' %
                (style.SQL_FIELD(qn('id')),
                style.SQL_COLTYPE(models.AutoField(primary_key=True).db_type(connection=self.connection)),
                style.SQL_KEYWORD('NOT NULL PRIMARY KEY'),
                tablespace_sql))
            deferred = []
            # FK column snippets; constraints to not-yet-created tables are
            # returned in 'deferred' and emitted as ALTER TABLE below.
            inline_output, deferred = self.sql_for_inline_many_to_many_references(model, f, style)
            table_output.extend(inline_output)
            table_output.append('    %s (%s, %s)%s' %
                (style.SQL_KEYWORD('UNIQUE'),
                style.SQL_FIELD(qn(f.m2m_column_name())),
                style.SQL_FIELD(qn(f.m2m_reverse_name())),
                tablespace_sql))
            table_output.append(')')
            if opts.db_tablespace:
                # f.db_tablespace is only for indices, so ignore its value here.
                table_output.append(self.connection.ops.tablespace_sql(opts.db_tablespace))
            table_output.append(';')
            output.append('\n'.join(table_output))

            for r_table, r_col, table, col in deferred:
                r_name = '%s_refs_%s_%s' % (r_col, col, self._digest(r_table, table))
                output.append(style.SQL_KEYWORD('ALTER TABLE') + ' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' %
                    (qn(r_table),
                    qn(truncate_name(r_name, self.connection.ops.max_name_length())),
                    qn(r_col), qn(table), qn(col),
                    self.connection.ops.deferrable_sql()))

            # Add any extra SQL needed to support auto-incrementing PKs
            autoinc_sql = self.connection.ops.autoinc_sql(f.m2m_db_table(), 'id')
            if autoinc_sql:
                for stmt in autoinc_sql:
                    output.append(stmt)
        return output
    def sql_for_inline_many_to_many_references(self, model, field, style):
        "Create the references to other tables required by a many-to-many table"
        import warnings
        warnings.warn(
            'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
            DeprecationWarning
        )
        from django.db import models
        opts = model._meta
        qn = self.connection.ops.quote_name
        # One FK column per side of the relation, declared inline.
        table_output = [
            '    %s %s %s %s (%s)%s,' %
                (style.SQL_FIELD(qn(field.m2m_column_name())),
                style.SQL_COLTYPE(models.ForeignKey(model).db_type(connection=self.connection)),
                style.SQL_KEYWORD('NOT NULL REFERENCES'),
                style.SQL_TABLE(qn(opts.db_table)),
                style.SQL_FIELD(qn(opts.pk.column)),
                self.connection.ops.deferrable_sql()),
            '    %s %s %s %s (%s)%s,' %
                (style.SQL_FIELD(qn(field.m2m_reverse_name())),
                style.SQL_COLTYPE(models.ForeignKey(field.rel.to).db_type(connection=self.connection)),
                style.SQL_KEYWORD('NOT NULL REFERENCES'),
                style.SQL_TABLE(qn(field.rel.to._meta.db_table)),
                style.SQL_FIELD(qn(field.rel.to._meta.pk.column)),
                self.connection.ops.deferrable_sql())
        ]
        # Base implementation never defers constraints.
        deferred = []
        return table_output, deferred
def sql_indexes_for_model(self, model, style):
    """Return the CREATE INDEX SQL statements for a single model.

    Unmanaged and proxy models own no database table, so they produce no
    index statements; otherwise the per-field SQL is collected from
    ``sql_indexes_for_field`` for every local field.
    """
    meta = model._meta
    if not meta.managed or meta.proxy:
        return []
    statements = []
    for field in meta.local_fields:
        statements.extend(self.sql_indexes_for_field(model, field, style))
    return statements
def sql_indexes_for_field(self, model, f, style):
    "Return the CREATE INDEX SQL statements for a single model field"
    from django.db.backends.util import truncate_name
    # Unique fields already get an index via their UNIQUE constraint, so
    # only plain db_index fields need an explicit CREATE INDEX.
    if f.db_index and not f.unique:
        qn = self.connection.ops.quote_name
        # Field-level tablespace overrides the model-level one.
        tablespace = f.db_tablespace or model._meta.db_tablespace
        if tablespace:
            sql = self.connection.ops.tablespace_sql(tablespace)
            if sql:
                tablespace_sql = ' ' + sql
            else:
                tablespace_sql = ''
        else:
            tablespace_sql = ''
        # _digest keeps generated names short/stable; truncate_name clamps
        # the result to the backend's identifier length limit.
        i_name = '%s_%s' % (model._meta.db_table, self._digest(f.column))
        output = [style.SQL_KEYWORD('CREATE INDEX') + ' ' +
            style.SQL_TABLE(qn(truncate_name(i_name, self.connection.ops.max_name_length()))) + ' ' +
            style.SQL_KEYWORD('ON') + ' ' +
            style.SQL_TABLE(qn(model._meta.db_table)) + ' ' +
            "(%s)" % style.SQL_FIELD(qn(f.column)) +
            "%s;" % tablespace_sql]
    else:
        output = []
    return output
def sql_destroy_model(self, model, references_to_delete, style):
    """Return the DROP TABLE (and constraint-removal) SQL for one model.

    Unmanaged and proxy models have no backing table, so nothing is
    emitted for them. When the model is the target of foreign keys listed
    in *references_to_delete*, the matching constraint-drop statements are
    appended, followed by any sequence-drop SQL for auto PK fields.
    """
    meta = model._meta
    if not meta.managed or meta.proxy:
        return []
    quote = self.connection.ops.quote_name
    statements = ['%s %s;' % (style.SQL_KEYWORD('DROP TABLE'),
                              style.SQL_TABLE(quote(meta.db_table)))]
    if model in references_to_delete:
        statements.extend(
            self.sql_remove_table_constraints(model, references_to_delete, style))
    if meta.has_auto_field:
        drop_seq = self.connection.ops.drop_sequence_sql(meta.db_table)
        if drop_seq:
            statements.append(drop_seq)
    return statements
def sql_remove_table_constraints(self, model, references_to_delete, style):
    """Return ALTER TABLE ... DROP CONSTRAINT SQL for FKs targeting *model*.

    For every (rel_class, field) pair recorded under *model* in
    *references_to_delete*, emit a statement dropping the foreign-key
    constraint, then remove the model's entry from the mapping so it is
    not processed twice. Unmanaged/proxy models produce nothing.
    """
    if not model._meta.managed or model._meta.proxy:
        return []
    # Imported lazily so the cheap early-return above does no import work
    # (the original imported unconditionally before the guard).
    from django.db.backends.util import truncate_name
    output = []
    qn = self.connection.ops.quote_name
    for rel_class, f in references_to_delete[model]:
        table = rel_class._meta.db_table
        col = f.column
        r_table = model._meta.db_table
        r_col = model._meta.get_field(f.rel.field_name).column
        # Constraint name must match the one generated at creation time.
        r_name = '%s_refs_%s_%s' % (col, r_col, self._digest(table, r_table))
        output.append('%s %s %s %s;' % \
            (style.SQL_KEYWORD('ALTER TABLE'),
            style.SQL_TABLE(qn(table)),
            style.SQL_KEYWORD(self.connection.ops.drop_foreignkey_sql()),
            style.SQL_FIELD(qn(truncate_name(r_name, self.connection.ops.max_name_length())))))
    del references_to_delete[model]
    return output
def sql_destroy_many_to_many(self, model, f, style):
    """Return the DROP TABLE statements for a single m2m field.

    Deprecated API: auto-created m2m join tables are now modelled
    automatically, so only auto-created fields still produce SQL here.
    """
    import warnings
    warnings.warn(
        'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
        DeprecationWarning
    )
    quote = self.connection.ops.quote_name
    statements = []
    if f.auto_created:
        statements.append("%s %s;" % (style.SQL_KEYWORD('DROP TABLE'),
                                      style.SQL_TABLE(quote(f.m2m_db_table()))))
        seq_sql = self.connection.ops.drop_sequence_sql(
            "%s_%s" % (model._meta.db_table, f.column))
        if seq_sql:
            statements.append(seq_sql)
    return statements
def create_test_db(self, verbosity=1, autoclobber=False):
    """
    Creates a test database, prompting the user for confirmation if the
    database already exists. Returns the name of the test database created.
    """
    # Don't import django.core.management if it isn't needed.
    from django.core.management import call_command
    test_database_name = self._get_test_db_name()
    if verbosity >= 1:
        test_db_repr = ''
        if verbosity >= 2:
            test_db_repr = " ('%s')" % test_database_name
        print "Creating test database for alias '%s'%s..." % (self.connection.alias, test_db_repr)
    self._create_test_db(verbosity, autoclobber)
    # Reconnect so subsequent cursors hit the freshly created test DB.
    self.connection.close()
    self.connection.settings_dict["NAME"] = test_database_name
    # Confirm the feature set of the test database
    self.connection.features.confirm()
    # Report syncdb messages at one level lower than that requested.
    # This ensures we don't get flooded with messages during testing
    # (unless you really ask to be flooded)
    call_command('syncdb',
        verbosity=max(verbosity - 1, 0),
        interactive=False,
        database=self.connection.alias,
        load_initial_data=False)
    # We need to then do a flush to ensure that any data installed by
    # custom SQL has been removed. The only test data should come from
    # test fixtures, or autogenerated from post_syncdb triggers.
    # This has the side effect of loading initial data (which was
    # intentionally skipped in the syncdb).
    call_command('flush',
        verbosity=max(verbosity - 1, 0),
        interactive=False,
        database=self.connection.alias)
    from django.core.cache import get_cache
    from django.core.cache.backends.db import BaseDatabaseCache
    # 'settings' comes from the module-level django.conf import; create a
    # cache table for every DB-backed cache this alias is allowed to sync.
    for cache_alias in settings.CACHES:
        cache = get_cache(cache_alias)
        if isinstance(cache, BaseDatabaseCache):
            from django.db import router
            if router.allow_syncdb(self.connection.alias, cache.cache_model_class):
                call_command('createcachetable', cache._table, database=self.connection.alias)
    # Get a cursor (even though we don't need one yet). This has
    # the side effect of initializing the test database.
    cursor = self.connection.cursor()
    return test_database_name
def _get_test_db_name(self):
"""
Internal implementation - returns the name of the test DB that will be
created. Only useful when called from create_test_db() and
_create_test_db() and when no external munging is done with the 'NAME'
or 'TEST_NAME' settings.
"""
if self.connection.settings_dict['TEST_NAME']:
return self.connection.settings_dict['TEST_NAME']
return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
def _create_test_db(self, verbosity, autoclobber):
    "Internal implementation - creates the test db tables."
    suffix = self.sql_table_creation_suffix()
    test_database_name = self._get_test_db_name()
    qn = self.connection.ops.quote_name
    # Create the test database and connect to it. We need to autocommit
    # if the database supports it because PostgreSQL doesn't allow
    # CREATE/DROP DATABASE statements within transactions.
    cursor = self.connection.cursor()
    self.set_autocommit()
    try:
        cursor.execute("CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
    except Exception, e:
        # Creation failed, most likely because the DB already exists.
        # Either clobber it automatically or ask the user first.
        sys.stderr.write("Got an error creating the test database: %s\n" % e)
        if not autoclobber:
            confirm = raw_input("Type 'yes' if you would like to try deleting the test database '%s', or 'no' to cancel: " % test_database_name)
        # Note: when autoclobber is True, 'confirm' is never bound -- the
        # short-circuit 'or' keeps that safe.
        if autoclobber or confirm == 'yes':
            try:
                if verbosity >= 1:
                    print "Destroying old test database '%s'..." % self.connection.alias
                cursor.execute("DROP DATABASE %s" % qn(test_database_name))
                cursor.execute("CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
            except Exception, e:
                sys.stderr.write("Got an error recreating the test database: %s\n" % e)
                sys.exit(2)
        else:
            print "Tests cancelled."
            sys.exit(1)
    return test_database_name
def destroy_test_db(self, old_database_name, verbosity=1):
    """
    Destroy a test database, prompting the user for confirmation if the
    database already exists. Returns the name of the test database created.
    """
    self.connection.close()
    test_database_name = self.connection.settings_dict['NAME']
    if verbosity >= 1:
        test_db_repr = ''
        if verbosity >= 2:
            test_db_repr = " ('%s')" % test_database_name
        print "Destroying test database for alias '%s'%s..." % (self.connection.alias, test_db_repr)
    # Point the connection back at the original database before dropping
    # the test one -- a database cannot be dropped while connected to it.
    self.connection.settings_dict['NAME'] = old_database_name
    self._destroy_test_db(test_database_name, verbosity)
def _destroy_test_db(self, test_database_name, verbosity):
    "Internal implementation - remove the test db tables."
    # Remove the test database to clean up after
    # ourselves. Connect to the previous database (not the test database)
    # to do so, because it's not allowed to delete a database while being
    # connected to it.
    # NOTE(review): assumes the caller (destroy_test_db) has already reset
    # settings_dict['NAME'] to the original database -- confirm before reuse.
    cursor = self.connection.cursor()
    self.set_autocommit()
    time.sleep(1) # To avoid "database is being accessed by other users" errors.
    cursor.execute("DROP DATABASE %s" % self.connection.ops.quote_name(test_database_name))
    self.connection.close()
def set_autocommit(self):
    """Put the underlying DB-API connection into autocommit mode.

    Handles the driver conventions seen in the wild: a callable
    ``autocommit(flag)`` method, a plain ``autocommit`` attribute, and
    psycopg2-style ``set_isolation_level(0)``.
    """
    raw_conn = self.connection.connection
    if hasattr(raw_conn, "autocommit"):
        if callable(raw_conn.autocommit):
            raw_conn.autocommit(True)
        else:
            raw_conn.autocommit = True
    elif hasattr(raw_conn, "set_isolation_level"):
        # Isolation level 0 == autocommit for psycopg2.
        raw_conn.set_isolation_level(0)
def sql_table_creation_suffix(self):
    """Hook for backends to append extra SQL to CREATE DATABASE.

    The base implementation contributes nothing.
    """
    return ''
def test_db_signature(self):
"""
Returns a tuple with elements of self.connection.settings_dict (a
DATABASES setting value) that uniquely identify a database
accordingly to the RDBMS particularities.
"""
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME']
)
| bsd-3-clause |
diegoguimaraes/django | django/contrib/gis/db/backends/postgis/creation.py | 34 | 4625 | from django.conf import settings
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
from django.utils.functional import cached_property
class PostGISCreation(DatabaseCreation):
    """Database creation backend for PostGIS-enabled PostgreSQL.

    Extends the plain PostgreSQL creation class with spatial column and
    GiST index handling, and with template/extension based creation of the
    PostGIS test database.
    """
    # Index method and operator classes used for spatial indexes.
    geom_index_type = 'GIST'
    geom_index_ops = 'GIST_GEOMETRY_OPS'
    geom_index_ops_nd = 'GIST_GEOMETRY_OPS_ND'

    @cached_property
    def template_postgis(self):
        # Name of the PostGIS template database, or None when it does not
        # exist on the server (PostGIS 2.0+ installs use CREATE EXTENSION
        # instead -- see _create_test_db below).
        template_postgis = getattr(settings, 'POSTGIS_TEMPLATE', 'template_postgis')
        with self.connection.cursor() as cursor:
            cursor.execute('SELECT 1 FROM pg_database WHERE datname = %s LIMIT 1;', (template_postgis,))
            if cursor.fetchone():
                return template_postgis
        return None

    def sql_indexes_for_field(self, model, f, style):
        "Return any spatial index creation SQL for the field."
        from django.contrib.gis.db.models.fields import GeometryField

        output = super(PostGISCreation, self).sql_indexes_for_field(model, f, style)

        if isinstance(f, GeometryField):
            gqn = self.connection.ops.geo_quote_name
            qn = self.connection.ops.quote_name
            db_table = model._meta.db_table

            if f.geography or self.connection.ops.geometry:
                # Geography and Geometry (PostGIS 2.0+) columns are
                # created normally.
                pass
            else:
                # Geometry columns are created by `AddGeometryColumn`
                # stored procedure.
                output.append(style.SQL_KEYWORD('SELECT ') +
                              style.SQL_TABLE('AddGeometryColumn') + '(' +
                              style.SQL_TABLE(gqn(db_table)) + ', ' +
                              style.SQL_FIELD(gqn(f.column)) + ', ' +
                              style.SQL_FIELD(str(f.srid)) + ', ' +
                              style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
                              style.SQL_KEYWORD(str(f.dim)) + ');')

                if not f.null:
                    # Add a NOT NULL constraint to the field
                    output.append(style.SQL_KEYWORD('ALTER TABLE ') +
                                  style.SQL_TABLE(qn(db_table)) +
                                  style.SQL_KEYWORD(' ALTER ') +
                                  style.SQL_FIELD(qn(f.column)) +
                                  style.SQL_KEYWORD(' SET NOT NULL') + ';')

            if f.spatial_index:
                # Spatial indexes created the same way for both Geometry and
                # Geography columns.
                # PostGIS 2.0 does not support GIST_GEOMETRY_OPS. So, on 1.5
                # we use GIST_GEOMETRY_OPS, on 2.0 we use either "nd" ops
                # which are fast on multidimensional cases, or just plain
                # gist index for the 2d case.
                if f.geography:
                    index_ops = ''
                elif self.connection.ops.geometry:
                    if f.dim > 2:
                        index_ops = ' ' + style.SQL_KEYWORD(self.geom_index_ops_nd)
                    else:
                        index_ops = ''
                else:
                    index_ops = ' ' + style.SQL_KEYWORD(self.geom_index_ops)
                output.append(style.SQL_KEYWORD('CREATE INDEX ') +
                              style.SQL_TABLE(qn('%s_%s_id' % (db_table, f.column))) +
                              style.SQL_KEYWORD(' ON ') +
                              style.SQL_TABLE(qn(db_table)) +
                              style.SQL_KEYWORD(' USING ') +
                              style.SQL_COLTYPE(self.geom_index_type) + ' ( ' +
                              style.SQL_FIELD(qn(f.column)) + index_ops + ' );')
        return output

    def sql_table_creation_suffix(self):
        # Clone from the PostGIS template database when one exists.
        if self.template_postgis is not None:
            return ' TEMPLATE %s' % (
                self.connection.ops.quote_name(self.template_postgis),)
        return ''

    def _create_test_db(self, verbosity, autoclobber, keepdb=False):
        test_database_name = super(PostGISCreation, self)._create_test_db(verbosity, autoclobber, keepdb)
        if keepdb:
            return test_database_name
        if self.template_postgis is None:
            # Connect to the test database in order to create the postgis extension
            self.connection.close()
            self.connection.settings_dict["NAME"] = test_database_name
            with self.connection.cursor() as cursor:
                cursor.execute("CREATE EXTENSION IF NOT EXISTS postgis")
                cursor.connection.commit()
        return test_database_name
| bsd-3-clause |
AndrewSmart/audacity | lib-src/lv2/serd/waflib/Tools/ifort.py | 330 | 1460 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import re
from waflib import Utils
from waflib.Tools import fc,fc_config,fc_scan,ar
from waflib.Configure import conf
@conf
def find_ifort(conf):
	# Locate the Intel Fortran compiler, probe its version and record the
	# toolchain name in the configuration environment.
	fc=conf.find_program('ifort',var='FC')
	fc=conf.cmd_to_list(fc)
	conf.get_ifort_version(fc)
	conf.env.FC_NAME='IFORT'
@conf
def ifort_modifier_cygwin(conf):
	# Cygwin support for ifort has never been wired up in waf.
	raise NotImplementedError("Ifort on cygwin not yet implemented")
@conf
def ifort_modifier_win32(conf):
	# ifort shares the generic Fortran win32 flag tweaks.
	fc_config.fortran_modifier_win32(conf)
@conf
def ifort_modifier_darwin(conf):
	# ifort shares the generic Fortran darwin flag tweaks.
	fc_config.fortran_modifier_darwin(conf)
@conf
def ifort_modifier_platform(conf):
	# Dispatch to the per-platform modifier (if any) based on DEST_OS,
	# falling back to the detected host platform.
	dest_os=conf.env['DEST_OS']or Utils.unversioned_sys_platform()
	ifort_modifier_func=getattr(conf,'ifort_modifier_'+dest_os,None)
	if ifort_modifier_func:
		ifort_modifier_func()
@conf
def get_ifort_version(conf,fc):
	# Parse "ifort (IFORT) <major>.<minor>" from `ifort --version`; some
	# setups print the banner on stderr, hence the fallback match.
	version_re=re.compile(r"ifort\s*\(IFORT\)\s*(?P<major>\d*)\.(?P<minor>\d*)",re.I).search
	cmd=fc+['--version']
	out,err=fc_config.getoutput(conf,cmd,stdin=False)
	if out:
		match=version_re(out)
	else:
		match=version_re(err)
	if not match:
		conf.fatal('cannot determine ifort version.')
	k=match.groupdict()
	conf.env['FC_VERSION']=(k['major'],k['minor'])
def configure(conf):
	# Standard waf tool entry point: detect ifort and the xiar archiver,
	# then set up the common Fortran flags for this platform.
	conf.find_ifort()
	conf.find_program('xiar',var='AR')
	conf.env.ARFLAGS='rcs'
	conf.fc_flags()
	conf.fc_add_flags()
	conf.ifort_modifier_platform()
| gpl-2.0 |
fredericlepied/ansible | lib/ansible/modules/net_tools/snmp_facts.py | 9 | 12847 | #!/usr/bin/python
# This file is part of Networklore's snmp library for Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: snmp_facts
version_added: "1.9"
author: "Patrick Ogenstad (@ogenstad)"
short_description: Retrieve facts for a device using SNMP.
description:
- Retrieve facts for a device using SNMP, the facts will be
inserted to the ansible_facts key.
requirements:
- pysnmp
options:
host:
description:
- Set to target snmp server (normally {{inventory_hostname}})
required: true
version:
description:
- SNMP Version to use, v2/v2c or v3
choices: [ 'v2', 'v2c', 'v3' ]
required: true
community:
description:
- The SNMP community string, required if version is v2/v2c
required: false
level:
description:
- Authentication level, required if version is v3
choices: [ 'authPriv', 'authNoPriv' ]
required: false
username:
description:
- Username for SNMPv3, required if version is v3
required: false
integrity:
description:
- Hashing algorithm, required if version is v3
choices: [ 'md5', 'sha' ]
required: false
authkey:
description:
- Authentication key, required if version is v3
required: false
privacy:
description:
- Encryption algorithm, required if level is authPriv
choices: [ 'des', 'aes' ]
required: false
privkey:
description:
- Encryption key, required if version is authPriv
required: false
'''
EXAMPLES = '''
# Gather facts with SNMP version 2
- snmp_facts:
host: '{{ inventory_hostname }}'
version: 2c
community: public
delegate_to: local
# Gather facts using SNMP version 3
- snmp_facts:
host: '{{ inventory_hostname }}'
version: v3
level: authPriv
integrity: sha
privacy: aes
username: snmp-user
authkey: abc12345
privkey: def6789
delegate_to: localhost
'''
import binascii
from collections import defaultdict
try:
from pysnmp.entity.rfc3413.oneliner import cmdgen
has_pysnmp = True
except:
has_pysnmp = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text
class DefineOid(object):
    """Table of the numeric OIDs polled by this module.

    With dotprefix=True each OID carries a leading dot (the form used when
    issuing SNMP requests); with dotprefix=False the values match the
    prettyPrint() rendering of returned varbinds, so the same attribute
    names serve for both sending and matching.
    """

    def __init__(self,dotprefix=False):
        if dotprefix:
            dp = "."
        else:
            dp = ""

        # From SNMPv2-MIB
        self.sysDescr    = dp + "1.3.6.1.2.1.1.1.0"
        self.sysObjectId = dp + "1.3.6.1.2.1.1.2.0"
        self.sysUpTime   = dp + "1.3.6.1.2.1.1.3.0"
        self.sysContact  = dp + "1.3.6.1.2.1.1.4.0"
        self.sysName     = dp + "1.3.6.1.2.1.1.5.0"
        self.sysLocation = dp + "1.3.6.1.2.1.1.6.0"

        # From IF-MIB
        self.ifIndex       = dp + "1.3.6.1.2.1.2.2.1.1"
        self.ifDescr       = dp + "1.3.6.1.2.1.2.2.1.2"
        self.ifMtu         = dp + "1.3.6.1.2.1.2.2.1.4"
        self.ifSpeed       = dp + "1.3.6.1.2.1.2.2.1.5"
        self.ifPhysAddress = dp + "1.3.6.1.2.1.2.2.1.6"
        self.ifAdminStatus = dp + "1.3.6.1.2.1.2.2.1.7"
        self.ifOperStatus  = dp + "1.3.6.1.2.1.2.2.1.8"
        self.ifAlias       = dp + "1.3.6.1.2.1.31.1.1.1.18"

        # From IP-MIB
        self.ipAdEntAddr    = dp + "1.3.6.1.2.1.4.20.1.1"
        self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2"
        self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3"
def decode_hex(hexstring):
    """Decode a pysnmp "0x..." hex rendering into readable text.

    Strings too short to carry a payload, or lacking the "0x" prefix, are
    returned unchanged.
    """
    if len(hexstring) >= 3 and hexstring.startswith("0x"):
        return to_text(binascii.unhexlify(hexstring[2:]))
    return hexstring
def decode_mac(hexstring):
    """Strip the "0x" prefix from a pysnmp MAC rendering.

    A MAC renders as exactly 14 characters ("0x" + 6 octets); anything
    else is returned unchanged.
    """
    if len(hexstring) == 14 and hexstring.startswith("0x"):
        return hexstring[2:]
    return hexstring
def lookup_adminstatus(int_adminstatus):
    """Map an IF-MIB ifAdminStatus integer to its symbolic name.

    Returns '' for values outside the MIB-defined set.
    """
    adminstatus_options = {
        1: 'up',
        2: 'down',
        3: 'testing',
    }
    # dict.get with a default replaces the membership-test-then-index dance.
    return adminstatus_options.get(int_adminstatus, "")
def lookup_operstatus(int_operstatus):
    """Map an IF-MIB ifOperStatus integer to its symbolic name.

    Returns '' for values outside the MIB-defined set.
    """
    operstatus_options = {
        1: 'up',
        2: 'down',
        3: 'testing',
        4: 'unknown',
        5: 'dormant',
        6: 'notPresent',
        7: 'lowerLayerDown',
    }
    # dict.get with a default replaces the membership-test-then-index dance.
    return operstatus_options.get(int_operstatus, "")
def main():
    """Gather SNMP system and interface facts and exit via module.exit_json.

    Polls SNMPv2-MIB scalars (sysDescr etc.) with a GET, then walks IF-MIB
    and IP-MIB tables to build per-interface facts plus the list of all
    IPv4 addresses on the target device.
    """
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(required=True),
            version=dict(required=True, choices=['v2', 'v2c', 'v3']),
            community=dict(required=False, default=False),
            username=dict(required=False),
            level=dict(required=False, choices=['authNoPriv', 'authPriv']),
            integrity=dict(required=False, choices=['md5', 'sha']),
            privacy=dict(required=False, choices=['des', 'aes']),
            authkey=dict(required=False),
            privkey=dict(required=False),
            removeplaceholder=dict(required=False)),
        required_together=(['username', 'level', 'integrity', 'authkey'], ['privacy', 'privkey'],),
        supports_check_mode=False)

    m_args = module.params

    if not has_pysnmp:
        module.fail_json(msg='Missing required pysnmp module (check docs)')

    cmdGen = cmdgen.CommandGenerator()

    # Verify that we receive a community when using snmp v2
    if m_args['version'] == "v2" or m_args['version'] == "v2c":
        if m_args['community'] is False:
            module.fail_json(msg='Community not set when using snmp version 2')

    if m_args['version'] == "v3":
        if m_args['username'] is None:
            module.fail_json(msg='Username not set when using snmp version 3')

        if m_args['level'] == "authPriv" and m_args['privacy'] is None:
            module.fail_json(msg='Privacy algorithm not set when using authPriv')

        if m_args['integrity'] == "sha":
            integrity_proto = cmdgen.usmHMACSHAAuthProtocol
        elif m_args['integrity'] == "md5":
            integrity_proto = cmdgen.usmHMACMD5AuthProtocol

        if m_args['privacy'] == "aes":
            privacy_proto = cmdgen.usmAesCfb128Protocol
        elif m_args['privacy'] == "des":
            privacy_proto = cmdgen.usmDESPrivProtocol

    # Use SNMP Version 2
    if m_args['version'] == "v2" or m_args['version'] == "v2c":
        snmp_auth = cmdgen.CommunityData(m_args['community'])
    # Use SNMP Version 3 with authNoPriv
    elif m_args['level'] == "authNoPriv":
        snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], authProtocol=integrity_proto)
    # Use SNMP Version 3 with authPriv
    else:
        snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto,
                                       privProtocol=privacy_proto)

    # Use p to prefix OIDs with a dot for polling
    p = DefineOid(dotprefix=True)
    # Use v without a prefix to use with return values
    v = DefineOid(dotprefix=False)

    # Autovivifying nested dict for accumulating facts.
    Tree = lambda: defaultdict(Tree)

    results = Tree()

    errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
        snmp_auth,
        cmdgen.UdpTransportTarget((m_args['host'], 161)),
        cmdgen.MibVariable(p.sysDescr,),
        cmdgen.MibVariable(p.sysObjectId,),
        cmdgen.MibVariable(p.sysUpTime,),
        cmdgen.MibVariable(p.sysContact,),
        cmdgen.MibVariable(p.sysName,),
        cmdgen.MibVariable(p.sysLocation,),
        lookupMib=False
    )

    if errorIndication:
        module.fail_json(msg=str(errorIndication))

    for oid, val in varBinds:
        current_oid = oid.prettyPrint()
        current_val = val.prettyPrint()
        if current_oid == v.sysDescr:
            results['ansible_sysdescr'] = decode_hex(current_val)
        elif current_oid == v.sysObjectId:
            results['ansible_sysobjectid'] = current_val
        elif current_oid == v.sysUpTime:
            results['ansible_sysuptime'] = current_val
        elif current_oid == v.sysContact:
            results['ansible_syscontact'] = current_val
        elif current_oid == v.sysName:
            results['ansible_sysname'] = current_val
        elif current_oid == v.sysLocation:
            results['ansible_syslocation'] = current_val

    errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
        snmp_auth,
        cmdgen.UdpTransportTarget((m_args['host'], 161)),
        cmdgen.MibVariable(p.ifIndex,),
        cmdgen.MibVariable(p.ifDescr,),
        cmdgen.MibVariable(p.ifMtu,),
        cmdgen.MibVariable(p.ifSpeed,),
        cmdgen.MibVariable(p.ifPhysAddress,),
        cmdgen.MibVariable(p.ifAdminStatus,),
        cmdgen.MibVariable(p.ifOperStatus,),
        cmdgen.MibVariable(p.ipAdEntAddr,),
        cmdgen.MibVariable(p.ipAdEntIfIndex,),
        cmdgen.MibVariable(p.ipAdEntNetMask,),
        cmdgen.MibVariable(p.ifAlias,),
        lookupMib=False
    )

    if errorIndication:
        module.fail_json(msg=str(errorIndication))

    interface_indexes = []

    all_ipv4_addresses = []
    ipv4_networks = Tree()

    for varBinds in varTable:
        for oid, val in varBinds:
            current_oid = oid.prettyPrint()
            current_val = val.prettyPrint()
            if v.ifIndex in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['ansible_interfaces'][ifIndex]['ifindex'] = current_val
                interface_indexes.append(ifIndex)
            if v.ifDescr in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['ansible_interfaces'][ifIndex]['name'] = current_val
            if v.ifMtu in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['ansible_interfaces'][ifIndex]['mtu'] = current_val
            if v.ifSpeed in current_oid:
                # Bug fix: this previously re-tested v.ifMtu, so the 'speed'
                # fact was populated from MTU table rows instead of ifSpeed.
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['ansible_interfaces'][ifIndex]['speed'] = current_val
            if v.ifPhysAddress in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['ansible_interfaces'][ifIndex]['mac'] = decode_mac(current_val)
            if v.ifAdminStatus in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['ansible_interfaces'][ifIndex]['adminstatus'] = lookup_adminstatus(int(current_val))
            if v.ifOperStatus in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['ansible_interfaces'][ifIndex]['operstatus'] = lookup_operstatus(int(current_val))
            if v.ipAdEntAddr in current_oid:
                # The last four OID components are the IP address itself.
                curIPList = current_oid.rsplit('.', 4)[-4:]
                curIP = ".".join(curIPList)
                ipv4_networks[curIP]['address'] = current_val
                all_ipv4_addresses.append(current_val)
            if v.ipAdEntIfIndex in current_oid:
                curIPList = current_oid.rsplit('.', 4)[-4:]
                curIP = ".".join(curIPList)
                ipv4_networks[curIP]['interface'] = current_val
            if v.ipAdEntNetMask in current_oid:
                curIPList = current_oid.rsplit('.', 4)[-4:]
                curIP = ".".join(curIPList)
                ipv4_networks[curIP]['netmask'] = current_val
            if v.ifAlias in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['ansible_interfaces'][ifIndex]['description'] = current_val

    # Group the collected networks by owning interface index.
    interface_to_ipv4 = {}
    for ipv4_network in ipv4_networks:
        current_interface = ipv4_networks[ipv4_network]['interface']
        current_network = {
            'address': ipv4_networks[ipv4_network]['address'],
            'netmask': ipv4_networks[ipv4_network]['netmask']
        }
        # setdefault collapses the original duplicated if/else append branches.
        interface_to_ipv4.setdefault(current_interface, []).append(current_network)

    for interface in interface_to_ipv4:
        results['ansible_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface]

    results['ansible_all_ipv4_addresses'] = all_ipv4_addresses

    module.exit_json(ansible_facts=results)
# Standard Ansible module entry point: only run when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
GoogleContainerTools/distroless | package_manager/version_utils.py | 2 | 7668 | # Copyright [2017] The Climate Corporation (https://climate.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copied version check from https://github.com/TheClimateCorporation/python-dpkg
def get_epoch(version_str):
    """Split the epoch off a Debian package version string.

    Returns ``(epoch, rest)`` where *rest* is everything after the first
    colon; the epoch is 0 when the version carries no colon at all.
    Raises Exception when the text before the colon is not an integer.
    """
    colon = version_str.find(':')
    if colon == -1:
        # no colons means no epoch; that's valid, man
        return 0, version_str
    try:
        epoch = int(version_str[:colon])
    except ValueError:
        raise Exception(
            'Corrupt dpkg version %s: epochs can only be ints, and '
            'epochless versions cannot use the colon character.' %
            version_str)
    return epoch, version_str[colon + 1:]
def get_upstream(version_str):
    """Split a version into (upstream, debian_revision) at the last hyphen.

    A hyphen-free version string has an implicit debian revision of '0'.
    """
    upstream, sep, debian = version_str.rpartition('-')
    if not sep:
        # no hyphens means no debian version, also valid.
        return version_str, '0'
    return upstream, debian
def split_full_version(version_str):
    """Break a full Debian version into its three comparison components.

    :param: version_str
    :returns: tuple of (epoch, upstream_version, debian_revision)
    """
    epoch, remainder = get_epoch(version_str)
    upstream, debian = get_upstream(remainder)
    return epoch, upstream, debian
def get_alphas(revision_str):
    """Split off the leading run of non-digit characters.

    Returns ``(alphas, rest)``; either part may be empty.
    """
    for pos, ch in enumerate(revision_str):
        if ch.isdigit():
            return revision_str[:pos], revision_str[pos:]
    # no digits at all: the whole string is the alpha part
    return revision_str, ''
def get_digits(revision_str):
    """Split off the leading run of digits as an integer.

    Returns ``(number, rest)``; the number is 0 when the string is empty
    or does not start with a digit.
    """
    for pos, ch in enumerate(revision_str):
        if not ch.isdigit():
            if pos == 0:
                return 0, revision_str
            return int(revision_str[:pos]), revision_str[pos:]
    # empty string, or all digits
    if not revision_str:
        return 0, ''
    return int(revision_str), ''
def listify(revision_str):
    """Explode a revision string into [str, int, str, int, ...].

    The alternating, padded shape guarantees that two listified revisions
    compare str-to-str and int-to-int element by element, which is what
    the Debian comparison algorithm at
    http://debian.org/doc/debian-policy/ch-controlfields.html#s-f-Version
    requires.
    """
    parts = []
    remainder = revision_str
    while remainder:
        alphas, remainder = get_alphas(remainder)
        digits, remainder = get_digits(remainder)
        parts.append(alphas)
        parts.append(digits)
    return parts
# pylint: disable=invalid-name,too-many-return-statements
def dstringcmp(a, b):
    """debian package version string section lexical sort algorithm

    "The lexical comparison is a comparison of ASCII values modified so
    that all the letters sort earlier than all the non-letters and so that
    a tilde sorts before anything, even the end of a part."

    Returns -1, 0 or 1 in the manner of Python 2's cmp().
    """
    if a == b:
        return 0
    try:
        for i, char in enumerate(a):
            if char == b[i]:
                continue
            # "a tilde sorts before anything, even the end of a part"
            # (emptyness)
            if char == '~':
                return -1
            if b[i] == '~':
                return 1
            # "all the letters sort earlier than all the non-letters"
            if char.isalpha() and not b[i].isalpha():
                return -1
            if not char.isalpha() and b[i].isalpha():
                return 1
            # otherwise lexical sort
            if ord(char) > ord(b[i]):
                return 1
            if ord(char) < ord(b[i]):
                return -1
    except IndexError:
        # a is longer than b but otherwise equal, hence greater
        # ...except for goddamn tildes
        # NOTE: 'char' is the loop variable deliberately leaked from the
        # for loop above; at this point it is a's first character beyond
        # the end of b.
        if char == '~':
            return -1
        return 1
    # if we get here, a is shorter than b but otherwise equal, hence lesser
    # ...except for goddamn tildes
    if b[len(a)] == '~':
        return 1
    return -1
def compare_revision_strings(rev1, rev2):
    """Compare two debian revision strings as described at
    https://www.debian.org/doc/debian-policy/ch-controlfields.html#s-f-Version

    Returns -1, 0 or 1 in the manner of Python 2's cmp().
    """
    if rev1 == rev2:
        return 0
    # listify pads results so that we will always be comparing ints to ints
    # and strings to strings (at least until we fall off the end of a list)
    list1 = listify(rev1)
    list2 = listify(rev2)
    if list1 == list2:
        return 0
    try:
        for i, item in enumerate(list1):
            # just in case
            if not isinstance(item, list2[i].__class__):
                raise Exception(
                    'Cannot compare %s to %s, something has gone horribly '
                    'awry.' % (item, list2[i]))
            # if the items are equal, next
            if item == list2[i]:
                continue
            # numeric comparison
            if isinstance(item, int):
                if item > list2[i]:
                    return 1
                if item < list2[i]:
                    return -1
            else:
                # string comparison (Debian's modified lexical order)
                return dstringcmp(item, list2[i])
    except IndexError:
        # rev1 is longer than rev2 but otherwise equal, hence greater
        return 1
    # rev1 is shorter than rev2 but otherwise equal, hence lesser
    return -1
def compare_versions(ver1, ver2):
    """cmp()-style comparison of two Debian package version strings.

    Returns -1, 0 or 1, suitable for passing to list.sort() and friends
    (via functools.cmp_to_key on Python 3).
    """
    if ver1 == ver2:
        return 0
    # note the string conversion: the debian policy here explicitly
    # specifies ASCII string comparisons, so if you are mad enough to
    # actually cram unicode characters into your package name, you are on
    # your own.
    epoch1, upstream1, debian1 = split_full_version(str(ver1))
    epoch2, upstream2, debian2 = split_full_version(str(ver2))
    # A differing epoch decides the ordering immediately.
    if epoch1 != epoch2:
        return -1 if epoch1 < epoch2 else 1
    # Then compare the upstream versions, then the debian revisions.
    for rev1, rev2 in ((upstream1, upstream2), (debian1, debian2)):
        outcome = compare_revision_strings(rev1, rev2)
        if outcome != 0:
            return outcome
    # at this point, the versions are equal, but due to an interpolated
    # zero in either the epoch or the debian version
    return 0
| apache-2.0 |
rduivenvoorde/QGIS | tests/src/python/test_qgslayoutnortharrowhandler.py | 31 | 5552 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLayoutNorthArrowHandler.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2020 by Nyall Dawson'
__date__ = '05/04/2020'
__copyright__ = 'Copyright 2020, The QGIS Project'
import qgis # NOQA
from qgis.PyQt.QtCore import QRectF
from qgis.PyQt.QtTest import QSignalSpy
from qgis.core import (QgsLayoutNorthArrowHandler,
QgsLayout,
QgsLayoutItemMap,
QgsRectangle,
QgsCoordinateReferenceSystem,
QgsProject
)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsLayoutNorthArrowHandler(unittest.TestCase):
    """Tests for QgsLayoutNorthArrowHandler rotation syncing with a map item."""

    # NOTE: the map item local is named 'map_item' (not 'map') so the
    # builtin map() is not shadowed.

    def testNorthArrowWithMapItemRotation(self):
        """Test arrow rotation when map item is also rotated"""
        layout = QgsLayout(QgsProject.instance())
        map_item = QgsLayoutItemMap(layout)
        map_item.setExtent(QgsRectangle(0, -256, 256, 0))
        layout.addLayoutItem(map_item)
        handler = QgsLayoutNorthArrowHandler(layout)
        spy = QSignalSpy(handler.arrowRotationChanged)

        handler.setLinkedMap(map_item)
        self.assertEqual(handler.linkedMap(), map_item)
        self.assertEqual(len(spy), 0)
        handler.setNorthMode(QgsLayoutNorthArrowHandler.GridNorth)

        map_item.setItemRotation(45)
        self.assertEqual(handler.arrowRotation(), 45)
        self.assertEqual(len(spy), 1)
        self.assertEqual(spy[-1][0], 45)

        map_item.setMapRotation(-34)
        self.assertEqual(handler.arrowRotation(), 11)
        self.assertEqual(len(spy), 2)
        self.assertEqual(spy[-1][0], 11)

        # add an offset
        handler.setNorthOffset(-10)
        self.assertEqual(handler.arrowRotation(), 1)
        self.assertEqual(len(spy), 3)
        self.assertEqual(spy[-1][0], 1)

        map_item.setItemRotation(55)
        self.assertEqual(handler.arrowRotation(), 11)
        self.assertEqual(len(spy), 4)
        self.assertEqual(spy[-1][0], 11)

    def testMapWithInitialRotation(self):
        """Test arrow rotation when map item is initially rotated"""
        layout = QgsLayout(QgsProject.instance())
        map_item = QgsLayoutItemMap(layout)
        map_item.setExtent(QgsRectangle(0, -256, 256, 0))
        map_item.setRotation(45)
        layout.addLayoutItem(map_item)
        handler = QgsLayoutNorthArrowHandler(layout)
        spy = QSignalSpy(handler.arrowRotationChanged)

        # linking an already-rotated map must emit the rotation immediately
        handler.setLinkedMap(map_item)
        self.assertEqual(handler.linkedMap(), map_item)
        self.assertEqual(len(spy), 1)
        self.assertEqual(spy[-1][0], 45)

        # unlinking must reset the rotation to 0
        handler.setLinkedMap(None)
        self.assertEqual(len(spy), 2)
        self.assertEqual(spy[-1][0], 0)

    def testGridNorth(self):
        """Test syncing arrow to grid north"""
        layout = QgsLayout(QgsProject.instance())
        map_item = QgsLayoutItemMap(layout)
        map_item.setExtent(QgsRectangle(0, -256, 256, 0))
        layout.addLayoutItem(map_item)
        handler = QgsLayoutNorthArrowHandler(layout)
        spy = QSignalSpy(handler.arrowRotationChanged)

        handler.setLinkedMap(map_item)
        self.assertEqual(handler.linkedMap(), map_item)
        self.assertEqual(len(spy), 0)
        handler.setNorthMode(QgsLayoutNorthArrowHandler.GridNorth)

        map_item.setMapRotation(45)
        self.assertEqual(handler.arrowRotation(), 45)
        self.assertEqual(len(spy), 1)
        self.assertEqual(spy[-1][0], 45)

        # add an offset
        handler.setNorthOffset(-10)
        self.assertEqual(handler.arrowRotation(), 35)
        self.assertEqual(len(spy), 2)
        self.assertEqual(spy[-1][0], 35)

    def testTrueNorth(self):
        """Test syncing arrow to true north"""
        layout = QgsLayout(QgsProject.instance())
        map_item = QgsLayoutItemMap(layout)
        map_item.attemptSetSceneRect(QRectF(0, 0, 10, 10))
        map_item.setCrs(QgsCoordinateReferenceSystem.fromEpsgId(3575))
        map_item.setExtent(QgsRectangle(-2126029.962, -2200807.749, -119078.102, -757031.156))
        layout.addLayoutItem(map_item)
        handler = QgsLayoutNorthArrowHandler(layout)
        spy = QSignalSpy(handler.arrowRotationChanged)

        handler.setLinkedMap(map_item)
        self.assertEqual(handler.linkedMap(), map_item)
        self.assertEqual(len(spy), 0)
        handler.setNorthMode(QgsLayoutNorthArrowHandler.TrueNorth)
        self.assertAlmostEqual(handler.arrowRotation(), 37.20, 1)
        self.assertEqual(len(spy), 1)
        self.assertAlmostEqual(spy[-1][0], 37.20, 1)

        # shift map
        map_item.setExtent(QgsRectangle(2120672.293, -3056394.691, 2481640.226, -2796718.780))
        self.assertAlmostEqual(handler.arrowRotation(), -38.18, 1)
        self.assertEqual(len(spy), 2)
        self.assertAlmostEqual(spy[-1][0], -38.18, 1)

        # rotate map
        map_item.setMapRotation(45)
        self.assertAlmostEqual(handler.arrowRotation(), -38.18 + 45, 1)
        self.assertEqual(len(spy), 3)
        self.assertAlmostEqual(spy[-1][0], -38.18 + 45, 1)

        # add an offset
        handler.setNorthOffset(-10)
        self.assertAlmostEqual(handler.arrowRotation(), -38.18 + 35, 1)
        self.assertEqual(len(spy), 4)
        self.assertAlmostEqual(spy[-1][0], -38.18 + 35, 1)
# Allow running this test module directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
freesmartphone/framework | framework/subsystems/testing/testing.py | 1 | 4159 | #!/usr/bin/env python
"""
Dummy Subsystem for Testing Purposes
(C) 2008-2009 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
(C) 2008 Openmoko, Inc.
GPLv2 or later
Package: testing
Module: testing
"""
MODULE_NAME = "testing"
__version__ = "0.0.0"
from framework import resource
import dbus
import dbus.service
import gobject
import logging
logger = logging.getLogger( MODULE_NAME )
import time
DBUS_INTERFACE = "org.freesmartphone.Testing"
DBUS_OBJECT_PATH = "/org/freesmartphone/Testing"
#============================================================================#
class Resource( resource.Resource ):
#============================================================================#
    """Dummy resource for exercising the framework's resource handling.

    Each lifecycle transition (enabling, disabling, suspending, resuming)
    can be reconfigured at runtime over D-Bus to succeed ("ok"), fail
    ("error"), or be vetoed ("veto"), so clients can test every outcome.
    """
    def __init__( self, bus ):
        """Export the object on the bus and register it as resource "TEST"."""
        self.path = DBUS_OBJECT_PATH
        self.bus = bus
        # The very first _disable call right after registration is served
        # without the artificial delay, see _disable().
        self.virgin = True
        dbus.service.Object.__init__( self, bus, self.path )
        resource.Resource.__init__( self, bus, "TEST" )
        logger.info("%s %s at %s initialized.", self.__class__.__name__, __version__, self.path )
        # default behaviour: everything works
        self.catmap = { "enabling":"ok",
                        "disabling":"ok",
                        "suspending":"ok",
                        "resuming":"ok" }
    #
    # framework.Resource
    #
    def _enable( self, on_ok, on_error ):
        """Simulate a slow enable, then report the configured outcome."""
        logger.info( "enabling" )
        time.sleep( 5.0 )
        self._doit( "enabling", on_ok, on_error )
    def _disable( self, on_ok, on_error ):
        """Simulate disable; only the very first call skips the delay."""
        logger.info( "disabling" )
        if self.virgin:
            self.virgin = False
        else:
            time.sleep( 5.0 )
        self._doit( "disabling", on_ok, on_error )
    def _suspend( self, on_ok, on_error ):
        """Simulate a slow suspend, then report the configured outcome."""
        logger.info( "suspending" )
        time.sleep( 5.0 )
        self._doit( "suspending", on_ok, on_error )
    def _resume( self, on_ok, on_error ):
        """Simulate a slow resume, then report the configured outcome."""
        logger.info("resuming")
        time.sleep( 5.0 )
        self._doit( "resuming", on_ok, on_error )
    def _doit( self, category, on_ok, on_error ):
        """Invoke on_ok or on_error according to the configured behaviour."""
        action = self.catmap[ category ]
        if action == "ok":
            on_ok()
        elif action == "error":
            on_error( "unspecified" )
        elif action == "veto":
            on_error( resource.SuspendVeto( "not allowed to suspend this resource" ) )
        else:
            # SetResourceBehaviour validates its input, so this can only be
            # reached if catmap was corrupted programmatically. Previously
            # this was a bare 'foobar' placeholder raising NameError.
            raise AssertionError( "invalid behaviour '%s' for category '%s'" % ( action, category ) )
    #
    # dbus interface
    #
    # BUGFIX: the input signature was "" although the method takes two
    # string arguments, which made it uncallable over D-Bus.
    @dbus.service.method( DBUS_INTERFACE, "ss", "",
                          async_callbacks=( "dbus_ok", "dbus_error" ) )
    @resource.checkedmethod
    def SetResourceBehaviour( self, category, behaviour, dbus_ok, dbus_error ):
        """Configure how one lifecycle category behaves: ok, error, or veto."""
        if category not in self.catmap:
            dbus_error( "unknown category, valid categories are: %s" % self.catmap.keys() )
            return
        if behaviour not in ( "ok", "error", "veto" ):
            # BUGFIX: reject invalid behaviours; previously the error reply
            # was followed by the assignment and dbus_ok() anyway.
            dbus_error( "unknown behaviour. valid behaviours are: ok error veto" )
            return
        self.catmap[category] = str( behaviour )
        dbus_ok()
    @dbus.service.method( DBUS_INTERFACE, "", "aa{sv}",
                          async_callbacks=( "dbus_ok", "dbus_error" ) )
    @resource.checkedmethod
    def ReturnTest( self, dbus_ok, dbus_error ):
        """Return a fixed array of dicts, for marshalling tests."""
        d = {"foo":"bar"}
        dbus_ok( [d,d] )
    @dbus.service.method( DBUS_INTERFACE, "", "",
                          async_callbacks=( "dbus_ok", "dbus_error" ) )
    @resource.checkedmethod
    def SignalTest( self, dbus_ok, dbus_error ):
        """Emit the Test signal once, for signal delivery tests."""
        self.Test( dict(yo="kurt") )
        dbus_ok()
    @dbus.service.signal( DBUS_INTERFACE, "a{sv}" )
    def Test( self, asv ):
        logger.info( "emitting signal" )
#============================================================================#
def factory(prefix, controller):
#============================================================================#
    """Magic entry point invoked by the framework module manager."""
    dummy_resource = Resource( controller.bus )
    return [ dummy_resource ]
#============================================================================#
# This module is only useful when loaded via factory() by the framework;
# running it directly is deliberately a no-op.
if __name__ == "__main__":
#============================================================================#
    pass
| gpl-2.0 |
JohnDevitt/appengine-django-skeleton-master | lib/django/db/backends/utils.py | 430 | 6689 | from __future__ import unicode_literals
import datetime
import decimal
import hashlib
import logging
from time import time
from django.conf import settings
from django.utils.encoding import force_bytes
from django.utils.timezone import utc
logger = logging.getLogger('django.db.backends')
class CursorWrapper(object):
    """Thin wrapper around a DB-API cursor that translates driver
    exceptions via db.wrap_database_errors and guards against running
    queries inside a transaction broken by an earlier error.
    """
    def __init__(self, cursor, db):
        self.cursor = cursor  # the underlying DB-API cursor
        self.db = db  # the database wrapper owning this cursor
    # Cursor callables whose errors should be rewrapped when accessed.
    WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall', 'nextset'])
    def __getattr__(self, attr):
        # Delegate unknown attributes to the real cursor; the fetch/nextset
        # callables are wrapped so driver errors are translated on call.
        cursor_attr = getattr(self.cursor, attr)
        if attr in CursorWrapper.WRAP_ERROR_ATTRS:
            return self.db.wrap_database_errors(cursor_attr)
        else:
            return cursor_attr
    def __iter__(self):
        # Keep error translation active for the whole row iteration.
        with self.db.wrap_database_errors:
            for item in self.cursor:
                yield item
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        # Ticket #17671 - Close instead of passing thru to avoid backend
        # specific behavior. Catch errors liberally because errors in cleanup
        # code aren't useful.
        try:
            self.close()
        except self.db.Database.Error:
            pass
    # The following methods cannot be implemented in __getattr__, because the
    # code must run when the method is invoked, not just when it is accessed.
    def callproc(self, procname, params=None):
        # Refuse to run while the current transaction is known-broken.
        self.db.validate_no_broken_transaction()
        with self.db.wrap_database_errors:
            if params is None:
                return self.cursor.callproc(procname)
            else:
                return self.cursor.callproc(procname, params)
    def execute(self, sql, params=None):
        self.db.validate_no_broken_transaction()
        with self.db.wrap_database_errors:
            if params is None:
                # execute(sql) and execute(sql, params) are distinguished
                # because some drivers treat the two forms differently.
                return self.cursor.execute(sql)
            else:
                return self.cursor.execute(sql, params)
    def executemany(self, sql, param_list):
        self.db.validate_no_broken_transaction()
        with self.db.wrap_database_errors:
            return self.cursor.executemany(sql, param_list)
class CursorDebugWrapper(CursorWrapper):
    """CursorWrapper variant that additionally times each query and
    records it on db.queries_log (debug instrumentation)."""
    # XXX callproc isn't instrumented at this time.
    def execute(self, sql, params=None):
        start = time()
        try:
            return super(CursorDebugWrapper, self).execute(sql, params)
        finally:
            # Runs in 'finally' so failing queries are logged and timed too.
            stop = time()
            duration = stop - start
            sql = self.db.ops.last_executed_query(self.cursor, sql, params)
            self.db.queries_log.append({
                'sql': sql,
                'time': "%.3f" % duration,
            })
            logger.debug('(%.3f) %s; args=%s' % (duration, sql, params),
                extra={'duration': duration, 'sql': sql, 'params': params}
            )
    def executemany(self, sql, param_list):
        start = time()
        try:
            return super(CursorDebugWrapper, self).executemany(sql, param_list)
        finally:
            stop = time()
            duration = stop - start
            try:
                times = len(param_list)
            except TypeError:  # param_list could be an iterator
                times = '?'
            self.db.queries_log.append({
                'sql': '%s times: %s' % (times, sql),
                'time': "%.3f" % duration,
            })
            logger.debug('(%.3f) %s; args=%s' % (duration, sql, param_list),
                extra={'duration': duration, 'sql': sql, 'params': param_list}
            )
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
    """Convert an ISO "YYYY-MM-DD" database string to a datetime.date."""
    if not s:
        return None  # a null column value maps to None
    return datetime.date(*[int(part) for part in s.split('-')])
def typecast_time(s):  # does NOT store time zone information
    """Convert an "HH:MM:SS[.ffffff]" database string to a datetime.time."""
    if not s:
        return None
    hour, minutes, seconds = s.split(':')
    # Split off a fractional-seconds part if one is present.
    if '.' in seconds:
        seconds, fraction = seconds.split('.')
    else:
        fraction = '0'
    microseconds = int(float('.' + fraction) * 1000000)
    return datetime.time(int(hour), int(minutes), int(seconds), microseconds)
def typecast_timestamp(s):  # does NOT store time zone information
    """Convert "YYYY-MM-DD HH:MM:SS[.ffffff][±TZ]" to a datetime.datetime.

    Empty input returns None; a date-only string is delegated to
    typecast_date(). A trailing UTC-offset suffix is parsed but discarded;
    tzinfo is set to UTC only when settings.USE_TZ is enabled.
    """
    # "2005-07-29 15:48:00.590358-05"
    # "2005-07-29 09:56:00-05"
    if not s:
        return None
    if ' ' not in s:
        return typecast_date(s)
    d, t = s.split()
    # Extract timezone information, if it exists. Currently we just throw
    # it away, but in the future we may make use of it.
    if '-' in t:
        t, tz = t.split('-', 1)
        tz = '-' + tz
    elif '+' in t:
        t, tz = t.split('+', 1)
        tz = '+' + tz
    else:
        tz = ''
    dates = d.split('-')
    times = t.split(':')
    seconds = times[2]
    if '.' in seconds:  # check whether seconds have a fractional part
        seconds, microseconds = seconds.split('.')
    else:
        microseconds = '0'
    tzinfo = utc if settings.USE_TZ else None
    # Right-padding the fraction to six digits makes e.g. ".5" mean 500000 µs.
    return datetime.datetime(int(dates[0]), int(dates[1]), int(dates[2]),
        int(times[0]), int(times[1]), int(seconds),
        int((microseconds + '000000')[:6]), tzinfo)
def typecast_decimal(s):
    """Convert a database string to a decimal.Decimal (None for null/empty)."""
    if s in (None, ''):
        return None
    return decimal.Decimal(s)
###############################################
# Converters from Python to database (string) #
###############################################
def rev_typecast_decimal(d):
    """Convert a Decimal back to its string form for the database; None passes through."""
    return None if d is None else str(d)
def truncate_name(name, length=None, hash_len=4):
    """Shorten a string to a repeatable mangled version of at most `length`.

    Names that already fit are returned unchanged; longer names are cut
    and suffixed with the first `hash_len` hex digits of their MD5 so the
    result is both short and stable.
    """
    if length is None or len(name) <= length:
        return name
    digest = hashlib.md5(force_bytes(name)).hexdigest()[:hash_len]
    return '%s%s' % (name[:length - hash_len], digest)
def format_number(value, max_digits, decimal_places):
    """
    Formats a number into a string with the requisite number of digits and
    decimal places. None passes through unchanged.
    """
    if value is None:
        return None
    if isinstance(value, decimal.Decimal):
        context = decimal.getcontext().copy()
        if max_digits is not None:
            context.prec = max_digits
        if decimal_places is None:
            # No fixed number of places: trap Rounded so values that do not
            # fit in the available precision raise instead of being rounded.
            context.traps[decimal.Rounded] = 1
            value = context.create_decimal(value)
        else:
            quantum = decimal.Decimal(".1") ** decimal_places
            value = value.quantize(quantum, context=context)
        return "{:f}".format(value)
    if decimal_places is not None:
        return "%.*f" % (decimal_places, value)
    return "{:f}".format(value)
| bsd-3-clause |
larsmans/scipy | scipy/sparse/linalg/matfuncs.py | 30 | 25870 | """
Sparse matrix functions
"""
#
# Authors: Travis Oliphant, March 2002
# Anthony Scopatz, August 2012 (Sparse Updates)
# Jake Vanderplas, August 2012 (Sparse Updates)
#
from __future__ import division, print_function, absolute_import
__all__ = ['expm', 'inv']
import math
import numpy as np
import scipy.misc
from scipy.linalg.basic import solve, solve_triangular
from scipy.sparse.base import isspmatrix
from scipy.sparse.construct import eye as speye
from scipy.sparse.linalg import spsolve
import scipy.sparse
import scipy.sparse.linalg
from scipy.sparse.linalg.interface import LinearOperator
UPPER_TRIANGULAR = 'upper_triangular'
def inv(A):
    """
    Compute the inverse of a sparse matrix
    Parameters
    ----------
    A : (M,M) ndarray or sparse matrix
        square matrix to be inverted
    Returns
    -------
    Ainv : (M,M) ndarray or sparse matrix
        inverse of `A`
    Notes
    -----
    The inverse is obtained by solving A X = I column-wise against an
    identity matrix in the same storage format as `A`. If the inverse is
    expected to be dense it will likely be faster to convert `A` to dense
    and use scipy.linalg.inv.
    .. versionadded:: 0.12.0
    """
    identity = speye(A.shape[0], A.shape[1], dtype=A.dtype, format=A.format)
    return spsolve(A, identity)
def _onenorm_matrix_power_nnm(A, p):
"""
Compute the 1-norm of a non-negative integer power of a non-negative matrix.
Parameters
----------
A : a square ndarray or matrix or sparse matrix
Input matrix with non-negative entries.
p : non-negative integer
The power to which the matrix is to be raised.
Returns
-------
out : float
The 1-norm of the matrix power p of A.
"""
# check input
if int(p) != p or p < 0:
raise ValueError('expected non-negative integer p')
p = int(p)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
# Explicitly make a column vector so that this works when A is a
# numpy matrix (in addition to ndarray and sparse matrix).
v = np.ones((A.shape[0], 1), dtype=float)
M = A.T
for i in range(p):
v = M.dot(v)
return max(v)
def _onenorm(A):
# A compatibility function which should eventually disappear.
# This is copypasted from expm_action.
if scipy.sparse.isspmatrix(A):
return max(abs(A).sum(axis=0).flat)
else:
return np.linalg.norm(A, 1)
def _ident_like(A):
# A compatibility function which should eventually disappear.
# This is copypasted from expm_action.
if scipy.sparse.isspmatrix(A):
return scipy.sparse.construct.eye(A.shape[0], A.shape[1],
dtype=A.dtype, format=A.format)
else:
return np.eye(A.shape[0], A.shape[1], dtype=A.dtype)
def _count_nonzero(A):
# A compatibility function which should eventually disappear.
#XXX There should be a better way to do this when A is sparse
# in the traditional sense.
if isspmatrix(A):
return np.sum(A.toarray() != 0)
else:
return np.count_nonzero(A)
def _is_upper_triangular(A):
    # This predicate could possibly be of wider interest.
    if not isspmatrix(A):
        return _count_nonzero(np.tril(A, -1)) == 0
    lower_part = scipy.sparse.tril(A, -1)
    # Structural emptiness is cheap to test; otherwise count whether every
    # stored entry below the diagonal is a coincidental explicit zero.
    return lower_part.nnz == 0 or _count_nonzero(lower_part) == 0
def _smart_matrix_product(A, B, alpha=None, structure=None):
    """
    A matrix product that knows about sparse and structured matrices.
    Parameters
    ----------
    A : 2d ndarray
        First matrix.
    B : 2d ndarray
        Second matrix.
    alpha : float
        The matrix product will be scaled by this constant.
    structure : str, optional
        A string describing the structure of both matrices `A` and `B`.
        Only `upper_triangular` is currently supported.
    Returns
    -------
    M : 2d ndarray
        Matrix product of A and B.
    """
    if len(A.shape) != 2:
        raise ValueError('expected A to be a rectangular matrix')
    if len(B.shape) != 2:
        raise ValueError('expected B to be a rectangular matrix')
    f = None
    if structure == UPPER_TRIANGULAR and not isspmatrix(A) and not isspmatrix(B):
        # Dense triangular inputs: use the BLAS trmm routine when available.
        f, = scipy.linalg.get_blas_funcs(('trmm',), (A, B))
    if f is not None:
        return f(1. if alpha is None else alpha, A, B)
    product = A.dot(B)
    return product if alpha is None else alpha * product
class MatrixPowerOperator(LinearOperator):
    """LinearOperator representing A**p without forming the power
    explicitly; applications multiply by A repeatedly p times."""
    def __init__(self, A, p, structure=None):
        if A.ndim != 2 or A.shape[0] != A.shape[1]:
            raise ValueError('expected A to be like a square matrix')
        if p < 0:
            raise ValueError('expected p to be a non-negative integer')
        self._A = A
        self._p = p
        # Structure hint (e.g. upper triangular) forwarded to the
        # matrix-product helper in _matmat.
        self._structure = structure
        self.dtype = A.dtype
        self.ndim = A.ndim
        self.shape = A.shape
    def _matvec(self, x):
        for i in range(self._p):
            x = self._A.dot(x)
        return x
    def _rmatvec(self, x):
        # Adjoint application: multiply by A.T p times.
        A_T = self._A.T
        x = x.ravel()
        for i in range(self._p):
            x = A_T.dot(x)
        return x
    def _matmat(self, X):
        for i in range(self._p):
            X = _smart_matrix_product(self._A, X, structure=self._structure)
        return X
    @property
    def T(self):
        # NOTE(review): the structure hint is not propagated to the
        # transposed operator here — confirm whether that is intentional.
        return MatrixPowerOperator(self._A.T, self._p)
class ProductOperator(LinearOperator):
    """
    LinearOperator representing the product of several matrices without
    forming the product explicitly.
    For now, this is limited to products of multiple square matrices.
    """
    def __init__(self, *args, **kwargs):
        # Optional structure hint (e.g. upper triangular) applied to all
        # factors when multiplying matrices in _matmat.
        self._structure = kwargs.get('structure', None)
        for A in args:
            if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
                raise ValueError(
                        'For now, the ProductOperator implementation is '
                        'limited to the product of multiple square matrices.')
        if args:
            n = args[0].shape[0]
            for A in args:
                for d in A.shape:
                    if d != n:
                        raise ValueError(
                                'The square matrices of the ProductOperator '
                                'must all have the same shape.')
            self.shape = (n, n)
            self.ndim = len(self.shape)
        # Result dtype follows numpy's promotion rules over all factors.
        self.dtype = np.find_common_type([x.dtype for x in args], [])
        self._operator_sequence = args
    def _matvec(self, x):
        # Apply factors right-to-left, as in an ordinary matrix product.
        for A in reversed(self._operator_sequence):
            x = A.dot(x)
        return x
    def _rmatvec(self, x):
        # Adjoint: transposed factors applied left-to-right.
        x = x.ravel()
        for A in self._operator_sequence:
            x = A.T.dot(x)
        return x
    def _matmat(self, X):
        for A in reversed(self._operator_sequence):
            X = _smart_matrix_product(A, X, structure=self._structure)
        return X
    @property
    def T(self):
        # The transpose reverses the factor order and transposes each factor.
        # NOTE(review): the structure hint is not propagated here.
        T_args = [A.T for A in reversed(self._operator_sequence)]
        return ProductOperator(*T_args)
def _onenormest_matrix_power(A, p,
        t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
    """
    Efficiently estimate the 1-norm of A^p.
    Parameters
    ----------
    A : ndarray
        Matrix whose 1-norm of a power is to be computed.
    p : int
        Non-negative integer power.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
        Larger values take longer and use more memory
        but give more accurate output.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.
    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.
    Notes
    -----
    The parameters `t`, `itmax`, `compute_v` and `compute_w` are accepted
    for interface compatibility but are not currently forwarded to
    `scipy.sparse.linalg.onenormest`.
    """
    return scipy.sparse.linalg.onenormest(
            MatrixPowerOperator(A, p, structure=structure))
def _onenormest_product(operator_seq,
        t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
    """
    Efficiently estimate the 1-norm of the matrix product of the args.
    Parameters
    ----------
    operator_seq : linear operator sequence
        Matrices whose 1-norm of product is to be computed.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
        Larger values take longer and use more memory
        but give more accurate output.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.
    structure : str, optional
        A string describing the structure of all operators.
        Only `upper_triangular` is currently supported.
    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.
    Notes
    -----
    The parameters `t`, `itmax`, `compute_v` and `compute_w` are accepted
    for interface compatibility but are not currently forwarded to
    `scipy.sparse.linalg.onenormest`.
    """
    return scipy.sparse.linalg.onenormest(
            ProductOperator(*operator_seq, structure=structure))
class _ExpmPadeHelper(object):
    """
    Help lazily evaluate a matrix exponential.
    The idea is to not do more work than we need for high expm precision,
    so we lazily compute matrix powers and store or precompute
    other properties of the matrix.
    """
    def __init__(self, A, structure=None, use_exact_onenorm=False):
        """
        Initialize the object.
        Parameters
        ----------
        A : a dense or sparse square numpy matrix or ndarray
            The matrix to be exponentiated.
        structure : str, optional
            A string describing the structure of matrix `A`.
            Only `upper_triangular` is currently supported.
        use_exact_onenorm : bool, optional
            If True then only the exact one-norm of matrix powers and products
            will be used. Otherwise, the one-norm of powers and products
            may initially be estimated.
        """
        self.A = A
        # Lazily computed powers of A, cached on first property access.
        self._A2 = None
        self._A4 = None
        self._A6 = None
        self._A8 = None
        self._A10 = None
        # d_k = ||A^k||_1 ** (1/k): exact values and cheaper estimates
        # are cached separately so either can be reused.
        self._d4_exact = None
        self._d6_exact = None
        self._d8_exact = None
        self._d10_exact = None
        self._d4_approx = None
        self._d6_approx = None
        self._d8_approx = None
        self._d10_approx = None
        self.ident = _ident_like(A)
        self.structure = structure
        self.use_exact_onenorm = use_exact_onenorm
    @property
    def A2(self):
        # A**2, computed once and cached.
        if self._A2 is None:
            self._A2 = _smart_matrix_product(
                    self.A, self.A, structure=self.structure)
        return self._A2
    @property
    def A4(self):
        # A**4 = (A**2)**2, computed once and cached.
        if self._A4 is None:
            self._A4 = _smart_matrix_product(
                    self.A2, self.A2, structure=self.structure)
        return self._A4
    @property
    def A6(self):
        # A**6 = A**4 * A**2, computed once and cached.
        if self._A6 is None:
            self._A6 = _smart_matrix_product(
                    self.A4, self.A2, structure=self.structure)
        return self._A6
    @property
    def A8(self):
        # A**8 = A**6 * A**2, computed once and cached.
        if self._A8 is None:
            self._A8 = _smart_matrix_product(
                    self.A6, self.A2, structure=self.structure)
        return self._A8
    @property
    def A10(self):
        # A**10 = A**4 * A**6, computed once and cached.
        if self._A10 is None:
            self._A10 = _smart_matrix_product(
                    self.A4, self.A6, structure=self.structure)
        return self._A10
    @property
    def d4_tight(self):
        # Exact d_4 = ||A^4||_1 ** (1/4).
        if self._d4_exact is None:
            self._d4_exact = _onenorm(self.A4)**(1/4.)
        return self._d4_exact
    @property
    def d6_tight(self):
        # Exact d_6 = ||A^6||_1 ** (1/6).
        if self._d6_exact is None:
            self._d6_exact = _onenorm(self.A6)**(1/6.)
        return self._d6_exact
    @property
    def d8_tight(self):
        # Exact d_8 = ||A^8||_1 ** (1/8).
        if self._d8_exact is None:
            self._d8_exact = _onenorm(self.A8)**(1/8.)
        return self._d8_exact
    @property
    def d10_tight(self):
        # Exact d_10 = ||A^10||_1 ** (1/10).
        if self._d10_exact is None:
            self._d10_exact = _onenorm(self.A10)**(1/10.)
        return self._d10_exact
    @property
    def d4_loose(self):
        # d_4 via a cheap norm estimate, unless exact values are required
        # or an exact value is already cached.
        if self.use_exact_onenorm:
            return self.d4_tight
        if self._d4_exact is not None:
            return self._d4_exact
        else:
            if self._d4_approx is None:
                self._d4_approx = _onenormest_matrix_power(self.A2, 2,
                        structure=self.structure)**(1/4.)
            return self._d4_approx
    @property
    def d6_loose(self):
        # d_6 via a cheap norm estimate, unless exact values are available.
        if self.use_exact_onenorm:
            return self.d6_tight
        if self._d6_exact is not None:
            return self._d6_exact
        else:
            if self._d6_approx is None:
                self._d6_approx = _onenormest_matrix_power(self.A2, 3,
                        structure=self.structure)**(1/6.)
            return self._d6_approx
    @property
    def d8_loose(self):
        # d_8 via a cheap norm estimate, unless exact values are available.
        if self.use_exact_onenorm:
            return self.d8_tight
        if self._d8_exact is not None:
            return self._d8_exact
        else:
            if self._d8_approx is None:
                self._d8_approx = _onenormest_matrix_power(self.A4, 2,
                        structure=self.structure)**(1/8.)
            return self._d8_approx
    @property
    def d10_loose(self):
        # d_10 via a cheap norm estimate, unless exact values are available.
        if self.use_exact_onenorm:
            return self.d10_tight
        if self._d10_exact is not None:
            return self._d10_exact
        else:
            if self._d10_approx is None:
                self._d10_approx = _onenormest_product((self.A4, self.A6),
                        structure=self.structure)**(1/10.)
            return self._d10_approx
    def pade3(self):
        # Degree-3 Pade numerator pieces: U gathers the odd-degree terms
        # (factored through A), V the even-degree terms.
        b = (120., 60., 12., 1.)
        U = _smart_matrix_product(self.A,
                b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[2]*self.A2 + b[0]*self.ident
        return U, V
    def pade5(self):
        # Degree-5 Pade pieces; same odd/even split as pade3.
        b = (30240., 15120., 3360., 420., 30., 1.)
        U = _smart_matrix_product(self.A,
                b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
        return U, V
    def pade7(self):
        # Degree-7 Pade pieces; same odd/even split as pade3.
        b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
        U = _smart_matrix_product(self.A,
                b[7]*self.A6 + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[6]*self.A6 + b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
        return U, V
    def pade9(self):
        # Degree-9 Pade pieces; same odd/even split as pade3.
        b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
                2162160., 110880., 3960., 90., 1.)
        U = _smart_matrix_product(self.A,
                (b[9]*self.A8 + b[7]*self.A6 + b[5]*self.A4 +
                    b[3]*self.A2 + b[1]*self.ident),
                structure=self.structure)
        V = (b[8]*self.A8 + b[6]*self.A6 + b[4]*self.A4 +
                b[2]*self.A2 + b[0]*self.ident)
        return U, V
    def pade13_scaled(self, s):
        # Degree-13 Pade pieces evaluated at the scaled matrix B = 2**-s A.
        # Powers of B are obtained by rescaling the cached powers of A.
        b = (64764752532480000., 32382376266240000., 7771770303897600.,
                1187353796428800., 129060195264000., 10559470521600.,
                670442572800., 33522128640., 1323241920., 40840800., 960960.,
                16380., 182., 1.)
        B = self.A * 2**-s
        B2 = self.A2 * 2**(-2*s)
        B4 = self.A4 * 2**(-4*s)
        B6 = self.A6 * 2**(-6*s)
        U2 = _smart_matrix_product(B6,
                b[13]*B6 + b[11]*B4 + b[9]*B2,
                structure=self.structure)
        U = _smart_matrix_product(B,
                (U2 + b[7]*B6 + b[5]*B4 +
                    b[3]*B2 + b[1]*self.ident),
                structure=self.structure)
        V2 = _smart_matrix_product(B6,
                b[12]*B6 + b[10]*B4 + b[8]*B2,
                structure=self.structure)
        V = V2 + b[6]*B6 + b[4]*B4 + b[2]*B2 + b[0]*self.ident
        return U, V
def expm(A):
    """
    Compute the matrix exponential using Pade approximation.
    Parameters
    ----------
    A : (M,M) array_like or sparse matrix
        2D Array or Matrix (sparse or dense) to be exponentiated
    Returns
    -------
    expA : (M,M) ndarray
        Matrix exponential of `A`
    Notes
    -----
    This is algorithm (6.1) which is a simplification of algorithm (5.1).
    .. versionadded:: 0.12.0
    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
           "A New Scaling and Squaring Algorithm for the Matrix Exponential."
           SIAM Journal on Matrix Analysis and Applications.
           31 (3). pp. 970-989. ISSN 1095-7162
    """
    # Delegate to the core; 'auto' lets it choose between exact and
    # estimated one-norms based on the matrix order.
    return _expm(A, use_exact_onenorm='auto')
def _expm(A, use_exact_onenorm):
    # Core of expm, separated to allow testing exact and approximate
    # algorithms.
    # Avoid indiscriminate asarray() to allow sparse or other strange arrays.
    if isinstance(A, (list, tuple)):
        A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')
    # Trivial case: a 1x1 matrix exponentiates elementwise.
    if A.shape == (1, 1):
        out = [[np.exp(A[0, 0])]]
        # Avoid indiscriminate casting to ndarray to
        # allow for sparse or other strange arrays
        if isspmatrix(A):
            return A.__class__(out)
        return np.array(out)
    # Detect upper triangularity so triangular-aware products/solves and
    # the Code Fragment 2.1 diagonal correction can be used.
    structure = UPPER_TRIANGULAR if _is_upper_triangular(A) else None
    if use_exact_onenorm == "auto":
        # Hardcode a matrix order threshold for exact vs. estimated one-norms.
        use_exact_onenorm = A.shape[0] < 200
    # Track functions of A to help compute the matrix exponential.
    h = _ExpmPadeHelper(
            A, structure=structure, use_exact_onenorm=use_exact_onenorm)
    # Try Pade order 3; the numeric literals below are the degree-dependent
    # acceptance thresholds of the Al-Mohy/Higham 2009 algorithm, and _ell
    # provides the rounding-error safety check.
    eta_1 = max(h.d4_loose, h.d6_loose)
    if eta_1 < 1.495585217958292e-002 and _ell(h.A, 3) == 0:
        U, V = h.pade3()
        return _solve_P_Q(U, V, structure=structure)
    # Try Pade order 5.
    eta_2 = max(h.d4_tight, h.d6_loose)
    if eta_2 < 2.539398330063230e-001 and _ell(h.A, 5) == 0:
        U, V = h.pade5()
        return _solve_P_Q(U, V, structure=structure)
    # Try Pade orders 7 and 9.
    eta_3 = max(h.d6_tight, h.d8_loose)
    if eta_3 < 9.504178996162932e-001 and _ell(h.A, 7) == 0:
        U, V = h.pade7()
        return _solve_P_Q(U, V, structure=structure)
    if eta_3 < 2.097847961257068e+000 and _ell(h.A, 9) == 0:
        U, V = h.pade9()
        return _solve_P_Q(U, V, structure=structure)
    # Use Pade order 13 with scaling-and-squaring: choose s so that the
    # scaled norm estimate drops below theta_13, plus the _ell increment.
    eta_4 = max(h.d8_loose, h.d10_loose)
    eta_5 = min(eta_3, eta_4)
    theta_13 = 4.25
    s = max(int(np.ceil(np.log2(eta_5 / theta_13))), 0)
    s = s + _ell(2**-s * h.A, 13)
    U, V = h.pade13_scaled(s)
    X = _solve_P_Q(U, V, structure=structure)
    if structure == UPPER_TRIANGULAR:
        # Invoke Code Fragment 2.1.
        X = _fragment_2_1(X, h.A, s)
    else:
        # X = r_13(A)^(2^s) by repeated squaring.
        for i in range(s):
            X = X.dot(X)
    return X
def _solve_P_Q(U, V, structure=None):
"""
A helper function for expm_2009.
Parameters
----------
U : ndarray
Pade numerator.
V : ndarray
Pade denominator.
structure : str, optional
A string describing the structure of both matrices `U` and `V`.
Only `upper_triangular` is currently supported.
Notes
-----
The `structure` argument is inspired by similar args
for theano and cvxopt functions.
"""
P = U + V
Q = -U + V
if isspmatrix(U):
return spsolve(Q, P)
elif structure is None:
return solve(Q, P)
elif structure == UPPER_TRIANGULAR:
return solve_triangular(Q, P)
else:
raise ValueError('unsupported matrix structure: ' + str(structure))
def _sinch(x):
"""
Stably evaluate sinch.
Notes
-----
The strategy of falling back to a sixth order Taylor expansion
was suggested by the Spallation Neutron Source docs
which was found on the internet by google search.
http://www.ornl.gov/~t6p/resources/xal/javadoc/gov/sns/tools/math/ElementaryFunction.html
The details of the cutoff point and the Horner-like evaluation
was picked without reference to anything in particular.
Note that sinch is not currently implemented in scipy.special,
whereas the "engineer's" definition of sinc is implemented.
The implementation of sinc involves a scaling factor of pi
that distinguishes it from the "mathematician's" version of sinc.
"""
# If x is small then use sixth order Taylor expansion.
# How small is small? I am using the point where the relative error
# of the approximation is less than 1e-14.
# If x is large then directly evaluate sinh(x) / x.
x2 = x*x
if abs(x) < 0.0135:
return 1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.)))
else:
return np.sinh(x) / x
def _eq_10_42(lam_1, lam_2, t_12):
    """
    Equation (10.42) of Functions of Matrices: Theory and Computation.
    Notes
    -----
    This is a helper function for _fragment_2_1 of expm_2009.
    Equation (10.42) is on page 251 in the section on Schur algorithms.
    In particular, section 10.4.3 explains the Schur-Parlett algorithm.
    expm([[lam_1, t_12], [0, lam_1])
    =
    [[exp(lam_1), t_12*exp((lam_1 + lam_2)/2)*sinch((lam_1 - lam_2)/2)],
    [0, exp(lam_2)]
    """
    # The plain formula t_12 * (exp(lam_2) - exp(lam_1)) / (lam_2 - lam_1)
    # suffers from cancellation (per Higham's textbook); routing the
    # computation through sinch avoids it.
    mean = 0.5 * (lam_1 + lam_2)
    half_diff = 0.5 * (lam_1 - lam_2)
    return t_12 * np.exp(mean) * _sinch(half_diff)
def _fragment_2_1(X, T, s):
    """
    A helper function for expm_2009: repeated squaring for upper
    triangular T, refreshing the diagonal and first superdiagonal from
    exact formulas after each squaring to curb error growth.
    Notes
    -----
    The argument X is modified in-place, but this modification is not the same
    as the returned value of the function.
    This function also takes pains to do things in ways that are compatible
    with sparse matrices, for example by avoiding fancy indexing
    and by using methods of the matrices whenever possible instead of
    using functions of the numpy or scipy libraries themselves.
    """
    # Form X = r_m(2^-s T)
    # Replace diag(X) by exp(2^-s diag(T)).
    n = X.shape[0]
    diag_T = np.ravel(T.diagonal().copy())
    # Replace diag(X) by exp(2^-s diag(T)).
    scale = 2 ** -s
    exp_diag = np.exp(scale * diag_T)
    for k in range(n):
        X[k, k] = exp_diag[k]
    # Square s times, patching up the diagonal and superdiagonal each time.
    for i in range(s-1, -1, -1):
        X = X.dot(X)
        # Replace diag(X) by exp(2^-i diag(T)).
        scale = 2 ** -i
        exp_diag = np.exp(scale * diag_T)
        for k in range(n):
            X[k, k] = exp_diag[k]
        # Replace (first) superdiagonal of X by explicit formula
        # for superdiagonal of exp(2^-i T) from Eq (10.42) of
        # the author's 2008 textbook
        # Functions of Matrices: Theory and Computation.
        for k in range(n-1):
            lam_1 = scale * diag_T[k]
            lam_2 = scale * diag_T[k+1]
            t_12 = scale * T[k, k+1]
            value = _eq_10_42(lam_1, lam_2, t_12)
            X[k, k+1] = value
    # Return the updated X matrix.
    return X
def _ell(A, m):
    """
    A helper function for expm_2009.

    Parameters
    ----------
    A : linear operator
        A linear operator whose norm of power we care about.
    m : int
        The power of the linear operator

    Returns
    -------
    value : int
        A value related to a bound.
    """
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')
    p = 2*m + 1
    # The c_i are explained in (2.2) and (2.6) of the 2005 expm paper.
    # They are coefficients of terms of a generating function series expansion.
    # NOTE(review): scipy.misc.comb was deprecated and later removed from
    # SciPy; modern code would use scipy.special.comb -- confirm the SciPy
    # version this targets before changing.
    choose_2p_p = scipy.misc.comb(2*p, p, exact=True)
    abs_c_recip = float(choose_2p_p * math.factorial(2*p + 1))
    # This is explained after Eq. (1.2) of the 2009 expm paper.
    # It is the "unit roundoff" of IEEE double precision arithmetic.
    u = 2**-53
    # Compute the one-norm of matrix power p of abs(A).
    A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), p)
    # Treat zero norm as a special case.
    if not A_abs_onenorm:
        return 0
    alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip)
    log2_alpha_div_u = np.log2(alpha/u)
    # Round the bound up to an integer and never return a negative value.
    value = int(np.ceil(log2_alpha_div_u / (2 * m)))
    return max(value, 0)
| bsd-3-clause |
victorbriz/rethinkdb | external/v8_3.30.33.16/buildtools/checkdeps/java_checker.py | 46 | 4264 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Checks Java files for illegal imports."""
import codecs
import os
import re
import results
from rules import Rule
class JavaChecker(object):
"""Import checker for Java files.
The CheckFile method uses real filesystem paths, but Java imports work in
terms of package names. To deal with this, we have an extra "prescan" pass
that reads all the .java files and builds a mapping of class name -> filepath.
In CheckFile, we convert each import statement into a real filepath, and check
that against the rules in the DEPS files.
Note that in Java you can always use classes in the same directory without an
explicit import statement, so these imports can't be blocked with DEPS files.
But that shouldn't be a problem, because same-package imports are pretty much
always correct by definition. (If we find a case where this is *not* correct,
it probably means the package is too big and needs to be split up.)
Properties:
_classmap: dict of fully-qualified Java class name -> filepath
"""
EXTENSIONS = ['.java']
def __init__(self, base_directory, verbose):
self._base_directory = base_directory
self._verbose = verbose
self._classmap = {}
self._PrescanFiles()
def _PrescanFiles(self):
for root, dirs, files in os.walk(self._base_directory):
# Skip unwanted subdirectories. TODO(husky): it would be better to do
# this via the skip_child_includes flag in DEPS files. Maybe hoist this
# prescan logic into checkdeps.py itself?
for d in dirs:
# Skip hidden directories.
if d.startswith('.'):
dirs.remove(d)
# Skip the "out" directory, as dealing with generated files is awkward.
# We don't want paths like "out/Release/lib.java" in our DEPS files.
# TODO(husky): We need some way of determining the "real" path to
# a generated file -- i.e., where it would be in source control if
# it weren't generated.
if d == 'out':
dirs.remove(d)
# Skip third-party directories.
if d in ('third_party', 'ThirdParty'):
dirs.remove(d)
for f in files:
if f.endswith('.java'):
self._PrescanFile(os.path.join(root, f))
def _PrescanFile(self, filepath):
if self._verbose:
print 'Prescanning: ' + filepath
with codecs.open(filepath, encoding='utf-8') as f:
short_class_name, _ = os.path.splitext(os.path.basename(filepath))
for line in f:
for package in re.findall('^package\s+([\w\.]+);', line):
full_class_name = package + '.' + short_class_name
if full_class_name in self._classmap:
print 'WARNING: multiple definitions of %s:' % full_class_name
print ' ' + filepath
print ' ' + self._classmap[full_class_name]
print
else:
self._classmap[full_class_name] = filepath
return
print 'WARNING: no package definition found in %s' % filepath
def CheckFile(self, rules, filepath):
if self._verbose:
print 'Checking: ' + filepath
dependee_status = results.DependeeStatus(filepath)
with codecs.open(filepath, encoding='utf-8') as f:
for line in f:
for clazz in re.findall('^import\s+(?:static\s+)?([\w\.]+)\s*;', line):
if clazz not in self._classmap:
# Importing a class from outside the Chromium tree. That's fine --
# it's probably a Java or Android system class.
continue
include_path = os.path.relpath(
self._classmap[clazz], self._base_directory)
# Convert Windows paths to Unix style, as used in DEPS files.
include_path = include_path.replace(os.path.sep, '/')
rule = rules.RuleApplyingTo(include_path, filepath)
if rule.allow == Rule.DISALLOW:
dependee_status.AddViolation(
results.DependencyViolation(include_path, rule, rules))
if '{' in line:
# This is code, so we're finished reading imports for this file.
break
return dependee_status
| agpl-3.0 |
ben-e-whitney/the-points-chart | utilities/views.py | 1 | 2100 | from django.shortcuts import render
import decimal
class TableElement:
    """
    Carry the data needed to render one cell of an HTML table.

    Rendering is left entirely to the template; this object only bundles
    the cell's pieces together.
    """
    def __init__(self, title=None, CSS_classes=None, content=None):
        # Title text, CSS class string, and cell body; how each is used is
        # decided by the template.
        self.content = content
        self.CSS_classes = CSS_classes
        self.title = title
class TableParent(TableElement):
    """
    Group several table elements under one parent element.

    The children may themselves be TableElements or TableParents; how the
    group is displayed is left up to the template.
    """
    def __init__(self, **kwargs):
        # 'children' is required; every remaining keyword is forwarded
        # unchanged to TableElement.
        children = kwargs.pop('children')
        super().__init__(**kwargs)
        self.children = children
def format_balance(balance=None, load=None,
    endpoints=(-float('inf'), -0.35, -0.15, 0.15, 0.35, float('inf')),
    possible_CSS_classes=('very_low_balance', 'low_balance', 'OK_balance',
    'high_balance', 'very_high_balance')):
    """
    Format the balance with styling according to the balance:load ratio.

    Arguments:
    balance: decimal.Decimal balance to display.
    load: decimal.Decimal load used as the denominator of the ratio.
    endpoints: increasing breakpoints of the ratio scale; must contain
        exactly one more element than possible_CSS_classes.
    possible_CSS_classes: one CSS class name per ratio interval
        [endpoints[i], endpoints[i+1]).

    Division by zero (nonzero balance, zero load) pushes the ratio past the
    outermost endpoint on the side matching the balance's sign; an invalid
    operation (e.g. 0/0) is treated as a ratio of exactly zero.

    Returns a dict with keys 'value', 'formatted_value', 'html_title', and
    'CSS_class', ready for template consumption.

    Raises ValueError if endpoints and possible_CSS_classes have
    incompatible lengths.
    """
    if (len(endpoints) != 1+len(possible_CSS_classes)):
        # Each CSS class needs exactly one [endpoints[i], endpoints[i+1])
        # interval; previously this raised a bare, message-less ValueError.
        raise ValueError(
            'endpoints must contain exactly one more element than '
            'possible_CSS_classes')
    def sign_int(balance):
        """
        Return input with a sign character prepended.
        """
        balance = int(balance.to_integral_value())
        if balance >= 0:
            return '+{bal}'.format(bal=balance)
        else:
            #Note that '−' is Unicode character U+2212, not a hyphen.
            return '−{bal}'.format(bal=abs(balance))
    try:
        ratio = balance/load
    except decimal.DivisionByZero:
        # Nonzero balance over zero load: force the ratio beyond the
        # outermost endpoint on the matching side.
        ratio = endpoints[-1]+1 if balance >= 0 else endpoints[0]-1
    except decimal.InvalidOperation:
        # 0/0: a zero balance over a zero load counts as exactly balanced.
        ratio = 0
    for i, CSS_class in enumerate(possible_CSS_classes):
        if endpoints[i] <= ratio < endpoints[i+1]:
            # We will use the value of `CSS_class`. If we never make it to
            # this block, `CSS_class` ends up `possible_CSS_classes[-1]`.
            break
    return {
        'value': float(balance),
        'formatted_value': sign_int(balance),
        'html_title': 'Exact value: {val}'.format(val=balance),
        'CSS_class': ' '.join(('balance', CSS_class)),
    }
| gpl-3.0 |
baby2710/plugin.video.zeta | vipracing.py | 186 | 18923 | import math
import urllib, urllib2
import re
def vip_unlockmeta(meta):
    """Recover the URL-quoted string hidden in an obfuscated one.

    Every character of ``meta`` whose index is divisible by three is a
    filler byte standing in for a '%'; restoring the percent signs yields
    an ordinary URL-quoted string, which is then unquoted.
    """
    quoted = ''.join('%' if i % 3 == 0 else ch
                     for i, ch in enumerate(meta))
    return urllib.unquote(quoted)
def get_html(meta,data):
    """Decode the obfuscated embed-player payload into text.

    ``meta`` is the obfuscated script for vip_unlockmeta(); once unlocked
    it contains an ``Array(...)`` literal used as a per-character lookup
    table.  ``data`` is the encoded payload, processed in chunks of up to
    1024 characters.  The decoding appears to be a base64-style scheme
    (input characters offset by 48, output bytes XOR-masked with 165) --
    TODO(review): confirm against the player's JavaScript.
    """
    meta_un=vip_unlockmeta(meta)
    oo=''
    x=data
    l = len(x)
    b = 1024.0
    i, j, r, p = 0,0,0,0
    s = 0
    w = 0
    # Pull the decode table out of the de-obfuscated script.
    str_pattern='Array\((.*?)\)'
    array_val=re.compile(str_pattern).findall(meta_un)[0]
    t_string = 't=['+array_val+']'
    # NOTE(review): exec of remotely scraped content. Only a list literal
    # is expected here, but this is still a code-injection hazard.
    exec(t_string)
    for j in range(int(math.ceil(l / b)), 0, -1):
        r = '';
        # Decode one chunk of up to 1024 input characters.
        for i in range( int(min(l, b)),0, -1):
            # Look up the input character's value and shift it into the
            # bit accumulator w; emit a masked byte whenever s is nonzero.
            w |= (t[ ord(x[p]) - 48]) << s;
            p+=1;
            if (s):
                r += chr(165 ^ w & 255);
                w >>= 8;
                s -= 2
            else:
                s = 6
            l-=1
        oo += r
    return oo
def getUrl(url, cookieJar=None,post=None, timeout=20, headers=None):
    """Fetch ``url`` and return the response body as a string.

    cookieJar: optional cookie jar threaded through the opener so cookies
        persist across calls.
    post: optional request body; when given, urllib2 issues a POST,
        otherwise a GET.
    timeout: socket timeout in seconds.
    headers: optional iterable of (name, value) pairs added to the request.
        A desktop-Chrome User-Agent is always sent, presumably to avoid
        bot blocking -- confirm the target sites still require it.
    """
    cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
    opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
    req = urllib2.Request(url)
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
    if headers:
        for h,hv in headers:
            req.add_header(h,hv)
    response = opener.open(req,post,timeout=timeout)
    link=response.read()
    response.close()
    return link;
def decrypt_vipracing(page_url, justHtml=False,doDecrypt=True,ref=None):
    """Scrape a stream page and assemble an rtmp playback URL string.

    page_url: the channel/embed page to scrape.
    justHtml: if True, return the decoded page HTML suffixed with
        'ThisPage[<url>]' instead of building the rtmp link.
    doDecrypt: if False, skip the embed-page chasing/decoding and scrape
        page_url's own HTML directly.
    ref: optional Referer header value for the initial request.

    Returns a string of the form '<streamer><app> playpath=... swfUrl=...
    token=... live=1 timeout=10 pageUrl=...' for an rtmp client.
    """
    if ref:
        headers=[('Referer',ref)]
        page_data=getUrl(page_url,headers=headers)
    else:
        page_data=getUrl(page_url)
    url=page_url
    if doDecrypt:
        # Chase the chain: channel page -> /embed iframe -> player page.
        str_pattern='src="(.*?(\/embed).*?)"'
        url=re.compile(str_pattern).findall(page_data)[0][0]
        meta,data='',''
        headers=[('Referer',page_url)]
        html=getUrl(url,headers=headers)
        str_pattern='\'(http.*?)\''
        url=re.compile(str_pattern).findall(html)[0]
        html=getUrl(url,headers=headers)
        # c="..." holds the obfuscated decode table; x("...") the payload.
        str_pattern='c=\"(.*?)\"'
        meta=re.compile(str_pattern).findall(html)
        if len(meta)>0 and len(meta[0])>0 and 'streamer' not in html:
            # Obfuscated player: decode the HTML before scraping it.
            meta=meta[0]
            str_pattern='x\(\"(.*?)\"\)'
            data=re.compile(str_pattern).findall(html)[0]
            un_chtml=get_html(meta,data);
            str_pattern='streamer.*[\'"](.*?)[\'"]'
        elif 'streamer\'' in html:
            # Plain player with single-quoted JS config.
            un_chtml=html
            str_pattern='streamer\': \'(.*?)\''
        else:
            un_chtml=html
            str_pattern='streamer.*[\'"](.*?)[\'"]'
    else:
        un_chtml=page_data
        str_pattern='streamer.*[\'"](.*?)[\'"]'
    if justHtml:
        return un_chtml+'ThisPage['+url+']'
    # NOTE(review): debug output left in place; consider removing.
    print str_pattern,un_chtml
    streamer=re.compile(str_pattern).findall(un_chtml)[0]
    streamer=streamer.replace('\\/','/')
    str_pattern='file[\'"]?: [\'"](.*?)[\'"]'
    # NOTE(review): `file` shadows the Python 2 builtin of the same name.
    file=re.compile(str_pattern).findall(un_chtml)[0].replace('.flv','')
    # The player fetches a session token via getJSON(); replicate that
    # request with the player page as Referer.
    str_pattern='getJSON\(\"(.*?)\"'
    token_url=re.compile(str_pattern).findall(un_chtml)[0]
    if token_url.startswith('//'): token_url='http:'+token_url
    headers=[('Referer',url)]
    token_html=getUrl(token_url,headers=headers)
    str_pattern='token":"(.*)"'
    token=re.compile(str_pattern).findall(token_html)[0]
    str_pattern='\'flash\', src: \'(.*?)\''
    swf=re.compile(str_pattern).findall(un_chtml)
    if not swf or len(swf)==0:
        # Alternate player markup uses flashplayer: "..." instead.
        str_pattern='flashplayer: [\'"](.*?)[\'"]'
        swf=re.compile(str_pattern).findall(un_chtml)
    swf=swf[0]
    app=''
    if '1935/' in streamer:
        # Split 'rtmp://host:1935/app' into the server part and the app
        # name, which are passed separately in the final URL.
        app=streamer.split('1935/')[1]
        app+=' app='+app
        streamer=streamer.split('1935/')[0]+'1935/'
    final_rtmp='%s%s playpath=%s swfUrl=%s token=%s live=1 timeout=10 pageUrl=%s'%(streamer,app,file,swf,token,url)
    return final_rtmp
#print decrypt_vipracing('http://www.direct2watch.com/embedplayer.php?width=653&height=410&channel=10&autoplay=true','http://vipracing.tv/channel/espn')
| gpl-2.0 |
untom/scikit-learn | sklearn/kernel_approximation.py | 258 | 17973 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps base on Fourier transforms.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
    """Approximate an RBF-kernel feature map via random Fourier features.

    Monte Carlo samples of the kernel's Fourier transform yield an
    explicit map whose inner products approximate the RBF kernel; this
    implements a variant of Random Kitchen Sinks [1].

    Read more in the :ref:`User Guide <rbf_kernel_approx>`.

    Parameters
    ----------
    gamma : float
        Parameter of RBF kernel: exp(-gamma * x^2)

    n_components : int
        Number of Monte Carlo samples per original feature, i.e. the
        dimensionality of the computed feature space.

    random_state : {int, RandomState}, optional
        Seed (if int) or generator (if RandomState instance) controlling
        the random draws.

    Notes
    -----
    See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
    Benjamin Recht.

    [1] "Weighted Sums of Random Kitchen Sinks: Replacing
    minimization with randomization in learning" by A. Rahimi and
    Benjamin Recht.
    (http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
    """
    def __init__(self, gamma=1., n_components=100, random_state=None):
        self.gamma = gamma
        self.n_components = n_components
        self.random_state = random_state

    def fit(self, X, y=None):
        """Draw the random projection used by the feature map.

        Only the number of features of X is used.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data.

        Returns
        -------
        self : object
            The fitted transformer.
        """
        X = check_array(X, accept_sparse='csr')
        rng = check_random_state(self.random_state)
        n_features = X.shape[1]
        # Random frequencies are Gaussian with std sqrt(2 * gamma).
        self.random_weights_ = (np.sqrt(2 * self.gamma) * rng.normal(
            size=(n_features, self.n_components)))
        # Random phase offsets, uniform on [0, 2*pi).
        self.random_offset_ = rng.uniform(0, 2 * np.pi,
                                          size=self.n_components)
        return self

    def transform(self, X, y=None):
        """Map X through the approximate feature map.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        check_is_fitted(self, 'random_weights_')
        X = check_array(X, accept_sparse='csr')
        embedded = safe_sparse_dot(X, self.random_weights_)
        embedded += self.random_offset_
        # Cosine and scaling are applied in place to avoid extra copies.
        np.cos(embedded, embedded)
        embedded *= np.sqrt(2.) / np.sqrt(self.n_components)
        return embedded
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
    """Monte Carlo feature map for the "skewed chi-squared" kernel.

    Random Fourier features are drawn from the kernel's Fourier transform
    so that inner products in the mapped space approximate the kernel.

    Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.

    Parameters
    ----------
    skewedness : float
        "skewedness" parameter of the kernel. Needs to be cross-validated.

    n_components : int
        Number of Monte Carlo samples per original feature, i.e. the
        dimensionality of the computed feature space.

    random_state : {int, RandomState}, optional
        Seed (if int) or generator (if RandomState instance) controlling
        the random draws.

    References
    ----------
    See "Random Fourier Approximations for Skewed Multiplicative Histogram
    Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.

    See also
    --------
    AdditiveChi2Sampler : A different approach for approximating an additive
        variant of the chi squared kernel.

    sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
    """
    def __init__(self, skewedness=1., n_components=100, random_state=None):
        self.skewedness = skewedness
        self.n_components = n_components
        self.random_state = random_state

    def fit(self, X, y=None):
        """Draw the random projection used by the feature map.

        Only the number of features of X is used.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        Returns
        -------
        self : object
            The fitted transformer.
        """
        X = check_array(X)
        rng = check_random_state(self.random_state)
        n_features = X.shape[1]
        uniform = rng.uniform(size=(n_features, self.n_components))
        # Push uniform samples through the inverse CDF of the sech
        # distribution to obtain the random frequencies.
        self.random_weights_ = (1. / np.pi
                                * np.log(np.tan(np.pi / 2. * uniform)))
        # Random phase offsets, uniform on [0, 2*pi).
        self.random_offset_ = rng.uniform(0, 2 * np.pi,
                                          size=self.n_components)
        return self

    def transform(self, X, y=None):
        """Map X through the approximate feature map.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data; every entry must be non-negative.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        check_is_fitted(self, 'random_weights_')
        X = as_float_array(X, copy=True)
        X = check_array(X, copy=False)
        if (X < 0).any():
            raise ValueError("X may not contain entries smaller than zero.")
        # Shift by the skewedness and take logs in place, then project
        # onto the random frequencies.
        X += self.skewedness
        np.log(X, X)
        embedded = safe_sparse_dot(X, self.random_weights_)
        embedded += self.random_offset_
        np.cos(embedded, embedded)
        embedded *= np.sqrt(2.) / np.sqrt(self.n_components)
        return embedded
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
    """Approximate feature map for additive chi2 kernel.

    Uses sampling the fourier transform of the kernel characteristic
    at regular intervals.

    Since the kernel that is to be approximated is additive, the components of
    the input vectors can be treated separately.  Each entry in the original
    space is transformed into 2*sample_steps+1 features, where sample_steps is
    a parameter of the method. Typical values of sample_steps include 1, 2 and
    3.

    Optimal choices for the sampling interval for certain data ranges can be
    computed (see the reference). The default values should be reasonable.

    Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.

    Parameters
    ----------
    sample_steps : int, optional
        Gives the number of (complex) sampling points.

    sample_interval : float, optional
        Sampling interval. Must be specified when sample_steps not in {1,2,3}.

    Notes
    -----
    This estimator approximates a slightly different version of the additive
    chi squared kernel then ``metric.additive_chi2`` computes.

    See also
    --------
    SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
        the chi squared kernel.

    sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.

    sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
        squared kernel.

    References
    ----------
    See `"Efficient additive kernels via explicit feature maps"
    <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
    A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
    2011
    """

    def __init__(self, sample_steps=2, sample_interval=None):
        self.sample_steps = sample_steps
        self.sample_interval = sample_interval

    def fit(self, X, y=None):
        """Set parameters.

        Nothing is learned from the data; this only validates X and
        derives ``sample_interval_`` from ``sample_steps`` when the user
        did not supply one explicitly.
        """
        X = check_array(X, accept_sparse='csr')
        if self.sample_interval is None:
            # See reference, figure 2 c)
            if self.sample_steps == 1:
                self.sample_interval_ = 0.8
            elif self.sample_steps == 2:
                self.sample_interval_ = 0.5
            elif self.sample_steps == 3:
                self.sample_interval_ = 0.4
            else:
                raise ValueError("If sample_steps is not in [1, 2, 3],"
                                 " you need to provide sample_interval")
        else:
            self.sample_interval_ = self.sample_interval
        return self

    def transform(self, X, y=None):
        """Apply approximate feature map to X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = (n_samples, n_features)

        Returns
        -------
        X_new : {array, sparse matrix}, \
               shape = (n_samples, n_features * (2*sample_steps + 1))
            Whether the return value is an array of sparse matrix depends on
            the type of the input X.
        """
        msg = ("%(name)s is not fitted. Call fit to set the parameters before"
               " calling transform")
        check_is_fitted(self, "sample_interval_", msg=msg)

        X = check_array(X, accept_sparse='csr')
        sparse = sp.issparse(X)

        # check if X has negative values. Doesn't play well with np.log.
        if ((X.data if sparse else X) < 0).any():
            raise ValueError("Entries of X must be non-negative.")
        # zeroth component
        # 1/cosh = sech
        # cosh(0) = 1.0
        # Dispatch to the dense or sparse implementation; both produce
        # the same feature layout.
        transf = self._transform_sparse if sparse else self._transform_dense
        return transf(X)

    def _transform_dense(self, X):
        # Zeros must stay exactly zero, so every step only writes the
        # entries selected by `non_zero`.
        non_zero = (X != 0.0)
        X_nz = X[non_zero]

        # j = 0 component: sqrt(x * interval); cosh(0) == 1 so there is
        # no normalization factor here.
        X_step = np.zeros_like(X)
        X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)

        X_new = [X_step]

        log_step_nz = self.sample_interval_ * np.log(X_nz)
        step_nz = 2 * X_nz * self.sample_interval_

        # j >= 1 components: a cos/sin pair per sampling step, scaled by
        # sqrt(2 * x * interval / cosh(pi * j * interval)).
        for j in range(1, self.sample_steps):
            factor_nz = np.sqrt(step_nz /
                                np.cosh(np.pi * j * self.sample_interval_))

            X_step = np.zeros_like(X)
            X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
            X_new.append(X_step)

            X_step = np.zeros_like(X)
            X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
            X_new.append(X_step)

        return np.hstack(X_new)

    def _transform_sparse(self, X):
        # Reuse the CSR structure of X: every output block has exactly
        # the same sparsity pattern, only the data vector changes.
        indices = X.indices.copy()
        indptr = X.indptr.copy()

        data_step = np.sqrt(X.data * self.sample_interval_)
        X_step = sp.csr_matrix((data_step, indices, indptr),
                               shape=X.shape, dtype=X.dtype, copy=False)
        X_new = [X_step]

        log_step_nz = self.sample_interval_ * np.log(X.data)
        step_nz = 2 * X.data * self.sample_interval_

        for j in range(1, self.sample_steps):
            factor_nz = np.sqrt(step_nz /
                                np.cosh(np.pi * j * self.sample_interval_))

            data_step = factor_nz * np.cos(j * log_step_nz)
            X_step = sp.csr_matrix((data_step, indices, indptr),
                                   shape=X.shape, dtype=X.dtype, copy=False)
            X_new.append(X_step)

            data_step = factor_nz * np.sin(j * log_step_nz)
            X_step = sp.csr_matrix((data_step, indices, indptr),
                                   shape=X.shape, dtype=X.dtype, copy=False)
            X_new.append(X_step)

        return sp.hstack(X_new)
class Nystroem(BaseEstimator, TransformerMixin):
    """Approximate a kernel map using a subset of the training data.

    Constructs an approximate feature map for an arbitrary kernel
    using a subset of the data as basis.

    Read more in the :ref:`User Guide <nystroem_kernel_approx>`.

    Parameters
    ----------
    kernel : string or callable, default="rbf"
        Kernel map to be approximated. A callable should accept two arguments
        and the keyword arguments passed to this object as kernel_params, and
        should return a floating point number.

    n_components : int
        Number of features to construct.
        How many data points will be used to construct the mapping.

    gamma : float, default=None
        Gamma parameter for the RBF, polynomial, exponential chi2 and
        sigmoid kernels. Interpretation of the default value is left to
        the kernel; see the documentation for sklearn.metrics.pairwise.
        Ignored by other kernels.

    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.

    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
        Ignored by other kernels.

    kernel_params : mapping of string to any, optional
        Additional parameters (keyword arguments) for kernel function passed
        as callable object.

    random_state : {int, RandomState}, optional
        If int, random_state is the seed used by the random number generator;
        if RandomState instance, random_state is the random number generator.

    Attributes
    ----------
    components_ : array, shape (n_components, n_features)
        Subset of training points used to construct the feature map.

    component_indices_ : array, shape (n_components)
        Indices of ``components_`` in the training set.

    normalization_ : array, shape (n_components, n_components)
        Normalization matrix needed for embedding.
        Square root of the kernel matrix on ``components_``.

    References
    ----------
    * Williams, C.K.I. and Seeger, M.
      "Using the Nystroem method to speed up kernel machines",
      Advances in neural information processing systems 2001

    * T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
      "Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
      Comparison",
      Advances in Neural Information Processing Systems 2012

    See also
    --------
    RBFSampler : An approximation to the RBF kernel using random Fourier
        features.

    sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
    """

    def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
                 kernel_params=None, n_components=100, random_state=None):
        self.kernel = kernel
        self.gamma = gamma
        self.coef0 = coef0
        self.degree = degree
        self.kernel_params = kernel_params
        self.n_components = n_components
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit estimator to data.

        Samples a subset of training points, computes kernel
        on these and computes normalization matrix.

        Parameters
        ----------
        X : array-like, shape=(n_samples, n_feature)
            Training data.
        """
        X = check_array(X, accept_sparse='csr')
        rnd = check_random_state(self.random_state)
        n_samples = X.shape[0]

        # get basis vectors
        if self.n_components > n_samples:
            # XXX should we just bail?
            n_components = n_samples
            warnings.warn("n_components > n_samples. This is not possible.\n"
                          "n_components was set to n_samples, which results"
                          " in inefficient evaluation of the full kernel.")
        else:
            n_components = self.n_components
        n_components = min(n_samples, n_components)
        inds = rnd.permutation(n_samples)
        basis_inds = inds[:n_components]
        basis = X[basis_inds]

        basis_kernel = pairwise_kernels(basis, metric=self.kernel,
                                        filter_params=True,
                                        **self._get_kernel_params())

        # sqrt of kernel matrix on basis vectors
        U, S, V = svd(basis_kernel)
        # Clip tiny singular values for numerical stability before the
        # inverse square root.
        S = np.maximum(S, 1e-12)
        self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
        self.components_ = basis
        # BUG FIX: this used to store the *full* permutation `inds`
        # (length n_samples), contradicting the documented shape
        # (n_components); keep only the indices actually used as basis.
        self.component_indices_ = basis_inds
        return self

    def transform(self, X):
        """Apply feature map to X.

        Computes an approximate feature map using the kernel
        between some training points and X.

        Parameters
        ----------
        X : array-like, shape=(n_samples, n_features)
            Data to transform.

        Returns
        -------
        X_transformed : array, shape=(n_samples, n_components)
            Transformed data.
        """
        check_is_fitted(self, 'components_')
        X = check_array(X, accept_sparse='csr')

        kernel_params = self._get_kernel_params()
        embedded = pairwise_kernels(X, self.components_,
                                    metric=self.kernel,
                                    filter_params=True,
                                    **kernel_params)
        return np.dot(embedded, self.normalization_.T)

    def _get_kernel_params(self):
        # For a callable kernel the user-supplied kernel_params are
        # forwarded verbatim; for string kernels the explicit
        # gamma/degree/coef0 hyper-parameters are injected.
        params = self.kernel_params
        if params is None:
            params = {}
        if not callable(self.kernel):
            params['gamma'] = self.gamma
            params['degree'] = self.degree
            params['coef0'] = self.coef0

        return params
| bsd-3-clause |
ChristineLaMuse/mozillians | vendor-local/lib/python/celery/tests/functional/case.py | 14 | 5433 | from __future__ import absolute_import
import atexit
import logging
import os
import signal
import socket
import sys
import traceback
from itertools import count
from time import time
from celery.exceptions import TimeoutError
from celery.task.control import ping, flatten_reply, inspect
from celery.utils import qualname
from celery.tests.utils import Case
HOSTNAME = socket.gethostname()
def say(msg):
    """Write *msg*, followed by a newline, to standard error."""
    sys.stderr.write("%s\n" % msg)
def try_while(fun, reason="Timed out", timeout=10, interval=0.5):
    """Poll `fun` until it returns a true value or `timeout` expires.

    :param fun: zero-argument callable; its first true return value is
        returned to the caller.
    :param reason: message attached to the raised :exc:`TimeoutError`.
    :param timeout: give up after this many seconds.
    :param interval: seconds to sleep between attempts.
    :raises TimeoutError: if `fun` never returns a true value in time.
    """
    # Local import: the module-level import only pulls in `time`.
    from time import sleep
    time_start = time()
    for iterations in count(0):
        if time() - time_start >= timeout:
            # BUG FIX: `reason` used to be ignored, producing an
            # uninformative bare TimeoutError().
            raise TimeoutError(reason)
        ret = fun()
        if ret:
            return ret
        # BUG FIX: `interval` used to be ignored, making this a busy
        # loop that hammered fun() (and the broker behind it).
        sleep(interval)
class Worker(object):
    """Manage a forked `celeryd` worker process for functional tests."""

    started = False
    # Python 2 idiom: bound `next` of an itertools counter used to give
    # each anonymous worker a unique hostname suffix.
    next_worker_id = count(1).next
    _shutdown_called = False

    def __init__(self, hostname, loglevel="error"):
        self.hostname = hostname
        self.loglevel = loglevel

    def start(self):
        """Fork the worker process once; subsequent calls are no-ops."""
        if not self.started:
            self._fork_and_exec()
            self.started = True

    def _fork_and_exec(self):
        # Child process runs the worker with the solo pool and never
        # returns (os._exit); parent records the child's pid.
        pid = os.fork()
        if pid == 0:
            from celery import current_app
            current_app.worker_main(["celeryd", "--loglevel=INFO",
                                     "-n", self.hostname,
                                     "-P", "solo"])
            os._exit(0)
        self.pid = pid

    def is_alive(self, timeout=1):
        """Ping the worker over the broker; True if it answered."""
        r = ping(destination=[self.hostname],
                 timeout=timeout)
        return self.hostname in flatten_reply(r)

    def wait_until_started(self, timeout=10, interval=0.5):
        """Block until the worker responds to ping (or try_while times out)."""
        try_while(lambda: self.is_alive(interval),
                  "Worker won't start (after %s secs.)" % timeout,
                  interval=interval, timeout=timeout)
        say("--WORKER %s IS ONLINE--" % self.hostname)

    def ensure_shutdown(self, timeout=10, interval=0.5):
        """SIGTERM the worker and wait until it stops answering pings."""
        os.kill(self.pid, signal.SIGTERM)
        try_while(lambda: not self.is_alive(interval),
                  "Worker won't shutdown (after %s secs.)" % timeout,
                  timeout=10, interval=0.5)
        say("--WORKER %s IS SHUTDOWN--" % self.hostname)
        self._shutdown_called = True

    def ensure_started(self):
        self.start()
        self.wait_until_started()

    @classmethod
    def managed(cls, hostname=None, caller=None):
        """Start a worker and register an atexit guard that reports (and
        shuts down) any worker the test forgot to stop."""
        hostname = hostname or socket.gethostname()
        if caller:
            hostname = ".".join([qualname(caller), hostname])
        else:
            hostname += str(cls.next_worker_id())
        worker = cls(hostname)
        worker.ensure_started()
        stack = traceback.format_stack()

        @atexit.register
        def _ensure_shutdown_once():
            if not worker._shutdown_called:
                say("-- Found worker not stopped at shutdown: %s\n%s" % (
                    worker.hostname,
                    "\n".join(stack)))
                worker.ensure_shutdown()

        return worker
class WorkerCase(Case):
    """Test case base class backed by one managed worker per test class."""

    hostname = HOSTNAME
    worker = None

    @classmethod
    def setUpClass(cls):
        logging.getLogger("amqplib").setLevel(logging.ERROR)
        cls.worker = Worker.managed(cls.hostname, caller=cls)

    @classmethod
    def tearDownClass(cls):
        cls.worker.ensure_shutdown()

    def assertWorkerAlive(self, timeout=1):
        # BUG FIX: previously asserted on the bound method object
        # (`self.worker.is_alive`), which is always truthy; now the
        # worker is actually pinged.
        self.assertTrue(self.worker.is_alive(timeout))

    def inspect(self, timeout=1):
        """Return an inspect proxy scoped to this case's worker."""
        return inspect([self.worker.hostname], timeout=timeout)

    def my_response(self, response):
        """Extract this worker's entry from a broadcast reply."""
        return flatten_reply(response)[self.worker.hostname]

    def is_accepted(self, task_id, interval=0.5):
        active = self.inspect(timeout=interval).active()
        if active:
            for task in active[self.worker.hostname]:
                if task["id"] == task_id:
                    return True
        return False

    def is_reserved(self, task_id, interval=0.5):
        reserved = self.inspect(timeout=interval).reserved()
        if reserved:
            for task in reserved[self.worker.hostname]:
                if task["id"] == task_id:
                    return True
        return False

    def is_scheduled(self, task_id, interval=0.5):
        schedule = self.inspect(timeout=interval).scheduled()
        if schedule:
            for item in schedule[self.worker.hostname]:
                if item["request"]["id"] == task_id:
                    return True
        return False

    def is_received(self, task_id, interval=0.5):
        return (self.is_reserved(task_id, interval) or
                self.is_scheduled(task_id, interval) or
                self.is_accepted(task_id, interval))

    # BUG FIX for the three ensure_* helpers below: they used to pass
    # hard-coded interval=0.5/timeout=10 to try_while, silently ignoring
    # their own keyword arguments.

    def ensure_accepted(self, task_id, interval=0.5, timeout=10):
        return try_while(lambda: self.is_accepted(task_id, interval),
                         "Task not accepted within timeout",
                         interval=interval, timeout=timeout)

    def ensure_received(self, task_id, interval=0.5, timeout=10):
        # BUG FIX: message typo "receied" corrected.
        return try_while(lambda: self.is_received(task_id, interval),
                         "Task not received within timeout",
                         interval=interval, timeout=timeout)

    def ensure_scheduled(self, task_id, interval=0.5, timeout=10):
        return try_while(lambda: self.is_scheduled(task_id, interval),
                         "Task not scheduled within timeout",
                         interval=interval, timeout=timeout)
| bsd-3-clause |
OpenBeta/beta | apiserver/model.py | 1 | 9792 | from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.dialects import postgresql
from geoalchemy2 import Geometry
from sqlalchemy import func, ForeignKey, PrimaryKeyConstraint, event, Sequence
from sqlalchemy.schema import DropTable
from sqlalchemy.ext.compiler import compiles
import flask_login
from datetime import datetime
import json
import collections
from key_helper import *
db = SQLAlchemy()
FeatureSet = collections.namedtuple('FeatureSet', 'route, boundary', verbose=True)
class Route(db.Model):
    """A climbing route: a point geometry plus its raw GeoJSON properties."""

    __tablename__ = 'routes'

    id = db.Column(db.Integer, primary_key=True)
    geo = db.Column(Geometry(geometry_type='POINT', srid=4326), unique=True)
    name = db.Column(db.Text, index=True)
    grade = db.Column(db.Text)
    grade_type = db.Column(db.Text, ForeignKey('grade_types.id'))
    properties_json = db.Column(postgresql.JSONB)

    def __init__(self, geojson):
        """Build a Route from a GeoJSON Feature dict.

        Requires geojson['properties']['name']; an optional
        properties['grade'] dict provides 'value' and 'type'.
        """
        self.geo = func.ST_SetSRID(func.ST_GeomFromGeoJSON(json.dumps(geojson['geometry'])), 4326)
        self.name = geojson['properties']['name']
        if 'grade' in geojson['properties']:
            grade = geojson['properties']['grade']
            self.grade = grade['value']
            self.grade_type = grade['type']
        else:
            self.grade = ''
            # BUG FIX: this used to assign to a stray `self.type`
            # attribute, leaving the grade_type column NULL; 'unknown'
            # is a seeded row in grade_types (see insert_initial_values).
            self.grade_type = 'unknown'
        self.properties_json = geojson['properties']  # store raw data

    def __repr__(self):
        return '<Route %r>' % self.name

    def to_json(self):
        """Serialize back to a GeoJSON Feature (id form: "route/<id>")."""
        return {
            "type": "Feature",
            "id": "route/{}".format(self.id),
            "geometry": json.loads(db.session.scalar(func.ST_AsGeoJSON(self.geo))),
            "properties": self.properties_json
        }

    def __eq__(self, other):
        """Routes compare equal when their geometries serialize to the
        same GeoJSON (requires an active db session)."""
        if isinstance(other, self.__class__):
            lhs = json.loads(db.session.scalar(func.ST_AsGeoJSON(self.geo)))
            rhs = json.loads(db.session.scalar(func.ST_AsGeoJSON(other.geo)))
            return lhs == rhs
        return NotImplemented

    def __ne__(self, other):
        """Define a non-equality test."""
        return not self.__eq__(other)

    def __hash__(self):
        # Hash on the geometry column value; pairs with __eq__ above.
        return hash(self.geo)
class GradeType(db.Model):
    """Lookup table of grading systems (seeded in insert_initial_values)."""

    __tablename__ = 'grade_types'

    id = db.Column(db.Text, primary_key=True, unique=True)  # short code, e.g. 'yds'
    full_name = db.Column(db.Text)  # human-readable system name

    def __init__(self, id, full_name):
        self.id = id
        self.full_name = full_name
@event.listens_for(GradeType.__table__, 'after_create')
def insert_initial_values(*args, **kwargs):
    """Seed the grade_types lookup table right after it is created."""
    db.session.add(GradeType(id='unknown', full_name='Type Unknown'))
    db.session.add(GradeType(id='yds', full_name='Yosemite Decimal System'))
    db.session.add(GradeType(id='v', full_name='Hueco V-scale'))
    db.session.commit()
# BUG FIX: removed the redundant `event.listen(GradeType.__table__,
# 'after_create', insert_initial_values)` call that followed — the
# decorator above already registers this listener, so the explicit call
# registered it a second time and made the seed rows insert twice.
class GradeDetail(db.Model):
    """One grade value within a grading system, ordered by `weight`."""

    __tablename__ = 'grade_details'

    id = db.Column(db.Text, ForeignKey('grade_types.id'))  # owning grade system
    value = db.Column(db.Text)  # display value, e.g. '5.10a'
    weight = db.Column(db.Integer)  # sort key within the system
    # Composite primary key: a weight is unique per grading system.
    __table_args__ = (PrimaryKeyConstraint(id, weight),)
class Boundary(db.Model):
    """A polygonal area boundary with versioning support."""

    __tablename__ = 'boundaries'

    BOUNDARY_ID_SEQ = Sequence('boundary_id_seq', metadata=db.Model.metadata)  # define sequence explicitly
    boundary_id = db.Column(db.Integer, primary_key=True, server_default=BOUNDARY_ID_SEQ.next_value())
    name = db.Column(db.Text, index=True)
    is_top_level = db.Column(db.Boolean)
    geo = db.Column(Geometry(geometry_type='POLYGON', srid=4326), unique=True)
    properties_json = db.Column(postgresql.JSONB)
    # Validity range maintained by the `versioning` trigger installed in
    # setup_temporal_tables(); history rows go to boundaries_history.
    sys_period = db.Column(postgresql.TSTZRANGE, nullable=False)

    def __init__(self, geojson):
        """Build a Boundary from a GeoJSON Feature dict.

        Reads optional properties 'name' and 'topLevel' (default False).
        """
        props = geojson['properties']
        self.name = props.get('name')
        self.is_top_level = props.get('topLevel', False)
        self.geo = func.ST_SetSRID(func.ST_GeomFromGeoJSON(json.dumps(geojson['geometry'])), 4326)
        self.properties_json = props

    def to_json(self):
        """Serialize back to a GeoJSON Feature (id form: "area/<id>")."""
        return {
            "type": "Feature",
            "id": "area/{}".format(self.boundary_id),
            "geometry": json.loads(db.session.scalar(func.ST_AsGeoJSON(self.geo))),
            "properties": self.properties_json
        }
class BoundaryHistory(db.Model):
    """Archived versions of Boundary rows, written by the temporal
    `versioning` trigger (see setup_temporal_tables)."""

    __tablename__ = 'boundaries_history'

    history_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    boundary_id = db.Column(db.Integer)  # id of the boundary this version belongs to
    name = db.Column(db.Text)
    is_top_level = db.Column(db.Boolean)
    geo = db.Column(Geometry(geometry_type='POLYGON', srid=4326))
    properties_json = db.Column(postgresql.JSONB)
    sys_period = db.Column(postgresql.TSTZRANGE, nullable=False)  # validity range of this version
class APIUser(db.Model, flask_login.UserMixin):
    """A registered API consumer, identified by email and API key."""

    __tablename__ = 'api_users'

    uid = db.Column(db.Integer, primary_key=True, autoincrement=True)
    email = db.Column(db.Text, primary_key=True, unique=True)
    api_key = db.Column(db.Text, primary_key=True, unique=True)
    active = db.Column(db.Boolean)
    created_ts = db.Column(db.DateTime(timezone=True))
    mod_ts = db.Column(db.DateTime(timezone=True))

    def __init__(self, **kwargs):
        """Create a user; requires 'active' and 'email' keyword args."""
        self.active = kwargs['active']
        self.email = kwargs['email']
        now = datetime.utcnow()
        self.created_ts = now
        # BUG FIX: was `self.mpd_ts = now` (typo), which set a stray
        # attribute and left the mod_ts column NULL.
        self.mod_ts = now
        self.api_key = genkey(userKeySigner)

    @property
    def is_active(self):
        # BUG FIX: previously `return self.is_active`, which recursed
        # infinitely; the backing column is `active`.
        return self.active

    @property
    def is_authenticated(self):
        return True

    @property
    def apikey(self):
        return self.api_key
class AuditLog(db.Model):
    """Row-level audit trail populated by the trxn_history() trigger
    (see setup_temporal_tables)."""

    __tablename__ = 'audit_log'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    op = db.Column(db.CHAR)  # 'I', 'U' or 'D' per the trigger
    row_id = db.Column(db.Integer)  # primary key of the affected row
    table_name = db.Column(db.VARCHAR(50))
    user_id = db.Column(db.VARCHAR(30), nullable=False)  # who made the change
    ip = db.Column(postgresql.INET)
    ts = db.Column(db.DateTime(timezone=True))
def get_boundary_by_id(boundary_id):
    """Look up a single boundary by primary key.

    Returns the boundary as a GeoJSON Feature dict, or None when no
    boundary with that id exists.
    """
    match = db.session.query(Boundary).filter(
        Boundary.boundary_id == boundary_id).first()
    return match.to_json() if match is not None else None
def search_within_boundary_by_id(boundary_id):
    """Return all routes located inside the given boundary as a GeoJSON
    FeatureCollection."""
    rows = db.session.query(Route, Boundary)\
        .filter("ST_WITHIN(routes.geo, boundaries.geo)")\
        .filter("boundaries.boundary_id=:id")\
        .params(id=boundary_id).all()
    return {
        "type": "FeatureCollection",
        # BUG FIX: a two-entity query yields (Route, Boundary) row
        # tuples, which have no .to_json(); serialize the Route element.
        "features": map(lambda row: row[0].to_json(), rows)
    }
def search_within_radius_in_meters(location, radius, route=True, boundary=False):
    """Find features within `radius` meters of a "lng,lat" point.

    Returns a FeatureSet of two GeoJSON FeatureCollections; each
    collection is empty unless its `route`/`boundary` flag is set.
    """
    coordinates = location.split(",")
    lng, lat = coordinates[0], coordinates[1]

    route_rows = list()
    boundary_rows = list()
    if route:
        route_rows = db.session.query(Route).\
            filter('ST_DistanceSphere(geo, ST_MakePoint(:lng,:lat))<=:r').\
            params(lng=lng, lat=lat, r=radius).all()
    if boundary:
        boundary_rows = db.session.query(Boundary).\
            filter('ST_DistanceSphere(geo, ST_MakePoint(:lng,:lat))<=:r').\
            params(lng=lng, lat=lat, r=radius).all()

    route_json = {
        "type": "FeatureCollection",
        "features": [item.to_json() for item in route_rows]
    }
    boundary_json = {
        "type": "FeatureCollection",
        "features": [item.to_json() for item in boundary_rows]
    }
    return FeatureSet(route=route_json, boundary=boundary_json)
def recent_activities(count, route=True, boundary=False):
    """Return the most recently added routes and/or boundaries.

    `count` is clamped to an internal hard limit of 10.  Returns a
    FeatureSet of two GeoJSON FeatureCollections.
    """
    hard_limit = 10
    count = min(count, hard_limit)

    route_rows = list()
    boundary_rows = list()
    if route:
        route_rows = (db.session.query(Route)
                      .order_by(Route.id.desc())
                      .limit(count))
    if boundary:
        boundary_rows = (db.session.query(Boundary)
                         .order_by(Boundary.boundary_id.desc())
                         .limit(count))

    route_json = {
        "type": "FeatureCollection",
        "features": [item.to_json() for item in route_rows]
    }
    boundary_json = {
        "type": "FeatureCollection",
        "features": [item.to_json() for item in boundary_rows]
    }
    return FeatureSet(route=route_json, boundary=boundary_json)
def setup_temporal_tables():
    """Install the database triggers backing versioning and auditing.

    1. A `versioning` trigger that archives old Boundary rows into
       boundaries_history using the sys_period range column.
    2. A trxn_history() PL/pgSQL function plus trigger that records
       every insert/update/delete on boundaries into audit_log.
    """
    # Temporal versioning trigger (uses the temporal_tables extension's
    # `versioning` procedure).
    sql = ("CREATE TRIGGER boundary_history BEFORE INSERT OR UPDATE OR DELETE ON Boundaries "
           "FOR EACH ROW EXECUTE PROCEDURE versioning('sys_period', 'boundaries_history', true)")
    db.session.execute(sql)

    # Audit-log function: one audit_log row per mutation; the editor is
    # taken from properties_json ('editedBy') for inserts/updates, or
    # from the session variable vars.edited_by for deletes.
    sql = ("create or replace function trxn_history() returns trigger as $$ "
           "BEGIN"
           " IF (TG_OP = 'DELETE') THEN"
           " INSERT INTO audit_log (op, row_id, table_name, user_id, ts) "
           " VALUES('D', OLD.boundary_id, TG_TABLE_NAME, current_setting('vars.edited_by'),now());"
           " ELSEIF (TG_OP='UPDATE') THEN"
           " INSERT INTO audit_log (op, row_id, table_name, user_id, ts) "
           " VALUES('U', OLD.boundary_id, TG_TABLE_NAME, NEW.properties_json->>'editedBy', now());"
           " ELSEIF (TG_OP='INSERT') THEN"
           " INSERT INTO audit_log (op, row_id, table_name, user_id, ts) "
           " VALUES('I', NEW.boundary_id, TG_TABLE_NAME, NEW.properties_json->>'editedBy', now());"
           " END IF;"
           " RETURN null;"
           "END;"
           "$$ language plpgsql;")
    db.session.execute(sql)

    sql = ("CREATE TRIGGER audit AFTER INSERT OR UPDATE OR DELETE ON boundaries "
           "FOR EACH ROW EXECUTE procedure trxn_history();")
    db.session.execute(sql)
    db.session.commit()
@compiles(DropTable, "postgresql")
def _compile_drop_table(element, compiler, **kwargs):
    """Append CASCADE to DROP TABLE on PostgreSQL so dependent objects
    (triggers, sequences, views) are dropped as well."""
    return "%s CASCADE" % compiler.visit_drop_table(element)
| gpl-3.0 |
SINGROUP/pycp2k | pycp2k/classes/_each304.py | 1 | 1114 | from pycp2k.inputsection import InputSection
class _each304(InputSection):
    """Auto-generated CP2K input section for an EACH block.

    Each attribute mirrors one CP2K keyword (mapping in `_keywords`);
    attributes left as None are omitted from the generated input.
    """

    def __init__(self):
        InputSection.__init__(self)
        self.Just_energy = None
        self.Powell_opt = None
        self.Qs_scf = None
        self.Xas_scf = None
        self.Md = None
        self.Pint = None
        self.Metadynamics = None
        self.Geo_opt = None
        self.Rot_opt = None
        self.Cell_opt = None
        self.Band = None
        self.Ep_lin_solver = None
        self.Spline_find_coeffs = None
        self.Replica_eval = None
        self.Bsse = None
        self.Shell_opt = None
        self.Tddft_scf = None
        self._name = "EACH"
        # Maps Python attribute names to CP2K keyword names.
        self._keywords = {'Bsse': 'BSSE', 'Cell_opt': 'CELL_OPT', 'Just_energy': 'JUST_ENERGY', 'Band': 'BAND', 'Xas_scf': 'XAS_SCF', 'Rot_opt': 'ROT_OPT', 'Replica_eval': 'REPLICA_EVAL', 'Tddft_scf': 'TDDFT_SCF', 'Shell_opt': 'SHELL_OPT', 'Md': 'MD', 'Pint': 'PINT', 'Metadynamics': 'METADYNAMICS', 'Geo_opt': 'GEO_OPT', 'Spline_find_coeffs': 'SPLINE_FIND_COEFFS', 'Powell_opt': 'POWELL_OPT', 'Qs_scf': 'QS_SCF', 'Ep_lin_solver': 'EP_LIN_SOLVER'}
flotre/Sick-Beard | lib/hachoir_parser/container/swf.py | 90 | 15001 | """
SWF (Macromedia/Adobe Flash) file parser.
Documentation:
- Alexis' SWF Reference:
http://www.m2osw.com/swf_alexref.html
- http://www.half-serious.com/swf/format/
- http://www.anotherbigidea.com/javaswf/
- http://www.gnu.org/software/gnash/
Author: Victor Stinner
Creation date: 29 october 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
Bit, Bits, UInt8, UInt32, UInt16, CString, Enum,
Bytes, RawBytes, NullBits, String, SubFile)
from lib.hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from lib.hachoir_core.text_handler import textHandler, filesizeHandler
from lib.hachoir_core.tools import paddingSize, humanFrequency
from lib.hachoir_parser.image.common import RGB
from lib.hachoir_parser.image.jpeg import JpegChunk, JpegFile
from lib.hachoir_core.stream import StringInputStream, ConcatStream
from lib.hachoir_parser.common.deflate import Deflate, has_deflate
from lib.hachoir_parser.container.action_script import parseActionScript
import math
# Maximum file size (50 MB) accepted by SwfFile.validate()
MAX_FILE_SIZE = 50 * 1024 * 1024

# SWF stores coordinates in twips: 20 twips = 1 pixel
TWIPS = 20
class RECT(FieldSet):
    """SWF RECT record: 5-bit field width, then four coordinates in twips,
    padded to a byte boundary."""

    endian = BIG_ENDIAN

    def createFields(self):
        yield Bits(self, "nbits", 5)
        nbits = self["nbits"].value
        if not nbits:
            raise ParserError("SWF parser: Invalid RECT field size (0)")
        yield Bits(self, "xmin", nbits, "X minimum in twips")
        yield Bits(self, "xmax", nbits, "X maximum in twips")
        yield Bits(self, "ymin", nbits, "Y minimum in twips")
        yield Bits(self, "ymax", nbits, "Y maximum in twips")
        # Align the record on the next byte boundary.
        size = paddingSize(self.current_size, 8)
        if size:
            yield NullBits(self, "padding", size)

    def getWidth(self):
        # Convert twips to pixels, rounding up.
        return math.ceil(float(self["xmax"].value) / TWIPS)

    def getHeight(self):
        return math.ceil(float(self["ymax"].value) / TWIPS)

    def createDescription(self):
        return "Rectangle: %ux%u" % (self.getWidth(), self.getHeight())
class FixedFloat16(FieldSet):
    """8.8 fixed-point value: fractional byte followed by integer byte."""

    def createFields(self):
        yield UInt8(self, "float_part")
        yield UInt8(self, "int_part")

    def createValue(self):
        fraction = float(self["float_part"].value) / 256
        return self["int_part"].value + fraction
def parseBackgroundColor(parent, size):
    # SetBackgroundColor tag body: a single RGB triplet.
    yield RGB(parent, "color")
def bit2hertz(field):
    """Render a 2-bit SWF sample-rate code as a frequency string.

    Rate codes 0..3 map to 5512.5 Hz doubled per step (5.5/11/22/44 kHz).
    """
    return humanFrequency(5512.5 * (2 ** field.value))
# Sound codec identifiers used by DefineSound/SoundStreamHead tags.
SOUND_CODEC_MP3 = 2
SOUND_CODEC = {
    0: "RAW",
    1: "ADPCM",
    SOUND_CODEC_MP3: "MP3",
    3: "Uncompressed",
    6: "Nellymoser",
}
class SoundEnvelope(FieldSet):
    """Sound envelope: a count followed by (position, left, right) points."""

    def createFields(self):
        yield UInt8(self, "count")
        for index in xrange(self["count"].value):
            yield UInt32(self, "mark44[]")
            yield UInt16(self, "level0[]")
            yield UInt16(self, "level1[]")
def parseSoundBlock(parent, size):
    """Parse a SoundStreamBlock tag body.

    The layout actually depends on the codec declared by the preceding
    SoundStreamHead tag, which this parser does not track yet — the MP3
    layout (sample count + seek value) is assumed unconditionally.
    """
    # TODO: Be able to get codec... Need to know last sound "def_sound[]" field
    # if not (...)sound_header:
    # raise ParserError("Sound block without header")
    if True:  # sound_header == SOUND_CODEC_MP3:
        yield UInt16(parent, "samples")
        yield UInt16(parent, "left")
    size = (parent.size - parent.current_size) // 8
    if size:
        yield RawBytes(parent, "music_data", size)
def parseStartSound(parent, size):
    """Parse a StartSound tag body: sound id, flag bits, then the
    optional fields each flag enables."""
    yield UInt16(parent, "sound_id")
    yield Bit(parent, "has_in_point")
    yield Bit(parent, "has_out_point")
    yield Bit(parent, "has_loops")
    yield Bit(parent, "has_envelope")
    yield Bit(parent, "no_multiple")
    yield Bit(parent, "stop_playback")
    yield NullBits(parent, "reserved", 2)

    # Optional fields, present only when the matching flag is set.
    if parent["has_in_point"].value:
        yield UInt32(parent, "in_point")
    if parent["has_out_point"].value:
        yield UInt32(parent, "out_point")
    if parent["has_loops"].value:
        yield UInt16(parent, "loop_count")
    if parent["has_envelope"].value:
        yield SoundEnvelope(parent, "envelope")
def parseDefineSound(parent, size):
    """Parse a DefineSound tag body: id, format bits, sample count, then
    the raw sound data (with an extra length field for MP3)."""
    yield UInt16(parent, "sound_id")
    yield Bit(parent, "is_stereo")
    yield Bit(parent, "is_16bit")
    yield textHandler(Bits(parent, "rate", 2), bit2hertz)
    yield Enum(Bits(parent, "codec", 4), SOUND_CODEC)
    yield UInt32(parent, "sample_count")

    if parent["codec"].value == SOUND_CODEC_MP3:
        yield UInt16(parent, "len")

    size = (parent.size - parent.current_size) // 8
    if size:
        yield RawBytes(parent, "music_data", size)
def parseSoundHeader(parent, size):
    """Parse a SoundStreamHead tag body: playback format, stream format,
    sample count, and an MP3-only latency/seek field."""
    yield Bit(parent, "playback_is_stereo")
    yield Bit(parent, "playback_is_16bit")
    yield textHandler(Bits(parent, "playback_rate", 2), bit2hertz)
    yield NullBits(parent, "reserved", 4)

    yield Bit(parent, "sound_is_stereo")
    yield Bit(parent, "sound_is_16bit")
    yield textHandler(Bits(parent, "sound_rate", 2), bit2hertz)
    yield Enum(Bits(parent, "codec", 4), SOUND_CODEC)

    yield UInt16(parent, "sample_count")

    # Codec 2 is MP3 (SOUND_CODEC_MP3).
    if parent["codec"].value == 2:
        yield UInt16(parent, "latency_seek")
class JpegHeader(FieldSet):
    """Leading JPEG chunks embedded before the image data of a bitmap tag."""

    endian = BIG_ENDIAN

    def createFields(self):
        count = 1
        while True:
            chunk = JpegChunk(self, "jpeg_chunk[]")
            yield chunk
            # Stop after the first SOI/EOI that follows at least one
            # other chunk (the header is terminated by it).
            if 1 < count and chunk["type"].value in (JpegChunk.TAG_SOI, JpegChunk.TAG_EOI):
                break
            count += 1
def parseJpeg(parent, size):
    """Parse DefineBits / DefineBitsJPEG2 / DefineBitsJPEG3 tag bodies.

    All three start with a character id; JPEG3 adds an alpha-data offset
    and trailing alpha channel, and JPEG2/3 may carry an inline JPEG
    header before the image itself.
    """
    yield UInt16(parent, "char_id", "Character identifier")
    size -= 2

    code = parent["code"].value
    if code != Tag.TAG_BITS:
        if code == Tag.TAG_BITS_JPEG3:
            # NOTE(review): description string says "Character
            # identifier" but this is the offset to the alpha data.
            yield UInt32(parent, "alpha_offset", "Character identifier")
            size -= 4

        # Peek two bytes ahead to detect an inline JPEG header (DQT or
        # SOI marker).
        addr = parent.absolute_address + parent.current_size + 16
        if parent.stream.readBytes(addr, 2) in ("\xff\xdb", "\xff\xd8"):
            header = JpegHeader(parent, "jpeg_header")
            yield header
            hdr_size = header.size // 8
            size -= hdr_size
        else:
            hdr_size = 0

        if code == Tag.TAG_BITS_JPEG3:
            img_size = parent["alpha_offset"].value - hdr_size
        else:
            img_size = size
    else:
        img_size = size
    yield SubFile(parent, "image", img_size, "JPEG picture", parser=JpegFile)
    if code == Tag.TAG_BITS_JPEG3:
        # Whatever remains after the image is the alpha channel.
        size = (parent.size - parent.current_size) // 8
        yield RawBytes(parent, "alpha", size, "Image data")
def parseVideoFrame(parent, size):
    """Parse a VideoFrame tag: stream id, frame number, then the raw
    codec payload (if any bytes remain)."""
    yield UInt16(parent, "stream_id")
    yield UInt16(parent, "frame_num")
    remaining = size - 4
    if remaining > 0:
        yield RawBytes(parent, "video_data", remaining)
class Export(FieldSet):
    """One entry of an Export tag: an object id and its exported name."""

    def createFields(self):
        yield UInt16(self, "object_id")
        yield CString(self, "name")
def parseExport(parent, size):
    # Export tag body: an entry count followed by that many Export records.
    yield UInt16(parent, "count")
    for index in xrange(parent["count"].value):
        yield Export(parent, "export[]")
class Tag(FieldSet):
TAG_BITS = 6
TAG_BITS_JPEG2 = 32
TAG_BITS_JPEG3 = 35
TAG_INFO = {
# SWF version 1.0
0: ("end[]", "End", None),
1: ("show_frame[]", "Show frame", None),
2: ("def_shape[]", "Define shape", None),
3: ("free_char[]", "Free character", None),
4: ("place_obj[]", "Place object", None),
5: ("remove_obj[]", "Remove object", None),
6: ("def_bits[]", "Define bits", parseJpeg),
7: ("def_but[]", "Define button", None),
8: ("jpg_table", "JPEG tables", None),
9: ("bkgd_color[]", "Set background color", parseBackgroundColor),
10: ("def_font[]", "Define font", None),
11: ("def_text[]", "Define text", None),
12: ("action[]", "Action script", parseActionScript),
13: ("def_font_info[]", "Define font info", None),
# SWF version 2.0
14: ("def_sound[]", "Define sound", parseDefineSound),
15: ("start_sound[]", "Start sound", parseStartSound),
16: ("stop_sound[]", "Stop sound", None),
17: ("def_but_sound[]", "Define button sound", None),
18: ("sound_hdr", "Sound stream header", parseSoundHeader),
19: ("sound_blk[]", "Sound stream block", parseSoundBlock),
20: ("def_bits_lossless[]", "Define bits lossless", None),
21: ("def_bits_jpeg2[]", "Define bits JPEG 2", parseJpeg),
22: ("def_shape2[]", "Define shape 2", None),
23: ("def_but_cxform[]", "Define button CXFORM", None),
24: ("protect", "File is protected", None),
# SWF version 3.0
25: ("path_are_ps[]", "Paths are Postscript", None),
26: ("place_obj2[]", "Place object 2", None),
28: ("remove_obj2[]", "Remove object 2", None),
29: ("sync_frame[]", "Synchronize frame", None),
31: ("free_all[]", "Free all", None),
32: ("def_shape3[]", "Define shape 3", None),
33: ("def_text2[]", "Define text 2", None),
34: ("def_but2[]", "Define button2", None),
35: ("def_bits_jpeg3[]", "Define bits JPEG 3", parseJpeg),
36: ("def_bits_lossless2[]", "Define bits lossless 2", None),
39: ("def_sprite[]", "Define sprite", None),
40: ("name_character[]", "Name character", None),
41: ("serial_number", "Serial number", None),
42: ("generator_text[]", "Generator text", None),
43: ("frame_label[]", "Frame label", None),
45: ("sound_hdr2[]", "Sound stream header2", parseSoundHeader),
46: ("def_morph_shape[]", "Define morph shape", None),
47: ("gen_frame[]", "Generate frame", None),
48: ("def_font2[]", "Define font 2", None),
49: ("tpl_command[]", "Template command", None),
# SWF version 4.0
37: ("def_text_field[]", "Define text field", None),
38: ("def_quicktime_movie[]", "Define QuickTime movie", None),
# SWF version 5.0
50: ("def_cmd_obj[]", "Define command object", None),
51: ("flash_generator", "Flash generator", None),
52: ("gen_ext_font[]", "Gen external font", None),
56: ("export[]", "Export", parseExport),
57: ("import[]", "Import", None),
58: ("ebnable_debug", "Enable debug", None),
# SWF version 6.0
59: ("do_init_action[]", "Do init action", None),
60: ("video_str[]", "Video stream", None),
61: ("video_frame[]", "Video frame", parseVideoFrame),
62: ("def_font_info2[]", "Define font info 2", None),
63: ("mx4[]", "MX4", None),
64: ("enable_debug2", "Enable debugger 2", None),
# SWF version 7.0
65: ("script_limits[]", "Script limits", None),
66: ("tab_index[]", "Set tab index", None),
# SWF version 8.0
69: ("file_attr[]", "File attributes", None),
70: ("place_obj3[]", "Place object 3", None),
71: ("import2[]", "Import a definition list from another movie", None),
73: ("def_font_align[]", "Define font alignment zones", None),
74: ("csm_txt_set[]", "CSM text settings", None),
75: ("def_font3[]", "Define font text 3", None),
77: ("metadata[]", "XML code describing the movie", None),
78: ("def_scale_grid[]", "Define scaling factors", None),
83: ("def_shape4[]", "Define shape 4", None),
84: ("def_morph2[]", "Define a morphing shape 2", None),
}
    def __init__(self, *args):
        """Parse a SWF tag header and bind the matching payload parser.

        Computes the tag's total size in bits from its "length" field: the
        long form adds a 4-byte extended length to the 2-byte code/length
        word, the short form packs the length into the header itself.
        When the tag code is listed in TAG_INFO, the field's name,
        description and payload parser are taken from that table.
        """
        FieldSet.__init__(self, *args)
        size = self["length"].value
        if self[0].name == "length_ext":
            # Long form: 2-byte header + 4-byte extended length + payload.
            self._size = (6+size) * 8
        else:
            # Short form: 2-byte header + payload.
            self._size = (2+size) * 8
        code = self["code"].value
        if code in self.TAG_INFO:
            self._name, self._description, self.parser = self.TAG_INFO[code]
        else:
            # Unknown tag code: payload will be kept as raw bytes.
            self.parser = None
    def createFields(self):
        """Yield the tag header fields, then delegate the payload.

        SWF packs tag code and length into one 16-bit word; a 6-bit
        length of 63 (0x3F) signals that a 32-bit extended length follows.
        """
        if self.stream.readBits(self.absolute_address, 6, self.endian) == 63:
            yield Bits(self, "length_ext", 6)
            yield Bits(self, "code", 10)
            yield filesizeHandler(UInt32(self, "length"))
        else:
            yield filesizeHandler(Bits(self, "length", 6))
            yield Bits(self, "code", 10)
        size = self["length"].value
        if 0 < size:
            if self.parser:
                # Known tag: use the dedicated payload parser.
                for field in self.parser(self, size):
                    yield field
            else:
                # Unknown tag: keep the payload as opaque bytes.
                yield RawBytes(self, "data", size)
def createDescription(self):
return "Tag: %s (%s)" % (self["code"].display, self["length"].display)
class SwfFile(Parser):
    """Parser for SWF (Flash) files, uncompressed ("FWS") or
    zlib-compressed ("CWS")."""
    VALID_VERSIONS = set(xrange(1, 9+1))
    PARSER_TAGS = {
        "id": "swf",
        "category": "container",
        "file_ext": ["swf"],
        "mime": (u"application/x-shockwave-flash",),
        "min_size": 64,
        "description": u"Macromedia Flash data"
    }
    # Register one magic string per supported version, for both the
    # uncompressed (FWS) and compressed (CWS) signatures.
    PARSER_TAGS["magic"] = []
    for version in VALID_VERSIONS:
        PARSER_TAGS["magic"].append(("FWS%c" % version, 0))
        PARSER_TAGS["magic"].append(("CWS%c" % version, 0))
    endian = LITTLE_ENDIAN
    # SWF coordinates are in twips (1/20 pixel).
    SWF_SCALE_FACTOR = 1.0 / 20

    def validate(self):
        """Return True if the stream looks like a valid SWF file,
        otherwise an error message string."""
        if self.stream.readBytes(0, 3) not in ("FWS", "CWS"):
            return "Wrong file signature"
        if self["version"].value not in self.VALID_VERSIONS:
            return "Unknown version"
        if MAX_FILE_SIZE < self["filesize"].value:
            return "File too big (%u)" % self["filesize"].value
        if self["signature"].value == "FWS":
            # The display-rectangle padding bits must be zero in an
            # uncompressed file.
            if self["rect/padding"].value != 0:
                return "Unknown rectangle padding value"
        return True

    def createFields(self):
        yield String(self, "signature", 3, "SWF format signature", charset="ASCII")
        yield UInt8(self, "version")
        yield filesizeHandler(UInt32(self, "filesize"))
        if self["signature"].value != "CWS":
            # Uncompressed: parse the header fields, then the tag list.
            yield RECT(self, "rect")
            yield FixedFloat16(self, "frame_rate")
            yield UInt16(self, "frame_count")
            while not self.eof:
                yield Tag(self, "tag[]")
        else:
            # Compressed: everything after the 8-byte header is deflated.
            size = (self.size - self.current_size) // 8
            if has_deflate:
                data = Deflate(Bytes(self, "compressed_data", size), False)
                def createInputStream(cis, source=None, **args):
                    # Re-prefix the inflated stream with an uncompressed
                    # ("FWS") header so the result parses as a plain SWF.
                    stream = cis(source=source)
                    header = StringInputStream("FWS" + self.stream.readBytes(3*8, 5))
                    args.setdefault("tags",[]).append(("class", SwfFile))
                    return ConcatStream((header, stream), source=stream.source, **args)
                data.setSubIStream(createInputStream)
                yield data
            else:
                # zlib support unavailable: expose the raw deflated bytes.
                yield Bytes(self, "compressed_data", size)

    def createDescription(self):
        desc = ["version %u" % self["version"].value]
        if self["signature"].value == "CWS":
            desc.append("compressed")
        return u"Macromedia Flash data: %s" % (", ".join(desc))

    def createContentSize(self):
        if self["signature"].value == "FWS":
            return self["filesize"].value * 8
        else:
            # TODO: Size of compressed Flash?
            return None
| gpl-3.0 |
xia2/xia2 | src/xia2/Wrappers/Dials/EstimateGain.py | 1 | 1399 | from xia2.Driver.DriverFactory import DriverFactory
from xia2.Schema.Interfaces.FrameProcessor import FrameProcessor
def EstimateGain(DriverType=None):
    """A factory for EstimateGainWrapper classes."""

    DriverInstance = DriverFactory.Driver(DriverType)

    class EstimateGainWrapper(DriverInstance.__class__, FrameProcessor):
        """Wrapper around the dials.estimate_gain command-line program."""

        def __init__(self):
            super().__init__()
            self.set_executable("dials.estimate_gain")
            # Inputs and result, populated through the accessors below.
            self._sweep_filename = None
            self._kernel_size = None
            self._gain = None

        def set_sweep_filename(self, sweep_filename):
            self._sweep_filename = sweep_filename

        def set_kernel_size(self, kernel_size):
            self._kernel_size = kernel_size

        def get_gain(self):
            return self._gain

        def run(self):
            """Run dials.estimate_gain and scrape the estimated gain."""
            self.clear_command_line()
            assert self._sweep_filename is not None
            self.add_command_line(self._sweep_filename)
            if self._kernel_size is not None:
                self.add_command_line(
                    "kernel_size=%i,%i" % self._kernel_size
                )
            self.start()
            self.close_wait()
            self.check_for_errors()
            for line in self.get_all_output():
                if "Estimated gain:" in line:
                    self._gain = float(line.rpartition(":")[2].strip())

    return EstimateGainWrapper()
| bsd-3-clause |
kawasaki2013/python-for-android-x86 | python3-alpha/python3-src/Lib/test/test_pprint.py | 51 | 26585 | import pprint
import test.support
import unittest
import test.test_set
import random
import collections
import itertools
# list, tuple and dict subclasses that do or don't overwrite __repr__
class list2(list):
    # list subclass that keeps the inherited list.__repr__ unchanged.
    pass
class list3(list):
    """list subclass that explicitly overrides __repr__ (same output)."""

    def __repr__(self):
        return super().__repr__()
class tuple2(tuple):
    # tuple subclass that keeps the inherited tuple.__repr__ unchanged.
    pass
class tuple3(tuple):
    """tuple subclass that explicitly overrides __repr__ (same output)."""

    def __repr__(self):
        return super().__repr__()
class dict2(dict):
    # dict subclass that keeps the inherited dict.__repr__ unchanged.
    pass
class dict3(dict):
    """dict subclass that explicitly overrides __repr__ (same output)."""

    def __repr__(self):
        return super().__repr__()
class Unorderable:
    """Object with no ordering; its repr is its id (used to probe how
    pprint sorts values that cannot be compared)."""

    def __repr__(self):
        return "%d" % id(self)
class QueryTestCase(unittest.TestCase):
    """Tests for pprint's isrecursive()/isreadable() queries and pformat()."""

    def setUp(self):
        # self.a references self.b so nested/recursive cases can be built.
        self.a = list(range(100))
        self.b = list(range(200))
        self.a[-12] = self.b
    def test_basic(self):
        """isrecursive()/isreadable() on flat, non-recursive objects."""
        # Verify .isrecursive() and .isreadable() w/o recursion
        pp = pprint.PrettyPrinter()
        for safe in (2, 2.0, 2j, "abc", [3], (2,2), {3: 3}, "yaddayadda",
                     self.a, self.b):
            # module-level convenience functions
            self.assertFalse(pprint.isrecursive(safe),
                             "expected not isrecursive for %r" % (safe,))
            self.assertTrue(pprint.isreadable(safe),
                            "expected isreadable for %r" % (safe,))
            # PrettyPrinter methods
            self.assertFalse(pp.isrecursive(safe),
                             "expected not isrecursive for %r" % (safe,))
            self.assertTrue(pp.isreadable(safe),
                            "expected isreadable for %r" % (safe,))
    def test_knotted(self):
        """isrecursive()/isreadable() on self-referential structures."""
        # Verify .isrecursive() and .isreadable() w/ recursion
        # Tie a knot.
        self.b[67] = self.a
        # Messy dict.
        self.d = {}
        self.d[0] = self.d[1] = self.d[2] = self.d

        pp = pprint.PrettyPrinter()

        for icky in self.a, self.b, self.d, (self.d, self.d):
            self.assertTrue(pprint.isrecursive(icky), "expected isrecursive")
            self.assertFalse(pprint.isreadable(icky), "expected not isreadable")
            self.assertTrue(pp.isrecursive(icky), "expected isrecursive")
            self.assertFalse(pp.isreadable(icky), "expected not isreadable")

        # Break the cycles.
        self.d.clear()
        del self.a[:]
        del self.b[:]

        for safe in self.a, self.b, self.d, (self.d, self.d):
            # module-level convenience functions
            self.assertFalse(pprint.isrecursive(safe),
                             "expected not isrecursive for %r" % (safe,))
            self.assertTrue(pprint.isreadable(safe),
                            "expected isreadable for %r" % (safe,))
            # PrettyPrinter methods
            self.assertFalse(pp.isrecursive(safe),
                             "expected not isrecursive for %r" % (safe,))
            self.assertTrue(pp.isreadable(safe),
                            "expected isreadable for %r" % (safe,))
    def test_unreadable(self):
        """Objects that are not recursive but still not readable."""
        # Not recursive but not readable anyway
        pp = pprint.PrettyPrinter()
        for unreadable in type(3), pprint, pprint.isrecursive:
            # module-level convenience functions
            self.assertFalse(pprint.isrecursive(unreadable),
                             "expected not isrecursive for %r" % (unreadable,))
            self.assertFalse(pprint.isreadable(unreadable),
                             "expected not isreadable for %r" % (unreadable,))
            # PrettyPrinter methods
            self.assertFalse(pp.isrecursive(unreadable),
                             "expected not isrecursive for %r" % (unreadable,))
            self.assertFalse(pp.isreadable(unreadable),
                             "expected not isreadable for %r" % (unreadable,))
    def test_same_as_repr(self):
        """pformat()/saferepr() must equal repr() for simple objects."""
        # Simple objects, small containers and classes that overwrite __repr__
        # For those the result should be the same as repr().
        # Ahem.  The docs don't say anything about that -- this appears to
        # be testing an implementation quirk.  Starting in Python 2.5, it's
        # not true for dicts:  pprint always sorts dicts by key now; before,
        # it sorted a dict display if and only if the display required
        # multiple lines.  For that reason, dicts with more than one element
        # aren't tested here.
        for simple in (0, 0, 0+0j, 0.0, "", b"",
                       (), tuple2(), tuple3(),
                       [], list2(), list3(),
                       {}, dict2(), dict3(),
                       self.assertTrue, pprint,
                       -6, -6, -6-6j, -1.5, "x", b"x", (3,), [3], {3: 6},
                       (1,2), [3,4], {5: 6},
                       tuple2((1,2)), tuple3((1,2)), tuple3(range(100)),
                       [3,4], list2([3,4]), list3([3,4]), list3(range(100)),
                       dict2({5: 6}), dict3({5: 6}),
                       range(10, -11, -1)
                      ):
            native = repr(simple)
            for function in "pformat", "saferepr":
                f = getattr(pprint, function)
                got = f(simple)
                self.assertEqual(native, got,
                                 "expected %s got %s from pprint.%s" %
                                 (native, got, function))
    def test_basic_line_wrap(self):
        """Line wrapping of wide dicts/lists/tuples, with/without indent."""
        # verify basic line-wrapping operation
        o = {'RPM_cal': 0,
             'RPM_cal2': 48059,
             'Speed_cal': 0,
             'controldesk_runtime_us': 0,
             'main_code_runtime_us': 0,
             'read_io_runtime_us': 0,
             'write_io_runtime_us': 43690}
        exp = """\
{'RPM_cal': 0,
 'RPM_cal2': 48059,
 'Speed_cal': 0,
 'controldesk_runtime_us': 0,
 'main_code_runtime_us': 0,
 'read_io_runtime_us': 0,
 'write_io_runtime_us': 43690}"""
        for type in [dict, dict2]:
            self.assertEqual(pprint.pformat(type(o)), exp)

        o = range(100)
        exp = '[%s]' % ',\n '.join(map(str, o))
        for type in [list, list2]:
            self.assertEqual(pprint.pformat(type(o)), exp)

        o = tuple(range(100))
        exp = '(%s)' % ',\n '.join(map(str, o))
        for type in [tuple, tuple2]:
            self.assertEqual(pprint.pformat(type(o)), exp)

        # indent parameter
        o = range(100)
        exp = '[   %s]' % ',\n    '.join(map(str, o))
        for type in [list, list2]:
            self.assertEqual(pprint.pformat(type(o), indent=4), exp)
    def test_nested_indentations(self):
        """indent= is applied at every nesting level."""
        o1 = list(range(10))
        o2 = dict(first=1, second=2, third=3)
        o = [o1, o2]
        expected = """\
[   [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
    {   'first': 1,
        'second': 2,
        'third': 3}]"""
        self.assertEqual(pprint.pformat(o, indent=4, width=42), expected)
    def test_sorted_dict(self):
        """pformat() sorts dict displays by key, even for small dicts."""
        # Starting in Python 2.5, pprint sorts dict displays by key regardless
        # of how small the dictionary may be.
        # Before the change, on 32-bit Windows pformat() gave order
        # 'a', 'c', 'b' here, so this test failed.
        d = {'a': 1, 'b': 1, 'c': 1}
        self.assertEqual(pprint.pformat(d), "{'a': 1, 'b': 1, 'c': 1}")
        self.assertEqual(pprint.pformat([d, d]),
                         "[{'a': 1, 'b': 1, 'c': 1}, {'a': 1, 'b': 1, 'c': 1}]")

        # The next one is kind of goofy.  The sorted order depends on the
        # alphabetic order of type names:  "int" < "str" < "tuple".  Before
        # Python 2.5, this was in the test_same_as_repr() test.  It's worth
        # keeping around for now because it's one of few tests of pprint
        # against a crazy mix of types.
        self.assertEqual(pprint.pformat({"xy\tab\n": (3,), 5: [[]], (): {}}),
                         r"{5: [[]], 'xy\tab\n': (3,), (): {}}")
    def test_ordered_dict(self):
        """OrderedDict keeps insertion order in pformat() output."""
        words = 'the quick brown fox jumped over a lazy dog'.split()
        d = collections.OrderedDict(zip(words, itertools.count()))
        self.assertEqual(pprint.pformat(d),
"""\
{'the': 0,
 'quick': 1,
 'brown': 2,
 'fox': 3,
 'jumped': 4,
 'over': 5,
 'a': 6,
 'lazy': 7,
 'dog': 8}""")
    def test_subclassing(self):
        """A PrettyPrinter subclass can customize format()."""
        o = {'names with spaces': 'should be presented using repr()',
             'others.should.not.be': 'like.this'}
        exp = """\
{'names with spaces': 'should be presented using repr()',
 others.should.not.be: like.this}"""
        self.assertEqual(DottedPrettyPrinter().pformat(o), exp)
    @test.support.cpython_only
    def test_set_reprs(self):
        """pformat() of sets/frozensets, including deeply nested ones."""
        # This test creates a complex arrangement of frozensets and
        # compares the pretty-printed repr against a string hard-coded in
        # the test.  The hard-coded repr depends on the sort order of
        # frozensets.
        #
        # However, as the docs point out: "Since sets only define
        # partial ordering (subset relationships), the output of the
        # list.sort() method is undefined for lists of sets."
        #
        # In a nutshell, the test assumes frozenset({0}) will always
        # sort before frozenset({1}), but:
        #
        # >>> frozenset({0}) < frozenset({1})
        # False
        # >>> frozenset({1}) < frozenset({0})
        # False
        #
        # Consequently, this test is fragile and
        # implementation-dependent.  Small changes to Python's sort
        # algorithm cause the test to fail when it should pass.
        self.assertEqual(pprint.pformat(set()), 'set()')
        self.assertEqual(pprint.pformat(set(range(3))), '{0, 1, 2}')
        self.assertEqual(pprint.pformat(frozenset()), 'frozenset()')
        self.assertEqual(pprint.pformat(frozenset(range(3))), 'frozenset({0, 1, 2})')
        cube_repr_tgt = """\
{frozenset(): frozenset({frozenset({2}), frozenset({0}), frozenset({1})}),
 frozenset({0}): frozenset({frozenset(),
                            frozenset({0, 2}),
                            frozenset({0, 1})}),
 frozenset({1}): frozenset({frozenset(),
                            frozenset({1, 2}),
                            frozenset({0, 1})}),
 frozenset({2}): frozenset({frozenset(),
                            frozenset({1, 2}),
                            frozenset({0, 2})}),
 frozenset({1, 2}): frozenset({frozenset({2}),
                               frozenset({1}),
                               frozenset({0, 1, 2})}),
 frozenset({0, 2}): frozenset({frozenset({2}),
                               frozenset({0}),
                               frozenset({0, 1, 2})}),
 frozenset({0, 1}): frozenset({frozenset({0}),
                               frozenset({1}),
                               frozenset({0, 1, 2})}),
 frozenset({0, 1, 2}): frozenset({frozenset({1, 2}),
                                  frozenset({0, 2}),
                                  frozenset({0, 1})})}"""
        cube = test.test_set.cube(3)
        self.assertEqual(pprint.pformat(cube), cube_repr_tgt)
        cubo_repr_tgt = """\
{frozenset({frozenset({0, 2}), frozenset({0})}): frozenset({frozenset({frozenset({0,
                                                                       2}),
                                                            frozenset({0,
                                                                       1,
                                                                       2})}),
                                                 frozenset({frozenset({0}),
                                                            frozenset({0,
                                                                       1})}),
                                                 frozenset({frozenset(),
                                                            frozenset({0})}),
                                                 frozenset({frozenset({2}),
                                                            frozenset({0,
                                                                       2})})}),
 frozenset({frozenset({0, 1}), frozenset({1})}): frozenset({frozenset({frozenset({0,
                                                                       1}),
                                                            frozenset({0,
                                                                       1,
                                                                       2})}),
                                                 frozenset({frozenset({0}),
                                                            frozenset({0,
                                                                       1})}),
                                                 frozenset({frozenset({1}),
                                                            frozenset({1,
                                                                       2})}),
                                                 frozenset({frozenset(),
                                                            frozenset({1})})}),
 frozenset({frozenset({1, 2}), frozenset({1})}): frozenset({frozenset({frozenset({1,
                                                                       2}),
                                                            frozenset({0,
                                                                       1,
                                                                       2})}),
                                                 frozenset({frozenset({2}),
                                                            frozenset({1,
                                                                       2})}),
                                                 frozenset({frozenset(),
                                                            frozenset({1})}),
                                                 frozenset({frozenset({1}),
                                                            frozenset({0,
                                                                       1})})}),
 frozenset({frozenset({1, 2}), frozenset({2})}): frozenset({frozenset({frozenset({1,
                                                                       2}),
                                                            frozenset({0,
                                                                       1,
                                                                       2})}),
                                                 frozenset({frozenset({1}),
                                                            frozenset({1,
                                                                       2})}),
                                                 frozenset({frozenset({2}),
                                                            frozenset({0,
                                                                       2})}),
                                                 frozenset({frozenset(),
                                                            frozenset({2})})}),
 frozenset({frozenset(), frozenset({0})}): frozenset({frozenset({frozenset({0}),
                                                      frozenset({0,
                                                                 1})}),
                                           frozenset({frozenset({0}),
                                                      frozenset({0,
                                                                 2})}),
                                           frozenset({frozenset(),
                                                      frozenset({1})}),
                                           frozenset({frozenset(),
                                                      frozenset({2})})}),
 frozenset({frozenset(), frozenset({1})}): frozenset({frozenset({frozenset(),
                                                      frozenset({0})}),
                                           frozenset({frozenset({1}),
                                                      frozenset({1,
                                                                 2})}),
                                           frozenset({frozenset(),
                                                      frozenset({2})}),
                                           frozenset({frozenset({1}),
                                                      frozenset({0,
                                                                 1})})}),
 frozenset({frozenset({2}), frozenset()}): frozenset({frozenset({frozenset({2}),
                                                      frozenset({1,
                                                                 2})}),
                                           frozenset({frozenset(),
                                                      frozenset({0})}),
                                           frozenset({frozenset(),
                                                      frozenset({1})}),
                                           frozenset({frozenset({2}),
                                                      frozenset({0,
                                                                 2})})}),
 frozenset({frozenset({0, 1, 2}), frozenset({0, 1})}): frozenset({frozenset({frozenset({1,
                                                                             2}),
                                                                  frozenset({0,
                                                                             1,
                                                                             2})}),
                                                       frozenset({frozenset({0,
                                                                             2}),
                                                                  frozenset({0,
                                                                             1,
                                                                             2})}),
                                                       frozenset({frozenset({0}),
                                                                  frozenset({0,
                                                                             1})}),
                                                       frozenset({frozenset({1}),
                                                                  frozenset({0,
                                                                             1})})}),
 frozenset({frozenset({0}), frozenset({0, 1})}): frozenset({frozenset({frozenset(),
                                                            frozenset({0})}),
                                                 frozenset({frozenset({0,
                                                                       1}),
                                                            frozenset({0,
                                                                       1,
                                                                       2})}),
                                                 frozenset({frozenset({0}),
                                                            frozenset({0,
                                                                       2})}),
                                                 frozenset({frozenset({1}),
                                                            frozenset({0,
                                                                       1})})}),
 frozenset({frozenset({2}), frozenset({0, 2})}): frozenset({frozenset({frozenset({0,
                                                                       2}),
                                                            frozenset({0,
                                                                       1,
                                                                       2})}),
                                                 frozenset({frozenset({2}),
                                                            frozenset({1,
                                                                       2})}),
                                                 frozenset({frozenset({0}),
                                                            frozenset({0,
                                                                       2})}),
                                                 frozenset({frozenset(),
                                                            frozenset({2})})}),
 frozenset({frozenset({0, 1, 2}), frozenset({0, 2})}): frozenset({frozenset({frozenset({1,
                                                                             2}),
                                                                  frozenset({0,
                                                                             1,
                                                                             2})}),
                                                       frozenset({frozenset({0,
                                                                             1}),
                                                                  frozenset({0,
                                                                             1,
                                                                             2})}),
                                                       frozenset({frozenset({0}),
                                                                  frozenset({0,
                                                                             2})}),
                                                       frozenset({frozenset({2}),
                                                                  frozenset({0,
                                                                             2})})}),
 frozenset({frozenset({1, 2}), frozenset({0, 1, 2})}): frozenset({frozenset({frozenset({0,
                                                                             2}),
                                                                  frozenset({0,
                                                                             1,
                                                                             2})}),
                                                       frozenset({frozenset({0,
                                                                             1}),
                                                                  frozenset({0,
                                                                             1,
                                                                             2})}),
                                                       frozenset({frozenset({2}),
                                                                  frozenset({1,
                                                                             2})}),
                                                       frozenset({frozenset({1}),
                                                                  frozenset({1,
                                                                             2})})})}"""
        cubo = test.test_set.linegraph(cube)
        self.assertEqual(pprint.pformat(cubo), cubo_repr_tgt)
    def test_depth(self):
        """depth= truncates nesting with '...' placeholders."""
        nested_tuple = (1, (2, (3, (4, (5, 6)))))
        nested_dict = {1: {2: {3: {4: {5: {6: 6}}}}}}
        nested_list = [1, [2, [3, [4, [5, [6, []]]]]]]

        self.assertEqual(pprint.pformat(nested_tuple), repr(nested_tuple))
        self.assertEqual(pprint.pformat(nested_dict), repr(nested_dict))
        self.assertEqual(pprint.pformat(nested_list), repr(nested_list))

        lv1_tuple = '(1, (...))'
        lv1_dict = '{1: {...}}'
        lv1_list = '[1, [...]]'
        self.assertEqual(pprint.pformat(nested_tuple, depth=1), lv1_tuple)
        self.assertEqual(pprint.pformat(nested_dict, depth=1), lv1_dict)
        self.assertEqual(pprint.pformat(nested_list, depth=1), lv1_list)
    def test_sort_unorderable_values(self):
        """Unorderable values are sorted by id (Issue 3976)."""
        # Issue 3976:  sorted pprints fail for unorderable values.
        n = 20
        keys = [Unorderable() for i in range(n)]
        random.shuffle(keys)
        skeys = sorted(keys, key=id)
        clean = lambda s: s.replace(' ', '').replace('\n','')

        self.assertEqual(clean(pprint.pformat(set(keys))),
                         '{' + ','.join(map(repr, skeys)) + '}')
        self.assertEqual(clean(pprint.pformat(frozenset(keys))),
                         'frozenset({' + ','.join(map(repr, skeys)) + '})')
        self.assertEqual(clean(pprint.pformat(dict.fromkeys(keys))),
                         '{' + ','.join('%r:None' % k for k in skeys) + '}')
class DottedPrettyPrinter(pprint.PrettyPrinter):
    """PrettyPrinter subclass that renders space-free strings unquoted."""

    def format(self, object, context, maxlevels, level):
        if not isinstance(object, str):
            return pprint.PrettyPrinter.format(
                self, object, context, maxlevels, level)
        if ' ' in object:
            # Strings containing spaces keep their normal repr().
            return repr(object), 1, 0
        # Dotted/plain names are emitted bare (marked unreadable).
        return object, 0, 0
def test_main():
    # Run the pprint query tests through the regrtest helper.
    test.support.run_unittest(QueryTestCase)


if __name__ == "__main__":
    test_main()
| apache-2.0 |
arokem/nipy | nipy/labs/datasets/volumes/volume_field.py | 6 | 7703 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The base volumetric field interface
This defines the nipy volumetric structure interface.
"""
from ..transforms.transform import CompositionError
################################################################################
# class `VolumeField`
################################################################################
class VolumeField(object):
    """ The base volumetric structure.

        This object represents numerical values embedded in a
        3-dimensional world space (called a field in physics and
        engineering).

        This is an abstract base class: it defines the interface, but not
        the logics.

        Attributes
        ----------
        world_space: string
            World space the data is embedded in. For instance `mni152`.
        metadata: dictionary
            Optional, user-defined, dictionary used to carry around
            extra information about the data as it goes through
            transformations. The consistency of this information is not
            maintained as the data is modified.
    """
    #---------------------------------------------------------------------------
    # Public attributes -- VolumeField interface
    #---------------------------------------------------------------------------

    # The name of the reference coordinate system
    world_space = ''

    # User defined meta data.
    # NOTE(review): this is a class-level dict shared by all instances that
    # do not rebind it; subclasses are expected to assign their own --
    # mutating it in place would leak across instances.
    metadata = dict()

    #---------------------------------------------------------------------------
    # Public methods -- VolumeField interface
    #---------------------------------------------------------------------------

    def get_transform(self):
        """ Returns the transform object associated with the volumetric
            structure which is a general description of the mapping from
            the values to the world space.

            Returns
            -------
            transform : nipy.datasets.Transform object
        """
        raise NotImplementedError

    def resampled_to_img(self, target_image, interpolation=None):
        """ Resample the volume to be sampled similarly than the target
            volumetric structure.

            Parameters
            ----------
            target_image : nipy volume
                Nipy volume structure onto the grid of which the data will be
                resampled.
            interpolation : None, 'continuous' or 'nearest', optional
                Interpolation type used when calculating values in
                different world spaces. If None, the volume's interpolation
                logic is used.

            Returns
            -------
            resampled_image : nipy_image
                New nipy image with the data resampled.

            Notes
            -----
            Both the target image and the original image should be
            embedded in the same world space.
        """
        # IMPORTANT: Polymorphism can be implemented by walking the
        # MRO and finding a method that does not raise
        # NotImplementedError.
        raise NotImplementedError

    def as_volume_img(self, affine=None, shape=None,
                      interpolation=None, copy=True):
        """ Resample the image to be an image with the data points lying
            on a regular grid with an affine mapping to the world space (a
            nipy VolumeImg).

            Parameters
            ----------
            affine: 4x4 or 3x3 ndarray, optional
                Affine of the new voxel grid or transform object pointing
                to the new voxel coordinate grid. If a 3x3 ndarray is given,
                it is considered to be the rotation part of the affine,
                and the best possible bounding box is calculated,
                in this case, the shape argument is not used. If None
                is given, a default affine is provided by the image.
            shape: (n_x, n_y, n_z), tuple of integers, optional
                The shape of the grid used for sampling, if None
                is given, a default affine is provided by the image.
            interpolation : None, 'continuous' or 'nearest', optional
                Interpolation type used when calculating values in
                different world spaces. If None, the image's interpolation
                logic is used.

            Returns
            -------
            resampled_image : nipy VolumeImg
                New nipy VolumeImg with the data sampled on the grid
                defined by the affine and shape.

            Notes
            -----
            The coordinate system of the image is not changed: the
            returned image points to the same world space.
        """
        raise NotImplementedError

    def values_in_world(self, x, y, z, interpolation=None):
        """ Return the values of the data at the world-space positions given by
            x, y, z

            Parameters
            ----------
            x : number or ndarray
                x positions in world space, in other words millimeters
            y : number or ndarray
                y positions in world space, in other words millimeters.
                The shape of y should match the shape of x
            z : number or ndarray
                z positions in world space, in other words millimeters.
                The shape of z should match the shape of x
            interpolation : None, 'continuous' or 'nearest', optional
                Interpolation type used when calculating values in
                different world spaces. If None, the image's interpolation
                logic is used.

            Returns
            -------
            values : number or ndarray
                Data values interpolated at the given world position.
                This is a number or an ndarray, depending on the shape of
                the input coordinate.
        """
        raise NotImplementedError

    def composed_with_transform(self, w2w_transform):
        """ Return a new image embedding the same data in a different
            world space using the given world to world transform.

            Parameters
            ----------
            w2w_transform : transform object
                The transform object giving the mapping between
                the current world space of the image, and the new
                world space.

            Returns
            --------
            remapped_image : nipy image
                An image containing the same data, expressed
                in the new world space.
        """
        # The transform must start from this image's world space.
        if w2w_transform.input_space != self.world_space:
            raise CompositionError(
                "The transform given does not apply to "
                "the image's world space:\n%s\n\n%s" %
                (w2w_transform, self)
                )
        new_img = self._apply_transform(w2w_transform)
        new_img.world_space = w2w_transform.output_space
        return new_img

    #---------------------------------------------------------------------------
    # Private methods
    #---------------------------------------------------------------------------

    # The subclasses should implement __repr__, __copy__, __deepcopy__,
    # __eq__

    # TODO: We need to implement (or check if implemented) hashing,
    # weakref, pickling?

    def _apply_transform(self, w2w_transform):
        """ Implement this method to put in the logic of applying a
            transformation on the image class.
        """
        raise NotImplementedError
| bsd-3-clause |
salaria/odoo | addons/stock_landed_costs/stock_landed_costs.py | 19 | 19556 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.exceptions import Warning
from openerp.tools import float_compare, float_round
from openerp.tools.translate import _
import product
from openerp import SUPERUSER_ID
class stock_landed_cost(osv.osv):
_name = 'stock.landed.cost'
_description = 'Stock Landed Cost'
_inherit = 'mail.thread'
_track = {
'state': {
'stock_landed_costs.mt_stock_landed_cost_open': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'done',
},
}
def _total_amount(self, cr, uid, ids, name, args, context=None):
result = {}
for cost in self.browse(cr, uid, ids, context=context):
total = 0.0
for line in cost.cost_lines:
total += line.price_unit
result[cost.id] = total
return result
def _get_cost_line(self, cr, uid, ids, context=None):
cost_to_recompute = []
for line in self.pool.get('stock.landed.cost.lines').browse(cr, uid, ids, context=context):
cost_to_recompute.append(line.cost_id.id)
return cost_to_recompute
    def get_valuation_lines(self, cr, uid, ids, picking_ids=None, context=None):
        """Build valuation-adjustment line values for the given pickings.

        Only moves of products valued in real time with the 'real' cost
        method are considered; for each qualifying move the current quant
        value plus total weight/volume are gathered. Raises if no move
        qualifies.
        """
        picking_obj = self.pool.get('stock.picking')
        lines = []
        if not picking_ids:
            return lines

        for picking in picking_obj.browse(cr, uid, picking_ids):
            for move in picking.move_lines:
                #it doesn't make sense to make a landed cost for a product that isn't set as being valuated in real time at real cost
                if move.product_id.valuation != 'real_time' or move.product_id.cost_method != 'real':
                    continue
                total_cost = 0.0
                # weight/volume are per-unit product values scaled by the moved quantity
                weight = move.product_id and move.product_id.weight * move.product_qty
                volume = move.product_id and move.product_id.volume * move.product_qty
                for quant in move.quant_ids:
                    total_cost += quant.cost * quant.qty
                vals = dict(product_id=move.product_id.id, move_id=move.id, quantity=move.product_uom_qty, former_cost=total_cost, weight=weight, volume=volume)
                lines.append(vals)
        if not lines:
            raise osv.except_osv(_('Error!'), _('The selected picking does not contain any move that would be impacted by landed costs. Landed costs are only possible for products configured in real time valuation with real price costing method. Please make sure it is the case, or you selected the correct picking'))
        return lines
_columns = {
'name': fields.char('Name', track_visibility='always', readonly=True, copy=False),
'date': fields.date('Date', required=True, states={'done': [('readonly', True)]}, track_visibility='onchange', copy=False),
'picking_ids': fields.many2many('stock.picking', string='Pickings', states={'done': [('readonly', True)]}, copy=False),
'cost_lines': fields.one2many('stock.landed.cost.lines', 'cost_id', 'Cost Lines', states={'done': [('readonly', True)]}, copy=True),
'valuation_adjustment_lines': fields.one2many('stock.valuation.adjustment.lines', 'cost_id', 'Valuation Adjustments', states={'done': [('readonly', True)]}),
'description': fields.text('Item Description', states={'done': [('readonly', True)]}),
'amount_total': fields.function(_total_amount, type='float', string='Total', digits_compute=dp.get_precision('Account'),
store={
'stock.landed.cost': (lambda self, cr, uid, ids, c={}: ids, ['cost_lines'], 20),
'stock.landed.cost.lines': (_get_cost_line, ['price_unit', 'quantity', 'cost_id'], 20),
}, track_visibility='always'
),
'state': fields.selection([('draft', 'Draft'), ('done', 'Posted'), ('cancel', 'Cancelled')], 'State', readonly=True, track_visibility='onchange', copy=False),
'account_move_id': fields.many2one('account.move', 'Journal Entry', readonly=True, copy=False),
'account_journal_id': fields.many2one('account.journal', 'Account Journal', required=True, states={'done': [('readonly', True)]}),
}
_defaults = {
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'stock.landed.cost'),
'state': 'draft',
'date': fields.date.context_today,
}
    def _create_accounting_entries(self, cr, uid, line, move_id, qty_out, context=None):
        """Create the journal items for one valuation adjustment line.

        Debits the product's stock valuation account and credits the
        expense account resolved from the cost line / cost product;
        returns False when the cost line has no product set.
        """
        product_obj = self.pool.get('product.template')
        cost_product = line.cost_line_id and line.cost_line_id.product_id
        if not cost_product:
            return False
        accounts = product_obj.get_product_accounts(cr, uid, line.product_id.product_tmpl_id.id, context=context)
        debit_account_id = accounts['property_stock_valuation_account_id']
        already_out_account_id = accounts['stock_account_output']
        # Expense account: explicit on the cost line, else product, else category.
        credit_account_id = line.cost_line_id.account_id.id or cost_product.property_account_expense.id or cost_product.categ_id.property_account_expense_categ.id
        if not credit_account_id:
            raise osv.except_osv(_('Error!'), _('Please configure Stock Expense Account for product: %s.') % (cost_product.name))
        return self._create_account_move_line(cr, uid, line, move_id, credit_account_id, debit_account_id, qty_out, already_out_account_id, context=context)
    def _create_account_move_line(self, cr, uid, line, move_id, credit_account_id, debit_account_id, qty_out, already_out_account_id, context=None):
        """
        Generate the account.move.line values to track the landed cost.
        Afterwards, for the goods that are already out of stock, we should create the out moves
        """
        aml_obj = self.pool.get('account.move.line')
        base_line = {
            'name': line.name,
            'move_id': move_id,
            'product_id': line.product_id.id,
            'quantity': line.quantity,
        }
        debit_line = dict(base_line, account_id=debit_account_id)
        credit_line = dict(base_line, account_id=credit_account_id)
        diff = line.additional_landed_cost
        if diff > 0:
            debit_line['debit'] = diff
            credit_line['credit'] = diff
        else:
            # negative cost, reverse the entry
            debit_line['credit'] = -diff
            credit_line['debit'] = -diff
        aml_obj.create(cr, uid, debit_line, context=context)
        aml_obj.create(cr, uid, credit_line, context=context)

        #Create account move lines for quants already out of stock
        if qty_out > 0:
            debit_line = dict(debit_line,
                              name=(line.name + ": " + str(qty_out) + _(' already out')),
                              quantity=qty_out,
                              account_id=already_out_account_id)
            credit_line = dict(credit_line,
                               name=(line.name + ": " + str(qty_out) + _(' already out')),
                               quantity=qty_out,
                               account_id=debit_account_id)
            # Prorate the cost difference by the quantity already shipped.
            diff = diff * qty_out / line.quantity
            if diff > 0:
                debit_line['debit'] = diff
                credit_line['credit'] = diff
            else:
                # negative cost, reverse the entry
                debit_line['credit'] = -diff
                credit_line['debit'] = -diff
            aml_obj.create(cr, uid, debit_line, context=context)
            aml_obj.create(cr, uid, credit_line, context=context)
        return True
def _create_account_move(self, cr, uid, cost, context=None):
vals = {
'journal_id': cost.account_journal_id.id,
'period_id': self.pool.get('account.period').find(cr, uid, cost.date, context=context)[0],
'date': cost.date,
'ref': cost.name
}
return self.pool.get('account.move').create(cr, uid, vals, context=context)
def _check_sum(self, cr, uid, landed_cost, context=None):
"""
Will check if each cost line its valuation lines sum to the correct amount
and if the overall total amount is correct also
"""
costcor = {}
tot = 0
for valuation_line in landed_cost.valuation_adjustment_lines:
if costcor.get(valuation_line.cost_line_id):
costcor[valuation_line.cost_line_id] += valuation_line.additional_landed_cost
else:
costcor[valuation_line.cost_line_id] = valuation_line.additional_landed_cost
tot += valuation_line.additional_landed_cost
prec = self.pool['decimal.precision'].precision_get(cr, uid, 'Account')
# float_compare returns 0 for equal amounts
res = not bool(float_compare(tot, landed_cost.amount_total, precision_digits=prec))
for costl in costcor.keys():
if float_compare(costcor[costl], costl.price_unit, precision_digits=prec):
res = False
return res
def button_validate(self, cr, uid, ids, context=None):
    """Validate draft landed costs: post the accounting entries and spread
    the additional cost over the quants of the related stock moves."""
    quant_obj = self.pool.get('stock.quant')

    for cost in self.browse(cr, uid, ids, context=context):
        if cost.state != 'draft':
            raise Warning(_('Only draft landed costs can be validated'))
        if not cost.valuation_adjustment_lines or not self._check_sum(cr, uid, cost, context=context):
            raise osv.except_osv(_('Error!'), _('You cannot validate a landed cost which has no valid valuation lines.'))
        move_id = self._create_account_move(cr, uid, cost, context=context)
        quant_dict = {}
        for line in cost.valuation_adjustment_lines:
            # lines not linked to a stock move carry no valuation to adjust
            if not line.move_id:
                continue
            # cost delta per unit introduced by this adjustment line
            per_unit = line.final_cost / line.quantity
            diff = per_unit - line.former_cost_per_unit
            quants = [quant for quant in line.move_id.quant_ids]
            for quant in quants:
                if quant.id not in quant_dict:
                    quant_dict[quant.id] = quant.cost + diff
                else:
                    quant_dict[quant.id] += diff
            # written as SUPERUSER_ID: quant cost update bypasses user ACLs
            for key, value in quant_dict.items():
                quant_obj.write(cr, SUPERUSER_ID, key, {'cost': value}, context=context)
            # quantity already delivered (outside internal locations) gets
            # its own accounting treatment in _create_accounting_entries
            qty_out = 0
            for quant in line.move_id.quant_ids:
                if quant.location_id.usage != 'internal':
                    qty_out += quant.qty
            self._create_accounting_entries(cr, uid, line, move_id, qty_out, context=context)
        self.write(cr, uid, cost.id, {'state': 'done', 'account_move_id': move_id}, context=context)
    return True
def button_cancel(self, cr, uid, ids, context=None):
    """Cancel landed costs; validated ones cannot be cancelled."""
    # NOTE(review): `ids` is browsed as a whole and then used like a single
    # record (`cost.state`) -- assumes one id (or record-set semantics);
    # confirm against the callers.
    cost = self.browse(cr, uid, ids, context=context)
    if cost.state == 'done':
        raise Warning(_('Validated landed costs cannot be cancelled, '
                        'but you could create negative landed costs to reverse them'))
    return cost.write({'state': 'cancel'})
def unlink(self, cr, uid, ids, context=None):
    """Delete landed costs, cancelling them first (raises if validated)."""
    self.button_cancel(cr, uid, ids, context=context)
    return super(stock_landed_cost, self).unlink(cr, uid, ids, context=context)
def compute_landed_cost(self, cr, uid, ids, context=None):
    """(Re)compute the valuation adjustment lines of the given landed costs.

    Existing adjustment lines are deleted and recreated from the pickings,
    then every cost line's amount is spread over them according to its
    split method (by quantity, weight, volume, current cost, or equally).
    """
    line_obj = self.pool.get('stock.valuation.adjustment.lines')
    unlink_ids = line_obj.search(cr, uid, [('cost_id', 'in', ids)], context=context)
    line_obj.unlink(cr, uid, unlink_ids, context=context)
    digits = dp.get_precision('Product Price')(cr)
    towrite_dict = {}
    for cost in self.browse(cr, uid, ids, context=None):
        if not cost.picking_ids:
            continue
        picking_ids = [p.id for p in cost.picking_ids]
        total_qty = 0.0
        total_cost = 0.0
        total_weight = 0.0
        total_volume = 0.0
        total_line = 0.0
        vals = self.get_valuation_lines(cr, uid, [cost.id], picking_ids=picking_ids, context=context)
        for v in vals:
            # one adjustment line is created per (valuation, cost line) pair
            for line in cost.cost_lines:
                v.update({'cost_id': cost.id, 'cost_line_id': line.id})
                self.pool.get('stock.valuation.adjustment.lines').create(cr, uid, v, context=context)
            total_qty += v.get('quantity', 0.0)
            total_cost += v.get('former_cost', 0.0)
            total_weight += v.get('weight', 0.0)
            total_volume += v.get('volume', 0.0)
            total_line += 1
        for line in cost.cost_lines:
            value_split = 0.0
            for valuation in cost.valuation_adjustment_lines:
                value = 0.0
                if valuation.cost_line_id and valuation.cost_line_id.id == line.id:
                    # share of this cost line attributed to the valuation
                    # line, depending on the configured split method
                    if line.split_method == 'by_quantity' and total_qty:
                        per_unit = (line.price_unit / total_qty)
                        value = valuation.quantity * per_unit
                    elif line.split_method == 'by_weight' and total_weight:
                        per_unit = (line.price_unit / total_weight)
                        value = valuation.weight * per_unit
                    elif line.split_method == 'by_volume' and total_volume:
                        per_unit = (line.price_unit / total_volume)
                        value = valuation.volume * per_unit
                    elif line.split_method == 'equal':
                        value = (line.price_unit / total_line)
                    elif line.split_method == 'by_current_cost_price' and total_cost:
                        per_unit = (line.price_unit / total_cost)
                        value = valuation.former_cost * per_unit
                    else:
                        # fall back on an equal split
                        value = (line.price_unit / total_line)
                    if digits:
                        value = float_round(value, precision_digits=digits[1], rounding_method='UP')
                    # cap so rounded-up shares never exceed the line total
                    value = min(value, line.price_unit - value_split)
                    value_split += value
                    if valuation.id not in towrite_dict:
                        towrite_dict[valuation.id] = value
                    else:
                        towrite_dict[valuation.id] += value
    if towrite_dict:
        for key, value in towrite_dict.items():
            line_obj.write(cr, uid, key, {'additional_landed_cost': value}, context=context)
    return True
class stock_landed_cost_lines(osv.osv):
    """One cost component (freight, insurance, ...) of a landed cost."""
    _name = 'stock.landed.cost.lines'
    _description = 'Stock Landed Cost Lines'

    def onchange_product_id(self, cr, uid, ids, product_id=False, context=None):
        """Prefill description, split method, cost and expense account
        from the selected product."""
        result = {}
        if not product_id:
            return {'value': {'quantity': 0.0, 'price_unit': 0.0}}

        product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
        result['name'] = product.name
        result['split_method'] = product.split_method
        result['price_unit'] = product.standard_price
        # product's expense account, falling back on its category's one
        result['account_id'] = product.property_account_expense and product.property_account_expense.id or product.categ_id.property_account_expense_categ.id
        return {'value': result}

    _columns = {
        'name': fields.char('Description'),
        'cost_id': fields.many2one('stock.landed.cost', 'Landed Cost', required=True, ondelete='cascade'),
        'product_id': fields.many2one('product.product', 'Product', required=True),
        'price_unit': fields.float('Cost', required=True, digits_compute=dp.get_precision('Product Price')),
        # selection values are declared in this addon's `product` module
        'split_method': fields.selection(product.SPLIT_METHOD, string='Split Method', required=True),
        'account_id': fields.many2one('account.account', 'Account', domain=[('type', '<>', 'view'), ('type', '<>', 'closed')]),
    }
class stock_valuation_adjustment_lines(osv.osv):
    """Valuation line of a landed cost: how much of a cost line is added to
    one stock move's former valuation."""
    _name = 'stock.valuation.adjustment.lines'
    _description = 'Stock Valuation Adjustment Lines'

    def _amount_final(self, cr, uid, ids, name, args, context=None):
        """Function field: former cost per unit and final (former +
        additional landed) cost."""
        result = {}
        for line in self.browse(cr, uid, ids, context=context):
            result[line.id] = {
                'former_cost_per_unit': 0.0,
                'final_cost': 0.0,
            }
            # NOTE(review): when quantity is falsy the whole expression
            # evaluates to the literal 1.0 (not former_cost / 1.0) -- confirm
            # this is the intended fallback
            result[line.id]['former_cost_per_unit'] = (line.former_cost / line.quantity if line.quantity else 1.0)
            result[line.id]['final_cost'] = (line.former_cost + line.additional_landed_cost)
        return result

    def _get_name(self, cr, uid, ids, name, arg, context=None):
        """Function field: "<product code or name> - <cost line name>"."""
        res = {}
        for line in self.browse(cr, uid, ids, context=context):
            res[line.id] = line.product_id.code or line.product_id.name or ''
            if line.cost_line_id:
                res[line.id] += ' - ' + line.cost_line_id.name
        return res

    _columns = {
        'name': fields.function(_get_name, type='char', string='Description', store=True),
        'cost_id': fields.many2one('stock.landed.cost', 'Landed Cost', required=True, ondelete='cascade'),
        'cost_line_id': fields.many2one('stock.landed.cost.lines', 'Cost Line', readonly=True),
        'move_id': fields.many2one('stock.move', 'Stock Move', readonly=True),
        'product_id': fields.many2one('product.product', 'Product', required=True),
        'quantity': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
        'weight': fields.float('Weight', digits_compute=dp.get_precision('Product Unit of Measure')),
        'volume': fields.float('Volume', digits_compute=dp.get_precision('Product Unit of Measure')),
        'former_cost': fields.float('Former Cost', digits_compute=dp.get_precision('Product Price')),
        'former_cost_per_unit': fields.function(_amount_final, multi='cost', string='Former Cost(Per Unit)', type='float', digits_compute=dp.get_precision('Account'), store=True),
        'additional_landed_cost': fields.float('Additional Landed Cost', digits_compute=dp.get_precision('Product Price')),
        'final_cost': fields.function(_amount_final, multi='cost', string='Final Cost', type='float', digits_compute=dp.get_precision('Account'), store=True),
    }

    _defaults = {
        'quantity': 1.0,
        'weight': 1.0,
        'volume': 1.0,
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yueyongyue/saltshaker | shaker/highstate.py | 1 | 1467 | import os
class HighState(object):
    """Manage salt state (.sls) files under /srv/salt and make sure the
    salt master's file_roots configuration points at that directory."""

    def __init__(self):
        # Always make sure the state directory exists (the original only
        # created it when the config file was already present, so a first
        # run ended up with neither).
        if not os.path.isdir('/srv/salt'):
            os.makedirs('/srv/salt')
        # Write the file_roots config only when it is missing.
        if not os.path.isfile('/etc/salt/master.d/file_roots.conf'):
            file_roots = open("/etc/salt/master.d/file_roots.conf", "w+")
            # proper YAML nesting: "base" under "file_roots", the path
            # under "base" (the flat indentation previously written was
            # not valid YAML)
            add = ["file_roots:\n", "  base:\n", "    - /srv/salt\n"]
            file_roots.writelines(add)
            file_roots.close()

    def list_sls(self, dir):
        """Return {state_name: list_of_content_lines} for every regular
        file directly inside `dir`.

        :param dir: directory to scan (was previously half-hardcoded to
            /srv/salt/ regardless of this argument)
        """
        all_sls = {}
        for filename in os.listdir(dir):
            path = os.path.join(dir, filename)
            if os.path.isfile(path):
                content = open(path).readlines()
                name = filename.split('.')[0]
                all_sls[name] = content
        return all_sls

    def add_sls(self, filename, content):
        """Create or overwrite /srv/salt/<filename>.sls with `content`."""
        with open("/srv/salt/" + filename + ".sls", "w") as sls_file:
            sls_file.writelines(content)

    def del_sls(self, filename):
        """Delete /srv/salt/<filename>.sls.

        Returns an error string when the file does not exist, None on
        success.
        """
        path = "/srv/salt/" + filename + ".sls"
        if os.path.exists(path):
            os.remove(path)
        else:
            # historical (misspelled) sentinel kept in case callers match it
            return "file not exit"
def main():
    """Ad-hoc manual check: set up HighState and list the states that are
    currently under /srv/salt (earlier add_sls/del_sls experiments were
    removed; see VCS history)."""
    hs = HighState()
    states = hs.list_sls("/srv/salt/")
# Run the manual check only when executed directly, not on import.
if __name__ == '__main__':
    main()
| apache-2.0 |
feigames/Odoo | addons/website/models/ir_qweb.py | 8 | 19146 | # -*- coding: utf-8 -*-
"""
Website-context rendering needs to add some metadata to rendered fields,
as well as render a few fields differently.
Also, adds methods to convert values back to openerp models.
"""
import cStringIO
import datetime
import itertools
import logging
import os
import urllib2
import urlparse
import re
import pytz
import werkzeug.urls
import werkzeug.utils
from dateutil import parser
from lxml import etree, html
from PIL import Image as I
import openerp.modules
import openerp
from openerp.osv import orm, fields
from openerp.tools import ustr, DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools import html_escape as escape
from openerp.addons.web.http import request
from openerp.addons.base.ir import ir_qweb
REMOTE_CONNECTION_TIMEOUT = 2.5
logger = logging.getLogger(__name__)
class QWeb(orm.AbstractModel):
    """ QWeb object for rendering stuff in the website context
    """
    _name = 'website.qweb'
    _inherit = 'ir.qweb'

    # per-tag name of the attribute that carries a URL and must be rewritten
    # for multilang websites
    URL_ATTRS = {
        'form': 'action',
        'a': 'href',
    }

    def add_template(self, qcontext, name, node):
        # preprocessing for multilang static urls
        if request.website:
            for tag, attr in self.URL_ATTRS.iteritems():
                for e in node.iterdescendants(tag=tag):
                    url = e.get(attr)
                    if url:
                        # route the static URL through the context's url_for
                        e.set(attr, qcontext.get('url_for')(url))
        super(QWeb, self).add_template(qcontext, name, node)

    def render_att_att(self, element, attribute_name, attribute_value, qwebcontext):
        # URL-carrying attribute for this tag, if any (e.g. 'href' for <a>)
        URL_ATTRS = self.URL_ATTRS.get(element.tag)
        is_website = request.website
        for att, val in super(QWeb, self).render_att_att(element, attribute_name, attribute_value, qwebcontext):
            # only rewrite string URLs, and only when rendering for a website
            if is_website and att == URL_ATTRS and isinstance(val, basestring):
                val = qwebcontext.get('url_for')(val)
            yield (att, val)

    def get_converter_for(self, field_type):
        # fall back on the generic website field converter when no
        # specialized converter model exists for this field type
        return self.pool.get(
            'website.qweb.field.' + field_type,
            self.pool['website.qweb.field'])
class Field(orm.AbstractModel):
    """Base converter for website-editable fields: adds edition metadata to
    the rendered element and converts edited HTML back to field values."""
    _name = 'website.qweb.field'
    _inherit = 'ir.qweb.field'

    def attributes(self, cr, uid, field_name, record, options,
                   source_element, g_att, t_att, qweb_context, context=None):
        if options is None: options = {}
        column = record._model._all_columns[field_name].column
        attrs = [('data-oe-translate', 1 if column.translate else 0)]

        # placeholder precedence: widget option, then element attribute,
        # then the column's own placeholder (if it defines one)
        placeholder = options.get('placeholder') \
                or source_element.get('placeholder') \
                or getattr(column, 'placeholder', None)
        if placeholder:
            attrs.append(('placeholder', placeholder))

        return itertools.chain(
            super(Field, self).attributes(cr, uid, field_name, record, options,
                                          source_element, g_att, t_att,
                                          qweb_context, context=context),
            attrs
        )

    def value_from_string(self, value):
        # identity by default; subclasses override (e.g. int for integers)
        return value

    def from_html(self, cr, uid, model, column, element, context=None):
        # reduce the edited DOM to its text content and parse that
        return self.value_from_string(element.text_content().strip())

    def qweb_object(self):
        return self.pool['website.qweb']
class Integer(orm.AbstractModel):
    _name = 'website.qweb.field.integer'
    _inherit = ['website.qweb.field']

    # parse edited text back with the int() builtin
    value_from_string = int
class Float(orm.AbstractModel):
    _name = 'website.qweb.field.float'
    _inherit = ['website.qweb.field', 'ir.qweb.field.float']

    def from_html(self, cr, uid, model, column, element, context=None):
        """Parse a user-localized float back into a python float."""
        lang = self.user_lang(cr, uid, context=context)
        text = element.text_content().strip()
        # drop the thousands separators, then normalize the decimal point
        normalized = text.replace(lang.thousands_sep, '').replace(lang.decimal_point, '.')
        return float(normalized)
def parse_fuzzy(in_format, value):
    """Fuzzy-parse `value` with dateutil, using `in_format` only to decide
    whether days come before months and years before days."""
    day_first = in_format.find('%d') < in_format.find('%m')
    year_token = '%y' if '%y' in in_format else '%Y'
    year_first = in_format.find(year_token) < in_format.find('%d')
    return parser.parse(value, dayfirst=day_first, yearfirst=year_first)
class Date(orm.AbstractModel):
    _name = 'website.qweb.field.date'
    _inherit = ['website.qweb.field', 'ir.qweb.field.date']

    def attributes(self, cr, uid, field_name, record, options,
                   source_element, g_att, t_att, qweb_context,
                   context=None):
        """Add the raw server-format date as data-oe-original."""
        attrs = super(Date, self).attributes(
            cr, uid, field_name, record, options, source_element, g_att, t_att,
            qweb_context, context=context)  # was context=None: dropped the caller's context
        return itertools.chain(attrs, [('data-oe-original', record[field_name])])

    def from_html(self, cr, uid, model, column, element, context=None):
        """Validate the edited text as a server-format date and return it
        (False when empty)."""
        value = element.text_content().strip()
        if not value:
            return False
        # raises ValueError when the user typed an invalid date
        datetime.datetime.strptime(value, DEFAULT_SERVER_DATE_FORMAT)
        return value
class DateTime(orm.AbstractModel):
    _name = 'website.qweb.field.datetime'
    _inherit = ['website.qweb.field', 'ir.qweb.field.datetime']

    def attributes(self, cr, uid, field_name, record, options,
                   source_element, g_att, t_att, qweb_context,
                   context=None):
        """Add the user-timezone value as data-oe-original."""
        value = record[field_name]
        if isinstance(value, basestring):
            value = datetime.datetime.strptime(
                value, DEFAULT_SERVER_DATETIME_FORMAT)
        if value:
            # convert from UTC (server timezone) to user timezone
            value = fields.datetime.context_timestamp(
                cr, uid, timestamp=value, context=context)
            value = value.strftime(DEFAULT_SERVER_DATETIME_FORMAT)

        attrs = super(DateTime, self).attributes(
            cr, uid, field_name, record, options, source_element, g_att, t_att,
            qweb_context, context=context)  # was context=None: dropped the caller's context
        return itertools.chain(attrs, [
            ('data-oe-original', value)
        ])

    def from_html(self, cr, uid, model, column, element, context=None):
        """Parse the edited value back from the user's timezone to a
        server-format UTC datetime string (False when empty)."""
        if context is None: context = {}
        value = element.text_content().strip()
        if not value:
            return False

        # parse from string to datetime
        dt = datetime.datetime.strptime(value, DEFAULT_SERVER_DATETIME_FORMAT)

        # convert back from user's timezone to UTC; the timezone comes from
        # the context, falling back on the user's configured tz
        tz_name = context.get('tz') \
            or self.pool['res.users'].read(cr, openerp.SUPERUSER_ID, uid, ['tz'], context=context)['tz']
        if tz_name:
            try:
                user_tz = pytz.timezone(tz_name)
                utc = pytz.utc
                dt = user_tz.localize(dt).astimezone(utc)
            except Exception:
                logger.warn(
                    "Failed to convert the value for a field of the model"
                    " %s back from the user's timezone (%s) to UTC",
                    model, tz_name,
                    exc_info=True)

        # format back to string
        return dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
class Text(orm.AbstractModel):
    _name = 'website.qweb.field.text'
    _inherit = ['website.qweb.field', 'ir.qweb.field.text']

    def from_html(self, cr, uid, model, column, element, context=None):
        # collapse the edited HTML back into plain text (see html_to_text)
        return html_to_text(element)
class Selection(orm.AbstractModel):
    _name = 'website.qweb.field.selection'
    _inherit = ['website.qweb.field', 'ir.qweb.field.selection']

    def from_html(self, cr, uid, model, column, element, context=None):
        """Map the edited (displayed) label back to its selection key.

        :raises ValueError: when the text matches no selection label
        """
        value = element.text_content().strip()
        selection = column.reify(cr, uid, model, column, context=context)
        for k, v in selection:
            # labels may be encoded bytestrings; normalize before comparing
            if isinstance(v, str):
                v = ustr(v)
            if value == v:
                return k

        raise ValueError(u"No value found for label %s in selection %s" % (
            value, selection))
class ManyToOne(orm.AbstractModel):
    _name = 'website.qweb.field.many2one'
    _inherit = ['website.qweb.field', 'ir.qweb.field.many2one']

    def from_html(self, cr, uid, model, column, element, context=None):
        """Save an inline-edited m2o display value onto the *target* record's
        _rec_name field. Best effort: failures are logged and swallowed."""
        # FIXME: layering violations all the things
        Model = self.pool[element.get('data-oe-model')]
        M2O = self.pool[column._obj]
        field = element.get('data-oe-field')
        id = int(element.get('data-oe-id'))
        # FIXME: weird things are going to happen for char-type _rec_name
        value = html_to_text(element)

        # if anything blows up, just ignore it and bail
        try:
            # get parent record
            [obj] = Model.read(cr, uid, [id], [field])
            # get m2o record id
            (m2o_id, _) = obj[field]
            # assume _rec_name and write directly to it
            M2O.write(cr, uid, [m2o_id], {
                M2O._rec_name: value
            }, context=context)
        except Exception:
            # was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit
            logger.exception("Could not save %r to m2o field %s of model %s",
                             value, field, Model._name)

        # not necessary, but might as well be explicit about it
        return None
class HTML(orm.AbstractModel):
    _name = 'website.qweb.field.html'
    _inherit = ['website.qweb.field', 'ir.qweb.field.html']

    def from_html(self, cr, uid, model, column, element, context=None):
        """Serialize the edited node's inner HTML back to a string."""
        parts = []
        if element.text:
            parts.append(element.text)
        for child in element.iterchildren(tag=etree.Element):
            parts.append(html.tostring(child))
        return '\n'.join(parts)
class Image(orm.AbstractModel):
    """
    Widget options:

    ``class``
        set as attribute on the generated <img> tag
    """
    _name = 'website.qweb.field.image'
    _inherit = ['website.qweb.field', 'ir.qweb.field.image']

    def to_html(self, cr, uid, field_name, record, options,
                source_element, t_att, g_att, qweb_context, context=None):
        assert source_element.tag != 'img',\
            "Oddly enough, the root tag of an image field can not be img. " \
            "That is because the image goes into the tag, or it gets the " \
            "hose again."

        return super(Image, self).to_html(
            cr, uid, field_name, record, options,
            source_element, t_att, g_att, qweb_context, context=context)

    def record_to_html(self, cr, uid, field_name, record, column, options=None, context=None):
        """Render the field as an <img> pointing at the website image URL."""
        if options is None: options = {}
        aclasses = ['img', 'img-responsive'] + options.get('class', '').split()
        classes = ' '.join(itertools.imap(escape, aclasses))

        # optional bounding box, encoded as "WxH" for the image controller
        max_size = None
        max_width, max_height = options.get('max_width', 0), options.get('max_height', 0)
        if max_width or max_height:
            max_size = '%sx%s' % (max_width, max_height)

        src = self.pool['website'].image_url(cr, uid, record, field_name, max_size)
        img = '<img class="%s" src="%s"/>' % (classes, src)
        return ir_qweb.HTMLSafe(img)

    # matches addon-local static file paths: /<module>/static/<rest>
    local_url_re = re.compile(r'^/(?P<module>[^]]+)/static/(?P<rest>.+)$')

    def from_html(self, cr, uid, model, column, element, context=None):
        """Resolve the edited <img> src back to base64 image data."""
        url = element.find('img').get('src')

        url_object = urlparse.urlsplit(url)
        if url_object.path.startswith('/website/image'):
            # url might be /website/image/<model>/<id>[_<checksum>]/<field>[/<width>x<height>]
            fragments = url_object.path.split('/')
            query = dict(urlparse.parse_qsl(url_object.query))
            model = query.get('model', fragments[3])
            oid = query.get('id', fragments[4].split('_')[0])
            field = query.get('field', fragments[5])
            item = self.pool[model].browse(cr, uid, int(oid), context=context)
            return item[field]

        if self.local_url_re.match(url_object.path):
            return self.load_local_url(url)

        return self.load_remote_url(url)

    def load_local_url(self, url):
        """Read an addon's static file from disk, base64-encoded."""
        match = self.local_url_re.match(urlparse.urlsplit(url).path)

        rest = match.group('rest')
        for sep in os.sep, os.altsep:
            if sep and sep != '/':
                # str.replace returns a new string; the original discarded
                # the result, so non-/ separators were never normalized
                rest = rest.replace(sep, '/')

        path = openerp.modules.get_module_resource(
            match.group('module'), 'static', *(rest.split('/')))

        if not path:
            return None

        try:
            with open(path, 'rb') as f:
                # force complete image load to ensure it's valid image data
                image = I.open(f)
                image.load()
                f.seek(0)
                return f.read().encode('base64')
        except Exception:
            logger.exception("Failed to load local image %r", url)
            return None

    def load_remote_url(self, url):
        """Download, validate and re-encode a remote image as base64."""
        try:
            # should probably remove remote URLs entirely:
            # * in fields, downloading them without blowing up the server is a
            #   challenge
            # * in views, may trigger mixed content warnings if HTTPS CMS
            #   linking to HTTP images
            # implement drag & drop image upload to mitigate?
            req = urllib2.urlopen(url, timeout=REMOTE_CONNECTION_TIMEOUT)
            # PIL needs a seekable file-like image, urllib result is not seekable
            image = I.open(cStringIO.StringIO(req.read()))
            # force a complete load of the image data to validate it
            image.load()
        except Exception:
            logger.exception("Failed to load remote image %r", url)
            return None

        # don't use original data in case weird stuff was smuggled in, with
        # luck PIL will remove some of it?
        out = cStringIO.StringIO()
        image.save(out, image.format)
        return out.getvalue().encode('base64')
class Monetary(orm.AbstractModel):
    _name = 'website.qweb.field.monetary'
    _inherit = ['website.qweb.field', 'ir.qweb.field.monetary']

    def from_html(self, cr, uid, model, column, element, context=None):
        # the amount lives in the inner <span>; de-localize it like a float
        lang = self.user_lang(cr, uid, context=context)

        value = element.find('span').text.strip()

        return float(value.replace(lang.thousands_sep, '')
                          .replace(lang.decimal_point, '.'))
class Duration(orm.AbstractModel):
    _name = 'website.qweb.field.duration'
    _inherit = [
        'ir.qweb.field.duration',
        'website.qweb.field.float',
    ]

    def attributes(self, cr, uid, field_name, record, options,
                   source_element, g_att, t_att, qweb_context,
                   context=None):
        """Add the raw float duration as data-oe-original."""
        attrs = super(Duration, self).attributes(
            cr, uid, field_name, record, options, source_element, g_att, t_att,
            qweb_context, context=context)  # was context=None: dropped the caller's context
        return itertools.chain(attrs, [('data-oe-original', record[field_name])])

    def from_html(self, cr, uid, model, column, element, context=None):
        value = element.text_content().strip()

        # non-localized value
        return float(value)
class RelativeDatetime(orm.AbstractModel):
    """Datetime displayed relatively ("3 days ago") but edited/saved as a
    regular datetime."""
    _name = 'website.qweb.field.relative'
    _inherit = [
        'ir.qweb.field.relative',
        'website.qweb.field.datetime',
    ]

    # get formatting from ir.qweb.field.relative but edition/save from datetime
class Contact(orm.AbstractModel):
    # rendering comes from ir.qweb.field.contact, edition/save behaves like
    # any other many2one website field
    _name = 'website.qweb.field.contact'
    _inherit = ['ir.qweb.field.contact', 'website.qweb.field.many2one']
class QwebView(orm.AbstractModel):
    # qweb-view fields only inherit the base rendering; no website-specific
    # behavior (and no from_html: they are not inline-editable)
    _name = 'website.qweb.field.qweb'
    _inherit = ['ir.qweb.field.qweb']
def html_to_text(element):
    """ Converts HTML content with HTML-specified line breaks (br, p, div, ...)
    in roughly equivalent textual content.

    Used to replace and fixup the roundtripping of text and m2o: when using
    libxml 2.8.0 (but not 2.9.1) and parsing HTML with lxml.html.fromstring
    whitespace text nodes (text nodes composed *solely* of whitespace) are
    stripped out with no recourse, and fundamentally relying on newlines
    being in the text (e.g. inserted during user edition) is probably poor form
    anyway.

    -> this utility function collapses whitespace sequences and replaces
       nodes by roughly corresponding linebreaks
    * p are pre-and post-fixed by 2 newlines
    * br are replaced by a single newline
    * block-level elements not already mentioned are pre- and post-fixed by
      a single newline

    ought be somewhat similar (but much less high-tech) to aaronsw's html2text.
    the latter produces full-blown markdown, our text -> html converter only
    replaces newlines by <br> elements at this point so we're reverting that,
    and a few more newline-ish elements in case the user tried to add
    newlines/paragraphs into the text field

    :param element: lxml.html content
    :returns: corresponding pure-text output
    """

    # output is a list of str | int. Integers are padding requests (in minimum
    # number of newlines). When multiple padding requests, fold them into the
    # biggest one
    output = []
    _wrap(element, output)

    # remove any leading or tailing whitespace, replace sequences of
    # (whitespace)\n(whitespace) by a single newline, where (whitespace) is a
    # non-newline whitespace in this case
    return re.sub(
        r'[ \t\r\f]*\n[ \t\r\f]*',
        '\n',
        ''.join(_realize_padding(output)).strip())
# paragraph-like elements: padded with two newlines on each side
_PADDED_BLOCK = set('p h1 h2 h3 h4 h5 h6'.split())
# https://developer.mozilla.org/en-US/docs/HTML/Block-level_elements minus p
# (other block-level elements get a single newline of padding on each side)
_MISC_BLOCK = set((
    'address article aside audio blockquote canvas dd dl div figcaption figure'
    ' footer form header hgroup hr ol output pre section tfoot ul video'
).split())
def _collapse_whitespace(text):
    """ Collapses sequences of whitespace characters in ``text`` to a single
    space
    """
    # raw string: '\s' inside a plain literal is an invalid escape sequence
    # (DeprecationWarning on py3, an error in future versions)
    return re.sub(r'\s+', ' ', text)
def _realize_padding(it):
    """ Fold and convert padding requests: integers in the output sequence are
    requests for at least n newlines of padding. Runs thereof can be collapsed
    into the largest requests and converted to newlines.
    """
    padding = None
    for item in it:
        if isinstance(item, int):
            # fold consecutive padding requests into the largest one; the
            # original relied on py2's max(None, n) == n, which raises a
            # TypeError on py3 -- handle the initial None explicitly
            padding = item if padding is None else max(padding, item)
            continue
        if padding:
            yield '\n' * padding
            padding = None
        yield item
    # leftover padding irrelevant as the output will be stripped
def _wrap(element, output, wrapper=u''):
    """ Recursively extracts text from ``element`` (via _element_to_text), and
    wraps it all in ``wrapper``. Extracted text is added to ``output``

    An int wrapper is a padding request (see _realize_padding); a string
    wrapper is appended literally.

    :type wrapper: basestring | int
    """
    output.append(wrapper)
    if element.text:
        output.append(_collapse_whitespace(element.text))
    for child in element:
        _element_to_text(child, output)
    output.append(wrapper)
def _element_to_text(e, output):
    """Append the textual rendering of element ``e`` to ``output``,
    dispatching on the tag kind."""
    if e.tag == 'br':
        # explicit line break
        output.append(u'\n')
    elif e.tag in _PADDED_BLOCK:
        # paragraph-like: request two newlines of padding around the content
        _wrap(e, output, 2)
    elif e.tag in _MISC_BLOCK:
        # other block-level element: one newline of padding
        _wrap(e, output, 1)
    else:
        # inline
        _wrap(e, output)

    # tail text (after the closing tag) belongs to the parent's flow
    if e.tail:
        output.append(_collapse_whitespace(e.tail))
| agpl-3.0 |
chiragjogi/odoo | addons/hr_timesheet_sheet/__init__.py | 434 | 1127 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet_sheet
import wizard
import report
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mkollaro/destroystack | destroystack/tools/server_manager.py | 1 | 7151 | # Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import destroystack.tools.state_restoration.metaopenstack as metaopenstack
import destroystack.tools.state_restoration.vagrant as vagrant
import destroystack.tools.state_restoration.manual as manual_restoration
import destroystack.tools.common as common
import destroystack.tools.servers as server_tools
# Possible roles that a server can have, depending what services are installed
# on it. It can have more than one role.
ROLES = set(['keystone', 'swift_proxy', 'swift_data', 'controller', 'compute',
'glance', 'cinder', 'neutron'])
MANAGEMENT_TYPES = ['none', 'manual', 'metaopenstack']
LOG = logging.getLogger(__name__)
class Singleton(object):
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Singleton, cls).__new__(cls, *args, **kwargs)
return cls._instance
class ServerManager(Singleton):
    """Single point of access to all configured servers.

    Offers role-based lookup plus whole-deployment state save/restore,
    delegating to the snapshot/backup backend selected by the config's
    "management.type".
    """

    def __init__(self):
        # one server object per entry in the config's "servers" section
        self._servers = server_tools.create_servers(common.CONFIG['servers'])
        self._workaround_single_swift_disk()

    def servers(self, role=None, roles=None):
        """Generator that gets a server by its parameters.

        If no parameters are given, it will just return any of them.

        :param role: get a server that has this role, choose from `ROLES`
        :param roles: get a server that has all of these roles, see param
            `role`
        """
        if role:
            assert role in ROLES
            assert not roles  # cannot use both
        if roles:
            roles = set(roles)
            assert roles.issubset(ROLES)

        for server in self._servers:
            if not role and not roles:
                # no conditions, return any
                yield server
            elif role in server.roles \
                    or (roles and roles.issubset(server.roles)):
                yield server

    def get(self, role=None, roles=None):
        """Get the first server that matches the parameters.

        For more info, look at the `ServerManager.servers()` generator - it
        uses the same parameters.

        :returns: the server in question or None
        """
        try:
            return self.servers(role, roles).next()
        except StopIteration:
            return None

    def get_all(self, role=None, roles=None):
        """Same as `get`, but returns a list of all the matching servers."""
        return list(self.servers(role, roles))

    def save_state(self, tag=''):
        """Create a snapshot of all the servers

        Depending on what is in the configuration in "management.type":
            * manual - Just create some backup of the files and maybe
              databases. Unsupported and not recommended.
            * none - Do nothing
            * metaopenstack - Create a snapshot of all the servers

        If it's being created, the name of the snapshots (if created) will be
        "config.management.snapshot_prefix" + name of the VM + tag, where the
        prefix is "destroystack-snapshot" by default. The VMs have to have
        unique names (at least among each other) and snapshots/images with
        that name cannot already exist.

        :param tag: will be appended to the name of the snapshots
        """
        self._choose_state_restoration_action('save', tag)

    def load_state(self, tag=''):
        """Restore all the servers from their snapshots.

        For more information, see the function ``save``.

        Depending on what is in the configuration in "management.type":
            * manual - Restore backups, mount disks that got umounted, start
              up services again. Unsupported, might not work - it's just a
              best effort.
            * none - Do nothing
            * metaopenstack - Rebuild the VMs with the snapshot images, which
              are going to be found by the name as described in the `save`
              function.
        """
        self._choose_state_restoration_action('load', tag)
        self.connect()
        # workaround for the fact that the extra disk might not get snapshotted
        self._restore_swift_disks()

    def connect(self):
        """Create ssh connections to all the servers.

        Will re-create them if called a second time.
        """
        for server in self._servers:
            server.connect()

    def disconnect(self):
        # close the ssh connection of every managed server
        for server in self._servers:
            server.disconnect()

    def _choose_state_restoration_action(self, action, tag):
        """Choose which function to use, based on "management.type" in config.

        :param action: save or load
        """
        assert action in ['save', 'load']
        man_type = common.CONFIG['management']['type']
        if man_type == 'metaopenstack':
            if action == 'save':
                metaopenstack.create_snapshots(tag)
            else:
                metaopenstack.restore_snapshots(tag)
        elif man_type == 'vagrant':
            if action == 'save':
                vagrant.create_snapshots(tag)
            else:
                vagrant.restore_snapshots(tag)
        elif man_type == 'manual':
            if action == 'save':
                manual_restoration.create_backup(self)
            else:
                manual_restoration.restore_backup(self)
        elif man_type == 'none':
            LOG.info("State save and restoration has been turned off")
        else:
            raise Exception("This type of server management, '%s', is not"
                            "supported, choose among: %s"
                            % (man_type, MANAGEMENT_TYPES))

    def _restore_swift_disks(self):
        """These disks might not have been snapshotted.

        Since the extra disk is currently maybe not being snapshotted (it is
        just some ephemeral storage or cinder volume), format them and restore
        their flags.

        Additionally, if the user provided only one disk, we create 3
        partitions on it and use them as "disks" to simplify things for the
        user.
        """
        data_servers = list(self.servers(role='swift_data'))
        server_tools.prepare_swift_disks(data_servers)
        for server in data_servers:
            for disk in server.disks:
                server.restore_disk(disk)

    def _workaround_single_swift_disk(self):
        # when a swift data server has a single disk, expose its three
        # partitions ("<disk>1".."<disk>3") as separate disks instead
        for server in list(self.servers(role='swift_data')):
            if len(server.disks) == 1:
                disk = server.disks[0]
                server.disks = [disk + "1", disk + "2", disk + "3"]
| apache-2.0 |
Som-Energia/invoice-janitor | invoicing/f1fixing/import_error/models.py | 1 | 24323 | # -*- coding: utf-8 -*-
from lxml import etree, objectify
import base64
import re
import os
from datetime import datetime
import dateutil.parser
import xmlformatter
## Codis OCSUM - F1

# Codi periode
# Maps OCSUM "CodigoPeriodo" codes to tariff period labels (P1..P7).
# A single code encodes both the tariff scheme and the period, which is
# why several different codes map to the same Pn label.
# NOTE: ``03`` is a Python 2 octal literal and equals 3.
codigoPeriodo_to_P = {
1:'P1', 03:'P2',10:'P1',21:'P1',22:'P2',31:'P1',32:'P2',33:'P3',41:'P1',
42:'P2',43:'P3',51:'P1',52:'P2',53:'P3',61:'P1',62:'P2',63:'P3',64:'P4',
65:'P5',66:'P6',71:'P1',72:'P2',73:'P3',74:'P4',75:'P5',76:'P6',77:'P7'
}

# Codi origen de lectura
# OCSUM reading-origin codes (two-digit strings, as found in the F1
# XML 'Procedencia' element) to human readable names.
codigoOrigen_to_O = {
    '10': 'Telemedida',
    '11': 'Telemedida corregida',
    '20': 'TPL',
    '21': 'TPL corregida',
    '30': 'Visual',
    '31': 'Visual corregida',
    '40': 'Estimada',
    '50': 'Autolectura',
    '99': 'Sin Lectura'
}

# Origin names to the ids used by the GiscedataLectures ``origen_id``
# column.  NOTE(review): these are 1-9 database ids, *not* the OCSUM
# two-digit codes above — the asymmetry looks intentional but is worth
# confirming against the origen table.
O_to_codigoOrigen =\
{
'Telemedida':1,
'Telemedida corregida':2,
'TPL':3,
'TPL corregida':4,
'Visual':5,
'Visual corregida':6,
'Estimada':7,
'Autolectura':8,
'Sin Lectura':9,
'Sense Lectura':9
}
class OpenObject(object):
    """Thin base class keeping a reference to the ERP client proxy.

    By convention the proxy is named ``O`` throughout this module.
    """

    # Default before an instance is initialised.
    O = None

    def __init__(self, O):
        self.O = O
class F1(object):
    """Wrapper around an OCSUM F1 invoice XML document.

    Provides helpers to read and update individual meter readings
    (LecturaDesde/LecturaHasta) and to inspect the invoice type.
    """
    root = None

    def __init__(self, xml=None, filename=None):
        """Load the document from an XML string or from a file.

        :param xml: XML document as a string
        :param filename: path to a file containing the XML document
        :raises ValueError: when neither argument is given (the original
            code used a bare ``raise`` here, which is invalid outside an
            except block)
        """
        if not xml and not filename:
            raise ValueError('Either xml or filename must be given')
        if filename:
            with open(filename) as f:
                xml = f.read()
        self.root = objectify.fromstring(xml)

    @property
    def raw(self):
        """The document serialized back to ISO-8859-1 XML."""
        objectify.deannotate(self.root, xsi_nil=True)
        etree.cleanup_namespaces(self.root)
        return etree.tostring(self.root,
                              encoding="ISO-8859-1",
                              xml_declaration=True)

    def dump(self, filename):
        """Pretty-print the document (tab indented) to *filename*."""
        formatter = xmlformatter.Formatter(indent="1",
                                           indent_char="\t",
                                           encoding_output="ISO-8859-1",
                                           preserve=["literal"])
        raw = formatter.format_string(self.raw)
        with open(filename, "w") as f:
            f.write(raw)

    @staticmethod
    def _check_attribute(attribute):
        """Reject reading attributes other than the supported three."""
        if attribute not in ['FechaHora', 'Procedencia', 'Lectura']:
            raise Exception('Attribute not supported')

    def _iter_matching_lecturas(self, comptador, data, periode, tipus):
        """Yield every LecturaDesde/LecturaHasta element that matches.

        Walks Facturas/FacturaATR/Medidas/Aparato/Integrador and yields
        the reading elements whose meter serial, magnitude kind ('A' for
        active energy, 'R' for reactive), period label (e.g. 'P1') and
        timestamp match the given criteria.

        :raises Exception: when a mandatory element of the F1 layout is
            missing ('F1 format failed')
        """
        root = self.root
        if not hasattr(root, 'Facturas'):
            raise Exception('F1 format failed')
        Facturas = root.Facturas
        if not hasattr(Facturas, 'FacturaATR'):
            raise Exception('F1 format failed')
        FacturaATR = Facturas.FacturaATR
        if not hasattr(FacturaATR, '__iter__'):
            FacturaATR = [FacturaATR]
        target = dateutil.parser.parse(data)
        for factura in FacturaATR:
            if not hasattr(factura, 'Medidas'):
                raise Exception('F1 format failed')
            Medidas = factura.Medidas
            if not hasattr(Medidas, '__iter__'):
                Medidas = [Medidas]
            for medida in Medidas:
                if not hasattr(medida, 'Aparato'):
                    raise Exception('F1 format failed')
                Aparato = medida.Aparato
                if not hasattr(Aparato, '__iter__'):
                    Aparato = [Aparato]
                for aparato in Aparato:
                    if not hasattr(aparato, 'NumeroSerie'):
                        raise Exception('F1 format failed')
                    try:
                        # Numeric serials are compared as integers (so
                        # leading zeroes do not matter), anything else
                        # as a plain string.  This unifies the previous
                        # duplicated int-only comparison of
                        # update_xml_value() with get_xml_value().
                        if comptador.isdigit():
                            if int(aparato.NumeroSerie) != int(comptador):
                                continue
                        elif aparato.NumeroSerie != comptador:
                            continue
                    except Exception:
                        continue
                    if not hasattr(aparato, 'Integrador'):
                        raise Exception('F1 format failed')
                    Integrador = aparato.Integrador
                    if not hasattr(Integrador, '__iter__'):
                        Integrador = [Integrador]
                    for integrador in Integrador:
                        if not hasattr(integrador, 'Magnitud'):
                            raise Exception('F1 format failed')
                        magnitud = str(integrador.Magnitud)
                        if (tipus == 'A') and magnitud != 'AE':
                            continue
                        if (tipus == 'R') and not magnitud.startswith('R'):
                            continue
                        if not integrador.CodigoPeriodo:
                            continue
                        if codigoPeriodo_to_P[integrador.CodigoPeriodo] != periode:
                            continue
                        if not hasattr(integrador, 'LecturaDesde'):
                            raise Exception('F1 format failed')
                        if not hasattr(integrador, 'LecturaHasta'):
                            raise Exception('F1 format failed')
                        if dateutil.parser.parse(str(integrador.LecturaDesde.FechaHora)) == target:
                            yield integrador.LecturaDesde
                        elif dateutil.parser.parse(str(integrador.LecturaHasta.FechaHora)) == target:
                            yield integrador.LecturaHasta

    def update_xml_value(self, comptador, data, periode, tipus, attribute, value):
        """Set *attribute* on every reading element matching the criteria."""
        self._check_attribute(attribute)
        for lectura in self._iter_matching_lecturas(comptador, data,
                                                    periode, tipus):
            setattr(lectura, attribute, value)

    def get_xml_value(self, comptador, data, periode, tipus, attribute):
        """Return *attribute* of the first matching reading element.

        Bug fix: the original returned the attribute of LecturaDesde
        even when the match was on LecturaHasta.

        :raises Exception: 'F1 error' when nothing matches
        """
        self._check_attribute(attribute)
        for lectura in self._iter_matching_lecturas(comptador, data,
                                                    periode, tipus):
            return getattr(lectura, attribute)
        raise Exception('F1 error')

    def _first_factura_atr(self):
        """Return the first FacturaATR element of the document."""
        Facturas = self.root.Facturas
        if not hasattr(Facturas, 'FacturaATR'):
            raise Exception('F1 format failed')
        FacturaATR = Facturas.FacturaATR
        if not hasattr(FacturaATR, '__iter__'):
            return FacturaATR
        # Indexing works both for plain lists and objectify elements
        # (selecting the first same-named sibling).  The original code
        # accessed attributes on a Python list here, which could only
        # raise AttributeError.
        return FacturaATR[0]

    def is_abonadora(self):
        """True when the invoice is a credit/substituting invoice (A/B)."""
        datos = self._first_factura_atr().DatosGeneralesFacturaATR
        return datos.DatosGeneralesFactura.IndicativoFacturaRectificadora in ['A', 'B']

    def is_rectificadora(self):
        """True when the invoice is a rectifying invoice (R)."""
        datos = self._first_factura_atr().DatosGeneralesFacturaATR
        return datos.DatosGeneralesFactura.IndicativoFacturaRectificadora == 'R'
class LectPool(OpenObject):
    # NOTE(review): this class is dead code — it is shadowed by the
    # ``LectPool(LectBase)`` definition further down in this module and
    # can never be instantiated through the module namespace.
    def __init__(self,O):
        super(LectPool, self).__init__(O)
class Comptador(OpenObject):
    """Lightweight handle to a meter ("comptador") record."""

    id = None

    def __init__(self, O, id):
        super(Comptador, self).__init__(O)
        self.id = id
class Polissa(OpenObject):
    """Read-only snapshot of a supply contract ("polissa").

    The interesting fields are fetched once, at construction time,
    through the ERP client proxy.
    """
    id = None

    def __init__(self, O, id):
        # ``id`` may be a scalar id or a list of ids; read(...)[0] takes
        # the first record either way.
        super(Polissa,self).__init__(O)
        self.id = id
        fields_to_read = ['name', 'cups', 'tarifa', 'state', 'comptador', 'distribuidora', 'data_alta', 'data_baixa']
        data = self.O.GiscedataPolissa.read(self.id, fields_to_read)[0]
        self.name = data['name']
        # NOTE(review): 'cups' is requested above but never stored —
        # confirm whether a ``self.cups`` attribute was intended.
        # 'tarifa' is presumably a many2one (id, display-name) tuple;
        # keep the display name.  TODO confirm against the ERP schema.
        self.tarifa = data['tarifa'][1]
        self.state = data['state']
        self.comptador = Comptador(self.O, data['comptador'])
        self.distribuidora = data['distribuidora']
        self.data_alta = data['data_alta']
        self.data_baixa = data['data_baixa']

    def daily_consumption(self):
        """Per-period daily consumption as computed by the ERP."""
        return self.O.GiscedataPolissa.consum_diari(self.id)

    def monthly_consumption(self, period):
        """Rough monthly consumption for *period* (daily value x 30)."""
        return self.daily_consumption()[period]*30
class LectBase(object):
    """Common behaviour for invoiced readings and pool readings.

    ``obj`` is the concrete ERP model proxy (GiscedataLecturesLectura
    or GiscedataLecturesLecturaPool); the subclasses below only choose
    which proxy to use.
    """
    id = None
    data = None
    tarifa = None
    periode_id = None
    periode = None
    lectura = None
    origen_comer = None
    origen = None
    tipus = None
    observacions = None
    obj = None

    def __init__(self, obj, id):
        self.obj = obj
        self.id = id
        fields_to_read = ['name', 'lectura', 'origen_comer_id', 'origen_id', 'periode', 'tipus', 'observacions']
        # NOTE(review): read() is used without [0] here (unlike Polissa)
        # — presumably the proxy returns a dict for a scalar id; confirm.
        lect_read = self.obj.read(self.id, fields_to_read)
        lect_perm_read = self.obj.perm_read([self.id])[0]
        # 'periode' displays as e.g. "2.0A (P1)"; split into the tariff
        # name and the period label.
        (tarifa,periode) = lect_read['periode'][1].split(' ')
        periode_id = lect_read['periode'][0]
        periode = periode[1:3]  # "(P1)" -> "P1"
        self.write_date = lect_perm_read['write_date']
        self.date = lect_read['name']
        self.tarifa = tarifa
        self.periode_id = periode_id
        self.periode = periode
        self.lectura = lect_read['lectura']
        self.origen_comer = lect_read['origen_comer_id'][1]
        self.origen = lect_read['origen_id'][1]
        self.tipus = lect_read['tipus']
        self.observacions = lect_read['observacions']

    def update_lectura(self, old, new, origen, update_observacions, observacions='', observacions_date='-'):
        """Overwrite the reading value and origin in the ERP.

        ``old`` is only used in the traceability text.  When
        ``update_observacions`` is true, a dated "R. ..." line is
        prepended to the record's observations.
        """
        write_values = {'lectura': int(new), 'origen_id': int(origen)}
        if update_observacions:
            obs = self.observacions
            txt = 'R. {observacions} {old} [{observacions_date}] (ABr)\n'.format(**locals())
            if not obs:
                obs = ''
            obs = txt + obs
            write_values.update({'observacions':obs})
        self.obj.write([self.id], write_values)

    def update_observacions(self, value=None):
        """Prepend a dated traceability line with *value* to observations.

        Does nothing when *value* is falsy.
        """
        if value:
            obs = self.observacions
            today = datetime.strftime(datetime.today(),'%Y-%m-%d')
            txt = 'R. {value} [{today}] (ABr)\n'.format(**locals())
            if not obs:
                obs = ''
            obs = txt + ' ' + obs
            self.obj.write([self.id], {'observacions': obs})
class LectPool(LectBase):
    """Pool (telemetry) reading; backed by the LecturaPool model."""
    def __init__(self, O, id):
        super(LectPool, self).__init__(O.GiscedataLecturesLecturaPool, id)
class Lect(LectBase):
    """Invoiced reading; backed by the Lectura model."""
    def __init__(self, O, id):
        super(Lect, self).__init__(O.GiscedataLecturesLectura, id)
class Error(OpenObject):
    """A single "divergent reading" import error.

    Parses the error text attached to a
    giscedata.facturacio.importacio.linia record and loads the related
    pool readings and latest invoiced readings for the meter.
    """
    raw = None
    factura = None
    comptador = None
    data = None
    periode = None
    tipus = None
    valor_xml = None
    valor_db = None

    def __init__(self, O, polissa_id, raw):
        """
        :param O: ERP client proxy
        :param polissa_id: contract id (or [id]) the error belongs to
        :param raw: raw error message to parse
        """
        super(Error, self).__init__(O)
        # Bug fix: these used to be *class* attributes, so every Error
        # instance shared (and polluted) the same three dicts.  They
        # must be per-instance state.
        self.lects_pool = {}
        self.last_lects_pool = {}
        self.last_lects_invoice = {}
        self.parse(raw)
        # Pool readings for this meter at the error date.
        fields_to_search = [('polissa', '=', polissa_id), ('name', '=', self.comptador)]
        comptador_ids = O.GiscedataLecturesComptador.search(fields_to_search, 0, 0, False, {'active_test': False})
        if len(comptador_ids) == 0:
            raise Exception('Comptador missing')
        comptador_id = comptador_ids[0]
        fields_to_search = [('comptador', '=', comptador_id), ('name', '=', self.data)]
        lect_pool_ids = O.GiscedataLecturesLecturaPool.search(fields_to_search)
        if not len(lect_pool_ids) > 0:
            raise Exception('Lectpool missing')
        for lect_pool_id in lect_pool_ids:
            lect_pool = LectPool(self.O, lect_pool_id)
            self.lects_pool[lect_pool.periode] = lect_pool
        # Latest "real" (non-estimated) pool readings for the meter.
        fields_to_search = [('comptador', '=', comptador_id),
                            ('origen_id', 'in',
                             [O_to_codigoOrigen['Telemedida'],
                              O_to_codigoOrigen['Telemedida corregida'],
                              O_to_codigoOrigen['TPL'],
                              O_to_codigoOrigen['TPL corregida'],
                              O_to_codigoOrigen['Visual'],
                              O_to_codigoOrigen['Visual corregida']])]
        last_lects_pool_ids = O.GiscedataLecturesLecturaPool.search(fields_to_search)
        if not len(last_lects_pool_ids) > 0:
            raise Exception('Lectpool missing')
        last_lects_pool_id = last_lects_pool_ids[0]
        fields_to_read = ['name']
        last_lects_pool_date = O.GiscedataLecturesLecturaPool.read(last_lects_pool_id, fields_to_read)['name']
        fields_to_search = [('comptador', '=', comptador_id),
                            ('name', '=', last_lects_pool_date)]
        last_lects_pool_ids = O.GiscedataLecturesLecturaPool.search(fields_to_search)
        if not len(last_lects_pool_ids) > 0:
            raise Exception('Lectpool missing')
        for last_lects_pool_id in last_lects_pool_ids:
            last_lects_pool = LectPool(self.O, last_lects_pool_id)
            self.last_lects_pool[last_lects_pool.periode] = last_lects_pool
        # Latest invoiced readings (all periods sharing the newest date).
        fields_to_search = [('comptador', '=', comptador_id)]
        last_lects_invoice_ids = O.GiscedataLecturesLectura.search(fields_to_search)
        # Bug fix: the original indexed [0] before checking emptiness,
        # so an empty result raised IndexError instead of this message.
        if not last_lects_invoice_ids:
            raise Exception('Lect invoice missing')
        fields_to_read = ['name']
        last_lects_invoice_date = O.GiscedataLecturesLectura.read(last_lects_invoice_ids[0], fields_to_read)['name']
        fields_to_search = [('comptador', '=', comptador_id),
                            ('name', '=', last_lects_invoice_date)]
        last_lects_invoice_ids = O.GiscedataLecturesLectura.search(fields_to_search)
        if not last_lects_invoice_ids:
            raise Exception('Lect missing')
        for last_lects_invoice_id in last_lects_invoice_ids:
            last_lects_invoice = Lect(self.O, last_lects_invoice_id)
            self.last_lects_invoice[last_lects_invoice.periode] = last_lects_invoice

    @property
    def FechaHora(self):
        """Reading timestamp, named after the F1 XML element."""
        return self.data

    @property
    def Lectura(self):
        """Reading value stored in the database (BBDD side)."""
        return self.valor_db

    def parse(self, raw):
        """Parse the divergence message and fill in the error fields.

        Message format (see GISCEMaster/giscedata_lectures_switching):
          Factura X: Divergència en el valor de lectura existent.
          Comptador: C ... Data: YYYY-MM-DD. Període: Pn. Tipus: T
          valor: XML: nnn ... BBDD:mmm

        :raises Exception: when the message does not match the format.
            (The original did ``raise ('...')`` — raising a plain
            string, which is a TypeError — and wrapped everything in a
            redundant try/except that only re-raised.)
        """
        self.raw = raw
        m = re.match(u'Factura (.+): Divergència en el valor de lectura existent. Comptador: (\w+).*Data: ([0-9\-]+).+Període: (\w+)\. Tipus: (\w+) valor: XML: (\d*[.]?\d*).+BBDD:(\d*[.]?\d*)', raw)
        if not m or len(m.groups()) != 7:
            raise Exception('Error not matching')
        self.factura = m.groups()[0]
        self.comptador = m.groups()[1]
        self.data = m.groups()[2]
        self.periode = m.groups()[3]
        self.tipus = m.groups()[4]
        self.valor_xml = float(m.groups()[5])
        self.valor_db = float(m.groups()[6])
class F1ImportError(OpenObject):
    """Wraps a giscedata.facturacio.importacio.linia record.

    Loads the import error line, its contract, the parsed divergence
    error and the attached F1 XML document.
    """
    id = None

    def __init__(self, O, id):
        super(F1ImportError, self).__init__(O)
        self.id = id
        fields_to_read = ['name', 'cups_id', 'info']
        # NOTE(review): read() without [0] here (unlike Polissa) —
        # presumably the proxy returns a dict for a scalar id; confirm.
        data = O.GiscedataFacturacioImportacioLinia.read(self.id, fields_to_read)
        self.name = data['name']
        self.cups_id = data['cups_id'][0]
        perm_data = O.GiscedataFacturacioImportacioLinia.perm_read([self.id])[0]
        self.write_date = perm_data['write_date']
        self.create_date = perm_data['create_date']
        # Include inactive contracts in the lookup.
        polissa_id = self.O.GiscedataPolissa.search([('cups', '=', self.cups_id)], 0, 0, False, {'active_test': False})
        if not polissa_id:
            # Bug fix: was ``raise('...')`` which raises a plain string
            # (a TypeError), not a usable exception.
            raise Exception('No contract information available')
        self.polissa = Polissa(self.O, polissa_id)
        # Parsed error
        self.error = Error(self.O, polissa_id, data['info'])
        # F1 XML attachment
        attach_ids = self.O.IrAttachment.search([
            ('res_model', '=', 'giscedata.facturacio.importacio.linia'), ('res_id', '=', self.id)])
        # Bug fix: the original indexed [0] first and checked afterwards,
        # so an empty result raised IndexError before the check could run.
        if not attach_ids:
            raise ValueError('Resource id not found')
        xml_ = O.IrAttachment.read(attach_ids[0], ['name', 'datas'])
        xml = base64.b64decode(xml_["datas"])
        self.F1 = F1(xml)
        self.request_date = dateutil.parser.parse(str(self.F1.root.Cabecera.FechaSolicitud))

    def reload(self, update=False):
        """Re-run the F1 import wizard for this line.

        :param update: when True, dump the (possibly modified) in-memory
            XML to /tmp and feed that file to the wizard instead of the
            stored attachment; the line name gets an '_A' suffix.
        """
        if update:
            (filename_, extension_) = os.path.splitext(self.name)
            self.name = filename_ + '_A' + extension_
            filename = os.path.join('/tmp', self.name)
            self.F1.dump(filename)
            with open(filename, 'rb') as file_:
                encoded_string = base64.b64encode(file_.read())
            ctx = {'active_id': self.id,
                   'fitxer_xml': True}
            wizard_id = self.O.GiscedataFacturacioSwitchingWizard.create({}, ctx)
            wizard = self.O.GiscedataFacturacioSwitchingWizard.get(wizard_id)
            vals = {
                'origen': 'nou',
                'filename': self.name,
                'file': encoded_string
            }
            wizard.write(vals)
            wizard.action_importar_f1(ctx)
        else:
            ctx = {'active_id': self.id, 'fitxer_xml': True}
            wizard_id = self.O.GiscedataFacturacioSwitchingWizard.create({}, ctx)
            wizard = self.O.GiscedataFacturacioSwitchingWizard.get(wizard_id)
            wizard.action_importar_f1(ctx)

    def update_xml_attribute(self, attribute):
        """Copy the error's value for *attribute* into the XML document."""
        if not hasattr(self.error, attribute):
            raise Exception('Attribute %s not supported' % attribute)
        self.F1.update_xml_value(self.error.comptador,
                                 self.error.data,
                                 self.error.periode,
                                 self.error.tipus,
                                 attribute,
                                 getattr(self.error, attribute))

    def get_xml_attribute(self, attribute):
        """Read *attribute* for the error's reading from the XML document."""
        return self.F1.get_xml_value(self.error.comptador,
                                     self.error.data,
                                     self.error.periode,
                                     self.error.tipus,
                                     attribute)

    def dump(self, fmt='txt'):
        """Print a human readable summary of the error followed by a
        side-by-side table of invoiced readings and pool readings."""
        summary = []
        summary.append(('Error_id', self.id))
        summary.append(('Polissa', self.polissa.name))
        summary.append(('Tarifa', self.polissa.tarifa))
        summary.append(('Distribuidora', self.polissa.distribuidora))
        summary.append(('Data', self.error.data))
        summary.append(('Periode', self.error.periode))
        summary.append(('Tipus', self.error.tipus))
        if self.F1.is_abonadora():
            summary.append(('IndicativoFactura', 'Abonadora'))
        elif self.F1.is_rectificadora():
            summary.append(('IndicativoFactura', 'Rectificadora'))
        else:
            summary.append(('IndicativoFactura', 'Normal'))
        # 'Procedencia' is the OCSUM two-digit origin code in the XML.
        procedencia = str(self.get_xml_attribute('Procedencia'))
        summary.append(('Valor_XML', '%0.2f (%s)' % (self.error.valor_xml, codigoOrigen_to_O[procedencia])))
        summary.append(('Valor_DB', '%0.2f' % self.error.valor_db))
        summary.append(('Data DB', self.error.lects_pool[self.error.periode].write_date))
        # self.polissa.id holds the list returned by search(); take the id.
        fields_to_search = [('comptador.polissa', '=', self.polissa.id[0])]
        lect_pool_ids = self.O.GiscedataLecturesLecturaPool.search(fields_to_search)
        lect_ids = self.O.GiscedataLecturesLectura.search(fields_to_search)
        fields_to_read = ['name', 'periode', 'lectura', 'origen_id', 'observacions']
        lect_pools = self.O.GiscedataLecturesLecturaPool.read(lect_pool_ids, fields_to_read)
        lects = self.O.GiscedataLecturesLectura.read(lect_ids, fields_to_read)
        lect_n = max(len(lects), len(lect_pools))
        from tabulate import tabulate
        table = []
        for lect_idx in range(lect_n):
            row = []
            if lect_idx < len(lects):
                observacions_ = ''
                if lects[lect_idx]['observacions']:
                    # Drop the auto-generated boilerplate lines, keep
                    # only the manual notes.
                    observacions = lects[lect_idx]['observacions'].split('\n')
                    for o in observacions:
                        if o.startswith(u'From') or \
                           o.startswith(u'Lectura') or \
                           o.startswith(u'Tenim') or \
                           o.startswith(u'Data') or \
                           o.startswith(u'Limitació') or \
                           o.startswith(u'Consum'):
                            continue
                        observacions_ += o
                row += [lects[lect_idx]['name'],
                        lects[lect_idx]['periode'][1],
                        lects[lect_idx]['lectura'],
                        lects[lect_idx]['origen_id'][1],
                        observacions_]
            else:
                row += [None, None, None, None, None]
            if lect_idx < len(lect_pools):
                row += [lect_pools[lect_idx]['name'],
                        lect_pools[lect_idx]['periode'][1],
                        lect_pools[lect_idx]['lectura'],
                        lect_pools[lect_idx]['origen_id'][1],
                        lect_pools[lect_idx]['observacions']]
            else:
                row += [None, None, None, None, None]
            table.append(row)
        for (var_name, var_value) in summary:
            txt = '{var_name}:{var_value}'.format(**locals())
            txt = txt.rstrip()
            print(txt)
        print(tabulate(table, tablefmt=fmt))
zstackorg/zstack-woodpecker | integrationtest/vm/virt_plus/qos/test_del_data_vol_rw_qos.py | 1 | 4158 | '''
This case can not be executed in parallel.
@author: Legion
'''
import os
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.host_operations as host_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.volume_operations as vol_ops
# Woodpecker test-case options: generous timeout, and this case must not
# run in parallel with others (it creates and deletes a global disk
# offering).
_config_ = {
        'timeout' : 1000,
        'noparallel' : True
        }

test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
# Disk offering created by test(); module-level so error_cleanup() can
# delete it if the test aborts half-way through.
volume_offering_uuid = None
def test():
    """End-to-end check of read/write bandwidth QoS on a data volume.

    Creates a VM whose data-volume offering carries separate read
    (10*1024*1024 KB) and write (5*1024*1024 KB) limits, then exercises
    the set/get/delete volume QoS APIs and measures the effective
    bandwidth with fio inside the guest.
    """
    global volume_offering_uuid
    test_util.test_dsc('Test VM disk bandwidth QoS by 20MB')
    #unit is KB
    read_bandwidth = 10*1024*1024
    write_bandwidth = 5*1024*1024
    new_volume_offering = test_lib.lib_create_disk_offering(read_bandwidth=read_bandwidth, write_bandwidth=write_bandwidth)
    volume_offering_uuid = new_volume_offering.uuid
    vm = test_stub.create_vm(vm_name = 'vm_volume_qos', disk_offering_uuids = [volume_offering_uuid])
    vm.check()
    test_obj_dict.add_vm(vm)
    vm_inv = vm.get_vm()
    # Locate the VM's data volume.
    cond = res_ops.gen_query_conditions("vmInstanceUuid", '=', vm_inv.uuid)
    cond = res_ops.gen_query_conditions("type", '=', 'Data', cond)
    volume_uuid = res_ops.query_resource(res_ops.VOLUME, cond)[0].uuid
    test_lib.lib_mkfs_for_volume(volume_uuid, vm_inv)
    path = '/mnt'
    user_name = 'root'
    user_password = 'password'
    # Mount the data volume inside the guest so the write test can
    # target a filesystem path.  NOTE(review): the password goes on the
    # command line via sshpass — acceptable only for disposable lab VMs.
    os.system("sshpass -p '%s' ssh %s@%s 'mount /dev/vdb1 %s'"%(user_password, user_name, vm_inv.vmNics[0].ip, path))
    test_stub.make_ssh_no_password(vm_inv)
    test_stub.install_fio(vm_inv)
    # Setting a *total* bandwidth limit must clear the split read/write
    # limits that came from the disk offering.
    vm_ops.set_vm_disk_qos(test_lib.lib_get_data_volumes(vm_inv)[0].uuid, read_bandwidth*2)
    if vm_ops.get_vm_disk_qos(test_lib.lib_get_data_volumes(vm_inv)[0].uuid).volumeBandwidth != read_bandwidth*2:
        test_util.test_fail('Retrieved disk qos not match')
    if vm_ops.get_vm_disk_qos(test_lib.lib_get_data_volumes(vm_inv)[0].uuid).volumeBandwidthRead == read_bandwidth:
        test_util.test_fail('read qos must be cleared after set total qos')
    if vm_ops.get_vm_disk_qos(test_lib.lib_get_data_volumes(vm_inv)[0].uuid).volumeBandwidthWrite == write_bandwidth:
        test_util.test_fail('write qos must be cleared after set total qos')
    # test_stub.test_fio_bandwidth(vm_inv, read_bandwidth, '/dev/vdb')
    # check read bw: set, verify via API and fio, delete, verify unlimited
    vm_ops.set_vm_disk_qos(test_lib.lib_get_data_volumes(vm_inv)[0].uuid, read_bandwidth*2, 'read')
    if vm_ops.get_vm_disk_qos(test_lib.lib_get_data_volumes(vm_inv)[0].uuid).volumeBandwidthRead != read_bandwidth*2:
        test_util.test_fail('Retrieved disk qos not match')
    test_stub.test_fio_bandwidth(vm_inv, read_bandwidth*2, '/dev/vdb')
    vm_ops.del_vm_disk_qos(test_lib.lib_get_data_volumes(vm_inv)[0].uuid, mode='all')
    if test_stub.test_fio_bandwidth(vm_inv, read_bandwidth, '/dev/vdb', raise_exception=False):
        test_util.test_fail('disk qos is not expected to have limit after qos setting is deleted')
    # check write bw: same cycle, measured against the mounted path
    vm_ops.set_vm_disk_qos(test_lib.lib_get_data_volumes(vm_inv)[0].uuid, write_bandwidth*2, 'write')
    if vm_ops.get_vm_disk_qos(test_lib.lib_get_data_volumes(vm_inv)[0].uuid).volumeBandwidthWrite != write_bandwidth*2:
        test_util.test_fail('Retrieved disk qos not match')
    test_stub.test_fio_bandwidth(vm_inv, write_bandwidth*2, path)
    vm_ops.del_vm_disk_qos(test_lib.lib_get_data_volumes(vm_inv)[0].uuid, mode='all')
    if test_stub.test_fio_bandwidth(vm_inv, write_bandwidth, path, raise_exception=False):
        test_util.test_fail('disk qos is not expected to have limit after qos setting is deleted')
    vol_ops.delete_disk_offering(volume_offering_uuid)
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('VM data volume QoS Test Pass')
#Will be called only if exception happens in test().
def error_cleanup():
    """Best-effort cleanup when test() raises."""
    global volume_offering_uuid
    test_lib.lib_error_cleanup(test_obj_dict)
    try:
        vol_ops.delete_disk_offering(volume_offering_uuid)
    except:
        # The offering may never have been created (or may already be
        # deleted); ignore any failure during emergency cleanup.
        pass
| apache-2.0 |
noba3/KoTos | addons/script.module.myconnpy/lib/mysql/connector/conversion.py | 13 | 15618 | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Converting MySQL and Python types
"""
import struct
import datetime
import time
from decimal import Decimal
from mysql.connector.constants import FieldType, FieldFlag, CharacterSet
class HexLiteral(str):
    """Class holding MySQL hex literals.

    The instance itself stores the hexadecimal digits (no ``0x``
    prefix); ``str(instance)`` yields the literal as MySQL expects it,
    e.g. ``0x616263`` for ``'abc'``.
    """
    def __new__(cls, str_, charset='utf8'):
        encoded = str_.encode(charset)
        # Bug fix: each byte must render as *two* hex digits.  Without
        # zero padding, bytes below 0x10 produced an odd-length,
        # ambiguous literal (e.g. '\n' + 'A' -> '0xa41' instead of
        # '0x0a41').  The isinstance() check keeps this working on both
        # Python 2 (iterating bytes yields 1-char strings) and
        # Python 3 (iterating bytes yields ints).
        hexed = ["{0:02x}".format(i if isinstance(i, int) else ord(i))
                 for i in encoded]
        obj = str.__new__(cls, ''.join(hexed))
        obj.charset = charset
        obj.original = str_
        return obj

    def __str__(self):
        return '0x' + self
class MySQLConverterBase(object):
    """Base class for conversion classes.

    Every class that converts values to and from MySQL data types must
    derive from this one.
    """
    def __init__(self, charset='utf8', use_unicode=True):
        self.python_types = None
        self.mysql_types = None
        self.charset = None
        self.charset_id = 0
        self.use_unicode = None
        self.set_charset(charset)
        self.set_unicode(use_unicode)

    def set_charset(self, charset):
        """Remember the character set and look up its MySQL charset id."""
        if charset == 'utf8mb4':
            # utf8mb4 is handled as plain utf8 for conversion purposes.
            charset = 'utf8'
        self.charset = 'utf8' if charset is None else charset
        self.charset_id = CharacterSet.get_charset_info(self.charset)[0]

    def set_unicode(self, value=True):
        """Toggle whether strings are returned as unicode."""
        self.use_unicode = value

    def to_mysql(self, value):
        """Convert Python data type to MySQL (identity here)."""
        return value

    def to_python(self, vtype, value):
        """Convert MySQL data type to Python (identity here)."""
        return value

    def escape(self, buf):
        """Escape buffer for sending to MySQL (no-op in the base class)."""
        return buf

    def quote(self, buf):
        """Quote buffer for sending to MySQL."""
        return str(buf)
class MySQLConverter(MySQLConverterBase):
"""Default conversion class for MySQL Connector/Python.
o escape method: for escaping values send to MySQL
o quoting method: for quoting values send to MySQL in statements
o conversion mapping: maps Python and MySQL data types to
function for converting them.
Whenever one needs to convert values differently, a converter_class
argument can be given while instantiating a new connection like
cnx.connect(converter_class=CustomMySQLConverterClass).
"""
def __init__(self, charset=None, use_unicode=True):
    """Initialize the converter; see MySQLConverterBase for arguments."""
    MySQLConverterBase.__init__(self, charset, use_unicode)
    # Lazily filled by to_python(): field-type code -> converter method.
    self._cache_field_types = {}
def escape(self, value):
    """
    Escapes special characters as they are expected to by when MySQL
    receives them.
    As found in MySQL source mysys/charset.c

    Returns the value if not a string, or the escaped string.
    """
    if value is None:
        return value
    if isinstance(value, (int, float, long, Decimal, HexLiteral)):
        return value
    escaped = value
    for original, replacement in (
            ('\\', '\\\\'),
            ('\n', '\\n'),
            ('\r', '\\r'),
            ('\047', '\134\047'),  # single quotes
            ('\042', '\134\042'),  # double quotes
            ('\032', '\134\032'),  # for Win32
    ):
        escaped = escaped.replace(original, replacement)
    return escaped
def quote(self, buf):
    """
    Quote the parameters for commands. General rules:
      o numbers are returns as str type (because operation expect it)
      o None is returned as str('NULL')
      o String are quoted with single quotes '<string>'

    Returns a string.
    """
    if isinstance(buf, (int, float, long, Decimal, HexLiteral)):
        return str(buf)
    if buf is None:
        return "NULL"
    # Anything else would be a string
    return "'%s'" % buf
def to_mysql(self, value):
    """Convert Python data type to MySQL.

    Dispatches on the lowercased class name of *value* to the matching
    ``_<typename>_to_mysql`` method.
    """
    type_name = value.__class__.__name__.lower()
    converter = getattr(self, "_%s_to_mysql" % str(type_name))
    return converter(value)
def _int_to_mysql(self, value):
    """Convert value to int"""
    return int(value)

def _long_to_mysql(self, value):
    """Convert value to long"""
    return long(value)

def _float_to_mysql(self, value):
    """Convert value to float"""
    return float(value)

def _str_to_mysql(self, value):
    """Convert value to string"""
    return str(value)

def _unicode_to_mysql(self, value):
    """
    Encodes value, a Python unicode string, to whatever the
    character set for this converter is set too.
    """
    encoded = value.encode(self.charset)
    if self.charset_id in CharacterSet.slash_charsets:
        if '\x5c' in encoded:
            # Charsets that can contain the backslash byte (0x5c)
            # inside multi-byte sequences would be corrupted by the
            # byte-wise escaping in escape(); send them as hex literals.
            return HexLiteral(value, self.charset)
    return encoded

def _bool_to_mysql(self, value):
    """Convert value to boolean"""
    if value:
        return 1
    else:
        return 0

def _nonetype_to_mysql(self, value):
    """
    This would return what None would be in MySQL, but instead we
    leave it None and return it right away. The actual conversion
    from None to NULL happens in the quoting functionality.

    Return None.
    """
    return None
def _datetime_to_mysql(self, value):
    """
    Converts a datetime instance to a string suitable for MySQL.
    The returned string has format: %Y-%m-%d %H:%M:%S[.%f]

    If the instance isn't a datetime.datetime type, it return None.

    Returns a string.
    """
    if value.microsecond:
        return '%d-%02d-%02d %02d:%02d:%02d.%06d' % (
            value.year, value.month, value.day,
            value.hour, value.minute, value.second,
            value.microsecond)
    return '%d-%02d-%02d %02d:%02d:%02d' % (
        value.year, value.month, value.day,
        value.hour, value.minute, value.second)

def _date_to_mysql(self, value):
    """
    Converts a date instance to a string suitable for MySQL.
    The returned string has format: %Y-%m-%d

    If the instance isn't a datetime.date type, it return None.

    Returns a string.
    """
    return '%d-%02d-%02d' % (value.year, value.month, value.day)

def _time_to_mysql(self, value):
    """
    Converts a time instance to a string suitable for MySQL.
    The returned string has format: %H:%M:%S[.%f]

    If the instance isn't a datetime.time type, it return None.

    Returns a string or None when not valid.
    """
    if value.microsecond:
        # Emit a literal '%06d' placeholder via '%%06d' and fill in the
        # microseconds with a second formatting pass.
        return value.strftime('%H:%M:%S.%%06d') % value.microsecond
    return value.strftime('%H:%M:%S')

def _struct_time_to_mysql(self, value):
    """
    Converts a time.struct_time sequence to a string suitable
    for MySQL.
    The returned string has format: %Y-%m-%d %H:%M:%S

    Returns a string or None when not valid.
    """
    return time.strftime('%Y-%m-%d %H:%M:%S', value)

def _timedelta_to_mysql(self, value):
    """
    Converts a timedelta instance to a string suitable for MySQL.
    The returned string has format: %H:%M:%S

    Returns a string.
    """
    (hours, remainder) = divmod(value.seconds, 3600)
    (mins, secs) = divmod(remainder, 60)
    hours = hours + (value.days * 24)
    # NOTE(review): for negative timedeltas ``days`` is negative while
    # ``seconds`` stays positive, so the result mixes a negative hour
    # with positive minutes/seconds — confirm MySQL accepts this.
    if value.microseconds:
        return '%02d:%02d:%02d.%06d' % (hours, mins, secs,
                                        value.microseconds)
    return '%02d:%02d:%02d' % (hours, mins, secs)

def _decimal_to_mysql(self, value):
    """
    Converts a decimal.Decimal instance to a string suitable for
    MySQL.

    Returns a string or None when not valid.
    """
    if isinstance(value, Decimal):
        return str(value)

    return None
def to_python(self, flddsc, value):
    """
    Converts a given value coming from MySQL to a certain type in Python.
    The flddsc contains additional information for the field in the
    table. It's an element from MySQLCursor.description.

    Returns a mixed value.
    """
    if value == '\x00' and flddsc[1] != FieldType.BIT:
        # Don't go further when we hit a NULL value
        return None
    if value is None:
        return None
    if not self._cache_field_types:
        # Build the field-type-code -> converter-method table once.
        self._cache_field_types = {}
        for name, info in FieldType.desc.items():
            try:
                self._cache_field_types[info[0]] = getattr(
                    self, '_{0}_to_python'.format(name))
            except AttributeError:
                # We ignore field types which has no method
                pass

    try:
        return self._cache_field_types[flddsc[1]](value, flddsc)
    except KeyError:
        # If one type is not defined, we just return the value as str
        return str(value)
    except ValueError as err:
        # Re-raise with the field name appended for easier debugging.
        raise ValueError("%s (field %s)" % (err, flddsc[0]))
    except TypeError as err:
        raise TypeError("%s (field %s)" % (err, flddsc[0]))
    except:
        raise
def _FLOAT_to_python(self, value, desc=None):  # pylint: disable=C0103
    """
    Returns value as float type.
    """
    return float(value)
_DOUBLE_to_python = _FLOAT_to_python

def _INT_to_python(self, value, desc=None):  # pylint: disable=C0103
    """
    Returns value as int type.
    """
    return int(value)
_TINY_to_python = _INT_to_python
_SHORT_to_python = _INT_to_python
_INT24_to_python = _INT_to_python

def _LONG_to_python(self, value, desc=None):  # pylint: disable=C0103
    """
    Returns value as long type.
    """
    # int() is sufficient: Python 2 promotes to long automatically.
    return int(value)
_LONGLONG_to_python = _LONG_to_python

def _DECIMAL_to_python(self, value, desc=None):  # pylint: disable=C0103
    """
    Returns value as a decimal.Decimal.
    """
    return Decimal(value)
_NEWDECIMAL_to_python = _DECIMAL_to_python

def _str(self, value, desc=None):
    """
    Returns value as str type.
    """
    return str(value)
def _BIT_to_python(self, value, dsc=None):  # pylint: disable=C0103
    """Returns BIT columntype as integer"""
    # Left-pad with NUL bytes to 8 bytes, then read as big-endian uint64.
    padded = value.rjust(8, '\x00')
    return struct.unpack('>Q', padded)[0]
def _DATE_to_python(self, value, dsc=None):  # pylint: disable=C0103
    """Convert a MySQL DATE string ('YYYY-MM-DD') to datetime.date.

    Returns None when the string is not a valid calendar date.
    """
    pieces = value.split('-')
    try:
        return datetime.date(int(pieces[0]), int(pieces[1]), int(pieces[2]))
    except ValueError:
        return None

_NEWDATE_to_python = _DATE_to_python
def _TIME_to_python(self, value, dsc=None):  # pylint: disable=C0103
    """Convert a MySQL TIME value to datetime.timedelta.

    MySQL TIME values can exceed 24 hours, so a timedelta (rather than
    datetime.time) is returned.
    """
    try:
        (hms, fraction) = value.split('.')
        microseconds = int(fraction.ljust(6, '0'))
    except ValueError:
        # No fractional part present.
        hms = value
        microseconds = 0

    try:
        (hour, minute, second) = [int(part) for part in hms.split(':')]
    except ValueError:
        raise ValueError(
            "Could not convert %s to python datetime.timedelta" % value)
    return datetime.timedelta(hours=hour, minutes=minute, seconds=second,
                              microseconds=microseconds)
def _DATETIME_to_python(self, value, dsc=None): # pylint: disable=C0103
"""
Returns DATETIME column type as datetime.datetime type.
"""
datetime_val = None
try:
(date_, time_) = value.split(' ')
if len(time_) > 8:
(hms, mcs) = time_.split('.')
mcs = int(mcs.ljust(6, '0'))
else:
hms = time_
mcs = 0
dtval = [int(value) for value in date_.split('-')] +\
[int(value) for value in hms.split(':')] + [mcs,]
datetime_val = datetime.datetime(*dtval)
except ValueError:
datetime_val = None
return datetime_val
_TIMESTAMP_to_python = _DATETIME_to_python
def _YEAR_to_python(self, value, desc=None): # pylint: disable=C0103
"""Returns YEAR column type as integer"""
try:
year = int(value)
except ValueError:
raise ValueError("Failed converting YEAR to int (%s)" % value)
return year
def _SET_to_python(self, value, dsc=None): # pylint: disable=C0103
"""Returns SET column typs as set
Actually, MySQL protocol sees a SET as a string type field. So this
code isn't called directly, but used by STRING_to_python() method.
Returns SET column type as a set.
"""
set_type = None
try:
set_type = set(value.split(','))
except ValueError:
raise ValueError("Could not convert SET %s to a set." % value)
return set_type
def _STRING_to_python(self, value, dsc=None):  # pylint: disable=C0103
    """Convert a string-typed column to str/unicode.

    MySQL also reports SET columns as strings; the field flags in the
    description (dsc[7]) tell us whether to split into a set or to pass
    binary data through untouched.
    """
    if dsc is not None:
        if dsc[7] & FieldFlag.SET:
            return self._SET_to_python(value, dsc)
        if dsc[7] & FieldFlag.BINARY:
            return value

    if self.use_unicode:
        # Decode using the connection's character set (Python 2 API).
        return unicode(value, self.charset)
    return str(value)

_VAR_STRING_to_python = _STRING_to_python
def _BLOB_to_python(self, value, dsc=None):  # pylint: disable=C0103
    """Convert a BLOB column value.

    Truly binary BLOBs pass through untouched; TEXT-like BLOBs are
    delegated to _STRING_to_python for decoding.
    """
    if dsc is not None and dsc[7] & FieldFlag.BINARY:
        return value
    return self._STRING_to_python(value, dsc)

_LONG_BLOB_to_python = _BLOB_to_python
_MEDIUM_BLOB_to_python = _BLOB_to_python
_TINY_BLOB_to_python = _BLOB_to_python
| gpl-2.0 |
sve-odoo/odoo | addons/l10n_fr_hr_payroll/__openerp__.py | 374 | 2165 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest for the French payroll localization.
{
    'name': 'French Payroll',
    'category': 'Localization/Payroll',
    'author': 'Yannick Buron (SYNERPGY)',
    # Requires the generic payroll engine plus the French localization base.
    'depends': ['hr_payroll', 'l10n_fr'],
    'version': '1.0',
    'description': """
French Payroll Rules.
=====================
- Configuration of hr_payroll for French localization
- All main contributions rules for French payslip, for 'cadre' and 'non-cadre'
- New payslip report
TODO:
-----
- Integration with holidays module for deduction and allowance
- Integration with hr_payroll_account for the automatic account_move_line
creation from the payslip
- Continue to integrate the contribution. Only the main contribution are
currently implemented
- Remake the report under webkit
- The payslip.line with appears_in_payslip = False should appears in the
payslip interface, but not in the payslip report
""",
    'active': False,
    # XML data files loaded on module installation (views, rules, reports).
    'data': [
        'l10n_fr_hr_payroll_view.xml',
        'l10n_fr_hr_payroll_data.xml',
        'views/report_l10nfrfichepaye.xml',
        'l10n_fr_hr_payroll_reports.xml',
    ],
    'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
StephenWeber/ansible | lib/ansible/plugins/action/net_template.py | 83 | 3775 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import time
import glob
import urlparse
from ansible.module_utils._text import to_text
from ansible.plugins.action.normal import ActionModule as _ActionModule
class ActionModule(_ActionModule):
    """Action plugin: render the 'src' template locally before the module runs,
    and optionally write the device's returned configuration backup to disk."""

    def run(self, tmp=None, task_vars=None):
        """Render the template source, then delegate to the normal action."""
        try:
            self._handle_template()
        except (ValueError, AttributeError) as exc:
            return dict(failed=True, msg=exc.message)

        result = super(ActionModule, self).run(tmp, task_vars)

        if self._task.args.get('backup') and result.get('__backup__'):
            # User requested backup and no error occurred in module.
            # NOTE: If there is a parameter error, __backup__ key may not be in results.
            self._write_backup(task_vars['inventory_hostname'], result['__backup__'])

        # The raw backup blob must never leak into the task result.
        if '__backup__' in result:
            del result['__backup__']

        return result

    def _get_working_path(self):
        """Return the role path when running inside a role, else the playbook dir."""
        cwd = self._loader.get_basedir()
        if self._task._role is not None:
            cwd = self._task._role._role_path
        return cwd

    def _write_backup(self, host, contents):
        """Write *contents* to ./backup/<host>_config.<timestamp>, replacing
        any previous backups for the same host."""
        backup_path = self._get_working_path() + '/backup'
        if not os.path.exists(backup_path):
            os.mkdir(backup_path)
        for fn in glob.glob('%s/%s*' % (backup_path, host)):
            os.remove(fn)
        tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
        filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
        # FIX: use a context manager so the file handle is always closed.
        with open(filename, 'w') as f:
            f.write(contents)

    def _handle_template(self):
        """Resolve the 'src' argument, render it with Jinja2 and store the
        rendered text back into the task args.

        Raises ValueError for a missing or unreadable source so run() can
        report the failure.
        """
        src = self._task.args.get('src')
        if not src:
            raise ValueError('missing required arguments: src')

        working_path = self._get_working_path()

        if os.path.isabs(src) or urlparse.urlsplit(src).scheme:
            source = src
        else:
            source = self._loader.path_dwim_relative(working_path, 'templates', src)
            if not source:
                source = self._loader.path_dwim_relative(working_path, src)

        if not os.path.exists(source):
            return

        try:
            with open(source, 'r') as f:
                template_data = to_text(f.read())
        except IOError:
            # BUG FIX: this used to `return dict(failed=True, ...)`, but run()
            # ignores this method's return value, so the failure was silently
            # dropped. Raising ValueError lets run() report it.
            raise ValueError('unable to load src file')

        # Create a template search path in the following order:
        # [working_path, self_role_path, dependent_role_paths, dirname(source)]
        searchpath = [working_path]
        if self._task._role is not None:
            searchpath.append(self._task._role._role_path)
            # BUG FIX: was hasattr(self._task, "_block:") — the stray colon in
            # the attribute name made the check always False, so dependent
            # role paths were never added to the template search path.
            if hasattr(self._task, '_block'):
                dep_chain = self._task._block.get_dep_chain()
                if dep_chain is not None:
                    for role in dep_chain:
                        searchpath.append(role._role_path)
        searchpath.append(os.path.dirname(source))

        self._templar.environment.loader.searchpath = searchpath
        self._task.args['src'] = self._templar.template(template_data)
| gpl-3.0 |
nkgilley/home-assistant | tests/components/hue/conftest.py | 7 | 3105 | """Test helpers for Hue."""
from collections import deque
from aiohue.groups import Groups
from aiohue.lights import Lights
from aiohue.sensors import Sensors
import pytest
from homeassistant import config_entries
from homeassistant.components import hue
from homeassistant.components.hue import sensor_base as hue_sensor_base
from tests.async_mock import Mock, patch
@pytest.fixture(autouse=True)
def no_request_delay():
    """Zero out the light request refresh delay so tests run instantly."""
    target = "homeassistant.components.hue.light.REQUEST_REFRESH_DELAY"
    with patch(target, 0):
        yield
def create_mock_bridge(hass):
    """Build a Mock that mimics a hue.HueBridge for tests."""
    bridge = Mock(
        hass=hass,
        available=True,
        authorized=True,
        allow_unreachable=False,
        allow_groups=False,
        api=Mock(),
        reset_jobs=[],
        spec=hue.HueBridge,
    )
    bridge.sensor_manager = hue_sensor_base.SensorManager(bridge)
    bridge.mock_requests = []

    # Deques of canned responses: popleft() blows up if a test triggers
    # more refreshes than it queued, which is intentional.
    bridge.mock_light_responses = deque()
    bridge.mock_group_responses = deque()
    bridge.mock_sensor_responses = deque()

    response_queues = {
        "lights": bridge.mock_light_responses,
        "groups": bridge.mock_group_responses,
        "sensors": bridge.mock_sensor_responses,
    }

    async def mock_request(method, path, **kwargs):
        # Record every request so tests can assert on what was sent.
        kwargs["method"] = method
        kwargs["path"] = path
        bridge.mock_requests.append(kwargs)
        queue = response_queues.get(path)
        if queue is None:
            return None
        return queue.popleft()

    async def async_request_call(task):
        await task()

    bridge.async_request_call = async_request_call
    bridge.api.config.apiversion = "9.9.9"
    bridge.api.lights = Lights({}, mock_request)
    bridge.api.groups = Groups({}, mock_request)
    bridge.api.sensors = Sensors({}, mock_request)
    return bridge
@pytest.fixture
def mock_bridge(hass):
    """Mock a Hue bridge."""
    # A fresh bridge per test: empty request log and empty response deques.
    return create_mock_bridge(hass)
async def setup_bridge_for_sensors(hass, mock_bridge, hostname=None):
    """Load the Hue platform with the provided bridge for sensor-related platforms."""
    if hostname is None:
        hostname = "mock-host"
    hass.config.components.add(hue.DOMAIN)
    # NOTE(review): ConfigEntry is built with positional args here, so this
    # is coupled to the constructor signature of this Home Assistant version.
    config_entry = config_entries.ConfigEntry(
        1,
        hue.DOMAIN,
        "Mock Title",
        {"host": hostname},
        "test",
        config_entries.CONN_CLASS_LOCAL_POLL,
        system_options={},
    )
    mock_bridge.config_entry = config_entry
    hass.data[hue.DOMAIN] = {config_entry.entry_id: mock_bridge}
    # Forward setup only for the sensor-related platforms under test.
    await hass.config_entries.async_forward_entry_setup(config_entry, "binary_sensor")
    await hass.config_entries.async_forward_entry_setup(config_entry, "sensor")
    # simulate a full setup by manually adding the bridge config entry
    hass.config_entries._entries.append(config_entry)
    # and make sure it completes before going further
    await hass.async_block_till_done()
| apache-2.0 |
Kayoku/iotari | work-area/wireless-sensor/sensor.py | 1 | 2978 | import datetime
import json
from pprint import pprint
import requests
class Sensor():
    """Abstract base for sensors; concrete subclasses persist measurements."""

    def __init__(self, uuid):
        """Remember the sensor's stable identifier."""
        # NOTE(review): 'uuid' is used as a location label, not an RFC 4122
        # UUID — see the 'location' field it is sent as.
        self.uuid = uuid

    def save_measure(self):
        """Persist one measurement; subclasses must override this."""
        raise NotImplementedError
class APISensor(Sensor):
    """Sensor that persists its measurements through a remote REST API.

    On construction the sensor looks itself up in the remote database by
    its location/uuid and creates the record if it does not exist yet.
    """

    HTTP_STATUS_CREATED = 201
    HTTP_STATUS_SUCCESS = 200

    def __init__(self, uuid, baseurl):
        """Initialize and resolve (or create) the sensor's database id."""
        super().__init__(uuid)
        self.baseurl = baseurl
        self.get_id()

    def get_id(self):
        """Fetch the database id for the sensor, creating it if needed.

        Sets ``self.id_``. Raises Exception when the API answers with an
        unexpected status or several sensors share the same location.
        """
        filters = [dict(name='location', op='equals', val=self.uuid)]
        params = dict(q=json.dumps(dict(filters=filters)))
        r = requests.get(self.baseurl + '/api/sensor',
                         params=params,
                         headers={'content-type': 'application/json'})
        if r.status_code != self.HTTP_STATUS_SUCCESS:
            # FIX: a non-200 reply used to be silently ignored, leaving id_
            # unset and causing an AttributeError later in save_measure().
            raise Exception("could not query sensor (HTTP %s)" % r.status_code)

        json_content = json.loads(r.text)
        if json_content["num_results"] == 1:
            self.id_ = json_content["objects"][0]["id"]
        elif json_content["num_results"] == 0:
            # Not registered yet: add a new sensor record with our UUID.
            # BUG FIX: this POST used the module-global 'baseurl' instead of
            # self.baseurl, which only worked when run as the demo script.
            r = requests.post(self.baseurl + '/api/sensor',
                              data=json.dumps({"location": self.uuid}),
                              headers={'content-type': 'application/json'})
            if r.status_code == self.HTTP_STATUS_CREATED:
                self.id_ = json.loads(r.text)["id"]
            else:
                raise Exception("impossible to add new sensor")
        else:
            raise Exception("multiple sensors with same id")

    def save_measure(self, measure, time_stamp):
        """POST one measurement; return True when the API stored it."""
        new_measure = {'value': measure,
                       'sensor_id': self.id_,
                       'time_stamp': time_stamp}
        try:
            r = requests.post(self.baseurl + '/api/measure',
                              data=json.dumps(new_measure),
                              headers={'content-type': 'application/json'})
        except requests.exceptions.ConnectionError:
            # Best-effort: the caller gets False rather than a crash when
            # the API is unreachable.
            return False
        return r.status_code == self.HTTP_STATUS_CREATED
if __name__ == "__main__":
    # Demo: register a sensor against a locally running API and push 50
    # dummy measurements (the loop index doubles as the measured value).
    baseurl = 'http://localhost:5000'
    sensor = APISensor("salon", baseurl)
    for _ in range(50):
        sensor.save_measure(_, datetime.datetime.now().isoformat())
    # Dump the registered sensors so the run can be checked by eye.
    r = requests.get(baseurl + '/api/sensor',
                     headers={'content-type': 'application/json'})
    print("Sensors: ")
    pprint({"status": r.status_code, "headers": r.headers['content-type'], "content": json.loads(str(r.text))})
| mit |
ContinuumIO/dask | dask/dataframe/accessor.py | 2 | 5362 | import numpy as np
import pandas as pd
from functools import partial
from ..utils import derived_from
def maybe_wrap_pandas(obj, x):
    """Re-wrap a raw ndarray result as a pandas object.

    Accessor calls on pandas objects sometimes return bare ndarrays; wrap
    them back into a Series (aligned with obj's index) or an Index so the
    per-partition results stay pandas-typed. Non-ndarray results pass
    through unchanged.
    """
    if not isinstance(x, np.ndarray):
        return x
    if isinstance(obj, pd.Series):
        return pd.Series(x, index=obj.index, dtype=x.dtype)
    return pd.Index(x)
class Accessor(object):
    """
    Base class for pandas Accessor objects cat, dt, and str.

    Notes
    -----
    Subclasses should define ``_accessor_name``
    """

    # Accessor attributes present on the pandas object that are deliberately
    # not exposed through dask (subclasses may extend this set).
    _not_implemented = set()

    def __init__(self, series):
        from .core import Series

        if not isinstance(series, Series):
            raise ValueError("Accessor cannot be initialized")

        series_meta = series._meta
        if hasattr(series_meta, "to_series"):  # is index-like
            series_meta = series_meta.to_series()
        # The pandas accessor object of the empty metadata; used to discover
        # which attributes can be delegated.
        meta = getattr(series_meta, self._accessor_name)

        self._meta = meta
        self._series = series

    @staticmethod
    def _delegate_property(obj, accessor, attr):
        # Runs on each partition: read the accessor attribute and re-wrap
        # raw ndarray results as pandas objects.
        out = getattr(getattr(obj, accessor, obj), attr)
        return maybe_wrap_pandas(obj, out)

    @staticmethod
    def _delegate_method(obj, accessor, attr, args, kwargs):
        # Runs on each partition: call the accessor method with the given
        # arguments and re-wrap raw ndarray results.
        out = getattr(getattr(obj, accessor, obj), attr)(*args, **kwargs)
        return maybe_wrap_pandas(obj, out)

    def _property_map(self, attr):
        # Build a lazy dask operation for an accessor property; metadata is
        # computed eagerly from the empty meta object.
        meta = self._delegate_property(self._series._meta, self._accessor_name, attr)
        token = "%s-%s" % (self._accessor_name, attr)
        return self._series.map_partitions(
            self._delegate_property, self._accessor_name, attr, token=token, meta=meta
        )

    def _function_map(self, attr, *args, **kwargs):
        # Build a lazy dask operation for an accessor method. An explicit
        # 'meta' kwarg overrides inference on the non-empty metadata sample.
        if "meta" in kwargs:
            meta = kwargs.pop("meta")
        else:
            meta = self._delegate_method(
                self._series._meta_nonempty, self._accessor_name, attr, args, kwargs
            )
        token = "%s-%s" % (self._accessor_name, attr)
        return self._series.map_partitions(
            self._delegate_method,
            self._accessor_name,
            attr,
            args,
            kwargs,
            meta=meta,
            token=token,
        )

    @property
    def _delegates(self):
        # Everything the pandas accessor exposes, minus the blacklist.
        return set(dir(self._meta)).difference(self._not_implemented)

    def __dir__(self):
        o = self._delegates
        o.update(self.__dict__)
        o.update(dir(type(self)))
        return list(o)

    def __getattr__(self, key):
        # Dynamic delegation: callables become lazy method maps, everything
        # else is treated as a property.
        if key in self._delegates:
            if callable(getattr(self._meta, key)):
                return partial(self._function_map, key)
            else:
                return self._property_map(key)
        else:
            raise AttributeError(key)
class DatetimeAccessor(Accessor):
    """ Accessor object for datetimelike properties of the Series values.

    Examples
    --------

    >>> s.dt.microsecond  # doctest: +SKIP
    """

    # All attribute access is delegated through the pandas `.dt` namespace.
    _accessor_name = "dt"
class StringAccessor(Accessor):
    """ Accessor object for string properties of the Series values.

    Examples
    --------

    >>> s.str.lower()  # doctest: +SKIP
    """

    # All attribute access is delegated through the pandas `.str` namespace.
    _accessor_name = "str"
    # get_dummies needs the full column to know the output columns, so it
    # cannot be delegated partition-wise.
    _not_implemented = {"get_dummies"}

    @derived_from(pd.core.strings.StringMethods)
    def split(self, pat=None, n=-1, expand=False):
        if expand:
            if n == -1:
                raise NotImplementedError(
                    "To use the expand parameter you must specify the number of "
                    "expected splits with the n= parameter. Usually n splits result in n+1 output columns."
                )
            else:
                # Build metadata by splitting a sample string that contains
                # exactly n delimiters, so the result has n+1 columns.
                delimiter = " " if pat is None else pat
                meta = type(self._series._meta)([delimiter.join(["a"] * (n + 1))])
                meta = meta.str.split(n=n, expand=expand, pat=pat)
        else:
            # Without expand, each row becomes a list of strings.
            meta = (self._series.name, object)
        return self._function_map("split", pat=pat, n=n, expand=expand, meta=meta)

    @derived_from(pd.core.strings.StringMethods)
    def cat(self, others=None, sep=None, na_rep=None):
        from .core import Series, Index

        if others is None:
            # Concatenating a whole column into one string is a global
            # reduction and is not supported partition-wise.
            raise NotImplementedError("x.str.cat() with `others == None`")

        valid_types = (Series, Index, pd.Series, pd.Index)
        if isinstance(others, valid_types):
            others = [others]
        elif not all(isinstance(a, valid_types) for a in others):
            raise TypeError("others must be Series/Index")

        return self._series.map_partitions(
            str_cat, *others, sep=sep, na_rep=na_rep, meta=self._series._meta
        )

    @derived_from(pd.core.strings.StringMethods)
    def extractall(self, pat, flags=0):
        # TODO: metadata inference here won't be necessary for pandas >= 0.23.0
        meta = self._series._meta.str.extractall(pat, flags=flags)
        return self._series.map_partitions(
            str_extractall, pat, flags, meta=meta, token="str-extractall"
        )

    def __getitem__(self, index):
        # Implements series.str[index] by delegating to each partition.
        return self._series.map_partitions(str_get, index, meta=self._series._meta)
def str_extractall(series, pat, flags):
    """Per-partition helper: delegate to pandas ``Series.str.extractall``."""
    return series.str.extractall(pat, flags=flags)
def str_get(series, index):
    """Per-partition helper implementing ``series.str[index]``."""
    accessor = series.str
    return accessor[index]
def str_cat(self, *others, **kwargs):
    """Per-partition helper: concatenate *self* with the other series via
    pandas ``Series.str.cat``."""
    return self.str.cat(others=others, **kwargs)
| bsd-3-clause |
indictranstech/osmosis-frappe | frappe/core/report/todo/todo.py | 44 | 1373 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.desk.reportview import execute as runreport
from frappe.utils import getdate
def execute(filters=None):
    """Return (columns, rows) for the ToDo report: open ToDos owned by or
    assigned by the current user, sorted by priority then due date
    (highest priority, newest date first)."""
    priority_rank = {"High": 3, "Medium": 2, "Low": 1}

    todo_list = runreport(
        doctype="ToDo",
        fields=["name", "date", "description", "priority", "reference_type",
                "reference_name", "assigned_by", "owner"],
        filters=[["ToDo", "status", "=", "Open"]])

    def sort_key(todo):
        # ToDos without a date sort as very old so they land last.
        due = getdate(todo.date) if todo.date else getdate("1900-01-01")
        return (priority_rank.get(todo.priority, 0), due)

    todo_list.sort(key=sort_key, reverse=True)

    columns = [_("ID")+":Link/ToDo:90", _("Priority")+"::60", _("Date")+ ":Date",
        _("Description")+"::150", _("Assigned To/Owner") + ":Data:120",
        _("Assigned By")+":Data:120", _("Reference")+"::200"]

    result = []
    current_user = frappe.session.user
    for todo in todo_list:
        # Only show ToDos the current user owns or assigned.
        if current_user not in (todo.owner, todo.assigned_by):
            continue
        if todo.reference_type:
            todo.reference = """<a href="#Form/%s/%s">%s: %s</a>""" % (todo.reference_type,
                todo.reference_name, todo.reference_type, todo.reference_name)
        else:
            todo.reference = None
        result.append([todo.name, todo.priority, todo.date, todo.description,
            todo.owner, todo.assigned_by, todo.reference])

    return columns, result
| mit |
jk1/intellij-community | python/helpers/pydev/tests_pydevd/test_jyserver.py | 11 | 4259 | '''
@author Fabio Zadrozny
'''
import sys
import unittest
import socket
import urllib
import pytest
import pycompletionserver
# Jython reports a platform string containing 'java'.
IS_JYTHON = 'java' in sys.platform

# Set to a truthy value to get test trace output on stdout.
DEBUG = 0

def dbg(s):
    """Write a trace line to stdout when DEBUG is enabled."""
    if DEBUG:
        sys.stdout.write('TEST %s\n' % s)
@pytest.mark.skipif(not IS_JYTHON, reason='Jython related test')
class TestJython(unittest.TestCase):
    """Integration tests for pycompletionserver under Jython.

    NOTE(review): this is Python 2 era code (urllib.quote_plus, the
    `thread` module, str-based sockets); it only runs on Jython.
    """

    def test_it(self):
        dbg('ok')

    def test_message(self):
        # The server is never started here; only message formatting is tested.
        t = pycompletionserver.CompletionServer(0)
        t.exit_process_on_kill = False

        l = []
        l.append(('Def', 'description' , 'args'))
        l.append(('Def1', 'description1', 'args1'))
        l.append(('Def2', 'description2', 'args2'))

        msg = t.processor.format_completion_message('test_jyserver.py', l)
        self.assertEqual('@@COMPLETIONS(test_jyserver.py,(Def,description,args),(Def1,description1,args1),(Def2,description2,args2))END@@', msg)

        # Special characters must be url-quoted inside the message.
        l = []
        l.append(('Def', 'desc,,r,,i()ption', ''))
        l.append(('Def(1', 'descriptio(n1', ''))
        l.append(('De,f)2', 'de,s,c,ription2', ''))

        msg = t.processor.format_completion_message(None, l)
        expected = '@@COMPLETIONS(None,(Def,desc%2C%2Cr%2C%2Ci%28%29ption, ),(Def%281,descriptio%28n1, ),(De%2Cf%292,de%2Cs%2Cc%2Cription2, ))END@@'
        self.assertEqual(expected, msg)

    def test_completion_sockets_and_messages(self):
        dbg('test_completion_sockets_and_messages')
        # NOTE: the local name 'socket' shadows the socket module here.
        t, socket = self.create_connections()
        self.socket = socket
        dbg('connections created')

        try:
            #now that we have the connections all set up, check the code completion messages.
            msg = urllib.quote_plus('math')
            toWrite = '@@IMPORTS:%sEND@@' % msg
            dbg('writing' + str(toWrite))
            socket.send(toWrite) #math completions
            completions = self.read_msg()
            dbg(urllib.unquote_plus(completions))

            start = '@@COMPLETIONS('
            self.assertTrue(completions.startswith(start), '%s DOESNT START WITH %s' % (completions, start))
            self.assertTrue(completions.find('@@COMPLETIONS') != -1)
            self.assertTrue(completions.find('END@@') != -1)

            msg = urllib.quote_plus('__builtin__.str')
            toWrite = '@@IMPORTS:%sEND@@' % msg
            dbg('writing' + str(toWrite))
            socket.send(toWrite) #math completions
            completions = self.read_msg()
            dbg(urllib.unquote_plus(completions))

            start = '@@COMPLETIONS('
            self.assertTrue(completions.startswith(start), '%s DOESNT START WITH %s' % (completions, start))
            self.assertTrue(completions.find('@@COMPLETIONS') != -1)
            self.assertTrue(completions.find('END@@') != -1)

        finally:
            # Best-effort teardown; failures here must not mask test errors.
            try:
                self.send_kill_msg(socket)

                while not t.ended:
                    pass #wait until it receives the message and quits.

                socket.close()
            except:
                pass

    def get_free_port(self):
        # Ask the helper for a free local port (socket is closed again).
        from _pydev_bundle.pydev_localhost import get_socket_name
        return get_socket_name(close=True)[1]

    def create_connections(self):
        '''
        Creates the connections needed for testing.
        '''
        p1 = self.get_free_port()

        from thread import start_new_thread
        # Run the completion server in a daemon thread on the free port.
        t = pycompletionserver.CompletionServer(p1)
        t.exit_process_on_kill = False
        start_new_thread(t.run, ())

        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.bind((pycompletionserver.HOST, p1))
        server.listen(1)

        sock, _addr = server.accept()

        return t, sock

    def read_msg(self):
        # Skip '@@PROCESSING...' status messages until real payload arrives.
        msg = '@@PROCESSING_END@@'
        while msg.startswith('@@PROCESSING'):
            msg = self.socket.recv(1024)

            if msg.startswith('@@PROCESSING:'):
                dbg('Status msg:' + str(msg))

        # Keep receiving until the terminator is seen.
        while msg.find('END@@') == -1:
            msg += self.socket.recv(1024)

        return msg

    def send_kill_msg(self, socket):
        socket.send(pycompletionserver.MSG_KILL_SERVER)
# Run for jython in command line:
# c:\bin\jython2.7.0\bin\jython.exe -m py.test tests\test_jyserver.py
| apache-2.0 |
fakdora/flaksy-upto-login | app/main/views.py | 1 | 3865 | from flask import render_template, redirect, url_for, abort, flash, request,\
current_app
from flask.ext.login import login_required, current_user
from . import main
from .forms import EditProfileForm, EditProfileAdminForm, PostForm
from .. import db
from ..models import Permission, Role, User, Post
from ..decorators import admin_required
@main.route('/', methods=['GET', 'POST'])
def index():
    """Front page: post submission form plus a paginated list of posts."""
    form = PostForm()
    can_post = current_user.can(Permission.WRITE_ARTICLES)
    if can_post and form.validate_on_submit():
        new_post = Post(body=form.body.data,
                        author=current_user._get_current_object())
        db.session.add(new_post)
        # Redirect-after-POST so a refresh does not resubmit the form.
        return redirect(url_for('.index'))
    page = request.args.get('page', 1, type=int)
    pagination = Post.query.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    return render_template('index.html', form=form, posts=pagination.items,
                           pagination=pagination)
@main.route('/user/<username>')
def user(username):
    """Profile page for *username* with that user's posts, newest first."""
    profile = User.query.filter_by(username=username).first_or_404()
    page = request.args.get('page', 1, type=int)
    pagination = profile.posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    return render_template('user.html', user=profile, posts=pagination.items,
                           pagination=pagination)
@main.route('/edit-profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
    """Let the logged-in user edit their name, location and about-me text."""
    form = EditProfileForm()
    editable = ('name', 'location', 'about_me')
    if form.validate_on_submit():
        for field in editable:
            setattr(current_user, field, getattr(form, field).data)
        db.session.add(current_user)
        flash('Your profile has been updated.')
        return redirect(url_for('.user', username=current_user.username))
    # GET (or failed validation): pre-fill the form with current values.
    for field in editable:
        getattr(form, field).data = getattr(current_user, field)
    return render_template('edit_profile.html', form=form)
@main.route('/edit-profile/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_profile_admin(id):
    """Administrator-only edit of any user's full profile, including role."""
    target = User.query.get_or_404(id)
    form = EditProfileAdminForm(user=target)
    simple_fields = ('email', 'username', 'confirmed',
                     'name', 'location', 'about_me')
    if form.validate_on_submit():
        for field in simple_fields:
            setattr(target, field, getattr(form, field).data)
        # The role field holds a Role id; resolve it to the model instance.
        target.role = Role.query.get(form.role.data)
        db.session.add(target)
        flash('The profile has been updated.')
        return redirect(url_for('.user', username=target.username))
    # GET (or failed validation): pre-fill the form with current values.
    for field in simple_fields:
        getattr(form, field).data = getattr(target, field)
    form.role.data = target.role_id
    return render_template('edit_profile.html', form=form, user=target)
@main.route('/post/<int:id>')
def post(id):
    """Permalink page for a single post (the template expects a list)."""
    target = Post.query.get_or_404(id)
    return render_template('post.html', posts=[target])
@main.route('/edit/<int:id>', methods=['GET', 'POST'])
@login_required
def edit(id):
    """Edit a post; only its author or an administrator may do so."""
    target = Post.query.get_or_404(id)
    if current_user != target.author and \
            not current_user.can(Permission.ADMINISTER):
        abort(403)
    form = PostForm()
    if form.validate_on_submit():
        target.body = form.body.data
        db.session.add(target)
        flash('The post has been updated.')
        return redirect(url_for('.post', id=target.id))
    # GET (or failed validation): pre-fill the editor with the current body.
    form.body.data = target.body
    return render_template('edit_post.html', form=form)
| mit |
rafiqsaleh/VERCE | verce-hpc-pe/src/dispel4py/examples/graph_testing/word_count_filter.py | 4 | 1093 | # Copyright (c) The University of Edinburgh 2014
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Counts words produced by RandomWordProducer and filtered by RandomFilter.
'''
from dispel4py.workflow_graph import WorkflowGraph
from dispel4py.examples.graph_testing.testing_PEs import RandomFilter, RandomWordProducer, WordCounter
# Build the pipeline: producer -> filter -> counter.
words = RandomWordProducer()
words.numprocesses=1
# NOTE(review): 'filter' shadows the builtin of the same name; kept as-is
# because renaming this module-level name could break code that imports it.
filter = RandomFilter()
filter.numprocesses=5
count = WordCounter()
count.numprocesses=3

graph = WorkflowGraph()
# Wire the PEs together by their named ports.
graph.connect(words, 'output', filter, 'input')
graph.connect(filter, 'output', count, 'input')
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.