from setuptools import setup, find_packages
setup(
name="simple-crawler",
version="0.1",
url="https://github.com/shonenada/crawler",
author="shonenada",
author_email="shonenada@gmail.com",
description="Simple crawler",
zip_safe=True,
platforms="any",
packages=find_packages(),
install_requires=["requests==2.2.1"],
)
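# Usage sketch (assumption: run from the project root with pip available):
#     pip install -e .
# installs the package in editable mode and pulls in the single pinned
# dependency declared above, requests 2.2.1.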
|
import unittest
import numpy as np
import theano
import theano.tensor as T
from tests.helpers import (SimpleTrainer, SimpleClf, SimpleTransformer,
simple_reg)
from theano_wrapper.layers import (BaseLayer, HiddenLayer, MultiLayerBase,
BaseEstimator, BaseTransformer,
LinearRegression, LogisticRegression,
MultiLayerPerceptron, MultiLayerRegression,
TiedAutoEncoder, AutoEncoder)
class TestBaseLayer(unittest.TestCase):
""" Tests for layer.py module, which includes various types of layers
for theano-wrapper
"""
def test_base_layer_has_params(self):
base = BaseLayer(100, 10)
        self.assertTrue(hasattr(base, 'params'),
                        msg="Class has no attribute 'params'")
def test_base_layer_params_not_empty(self):
base = BaseLayer(100, 10)
        self.assertTrue(base.params, msg="Class 'params' attribute is empty")
def test_base_layer_no_args(self):
        # Test that BaseLayer initializes as expected when given no
        # extra arguments
try:
BaseLayer(100, 10)
except Exception as e:
self.fail("Class initialization failed: %s" % str(e))
def test_base_layer_params_are_theano_shared_variables(self):
base = BaseLayer(100, 10)
for p in base.params:
self.assertIsInstance(p, theano.compile.SharedVariable)
def test_base_layer_has_input(self):
base = BaseLayer(100, 10)
self.assertTrue(hasattr(base, 'X'))
def test_base_layer_input_is_theano_variable(self):
base = BaseLayer(100, 10)
self.assertIsInstance(base.X, theano.tensor.TensorVariable)
def test_base_layer_weights_shape(self):
base = BaseLayer(100, 10)
self.assertEqual(base.W.get_value().shape, (100, 10))
def test_base_layer_bias_shape(self):
base = BaseLayer(100, 10)
self.assertEqual(base.b.get_value().shape, (10,))
def test_base_layer_weights_shape_single_output(self):
base = BaseLayer(100, 1)
self.assertEqual(base.W.get_value().shape, (100,))
def test_base_layer_bias_shape_single_output(self):
base = BaseLayer(100, 1)
self.assertEqual(base.b.get_value().shape, ())
def test_base_layer_no_output(self):
base = BaseLayer(100, 10)
self.assertFalse(hasattr(base, 'y'))
def test_base_layer_int_output(self):
base = BaseLayer(100, 10, y='int')
self.assertTrue(hasattr(base, 'y'))
self.assertTrue(hasattr(base.y, 'dtype'))
self.assertEqual(base.y.dtype, 'int32')
def test_base_layer_float_output(self):
base = BaseLayer(100, 10, y='float')
self.assertTrue(hasattr(base, 'y'))
self.assertTrue(hasattr(base.y, 'dtype'))
self.assertEqual(base.y.dtype, 'float32')
def test_base_layer_custom_weights(self):
try:
BaseLayer(100, 10, weights=np.random.random_sample((100, 10)))
except TypeError:
self.fail("Class did not accept 'weights' arg")
class TestHiddenLayer(unittest.TestCase):
""" Tests for HiddenLayer class.
This class is used only by other classes, so mostly basic stuff here.
"""
def test_hidden_layer_has_params(self):
base = HiddenLayer(100, 10)
        self.assertTrue(hasattr(base, 'params'),
                        msg="Class has no attribute 'params'")
def test_hidden_layer_params_not_empty(self):
base = HiddenLayer(100, 10)
        self.assertTrue(base.params, msg="Class 'params' attribute is empty")
def test_hidden_layer_no_args(self):
        # Test that HiddenLayer initializes as expected when given no
        # extra arguments
try:
HiddenLayer(100, 10)
except Exception as e:
self.fail("Class initialization failed: %s" % str(e))
def test_hidden_layer_params_are_theano_shared_variables(self):
base = HiddenLayer(100, 10)
for p in base.params:
self.assertIsInstance(p, theano.compile.SharedVariable)
def test_hidden_layer_has_input(self):
base = HiddenLayer(100, 10)
self.assertTrue(hasattr(base, 'X'))
def test_hidden_layer_input_is_theano_variable(self):
base = HiddenLayer(100, 10)
self.assertIsInstance(base.X, theano.tensor.TensorVariable)
def test_hidden_layer_weights_shape(self):
base = HiddenLayer(100, 10)
self.assertEqual(base.W.get_value().shape, (100, 10))
def test_hidden_layer_bias_shape(self):
base = HiddenLayer(100, 10)
self.assertEqual(base.b.get_value().shape, (10,))
def test_hidden_layer_weights_shape_single_output(self):
base = HiddenLayer(100, 1)
self.assertEqual(base.W.get_value().shape, (100,))
def test_hidden_layer_bias_shape_single_output(self):
base = HiddenLayer(100, 1)
self.assertEqual(base.b.get_value().shape, ())
def test_hidden_layer_no_output(self):
base = HiddenLayer(100, 10)
self.assertFalse(hasattr(base, 'y'))
def test_hidden_layer_int_output(self):
base = HiddenLayer(100, 10, y='int')
self.assertTrue(hasattr(base, 'y'))
self.assertTrue(hasattr(base.y, 'dtype'))
self.assertEqual(base.y.dtype, 'int32')
def test_hidden_layer_float_output(self):
base = HiddenLayer(100, 10, y='float')
self.assertTrue(hasattr(base, 'y'))
self.assertTrue(hasattr(base.y, 'dtype'))
self.assertEqual(base.y.dtype, 'float32')
class TestMultiLayerBase(unittest.TestCase):
""" Tests for MultiLayerBase class """
def test_multi_layer_base_has_params(self):
base = MultiLayerBase(100, 50, 10, SimpleClf)
        self.assertTrue(hasattr(base, 'params'),
                        msg="Class has no attribute 'params'")
def test_multi_layer_base_params_not_empty(self):
base = MultiLayerBase(100, 50, 10, SimpleClf)
        self.assertTrue(base.params, msg="Class 'params' attribute is empty")
def test_multi_layer_base_no_args(self):
        # Test that MultiLayerBase initializes as expected when given no
        # extra arguments
try:
MultiLayerBase(100, 50, 10, SimpleClf)
except Exception as e:
self.fail("Class initialization failed: %s" % str(e))
def test_multi_layer_base_single_layer(self):
        # Test initialization with a single hidden layer
try:
MultiLayerBase(100, 50, 10, SimpleClf)
except Exception as e:
self.fail("Class initialization failed: %s" % str(e))
def test_multi_layer_base_multi_layer_single_activation(self):
        # Test initialization with multiple hidden layers and a single
        # activation function
try:
MultiLayerBase(100, [100, 30, 50], 10, SimpleClf, lambda x: x)
except Exception as e:
self.fail("Class initialization failed: %s" % str(e))
def test_multi_layer_base_multi_layer_multi_activation(self):
        # Test initialization with multiple hidden layers and one
        # activation function per layer
try:
MultiLayerBase(100, [100, 30, 50], 10, SimpleClf,
[lambda x: x for i in range(3)])
except Exception as e:
self.fail("Class initialization failed: %s" % str(e))
class BaseEstimatorTransformerTests:
def test_has_trainers(self):
clf = self.Clf()
for t in ['epoch', 'sgd']:
self.assertIn(t, clf.trainer_aliases)
def test_builtin_sgd_trainer(self):
clf = self.Clf()
try:
clf.fit(*self.fit_args, 'sgd', max_iter=1)
except Exception as e:
self.fail("Fitting failed: %s" % str(e))
def test_builtin_sgd_trainer_all_args(self):
clf = self.Clf()
try:
clf.fit(*self.fit_args, 'sgd', alpha=0.1, batch_size=20,
max_iter=1, patience=100, p_inc=3, imp_thresh=0.9,
random=10, verbose=1000)
except Exception as e:
self.fail("Fitting failed: %s" % str(e))
def test_builtin_trainer_regularizer(self):
clf = self.Clf()
reg = simple_reg(clf)
try:
clf.fit(*self.fit_args, reg=reg, max_iter=2)
except Exception as e:
self.fail("Fitting failed: %s" % str(e))
class TestBaseEstimator(unittest.TestCase, BaseEstimatorTransformerTests):
TheBase = BaseEstimator
TheClf = SimpleClf
X = np.random.standard_normal((500, 100)).astype(np.float32)
y = np.random.randint(0, 9, (500,)).astype(np.int32)
fit_args = (X, y,)
def setUp(self):
class Clf(self.TheClf, self.TheBase):
            def __init__(self, *args, **kwargs):
                SimpleClf.__init__(self, *args, **kwargs)
self.Clf = Clf
class TestBaseTransformer(unittest.TestCase, BaseEstimatorTransformerTests):
TheBase = BaseTransformer
TheClf = SimpleTransformer
X = np.random.standard_normal((500, 100)).astype(np.float32)
fit_args = (X,)
def setUp(self):
class Clf(self.TheClf, self.TheBase):
            def __init__(self, *args, **kwargs):
                SimpleTransformer.__init__(self, *args, **kwargs)
self.Clf = Clf
class EstimatorTests:
X = np.random.standard_normal((500, 100)).astype(np.float32)
def test_estimator_has_params(self):
clf = self.estimator(*self.args)
self.assertTrue(hasattr(clf, 'params'))
self.assertIsNotNone(clf.params)
def test_estimator_has_output(self):
clf = self.estimator(*self.args)
self.assertIsInstance(clf.output, theano.tensor.TensorVariable)
def test_estimator_has_cost(self):
clf = self.estimator(*self.args)
self.assertIsInstance(clf.cost, theano.tensor.TensorVariable)
def test_estimator_fit(self):
trn = SimpleTrainer(self.estimator(*self.args))
try:
trn.fit(self.X, self.y)
except Exception as e:
self.fail("Training failed: %s" % str(e))
def test_estimator_with_regularization(self):
clf = self.estimator(*self.args)
reg = simple_reg(clf)
try:
trn = SimpleTrainer(clf, reg)
trn.fit(self.X, self.y)
except Exception as e:
self.fail("Estimator failed: %s" % str(e))
def test_estimator_builtin_fit(self):
clf = self.estimator(*self.args)
try:
clf.fit(self.X, self.y, max_iter=1)
except Exception as e:
self.fail("Estimator failed: %s" % str(e))
def test_estimator_builtin_predict(self):
clf = self.estimator(*self.args)
clf.fit(self.X, self.y, max_iter=1)
pred = clf.predict(self.X)
self.assertEqual(pred.shape, (self.X.shape[0],))
class MultiLayerEstimatorMixin:
def test_estimator_fit_three_hidden_single_activation(self):
args = list(self.args)
# set n_hidden arg to an array of n_nodes for three layers
args[1] = [args[0], int(args[0]/2), int(args[0]/3)]
trn = SimpleTrainer(self.estimator(*args))
try:
trn.fit(self.X, self.y)
except Exception as e:
self.fail("Training failed: %s" % str(e))
def test_estimator_random_arguement_int_seed(self):
        # The estimator should accept a random argument for the
        # initialization of weights. Here we test an integer seed.
trn = SimpleTrainer(self.estimator(*self.args, random=42))
try:
trn.fit(self.X, self.y)
except Exception as e:
self.fail("Training failed: %s" % str(e))
def test_estimator_random_arguement_rng(self):
        # The estimator should accept a random argument for the
        # initialization of weights. Here we test a random state generator.
trn = SimpleTrainer(self.estimator(*self.args,
random=np.random.RandomState(42)))
try:
trn.fit(self.X, self.y)
except Exception as e:
self.fail("Training failed: %s" % str(e))
class ClassificationTest(EstimatorTests):
y = np.random.randint(0, 9, (500,)).astype(np.int32)
class RegressionTest(EstimatorTests):
y = np.random.random((500,)).astype(np.float32)
def test_estimator_fit_multivariate(self):
args = list(self.args)
args[-1] = 5
y = np.random.random((500, 5)).astype(np.float32)
trn = SimpleTrainer(self.estimator(*args))
try:
trn.fit(self.X, y)
except Exception as e:
self.fail("Training failed: %s" % str(e))
class TestLinearRegression(unittest.TestCase, RegressionTest):
estimator = LinearRegression
args = (100, 1)
class TestLogisticRegression(unittest.TestCase, ClassificationTest):
estimator = LogisticRegression
args = (100, 10)
class TestMultiLayerPerceptron(unittest.TestCase,
ClassificationTest, MultiLayerEstimatorMixin):
estimator = MultiLayerPerceptron
args = (100, 100, 10)
class TestMultiLayerRegression(unittest.TestCase,
RegressionTest, MultiLayerEstimatorMixin):
estimator = MultiLayerRegression
args = (100, 100, 1)
class TransformerTests:
X = np.random.standard_normal((500, 100)).astype(np.float32)
def test_transformer_has_params(self):
clf = self.transformer(*self.args)
self.assertTrue(hasattr(clf, 'params'))
self.assertIsNotNone(clf.params)
def test_transformer_has_encode(self):
clf = self.transformer(*self.args)
self.assertIsInstance(clf.encode, theano.tensor.TensorVariable)
def test_transformer_has_cost(self):
clf = self.transformer(*self.args)
self.assertIsInstance(clf.cost, theano.tensor.TensorVariable)
def test_transformer_fit(self):
trn = SimpleTrainer(self.transformer(*self.args))
try:
trn.fit(self.X)
except Exception as e:
self.fail("Training failed: %s" % str(e))
def test_transformer_with_regularization(self):
clf = self.transformer(*self.args)
reg = simple_reg(clf)
try:
trn = SimpleTrainer(clf, reg)
trn.fit(self.X)
except Exception as e:
self.fail("Estimator failed: %s" % str(e))
    def test_transformer_float_n_hidden(self):
args = list(self.args)
args[-1] = 0.5
trn = SimpleTrainer(self.transformer(*args))
try:
trn.fit(self.X)
except Exception as e:
self.fail("Training failed: %s" % str(e))
def test_transformer_builtin_fit(self):
clf = self.transformer(*self.args)
try:
clf.fit(self.X, max_iter=1)
except Exception as e:
self.fail("Estimator failed: %s" % str(e))
def test_transformer_builtin_predict(self):
clf = self.transformer(*self.args)
clf.fit(self.X, max_iter=1)
pred = clf.predict(self.X)
        self.assertEqual(pred.shape, self.X.shape)
def test_transformer_builtin_transform(self):
clf = self.transformer(*self.args)
clf.fit(self.X, max_iter=1)
pred = clf.transform(self.X)
self.assertEqual(pred.shape, (self.X.shape[0], self.args[-1]))
class MultiLayerTransformerMixin:
def test_transformer_fit_three_hidden_single_activation(self):
args = list(self.args)
# set n_hidden arg to an array of n_nodes for three layers
args[1] = [args[0], int(args[0]/2), int(args[0]/3)]
trn = SimpleTrainer(self.transformer(*args))
try:
trn.fit(self.X)
except Exception as e:
self.fail("Training failed: %s" % str(e))
def test_transformer_fit_three_hidden_all_activations(self):
args = list(self.args)
# set n_hidden arg to an array of n_nodes for three layers
args[1] = [args[0], int(args[0]/2), int(args[0]/3)]
activation = [T.nnet.sigmoid, T.nnet.softplus, T.nnet.softmax,
T.nnet.sigmoid]
trn = SimpleTrainer(self.transformer(*args, activation))
try:
trn.fit(self.X)
except Exception as e:
self.fail("Training failed: %s" % str(e))
def test_transformer_random_arguement_int_seed(self):
        # The transformer should accept a random argument for the
        # initialization of weights. Here we test an integer seed.
trn = SimpleTrainer(self.transformer(*self.args, random=42))
try:
trn.fit(self.X)
except Exception as e:
self.fail("Training failed: %s" % str(e))
def test_transformer_random_arguement_rng(self):
        # The transformer should accept a random argument for the
        # initialization of weights. Here we test a random state generator.
trn = SimpleTrainer(self.transformer(*self.args,
random=np.random.RandomState(42)))
try:
trn.fit(self.X)
except Exception as e:
self.fail("Training failed: %s" % str(e))
class TestTiedAutoEncoder(unittest.TestCase, TransformerTests):
transformer = TiedAutoEncoder
args = (100, 50)
class TestAutoEncoder(unittest.TestCase, TransformerTests,
MultiLayerTransformerMixin):
transformer = AutoEncoder
args = (100, 50)
def test_cost_cross_entropy(self):
try:
trn = SimpleTrainer(self.transformer(*self.args,
cost='cross_entropy'))
trn.fit(self.X)
except Exception as e:
self.fail("Training failed: %s" % str(e))
def test_denoising_mode(self):
try:
trn = SimpleTrainer(self.transformer(*self.args,
corrupt=0.1))
trn.fit(self.X)
except Exception as e:
self.fail("Training failed: %s" % str(e))
|
a = 5
if a > 5:
    print("Value is greater than 5")
else:
    print("Value is 5 or less")
if a > 5:
    print("Value is greater than 5")
elif a < 5:
    print("Value is less than 5")
else:
    print("Value is 5")
a = 3
b = 5
if (a == 3) and (b == 5):
print("a and b are as expected - great :)")
else:
print("a and b not as expected - not great :(")
|
import py
from rpython.annotator import model as annmodel
from rpython.rtyper.llannotation import SomePtr, lltype_to_annotation
from rpython.conftest import option
from rpython.rtyper.annlowlevel import (annotate_lowlevel_helper,
MixLevelHelperAnnotator, PseudoHighLevelCallable, llhelper,
cast_instance_to_base_ptr, cast_base_ptr_to_instance)
from rpython.rtyper.llinterp import LLInterpreter
from rpython.rtyper.lltypesystem.lltype import *
from rpython.rtyper.rclass import fishllattr, OBJECTPTR
from rpython.rtyper.test.test_llinterp import interpret
from rpython.translator.translator import TranslationContext
def annotated_calls(ann, ops=('simple_call',)):
for block in ann.annotated:
for op in block.operations:
if op.opname in ops:
yield op
def derived(op, orig):
if op.args[0].value.__name__.startswith(orig):
return op.args[0].value
else:
return None
class TestLowLevelAnnotateTestCase:
from rpython.annotator.annrpython import RPythonAnnotator
def annotate(self, ll_function, argtypes):
self.a = self.RPythonAnnotator()
graph = annotate_lowlevel_helper(self.a, ll_function, argtypes)
if option.view:
self.a.translator.view()
return self.a.binding(graph.getreturnvar())
def test_simple(self):
S = GcStruct("s", ('v', Signed))
def llf():
s = malloc(S)
return s.v
s = self.annotate(llf, [])
assert s.knowntype == int
def test_simple2(self):
S = Struct("s", ('v', Signed))
S2 = GcStruct("s2", ('a', S), ('b', S))
def llf():
s = malloc(S2)
return s.a.v+s.b.v
s = self.annotate(llf, [])
assert s.knowntype == int
def test_array(self):
A = GcArray(('v', Signed))
def llf():
a = malloc(A, 1)
return a[0].v
s = self.annotate(llf, [])
assert s.knowntype == int
def test_array_longlong(self):
from rpython.rlib.rarithmetic import r_longlong
A = GcArray(('v', Signed))
one = r_longlong(1)
def llf():
a = malloc(A, one)
return a[0].v
s = self.annotate(llf, [])
assert s.knowntype == int
def test_prim_array(self):
A = GcArray(Signed)
def llf():
a = malloc(A, 1)
return a[0]
s = self.annotate(llf, [])
assert s.knowntype == int
def test_prim_array_setitem(self):
A = GcArray(Signed)
def llf():
a = malloc(A, 1)
a[0] = 3
return a[0]
s = self.annotate(llf, [])
assert s.knowntype == int
def test_cast_simple_widening(self):
S2 = Struct("s2", ('a', Signed))
S1 = Struct("s1", ('sub1', S2), ('sub2', S2))
PS1 = Ptr(S1)
PS2 = Ptr(S2)
def llf(p1):
p2 = p1.sub1
p3 = cast_pointer(PS1, p2)
return p3
s = self.annotate(llf, [SomePtr(PS1)])
assert isinstance(s, SomePtr)
assert s.ll_ptrtype == PS1
def test_cast_simple_widening_from_gc(self):
S2 = GcStruct("s2", ('a', Signed))
S1 = GcStruct("s1", ('sub1', S2), ('x', Signed))
PS1 = Ptr(S1)
def llf():
p1 = malloc(S1)
p2 = p1.sub1
p3 = cast_pointer(PS1, p2)
return p3
s = self.annotate(llf, [])
assert isinstance(s, SomePtr)
assert s.ll_ptrtype == PS1
def test_cast_pointer(self):
S3 = GcStruct("s3", ('a', Signed))
S2 = GcStruct("s3", ('sub', S3))
S1 = GcStruct("s1", ('sub', S2))
PS1 = Ptr(S1)
PS2 = Ptr(S2)
PS3 = Ptr(S3)
def llf():
p1 = malloc(S1)
p2 = p1.sub
p3 = p2.sub
p12 = cast_pointer(PS1, p2)
p13 = cast_pointer(PS1, p3)
p21 = cast_pointer(PS2, p1)
p23 = cast_pointer(PS2, p3)
p31 = cast_pointer(PS3, p1)
p32 = cast_pointer(PS3, p2)
return p12, p13, p21, p23, p31, p32
s = self.annotate(llf, [])
assert [x.ll_ptrtype for x in s.items] == [PS1, PS1, PS2, PS2, PS3, PS3]
def test_array_length(self):
A = GcArray(('v', Signed))
def llf():
a = malloc(A, 1)
return len(a)
s = self.annotate(llf, [])
assert s.knowntype == int
def test_funcptr(self):
F = FuncType((Signed,), Signed)
PF = Ptr(F)
def llf(p):
return p(0)
s = self.annotate(llf, [SomePtr(PF)])
assert s.knowntype == int
def test_ll_calling_ll(self):
A = GcArray(Float)
B = GcArray(Signed)
def ll_make(T, n):
x = malloc(T, n)
return x
def ll_get(T, x, i):
return x[i]
def llf():
a = ll_make(A, 3)
b = ll_make(B, 2)
a[0] = 1.0
b[1] = 3
y0 = ll_get(A, a, 1)
y1 = ll_get(B, b, 1)
#
a2 = ll_make(A, 4)
a2[0] = 2.0
return ll_get(A, a2, 1)
s = self.annotate(llf, [])
a = self.a
assert s == annmodel.SomeFloat()
seen = {}
ngraphs = len(a.translator.graphs)
vTs = []
for call in annotated_calls(a):
if derived(call, "ll_"):
func, T = [x.value for x in call.args[0:2]]
if (func, T) in seen:
continue
seen[func, T] = True
desc = a.bookkeeper.getdesc(func)
g = desc.specialize([a.binding(x) for x in call.args[1:]])
args = g.getargs()
rv = g.getreturnvar()
if func is ll_get:
vT, vp, vi = args
assert a.binding(vT) == a.bookkeeper.immutablevalue(T)
assert a.binding(vi).knowntype == int
assert a.binding(vp).ll_ptrtype.TO == T
assert a.binding(rv) == lltype_to_annotation(T.OF)
elif func is ll_make:
vT, vn = args
assert a.binding(vT) == a.bookkeeper.immutablevalue(T)
assert a.binding(vn).knowntype == int
assert a.binding(rv).ll_ptrtype.TO == T
else:
assert False, func
vTs.append(vT)
assert len(seen) == 4
return a, vTs # reused by a test in test_rtyper
def test_ll_calling_ll2(self):
A = GcArray(Float)
B = GcArray(Signed)
def ll_make(T, n):
x = malloc(T, n)
return x
def ll_get(x, i):
return x[i]
def makelen4(T):
return ll_make(T, 4)
def llf():
a = ll_make(A, 3)
b = ll_make(B, 2)
a[0] = 1.0
b[1] = 3
y0 = ll_get(a, 1)
y1 = ll_get(b, 1)
#
a2 = makelen4(A)
a2[0] = 2.0
return ll_get(a2, 1)
s = self.annotate(llf, [])
a = self.a
assert s == annmodel.SomeFloat()
seen = {}
def q(v):
s = a.binding(v)
if s.is_constant():
return s.const
else:
return s.ll_ptrtype
vTs = []
for call in annotated_calls(a):
if derived(call, "ll_") or derived(call, "makelen4"):
func, T = [q(x) for x in call.args[0:2]]
if (func, T) in seen:
continue
seen[func, T] = True
desc = a.bookkeeper.getdesc(func)
g = desc.specialize([a.binding(x) for x in call.args[1:]])
args = g.getargs()
rv = g.getreturnvar()
if func is ll_make:
vT, vn = args
assert a.binding(vT) == a.bookkeeper.immutablevalue(T)
assert a.binding(vn).knowntype == int
assert a.binding(rv).ll_ptrtype.TO == T
vTs.append(vT)
elif func is makelen4:
vT, = args
assert a.binding(vT) == a.bookkeeper.immutablevalue(T)
assert a.binding(rv).ll_ptrtype.TO == T
vTs.append(vT)
elif func is ll_get:
vp, vi = args
assert a.binding(vi).knowntype == int
assert a.binding(vp).ll_ptrtype == T
assert a.binding(rv) == lltype_to_annotation(
T.TO.OF)
else:
assert False, func
assert len(seen) == 5
return a, vTs # reused by a test in test_rtyper
def test_ll_stararg(self):
A = GcArray(Float)
B = GcArray(Signed)
def ll_sum(*args):
result = 0
if len(args) > 0:
result += args[0]
if len(args) > 1:
result += args[1]
if len(args) > 2:
result += args[2]
if len(args) > 3:
result += args[3]
return result
def llf():
a = ll_sum()
b = ll_sum(4, 5)
c = ll_sum(2.5)
d = ll_sum(4, 5.25)
e = ll_sum(1000, 200, 30, 4)
f = ll_sum(1000, 200, 30, 5)
return a, b, c, d, e, f
s = self.annotate(llf, [])
assert isinstance(s, annmodel.SomeTuple)
assert s.items[0].knowntype is int
assert s.items[0].const == 0
assert s.items[1].knowntype is int
assert s.items[2].knowntype is float
assert s.items[3].knowntype is float
assert s.items[4].knowntype is int
assert s.items[5].knowntype is int
def test_str_vs_ptr(self):
S = GcStruct('s', ('x', Signed))
def ll_stuff(x):
if x is None or isinstance(x, str):
return 2
else:
return 3
def llf():
x = ll_stuff("hello")
y = ll_stuff(nullptr(S))
return x, y
s = self.annotate(llf, [])
assert isinstance(s, annmodel.SomeTuple)
assert s.items[0].is_constant()
assert s.items[0].const == 2
assert s.items[1].is_constant()
assert s.items[1].const == 3
def test_getRuntimeTypeInfo(self):
S = GcStruct('s', ('x', Signed), rtti=True)
def llf():
return getRuntimeTypeInfo(S)
s = self.annotate(llf, [])
assert isinstance(s, SomePtr)
assert s.ll_ptrtype == Ptr(RuntimeTypeInfo)
assert s.const == getRuntimeTypeInfo(S)
def test_runtime_type_info(self):
S = GcStruct('s', ('x', Signed), rtti=True)
def llf(p):
return runtime_type_info(p)
s = self.annotate(llf, [SomePtr(Ptr(S))])
assert isinstance(s, SomePtr)
assert s.ll_ptrtype == Ptr(RuntimeTypeInfo)
def test_cast_primitive(self):
def llf(u):
return cast_primitive(Signed, u)
s = self.annotate(llf, [annmodel.SomeInteger(unsigned=True)])
assert s.knowntype == int
def llf(s):
return cast_primitive(Unsigned, s)
s = self.annotate(llf, [annmodel.SomeInteger()])
assert s.unsigned == True
def test_pbctype(self):
TYPE = Void
TYPE2 = Signed
def g(lst):
n = lst[0]
if isinstance(TYPE, Number):
result = 123
else:
result = 456
if isinstance(TYPE2, Number):
result += 1
return result + n
def llf():
lst = [5]
g(lst)
lst.append(6)
self.annotate(llf, [])
def test_adtmeths(self):
def h_length(s):
return s.foo
S = GcStruct("S", ('foo', Signed),
adtmeths={"h_length": h_length,
"stuff": 12})
def llf():
s = malloc(S)
s.foo = 321
return s.h_length()
s = self.annotate(llf, [])
assert s.knowntype == int and not s.is_constant()
def llf():
s = malloc(S)
return s.stuff
s = self.annotate(llf, [])
assert s.is_constant() and s.const == 12
def test_pseudohighlevelcallable():
t = TranslationContext()
t.buildannotator()
rtyper = t.buildrtyper()
rtyper.specialize()
a = MixLevelHelperAnnotator(rtyper)
class A:
value = 5
def double(self):
return self.value * 2
def fn1(a):
a2 = A()
a2.value = a.double()
return a2
s_A, r_A = a.s_r_instanceof(A)
fn1ptr = a.delayedfunction(fn1, [s_A], s_A)
pseudo = PseudoHighLevelCallable(fn1ptr, [s_A], s_A)
def fn2(n):
a = A()
a.value = n
a2 = pseudo(a)
return a2.value
graph = a.getgraph(fn2, [annmodel.SomeInteger()], annmodel.SomeInteger())
a.finish()
llinterp = LLInterpreter(rtyper)
res = llinterp.eval_graph(graph, [21])
assert res == 42
def test_llhelper():
S = GcStruct('S', ('x', Signed), ('y', Signed))
def f(s,z):
return s.x*s.y+z
def g(s):
return s.x+s.y
F = Ptr(FuncType([Ptr(S), Signed], Signed))
G = Ptr(FuncType([Ptr(S)], Signed))
def h(x, y, z):
s = malloc(S)
s.x = x
s.y = y
fptr = llhelper(F, f)
gptr = llhelper(G, g)
assert typeOf(fptr) == F
return fptr(s, z)+fptr(s, z*2)+gptr(s)
res = interpret(h, [8, 5, 2])
assert res == 99
def test_llhelper_multiple_functions():
S = GcStruct('S', ('x', Signed), ('y', Signed))
def f(s):
return s.x - s.y
def g(s):
return s.x + s.y
F = Ptr(FuncType([Ptr(S)], Signed))
myfuncs = [f, g]
def h(x, y, z):
s = malloc(S)
s.x = x
s.y = y
fptr = llhelper(F, myfuncs[z])
assert typeOf(fptr) == F
return fptr(s)
res = interpret(h, [80, 5, 0])
assert res == 75
res = interpret(h, [80, 5, 1])
assert res == 85
def test_cast_instance_to_base_ptr():
class A:
def __init__(self, x, y):
self.x = x
self.y = y
def f(x, y):
if x > 20:
a = None
else:
a = A(x, y)
a1 = cast_instance_to_base_ptr(a)
return a1
res = interpret(f, [5, 10])
assert typeOf(res) == OBJECTPTR
assert fishllattr(res, 'x') == 5
assert fishllattr(res, 'y') == 10
res = interpret(f, [25, 10])
assert res == nullptr(OBJECTPTR.TO)
def test_cast_base_ptr_to_instance():
class A:
def __init__(self, x, y):
self.x = x
self.y = y
def f(x, y):
if x > 20:
a = None
else:
a = A(x, y)
a1 = cast_instance_to_base_ptr(a)
b = cast_base_ptr_to_instance(A, a1)
return a is b
assert f(5, 10) is True
assert f(25, 10) is True
res = interpret(f, [5, 10])
assert res is True
res = interpret(f, [25, 10])
assert res is True
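# Quick sketch of the interpret() helper used throughout these tests
# (assumes an rpython checkout on sys.path): it annotates, rtypes and
# evaluates a plain function on the llinterpreter in one call.
#     def ll_double(x):
#         return x * 2
#     assert interpret(ll_double, [21]) == 42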
|
import sys
import eventmaster as EM
""" Create new Instance of EventMasterSwitcher and turn off logging """
s3 = EM.EventMasterSwitcher()
s3.setVerbose(0)
with open('example_settings_.xml', 'r') as content_file:
content = content_file.read()
s3.loadFromXML(content)
""" Enumerate all Inputs and print known information for each """
print("# Inputs")
for input_id, input_inst in s3.getInputs().items():
input_name = input_inst.getName()
frozen_string = "is Frozen" if input_inst.getFreeze() else "is not Frozen"
print(" ({0!s}) {1!s} {2!s}".format(input_id, input_name, frozen_string))
""" Enumerate all Outputs and print known information for each """
print("\r\n# Outputs")
for output_id, output_inst in s3.getOutputs().items():
output_name = output_inst.getName()
print(" ({0!s}) {1!s}".format(output_id, output_name))
""" Enumerate all Presets and print known information for each """
print("\r\n# Presets")
for preset_id, preset_inst in s3.getPresets().items():
preset_name = preset_inst.getName()
print(" ({0!s}) {1!s}".format(preset_id, preset_name))
""" Enumerate all Destinations and print known information for each """
print("\r\n# Destinations:")
for dest_id, dest_inst in s3.getScreenDests().items():
dest_numoflayers = len(dest_inst.getLayers())
dest_name = dest_inst.getName()
dest_size = dest_inst.getSize()
print("\n {1!s} is {2!s} x {3!s} & has {4!s} layer(s)".format( dest_id,
dest_name,
dest_size["HSize"],
dest_size["VSize"],
dest_numoflayers))
""" Enumerate all Layers for Destination and print known information for each """
for layer_number, layer_inst in dest_inst.getLayers().items():
if "Pvw" in layer_inst:
layer_name_pvw = layer_inst["Pvw"].getName()
layer_owin_pvw = layer_inst["Pvw"].getOWIN()
layer_hpos_pvw = layer_owin_pvw["HPos"]
layer_hsize_pvw = layer_owin_pvw["HSize"]
layer_vpos_pvw = layer_owin_pvw["VPos"]
layer_vsize_pvw = layer_owin_pvw["VSize"]
if layer_inst["Pvw"].getSource() is not None:
layer_source_name_pvw = layer_inst["Pvw"].getSource().getName()
else:
layer_source_name_pvw = "(Unknown)"
size_string_pvw = " {4!s} is on PVW - {0!s}x{1!s} at {2!s},{3!s}".format(layer_hsize_pvw, layer_vsize_pvw, layer_hpos_pvw, layer_vpos_pvw, layer_name_pvw)
source_string_pvw = " current source is {0!s}".format(layer_source_name_pvw)
else:
size_string_pvw = " Layer is not on PVW "
source_string_pvw = ""
if "Pgm" in layer_inst:
layer_name_pgm = layer_inst["Pgm"].getName()
layer_owin_pgm = layer_inst["Pgm"].getOWIN()
layer_hpos_pgm = layer_owin_pgm["HPos"]
layer_hsize_pgm = layer_owin_pgm["HSize"]
layer_vpos_pgm = layer_owin_pgm["VPos"]
layer_vsize_pgm = layer_owin_pgm["VSize"]
if layer_inst["Pgm"].getSource() is not None:
layer_source_name_pgm = layer_inst["Pgm"].getSource().getName()
else:
layer_source_name_pgm = "(Unknown)"
size_string_pgm = " {4!s} is on PGM - {0!s}x{1!s} at {2!s},{3!s}".format(layer_hsize_pgm, layer_vsize_pgm, layer_hpos_pgm, layer_vpos_pgm, layer_name_pgm)
source_string_pgm = " current source is {0!s}".format(layer_source_name_pgm)
else:
size_string_pgm = " Layer is not on PGM "
source_string_pgm = ""
print(" ({0!s}) {1!s}\n {2!s}\n {3!s}\n {4!s}".format(layer_number+1, size_string_pgm, source_string_pgm, size_string_pvw, source_string_pvw))
sys.exit()
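# Convenience sketch built only from calls already used above: total
# layer count across every screen destination in the loaded snapshot.
#     total_layers = sum(len(d.getLayers())
#                        for d in s3.getScreenDests().values())
#     print("total layers:", total_layers)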
|
import curses
import shutil
import signal
class Picker:
"""Allows you to select from a list with curses"""
stdscr = None
win = None
title = ""
arrow = ""
footer = ""
more = ""
c_selected = ""
c_empty = ""
cursor = 0
offset = 0
selected = 0
selcount = 0
aborted = False
window_height = shutil.get_terminal_size().lines - 10
window_width = shutil.get_terminal_size().columns - 20
all_options = []
length = 0
def curses_start(self):
self.stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
self.win = curses.newwin(
5 + self.window_height,
self.window_width,
2,
4
)
def sigwinch_handler(self, n, frame):
self.window_height = shutil.get_terminal_size().lines - 10
self.window_width = shutil.get_terminal_size().columns - 20
curses.endwin()
self.stdscr.clear()
self.stdscr = curses.initscr()
self.win = curses.newwin(
5 + self.window_height,
self.window_width,
2,
4
)
def curses_stop(self):
curses.nocbreak()
self.stdscr.keypad(0)
curses.echo()
curses.endwin()
    def getSelected(self):
        if self.aborted:
            return False
        ret_s = [x for x in self.all_options if x["selected"]]
        ret = [x["label"] for x in ret_s]
        return ret
def redraw(self):
self.win.clear()
self.win.border(
self.border[0], self.border[1],
self.border[2], self.border[3],
self.border[4], self.border[5],
self.border[6], self.border[7]
)
self.win.addstr(
self.window_height + 4, 5, " " + self.footer + " "
)
position = 0
        # draw only the slice of options that fits inside the window
        visible = self.all_options[self.offset:self.offset + self.window_height + 1]
        for option in visible:
            if option["selected"]:
                line_label = self.c_selected + " "
            else:
                line_label = self.c_empty + " "
if len(option["label"]) > (self.window_width - 20):
reduced = option["label"][:self.window_width - 20] + "..."
else:
reduced = option["label"]
self.win.addstr(position + 2, 5, line_label + reduced)
position = position + 1
# hint for more content above
if self.offset > 0:
self.win.addstr(1, 5, self.more)
# hint for more content below
if self.offset + self.window_height <= self.length - 2:
self.win.addstr(self.window_height + 3, 5, self.more)
self.win.addstr(0, 5, " " + self.title + " ")
self.win.addstr(
0, self.window_width - 8,
" " + str(self.selcount) + "/" + str(self.length) + " "
)
self.win.addstr(self.cursor + 2,1, self.arrow)
self.win.refresh()
def check_cursor_up(self):
if self.cursor < 0:
self.cursor = 0
if self.offset > 0:
self.offset = self.offset - 1
def check_cursor_down(self):
if self.cursor >= self.length:
self.cursor = self.cursor - 1
if self.cursor > self.window_height:
self.cursor = self.window_height
self.offset = self.offset + 1
if self.offset + self.cursor >= self.length:
self.offset = self.offset - 1
def curses_loop(self, stdscr):
while 1:
self.redraw()
c = stdscr.getch()
if c == ord('q') or c == ord('Q'):
self.aborted = True
break
elif c == curses.KEY_UP:
self.cursor = self.cursor - 1
elif c == curses.KEY_DOWN:
self.cursor = self.cursor + 1
#elif c == curses.KEY_PPAGE:
#elif c == curses.KEY_NPAGE:
elif c == ord(' '):
self.all_options[self.selected]["selected"] = \
not self.all_options[self.selected]["selected"]
elif c == 10:
break
# deal with interaction limits
self.check_cursor_up()
self.check_cursor_down()
# compute selected position only after dealing with limits
self.selected = self.cursor + self.offset
temp = self.getSelected()
self.selcount = len(temp)
    def __init__(self, options, title='Select', arrow="-->",
                 footer="Space = toggle, Enter = accept, q = cancel",
                 more="...", border="||--++++", c_selected="[X]",
                 c_empty="[ ]", checked=()):
self.title = title
self.arrow = arrow
self.footer = footer
self.more = more
self.border = border
self.c_selected = c_selected
self.c_empty = c_empty
self.all_options = []
for option in options:
self.all_options.append({
"label": option,
"selected": True if (option in checked) else False
})
self.length = len(self.all_options)
self.curses_start()
signal.signal(signal.SIGWINCH, self.sigwinch_handler)
curses.wrapper( self.curses_loop )
self.curses_stop()
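if __name__ == '__main__':
    # Usage sketch: needs a real terminal, since curses takes over the
    # screen. getSelected() returns the chosen labels, or False when the
    # user aborts with q.
    picker = Picker(["alpha", "beta", "gamma"], title="Demo",
                    checked=("beta",))
    print(picker.getSelected())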
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "remakery.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
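# Typical invocations (run from the project root):
#     python manage.py migrate
#     python manage.py runserver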
|
import dateutil.parser
import dateutil.tz
import feedparser
import re
from datetime import datetime, timedelta
from joblist import JobList
class FilterException(Exception):
pass
class IndeedJobList(JobList):
'''Joblist class for Indeed
This joblist is for the indeed.com rss feed. Indeed has an API,
but it requires registration and is more suited to companies repackaging
their data. The RSS feed works just fine for the kind of search I'm
interested in.
'''
base_url = ('http://www.indeed.{domain}/rss?q={keywords}&l={location}'
'&sort=date&start={offset}')
page_size = 20
def collect_results(self, keywords, location, radius, filter_location=(),
filter_title=(), country='us',
max_results=1000, oldest=None):
'''Collect results for indeed.com (.ca, etc)
The feeds site is "indeed.com/rss?" plus these parameters:
* q: a set of keywords, combined with "+"
* l: the location (a zip code, "city, state", "remote", or just a state)
* sort: "date" or "relevance", I guess
* offset: The rss returns up to 20 results, you can page through them
using this parameter
:param keywords: str A space-separated list of keywords, arguments to
the "q" operator
:param location: str a zip code, "city, state" combination, "remote",
or state code. Argument to "l"
:param radius: int radius around a location. Argument to "r". May use 0
to limit to the location exactly.
:param filter_location: str an iterable of locations to be removed
from results. Any location that contains any of the strings
will be ignored.
:param filter_title: str an iterable of strings to filter titles. A
title will be ignored if it contains any of the strings.
:param country: str A two-letter country code. Defaults to "us", which
will try indeed.com; will try any other code if provided, but there
is no guarantee other codes will be handled well.
:param max_results: int A maximum number of results. The
results may be less than this, but the function will stop
querying if this number is reached.
:param oldest: timedelta Anything older than today - oldest
will be ignored.
:returns: A generator which when called will yield a dict of
the following format:
{
'date': The reported date of the entry,
'id': 'indeed$' + indeed's id for the job entry,
'link': a link to indeed's page about the entry,
'location': the entry's reported location,
'source': the reported author of the post,
'title': the reported title
}
'''
domain = 'com'
        if country != 'us':
domain = country
if oldest is None:
oldest = timedelta(weeks=52)
oldest_cutoff = datetime.now(tz=dateutil.tz.tzlocal()) - oldest
pages = 0
found = 0
cutoff = False
previous = ()
while found < max_results:
# Get a page of feed results (sorted by date), and process
# it until either a date older than *oldest_cutoff*
# appears or all the entries have been processed
offset = pages * self.page_size
feed = feedparser.parse(
self.base_url.format(domain=domain,
keywords=keywords,
location=location,
radius=radius,
offset=offset)
)
new = []
for entry in feed['entries']:
# We've seen this before, skip it.
if entry['id'] in previous:
continue
new.append(entry['id'])
entry_date = dateutil.parser.parse(entry['published'])
if oldest_cutoff > entry_date:
return None
entry_title = entry['title']
entry_location = 'Unspecified'
try:
entry_location = entry_title.split(' - ')[-1]
except IndexError:
pass
try:
for location_filter in filter_location:
if re.search(location_filter, entry_location,
re.IGNORECASE):
raise FilterException
for title_filter in filter_title:
if re.search(title_filter, entry_title,
re.IGNORECASE):
raise FilterException
except FilterException:
continue
found += 1
yield {
'date': entry_date,
'id': 'indeed$' + entry['id'],
'link': entry['link'],
'location': entry_location,
'source': entry['source']['title'],
'title': entry_title,
}
if not new:
# The assumption is that if none of the entries are new,
# indeed is just repeating and the current group
# of jobs is ended
return None
previous = tuple(new)
pages += 1
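# Usage sketch (hedged: requires network access and the project's
# joblist.JobList base class on the import path):
#     jl = IndeedJobList()
#     for job in jl.collect_results('python django', 'remote', 25,
#                                   max_results=5):
#         print(job['date'], job['title'], job['location'])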
|
from .config import Config, HentaiHavenConfig, HAnimeConfig, Section
from .last_entry import LastEntry
|
from Tkinter import *
from ScrolledText import ScrolledText
from unicodedata import lookup
import os
class Diacritical:
"""Mix-in class that adds keyboard bindings for accented characters, plus
other common functionality.
An inheriting class must define a select_all method that will respond
to Ctrl-A."""
accents = (('acute', "'"), ('grave', '`'), ('circumflex', '^'),
('tilde', '='), ('diaeresis', '"'), ('cedilla', ','),
('stroke', '/'), ('ring above', ';'))
def __init__(self):
# Fix some key bindings
self.bind("<Control-Key-a>", self.select_all)
# We will need Ctrl-/ for the "stroke", but it cannot be unbound, so
# let's prevent it from being passed to the standard handler
self.bind("<Control-Key-/>", lambda event: "break")
# Diacritical bindings
for a, k in self.accents:
# Little-known feature of Tk, it allows to bind an event to
# multiple keystrokes
self.bind("<Control-Key-%s><Key>" % k,
lambda event, a=a: self.insert_accented(event.char, a))
def insert_accented(self, c, accent):
if c.isalpha():
if c.isupper():
cap = 'capital'
else:
cap = 'small'
try:
c = lookup("latin %s letter %c with %s" % (cap, c, accent))
self.insert(INSERT, c)
# Prevent plain letter from being inserted too, tell Tk to
# stop handling this event
return "break"
            except KeyError:
pass
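# The lookup() call above assembles standard Unicode names such as
# "LATIN SMALL LETTER E WITH ACUTE"; a quick sanity check:
#     from unicodedata import lookup
#     assert lookup("latin small letter e with acute") == u"\xe9"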
class DiacriticalEntry(Entry, Diacritical):
"""Tkinter Entry widget with some extra key bindings for
entering typical Unicode characters - with umlauts, accents, etc."""
def __init__(self, master=None, **kwargs):
        Entry.__init__(self, master, **kwargs)
Diacritical.__init__(self)
def select_all(self, event=None):
self.selection_range(0, END)
return "break"
class DiacriticalText(ScrolledText, Diacritical):
"""Tkinter ScrolledText widget with some extra key bindings for
entering typical Unicode characters - with umlauts, accents, etc."""
def __init__(self, master=None, **kwargs):
        ScrolledText.__init__(self, master, **kwargs)
Diacritical.__init__(self)
def select_all(self, event=None):
self.tag_add(SEL, "1.0", "end-1c")
self.mark_set(INSERT, "1.0")
self.see(INSERT)
return "break"
def test():
frame = Frame()
frame.pack(fill=BOTH, expand=YES)
if os.name == "nt":
# Set default font for all widgets; use Windows typical default
frame.option_add("*font", "Tahoma 8")
# The editors
entry = DiacriticalEntry(frame)
entry.pack(fill=BOTH, expand=YES)
text = DiacriticalText(frame, width=76, height=25, wrap=WORD)
if os.name == "nt":
# But this looks better than the default set above
text.config(font="Arial 10")
text.pack(fill=BOTH, expand=YES)
text.focus()
frame.master.title("Diacritical Editor")
frame.mainloop()
if __name__ == "__main__":
test()
|
import json
import requests
def call_vision_api(image_filename, api_keys):
api_key = api_keys['microsoft']
post_url = "https://api.projectoxford.ai/vision/v1.0/analyze?visualFeatures=Categories,Tags,Description,Faces,ImageType,Color,Adult&subscription-key=" + api_key
    with open(image_filename, 'rb') as f:
        image_data = f.read()
result = requests.post(post_url, data=image_data, headers={'Content-Type': 'application/octet-stream'})
result.raise_for_status()
return result.text
def get_standardized_result(api_result):
output = {
'tags' : [],
'captions' : [],
}
for tag_data in api_result['tags']:
output['tags'].append((tag_data['name'], tag_data['confidence']))
for caption in api_result['description']['captions']:
output['captions'].append((caption['text'], caption['confidence']))
return output
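# Usage sketch (hedged): api_keys would come from local config, and the
# raw JSON string returned by call_vision_api() must be parsed before
# get_standardized_result() can read it.
#     api_keys = {'microsoft': 'YOUR-KEY'}           # placeholder key
#     raw = call_vision_api('photo.jpg', api_keys)   # hypothetical file
#     print(get_standardized_result(json.loads(raw)))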
|
from django.db import models
from django.contrib.auth.models import User, Group
from django.utils.translation import ugettext_lazy as _
from django.core.validators import RegexValidator
from django.conf import settings
class Repository(models.Model):
"""
Git repository
"""
# basic info
name = models.CharField(
max_length=64,
validators=[RegexValidator(regex=r'^[^\x00-\x2c\x2f\x3a-\x40\x5b-\x5e\x60\x7b-\x7f\s]+$')],
verbose_name=_('name'),
help_text=_('Name of the repository, cannot contain special characters other than hyphens.'),
)
description = models.TextField(blank=True, verbose_name=_('description'))
# owner
user = models.ForeignKey(
User,
blank=True,
null=True,
related_name='repositories',
on_delete=models.SET_NULL,
verbose_name=_('user'),
help_text=_('Owner of the repository. Repository path will be prefixed by owner\'s username.'),
)
# access control
users = models.ManyToManyField(
User,
blank=True,
verbose_name=_('users'),
        help_text=_('These users have write access to the repository.'),
)
groups = models.ManyToManyField(
Group,
blank=True,
verbose_name=_('groups'),
        help_text=_('Users in these groups have write access to the repository.'),
)
is_private = models.BooleanField(
default=True,
verbose_name=_('is private'),
help_text=_('Restrict read access to specified users and groups.'),
)
# meta
created = models.DateTimeField(auto_now_add=True, verbose_name=_('created'))
modified = models.DateTimeField(auto_now=True, verbose_name=_('modified'))
class Meta:
verbose_name = _('repository')
verbose_name_plural = _('repositories')
ordering = ['user', 'name']
unique_together = ['user', 'name']
def __unicode__(self):
if self.user:
return u'%s/%s' % (self.user.username, self.name)
return u'./%s' % (self.name)
def can_read(self, user):
if not user and settings.PROTECTED:
return False
if not self.is_private:
return True
return self.can_write(user)
def can_write(self, user):
if not user:
return False
if user.id == self.user_id:
return True
if self.users.filter(pk=user.id).exists():
return True
if self.groups.filter(user__pk=user.id).exists():
return True
return False
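# Behaviour sketch for the access-control methods (Django shell, names
# hypothetical): the owner always has write access, listed users and
# group members can write, and public repositories are readable by
# anyone unless settings.PROTECTED forces authentication.
#     repo = Repository.objects.create(name='demo', user=alice)
#     repo.can_write(alice)   # True  (owner)
#     repo.can_read(None)     # False when settings.PROTECTED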
|
import re
def read_lua():
PATTERN = r'\s*\[(?P<id>\d+)\] = {\s*unidentifiedDisplayName = ' \
r'"(?P<unidentifiedDisplayName>[^"]+)",\s*unidentifie' \
r'dResourceName = "(?P<unidentifiedResourceName>[^"]+' \
r')",\s*unidentifiedDescriptionName = {\s*"(?P<uniden' \
r'tifiedDescriptionName>[^=]+)"\s*},\s*identifiedDisp' \
r'layName = "(?P<identifiedDisplayName>[\S ]+)",\s*id' \
r'entifiedResourceName = "(?P<identifiedResourceName>' \
r'[\S ]+)",\s*identifiedDescriptionName = {\s*"(?P<id' \
r'entifiedDescriptionName>[^=]+)"\s*},\s*slotCount = ' \
r'(?P<slotCount>\d{1}),\s*ClassNum = (?P<ClassNum>\d{' \
r'1})\s*}'
PATTERN = re.compile(PATTERN)
with open('testcase.txt', encoding='utf8', errors='ignore') as file:
test = PATTERN.findall(file.read())
for item in test:
if item[0] == '502':
print(item)
print(len(test))
return 0
"""
for group in test.groupdict():
for k, v in group.items():
print(k + ' : ' + v)
print()
"""
read_lua()
|
from django.apps import AppConfig
class JcvrbaseappConfig(AppConfig):
name = 'jcvrbaseapp'
|
import time
import random
import unittest
from qiniuManager.progress import *
class Pro(object):
def __init__(self):
self.progressed = 0
self.total = 100
self.title = 'test'
self.chunked = False
self.chunk_recved = 0
self.start = time.time()
@bar(100, '=')
def loader(self):
self._normal_loader()
self.title = "固定长度"
@bar(fill='x')
def loader_x(self):
self._normal_loader()
self.title = "x"
@bar()
def auto_loader(self):
self._normal_loader()
self.title = "长度占满宽度"
def _normal_loader(self):
time.sleep(0.01)
self.progressed += 1
def _chunked_loader(self):
self.chunked = True
time.sleep(0.01)
self.chunk_recved += random.randrange(3, 1000000)
if time.time() - self.start > 5:
self.progressed = self.total
@bar()
def chunked_loader(self):
self._chunked_loader()
self.title = "full width"
@bar(100)
def fixed_chunked_loader(self):
self._chunked_loader()
self.title = "fixed width"
class ProgressTester(unittest.TestCase):
def test_100_progress(self):
print("进度条换行")
Pro().loader()
Pro().loader_x()
def test_auto_width_progress(self):
print("进度条换行")
Pro().auto_loader()
def test_disable_progress(self):
pro = Pro()
pro.disable_progress = True
pro.title = "无进度条,也就是说应该看不到这串字符才对"
pro.loader()
def test_chunked_progress(self):
print("进度条换行")
Pro().chunked_loader()
def test_fixed_chunked_progress(self):
print("进度条换行")
Pro().fixed_chunked_loader()
if __name__ == '__main__':
unittest.main(verbosity=2)
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.forms.widgets
class Migration(migrations.Migration):
dependencies = [
('sshcomm', '0002_auto_20170118_1702'),
]
operations = [
migrations.AlterField(
model_name='userdata',
name='user_name',
field=models.CharField(max_length=128),
),
migrations.AlterField(
model_name='userdata',
name='user_password',
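            # NOTE: a widget class as verbose_name is almost certainly a
            # models.py slip (widget= belongs on form fields), which the
            # auto-generated migration records verbatim.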
field=models.CharField(max_length=128, verbose_name=django.forms.widgets.PasswordInput),
),
]
|
from bson.objectid import ObjectId
import json
class Room():
def __init__(self, players_num, objectid, table, current_color='purple'):
if players_num:
self.players_num = players_num
else:
self.players_num = 0
for el in ['p', 'b', 'g', 'r']:
if el in table:
self.players_num += 1
self.objectid = objectid
self.current_color = current_color
self.players_dict = {}
        self.already_ex = []
self.colors = []
self.winner = None
for col in ['p', 'b', 'g', 'r']:
if col in table:
self.colors.append(
{'p': 'purple',
'b': 'blue',
'g': 'green',
'r': 'red'}[col])
if current_color in self.colors:
self.current_color = current_color
else:
self.current_color = self.colors[0]
self.users_nicks = {}
self.color_player_dict = {'purple': None, 'blue': None, 'green': None, 'red': None}
self.player_color_dict = {}
self.status = 'waiting'
def get_player_by_color(self, color):
if color in self.color_player_dict:
return self.players_dict[self.color_player_dict[color]]
return None
def get_color_by_player(self, player_id):
if player_id in self.player_color_dict:
return self.player_color_dict[player_id]
return None
def add_player(self, player_id, name):
self.players_dict[player_id] = False
self.users_nicks[player_id] = name
for color in self.colors:
if not self.color_player_dict[color]:
self.color_player_dict[color] = player_id
self.player_color_dict[player_id] = color
break
def dell_player(self, player_id):
self.players_dict[player_id] = False
return self
def change_row(self, row, i, to):
return row[:i] + to + row[i + 1:]
def update_table(self, move, table):
print('Table updating')
pymove = json.loads(move)
pytable = json.loads(table)
print('Old table:')
for row in pytable:
print(' ', row)
x0, y0 = int(pymove['X0']), int(pymove['Y0'])
x1, y1 = int(pymove['X1']), int(pymove['Y1'])
print('Move from ({}, {}) to ({}, {})'.format(x0, y0, x1, y1))
if ((abs(x1 - x0) > 1) or (abs(y1 - y0) > 1)):
pytable[x0] = self.change_row(pytable[x0], y0, 'e')
for i in range(-1, 2):
for j in range(-1, 2):
if (x1 + i < len(pytable)) and (x1 + i > -1):
if (y1 + j < len(pytable[x1])) and (y1 + j > -1):
if pytable[x1 + i][y1 + j] != 'e':
pytable[x1 + i] = self.change_row(pytable[x1 + i], y1 + j, self.current_color[0].lower())
pytable[x1] = self.change_row(pytable[x1], y1, self.current_color[0].lower())
res = json.dumps(pytable)
if 'e' not in res:
r_count = (res.count('r'), 'red')
b_count = (res.count('b'), 'blue')
g_count = (res.count('g'), 'green')
p_count = (res.count('p'), 'purple')
sort_list = [r_count, b_count, p_count, g_count]
sort_list.sort()
self.winner = sort_list[-1][1]
print('New table:')
for row in pytable:
print(' ', row)
return res
def can_move(self, table):
pytable = json.loads(table)
for row_id, row in enumerate(pytable):
for char_id in range(len(row)):
char = row[char_id]
if char == self.current_color[0].lower():
for i in range(-2, 3):
for j in range(-2, 3):
if (row_id + i < len(pytable)) and (row_id + i > -1):
if (char_id + j < len(row)) and (char_id + j > -1):
if pytable[row_id + i][char_id + j] == 'e':
return True
return False
def change_color(self, table):
        print('Color changing')
colors = self.colors
self.current_color = colors[
(colors.index(self.current_color) + 1) % self.players_num]
i = 1
while ((not self.players_dict[self.color_player_dict[self.current_color]]) or (not self.can_move(table))) and (i <= 5):
self.current_color = colors[
(colors.index(self.current_color) + 1) % self.players_num]
i += 1
if not self.can_move(table):
return None
return self.current_color
class RoomsManager():
def __init__(self, db):
# dict of rooms by their obj_id
self.db = db
self.rooms_dict = {}
def get_room(self, objectid):
if objectid not in self.rooms_dict:
rid = objectid
room_in_db = self.db.rooms.find_one({'_id': ObjectId(rid)})
if room_in_db:
print('Room', objectid, 'extrapolated from db')
new_room = Room(
int(room_in_db['players_num']), rid, room_in_db['table'])
new_room.current_color = room_in_db['current_color']
for user_id in room_in_db['players']:
player = room_in_db['players'][user_id]
new_room.color_player_dict[player['color']] = user_id
new_room.player_color_dict[user_id] = player['color']
new_room.users_nicks[user_id] = player['nick']
new_room.players_dict[user_id] = None
self.rooms_dict[rid] = new_room
else:
return None
return self.rooms_dict[objectid]
def add_room(self, room):
self.rooms_dict[room.objectid] = room
def rooms(self):
for objectid in self.rooms_dict:
yield self.rooms_dict[objectid]
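# Move-format sketch: update_table() takes JSON strings. A move of more
# than one cell empties the origin square (a jump), a short move clones
# the piece, and every neighbour of the landing cell is converted to the
# mover's colour.
#     room.update_table('{"X0": 0, "Y0": 0, "X1": 0, "Y1": 1}',
#                       json.dumps(["pe", "ee"]))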
|
class Solution:
def countSegments(self, s: 'str') -> 'int':
return len(s.split())
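# str.split() without arguments trims the ends and collapses runs of
# whitespace, so the segments are exactly the whitespace-separated words:
#     assert Solution().countSegments("Hello, my name is John") == 5
#     assert Solution().countSegments("   ") == 0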
|
from sqlalchemy import create_engine, ForeignKey, Column, Integer, String, Date, Boolean
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.ext.declarative import declarative_base
from . import app
from flask_login import UserMixin
engine = create_engine(app.config["SQLALCHEMY_DATABASE_URI"])
Base = declarative_base()
Session = sessionmaker(bind=engine)
session = Session()
class Fighter(Base):
__tablename__ = "fighters"
id = Column(Integer, primary_key=True)
first_name = Column(String(1024), nullable=False)
last_name = Column(String(1024), nullable=False)
nickname = Column(String(1024))
gender = Column(String(128), nullable=False)
dob = Column(Date)
age = Column(Integer)
promotion = Column(String(1024), nullable=False)
profile_image = Column(String(1024))
right_full = Column(String(1024))
left_full = Column(String(1024))
height = Column(Integer)
weight = Column(String(128), nullable=False)
win = Column(Integer, nullable=False)
loss = Column(Integer, nullable=False)
draw = Column(Integer)
no_contest = Column(Integer)
def as_dictionary(self):
fighter = {
"id": self.id,
"first_name": self.first_name,
"last_name": self.last_name,
"nickname": self.nickname,
"gender": self.gender,
"age": self.age,
"promotion": self.promotion,
"profile_image": self.profile_image,
"right_full": self.right_full,
"left_full": self.left_full,
"height": self.height,
"weight": self.weight,
"win": self.win,
"loss": self.loss,
"draw": self.draw,
"no_contest": self.no_contest,
}
return fighter
class User(Base, UserMixin):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
email = Column(String(1024), unique=True, nullable=False)
password = Column(String(128), nullable=False)
user_history = relationship("History", backref="user")
class History(Base):
__tablename__ = "history"
id = Column(Integer, primary_key=True)
fight_date = Column(String, nullable=False)
has_occured = Column(Boolean, nullable=False)
red_corner = Column(String(1024), nullable=False)
blue_corner = Column(String(1024), nullable=False)
winner = Column(String(1024))
end_round = Column(String, nullable=False)
end_time = Column(String, nullable=False)
method = Column(String, nullable=False)
visible = Column(Boolean, nullable=False)
user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
def as_dictionary(self):
results = {
"id": self.id,
"fight_date": self.fight_date,
"has_occured": self.has_occured,
"red_corner": self.red_corner,
"blue_corner": self.blue_corner,
"winner": self.winner,
"end_round": self.end_round,
"end_time": self.end_time,
"method": self.method,
"user_id": self.user_id,
}
return results
class Event(Base):
__tablename__ = "events"
id = Column(Integer, primary_key=True)
event_date = Column(String(256))
base_title = Column(String(1024), nullable=False)
title_tag_line = Column(String(1024))
#feature_image = Column(String(1024))
arena = Column(String(1024))
location = Column(String(1024))
event_id = Column(Integer)
def as_dictionary(self):
event = {
"id": self.id,
"event_date": self.event_date,
"base_title": self.base_title,
"title_tag_line": self.title_tag_line,
#"feature_image": self.feature_image,
"arena": self.arena,
"location": self.location,
"event_id": self.event_id
}
return event
Base.metadata.create_all(engine)
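# Usage sketch (hedged, field values invented): the module-level session
# can persist rows once create_all() above has built the tables.
#     fighter = Fighter(first_name="Jane", last_name="Doe", gender="female",
#                       promotion="UFC", weight="135", win=10, loss=2)
#     session.add(fighter)
#     session.commit()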
|
"""Pyramidal bidirectional LSTM encoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class PyramidBLSTMEncoder(object):
"""Pyramidal bidirectional LSTM Encoder.
Args:
num_units (int): the number of units in each layer
num_layers (int): the number of layers
lstm_impl (string, optional): BasicLSTMCell or LSTMCell or
LSTMBlockCell or LSTMBlockFusedCell or CudnnLSTM.
Choose the background implementation of tensorflow.
Default is LSTMBlockCell.
use_peephole (bool, optional): if True, use peephole
parameter_init (float, optional): the range of uniform distribution to
initialize weight parameters (>= 0)
clip_activation (float, optional): the range of activation clipping (> 0)
# num_proj (int, optional): the number of nodes in the projection layer
concat (bool, optional):
name (string, optional): the name of encoder
"""
def __init__(self,
num_units,
num_layers,
lstm_impl,
use_peephole,
parameter_init,
clip_activation,
num_proj,
concat=False,
name='pblstm_encoder'):
assert num_proj != 0
assert num_units % 2 == 0, 'num_unit should be even number.'
self.num_units = num_units
self.num_proj = None
self.num_layers = num_layers
self.lstm_impl = lstm_impl
self.use_peephole = use_peephole
self.parameter_init = parameter_init
self.clip_activation = clip_activation
self.name = name
def _build(self, inputs, inputs_seq_len, keep_prob, is_training):
"""Construct Pyramidal Bidirectional LSTM encoder.
Args:
inputs (placeholder): A tensor of size`[B, T, input_size]`
inputs_seq_len (placeholder): A tensor of size` [B]`
keep_prob (placeholder, float): A probability to keep nodes
in the hidden-hidden connection
is_training (bool):
Returns:
outputs: Encoder states, a tensor of size
`[T, B, num_units (num_proj)]`
final_state: A final hidden state of the encoder
"""
initializer = tf.random_uniform_initializer(
minval=-self.parameter_init, maxval=self.parameter_init)
# Hidden layers
outputs = inputs
for i_layer in range(1, self.num_layers + 1, 1):
with tf.variable_scope('pblstm_hidden' + str(i_layer),
initializer=initializer) as scope:
lstm_fw = tf.contrib.rnn.LSTMCell(
self.num_units,
use_peepholes=self.use_peephole,
cell_clip=self.clip_activation,
initializer=initializer,
num_proj=None,
forget_bias=1.0,
state_is_tuple=True)
lstm_bw = tf.contrib.rnn.LSTMCell(
self.num_units,
use_peepholes=self.use_peephole,
cell_clip=self.clip_activation,
initializer=initializer,
num_proj=self.num_proj,
forget_bias=1.0,
state_is_tuple=True)
# Dropout for the hidden-hidden connections
lstm_fw = tf.contrib.rnn.DropoutWrapper(
lstm_fw, output_keep_prob=keep_prob)
lstm_bw = tf.contrib.rnn.DropoutWrapper(
lstm_bw, output_keep_prob=keep_prob)
                if i_layer > 1:
                    # Halve the time resolution between layers (the first
                    # layer sees the full-resolution inputs).
                    # Convert to time-major: `[T, B, input_size]`
                    outputs = tf.transpose(outputs, (1, 0, 2))
max_time = tf.shape(outputs)[0]
max_time_half = tf.floor(max_time / 2) + 1
# Apply concat_fn to each tensor in outputs along
# dimension 0 (times-axis)
i_time = tf.constant(0)
final_time, outputs, tensor_list = tf.while_loop(
cond=lambda t, hidden, tensor_list: t < max_time,
body=lambda t, hidden, tensor_list: self._concat_fn(
t, hidden, tensor_list),
loop_vars=[i_time, outputs, tf.Variable([])],
shape_invariants=[i_time.get_shape(),
outputs.get_shape(),
tf.TensorShape([None])])
outputs = tf.stack(tensor_list, axis=0)
inputs_seq_len = tf.cast(tf.floor(
tf.cast(inputs_seq_len, tf.float32) / 2),
tf.int32)
# Transpose to `[batch_size, time, input_size]`
outputs = tf.transpose(outputs, (1, 0, 2))
(outputs_fw, outputs_bw), final_state = tf.nn.bidirectional_dynamic_rnn(
cell_fw=lstm_fw,
cell_bw=lstm_bw,
inputs=outputs,
sequence_length=inputs_seq_len,
dtype=tf.float32,
time_major=False,
scope=scope)
# NOTE: initial states are zero states by default
# Concatenate each direction
outputs = tf.concat(axis=2, values=[outputs_fw, outputs_bw])
return outputs, final_state
def _concat_fn(self, current_time, x, tensor_list):
"""Concatenate each 2 time steps to reduce time resolution.
Args:
current_time: The current timestep
x: A tensor of size `[max_time, batch_size, feature_dim]`
            tensor_list: A tensor of size `[t, batch_size, feature_dim * 2]`
Returns:
current_time: current_time + 2
x: A tensor of size `[max_time, batch_size, feature_dim]`
            tensor_list: A tensor of size `[t + 1, batch_size, feature_dim * 2]`
"""
print(tensor_list)
print(current_time)
print('-----')
batch_size = tf.shape(x)[1]
feature_dim = x.get_shape().as_list()[2]
# Concat features in 2 timesteps
concat_x = tf.concat(
axis=0,
values=[tf.reshape(x[current_time],
shape=[1, batch_size, feature_dim]),
tf.reshape(x[current_time + 1],
shape=[1, batch_size, feature_dim])])
# Reshape to `[1, batch_size, feature_dim * 2]`
concat_x = tf.reshape(concat_x,
shape=[1, batch_size, feature_dim * 2])
tensor_list = tf.concat(axis=0, values=[tensor_list, [concat_x]])
# Skip 2 timesteps
current_time += 2
return current_time, x, tensor_list
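# A minimal graph-construction sketch (assumption: TensorFlow 1.x with
# tf.contrib available; the feature size and hyperparameters are illustrative).
if __name__ == '__main__':
    inputs = tf.placeholder(tf.float32, shape=[None, None, 40])   # [B, T, input_size]
    inputs_seq_len = tf.placeholder(tf.int32, shape=[None])       # [B]
    keep_prob = tf.placeholder(tf.float32, shape=[])
    encoder = PyramidBLSTMEncoder(
        num_units=256, num_layers=3, lstm_impl='LSTMCell',
        use_peephole=True, parameter_init=0.1, clip_activation=5.0,
        num_proj=None)
    outputs, final_state = encoder._build(
        inputs, inputs_seq_len, keep_prob, is_training=True)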
|
import numpy as np
import pandas as pd
# Assumption: the analysis below expects an "arts" dataset with at least the
# columns artist_name, title, category and execution_date; the CSV path here
# is illustrative, not from the original source.
arts = pd.read_csv("arts.csv")
arts["execution_date"] = arts["execution_date"].str.findall(r"([0-9]+)").str[0]
arts["execution_date"] = arts["execution_date"].astype(float)
arts.head()
arts["execution_date"] = arts["execution_date"].apply(lambda x: 1900 + x if x < 100 else x)
arts.head()
arts.groupby("artist_name").mean().head()
arts.groupby("category").mean().head()
artworks_by_artist = arts.groupby("artist_name")[["title"]].aggregate(np.count_nonzero)
artworks_by_artist.sort_values("title", ascending=False).head()
artworks_by_category = arts.groupby("category")[["title"]].aggregate(np.count_nonzero)
artworks_by_category.sort_values("title", ascending=False).head()
arts['title_length'] = arts['title'].str.len()
length_by_category = arts.groupby("category")[["title_length"]].aggregate(np.mean)
length_by_category.sort_values("title_length", ascending=False).head()
artworks_by_year = arts.groupby("execution_date")[["title"]].aggregate(np.count_nonzero)
artworks_by_year.sort_values("title", ascending=False).head()
period_min = arts.groupby("artist_name")[['execution_date']].aggregate(np.min)
period_max = arts.groupby("artist_name")[['execution_date']].aggregate(np.max)
(period_max - period_min).sort_values("execution_date", ascending=False).head()
|
import os
import shlex
from .base import Output
class AppleSay(Output):
"""Speech output supporting the Apple Say subsystem."""
name = 'Apple Say'
    def __init__(self, voice='Alex', rate='300'):
self.voice = voice
self.rate = rate
super(AppleSay, self).__init__()
def is_active(self):
return not os.system('which say')
    def speak(self, text, interrupt=0):
        if interrupt:
            self.silence()
        # Quote the text so shell metacharacters can't break out of the command.
        os.system('say -v %s -r %s %s &' % (self.voice, self.rate, shlex.quote(text)))
def silence(self):
os.system('killall say')
output_class = AppleSay
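# A minimal usage sketch (assumption: macOS with the `say` command on PATH;
# the relative import above means this module is normally used as part of its
# package rather than run directly).
if __name__ == '__main__':
    tts = AppleSay(voice='Alex', rate='300')
    if tts.is_active():
        tts.speak("Hello from the Apple Say backend", interrupt=1)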
|
from jupyter_server.utils import url_path_join as ujoin
from .config import Lmod as LmodConfig
from .handler import default_handlers, PinsHandler
def _jupyter_server_extension_paths():
return [{"module": "jupyterlmod"}]
def _jupyter_nbextension_paths():
return [
dict(
section="tree", src="static", dest="jupyterlmod", require="jupyterlmod/main"
)
]
def load_jupyter_server_extension(nbapp):
"""
Called when the extension is loaded.
Args:
nbapp : handle to the Notebook webserver instance.
"""
nbapp.log.info("Loading lmod extension")
lmod_config = LmodConfig(parent=nbapp)
launcher_pins = lmod_config.launcher_pins
web_app = nbapp.web_app
base_url = web_app.settings["base_url"]
for path, class_ in default_handlers:
web_app.add_handlers(".*$", [(ujoin(base_url, path), class_)])
web_app.add_handlers(".*$", [
(ujoin(base_url, 'lmod/launcher-pins'), PinsHandler, {'launcher_pins': launcher_pins}),
])
|
from .models import Event
from django.views.generic import DetailView, ListView
class EventListView(ListView):
template_name = 'agenda/event_list.html'
queryset = Event.objects.upcoming()
paginate_by = 20
class EventArchiveView(EventListView):
queryset = Event.objects.past()
class EventDetailView(DetailView):
model = Event
template_name = 'agenda/event_detail.html'
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Prueba',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nombre', models.CharField(max_length=100)),
],
options={
},
bases=(models.Model,),
),
]
|
"""
Download NLTK data
"""
__author__ = "Manan Kalra"
__email__ = "manankalr29@gmail.com"
import nltk
nltk.download()
|
import numpy as np
import ctypes
import struct
import time
from .ioctl_numbers import _IOR, _IOW
from fcntl import ioctl
SPI_IOC_MAGIC = ord("k")
SPI_IOC_RD_MODE = _IOR(SPI_IOC_MAGIC, 1, "=B")
SPI_IOC_WR_MODE = _IOW(SPI_IOC_MAGIC, 1, "=B")
SPI_IOC_RD_LSB_FIRST = _IOR(SPI_IOC_MAGIC, 2, "=B")
SPI_IOC_WR_LSB_FIRST = _IOW(SPI_IOC_MAGIC, 2, "=B")
SPI_IOC_RD_BITS_PER_WORD = _IOR(SPI_IOC_MAGIC, 3, "=B")
SPI_IOC_WR_BITS_PER_WORD = _IOW(SPI_IOC_MAGIC, 3, "=B")
SPI_IOC_RD_MAX_SPEED_HZ = _IOR(SPI_IOC_MAGIC, 4, "=I")
SPI_IOC_WR_MAX_SPEED_HZ = _IOW(SPI_IOC_MAGIC, 4, "=I")
SPI_CPHA = 0x01 # /* clock phase */
SPI_CPOL = 0x02 # /* clock polarity */
SPI_MODE_0 = (0|0) # /* (original MicroWire) */
SPI_MODE_1 = (0|SPI_CPHA)
SPI_MODE_2 = (SPI_CPOL|0)
SPI_MODE_3 = (SPI_CPOL|SPI_CPHA)
class Lepton(object):
"""Communication class for FLIR Lepton module on SPI
Args:
spi_dev (str): Location of SPI device node. Default '/dev/spidev0.0'.
"""
ROWS = 60
COLS = 80
VOSPI_FRAME_SIZE = COLS + 2
VOSPI_FRAME_SIZE_BYTES = VOSPI_FRAME_SIZE * 2
MODE = SPI_MODE_3
BITS = 8
SPEED = 18000000
SPIDEV_MESSAGE_LIMIT = 24
def __init__(self, spi_dev = "/dev/spidev0.0"):
self.__spi_dev = spi_dev
self.__txbuf = np.zeros(Lepton.VOSPI_FRAME_SIZE, dtype=np.uint16)
# struct spi_ioc_transfer {
# __u64 tx_buf;
# __u64 rx_buf;
# __u32 len;
# __u32 speed_hz;
# __u16 delay_usecs;
# __u8 bits_per_word;
# __u8 cs_change;
# __u32 pad;
# };
self.__xmit_struct = struct.Struct("=QQIIHBBI")
self.__msg_size = self.__xmit_struct.size
self.__xmit_buf = np.zeros((self.__msg_size * Lepton.ROWS), dtype=np.uint8)
self.__msg = _IOW(SPI_IOC_MAGIC, 0, self.__xmit_struct.format)
self.__capture_buf = np.zeros((Lepton.ROWS, Lepton.VOSPI_FRAME_SIZE, 1), dtype=np.uint16)
for i in range(Lepton.ROWS):
self.__xmit_struct.pack_into(self.__xmit_buf, i * self.__msg_size,
self.__txbuf.ctypes.data, # __u64 tx_buf;
self.__capture_buf.ctypes.data + Lepton.VOSPI_FRAME_SIZE_BYTES * i, # __u64 rx_buf;
Lepton.VOSPI_FRAME_SIZE_BYTES, # __u32 len;
Lepton.SPEED, # __u32 speed_hz;
0, # __u16 delay_usecs;
Lepton.BITS, # __u8 bits_per_word;
1, # __u8 cs_change;
0) # __u32 pad;
def __enter__(self):
# "In Python 3 the only way to open /dev/tty under Linux appears to be 1) in binary mode and 2) with buffering disabled."
self.__handle = open(self.__spi_dev, "wb+", buffering=0)
ioctl(self.__handle, SPI_IOC_RD_MODE, struct.pack("=B", Lepton.MODE))
ioctl(self.__handle, SPI_IOC_WR_MODE, struct.pack("=B", Lepton.MODE))
ioctl(self.__handle, SPI_IOC_RD_BITS_PER_WORD, struct.pack("=B", Lepton.BITS))
ioctl(self.__handle, SPI_IOC_WR_BITS_PER_WORD, struct.pack("=B", Lepton.BITS))
ioctl(self.__handle, SPI_IOC_RD_MAX_SPEED_HZ, struct.pack("=I", Lepton.SPEED))
ioctl(self.__handle, SPI_IOC_WR_MAX_SPEED_HZ, struct.pack("=I", Lepton.SPEED))
return self
def __exit__(self, type, value, tb):
self.__handle.close()
@staticmethod
def capture_segment(handle, xs_buf, xs_size, capture_buf):
messages = Lepton.ROWS
iow = _IOW(SPI_IOC_MAGIC, 0, xs_size)
ioctl(handle, iow, xs_buf, True)
while (capture_buf[0] & 0x000f) == 0x000f: # byteswapped 0x0f00
ioctl(handle, iow, xs_buf, True)
messages -= 1
# NB: the default spidev bufsiz is 4096 bytes so that's where the 24 message limit comes from: 4096 / Lepton.VOSPI_FRAME_SIZE_BYTES = 24.97...
# This 24 message limit works OK, but if you really need to optimize the read speed here, this hack is for you:
# The limit can be changed when spidev is loaded, but since it is compiled statically into newer raspbian kernels, that means
# modifying the kernel boot args to pass this option. This works too:
# $ sudo chmod 666 /sys/module/spidev/parameters/bufsiz
# $ echo 65536 > /sys/module/spidev/parameters/bufsiz
# Then Lepton.SPIDEV_MESSAGE_LIMIT of 24 can be raised to 59
while messages > 0:
if messages > Lepton.SPIDEV_MESSAGE_LIMIT:
count = Lepton.SPIDEV_MESSAGE_LIMIT
else:
count = messages
iow = _IOW(SPI_IOC_MAGIC, 0, xs_size * count)
      ret = ioctl(handle, iow, xs_buf[xs_size * (Lepton.ROWS - messages):], True)
      if ret < 1:
        raise IOError("can't send {0} spi messages ({1})".format(count, ret))
messages -= count
def capture(self, data_buffer = None, log_time = False, debug_print = False, retry_reset = True):
"""Capture a frame of data.
Captures 80x60 uint16 array of non-normalized (raw 12-bit) data. Returns that frame and a frame_id (which
is currently just the sum of all pixels). The Lepton will return multiple, identical frames at a rate of up
    to ~27 Hz, with unique frames at only ~9 Hz, so the frame_id can help you avoid doing additional work
processing duplicate frames.
Args:
data_buffer (numpy.ndarray): Optional. If specified, should be ``(60,80,1)`` with `dtype`=``numpy.uint16``.
Returns:
tuple consisting of (data_buffer, frame_id)
"""
start = time.time()
if data_buffer is None:
data_buffer = np.ndarray((Lepton.ROWS, Lepton.COLS, 1), dtype=np.uint16)
elif data_buffer.ndim < 2 or data_buffer.shape[0] < Lepton.ROWS or data_buffer.shape[1] < Lepton.COLS or data_buffer.itemsize < 2:
raise Exception("Provided input array not large enough")
while True:
Lepton.capture_segment(self.__handle, self.__xmit_buf, self.__msg_size, self.__capture_buf[0])
if retry_reset and (self.__capture_buf[20, 0] & 0xFF0F) != 0x1400: # make sure that this is a well-formed frame, should find line 20 here
# Leave chip select deasserted for at least 185 ms to reset
if debug_print:
print("Garbage frame number reset waiting...")
time.sleep(0.185)
else:
break
self.__capture_buf.byteswap(True)
data_buffer[:,:] = self.__capture_buf[:,2:]
end = time.time()
if debug_print:
print("---")
for i in range(Lepton.ROWS):
fid = self.__capture_buf[i, 0, 0]
crc = self.__capture_buf[i, 1, 0]
fnum = fid & 0xFFF
print("0x{0:04x} 0x{1:04x} : Row {2:2} : crc={1}".format(fid, crc, fnum))
print("---")
if log_time:
print("frame processed int {0}s, {1}hz".format(end-start, 1.0/(end-start)))
# TODO: turn on telemetry to get real frame id, sum on this array is fast enough though (< 500us)
return data_buffer, data_buffer.sum()
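# A minimal capture sketch (assumption: a FLIR Lepton wired to /dev/spidev0.0,
# e.g. on a Raspberry Pi; this requires real hardware to run).
if __name__ == '__main__':
  with Lepton("/dev/spidev0.0") as lepton:
    frame, frame_id = lepton.capture(log_time=True)
    print(frame.shape, frame_id)  # (60, 80, 1) array of raw 12-bit values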
|
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from scipy.interpolate import interp1d,splev,splrep
def extractSpectrum(filename):
"""
NAME:
extractSpectrum
PURPOSE:
To open an input fits file from SDSS and extract the relevant
components, namely the flux and corresponding wavelength.
INPUTS:
filename The path and filename (including the extension) to the
file to be read in.
OUTPUTS:
lam The wavelengths, in angstrom, of the flux values
flux The actual flux, in arbitrary units
EXAMPLE:
        lam, flux = extractSpectrum('path/to/file/filename.fits')
"""
hdu = fits.open(filename) #Open the file using astropy
data = hdu[1].data #Data is in 2nd component of HDU
flux = data['flux'] #Get flux from read in dict
lam = 10**(data['loglam']) #Get wavelength, make it not log10
hdu.close() #Close the file, we're done with it
return lam, flux #Return the values as numpy arrays
def interpolate(points, lam, flux, method):
"""
NAME:
interpolate
PURPOSE:
General purpose function that can call and use various scipy.interpolate
        methods. Defined for convenience.
INPUTS:
points Set of new points to get interpolated values for.
lam The wavelengths of the data points
flux The fluxes of the data points
        method The method of interpolation to use. Valid values include
'interp1d:linear', 'interp1d:quadratic', and 'splrep'.
OUTPUTS:
Interpolated set of values for each corresponding input point.
EXAMPLE:
        interpFlux = interpolate(interpLam, lam, flux, 'splrep')
"""
if method == 'interp1d:linear':
f = interp1d(lam, flux, assume_sorted = True)
return f(points)
if method == 'interp1d:quadratic':
f = interp1d(lam, flux, kind = 'quadratic', assume_sorted = True)
return f(points)
if method == 'splrep':
return splev(points, splrep(lam, flux))
raise Exception("You didn't choose a proper interpolating method")
fileName = 'spec-4053-55591-0938.fits'
lam, flux = extractSpectrum(fileName)
plt.figure(1)
plt.plot(lam, flux, '-o', lw = 1.5, c = (0.694,0.906,0.561),
mec = 'none', ms = 4, label = 'Original data')
plt.xlabel('Wavelength', fontsize = 16)
plt.ylabel('Flux', fontsize = 16)
plt.ylim(0,1.1*max(flux))
interpLam = np.arange(4000,10000,1)
interpFlux = interpolate(interpLam, lam, flux, 'splrep') #This is my own method
plt.plot(interpLam, interpFlux, '-k', label = 'Interpolated')
plt.legend(loc = 0)
plt.show(block = False)
print('Done...')
|
import unittest
import random
from pygraph.classes.graph import graph
class SWIM(object):
def __init__(self, graph):
self.graph = graph
def edge_alive(self, nodeA, nodeB, alive):
'''
edge_alive(A, B, True|False)
'''
edge = (nodeA, nodeB)
if alive:
self.graph.add_edge(edge)
else:
self.graph.del_edge(edge)
def node_alive(self, node, alive):
'''
node_alive(A, True|False)
'''
if alive:
self.graph.node_attributes(node).clear()
else:
self.graph.node_attributes(node).append("dead")
def ping(self, nodeStart, nodeEnd, k):
'''
NodeStart to ping NodeEnd directly or indirectly through
K random neighbors. Return True if nodeEnd receives ping,
or False otherwise
'''
g = self.graph
# Check if direct ping works
if g.has_edge((nodeStart, nodeEnd)) and \
"dead" not in g.node_attributes(nodeEnd):
return True
# Pick k random neighbors and let them ping end node
for neighbor in self._random_neighbors(nodeStart, k):
if self.ping(neighbor, nodeEnd, 0):
return True
# All pings have failed
return False
def _random_neighbors(self, node, b):
neighbors = self.graph.neighbors(node)
if len(neighbors) <= b:
return neighbors
else:
return random.sample(neighbors, b)
class SWIMTest(unittest.TestCase):
def setUp(self):
g = graph()
        g.add_nodes(range(10))
g.complete()
self.graph = g
self.swim = SWIM(g)
def test_good_ping(self):
swim = self.swim
self.assertTrue(swim.ping(0, 1, 0))
self.assertTrue(swim.ping(1, 3, 0))
def test_dead_edge_ping(self):
swim = self.swim
swim.edge_alive(0, 1, False)
self.assertFalse(swim.ping(0, 1, 0))
self.assertTrue(swim.ping(0, 1, 1))
def test_dead_node_ping(self):
swim = self.swim
swim.node_alive(2, False)
self.assertFalse(swim.ping(0, 2, 0))
self.assertFalse(swim.ping(0, 2, 3))
if __name__ == '__main__':
unittest.main()
|
import numpy as np
from astropy.coordinates import EarthLocation, SkyCoord
__all__ = ['MWA_LOC', 'MWA_FIELD_EOR0', 'MWA_FIELD_EOR1', 'MWA_FIELD_EOR2',
'MWA_FREQ_EOR_ALL_40KHZ', 'MWA_FREQ_EOR_ALL_80KHZ',
'MWA_FREQ_EOR_HI_40KHZ', 'MWA_FREQ_EOR_HI_80KHZ',
'MWA_FREQ_EOR_LOW_40KHZ', 'MWA_FREQ_EOR_LOW_80KHZ',
'HERA_ANT_DICT', 'F21']
F21 = 1420.405751786e6
MWA_LOC = EarthLocation(lat='-26d42m11.94986s', lon='116d40m14.93485s',
                        height=377.827)
MWA_FIELD_EOR0 = SkyCoord(ra='0.0h', dec='-30.0d')
MWA_FIELD_EOR1 = SkyCoord(ra='4.0h', dec='-30.0d')
MWA_FIELD_EOR2 = SkyCoord(ra='10.33h', dec='-10.0d')
MWA_FREQ_EOR_LOW_40KHZ = np.arange(138.895, 167.055, 0.04)
MWA_FREQ_EOR_HI_40KHZ = np.arange(167.055, 195.255, 0.04)
MWA_FREQ_EOR_ALL_40KHZ = np.arange(138.895, 195.255, 0.04)
MWA_FREQ_EOR_LOW_80KHZ = np.arange(138.915, 167.075, 0.08)
MWA_FREQ_EOR_HI_80KHZ = np.arange(167.075, 195.275, 0.08)
MWA_FREQ_EOR_ALL_80KHZ = np.arange(138.915, 195.275, 0.08)
HERA_ANT_DICT = {'hera19': 3, 'hera37': 4, 'hera61': 5, 'hera91': 6,
'hera127': 7, 'hera169': 8, 'hera217': 9, 'hera271': 10,
'hera331': 11}
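# A small worked example (assumption: the MWA_FREQ_* arrays are in MHz while
# F21 is in Hz, so frequencies are scaled by 1e6 before computing redshift).
if __name__ == '__main__':
    freq_hz = MWA_FREQ_EOR_LOW_40KHZ * 1e6
    redshift = F21 / freq_hz - 1  # z at which the 21 cm line is observed in-band
    print(redshift.min(), redshift.max())  # roughly z ~ 7.5 to 9.2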
|
__all__ = ['LEAGUE_PROPERTIES']
LEAGUE_PROPERTIES = {
"PL": {
"rl": [18, 20],
"cl": [1, 4],
"el": [5, 5],
},
"EL1": {
"rl": [21, 24],
"cl": [1, 2],
"el": [3, 6]
},
"EL2": {
"rl": [21, 24],
"cl": [1, 2],
"el": [3, 6]
},
"ELC": {
"rl": [22, 24],
"cl": [1,2],
"el": [3,6]
},
"BL1": {
"rl": [16, 18],
"cl": [1, 4],
"el": [5, 6]
},
"BL2": {
"rl": [16, 18],
"cl": [1, 2],
"el": [3, 3]
},
"BL3": {
"rl": [18, 20],
"cl": [1, 2],
"el": [3, 3]
},
"PD": {
"rl": [18,20],
"cl": [1,3],
"el": [4,6]
},
"SD": {
"rl": [19, 22],
"cl": [1, 2],
"el": [3, 6]
},
"SA": {
"rl": [18, 20],
"cl": [1, 3],
"el": [4, 5]
},
"PPL": {
"rl": [17, 18],
"cl": [1, 3],
"el": [4, 5]
},
"DED": {
"rl": [17, 18],
"cl": [1, 3],
"el": [4, 5]
},
"FL1": {
"rl": [19, 20],
"cl": [1, 3],
"el": [4, 4]
},
"FL2": {
"rl": [18, 20],
"cl": [1, 3],
"el": [0, 0]
},
"SB": {
"rl": [19, 22],
"cl": [1, 2],
"el": [3, 6]
},
"ENL": {
"rl": [22, 24],
"cl": [1,2],
"el": [3,6]
},
}
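# A minimal lookup sketch (assumption: "rl", "cl" and "el" hold inclusive
# [first, last] table positions for relegation, Champions-League and
# Europa-League qualification; that reading is inferred from the data, not
# documented here).
def position_outcome(league_code, position):
    """Return 'cl', 'el' or 'rl' if the position falls in that band, else None."""
    props = LEAGUE_PROPERTIES[league_code]
    for key in ("cl", "el", "rl"):
        lo, hi = props[key]
        if lo <= position <= hi:
            return key
    return None
# e.g. position_outcome("PL", 3) -> 'cl', position_outcome("PL", 19) -> 'rl'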
|
'''
This script demonstrates how to create a periodic Gaussian process
using the *gpiso* function.
'''
import numpy as np
import matplotlib.pyplot as plt
from sympy import sin, exp, pi
from rbf.basis import get_r, get_eps, RBF
from rbf.gproc import gpiso
np.random.seed(1)
period = 5.0
cls = 0.5 # characteristic length scale
var = 1.0 # variance
r = get_r() # get symbolic variables
eps = get_eps()
expr = exp(-sin(r*pi/period)**2/eps**2)
basis = RBF(expr)
gp = gpiso(basis, eps=cls, var=var)
t = np.linspace(-10, 10, 1000)[:,None]
sample = gp.sample(t) # draw a sample
mu, sigma = gp(t)  # evaluate mean and std. dev.
fig, ax = plt.subplots(figsize=(6, 4))
ax.grid(True)
ax.plot(t[:,0], mu, 'b-', label='mean')
ax.fill_between(
t[:,0], mu - sigma, mu + sigma,
color='b', alpha=0.2, edgecolor='none', label='std. dev.')
ax.plot(t, sample, 'k', label='sample')
ax.set_xlim((-10.0, 10.0))
ax.set_ylim((-2.5*var, 2.5*var))
ax.legend(loc=4, fontsize=10)
ax.tick_params(labelsize=10)
ax.set_xlabel('time', fontsize=10)
ax.set_title('periodic Gaussian process', fontsize=10)
fig.tight_layout()
plt.savefig('../figures/gproc.e.png')
plt.show()
|
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
Print = PETSc.Sys.Print
from dolfin import *
import numpy as np
import matplotlib.pylab as plt
import scipy.sparse as sps
import scipy.sparse.linalg as slinalg
import os
import scipy.io
import PETScIO as IO
import MatrixOperations as MO
def StoreMatrix(A,name):
test ="".join([name,".mat"])
scipy.io.savemat( test, {name: A},oned_as='row')
parameters['num_threads'] = 10
m = 6
errL2b = np.zeros((m-1,1))
errCurlb = np.zeros((m-1,1))
errL2r = np.zeros((m-1,1))
errH1r = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Vdim = np.zeros((m-1,1))
Qdim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
nn = 2
dim = 2
Solving = 'Direct'
ShowResultPlots = 'yes'
ShowErrorPlots = 'no'
EigenProblem = 'no'
SavePrecond = 'no'
CheckMu = 'no'
case = 4
parameters['linear_algebra_backend'] = 'uBLAS'
MU[0]= 1e0
for xx in xrange(1,m):
print xx
nn = 2**(xx)/2
if (CheckMu == 'yes'):
if (xx != 1):
MU[xx-1] = MU[xx-2]/10
else:
if (xx != 1):
MU[xx-1] = MU[xx-2]
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn
parameters["form_compiler"]["quadrature_degree"] = 3
parameters["form_compiler"]["optimize"] = True
parameters["form_compiler"]["representation"] = 'quadrature'
# mesh = BoxMesh(-1,-1,-1,1, 1, 1, nn, nn, nn)
mesh = UnitCubeMesh(nn,nn,nn)
parameters['reorder_dofs_serial'] = False
V = FunctionSpace(mesh, "N1curl",2)
Q = FunctionSpace(mesh, "CG",2)
Vdim[xx-1] = V.dim()
print "\n\n\n V-dim", V.dim()
def boundary(x, on_boundary):
return on_boundary
if case == 1:
u0 = Expression(("x[1]*x[1]*(x[1]-1)","x[0]*x[0]*(x[0]-1)","0"))
elif case == 2:
u0 = Expression(("sin(2*pi*x[1])*cos(2*pi*x[0])","-sin(2*pi*x[0])*cos(2*pi*x[1])"))
elif case == 3:
u0 = Expression(("x[0]*x[0]*(x[0]-1)","x[1]*x[1]*(x[1]-1)","0"))
elif case == 4:
u0 = Expression(("x[0]*x[1]*x[2]*(x[0]-1)","x[0]*x[1]*x[2]*(x[1]-1)","x[0]*x[1]*x[2]*(x[2]-1)"))
bcs = DirichletBC(V,u0, boundary)
# (u1) = TrialFunctions(V)
# (v1) = TestFunctions(V)
c = .5
if case == 1:
# f= Expression(("(8*pow(pi,2)-C)*sin(2*pi*x[1])*cos(2*pi*x[0])","-(8*pow(pi,2)-C)*sin(2*pi*x[0])*cos(2*pi*x[1])"),C = c)
f = Expression(("-6*x[1]+2","-6*x[0]+2"))+c*u0
elif case == 2:
f = 8*pow(pi,2)*u0+c*u0
elif case == 3:
f = Expression(("0","0","0"),C = c)
f = c*u0
elif case == 4:
f = Expression(("x[2]*(2*x[1]-1)+x[1]*(2*x[2]-1)","x[0]*(2*x[2]-1)+x[2]*(2*x[0]-1)","x[1]*(2*x[0]-1)+x[0]*(2*x[1]-1)"))+c*u0
(u) = TrialFunction(V)
(v) = TestFunction(V)
a = dot(curl(u),curl(v))*dx+c*inner(u, v)*dx
L1 = inner(v, f)*dx
tic()
AA, bb = assemble_system(a, L1, bcs)
As = AA.sparray()
StoreMatrix(As,'A')
A = PETSc.Mat().createAIJ(size=As.shape,csr=(As.indptr, As.indices, As.data))
# exit
# A = as_backend_type(AA).mat()
print toc()
b = bb.array()
zeros = 0*b
x = IO.arrayToVec(zeros)
bb = IO.arrayToVec(b)
if (Solving == 'Direct'):
ksp = PETSc.KSP().create()
ksp.setOperators(A)
ksp.setFromOptions()
ksp.setType(ksp.Type.PREONLY)
ksp.pc.setType(ksp.pc.Type.LU)
# print 'Solving with:', ksp.getType()
# Solve!
tic()
ksp.solve(bb, x)
SolTime[xx-1] = toc()
print "time to solve: ",SolTime[xx-1]
del AA
if (Solving == 'Iterative' or Solving == 'Direct'):
if case == 1:
ue = Expression(("x[1]*x[1]*(x[1]-1)","x[0]*x[0]*(x[0]-1)"))
elif case == 2:
ue = Expression(("sin(2*pi*x[1])*cos(2*pi*x[0])","-sin(2*pi*x[0])*cos(2*pi*x[1])"))
elif case == 3:
ue=u0
elif case == 4:
ue=u0
Ve = FunctionSpace(mesh, "N1curl",4)
u = interpolate(ue,Ve)
Nv = u.vector().array().shape
X = IO.vecToArray(x)
x = X[0:Nv[0]]
ua = Function(V)
ua.vector()[:] = x
parameters["form_compiler"]["quadrature_degree"] = 4
parameters["form_compiler"]["optimize"] = True
ErrorB = Function(V)
ErrorB.vector()[:] = interpolate(ue,V).vector().array()-ua.vector().array()
errL2b[xx-1] = sqrt(assemble(inner(ErrorB, ErrorB)*dx))
errCurlb[xx-1] = sqrt(assemble(inner(curl(ErrorB), curl(ErrorB))*dx))
if xx == 1:
a = 1
else:
l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1]))
Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1]))
print errL2b[xx-1]
print errCurlb[xx-1]
import pandas as pd
print "\n\n Magnetic convergence"
MagneticTitles = ["Total DoF","Soln Time","B-L2","B-order","B-Curl","Curl-order"]
MagneticValues = np.concatenate((Vdim,SolTime,errL2b,l2border,errCurlb,Curlborder),axis=1)
MagneticTable= pd.DataFrame(MagneticValues, columns = MagneticTitles)
pd.set_option('precision',3)
MagneticTable = MO.PandasFormat(MagneticTable,"B-Curl","%2.4e")
MagneticTable = MO.PandasFormat(MagneticTable,'B-L2',"%2.4e")
print MagneticTable
if (SavePrecond == 'yes'):
scipy.io.savemat('eigenvalues/Wdim.mat', {'Wdim':Wdim-1},oned_as = 'row')
if (ShowResultPlots == 'yes'):
plot(ua)
plot(interpolate(ue,V))
interactive()
|
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import proso.django.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('proso_user', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('proso_common', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.DateTimeField(default=datetime.datetime.now)),
('response_time', models.IntegerField()),
('guess', models.FloatField(default=0)),
('type', models.CharField(max_length=10)),
('lang', models.CharField(blank=True, default=None, max_length=2, null=True)),
('config', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='proso_common.Config')),
],
),
migrations.CreateModel(
name='AnswerMeta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField()),
('content_hash', models.CharField(db_index=True, max_length=40, unique=True)),
],
),
migrations.CreateModel(
name='Audit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(max_length=50)),
('value', models.FloatField()),
('time', models.DateTimeField(default=datetime.datetime.now)),
('answer', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='proso_models.Answer')),
],
),
migrations.CreateModel(
name='EnvironmentInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.IntegerField(choices=[(0, 'disabled'), (1, 'loading'), (2, 'enabled'), (3, 'active')], default=1)),
('revision', models.IntegerField()),
('load_progress', models.IntegerField(default=0)),
('updated', models.DateTimeField(auto_now=True)),
('created', models.DateTimeField(auto_now_add=True)),
('config', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='proso_common.Config')),
],
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('active', models.BooleanField(default=True)),
],
bases=(models.Model, proso.django.models.ModelDiffMixin),
),
migrations.CreateModel(
name='ItemRelation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('visible', models.BooleanField(default=True)),
('active', models.BooleanField(default=True)),
('child', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='child_relations', to='proso_models.Item')),
('parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parent_relations', to='proso_models.Item')),
],
bases=(models.Model, proso.django.models.ModelDiffMixin),
),
migrations.CreateModel(
name='ItemType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('model', models.CharField(max_length=100)),
('table', models.CharField(max_length=100)),
('foreign_key', models.CharField(max_length=100)),
('language', models.CharField(blank=True, default=None, max_length=100, null=True)),
('valid', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='PracticeContext',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField()),
('content_hash', models.CharField(db_index=True, max_length=40, unique=True)),
],
),
migrations.CreateModel(
name='PracticeSet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('finished', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Variable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('permanent', models.BooleanField(default=False)),
('key', models.CharField(max_length=50)),
('value', models.FloatField()),
('audit', models.BooleanField(default=True)),
('updated', models.DateTimeField(default=datetime.datetime.now)),
('answer', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='proso_models.Answer')),
('info', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='proso_models.EnvironmentInfo')),
('item_primary', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='item_primary_variables', to='proso_models.Item')),
('item_secondary', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='item_secondary_variables', to='proso_models.Item')),
('user', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='itemtype',
unique_together=set([('model', 'foreign_key'), ('table', 'foreign_key')]),
),
migrations.AddField(
model_name='item',
name='children',
field=models.ManyToManyField(related_name='parents', through='proso_models.ItemRelation', to='proso_models.Item'),
),
migrations.AddField(
model_name='item',
name='item_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='proso_models.ItemType'),
),
migrations.AddField(
model_name='audit',
name='info',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='proso_models.EnvironmentInfo'),
),
migrations.AddField(
model_name='audit',
name='item_primary',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='item_primary_audits', to='proso_models.Item'),
),
migrations.AddField(
model_name='audit',
name='item_secondary',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='item_secondary_audits', to='proso_models.Item'),
),
migrations.AddField(
model_name='audit',
name='user',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='answer',
name='context',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='proso_models.PracticeContext'),
),
migrations.AddField(
model_name='answer',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='item_answers', to='proso_models.Item'),
),
migrations.AddField(
model_name='answer',
name='item_answered',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='item_answered_answers', to='proso_models.Item'),
),
migrations.AddField(
model_name='answer',
name='item_asked',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='item_asked_answers', to='proso_models.Item'),
),
migrations.AddField(
model_name='answer',
name='metainfo',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='proso_models.AnswerMeta'),
),
migrations.AddField(
model_name='answer',
name='practice_set',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='proso_models.PracticeSet'),
),
migrations.AddField(
model_name='answer',
name='session',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='proso_user.Session'),
),
migrations.AddField(
model_name='answer',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='variable',
unique_together=set([('info', 'key', 'user', 'item_primary', 'item_secondary')]),
),
migrations.AlterIndexTogether(
name='variable',
index_together=set([('info', 'key', 'user'), ('info', 'key', 'user', 'item_primary'), ('info', 'key', 'user', 'item_primary', 'item_secondary'), ('info', 'key', 'item_primary'), ('info', 'key')]),
),
migrations.AlterUniqueTogether(
name='environmentinfo',
unique_together=set([('config', 'revision')]),
),
migrations.AlterIndexTogether(
name='audit',
index_together=set([('info', 'key', 'user'), ('info', 'key', 'user', 'item_primary'), ('info', 'key', 'user', 'item_primary', 'item_secondary'), ('info', 'key', 'item_primary'), ('info', 'key')]),
),
migrations.AlterIndexTogether(
name='answer',
index_together=set([('user', 'context')]),
),
]
|
import sys
import csv
from collections import Counter, defaultdict
sequences = sys.argv[1]
accession2taxonomy = sys.argv[2]
alignment = sys.argv[3]
with open(accession2taxonomy) as inf:
next(inf)
csv_inf = csv.reader(inf, delimiter="\t")
a2t = dict(('_'.join(row[0].split()[0].split('_')[:-1]).split('.')[0], row[-1]) for row in csv_inf)
print("Loaded accession2taxonomy.")
reads_counter = Counter()
with open(sequences) as inf:
for i, line in enumerate(inf):
if i % 100000 == 0:
print("Processed %d lines" % i)
print(line)
if line.startswith('>'):
name = '_'.join(line.split()[0][1:].split('_')[:-1]).split('.')[0]
if name in a2t:
species = a2t[name]
reads_counter.update([species])
print("Loaded read counter")
counts_dict = defaultdict(Counter)
with open(alignment) as inf:
csv_inf = csv.reader(inf, delimiter="\t")
for i, row in enumerate(csv_inf):
if i % 100000 == 0:
print("Processed %d records" % i)
print(row)
if row[-1].startswith('k'):
read = row[0]
read = "_".join(read.split('_')[:-1]).split('.')[0]
if read in a2t:
species = a2t[read]
tax = row[-1]
counts_dict[species].update([tax])
print("Loaded counts_dict.")
with open("sheared_bayes.txt", "w") as outf:
for i, species in enumerate(counts_dict.keys()):
row = [0] * 10
row[-1] = reads_counter[species]
row[0] = species
counts = counts_dict[species]
if i % 10000 == 0:
print("Processed %d records" % i)
print(counts)
for j in counts.keys():
c = j.count(';')
row[c+1] = counts[j]
row = list(map(str, row))
outf.write("\t".join(row) + "\n")
|
from .config import Config
|
import seaborn as sns
import matplotlib.pyplot as plt
def plot_corrmatrix(df, square=True, linewidths=0.1, annot=True,
size=None, figsize=(12, 9), *args, **kwargs):
"""
Plot correlation matrix of the dataset
see doc at https://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.heatmap.html#seaborn.heatmap
"""
sns.set(context="paper", font="monospace")
f, ax = plt.subplots(figsize=figsize)
sns.heatmap(df.corr(), vmax=1, square=square, linewidths=linewidths,
annot=annot, annot_kws={"size": size}, *args, **kwargs)
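# A minimal usage sketch with synthetic data (column names are illustrative).
if __name__ == '__main__':
    import numpy as np
    import pandas as pd
    df = pd.DataFrame(np.random.randn(100, 4), columns=list("abcd"))
    plot_corrmatrix(df, figsize=(6, 5))
    plt.show()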
|
import base64
import json
import pickle
import re
from utils import HTTP_REQUESTS
from azure.core.pipeline._tools import is_rest
import types
import unittest
try:
from unittest import mock
except ImportError:
import mock
import pytest
from requests import Request, Response
from msrest import Deserializer
from azure.core.polling import async_poller, AsyncLROPoller
from azure.core.exceptions import DecodeError, HttpResponseError
from azure.core import AsyncPipelineClient
from azure.core.pipeline import PipelineResponse, AsyncPipeline, PipelineContext
from azure.core.pipeline.transport import AsyncioRequestsTransportResponse, AsyncHttpTransport
from azure.core.polling.async_base_polling import (
AsyncLROBasePolling,
)
from utils import ASYNCIO_REQUESTS_TRANSPORT_RESPONSES, request_and_responses_product, create_transport_response
from rest_client_async import AsyncTestRestClient
class SimpleResource:
"""An implementation of Python 3 SimpleNamespace.
Used to deserialize resource objects from response bodies where
no particular object type has been specified.
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
keys = sorted(self.__dict__)
items = ("{}={!r}".format(k, self.__dict__[k]) for k in keys)
return "{}({})".format(type(self).__name__, ", ".join(items))
def __eq__(self, other):
return self.__dict__ == other.__dict__
class BadEndpointError(Exception):
pass
TEST_NAME = 'foo'
RESPONSE_BODY = {'properties':{'provisioningState': 'InProgress'}}
ASYNC_BODY = json.dumps({ 'status': 'Succeeded' })
ASYNC_URL = 'http://dummyurlFromAzureAsyncOPHeader_Return200'
LOCATION_BODY = json.dumps({ 'name': TEST_NAME })
LOCATION_URL = 'http://dummyurlurlFromLocationHeader_Return200'
RESOURCE_BODY = json.dumps({ 'name': TEST_NAME })
RESOURCE_URL = 'http://subscriptions/sub1/resourcegroups/g1/resourcetype1/resource1'
ERROR = 'http://dummyurl_ReturnError'
POLLING_STATUS = 200
CLIENT = AsyncPipelineClient("http://example.org")
CLIENT.http_request_type = None
CLIENT.http_response_type = None
async def mock_run(client_self, request, **kwargs):
return TestBasePolling.mock_update(client_self.http_request_type, client_self.http_response_type, request.url)
CLIENT._pipeline.run = types.MethodType(mock_run, CLIENT)
@pytest.fixture
def client():
    # The poller itself doesn't use it, so it doesn't need to be functional
return AsyncPipelineClient("https://baseurl")
@pytest.fixture
def async_pipeline_client_builder():
"""Build a client that use the "send" callback as final transport layer
send will receive "request" and kwargs as any transport layer
"""
def create_client(send_cb):
class TestHttpTransport(AsyncHttpTransport):
async def open(self): pass
async def close(self): pass
async def __aexit__(self, *args, **kwargs): pass
async def send(self, request, **kwargs):
return await send_cb(request, **kwargs)
return AsyncPipelineClient(
'http://example.org/',
pipeline=AsyncPipeline(
transport=TestHttpTransport()
)
)
return create_client
@pytest.fixture
def deserialization_cb():
def cb(pipeline_response):
return json.loads(pipeline_response.http_response.text())
return cb
@pytest.fixture
def polling_response():
polling = AsyncLROBasePolling()
headers = {}
response = Response()
response.headers = headers
response.status_code = 200
polling._pipeline_response = PipelineResponse(
None,
AsyncioRequestsTransportResponse(
None,
response,
),
PipelineContext(None)
)
polling._initial_response = polling._pipeline_response
return polling, headers
def test_base_polling_continuation_token(client, polling_response):
polling, _ = polling_response
continuation_token = polling.get_continuation_token()
assert isinstance(continuation_token, str)
polling_args = AsyncLROBasePolling.from_continuation_token(
continuation_token,
deserialization_callback="deserialization_callback",
client=client,
)
new_polling = AsyncLROBasePolling()
new_polling.initialize(*polling_args)
@pytest.mark.asyncio
@pytest.mark.parametrize("http_request,http_response", request_and_responses_product(ASYNCIO_REQUESTS_TRANSPORT_RESPONSES))
async def test_post(async_pipeline_client_builder, deserialization_cb, http_request, http_response):
# Test POST LRO with both Location and Operation-Location
# The initial response contains both Location and Operation-Location, a 202 and no Body
initial_response = TestBasePolling.mock_send(
http_request,
http_response,
'POST',
202,
{
'location': 'http://example.org/location',
'operation-location': 'http://example.org/async_monitor',
},
''
)
async def send(request, **kwargs):
assert request.method == 'GET'
if request.url == 'http://example.org/location':
return TestBasePolling.mock_send(
http_request,
http_response,
'GET',
200,
body={'location_result': True}
).http_response
elif request.url == 'http://example.org/async_monitor':
return TestBasePolling.mock_send(
http_request,
http_response,
'GET',
200,
body={'status': 'Succeeded'}
).http_response
else:
pytest.fail("No other query allowed")
client = async_pipeline_client_builder(send)
# LRO options with Location final state
poll = async_poller(
client,
initial_response,
deserialization_cb,
AsyncLROBasePolling(0))
result = await poll
assert result['location_result'] == True
# Location has no body
async def send(request, **kwargs):
assert request.method == 'GET'
if request.url == 'http://example.org/location':
return TestBasePolling.mock_send(
http_request,
http_response,
'GET',
200,
body=None
).http_response
elif request.url == 'http://example.org/async_monitor':
return TestBasePolling.mock_send(
http_request,
http_response,
'GET',
200,
body={'status': 'Succeeded'}
).http_response
else:
pytest.fail("No other query allowed")
client = async_pipeline_client_builder(send)
poll = async_poller(
client,
initial_response,
deserialization_cb,
AsyncLROBasePolling(0))
result = await poll
assert result is None
@pytest.mark.asyncio
@pytest.mark.parametrize("http_request,http_response", request_and_responses_product(ASYNCIO_REQUESTS_TRANSPORT_RESPONSES))
async def test_post_resource_location(async_pipeline_client_builder, deserialization_cb, http_request, http_response):
# ResourceLocation
# The initial response contains both Location and Operation-Location, a 202 and no Body
initial_response = TestBasePolling.mock_send(
http_request,
http_response,
'POST',
202,
{
'operation-location': 'http://example.org/async_monitor',
},
''
)
async def send(request, **kwargs):
assert request.method == 'GET'
if request.url == 'http://example.org/resource_location':
return TestBasePolling.mock_send(
http_request,
http_response,
'GET',
200,
body={'location_result': True}
).http_response
elif request.url == 'http://example.org/async_monitor':
return TestBasePolling.mock_send(
http_request,
http_response,
'GET',
200,
body={'status': 'Succeeded', 'resourceLocation': 'http://example.org/resource_location'}
).http_response
else:
pytest.fail("No other query allowed")
client = async_pipeline_client_builder(send)
poll = async_poller(
client,
initial_response,
deserialization_cb,
AsyncLROBasePolling(0))
result = await poll
assert result['location_result'] == True
class TestBasePolling(object):
convert = re.compile('([a-z0-9])([A-Z])')
@staticmethod
def mock_send(http_request, http_response, method, status, headers=None, body=RESPONSE_BODY):
if headers is None:
headers = {}
response = Response()
response._content_consumed = True
response._content = json.dumps(body).encode('ascii') if body is not None else None
response.request = Request()
response.request.method = method
response.request.url = RESOURCE_URL
response.request.headers = {
'x-ms-client-request-id': '67f4dd4e-6262-45e1-8bed-5c45cf23b6d9'
}
response.status_code = status
response.headers = headers
response.headers.update({"content-type": "application/json; charset=utf8"})
response.reason = "OK"
if is_rest(http_request):
request = http_request(
response.request.method,
response.request.url,
headers=response.request.headers,
content=body,
)
else:
request = CLIENT._request(
response.request.method,
response.request.url,
None, # params
response.request.headers,
body,
None, # form_content
None # stream_content
)
response = create_transport_response(http_response, request, response)
if is_rest(http_response):
response.body()
return PipelineResponse(
request,
response,
None # context
)
@staticmethod
def mock_update(http_request, http_response, url, headers=None):
response = Response()
response._content_consumed = True
response.request = mock.create_autospec(Request)
response.request.method = 'GET'
response.headers = headers or {}
response.headers.update({"content-type": "application/json; charset=utf8"})
response.reason = "OK"
if url == ASYNC_URL:
response.request.url = url
response.status_code = POLLING_STATUS
response._content = ASYNC_BODY.encode('ascii')
response.randomFieldFromPollAsyncOpHeader = None
elif url == LOCATION_URL:
response.request.url = url
response.status_code = POLLING_STATUS
response._content = LOCATION_BODY.encode('ascii')
response.randomFieldFromPollLocationHeader = None
elif url == ERROR:
raise BadEndpointError("boom")
elif url == RESOURCE_URL:
response.request.url = url
response.status_code = POLLING_STATUS
response._content = RESOURCE_BODY.encode('ascii')
else:
raise Exception('URL does not match')
request = http_request(
response.request.method,
response.request.url,
)
response = create_transport_response(http_response, request, response)
if is_rest(http_response):
response.body()
return PipelineResponse(
request,
response,
None # context
)
@staticmethod
def mock_outputs(pipeline_response):
response = pipeline_response.http_response
try:
body = json.loads(response.text())
except ValueError:
raise DecodeError("Impossible to deserialize")
body = {TestBasePolling.convert.sub(r'\1_\2', k).lower(): v
for k, v in body.items()}
properties = body.setdefault('properties', {})
if 'name' in body:
properties['name'] = body['name']
if properties:
properties = {TestBasePolling.convert.sub(r'\1_\2', k).lower(): v
for k, v in properties.items()}
del body['properties']
body.update(properties)
else:
raise DecodeError("Impossible to deserialize")
resource = SimpleResource(**body)
return resource
@staticmethod
def mock_deserialization_no_body(pipeline_response):
"""Use this mock when you don't expect a return (last body irrelevant)
"""
return None
@pytest.mark.asyncio
@pytest.mark.parametrize("http_request,http_response", request_and_responses_product(ASYNCIO_REQUESTS_TRANSPORT_RESPONSES))
async def test_long_running_put(http_request, http_response):
#TODO: Test custom header field
CLIENT.http_request_type = http_request
CLIENT.http_response_type = http_response
# Test throw on non LRO related status code
response = TestBasePolling.mock_send(
http_request, http_response, 'PUT', 1000, {}
)
with pytest.raises(HttpResponseError):
await async_poller(CLIENT, response,
TestBasePolling.mock_outputs,
AsyncLROBasePolling(0))
# Test with no polling necessary
response_body = {
'properties':{'provisioningState': 'Succeeded'},
'name': TEST_NAME
}
response = TestBasePolling.mock_send(
http_request,
http_response,
'PUT', 201,
{}, response_body
)
def no_update_allowed(url, headers=None):
raise ValueError("Should not try to update")
polling_method = AsyncLROBasePolling(0)
poll = await async_poller(CLIENT, response,
TestBasePolling.mock_outputs,
polling_method
)
assert poll.name == TEST_NAME
assert not hasattr(polling_method._pipeline_response, 'randomFieldFromPollAsyncOpHeader')
# Test polling from operation-location header
response = TestBasePolling.mock_send(
http_request,
http_response,
'PUT', 201,
{'operation-location': ASYNC_URL})
polling_method = AsyncLROBasePolling(0)
poll = await async_poller(CLIENT, response,
TestBasePolling.mock_outputs,
polling_method)
assert poll.name == TEST_NAME
assert not hasattr(polling_method._pipeline_response, 'randomFieldFromPollAsyncOpHeader')
# Test polling location header
response = TestBasePolling.mock_send(
http_request,
http_response,
'PUT', 201,
{'location': LOCATION_URL})
polling_method = AsyncLROBasePolling(0)
poll = await async_poller(CLIENT, response,
TestBasePolling.mock_outputs,
polling_method)
assert poll.name == TEST_NAME
assert polling_method._pipeline_response.http_response.internal_response.randomFieldFromPollLocationHeader is None
# Test polling initial payload invalid (SQLDb)
response_body = {} # Empty will raise
response = TestBasePolling.mock_send(
http_request,
http_response,
'PUT', 201,
{'location': LOCATION_URL}, response_body)
polling_method = AsyncLROBasePolling(0)
poll = await async_poller(CLIENT, response,
TestBasePolling.mock_outputs,
polling_method)
assert poll.name == TEST_NAME
assert polling_method._pipeline_response.http_response.internal_response.randomFieldFromPollLocationHeader is None
# Test fail to poll from operation-location header
response = TestBasePolling.mock_send(
http_request,
http_response,
'PUT', 201,
{'operation-location': ERROR})
with pytest.raises(BadEndpointError):
poll = await async_poller(CLIENT, response,
TestBasePolling.mock_outputs,
AsyncLROBasePolling(0))
# Test fail to poll from location header
response = TestBasePolling.mock_send(
http_request,
http_response,
'PUT', 201,
{'location': ERROR})
with pytest.raises(BadEndpointError):
poll = await async_poller(CLIENT, response,
TestBasePolling.mock_outputs,
AsyncLROBasePolling(0))
@pytest.mark.asyncio
@pytest.mark.parametrize("http_request,http_response", request_and_responses_product(ASYNCIO_REQUESTS_TRANSPORT_RESPONSES))
async def test_long_running_patch(http_request, http_response):
CLIENT.http_request_type = http_request
CLIENT.http_response_type = http_response
# Test polling from location header
response = TestBasePolling.mock_send(
http_request,
http_response,
'PATCH', 202,
{'location': LOCATION_URL},
body={'properties':{'provisioningState': 'Succeeded'}})
polling_method = AsyncLROBasePolling(0)
poll = await async_poller(CLIENT, response,
TestBasePolling.mock_outputs,
polling_method)
assert poll.name == TEST_NAME
assert polling_method._pipeline_response.http_response.internal_response.randomFieldFromPollLocationHeader is None
# Test polling from operation-location header
response = TestBasePolling.mock_send(
http_request,
http_response,
'PATCH', 202,
{'operation-location': ASYNC_URL},
body={'properties':{'provisioningState': 'Succeeded'}})
polling_method = AsyncLROBasePolling(0)
poll = await async_poller(CLIENT, response,
TestBasePolling.mock_outputs,
polling_method)
assert poll.name == TEST_NAME
assert not hasattr(polling_method._pipeline_response, 'randomFieldFromPollAsyncOpHeader')
# Test polling from location header
response = TestBasePolling.mock_send(
http_request,
http_response,
'PATCH', 200,
{'location': LOCATION_URL},
body={'properties':{'provisioningState': 'Succeeded'}})
polling_method = AsyncLROBasePolling(0)
poll = await async_poller(CLIENT, response,
TestBasePolling.mock_outputs,
polling_method)
assert poll.name == TEST_NAME
assert polling_method._pipeline_response.http_response.internal_response.randomFieldFromPollLocationHeader is None
# Test polling from operation-location header
response = TestBasePolling.mock_send(
http_request,
http_response,
'PATCH', 200,
{'operation-location': ASYNC_URL},
body={'properties':{'provisioningState': 'Succeeded'}})
polling_method = AsyncLROBasePolling(0)
poll = await async_poller(CLIENT, response,
TestBasePolling.mock_outputs,
polling_method)
assert poll.name == TEST_NAME
assert not hasattr(polling_method._pipeline_response, 'randomFieldFromPollAsyncOpHeader')
# Test fail to poll from operation-location header
response = TestBasePolling.mock_send(
http_request,
http_response,
'PATCH', 202,
{'operation-location': ERROR})
with pytest.raises(BadEndpointError):
poll = await async_poller(CLIENT, response,
TestBasePolling.mock_outputs,
AsyncLROBasePolling(0))
# Test fail to poll from location header
response = TestBasePolling.mock_send(
http_request,
http_response,
'PATCH', 202,
{'location': ERROR})
with pytest.raises(BadEndpointError):
poll = await async_poller(CLIENT, response,
TestBasePolling.mock_outputs,
AsyncLROBasePolling(0))
@pytest.mark.asyncio
@pytest.mark.parametrize("http_request,http_response", request_and_responses_product(ASYNCIO_REQUESTS_TRANSPORT_RESPONSES))
async def test_long_running_delete(http_request, http_response):
# Test polling from operation-location header
CLIENT.http_request_type = http_request
CLIENT.http_response_type = http_response
response = TestBasePolling.mock_send(
http_request,
http_response,
'DELETE', 202,
{'operation-location': ASYNC_URL},
body=""
)
polling_method = AsyncLROBasePolling(0)
poll = await async_poller(CLIENT, response,
TestBasePolling.mock_deserialization_no_body,
polling_method)
assert poll is None
assert polling_method._pipeline_response.http_response.internal_response.randomFieldFromPollAsyncOpHeader is None
@pytest.mark.asyncio
@pytest.mark.parametrize("http_request,http_response", request_and_responses_product(ASYNCIO_REQUESTS_TRANSPORT_RESPONSES))
async def test_long_running_post(http_request, http_response):
CLIENT.http_request_type = http_request
CLIENT.http_response_type = http_response
# Test polling from operation-location header
response = TestBasePolling.mock_send(
http_request,
http_response,
'POST', 201,
{'operation-location': ASYNC_URL},
body={'properties':{'provisioningState': 'Succeeded'}})
polling_method = AsyncLROBasePolling(0)
poll = await async_poller(CLIENT, response,
TestBasePolling.mock_deserialization_no_body,
polling_method)
assert polling_method._pipeline_response.http_response.internal_response.randomFieldFromPollAsyncOpHeader is None
# Test polling from operation-location header
response = TestBasePolling.mock_send(
http_request,
http_response,
'POST', 202,
{'operation-location': ASYNC_URL},
body={'properties':{'provisioningState': 'Succeeded'}})
polling_method = AsyncLROBasePolling(0)
poll = await async_poller(CLIENT, response,
TestBasePolling.mock_deserialization_no_body,
polling_method)
assert polling_method._pipeline_response.http_response.internal_response.randomFieldFromPollAsyncOpHeader is None
# Test polling from location header
response = TestBasePolling.mock_send(
http_request,
http_response,
'POST', 202,
{'location': LOCATION_URL},
body={'properties':{'provisioningState': 'Succeeded'}})
polling_method = AsyncLROBasePolling(0)
poll = await async_poller(CLIENT, response,
TestBasePolling.mock_outputs,
polling_method)
assert poll.name == TEST_NAME
assert polling_method._pipeline_response.http_response.internal_response.randomFieldFromPollLocationHeader is None
# Test fail to poll from operation-location header
response = TestBasePolling.mock_send(
http_request,
http_response,
'POST', 202,
{'operation-location': ERROR})
with pytest.raises(BadEndpointError):
await async_poller(CLIENT, response,
TestBasePolling.mock_outputs,
AsyncLROBasePolling(0))
# Test fail to poll from location header
response = TestBasePolling.mock_send(
http_request,
http_response,
'POST', 202,
{'location': ERROR})
with pytest.raises(BadEndpointError):
await async_poller(CLIENT, response,
TestBasePolling.mock_outputs,
AsyncLROBasePolling(0))
@pytest.mark.asyncio
@pytest.mark.parametrize("http_request,http_response", request_and_responses_product(ASYNCIO_REQUESTS_TRANSPORT_RESPONSES))
async def test_long_running_negative(http_request, http_response):
global LOCATION_BODY
global POLLING_STATUS
CLIENT.http_request_type = http_request
CLIENT.http_response_type = http_response
# Test LRO PUT throws for invalid json
LOCATION_BODY = '{'
response = TestBasePolling.mock_send(
http_request,
http_response,
'POST', 202,
{'location': LOCATION_URL})
poll = async_poller(
CLIENT,
response,
TestBasePolling.mock_outputs,
AsyncLROBasePolling(0)
)
with pytest.raises(DecodeError):
await poll
LOCATION_BODY = '{\'"}'
response = TestBasePolling.mock_send(
http_request,
http_response,
'POST', 202,
{'location': LOCATION_URL})
poll = async_poller(CLIENT, response,
TestBasePolling.mock_outputs,
AsyncLROBasePolling(0))
with pytest.raises(DecodeError):
await poll
LOCATION_BODY = '{'
POLLING_STATUS = 203
response = TestBasePolling.mock_send(
http_request,
http_response,
'POST', 202,
{'location': LOCATION_URL})
poll = async_poller(CLIENT, response,
TestBasePolling.mock_outputs,
AsyncLROBasePolling(0))
with pytest.raises(HttpResponseError) as error: # TODO: Node.js raises on deserialization
await poll
assert error.value.continuation_token == base64.b64encode(pickle.dumps(response)).decode('ascii')
LOCATION_BODY = json.dumps({ 'name': TEST_NAME })
POLLING_STATUS = 200
@pytest.mark.asyncio
@pytest.mark.parametrize("http_request,http_response", request_and_responses_product(ASYNCIO_REQUESTS_TRANSPORT_RESPONSES))
async def test_post_final_state_via(async_pipeline_client_builder, deserialization_cb, http_request, http_response):
# Test POST LRO with both Location and Operation-Location
CLIENT.http_request_type = http_request
CLIENT.http_response_type = http_response
# The initial response contains both Location and Operation-Location, a 202 and no Body
initial_response = TestBasePolling.mock_send(
http_request,
http_response,
'POST',
202,
{
'location': 'http://example.org/location',
'operation-location': 'http://example.org/async_monitor',
},
''
)
async def send(request, **kwargs):
assert request.method == 'GET'
if request.url == 'http://example.org/location':
return TestBasePolling.mock_send(
http_request,
http_response,
'GET',
200,
body={'location_result': True}
).http_response
elif request.url == 'http://example.org/async_monitor':
return TestBasePolling.mock_send(
http_request,
http_response,
'GET',
200,
body={'status': 'Succeeded'}
).http_response
else:
pytest.fail("No other query allowed")
client = async_pipeline_client_builder(send)
# Test 1, LRO options with Location final state
poll = async_poller(
client,
initial_response,
deserialization_cb,
AsyncLROBasePolling(0, lro_options={"final-state-via": "location"}))
result = await poll
assert result['location_result'] == True
# Test 2, LRO options with Operation-Location final state
poll = async_poller(
client,
initial_response,
deserialization_cb,
AsyncLROBasePolling(0, lro_options={"final-state-via": "operation-location"}))
result = await poll
assert result['status'] == 'Succeeded'
# Test 3, "do the right thing" and use Location by default
poll = async_poller(
client,
initial_response,
deserialization_cb,
AsyncLROBasePolling(0))
result = await poll
assert result['location_result'] == True
# Test 4, location has no body
async def send(request, **kwargs):
assert request.method == 'GET'
if request.url == 'http://example.org/location':
return TestBasePolling.mock_send(
http_request,
http_response,
'GET',
200,
body=None
).http_response
elif request.url == 'http://example.org/async_monitor':
return TestBasePolling.mock_send(
http_request,
http_response,
'GET',
200,
body={'status': 'Succeeded'}
).http_response
else:
pytest.fail("No other query allowed")
client = async_pipeline_client_builder(send)
poll = async_poller(
client,
initial_response,
deserialization_cb,
AsyncLROBasePolling(0, lro_options={"final-state-via": "location"}))
result = await poll
assert result is None
@pytest.mark.asyncio
@pytest.mark.parametrize("http_request", HTTP_REQUESTS)
async def test_final_get_via_location(port, http_request, deserialization_cb):
client = AsyncTestRestClient(port)
request = http_request(
"PUT",
"http://localhost:{}/polling/polling-with-options".format(port),
)
request.set_json_body({"hello": "world!"})
initial_response = await client._client._pipeline.run(request)
poller = AsyncLROPoller(
client._client,
initial_response,
deserialization_cb,
AsyncLROBasePolling(0, lro_options={"final-state-via": "location"}),
)
result = await poller.result()
assert result == {"returnedFrom": "locationHeaderUrl"}
|
from organise import app
app.run()
|
from __future__ import unicode_literals
from django.db import models, migrations
from django.contrib.gis.geos import GeometryCollection
def change_line_to_multiline(apps, schema_editor):
    # We can't import the Poi model directly as it may be a newer
    # version than this migration expects. We use the historical version.
Poi = apps.get_model("webmap", "Poi")
for poi in Poi.objects.all():
if poi.geom:
poi.geom_multi = GeometryCollection([poi.geom, ])
poi.save()
class Migration(migrations.Migration):
dependencies = [
('webmap', '0011_auto_20160101_0521'),
]
operations = [
migrations.RunPython(change_line_to_multiline),
]
|
from eth_utils import (
is_hex,
is_string,
is_integer,
remove_0x_prefix,
force_text,
)
def is_predefined_block_number(value):
if not is_string(value):
return False
return force_text(value) in {"latest", "pending", "earliest"}
def is_hex_encoded_block_hash(value):
if not is_string(value):
return False
return len(remove_0x_prefix(value)) == 64 and is_hex(value)
def is_hex_encoded_block_number(value):
if not is_string(value):
return False
elif is_hex_encoded_block_hash(value):
return False
try:
value_as_int = int(value, 16)
except ValueError:
return False
return 0 <= value_as_int < 2**256
def select_method_for_block_identifier(value, if_hash, if_number, if_predefined):
if is_predefined_block_number(value):
return if_predefined
elif isinstance(value, bytes):
return if_hash
elif is_hex_encoded_block_hash(value):
return if_hash
elif is_integer(value) and (0 <= value < 2**256):
return if_number
elif is_hex_encoded_block_number(value):
return if_number
else:
raise ValueError(
"Value did not match any of the recognized block identifiers: {0}".format(value)
)
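
# A hypothetical usage sketch (the handler values below are illustrative and
# not part of this module): dispatch on how the block identifier is spelled.
if __name__ == "__main__":
    handler = select_method_for_block_identifier(
        "latest",
        if_hash="by_hash",
        if_number="by_number",
        if_predefined="by_predefined",
    )
    assert handler == "by_predefined"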
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^', include('ebets.urls')),
url(r'^admin/', include(admin.site.urls)),
)
|
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
        # Map each needed complement (target - num) to the index where it was
        # seen; when the current number matches a stored complement, the pair
        # is found. Returns 1-based indices, per the old LeetCode convention.
        inspected_dict = {}
        for i, num in enumerate(nums):
            try:
                j = inspected_dict[num]
                return j + 1, i + 1
            except KeyError:
                inspected_dict[target - num] = i
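
# A small usage sketch (not part of the original file): the one-pass
# hash-map approach above returns 1-based indices.
if __name__ == "__main__":
    assert Solution().twoSum([2, 7, 11, 15], 9) == (1, 2)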
|
"""Unit tests for the image downloader."""
import unittest
import download
__author__ = "Nick Pascucci (npascut1@gmail.com)"
class DownloadTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_img_matcher(self):
html = """<html>
<body>
<b>Hi there!</b>
<img src="abcd-(myfile)[1].jpg">
</body>
</html>
"""
paths = download.get_image_paths(html)
assert paths == ["abcd-(myfile)[1].jpg"]
def test_img_matcher_http(self):
html = """<html>
<body>
<b>Hi there!</b>
<img src="http://www.def.com/abcd-(myfile)[1].jpg">
</body>
</html>
"""
paths = download.get_image_paths(html)
assert paths == ["http://www.def.com/abcd-(myfile)[1].jpg"]
def test_extension_matcher(self):
filename = "abcdef.jpg"
assert download.match_extension(filename)
filename = "abcdef.txt"
assert not download.match_extension(filename)
def test_sitename_matcher(self):
site = "http://www.xkcd.com/208/"
sitename = download.sitename(site)
assert "http://www.xkcd.com" == sitename
if __name__ == "__main__":
unittest.main()
|
r"""
.. _SoftiMAX:
SoftiMAX at MAX IV
------------------
The images below are produced by scripts in
``\examples\withRaycing\14_SoftiMAX``.
The beamline will have two branches:
- STXM (Scanning Transmission X-ray Microscopy) and
- CXI (Coherent X-ray Imaging),
see the scheme provided by K. Thånell.
.. imagezoom:: _images/softiMAX_layout.*
STXM branch
~~~~~~~~~~~
.. rubric:: Rays vs. hybrid
The propagation through the first optical elements – from undulator to front
end (FE) slit, to M1, to M2 and to plane grating (PG) – is done with rays:
+------------+------------+------------+------------+
| FE | M1 | M2 | PG |
+============+============+============+============+
| |st_rFE| | |st_rM1| | |st_rM2| | |st_rPG| |
+------------+------------+------------+------------+
.. |st_rFE| imagezoom:: _images/stxm-2D-1-rays-0emit-0enSpread-monoE-00-FE.*
.. |st_rM1| imagezoom:: _images/stxm-2D-1-rays-0emit-0enSpread-monoE-01-M1local.*
.. |st_rM2| imagezoom:: _images/stxm-2D-1-rays-0emit-0enSpread-monoE-02-M2local.*
.. |st_rPG| imagezoom:: _images/stxm-2D-1-rays-0emit-0enSpread-monoE-02a-PGlocal.*
:loc: upper-right-corner
Starting from PG – to M3, to exit slit, to Fresnel zone plate (FZP) and to
variously positioned sample screen – the propagation is done by rays or waves,
as compared below. Although the M3 footprint looks imperfect (not black at the
periphery), the field at the normal surfaces (exit slit, FZP (not shown) and
sample screen) is of perfect quality. At the best focus, rays and waves result
in a similar image. Notice the micron-sized depth of focus.
+-----------+---------------------+---------------------+
| | rays | wave |
+===========+=====================+=====================+
| M3 | |st_rM3| | |st_hM3| |
+-----------+---------------------+---------------------+
| exit slit | |st_rES| | |st_hES| |
+-----------+---------------------+---------------------+
| sample | |st_rS| | |st_hS| |
+-----------+---------------------+---------------------+
.. |st_rM3| imagezoom:: _images/stxm-2D-1-rays-0emit-0enSpread-monoE-03-M3local.*
.. |st_hM3| imagezoom:: _images/stxm-2D-2-hybr-0emit-0enSpread-monoE-03-M3local.*
:loc: upper-right-corner
.. |st_rES| imagezoom:: _images/stxm-2D-1-rays-0emit-0enSpread-monoE-04-ExitSlit.*
.. |st_hES| imagezoom:: _images/stxm-2D-2-hybr-0emit-0enSpread-monoE-04-ExitSlit.*
:loc: upper-right-corner
.. |st_rS| animation:: _images/stxm-2D-1-rays-0emit-0enSpread-monoE-06i-ExpFocus-Is
.. |st_hS| imagezoom:: _images/stxm-2D-2-hybr-0emit-0enSpread-monoE-06i-ExpFocus-Is
:loc: upper-right-corner
.. rubric:: Influence of emittance
Non-zero emittance radiation is treated in xrt by incoherent addition of single
electron intensities. The single-electron (filament) fields are considered
fully coherent and result from filament trajectories (one per repeat) that
attain positional and angular shifts within the given emittance distribution.
The following images are calculated at the exit slit and at the focus screen
for zero and non-zero emittance
(for the MAX IV 3 GeV ring: ε\ :sub:`x`\ =263 pm·rad,
β\ :sub:`x`\ =9 m, ε\ :sub:`z`\ =8 pm·rad, β\ :sub:`z`\ =2 m). At the real
emittance, the horizontal focal size increases by ~75%. A finite energy band,
as determined by the vertical size of the exit slit, results in somewhat
larger broadening owing to the chromatic dependence of the focal length.
+-----------+---------------------+---------------------+---------------------+
| | 0 emittance | real emittance | |refeb| |
+===========+=====================+=====================+=====================+
| exit slit | |st_hESb| | |st_hES2| | |st_hES3| |
+-----------+---------------------+---------------------+---------------------+
| sample | |st_hSb| | |st_hS2| | |st_hS3| |
+-----------+---------------------+---------------------+---------------------+
.. |refeb| replace:: real emittance, finite energy band
.. |st_hESb| imagezoom:: _images/stxm-2D-2-hybr-0emit-0enSpread-monoE-04-ExitSlit.*
.. |st_hES2| imagezoom:: _images/stxm-2D-2-hybr-non0e-0enSpread-monoE-04-ExitSlit.*
.. |st_hS2| animation:: _images/stxm-2D-2-hybr-non0e-0enSpread-monoE-06i-ExpFocus-Is
.. |st_hES3| imagezoom:: _images/stxm-2D-2-hybr-non0e-0enSpread-wideE-04-ExitSlit.*
:loc: upper-right-corner
.. |st_hSb| imagezoom:: _images/stxm-2D-2-hybr-0emit-0enSpread-monoE-06i-ExpFocus-Is
.. |st_hS3| animation:: _images/stxm-2D-2-hybr-non0e-0enSpread-wideE-06i-ExpFocus-Is
:loc: upper-right-corner
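
The incoherent averaging can be illustrated by a minimal numpy sketch (an
illustration under the stated convention, not the xrt code): the
*intensities* of the individual filament fields, not the fields themselves,
are added:

.. code-block:: python

    import numpy as np

    def average_intensity(filament_fields):
        # filament_fields: (repeats, npoints) complex single-electron fields;
        # each field is fully coherent, their intensities add incoherently
        return np.mean(np.abs(filament_fields)**2, axis=0)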
.. rubric:: Correction of emittance effects
The increased focal size can be remedied by closing the exit slit. At a flux
loss of about 2/3, the focal size is almost restored.
+-----------+--------------------+--------------------+
| | 80 µm exit slit | 20 µm exit slit |
+===========+====================+====================+
| exit slit | |st_hES2b| | |st_hES4| |
+-----------+--------------------+--------------------+
| sample | |st_hS2b| | |st_hS4| |
+-----------+--------------------+--------------------+
.. |st_hES2b| imagezoom:: _images/stxm-2D-2-hybr-non0e-0enSpread-monoE-04-ExitSlit.*
.. |st_hES4| imagezoom:: _images/stxm-2D-2-hybr-non0e-0enSpread-monoE-025H-04-ExitSlit.*
:loc: upper-right-corner
.. |st_hS2b| animation:: _images/stxm-2D-2-hybr-non0e-0enSpread-monoE-06i-ExpFocus-Is
.. |st_hS4| animation:: _images/stxm-2D-2-hybr-non0e-0enSpread-monoE-025H-06i-ExpFocus-Is
:loc: upper-right-corner
.. rubric:: Coherence signatures
The beam improvement can also be viewed through the coherence properties, by
the four available methods (see :ref:`coh_signs`). As the horizontal exit slit
becomes smaller, one can observe an increase of the coherent fraction ζ and of
the primary (coherent) mode weight. The width of the degree of coherence (DoC)
relative to the width of the intensity distribution determines the coherent
beam fraction. Both widths vary with the screen position around the focal
point such that their ratio is not invariant, so the coherent fraction also
varies, which is counter-intuitive. An important advantage of the eigen-mode
and PCA methods is a simple definition of the coherent fraction as the
eigenvalue of the zeroth mode (component); this eigenvalue appears to be
invariant around the focal point, see below. Note that methods 2 and 3 give
equal results. Method 4, which gives the degree of transverse coherence
(DoTC), is also invariant around the focal point, see the DoTC values on the
pictures of the Principal Components.
+-----------+--------------------------+--------------------------+
| | 80 µm exit slit | 20 µm exit slit |
+===========+==========================+==========================+
| method 1 | |st_hS80m1| | |st_hS20m1| |
+-----------+--------------------------+--------------------------+
| method 2 | |st_hS80m3| | |st_hS20m3| |
+-----------+--------------------------+--------------------------+
| method 3, | |st_hS80m4| | |st_hS20m4| |
| method 4b | | |
+-----------+--------------------------+--------------------------+
.. |st_hS80m1| animation:: _images/stxm-IDOC-2D-2-hybr-non0e-0enSpread-monoE
.. |st_hS20m1| animation:: _images/stxm-IDOC-2D-2-hybr-non0e-0enSpread-monoE-025H
:loc: upper-right-corner
.. |st_hS80m3| animation:: _images/stxm-Modes-2D-2-hybr-non0e-0enSpread-monoE
.. |st_hS20m3| animation:: _images/stxm-Modes-2D-2-hybr-non0e-0enSpread-monoE-025H
:loc: upper-right-corner
.. |st_hS80m4| animation:: _images/stxm-PCA-2D-2-hybr-non0e-0enSpread-monoE
.. |st_hS20m4| animation:: _images/stxm-PCA-2D-2-hybr-non0e-0enSpread-monoE-025H
:loc: upper-right-corner
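
As a minimal illustration of the eigen-mode definition (a numpy sketch with
simplified 1D sampling, not the xrt implementation), the coherent fraction is
the relative weight of the strongest mode of the sampled mutual intensity:

.. code-block:: python

    import numpy as np

    def coherent_fraction(fields):
        # fields: (repeats, npoints) complex filament fields on a 1D screen
        J = fields.conj().T @ fields / len(fields)  # mutual intensity matrix
        w = np.linalg.eigvalsh(J)                   # mode weights, ascending
        return w[-1] / w.sum()                      # weight of the zeroth mode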
CXI branch
~~~~~~~~~~
.. rubric:: 2D vs 1D
Although the sample screen images are of good quality (the dark field is
almost black), the mirror footprints may be noisy and not well converged at
the periphery. Compare the M3 footprint with that in the previous section
(STXM branch): the difference is in the mirror area and thus in the sampling
density. The 10\ :sup:`6` wave samples used (i.e. 10\ :sup:`12` possible
paths) are not enough for the slightly enlarged area in the present example.
The propagation is therefore performed separately in the horizontal and
vertical directions, which dramatically improves the quality of the
footprints. The disadvantages of such cuts are a poorer visual representation
and an incorrect evaluation of the flux.
+------+----------------------+-----------------------+-----------------------+
| | 2D | 1D horizontal cut | 1D vertical cut |
+======+======================+=======================+=======================+
| |M3| | |cxiM32D| | |cxiM31Dh| | |cxiM31Dv| |
+------+----------------------+-----------------------+-----------------------+
| |SS| | |cxiS2D| | |cxiS1Dh| | |cxiS1Dv| |
+------+----------------------+-----------------------+-----------------------+
.. |M3| replace:: M3 footprint
.. |SS| replace:: sample screen
.. |cxiM32D| imagezoom:: _images/cxi_2D-2-hybr-0emit-0enSpread-monoE-03-M3local.*
.. |cxiM31Dh| imagezoom:: _images/cxi_1D-2-hybr-1e6hor-0emit-0enSpread-monoE-03-M3local.*
.. |cxiM31Dv| imagezoom:: _images/cxi_1D-2-hybr-1e6ver-0emit-0enSpread-monoE-03-M3local.*
:loc: upper-right-corner
.. |cxiS2D| animation:: _images/cxi_S2D
.. |cxiS1Dh| animation:: _images/cxi_S1Dh
.. |cxiS1Dv| animation:: _images/cxi_S1Dv
:loc: upper-right-corner
.. _wavefronts:
.. rubric:: Flat screen vs normal-to-k screen (wave front)
The following images demonstrate the correctness of the directional
Kirchhoff-like integral (see :ref:`seq_prop`). Five diffraction integrals are
calculated on flat screens around the focus position: for two polarizations
and for three directional components. The latter define the wave fronts at
every flat screen position; these wave fronts are then used as new curved
screens. The calculated diffraction fields on these curved screens have
narrow phase distributions, as shown by the color histograms, which is indeed
expected for a wave front by its definition. In contrast, the *flat* screens
at the same positions show rapid phase variation over several Fresnel zones.
.. note::
In the process of wave propagation, wave fronts -- surfaces of
constant phase -- are not used in any way. We therefore call it “wave
propagation”, not “wave *front* propagation” as frequently called by
others. The wave fronts in this example were calculated to solely
demonstrate the correctness of the local propagation directions after
having calculated the diffracted field.
+------------------------------+------------------------------+
| flat screen | curved screen (wave front) |
+==============================+==============================+
| |cxiFlat| | |cxiFront| |
+------------------------------+------------------------------+
.. |cxiFlat| animation:: _images/cxi-S1DhFlat
.. |cxiFront| animation:: _images/cxi-S1DhFront
:loc: upper-right-corner
The curvature of the calculated wave fronts varies across the focus position.
The wave fronts become flatter as one approaches the focus, see the figure
below. This is in contrast to *ray* propagation, where the angular ray
distribution is invariant at any position between two optical elements.
.. imagezoom:: _images/cxi_waveFronts.*
.. rubric:: Rays, waves and hybrid
The following images are horizontal cuts at the footprints and sample screens
calculated by
- rays,
- rays + waves hybrid (rays up to PG and wave from PG) and
- purely by waves.
+-----------------+-------------------+-------------------+-------------------+
| | rays | hybrid | waves |
+=================+===================+===================+===================+
| front end slit | |cxi-hFE| | same as rays | |cxi-wFE| |
+-----------------+-------------------+-------------------+-------------------+
| footprint on M1 | |cxi-hM1| | same as rays | |cxi-wM1| |
+-----------------+-------------------+-------------------+-------------------+
| footprint on M2 | |cxi-hM2| | same as rays | |cxi-wM2| |
+-----------------+-------------------+-------------------+-------------------+
| footprint on PG | |cxi-hPG| | same as rays | |cxi-wPG| |
+-----------------+-------------------+-------------------+-------------------+
| footprint on M3 | |cxi-rM3| | |cxi-hM3| | |cxi-wM3| |
+-----------------+-------------------+-------------------+-------------------+
| exit slit | |cxi-rES| | |cxi-hES| | |cxi-wES| |
+-----------------+-------------------+-------------------+-------------------+
| footprint on M4 | |cxi-rM4| | |cxi-hM4| | |cxi-wM4| |
+-----------------+-------------------+-------------------+-------------------+
| footprint on M5 | |cxi-rM5| | |cxi-hM5| | |cxi-wM5| |
+-----------------+-------------------+-------------------+-------------------+
| sample screen | |cxi-rS| | |cxi-hS| | |cxi-wS| |
+-----------------+-------------------+-------------------+-------------------+
.. |cxi-hFE| imagezoom:: _images/cxi_1D-1-rays-hor-0emit-0enSpread-monoE-00-FE.*
.. |cxi-wFE| imagezoom:: _images/cxi_1D-3-wave-hor-0emit-0enSpread-monoE-00-FE.*
:loc: upper-right-corner
.. |cxi-hM1| imagezoom:: _images/cxi_1D-1-rays-hor-0emit-0enSpread-monoE-01-M1local.*
.. |cxi-wM1| imagezoom:: _images/cxi_1D-3-wave-hor-0emit-0enSpread-monoE-01-M1local.*
:loc: upper-right-corner
.. |cxi-hM2| imagezoom:: _images/cxi_1D-1-rays-hor-0emit-0enSpread-monoE-02-M2local.*
.. |cxi-wM2| imagezoom:: _images/cxi_1D-3-wave-hor-0emit-0enSpread-monoE-02-M2local.*
:loc: upper-right-corner
.. |cxi-hPG| imagezoom:: _images/cxi_1D-1-rays-hor-0emit-0enSpread-monoE-02-PGlocal.*
.. |cxi-wPG| imagezoom:: _images/cxi_1D-3-wave-hor-0emit-0enSpread-monoE-02-PGlocal.*
:loc: upper-right-corner
.. |cxi-rM3| imagezoom:: _images/cxi_1D-1-rays-hor-0emit-0enSpread-monoE-03-M3local.*
.. |cxi-hM3| imagezoom:: _images/cxi_1D-2-hybr-hor-0emit-0enSpread-monoE-03-M3local.*
.. |cxi-wM3| imagezoom:: _images/cxi_1D-3-wave-hor-0emit-0enSpread-monoE-03-M3local.*
:loc: upper-right-corner
.. |cxi-rES| imagezoom:: _images/cxi_1D-1-rays-hor-0emit-0enSpread-monoE-04-ExitSlit.*
.. |cxi-hES| imagezoom:: _images/cxi_1D-2-hybr-hor-0emit-0enSpread-monoE-04-ExitSlit.*
.. |cxi-wES| imagezoom:: _images/cxi_1D-3-wave-hor-0emit-0enSpread-monoE-04-ExitSlit.*
:loc: upper-right-corner
.. |cxi-rM4| imagezoom:: _images/cxi_1D-1-rays-hor-0emit-0enSpread-monoE-05-M4local.*
.. |cxi-hM4| imagezoom:: _images/cxi_1D-2-hybr-hor-0emit-0enSpread-monoE-05-M4local.*
.. |cxi-wM4| imagezoom:: _images/cxi_1D-3-wave-hor-0emit-0enSpread-monoE-05-M4local.*
:loc: upper-right-corner
.. |cxi-rM5| imagezoom:: _images/cxi_1D-1-rays-hor-0emit-0enSpread-monoE-06-M5local.*
.. |cxi-hM5| imagezoom:: _images/cxi_1D-2-hybr-hor-0emit-0enSpread-monoE-06-M5local.*
.. |cxi-wM5| imagezoom:: _images/cxi_1D-3-wave-hor-0emit-0enSpread-monoE-06-M5local.*
:loc: upper-right-corner
.. |cxi-rS| animation:: _images/cxi-rS
.. |cxi-hS| animation:: _images/cxi-hS
.. |cxi-wS| animation:: _images/cxi-wS
:loc: upper-right-corner
.. rubric:: Coherence signatures
This section demonstrates methods 1 and 3 from :ref:`coh_signs`. Notice again
the difficulty in determining the width of the DoC owing to its complex shape
(at real emittance) or to the restricted field of view (in the 0 emittance
case). In contrast, the eigen-mode analysis yields an almost invariant,
well-defined coherent fraction.
+-----------+--------------------------+--------------------------+
| | 0 emittance | real emittance |
+===========+==========================+==========================+
| method 1 | |cxi-coh1-0emit| | |cxi-coh1-non0e| |
+-----------+--------------------------+--------------------------+
| method 3 | |cxi-coh3-0emit| | |cxi-coh3-non0e| |
+-----------+--------------------------+--------------------------+
.. |cxi-coh1-0emit| animation:: _images/cxi-coh1-0emit
.. |cxi-coh1-non0e| animation:: _images/cxi-coh1-non0e
.. |cxi-coh3-0emit| animation:: _images/cxi-coh3-0emit
.. |cxi-coh3-non0e| animation:: _images/cxi-coh3-non0e
:loc: upper-right-corner
"""
pass
|
import unittest
import pandas as pd
import nose.tools
from mia.features.blobs import detect_blobs
from mia.features.intensity import detect_intensity
from mia.utils import preprocess_image
from ..test_utils import get_file_path
class IntensityTests(unittest.TestCase):
@classmethod
    def setUpClass(cls):
img_path = get_file_path("mias/mdb154.png")
msk_path = get_file_path("mias/masks/mdb154_mask.png")
cls._img, cls._msk = preprocess_image(img_path, msk_path)
# def test_detect_intensity(self):
# blobs = detect_blobs(self._img, self._msk)
# intensity = detect_intensity(self._img, blobs)
#
# nose.tools.assert_true(isinstance(intensity, pd.DataFrame))
# nose.tools.assert_equal(intensity.shape[1], 10)
|
import Bio
from Bio import SeqIO
import sys
import os.path
filename = sys.argv[-1]
outname = filename.split('.')
outname1 = '.'.join([outname[0], 'txt'])
FastaFile = open(filename, 'rU')
f = open(outname1, 'w')
for rec in SeqIO.parse(FastaFile, 'fasta'):
name = rec.id
seq = rec.seq
seqLen = len(rec)
print name, seqLen
f.write("%s\t" % name)
f.write("%s\n" % seqLen)
f.close()
print 'Done'
|
"""
[2015-12-28] Challenge #247 [Easy] Secret Santa
https://www.reddit.com/r/dailyprogrammer/comments/3yiy2d/20151228_challenge_247_easy_secret_santa/
Every December my friends do a "Secret Santa" - the traditional gift exchange
where everybody is randomly assigned to give a gift to a friend. To make
things exciting, the matching is all random (you cannot pick your gift
recipient) and nobody knows who got assigned to who until the day when the
gifts are exchanged - hence, the "secret" in the name.
Since we're a big group with many couples and families, often a husband gets
his wife as secret santa (or vice-versa), or a father is assigned to one of
his children. This creates a series of issues:
* If you have a younger kid and he/she is assigned to you, you might end up
paying for your own gift and ruining the surprise.
* When your significant other asks "who did you get for Secret Santa", you
have to lie, hide gifts, etc.
* The inevitable "this game is rigged!" commentary on the day of revelation.
To fix this, you must design a program that randomly assigns the Secret Santa
gift exchange, but *prevents people from the same family to be assigned to
each other*.
Input: a list of all Secret Santa participants. People who belong to the same
family are listed on the same line separated by spaces. Thus, "Jeff Jerry"
represents two people, Jeff and Jerry, who are family and should not be
assigned to each other.
Joe
Jeff Jerry
Johnson
Output: the list of Secret Santa assignments. As Secret Santa is a random
assignment, the output may vary.
Joe -> Jeff
Johnson -> Jerry
Jerry -> Joe
Jeff -> Johnson
But **not** `Jeff -> Jerry` or `Jerry -> Jeff`!
Sean
Winnie
Brian Amy
Samir
Joe Bethany
Bruno Anna Matthew Lucas
Gabriel Martha Philip
Andre
Danielle
Leo Cinthia
Paula
Mary Jane
Anderson
Priscilla
Regis Julianna Arthur
Mark Marina
Alex Andrea
The assignment list must avoid "closed loops" where smaller subgroups get
assigned to each other, breaking the overall loop.
Joe -> Jeff
Jeff -> Joe # Closed loop of 2
Jerry -> Johnson
Johnson -> Jerry # Closed loop of 2
Thanks to /u/oprimo for his idea in /r/dailyprogrammer_ideas
"""
def main():
pass
if __name__ == "__main__":
main()
|
import os, os.path
from matplotlib import pyplot as plt
from pylab import get_cmap
import SimpleCV as cv
from glob import glob
def show_img(img, ax = None):
if ax is not None:
plt.sca(ax)
nimg = img.getNumpy()
return plt.imshow(nimg, aspect='equal')
path = '/home/will/Dropbox/burnimages/*.jpg'
norm_files = sorted(f for f in glob(path) if '-e' not in f)
masked_files = sorted(f for f in glob(path) if '-e' in f)
fig, axs = plt.subplots(6,6, figsize = (10,10))
for f, ax in zip(norm_files, axs.flatten()):
img = cv.Image(f)
show_img(img, ax = ax)
ax.set_xticks([])
ax.set_yticks([])
fig.tight_layout()
from itertools import islice, izip_longest
from dateutil.parser import parse
def make_wound_mask(norm_img, green_img, color,
minsize = None,
maxsize = None):
wmask = green_img.hueDistance(color).invert().threshold(200)
blobs = norm_img.findBlobsFromMask(wmask,
minsize = minsize,
maxsize = maxsize)
return wmask, blobs
fig, axs = plt.subplots(6,6, figsize = (10,10))
results = []
for fname, mf, of, ax in izip_longest(norm_files, masked_files, norm_files, axs.flatten()):
mask_img = cv.Image(mf)
norm_img = cv.Image(of)
dt = parse(fname.rsplit(os.sep,1)[1].replace('.jpg', '').replace('.',':'))
wound_mask, wound_blobs = make_wound_mask(norm_img, mask_img, cv.Color.GREEN,
minsize = 1000)
dime_mask, dime_blobs = make_wound_mask(norm_img, mask_img, cv.Color.BLUE,
minsize = 500)
layer = cv.DrawingLayer((norm_img.width, norm_img.height))
wound_blobs[-1].drawHull(color=cv.Color.BLUE, width = 100, layer = layer)
dime_blobs[-1].drawHull(color=cv.Color.RED, width = 100, layer = layer)
norm_img.addDrawingLayer(layer)
fnorm = norm_img.applyLayers()
ratio = wound_blobs[-1].area()/dime_blobs[-1].area()
results.append((dt, ratio))
if ax is not None:
show_img(fnorm, ax = ax)
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(ratio)
fig.tight_layout()
import pandas as pd
res_df = pd.DataFrame(sorted(results), columns = ['SampleTime', 'Ratio'])
dime_diameter = 18 #mm
dime_area = 3.141*(dime_diameter/2)**2
res_df['Area-mm2'] = dime_area*res_df['Ratio']
res_df.set_index('SampleTime', inplace=True)
res_df
res_df['Area-mm2'].plot()
out = pd.ewma(res_df['Area-mm2'], freq='d', span = 1)
out.plot(lw = 10, alpha = 0.7)
plt.ylabel('Wound-Area-mm^2')
|
import game
import pygame
from pygame.locals import *
class Resources:
    def cambiar(self, imagen):
        # Reload the player sprites from another sprite sheet
        sheet = game.load_image(imagen)
        rects = [pygame.Rect(112, 2, 26, 40),
                 pygame.Rect(112, 2, 26, 40),
                 pygame.Rect(112, 2, 26, 40),
                 pygame.Rect(4, 4, 30, 38),
                 pygame.Rect(4, 4, 30, 38),
                 pygame.Rect(4, 4, 30, 38)]
        caminando_der = game.load_sprites(sheet, rects, (0, 0, 0))
        caminando_izq = game.flip_sprites(caminando_der)
        rects = [pygame.Rect(76, 2, 26, 40),
                 pygame.Rect(112, 2, 24, 40)]
        quieto_der = game.load_sprites(sheet, rects, (0, 0, 0))
        quieto_izq = game.flip_sprites(quieto_der)
        rects = [pygame.Rect(4, 4, 30, 38),
                 pygame.Rect(38, 4, 30, 36)]
        saltando_der = game.load_sprites(sheet, rects, (0, 0, 0))
        saltando_izq = game.flip_sprites(saltando_der)
        player = [
            [quieto_der, quieto_izq],
            [caminando_der, caminando_izq],
            [saltando_der, saltando_izq]]
        return player

    def __init__(self, imagen='graphics/arc22.png'):
        # Load images
        self.imagen = imagen
        sheet = game.load_image(self.imagen)
        #rects = [#pygame.Rect(514,8,24,34),
        #         pygame.Rect(550,8,30,34),
        #         pygame.Rect(582,8,28,34),
        #         pygame.Rect(550,8,30,34)]
        rects = [pygame.Rect(112, 2, 26, 40),
                 pygame.Rect(112, 2, 26, 40),
                 pygame.Rect(112, 2, 26, 40),
                 pygame.Rect(4, 4, 30, 38),
                 pygame.Rect(4, 4, 30, 38),
                 pygame.Rect(4, 4, 30, 38)]
        caminando_der = game.load_sprites(sheet, rects, (0, 0, 0))
        caminando_izq = game.flip_sprites(caminando_der)
        rects = [pygame.Rect(76, 2, 26, 40),
                 pygame.Rect(112, 2, 24, 40)]
        quieto_der = game.load_sprites(sheet, rects, (0, 0, 0))
        quieto_izq = game.flip_sprites(quieto_der)
        rects = [pygame.Rect(4, 4, 30, 38),
                 pygame.Rect(38, 4, 30, 36)]
        saltando_der = game.load_sprites(sheet, rects, (0, 0, 0))
        saltando_izq = game.flip_sprites(saltando_der)
        self.player = [
            [quieto_der, quieto_izq],
            [caminando_der, caminando_izq],
            [saltando_der, saltando_izq]]
sheet = game.load_image('graphics/blocks11.png')
suelo = game.load_sprite(sheet, pygame.Rect(444,104,32,32))
subsuelo = game.load_sprite(sheet, pygame.Rect(172,138,32,32))
self.tiles = [suelo, subsuelo]
|
'''
author Lama Hamadeh
'''
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import assignment2_helper as helper
matplotlib.style.use('ggplot')
scaleFeatures = True  # feature scaling; if False, no scaling is applied, which affects the 2D plot and the variance values
df=pd.read_csv('/Users/ADB3HAMADL/Desktop/Anaconda_Packages/DAT210x-master/Module4/Datasets/kidney_disease.csv',index_col = 0)
df = df.reset_index(drop=True) #remove the index column
df=df.dropna(axis=0) #remove any and all Rows that have a nan
labels = ['red' if i=='ckd' else 'green' for i in df.classification]
df=df[['bgr', 'rc','wc']] #select only the following columns: bgr, rc, and wc
print(df.dtypes)
df.rc = pd.to_numeric(df.rc, errors='coerce')  # 'rc' and 'wc' parse as objects; coerce them to numeric
df.wc = pd.to_numeric(df.wc, errors='coerce')
print(df.var())
print(df.describe())
if scaleFeatures: df = helper.scaleFeatures(df)
from sklearn import decomposition
pca = decomposition.PCA(n_components=2)
pca.fit(df)
T = pca.transform(df)
ax = helper.drawVectors(T, pca.components_, df.columns.values, plt, scaleFeatures)
T = pd.DataFrame(T)
T.columns = ['component1', 'component2']
T.plot.scatter(x='component1', y='component2', marker='o', c=labels, alpha=0.75, ax=ax)
plt.show()
|
import unittest
import os
from sqltxt.table import Table
from sqltxt.column import Column, ColumnName, AmbiguousColumnNameError
from sqltxt.expression import Expression
class TableTest(unittest.TestCase):
def setUp(self):
self.data_path = os.path.join(os.path.dirname(__file__), '../data')
table_header = ["col_a", "col_b"]
table_contents = """1,1
2,3
3,2"""
self.table_a = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e "{0}"'.format(table_contents),
columns = table_header
)
table_header = ["col_a", "col_b"]
table_contents = """1,w
2,x
2,y
5,z"""
self.table_b = Table.from_cmd(
name = 'table_b',
cmd = 'echo -e "{0}"'.format(table_contents),
columns = table_header
)
def test_subset_rows(self):
conditions = [
[Expression('col_b', '==', '1'), 'or', Expression('col_a', '==', '2')]
]
self.table_a.subset_rows(conditions)
cmds_actual = self.table_a.cmds
cmds_expected = [
'echo -e "1,1\n2,3\n3,2"',
"awk -F',' 'OFS=\",\" { if (($2 == 1 || $1 == 2)) { print $1,$2 } }'"]
self.assertEqual(cmds_actual, cmds_expected)
def test_order_columns(self):
col_name_order = [ColumnName('col_b'), ColumnName('col_a')]
self.table_a.order_columns(col_name_order)
cmds_actual = self.table_a.cmds
cmds_expected = ['echo -e "1,1\n2,3\n3,2"', "awk -F',' 'OFS=\",\" { print $2,$1 }'"]
self.assertEqual(cmds_actual, cmds_expected)
def test_sort(self):
sort_by_col_names = [ColumnName('col_a'), ColumnName('col_b')]
self.table_a.sort(sort_by_col_names)
cmds_actual = self.table_a.cmds
cmds_expected = ['echo -e "1,1\n2,3\n3,2"', "sort -t, -k 1,1 -k 2,2"]
self.assertEqual(cmds_actual, cmds_expected)
sort_by_cols = [self.table_a.get_column_for_name(cn) for cn in sort_by_col_names]
self.assertEqual(self.table_a.sorted_by, sort_by_cols)
def test_is_sorted_by(self):
table_from_cmd = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e ""',
columns = ['col_a', 'col_b'])
table_from_cmd.sorted_by = [Column('table_a.col_a'), Column('table_a.col_b')]
self.assertTrue(table_from_cmd.is_sorted_by([0]))
self.assertFalse(table_from_cmd.is_sorted_by([1]))
self.assertTrue(table_from_cmd.is_sorted_by([0,1]))
def test_get_column_for_name_raises_on_ambiguity(self):
table_from_cmd = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e ""',
columns = ['col_a', 'col_a'])
with self.assertRaisesRegexp(AmbiguousColumnNameError, 'Ambiguous column reference'):
table_from_cmd.get_column_for_name(ColumnName('col_a'))
table_from_cmd = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e ""',
columns = ['ta.col_a', 'tb.col_a'])
with self.assertRaisesRegexp(AmbiguousColumnNameError, 'Ambiguous column reference'):
table_from_cmd.get_column_for_name(ColumnName('col_a'))
first_column = Column('ta.col_a')
first_column.add_name('col_alpha')
second_column = Column('tb.col_a')
table_from_cmd = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e ""',
columns = [first_column, second_column])
with self.assertRaisesRegexp(AmbiguousColumnNameError, 'Ambiguous column reference'):
table_from_cmd.get_column_for_name(ColumnName('col_a'))
def test_sample_rows(self):
self.table_a.sample_rows(1)
cmds_actual = self.table_a.cmds
cmds_expected = ['echo -e "1,1\n2,3\n3,2"',
"""awk -v seed=$RANDOM -v n={0} '
BEGIN {{ srand(seed) }}
NR <= n {{ reservoir[NR] = $0 }}
NR > n {{ M = int(rand() * NR) + 1; if (M <= n) {{ reservoir[M] = $0 }}}}
END {{ for (key in reservoir) {{ print reservoir[key] }}}}'""".format(1)
]
self.assertEqual(cmds_actual, cmds_expected)
def test_get_cmd_str(self):
table_from_file = Table.from_file_path(os.path.join(self.data_path, 'table_a.txt'))
# output from a file-backed Table to STDOUT
cmd_actual = table_from_file.get_cmd_str()
cmd_expected = 'tail -n+2 {}/table_a.txt'.format(self.data_path)
self.assertEqual(cmd_actual, cmd_expected)
table_from_cmd = Table.from_cmd(
'table_a',
cmd = 'echo -e "1,2,3,4"',
columns = ['col_a', 'col_b', 'col_c', 'col_d'])
# output from a command-backed Table to STDOUT
cmd_actual = table_from_cmd.get_cmd_str()
cmd_expected = 'echo -e "1,2,3,4"'
self.assertEqual(cmd_actual, cmd_expected)
# add a command, then output
table_from_cmd.cmds += ['sort']
# to STDOUT
cmd_actual = table_from_cmd.get_cmd_str()
cmd_expected = 'echo -e "1,2,3,4" | sort'
self.assertEqual(cmd_actual, cmd_expected)
|
"""Test a Fast R-CNN network on an image database."""
import argparse
import pprint
import time
import os
import os.path as osp
import sys
import cPickle
import numpy as np
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../external/py-faster-rcnn/lib'))
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
sys.path.insert(0, osp.join(this_dir, '../../external'))
from vdetlib.utils.protocol import proto_load, proto_dump
sys.path.insert(0, osp.join(this_dir, '../../src'))
from tpn.propagate import gt_motion_propagation
from tpn.target import add_track_targets
from tpn.data_io import save_track_proto_to_zip
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument('vid_file')
parser.add_argument('box_file')
parser.add_argument('annot_file', default=None,
help='Ground truth annotation file. [None]')
parser.add_argument('save_file', help='Save zip file')
parser.add_argument('--job', dest='job_id', help='Job slot, GPU ID + 1. [1]',
default=1, type=int)
parser.add_argument('--length', type=int, default=20,
help='Propagation length. [20]')
parser.add_argument('--window', type=int, default=5,
help='Prediction window. [5]')
parser.add_argument('--sample_rate', type=int, default=1,
help='Temporal subsampling rate. [1]')
parser.add_argument('--offset', type=int, default=0,
help='Offset of sampling. [0]')
parser.add_argument('--overlap', type=float, default=0.5,
help='GT overlap threshold for tracking. [0.5]')
parser.add_argument('--wait', dest='wait',
help='wait until net file exists',
default=True, type=bool)
parser.set_defaults(vis=False, zip=False, keep_feat=False)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print 'Called with args:'
print args
if osp.isfile(args.save_file):
print "{} already exists.".format(args.save_file)
sys.exit(1)
vid_proto = proto_load(args.vid_file)
box_proto = proto_load(args.box_file)
annot_proto = proto_load(args.annot_file)
track_proto = gt_motion_propagation(vid_proto, box_proto, annot_proto,
window=args.window, length=args.length,
sample_rate=args.sample_rate, overlap_thres=args.overlap)
# add ground truth targets if annotation file is given
add_track_targets(track_proto, annot_proto)
if args.zip:
save_track_proto_to_zip(track_proto, args.save_file)
else:
proto_dump(track_proto, args.save_file)
|
import django_filters
from django import forms
from django.utils.translation import ugettext_lazy as _
from courses.models import Course
from issues.models import Issue
from issues.model_issue_status import IssueStatus
class IssueFilterStudent(django_filters.FilterSet):
is_active = django_filters.ChoiceFilter(label=_('tip_kursa'), name='task__course__is_active')
years = django_filters.MultipleChoiceFilter(
label=_('god_kursa'),
name='task__course__year',
widget=forms.CheckboxSelectMultiple
)
courses = django_filters.MultipleChoiceFilter(label=_('kurs'), name='task__course', widget=forms.SelectMultiple)
responsible = django_filters.MultipleChoiceFilter(label=_('prepodavateli'), widget=forms.SelectMultiple)
status_field = django_filters.MultipleChoiceFilter(label=_('status'), widget=forms.SelectMultiple)
update_time = django_filters.DateRangeFilter(label=_('data_poslednego_izmenenija'))
def set_user(self, user):
for field in self.filters:
self.filters[field].field.label = u'<strong>{0}</strong>'.format(self.filters[field].field.label)
groups = user.group_set.all()
courses = Course.objects.filter(groups__in=groups)
course_choices = set()
year_choices = set()
teacher_set = set()
status_set = set()
for course in courses:
course_choices.add((course.id, course.name))
year_choices.add((course.year.id, unicode(course.year)))
for teacher in course.get_teachers():
teacher_set.add(teacher)
for status in course.issue_status_system.statuses.all():
status_set.add(status)
self.filters['is_active'].field.choices = ((u'', _(u'luboj')),
(1, _(u'aktivnyj')),
(0, _(u'arhiv')))
self.filters['years'].field.choices = tuple(year_choices)
self.filters['courses'].field.choices = tuple(course_choices)
teacher_choices = [(teacher.id, teacher.get_full_name()) for teacher in teacher_set]
self.filters['responsible'].field.choices = tuple(teacher_choices)
lang = user.profile.language
status_choices = [(status.id, status.get_name(lang)) for status in status_set]
for status_id in sorted(IssueStatus.HIDDEN_STATUSES.values(), reverse=True):
status_field = IssueStatus.objects.get(pk=status_id)
status_choices.insert(0, (status_field.id, status_field.get_name(lang)))
self.filters['status_field'].field.choices = tuple(status_choices)
class Meta:
model = Issue
fields = ['is_active', 'years', 'courses', 'responsible', 'status_field', 'update_time']
|
from django.db import models
import datetime
def get_choices(lst):
return [(i, i) for i in lst]
pprint_pan = lambda pan: "%s %s %s" % (pan[:5], pan[5:9], pan[9:])
class Person(models.Model):
name = models.CharField(max_length=255, db_index=True)
fathers_name = models.CharField(max_length=255, null=True, blank=True, db_index=True)
status = models.CharField(max_length=32, choices=get_choices([
'Individual',
'HUF',
'Partnership Firm',
'Domestic Company',
'LLP',
'Trust(ITR 7)',
]), default='Individual Salaried')
employer = models.CharField(max_length=64, null=True, blank=True)
self_occupied = models.BooleanField()
pan_number = models.CharField(max_length=32, unique=True)
user_id = models.CharField(max_length=32, null=True, blank=True)
password = models.CharField(max_length=32, null=True, blank=True)
bank_name = models.CharField(max_length=255, null=True, blank=True)
bank_branch = models.CharField(max_length=255, null=True, blank=True)
account_number = models.CharField(max_length=32, null=True, blank=True)
micr = models.CharField(max_length=32, blank=True, null=True)
ifsc_code = models.CharField(max_length=32, null=True, blank=True)
account_type = models.CharField(max_length=32, choices=get_choices(['SB', 'CA', 'CC']), default='SB')
contact_number = models.CharField(max_length=13, null=True, blank=True, db_index=True)
email = models.EmailField(null=True, blank=True, db_index=True)
address = models.TextField(max_length=32, null=True, blank=True)
city = models.CharField(max_length=64, null=True, blank=True, db_index=True)
pincode = models.CharField(max_length=10, null=True, blank=True, db_index=True)
date_of_birth_or_incarnation = models.DateField(null=True, blank=True)
def pan_number_pprint(self):
return pprint_pan(self.pan_number)
pan_number_pprint.admin_order_field = 'pan_number_pprint'
pan_number_pprint.short_description = 'Pan Number'
def _trim(self, *args):
for field in args:
value = getattr(self, field)
setattr(self, field, value.replace(' ', ''))
    def save(self, *args, **kwargs):
        self._trim('pan_number')
        super(Person, self).save(*args, **kwargs)
def __unicode__(self):
return u'%s (%s)' % (self.name, self.pan_number)
class MetadataPerson(models.Model):
person = models.ForeignKey(Person)
key = models.CharField(max_length=250)
value = models.CharField(max_length=250)
class Report(models.Model):
finanyr = lambda yr: "%s - %s" % (yr, yr+1)
years = [(finanyr(i), finanyr(i)) for i in xrange(1980, 2020)]
person = models.ForeignKey(Person)
financial_year = models.CharField(max_length=11, choices=years, default=finanyr(datetime.datetime.now().year - 1))
assessment_year = models.CharField(max_length=11, choices=years, default=finanyr(datetime.datetime.now().year))
return_filed_on = models.DateField()
returned_income = models.DecimalField(max_digits=12, decimal_places=2)
#Advanced Tax
july = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
september = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
december = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
march = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
#Interest Detail
interest_234_a = models.DecimalField("Interest 234(a)", max_digits=12, decimal_places=2, null=True, blank=True)
interest_234_b = models.DecimalField("Interest 234(b)", max_digits=12, decimal_places=2, null=True, blank=True)
interest_234_c = models.DecimalField("Interest 234(c)", max_digits=12, decimal_places=2, null=True, blank=True)
#Tax detail
tds = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
self_assessment_tax = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
acknowledgement_number = models.CharField("Ack no.", max_length=64, null=True, blank=True)
#Bill Detail
bill_raised_on = models.DateField(null=True, blank=True)
bill_amount = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
bill_received = models.BooleanField("Bill received ?")
mode_of_payment = models.CharField(max_length=16, choices=get_choices(['Cash', 'Cheque', 'DD', 'Bank Transfer']), null=True, blank=True)
payment_detail = models.CharField(max_length=16, null=True, blank=True)
#Order 143(1)
order_received_on_143_1 = models.DateField("143(1) Order received on", null=True, blank=True)
assessed_income_143_1 = models.DecimalField("Assessed income", max_digits=12, decimal_places=2, null=True, blank=True)
assessed_tax_143_1 = models.DecimalField("Assessed tax", max_digits=12, decimal_places=2, null=True, blank=True)
refund_amount_143_1 = models.DecimalField("Refund amount", max_digits=12, decimal_places=2, null=True, blank=True)
demand_raised_amount_143_1 = models.DecimalField("Demand raised for ", max_digits=12, decimal_places=2, null=True, blank=True)
refund_received_on_143_1 = models.DateField("Refund received on", null=True, blank=True)
#Order 143(2)
order_received_on_143_2 = models.DateField("Notice received on", null=True, blank=True)
#Order 143(3)
order_received_on_143_3 = models.DateField("Order received on", null=True, blank=True)
assessed_income_143_3 = models.DecimalField("Assessed income", max_digits=12, decimal_places=2, null=True, blank=True)
assessed_tax_143_3 = models.DecimalField("Assessed tax", max_digits=12, decimal_places=2, null=True, blank=True)
refund_amount_143_3 = models.DecimalField("Refund amount", max_digits=12, decimal_places=2, null=True, blank=True)
demand_raised_amount_143_3 = models.DecimalField("Demand raised for", max_digits=12, decimal_places=2, null=True, blank=True)
refund_received_on_143_3 = models.DateField("Refund received on", null=True, blank=True)
#Appeal before cit
filed_on_cit = models.DateField("Filed on", null=True, blank=True)
order_received_on_cit = models.DateField("Order received on", null=True, blank=True)
assessed_income_cit = models.DecimalField("Assessed income", max_digits=12, decimal_places=2, null=True, blank=True)
assessed_tax_cit = models.DecimalField("Assessed tax", max_digits=12, decimal_places=2, null=True, blank=True)
#Appeal before tribunal
filed_on_tribunal = models.DateField("Filed on", null=True, blank=True)
order_received_on_tribunal = models.DateField("Order received on", null=True, blank=True)
filed_by_tribunal = models.CharField("Filed by", max_length=16, choices=get_choices(['assessee', 'department']), null=True, blank=True)
assessed_income_tribunal = models.DecimalField("Assessed income", max_digits=12, decimal_places=2, null=True, blank=True)
assessed_tax_tribunal = models.DecimalField("Assessed tax", max_digits=12, decimal_places=2, null=True, blank=True)
def got_reimbursement(self):
return self.refund_amount_143_1 > 0
got_reimbursement.admin_order_field = 'got_reimbursement'
got_reimbursement.boolean = True
got_reimbursement.short_description = 'Got reimbursement ?'
def tax_paid(self):
tax = sum([i for i in (self.march, self.september, self.december, self.july) if i is not None])
if tax == 0 and self.tds is not None:
tax = self.tds
return tax
tax_paid.admin_order_field = 'tax_paid'
tax_paid.boolean = False
tax_paid.short_description = 'Tax Paid'
class Meta:
unique_together = ('person', 'financial_year')
def __unicode__(self):
return u'%s - %s' % (self.person, self.financial_year)
class MetadataReport(models.Model):
report = models.ForeignKey(Report)
key = models.CharField(max_length=250)
value = models.CharField(max_length=250)
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="serverless-wsgi",
version="3.0.0",
python_requires=">3.6",
author="Logan Raarup",
author_email="logan@logan.dk",
description="Amazon AWS API Gateway WSGI wrapper",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/logandk/serverless-wsgi",
py_modules=["serverless_wsgi"],
install_requires=["werkzeug>2"],
classifiers=(
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
keywords="wsgi serverless aws lambda api gateway apigw flask django pyramid",
)
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='order',
name='paid',
field=models.BooleanField(default=False),
),
]
|
from click.testing import CliRunner
from sqlitebiter.__main__ import cmd
from sqlitebiter._const import ExitCode
from .common import print_traceback
class Test_version_subcommand:
def test_smoke(self):
runner = CliRunner()
result = runner.invoke(cmd, ["version"])
print_traceback(result)
assert result.exit_code == ExitCode.SUCCESS
|
from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404
from buildfarm.models import Package, Queue
from repository.models import Repository, PisiPackage
from source.models import SourcePackage
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
import xmlrpclib
from django.template.loader import render_to_string
from django.utils import simplejson
from django.template import Context, Template
from django import forms
from django.db import transaction
from django.shortcuts import redirect
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib import messages
from buildfarm.tasks import build_all_in_queue
class NewQueueForm (forms.ModelForm):
class Meta:
model = Queue
fields = ( 'name', 'builder', 'source_repo', 'binman', 'sandboxed')
def site_index (request):
queues = Queue.objects.all ()
context = { 'queues': queues, 'navhint': 'queue', 'not_reload': 'true', 'form' : NewQueueForm() }
return render (request, "buildfarm/site_index.html", context)
def package_progress_json (request, queue_id):
rdict = {}
q = Queue.objects.get(id=queue_id)
packages = Package.objects.filter(queue=q)
    pct = float(q.current) / q.length * 100 if q.length else 0
rdict = { 'percent' : pct, 'total': q.length, 'current': q.current, 'name_current': q.current_package_name }
json = simplejson.dumps(rdict, ensure_ascii=False)
return HttpResponse( json, content_type='application/json')
@staff_member_required
def delete_from_queue (request, package_id):
pkg = get_object_or_404 (Package, id=package_id)
q_id = pkg.queue.id
pkg.delete ()
return redirect ('/buildfarm/queue/%d/' % q_id)
@staff_member_required
def delete_queue (request, queue_id):
queue = get_object_or_404 (Queue, id=queue_id)
queue.delete ()
return redirect ('/manage/')
@staff_member_required
def new_queue (request):
if request.method == 'POST':
# New submission
form = NewQueueForm (request.POST)
rdict = { 'html': "<b>Fail</b>", 'tags': 'fail' }
context = Context ({'form': form})
if form.is_valid ():
rdict = { 'html': "The new queue has been set up", 'tags': 'success' }
model = form.save (commit=False)
model.current = 0
model.length = 0
model.current_package_name = ""
model.save ()
else:
html = render_to_string ('buildfarm/new_queue.html', {'form_queue': form})
rdict = { 'html': html, 'tags': 'fail' }
json = simplejson.dumps(rdict, ensure_ascii=False)
print json
# And send it off.
return HttpResponse( json, content_type='application/json')
else:
form = NewQueueForm ()
context = {'form': form }
return render (request, 'buildfarm/new_queue.html', context)
def queue_index(request, queue_id=None):
q = get_object_or_404 (Queue, id=queue_id)
packages = Package.objects.filter(queue=q).order_by('build_status')
paginator = Paginator (packages, 15)
pkg_count = q.length
if (pkg_count > 0):
pct =float ( float(q.current) / q.length ) * 100
else:
pct = 0
page = request.GET.get("page")
try:
packages = paginator.page(page)
except PageNotAnInteger:
packages = paginator.page (1)
except EmptyPage:
packages = paginator.page (paginator.num_pages)
context = {'navhint': 'queue', 'queue': q, 'package_list': packages, 'total_packages': q.length, 'current_package': q.current, 'total_pct': pct, 'current_package_name': q.current_package_name}
return render (request, "buildfarm/index.html", context)
@staff_member_required
def build_queue (request, queue_id):
queue = Queue.objects.get (id=queue_id)
messages.info (request, "Starting build of \"%s\" queue" % queue.name)
build_all_in_queue.delay (queue_id)
return redirect ('/manage/')
@staff_member_required
def populate_queue (request, queue_id):
q = Queue.objects.get(id=queue_id)
packages = SourcePackage.objects.filter (repository=q.source_repo)
failList = list ()
for package in packages:
binaries = PisiPackage.objects.filter(source_name=package.name)
if len(binaries) == 0:
# We have no binaries
print "New package for source: %s" % (package.name)
failList.append (package)
else:
for package2 in binaries:
if package2.release != package.release:
print "Newer release for: %s" % package2.name
failList.append (package)
break
try:
binary = Package.objects.get(queue=q, name=package.name)
failList.remove (package)
except:
pass
with transaction.commit_on_success():
for fail in failList:
pkg = Package ()
pkg.name = fail.name
pkg.version = fail.version
pkg.build_status = "pending"
pkg.queue = q
pkg.spec_uri = fail.source_uri
pkg.save ()
return redirect ("/buildfarm/queue/%d" % q.id)
|
from django.core.management.base import BaseCommand, CommandError
from ship_data.models import GpggaGpsFix
import datetime
from main import utils
import csv
import os
from django.db.models import Q
import glob
from main.management.commands import findgpsgaps
gps_bridge_working_intervals = None
class Command(BaseCommand):
help = 'Outputs the track in CSV format.'
def add_arguments(self, parser):
parser.add_argument('output_directory', type=str, help="Will delete existing files that started on the same start date")
parser.add_argument('start', type=str, help="Start of the GPS data. Format: YYYYMMDD")
parser.add_argument('end', type=str, help="End of the GPS data. Format: YYYYMMDD or 'yesterday'")
def handle(self, *args, **options):
generate_all_tracks(options['output_directory'], options['start'], options['end'])
def generate_all_tracks(output_directory, start, end):
global gps_bridge_working_intervals
gps_gaps = findgpsgaps.FindDataGapsGps("GPS Bridge1", start, end)
gps_bridge_working_intervals = gps_gaps.find_gps_missings()
generate_fast(output_directory, 3600, "1hour", start, end)
generate_fast(output_directory, 300, "5min", start, end)
generate_fast(output_directory, 60, "1min", start, end)
generate_fast(output_directory, 1, "1second", start, end)
def generate_fast(output_directory, seconds, file_suffix, start, end):
"""
This method uses Mysql datetime 'ends with' instead of doing individual queries
for each 'seconds'. It's faster but harder to find gaps in the data.
"""
first_date = datetime.datetime.strptime(start, "%Y%m%d")
first_date = utils.set_utc(first_date)
if end == "yesterday":
last_date = utils.last_midnight()
else:
last_date = datetime.datetime.strptime(end, "%Y%m%d")
last_date = utils.set_utc(last_date)
starts_file_format = first_date.strftime("%Y%m%d")
ends_file_format = last_date.strftime("%Y%m%d")
filename = "track_{}_{}_{}.csv".format(starts_file_format, ends_file_format, file_suffix)
files_to_delete = glob.glob(os.path.join(output_directory, "track_{}_*_{}.csv".format(starts_file_format,
file_suffix)))
print("Will start processing:", filename)
file_path = os.path.join(output_directory, filename)
if file_path in files_to_delete:
files_to_delete.remove(file_path) # In case that this script is re-generating the file
file = open(file_path + ".tmp", "w")
csv_writer = csv.writer(file)
csv_writer.writerow(["date_time", "latitude", "longitude"])
one_day = datetime.timedelta(days=1)
current_day = first_date
while current_day <= last_date:
process_day(current_day, seconds, csv_writer)
current_day += one_day
delete_files(files_to_delete)
file.close()
os.rename(file_path + ".tmp", file_path)
def process_day(date_time_process, seconds, csv_writer):
date_time_process_tomorrow = date_time_process + datetime.timedelta(days=1)
today_filter = Q(date_time__gte=date_time_process) & Q(date_time__lt=date_time_process_tomorrow)
if seconds == 1:
query_set = GpggaGpsFix.objects.filter(today_filter).order_by('date_time')
elif seconds == 60:
query_set = GpggaGpsFix.objects.filter(today_filter).filter(date_time__contains=':01.').order_by('date_time')
elif seconds == 300:
query_set = GpggaGpsFix.objects.filter(today_filter).filter(Q(date_time__contains=':00:01.') |
Q(date_time__contains=':05:01.') |
Q(date_time__contains=':10:01.') |
Q(date_time__contains=':15:01.') |
Q(date_time__contains=':20:01.') |
Q(date_time__contains=':25:01.') |
Q(date_time__contains=':30:01.') |
Q(date_time__contains=':35:01.') |
Q(date_time__contains=':40:01.') |
Q(date_time__contains=':45:01.') |
Q(date_time__contains=':50:01.') |
Q(date_time__contains=':55:01.')).order_by('date_time')
elif seconds == 3600:
query_set = GpggaGpsFix.objects.filter(today_filter).filter(date_time__contains=':00:01').order_by('date_time')
else:
        assert False, "no datetime filter defined for {} seconds; add a case above".format(seconds)
# 64: GPS Bridge
# 63: GPS Trimble
query_set = query_set.filter(utils.filter_out_bad_values())
previous_date_time_string = ""
for gps_info in query_set.iterator():
date_time_string = gps_info.date_time.strftime("%Y-%m-%d %H:%M:%S")
if date_time_string == previous_date_time_string:
continue
if which_gps(date_time_string) == "GPS Bridge1":
if gps_info.device_id == 64:
l = [gps_info.date_time.strftime("%Y-%m-%d %H:%M:%S"),
"{:.4f}".format(gps_info.latitude),
"{:.4f}".format(gps_info.longitude)]
# print(l)
csv_writer.writerow(l)
previous_date_time_string = date_time_string
else:
if gps_info.device_id == 63:
l = [gps_info.date_time.strftime("%Y-%m-%d %H:%M:%S"),
"{:.4f}".format(gps_info.latitude),
"{:.4f}".format(gps_info.longitude)]
# print(l)
csv_writer.writerow(l)
previous_date_time_string = date_time_string
def delete_files(files):
for file in files:
print("Deleting file:", file)
os.remove(file)
def generate_method_1(output_directory, seconds, file_suffix):
"""
This method does a query every 'seconds'. Very slow, could be used to find gaps easily on the data.
As it is now it is difficult to decide which GPS the get comes from.
"""
time_delta = datetime.timedelta(seconds=seconds)
first_date = GpggaGpsFix.objects.earliest().date_time
last_date = GpggaGpsFix.objects.latest().date_time
filename = "track_{}_{}_{}.csv".format(first_date.strftime("%Y%m%d"), last_date.strftime("%Y%m%d"), file_suffix)
print("Will start processing:", filename)
file_path = os.path.join(output_directory, filename)
file = open(file_path, "w")
csv_writer = csv.writer(file)
csv_writer.writerow(["date_time", "latitude", "longitude"])
current_date = first_date
previous_date = current_date
while current_date < last_date:
location = utils.ship_location(current_date)
if location.date_time != previous_date:
if location.date_time is not None and location.latitude is not None and location.longitude is not None:
csv_writer.writerow([location.date_time.strftime("%Y-%m-%d %H:%M:%S"), "{:.4f}".format(location.latitude), "{:.4f}".format(location.longitude)])
if location.date_time is None:
print("No data for:", current_date)
if previous_date.day != current_date.day:
print("Generating CSV GPS info:", current_date)
previous_date = current_date
current_date = current_date + time_delta
def which_gps(date_time_str):
for interval in gps_bridge_working_intervals:
if interval['starts'] < date_time_str <= interval['stops']:
return "GPS Bridge1"
return "Trimble GPS"
|
from yaml import safe_load
from os import environ
from os.path import join, isfile
from ..module_ultra_repo import ModuleUltraRepo
from ..module_ultra_config import ModuleUltraConfig
class RepoDaemonConfig:
"""Represent a MU repo to the MU daemon."""
def __init__(self, **kwargs):
self.repo_name = kwargs['repo_name']
self.repo_path = kwargs['repo_path']
self.pipelines = kwargs['pipelines']
def get_repo(self):
"""Return the MU repo that this represents."""
return ModuleUltraRepo(self.repo_path)
def get_pipeline_list(self):
"""Return a list of (pipe_name, version)."""
return [(pipe['name'], pipe['version']) for pipe in self.pipelines]
def get_pipeline_tolerance(self, pipe_name):
"""Return tolerance for the pipeline."""
for pipe in self.pipelines:
if pipe['name'] == pipe_name:
return pipe.get('tolerance', 0)
def get_pipeline_endpts(self, pipe_name):
"""Return a list of endpts or None."""
return None
def get_pipeline_excluded_endpts(self, pipe_name):
"""Return a list of excluded endpts or None."""
return None
class DaemonConfig:
"""Store config information for the MU daemon."""
    def __init__(self, repos, total_jobs=10, run_local=True, pipeline_configs=None):
        self.repos = repos
        self.total_jobs = int(total_jobs)
        self.run_local = run_local
        # avoid the mutable-default-argument pitfall
        self.pipeline_configs = pipeline_configs if pipeline_configs is not None else {}
def list_repos(self):
"""Return a list of RepoDaemonConfigs."""
repo_configs = []
for repo_name, repo_path, pipelines in self.repos:
repo_configs.append(RepoDaemonConfig(**{
'repo_name': repo_name,
'repo_path': repo_path,
'pipelines': pipelines,
}))
return repo_configs
def get_pipeline_run_config(self, pipe_name, pipe_version):
"""Return a filepath for the config to be used or None."""
return None
@classmethod
def get_daemon_config_filename(ctype):
try:
return environ['MODULE_ULTRA_DAEMON_CONFIG']
except KeyError:
config_dir = ModuleUltraConfig.getConfigDir()
config_filename = join(config_dir, 'daemon_config.yaml')
if isfile(config_filename):
return config_filename
assert False, "No daemon config found"
@classmethod
def load_from_yaml(ctype, yaml_filename=None):
yaml_filename = yaml_filename if yaml_filename else ctype.get_daemon_config_filename()
        with open(yaml_filename) as f:
            raw_config = safe_load(f)
raw_repos = raw_config['repos']
repo_list = [
(raw_repo['name'], raw_repo['path'], raw_repo['pipelines'])
for raw_repo in raw_repos
]
return DaemonConfig(
repo_list,
total_jobs=raw_config.get('num_jobs', 10),
run_local=raw_config.get('run_on_cluster', True),
pipeline_configs=raw_config.get('pipeline_configs', {})
)
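# A sketch of the daemon_config.yaml shape that load_from_yaml() expects, inferred
# from the keys read above (all values are placeholders):
# repos:
#   - name: my_repo
#     path: /path/to/my_repo
#     pipelines:
#       - name: my_pipeline
#         version: 1.0.0
#         tolerance: 1
# num_jobs: 10
# run_on_cluster: true
# pipeline_configs: {}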
|
import imaplib
import re
from libqtile.log_utils import logger
from libqtile.widget import base
class GmailChecker(base.ThreadPoolText):
"""A simple gmail checker. If 'status_only_unseen' is True - set 'fmt' for one argument, ex. 'unseen: {0}'"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("update_interval", 30, "Update time in seconds."),
("username", None, "username"),
("password", None, "password"),
("email_path", "INBOX", "email_path"),
("display_fmt", "inbox[{0}],unseen[{1}]", "Display format"),
("status_only_unseen", False, "Only show unseen messages"),
]
def __init__(self, **config):
base.ThreadPoolText.__init__(self, "", **config)
self.add_defaults(GmailChecker.defaults)
def poll(self):
self.gmail = imaplib.IMAP4_SSL('imap.gmail.com')
self.gmail.login(self.username, self.password)
answer, raw_data = self.gmail.status(self.email_path,
'(MESSAGES UNSEEN)')
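        # a successful STATUS reply typically looks like
        # [b'"INBOX" (MESSAGES 42 UNSEEN 3)'] (illustrative counts)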
if answer == "OK":
dec = raw_data[0].decode()
messages = int(re.search(r'MESSAGES\s+(\d+)', dec).group(1))
unseen = int(re.search(r'UNSEEN\s+(\d+)', dec).group(1))
            if self.status_only_unseen:
return self.display_fmt.format(unseen)
else:
return self.display_fmt.format(messages, unseen)
else:
            logger.error(
                'GmailChecker UNKNOWN error, answer: %s, raw_data: %s',
                answer, raw_data)
return "UNKNOWN ERROR"
|
def gpio_init(pin, output):
try:
with open(f"/sys/class/gpio/gpio{pin}/direction", 'wb') as f:
f.write(b"out" if output else b"in")
except Exception as e:
print(f"Failed to set gpio {pin} direction: {e}")
def gpio_set(pin, high):
try:
with open(f"/sys/class/gpio/gpio{pin}/value", 'wb') as f:
f.write(b"1" if high else b"0")
except Exception as e:
print(f"Failed to set gpio {pin} value: {e}")
|
"""
AppHtml settings
@author Toshiya NISHIO(http://www.toshiya240.com)
"""
defaultTemplate = {
'1) 小さいボタン': '${badgeS}',
'2) 大きいボタン': '${badgeL}',
'3) テキストのみ': '${textonly}',
"4) アイコン付き(小)": u"""<span class="appIcon"><img class="appIconImg" height="60" src="${icon60url}" style="float:left;margin: 0px 15px 15px 5px;"></span>
<span class="appName"><strong><a href="${url}" target="itunes_store">${name}</a></strong></span><br>
<span class="appCategory">カテゴリ: ${category}</span><br>
<span class="badgeS" style="display:inline-block; margin:6px">${badgeS}</span><br style="clear:both;">
""",
"5) アイコン付き(大)": u"""<span class="appIcon"><img class="appIconImg" height="100" src="${icon100url}" style="float:left;;margin: 0px 15px 15px 5px;"></span>
<span class="appName"><strong><a href="${url}" target="itunes_store">${name}</a></strong></span><br>
<span class="appCategory">カテゴリ: ${category}</span><br>
<span class="badgeL" style="display:inline-block; margin:4px">${badgeL}</span><br style="clear:both;">
"""
}
settings = {
'phg': "",
'cnt': 8,
'scs': {
'iphone': 320,
'ipad': 320,
'mac': 480
},
'template': {
'software': defaultTemplate,
'iPadSoftware': defaultTemplate,
'macSoftware': defaultTemplate,
'song': defaultTemplate,
'album': defaultTemplate,
'movie': defaultTemplate,
'ebook': defaultTemplate
}
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "houseofdota.production_settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
"""
Django settings for plasystem project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os  # used below for path handling; not guaranteed to arrive via the star import
from local_settings import *
INSTALLED_APPS = [
'flat_responsive',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'productores',
'organizaciones',
'subsectores',
'lugar',
'resultados',
'reportes',
'smart_selects',
'multiselectfield',
#'nested_admin',
'nested_inline',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'plasystem.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'plasystem.wsgi.application'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'es-ni'
TIME_ZONE = 'America/Managua'
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = os.environ.get('MEDIA_ROOT', os.path.join(BASE_DIR, 'media'))
MEDIA_URL = '/media/'
STATIC_ROOT = os.environ.get('STATIC_ROOT', os.path.join(BASE_DIR, 'static'))
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static_media"),
)
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
|
"""Tests for the object departures module."""
import responses
import test as _test
import navitia_client
class DeparturesTest(_test.TestCase):
def setUp(self):
self.user = 'leo'
self.core_url = "https://api.navitia.io/v1/"
self.client = navitia_client.Client(self.user)
self.coords = '2.333333;48.866667'
def test_no_region_nor_coords(self):
        # Should raise an error if neither region nor coords are specified
pass
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
pd.options.display.max_rows = 1000
pd.options.display.max_columns = 25
pd.options.display.width = 1000
data = pd.read_csv('StockDataWithVolume.csv', index_col='Date', parse_dates=True)
features = [data.columns.values]
data['returns'] = (data['NASDAQ'] - data['NASDAQ'].shift(1)) / data['NASDAQ']
data = data.dropna()
round_values = 4
data['gainLoss'] = data['returns'].round(round_values)
total_samples = len(data)
n_bins = data['gainLoss'].nunique()
value_count = data['gainLoss'].value_counts()
value_count = value_count.sort_index()
b = value_count.index.tolist()
bins = ['%.4f' % z for z in b] # match to round value
probability = value_count / total_samples
transitions = np.zeros((n_bins, n_bins))
def map_transition(this_return, previous_return):
    # np.where already returns zero-based positions; no offset is needed
    # (a "- 1" here would silently wrap the first bin to the last row via index -1)
    current = np.where(probability.index == this_return)[0]
    previous = np.where(probability.index == previous_return)[0]
    transitions[current, previous] += 1
total_transitions = 0
for i in range(len(data)-1):
total_transitions += 1
previous = data.iloc[i]['gainLoss']
current = data.iloc[i+1]['gainLoss']
map_transition(current, previous)
transitions /= total_transitions
# normalize each column so it sums to 1 (this makes the division above redundant,
# but harmless): column j holds P(tomorrow's bin | today's bin j)
transitions /= transitions.sum(axis=0)
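# Sanity-check sketch: every column should now sum to ~1, since column j is the
# conditional distribution over tomorrow's bin given today's bin j:
# assert np.allclose(transitions.sum(axis=0), 1.0)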
s = -.03  # today's gain or loss --- be sure it is a valid bin
n = 5
t = transitions
# n-step transition probabilities need a matrix power; "t ** n" would raise each
# entry to the n-th power element-wise instead
prediction_probabilities = np.linalg.matrix_power(t, n)
state = np.where(probability.index == s)[0][0]  # state index for today's return
probabilities = prediction_probabilities[:, state]  # columns condition on the start state
mostlikely = probabilities.argmax()
bin_value = float(bins[mostlikely])
print("%d days from now, the most likely market return is %.2f" % (n, bin_value))
def convert_return_for_plot(r):
return bins[r]
days_ahead = 5
p = []
# the n-step matrix does not depend on the day, so compute it once
n_step = np.linalg.matrix_power(transitions, days_ahead)
for i in range(len(data) - 1):
    s = data.iloc[i]['gainLoss']  # get current day return from market
    state = np.where(probability.index == s)[0][0]  # state index for today's return
    probabilities = n_step[:, state]
    mostlikely = probabilities.argmax()
    bin_value = bins[mostlikely]
    p.append(bin_value)
p = [0] + p  # pad the first day, which has no previous-day prediction
data['predicted'] = p
plt.figure(figsize=(12,12))
plt.title("Nasdaq daily gain/loss using single chain markov 5 days out")
plt.plot(data['returns'], label='Actual')
plt.plot(data['predicted'], label='Predicted', alpha=0.5)
plt.legend(loc='best')
plt.savefig("SingleChainMarkov.png")
plt.show()
|
"""
Django settings for ecommerce project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
SECRET_KEY = 'csqwlmc8s55o($rt6ozh7u+ui9zb-et00w$d90j8$^!nvj41_r'
DEBUG = False
ALLOWED_HOSTS = ['*']
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'stroms38@gmail.com'
EMAIL_HOST_PASSWORD = 'yourpassword'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
'''
If using gmail, you will need to
unlock Captcha to enable Django
to send for you:
https://accounts.google.com/displayunlockcaptcha
'''
INSTALLED_APPS = (
#django app
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
#third party apps
'crispy_forms',
'registration',
#my apps
'answers',
'newsletter',
"products",
"carts",
"billing",
"django_filters",
"storages",
'gunicorn',
"djstripe",
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'ecommerce.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ecommerce.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'EST'
USE_I18N = True
USE_L10N = True
USE_TZ = True
'''Image storage Amazon S3'''
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = 'examplefy'
S3_URL = 'http://%s.s3.amazonaws.com/' % AWS_STORAGE_BUCKET_NAME
STATIC_URL = S3_URL
AWS_QUERYSTRING_AUTH = False
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# whitenoise storage is overridden by the S3 backend below; kept for reference:
# STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
'''Static storage'''
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static', "static_root"),
)
MEDIA_URL = S3_URL
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static", "media_root")
PROTECTED_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static", "protected_root")
here = lambda * x: os.path.join(os.path.abspath(os.path.dirname(__file__)), *x)
PROJECT_ROOT = here("..")
root = lambda * x: os.path.join(os.path.abspath(PROJECT_ROOT), *x)
TEMPLATE_DIRS = (
root('templates'),
)
CRISPY_TEMPLATE_PACK = 'bootstrap3'
ACCOUNT_ACTIVATION_DAYS = 7
REGISTRATION_AUTO_LOGIN = True
SITE_ID = 1
LOGIN_REDIRECT_URL = '/'
BRAINTREE_PUBLIC = "hsjhmqhy73rvpqbv"
BRAINTREE_PRIVATE = "37b06da7e2cdb493bf0e0ddb1c47cbcd"
BRAINTREE_MERCHANT = "bgd7scxjbcrz6dd2"
BRAINTREE_ENVIRONMENT = "Sandbox"
STRIPE_PUBLIC_KEY = os.environ.get("STRIPE_PUBLIC_KEY", "pk_test_lLFAbBOc7bHtpxq5QnIp94xh")
STRIPE_SECRET_KEY = os.environ.get("STRIPE_SECRET_KEY", "sk_test_hWkIxMrsvR3IGJIRKLRy1Rts")
# `settings` is not importable from inside the settings module itself, so the
# getattr(settings, "DJSTRIPE_CURRENCIES", ...) lookup is replaced by a direct value
CURRENCIES = (
    ('usd', 'U.S. Dollars',),
    ('gbp', 'Pounds (GBP)',),
    ('eur', 'Euros',),
)
DJSTRIPE_PLANS = {
"one-time": {
"stripe_plan_id": "one-time",
"name": "Examplefy ($0.99)",
"description": "A one-time buy to Examplefy",
"price": 99, # $0.99
"currency": "usd",
"interval": "day"
},
"monthly": {
"stripe_plan_id": "pro-monthly",
"name": "Examplefy Pro ($4.99/month)",
"description": "The monthly subscription plan to Examplefy",
"price": 499, # $4.99
"currency": "usd",
"interval": "month",
"interval_count": 1
},
"yearly": {
"stripe_plan_id": "pro-yearly",
"name": "Examplefy Prime ($49/year)",
"description": "The annual subscription plan to Examplefy",
"price": 4900, # $49.00
"currency": "usd",
"interval": "year",
"interval_count": 1
}
}
|
"""geo.py: Implementation of class AbstractTwitterGeoCommand
and its subclasses.
"""
from argparse import ArgumentParser
from . import (AbstractTwitterCommand, call_decorator)
from ..parsers import (filter_args, cache)
GEO_ID_PLACE_ID = ('geo/id/:place_id', 'id')
GEO_REVERSE_GEOCODE = ('geo/reverse_geocode', 'reverse')
GEO_SEARCH = ('geo/search', 'search')
class AbstractTwitterGeoCommand(AbstractTwitterCommand):
"""n/a"""
pass
class IdPlaceId(AbstractTwitterGeoCommand):
"""Output all the information about a known place."""
def create_parser(self, subparsers):
parser = subparsers.add_parser(
GEO_ID_PLACE_ID[0],
aliases=GEO_ID_PLACE_ID[1:],
help=self.__doc__)
parser.add_argument(
'place_id',
            help='a place in the world that can be retrieved '
            'from geo/reverse_geocode')
return parser
@call_decorator
def __call__(self):
"""Request GET geo/id/:place_id for Twitter."""
# pylint: disable=protected-access
kwargs = dict(_id=self.args.place_id)
return kwargs, self.twhandler.geo.id._id # hack?
class ReverseGeocode(AbstractTwitterGeoCommand):
"""Search for up to 20 places that can be used as a place_id."""
def create_parser(self, subparsers):
parser = subparsers.add_parser(
GEO_REVERSE_GEOCODE[0],
aliases=GEO_REVERSE_GEOCODE[1:],
parents=[parser_geo_common()],
help=self.__doc__)
parser.add_argument(
'long',
metavar='{-180.0..180.0}',
help='the longitude to search around')
parser.add_argument(
'lat',
metavar='{-90.0..90.0}',
help='the latitude to search around')
return parser
@call_decorator
def __call__(self):
"""Request GET geo/reverse_geocode for Twitter."""
kwargs = filter_args(
vars(self.args),
'lat', 'long', 'accuracy', 'granularity', 'max_results')
return kwargs, self.twhandler.geo.reverse_geocode
class Search(AbstractTwitterGeoCommand):
"""Search for places that can be attached to a statuses/update."""
def create_parser(self, subparsers):
parser = subparsers.add_parser(
GEO_SEARCH[0],
aliases=GEO_SEARCH[1:],
parents=[parser_geo_common()],
help=self.__doc__)
parser.add_argument(
'--long',
metavar='{-180.0..180.0}',
help='the longitude to search around')
parser.add_argument(
'--lat',
metavar='{-90.0..90.0}',
help='the latitude to search around')
parser.add_argument(
'-q', '--query',
metavar='<text>',
help='free-form text to match against '
'while executing a geo-based query')
parser.add_argument(
'-i', '--ip-address',
dest='ip',
metavar='<ip-address>',
help='an IP address')
parser.add_argument(
'-c', '--contained-within',
dest='contained_within',
metavar='<place_id>',
help='the place_id which you would like '
'to restrict the search results to')
parser.add_argument(
'-s', '--street-address',
dest='street_address',
metavar='<text>',
help='search for places which have this given street address')
return parser
@call_decorator
def __call__(self):
"""Request GET geo/search for Twitter."""
kwargs = filter_args(
vars(self.args),
'lat', 'long', 'accuracy', 'granularity', 'max_results',
'query', 'ip', 'contained_within', 'street_address')
return kwargs, self.twhandler.geo.search
def make_commands(manager):
"""Prototype"""
# pylint: disable=no-member
return (cmd_t(manager) for cmd_t in
AbstractTwitterGeoCommand.__subclasses__())
CHOICES = ('poi', 'neighborhood', 'city', 'admin', 'country')
@cache
def parser_geo_common():
"""Return the parser for common arguments."""
parser = ArgumentParser(add_help=False)
parser.add_argument(
'-a', '--accuracy',
help='a hint on the region in which to search')
parser.add_argument(
'-g', '--granularity',
choices=CHOICES,
metavar='|'.join(CHOICES),
help='the minimal granularity of place types to return')
parser.add_argument(
'-m', '--max-results',
type=int,
dest='max_results',
help='a hint as to the number of results to return')
return parser
|
from __future__ import absolute_import
from collections import defaultdict as ddict
import os.path as op
def enum(**enums):
"""#enumeration
#backward compatible
:param enums:
"""
return type('Enum', (), enums)
IONISATION_MODE = enum(NEG=-1, POS=1)
class ExperimentalSettings(object):
"""
:param mz_tol_ppm:
:param ionisation_mode:
:param is_dims_experiment:
"""
ADDUCTS_POS = op.abspath("mzos/ressources/POS_ADDUCTS_IMS.csv")
ADDUCTS_NEG = op.abspath("mzos/ressources/NEG_ADDUCTS_IMS.csv")
FRAGMENTS = op.abspath("mzos/ressources/FRAGMENTS_IMS.csv")
def __init__(self, mz_tol_ppm, polarity, is_dims_exp,
frag_conf=None,
neg_adducts_conf=None,
pos_adducts_conf=None):
self.samples = set()
        self.polarity = polarity  # warning: this is an IONISATION_MODE enum value
self.mz_tol_ppm = mz_tol_ppm
self.is_dims_exp = is_dims_exp
# self.databases = databases
self.group_by_id = ddict(set)
self.group_by_sample = {}
# setting isos file, same for both polarity
# self.isos_file = ExperimentalSettings.ISOS
# setting good frags_file
self.frags_file = frag_conf or ExperimentalSettings.FRAGMENTS
self.adducts_file = neg_adducts_conf or ExperimentalSettings.ADDUCTS_NEG \
if polarity == IONISATION_MODE.NEG else pos_adducts_conf or ExperimentalSettings.ADDUCTS_POS
def get_frags(self):
"""
:return:
"""
lines = list()
with open(self.frags_file) as f:
lines += [l.split(",") for l in f.readlines()[1:]]
return [((float(l[3]), 1), l[0]) for l in lines]
def get_adducts(self):
"""
:return:
"""
lines = list()
with open(self.adducts_file) as f:
lines += [l.split(",") for l in f.readlines()[1:]]
return [((float(l[3]), 1), l[0]) for l in lines]
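    # Both CSV files are assumed to share the layout implied above: a header row,
    # the species name in column 0, and its mass in column 3, e.g. (illustrative):
    # name,formula,charge,mass
    # M+H,H,1,1.00728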
def get_mass_to_check(self):
"""
:return:
"""
if self.is_dims_exp:
return self.get_frags()
return self.get_adducts() + self.get_frags()
def create_group(self, id_, samples):
"""
:param id_:
:param samples:
:return:
"""
group = Group(id_, samples)
for s in list(samples):
self.group_by_sample[s] = group
self.group_by_id[id_] = group
        self.samples |= set(samples)  # union() returns a new set; update in place
return group
def get_group(self, id_):
"""
:param id_:
:return:
"""
return self.group_by_id.get(id_)
def get_group_of(self, sample):
"""
:param sample:
:return: return group or None
"""
return self.group_by_sample.get(sample)
def get_group_id_of(self, sample):
"""
:param sample:
:return:
"""
group = self.get_group_of(sample)
if group is None:
return None
return group.name_id
class Group(list):
"""
:param name_id:
:param samples:
:param description:
"""
def __init__(self, name_id, samples, description=""):
super(Group, self).__init__()
self.samples = samples
self.description = description
self.name_id = name_id
|
from nose.tools import (
eq_,
raises,
)
from py3oauth2.utils import (
normalize_netloc,
normalize_path,
normalize_query,
normalize_url,
)
def test_normalize_url():
eq_(normalize_url('http://a/b/c/%7Bfoo%7D'),
normalize_url('hTTP://a/./b/../b/%63/%7bfoo%7d'))
@raises(ValueError)
def test_normalize_url_unknown_scheme():
normalize_url('example://example.com/')
@raises(ValueError)
def test_normalize_url_fragment():
normalize_url('http://example.com/#foo')
@raises(ValueError)
def test_normalize_url_invalid_port():
normalize_url('https://example.com:1bb/#foo')
def test_normalize_netloc():
eq_(normalize_netloc('eXamPLe.com', 80), 'example.com')
eq_(normalize_netloc('user:pass@example.com', 80), 'user:pass@example.com')
eq_(normalize_netloc('user:@example.com', 80), 'user@example.com')
eq_(normalize_netloc(':pass@example.com', 80), ':pass@example.com')
eq_(normalize_netloc('example.com:443', 80), 'example.com:443')
eq_(normalize_netloc('example.com:80', 80), 'example.com')
eq_(normalize_netloc('example.com:', 80), 'example.com')
def test_normalize_query():
eq_(normalize_query(''), '')
eq_(normalize_query('b=c&a=b'), 'a=b&b=c')
eq_(normalize_query('b&a=b'), 'a=b')
eq_(normalize_query('b=&a=b'), 'a=b')
eq_(normalize_query('b=%e3%81%84&a=%e3%81%82'), 'a=%E3%81%82&b=%E3%81%84')
def test_normalize_path():
eq_(normalize_path(''), '/')
eq_(normalize_path('//'), '/')
eq_(normalize_path('/a//b'), '/a/b/')
eq_(normalize_path('/a/./b'), '/a/b/')
eq_(normalize_path('/a/foo/../b'), '/a/b/')
eq_(normalize_path('/%e3%81%82%a%e3%81%84'), '/%E3%81%82%a%E3%81%84/')
eq_(normalize_path('/%e3%81%82a%e3%81%84'), '/%E3%81%82a%E3%81%84/')
|
"""Executa o servidor de nomes ".br"."""
import logging
import dns
def main():
logging.basicConfig(
format='[%(levelname)s]%(threadName)s %(message)s',
level=logging.INFO)
brNS = dns.NameServer('.br', 2, '127.0.0.1', 10001)
brNS.add_record('uem.br', '127.0.0.1:10002')
brNS.run()
if __name__ == '__main__':
main()
|
from __future__ import unicode_literals
from django.apps import AppConfig
class DevelopersConfig(AppConfig):
name = 'developers'
|
"""
Contains all elements of this package. They act as the formal elements of the law.
"""
import json
import sys
def from_json(data):
"""
Reconstructs any `BaseElement` from its own `.as_json()`. Returns the element.
"""
def _decode(data_dict):
values = []
if isinstance(data_dict, str):
return data_dict
assert(len(data_dict) == 1)
klass_string = next(iter(data_dict.keys()))
klass = getattr(sys.modules[__name__], klass_string)
args = []
for e in data_dict[klass_string]:
x = _decode(e)
if isinstance(x, str):
args.append(x)
else:
args += x
values.append(klass(*args))
return values
return _decode(json.loads(data))[0]
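# Round-trip sketch: for any element defined below, from_json(elem.as_json())
# rebuilds an equal element, e.g. from_json(Token('x').as_json()) == Token('x').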
class BaseElement(object):
"""
Defines the interface of all elements.
"""
def as_html(self):
"""
How the element converts itself to HTML.
"""
raise NotImplementedError
def as_str(self):
"""
How the element converts itself to simple text.
"""
raise NotImplementedError
def as_dict(self):
"""
How the element converts itself to a dictionary.
"""
raise NotImplementedError
def as_json(self):
"""
How the element converts itself to JSON. Not to be overwritten.
"""
return json.dumps(self.as_dict())
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, repr(self.as_str()))
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.as_dict() == other.as_dict()
else:
return False
@staticmethod
def _build_html(tag, text, attrib):
text = text.replace('\n', '') # \n have no meaning in HTML
if not text:
# ignore empty elements
return ''
attributes = ' '.join('%s="%s"' % (key, value)
for (key, value) in sorted(attrib.items())
if value is not None)
if attributes:
attributes = ' ' + attributes
return '<{0}{1}>{2}</{0}>'.format(tag, attributes, text)
class Token(BaseElement):
"""
A simple string.
"""
def __init__(self, string):
assert isinstance(string, str)
self._string = string
def as_str(self):
return self.string
def as_html(self):
return self.as_str()
def as_dict(self):
return {self.__class__.__name__: [self.as_str()]}
@property
def string(self):
return self._string
class Reference(Token):
"""
A generic reference to anything. Contains a number (str) and a parent, which
must be either `None` or a `Token` (or a subclass of `Token`).
"""
def __init__(self, number, parent=None):
super(Reference, self).__init__(number)
assert isinstance(number, str)
assert isinstance(parent, Token) or parent is None
self._parent = parent
def __repr__(self):
return '<%s %s %s>' % (self.__class__.__name__,
repr(self.number), repr(self.parent))
def as_html(self):
return self._build_html('a', self.as_str(), {})
def as_dict(self):
r = {self.__class__.__name__: [self.number]}
if self.parent:
r[self.__class__.__name__].append(self.parent.as_dict())
return r
@property
def number(self):
return self.string
@property
def parent(self):
return self._parent
class DocumentReference(Reference):
"""
A concrete Reference to a document. Contains an href that identifies where
it points to, as well as a `set_href` to set it.
"""
def __init__(self, number, parent, href=''):
super(DocumentReference, self).__init__(number, parent)
self._href = href
def __repr__(self):
return '<%s %s %s>' % (self.__class__.__name__, repr(self.as_str()),
repr(self.parent.as_str()))
@property
def name(self):
return self.parent.as_str()
def set_href(self, href):
self._href = href
def as_html(self):
if self._href:
return self._build_html('a', self.as_str(), {'href': self._href})
return super(DocumentReference, self).as_html()
def as_dict(self):
r = super(DocumentReference, self).as_dict()
if self._href:
r[self.__class__.__name__].append(self._href)
return r
class LineReference(Reference):
pass
class NumberReference(Reference):
pass
class ArticleReference(Reference):
pass
class EULawReference(Reference):
"""
A reference to EU law. Its href is built from its name and number.
"""
@staticmethod
def _build_eu_url(name, number):
# example: '2000/29/CE'
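        # e.g. name 'Diretiva' with number '2000/29/CE' yields CELEX id '32000L0029'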
year, iden = number.split('/')[:2]
label = {'Diretiva': 'L',
'Decisão de Execução': 'D',
'Regulamento (CE)': 'R',
'Regulamento CE': 'R',
'Regulamento CEE': 'R'}[name]
if label == 'R':
year, iden = iden, year
eur_id = '3%s%s%04d' % (year, label, int(iden))
return 'http://eur-lex.europa.eu/legal-content/PT/TXT/?uri=CELEX:%s' \
% eur_id
def __init__(self, number, parent):
super(EULawReference, self).__init__(number, parent)
def as_html(self):
return self._build_html('a', self.as_str(),
{'href': self._build_eu_url(self.parent.as_str(),
self.number)})
class Anchor(Token):
"""
A generic anchor that defines a section that can be referred to.
"""
name = None
def __init__(self, string):
super(Anchor, self).__init__(string)
self._document_section = None
def as_str(self):
return '%s %s\n' % (self.name, self.number)
def as_dict(self):
return {self.__class__.__name__: [self.number]}
@property
def number(self):
return self.string
@property
def format(self):
return self.__class__
@property
def reference(self):
return self._document_section
@reference.setter
def reference(self, document_section):
assert(isinstance(document_section, DocumentSection))
self._document_section = document_section
def ref_as_href(self):
if self.reference.id_as_html():
return '#' + self.reference.id_as_html()
else:
return None
class Section(Anchor):
name = 'Secção'
class SubSection(Anchor):
name = 'Sub-Secção'
class Clause(Anchor):
name = 'Clausula'
def as_str(self):
return '%s\n' % self.number
class Part(Anchor):
name = 'Parte'
class Chapter(Anchor):
name = 'Capítulo'
class Title(Anchor):
name = 'Título'
class Annex(Anchor):
name = 'Anexo'
def as_str(self):
if self.number:
return '%s %s\n' % (self.name, self.number)
else:
return '%s\n' % self.name
class Article(Anchor):
name = 'Artigo'
def as_html(self):
anchor = self._build_html('a', self.number,
{'href': self.ref_as_href()})
return '%s %s' % (self.name, anchor)
class Number(Anchor):
name = 'Número'
def as_str(self):
return '%s -' % self.number
def as_html(self):
return self._build_html('a', self.as_str(),
{'href': self.ref_as_href()})
class Line(Number):
name = 'Alínea'
def as_str(self):
return '%s' % self.number
class Item(Number):
"""
An item of an unordered list.
"""
name = 'Item'
def as_str(self):
return '%s' % self.number
class BaseDocumentSection(BaseElement):
def __init__(self, *children):
self._children = []
for child in children:
self.append(child)
self._parent_section = None
def append(self, element):
if isinstance(element, BaseDocumentSection):
element._parent_section = self
self._children.append(element)
def __len__(self):
return len(self._children)
def as_str(self):
return ''.join(child.as_str() for child in self._children)
def as_html(self):
string = ''
ol = False
ul = False
for child in self._children:
if ul and not isinstance(child, UnorderedDocumentSection):
string += '</ul>'
ul = False
if ol and not isinstance(child, OrderedDocumentSection):
string += '</ol>'
ol = False
if not ul and isinstance(child, UnorderedDocumentSection):
string += '<ul>'
ul = True
if not ol and isinstance(child, OrderedDocumentSection):
string += '<ol>'
ol = True
string += child.as_html()
if ol:
string += '</ol>'
if ul:
string += '</ul>'
return string
def as_dict(self):
return {self.__class__.__name__: [child.as_dict() for child in
self._children]}
def find_all(self, condition, recursive=False):
if recursive:
def _find_all(root):
result = []
if isinstance(root, BaseDocumentSection):
for child in root._children:
if condition(child):
result.append(child)
result += _find_all(child)
return result
return _find_all(self)
return [child for child in self._children if condition(child)]
def id_tree(self):
tree = []
if self._parent_section is not None:
tree = self._parent_section.id_tree()
tree += [self]
return tree
def get_doc_refs(self):
"""
Yields tuples (name, number) of all its `DocumentReference`s.
"""
refs = self.find_all(lambda x: isinstance(x, DocumentReference), True)
ref_set = set()
for ref in refs:
ref_set.add((ref.name, ref.number))
return ref_set
def set_doc_refs(self, mapping):
"""
Uses a dictionary of the form `(name, ref)-> url` to set the href
of its own `DocumentReference`s.
"""
refs = self.find_all(lambda x: isinstance(x, DocumentReference), True)
for ref in refs:
if (ref.name, ref.number) in mapping:
ref.set_href(mapping[(ref.name, ref.number)])
class Paragraph(BaseDocumentSection):
def as_html(self):
return self._build_html('p', super(Paragraph, self).as_html(), {})
class InlineParagraph(Paragraph):
def as_html(self):
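        # deliberately calls super(Paragraph, self) to skip Paragraph's <p>
        # wrapper, so the content is wrapped in a <span> instead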
return self._build_html('span', super(Paragraph, self).as_html(), {})
class Document(BaseDocumentSection):
pass
class DocumentSection(BaseDocumentSection):
formal_sections = [Annex, Article, Number, Line, Item]
html_classes = {
Annex: 'annex',
Part: 'part',
Title: 'title',
Chapter: 'chapter',
Section: 'section',
SubSection: 'sub-section',
Clause: 'clause',
Article: 'article',
Number: 'number list-unstyled',
Line: 'line list-unstyled',
Item: 'item list-unstyled',
}
def __init__(self, anchor, *children):
super(DocumentSection, self).__init__(*children)
self._anchor = anchor
self._anchor.reference = self
def as_dict(self):
json = super(DocumentSection, self).as_dict()
json[self.__class__.__name__].insert(0, self.anchor.as_dict())
return json
@property
def anchor(self):
return self._anchor
@property
def format(self):
return self.anchor.format
def formal_id_tree(self):
filtered_tree = []
for e in self.id_tree():
if isinstance(e, QuotationSection):
return [] # sections inside quotations have no tree
if isinstance(e, DocumentSection) and e.format in self.formal_sections:
filtered_tree.append(e)
return filtered_tree
def id_as_html(self):
string = '-'.join(e.anchor.name + '-' + e.anchor.number for e in
self.formal_id_tree())
if string != '':
return string
else:
return None
class TitledDocumentSection(DocumentSection):
def __init__(self, anchor, title=None, *children):
super(TitledDocumentSection, self).__init__(anchor, *children)
self._title = title
def as_dict(self):
json = super(TitledDocumentSection, self).as_dict()
if self._title is not None:
json[self.__class__.__name__].insert(1, self._title.as_dict())
return json
hierarchy_html_titles = {
Part: 'h2',
Annex: 'h2',
Title: 'h3',
Chapter: 'h3',
Section: 'h4',
SubSection: 'h5',
Article: 'h5',
Clause: 'h5',
}
def as_html(self):
inner = self.anchor.as_html()
if self._title is not None:
inner += self._title.as_html()
container = self._build_html(self.hierarchy_html_titles[self.format],
inner, {'class': 'title'})
rest = super(TitledDocumentSection, self).as_html()
return self._build_html('div', container + rest,
{'class': self.html_classes[self.format],
'id': self.id_as_html()})
def as_str(self):
string = self.anchor.as_str()
if self._title is not None:
string += self._title.as_str()
return string + super(TitledDocumentSection, self).as_str()
@property
def title(self):
return self._title
@title.setter
def title(self, title):
assert(isinstance(title, Paragraph))
self._title = title
class InlineDocumentSection(DocumentSection):
"""
A section whose elements are inline.
"""
    formats = set()  # subclasses override this with the anchor formats they accept
def as_html(self):
container = self._build_html('span', self.anchor.as_html(), {})
rest = super(InlineDocumentSection, self).as_html()
return self._build_html('li', container + rest,
{'class': self.html_classes[self.format],
'id': self.id_as_html()})
def as_str(self):
return self.anchor.as_str() + super(InlineDocumentSection, self).as_str()
class OrderedDocumentSection(InlineDocumentSection):
"""
A section whose elements are inline and ordered.
"""
formats = {Number, Line}
class UnorderedDocumentSection(InlineDocumentSection):
"""
A section whose elements are inline and un-ordered.
"""
formats = {Item}
class QuotationSection(BaseDocumentSection):
"""
A Section quoting something.
"""
def as_html(self):
return '<blockquote>%s</blockquote>' % \
super(QuotationSection, self).as_html()
def as_str(self):
return '«%s»' % super(QuotationSection, self).as_str()
|
import numpy as np
import matplotlib.pyplot as plt
import h5py
from matplotlib.font_manager import FontProperties
import matplotlib.ticker as ticker
axial_label_font = FontProperties()
axial_label_font.set_family('sans-serif')
axial_label_font.set_style('normal')
axial_label_font.set_weight('bold')
axial_label_font.set_size(20)
legend_label_font = FontProperties()
legend_label_font.set_family('sans-serif')
legend_label_font.set_style('normal')
legend_label_font.set_weight('normal')
legend_label_font.set_size(16)
def node_response_extraction_sequential(node_ID, file_name, num_DOF):
h5_file = h5py.File(file_name, 'r');
Time = h5_file['time'][:];
displacement_index = int(h5_file['Model/Nodes/Index_to_Generalized_Displacements'][node_ID]);
displacement_component = h5_file['Model/Nodes/Generalized_Displacements'][int(displacement_index):int(displacement_index+num_DOF), :];
acceleration_component = h5_file['Model/Nodes/Generalized_Accelerations'][int(displacement_index):int(displacement_index+num_DOF), :];
    for x1 in range(0, num_DOF):
        # subtract the initial value so displacements are relative (handles a
        # self-weight loading stage at the start of the record)
        displacement_component[x1, :] = displacement_component[x1, :] - displacement_component[x1, 0]
    return Time, displacement_component, acceleration_component
numbercol = 1;
surface_node_ID = 252; ## 252, 250, 249, 251
node_ID = [252, 212, 172, 132, 92, 52, 12]; ## node ID from surface to bottom
depth = [0, 2, 4, 6, 8, 10, 12];
bottom_node_ID = 6; ## node just beyond DRM layer
file_name = 'Motion1C_DRM_propagation.h5.feioutput' ##
parameteric_case = 'Motion1C_Northridge' ##
postfix = '.feioutput';
middle_name_less_than_ten = '0';
num_DOF = 3;
Time, displacement_component_surface, acceleration_component_surface = node_response_extraction_sequential(surface_node_ID, file_name, num_DOF);
Time, displacement_component_bottom, acceleration_component_bottom = node_response_extraction_sequential(bottom_node_ID, file_name, num_DOF);
surface_acc = np.loadtxt('scaled_northridge_acc.dat');
surface_disp = np.loadtxt('scaled_northridge_dis.dat');
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(surface_acc[:, 0], surface_acc[:, 1], '-r', label='surface analytical', linewidth= 1.5);
ax.plot(Time[200:]-2.0, acceleration_component_surface[0, 200:], '-k', label='DRM propagation', linewidth= 0.5);
plt.gca().set_xlim([0,38]);
plt.gca().get_yaxis().set_major_formatter(ticker.FormatStrFormatter('%0.2f'))
plt.gca().get_xaxis().set_tick_params(direction='in',labelsize='x-large')
plt.gca().get_yaxis().set_tick_params(direction='in',labelsize='x-large')
plt.xlabel('Time [s]', fontproperties=axial_label_font);
plt.ylabel('Acc. [$m/s^2$]', fontproperties=axial_label_font);
plt.grid(True);
plt.legend(ncol= numbercol, loc='upper right', prop=legend_label_font);
filename = 'acc_check_'+ parameteric_case + '.pdf'
plt.savefig(filename, bbox_inches='tight');
plt.show();
|
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from django.core.mail import EmailMultiAlternatives
from django.conf import settings
def send_message(message):
    """
    * desc: convenience helper for sending mail
    * input: the message to send
    * output: None
    """
mail_handler = SendMail()
mail_handler.send_mail(settings.REPORT_USER, 'Error info', message)
class SendMail(object):
"""docstring for SendMail"""
def __init__(self):
self.mail_host = settings.MAIL_HOST
self.mail_host_user = settings.MAIL_HOST_USER
self.mail_host_pwd = settings.MAIL_HOST_PWD
self.smtp = smtplib.SMTP()
self.smtp_login()
def smtp_login(self):
        # connect to the host and log in
self.smtp.connect(self.mail_host)
self.smtp.login(self.mail_host_user, self.mail_host_pwd)
    def send_file_mail(self, receiver_list, subject, file_info, file_name):
        # send a mail with a file attachment; a MIMEMultipart container
        # is needed to hold the attachment part
        sender = self.mail_host_user
        msg = MIMEMultipart()
        msg['Subject'] = subject
        msg['From'] = sender
        msg['To'] = ";".join(receiver_list)
        part = MIMEApplication(file_info)
        part.add_header('Content-Disposition',
                        'attachment', filename=file_name)
        msg.attach(part)
        self.smtp.sendmail(sender, receiver_list, msg.as_string())
def send_mail(self, receiver_list, subject, context, mail_type="plain"):
"""
* desc 发送邮件的接口
* input receiver_list 收件人的地址列表 subject 主题 context 发送的内容 mail_type 邮件的格式 目前测试成功 plain 和 html
* output 发送成功与否
"""
sender = self.mail_host_user
msg = MIMEText(context, mail_type)
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = ";".join(receiver_list)
self.smtp.sendmail(sender, receiver_list, msg.as_string())
def close(self):
        # close the established connection
self.smtp.close()
class MailHandler(object):
def __init__(self):
pass
def send_mail_message(self, to_user, msg, error=0):
"""
* desc 发送错误邮件
* input 要发送的人 发送的消息 错误还是告警
* output 0 发送成功 1 发送失败
"""
subject = settings.MSUBMAIL
if error:
text_content = 'Virtual Manager Error'
else:
text_content = 'Virtual Manager Warning'
from_email = settings.FMAIL
try:
to = [str(user) + "@hujiang.com" for user in to_user.split(',')]
print(to)
content_msg = EmailMultiAlternatives(
subject, text_content, from_email, to)
html_content = u'<b>' + msg + '</b>'
content_msg.attach_alternative(html_content, 'text/html')
content_msg.send()
return 0
        except Exception:
return 1
|
from django.db import models
class Pizza(models.Model):
name = models.CharField(max_length=128)
price = models.DecimalField(decimal_places=2, max_digits=5)
ingredients = models.TextField()
picture = models.ImageField(blank=True, null=True)
def __unicode__(self):
return u'Pizza: {}'.format(self.name)
def __repr__(self):
return unicode(self)
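# Usage sketch (assumes a configured Django project; the values are placeholders):
# Pizza.objects.create(name='Margherita', price='7.50',
#                      ingredients='tomato, mozzarella, basil')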
|
import json
from django.core.urlresolvers import reverse
from django.http import HttpResponseNotFound
from django.test import TestCase
from mock import Mock
from utils import use_GET_in
from api.views import msas, tables
class ConversionTest(TestCase):
def test_use_GET_in(self):
fn, request = Mock(), Mock()
request.GET.lists.return_value = [('param1', [0]), ('param2', [-1])]
# Dictionaries become JSON
fn.return_value = {'a': 1, 'b': 2}
response = use_GET_in(fn, request)
self.assertEqual(json.loads(response.content), {'a': 1, 'b': 2})
self.assertEqual(fn.call_args[0][0], {'param1': [0], 'param2': [-1]})
# Everything else is unaltered
fn.return_value = HttpResponseNotFound('Oh noes')
response = use_GET_in(fn, request)
self.assertEqual(response.status_code, 404)
self.assertEqual(response.content, 'Oh noes')
class ViewsTests(TestCase):
fixtures = ['agency.json', 'fake_msa.json', 'api_tracts.json', 'test_counties.json', 'fake_respondents.json']
def test_api_all_user_errors(self):
resp = self.client.get(reverse('all'), {'neLat':'42.048794',
'neLon':'-87.430698',
'swLat':'',
'swLon':'-88.225583',
'year':'2013',
'action_taken':'1,2,3,4,5',
'lender':'736-4045996'})
self.assertEqual(resp.status_code, 404)
resp = self.client.get(reverse('all'), {'neLat':'42.048794',
'neLon':'-87.430698',
'swLat':'41.597775',
'swLon':'',
'year':'2013',
'action_taken':'1,2,3,4,5',
'lender':'736-4045996'})
self.assertEqual(resp.status_code, 404)
def test_api_msas_user_errors(self):
resp = self.client.get(reverse('msas'))
self.assertEqual(resp.status_code, 404)
resp = self.client.get(reverse('msas'), {'neLat':'42.048794',
'neLon':'-87.430698',
'swLat':'',
'swLon':'-88.225583',
'year':'2013',
'action_taken':'1,2,3,4,5',
'lender':'736-4045996'})
self.assertEqual(resp.status_code, 404)
resp = self.client.get(reverse('msas'), {'neLat':'42.048794',
'neLon':'-87.430698',
'swLat':'41.597775',
'swLon':'',
'year':'2013',
'action_taken':'1,2,3,4,5',
'lender':'736-4045996'})
self.assertEqual(resp.status_code, 404)
def test_api_msas_endpoint(self):
"""should return a list of MSA ids in view"""
coords = {'neLat': '36.551569', 'neLon':'-78.961487', 'swLat':'35.824494', 'swLon':'-81.828918'}
url = reverse(msas)
resp = self.client.get(url, coords)
result_list = json.loads(resp.content)
self.assertTrue(isinstance(result_list, list))
self.assertContains(resp, '49180')
def test_api_tables_endpoint(self):
"""should return table_data json for a lender/MSA pair"""
params = {'lender': '90000451965', 'metro': '49180'}
url = reverse(tables)
resp = self.client.get(url, params)
result_dict = json.loads(resp.content)
self.assertTrue(isinstance(result_dict, dict))
keys = ['counties', 'msa']
lender_keys = ['hma_pct', 'lma_pct', 'mma_pct', 'lma', 'mma', 'hma', 'lar_total', 'peer_hma_pct', 'peer_lma_pct', 'peer_mma_pct', 'peer_lma', 'peer_mma', 'peer_hma', 'peer_lar_total', 'odds_lma', 'odds_mma', 'odds_hma']
for key in keys:
self.assertTrue(key in result_dict.keys())
for key in lender_keys:
self.assertTrue(key in result_dict['msa'].keys())
self.assertTrue(len(result_dict['msa']) > 0)
|
"""
The most important object in the Gratipay object model is Participant, and the
second most important one is Community. There are a few others, but those are
the most important two. Participant, in particular, is at the center of
everything on Gratipay.
"""
from contextlib import contextmanager
from postgres import Postgres
import psycopg2.extras
@contextmanager
def just_yield(obj):
yield obj
class GratipayDB(Postgres):
def get_cursor(self, cursor=None, **kw):
if cursor:
if kw:
raise ValueError('cannot change options when reusing a cursor')
return just_yield(cursor)
return super(GratipayDB, self).get_cursor(**kw)
def self_check(self):
with self.get_cursor() as cursor:
check_db(cursor)
def check_db(cursor):
"""Runs all available self checks on the given cursor.
"""
_check_balances(cursor)
_check_no_team_balances(cursor)
_check_tips(cursor)
_check_orphans(cursor)
_check_orphans_no_tips(cursor)
_check_paydays_volumes(cursor)
def _check_tips(cursor):
"""
Checks that there are no rows in tips with duplicate (tipper, tippee, mtime).
https://github.com/gratipay/gratipay.com/issues/1704
"""
conflicting_tips = cursor.one("""
SELECT count(*)
FROM
(
SELECT * FROM tips
EXCEPT
SELECT DISTINCT ON(tipper, tippee, mtime) *
FROM tips
ORDER BY tipper, tippee, mtime
) AS foo
""")
assert conflicting_tips == 0
def _check_balances(cursor):
"""
Recalculates balances for all participants from transfers and exchanges.
https://github.com/gratipay/gratipay.com/issues/1118
"""
b = cursor.all("""
select p.username, expected, balance as actual
from (
select username, sum(a) as expected
from (
select participant as username, sum(amount) as a
from exchanges
where amount > 0
and (status is null or status = 'succeeded')
group by participant
union all
select participant as username, sum(amount-fee) as a
from exchanges
where amount < 0
and (status is null or status <> 'failed')
group by participant
union all
select tipper as username, sum(-amount) as a
from transfers
group by tipper
union all
select participant as username, sum(amount) as a
from payments
where direction='to-participant'
group by participant
union all
select participant as username, sum(-amount) as a
from payments
where direction='to-team'
group by participant
union all
select tippee as username, sum(amount) as a
from transfers
group by tippee
) as foo
group by username
) as foo2
join participants p on p.username = foo2.username
where expected <> p.balance
""")
assert len(b) == 0, "conflicting balances: {}".format(b)
def _check_no_team_balances(cursor):
if cursor.one("select exists (select * from paydays where ts_end < ts_start) as running"):
# payday is running
return
teams = cursor.all("""
SELECT t.slug, balance
FROM (
SELECT team, sum(delta) as balance
FROM (
SELECT team, sum(-amount) AS delta
FROM payments
WHERE direction='to-participant'
GROUP BY team
UNION ALL
SELECT team, sum(amount) AS delta
FROM payments
WHERE direction='to-team'
GROUP BY team
) AS foo
GROUP BY team
) AS foo2
JOIN teams t ON t.slug = foo2.team
WHERE balance <> 0
""")
assert len(teams) == 0, "teams with non-zero balance: {}".format(teams)
def _check_orphans(cursor):
"""
    Finds participants that
    * do not have a corresponding elsewhere account
    * have not been absorbed by another participant
    These are broken because new participants arise from elsewhere accounts,
    and an elsewhere account is detached only by a take-over, which makes a
    note in absorptions if it removes the last elsewhere account.
    An especially bad case is when claimed_time is also set, because then an
    elsewhere account must have been attached and used to sign in.
    https://github.com/gratipay/gratipay.com/issues/617
"""
orphans = cursor.all("""
select username
from participants
where not exists (select * from elsewhere where elsewhere.participant=username)
and not exists (select * from absorptions where archived_as=username)
""")
assert len(orphans) == 0, "missing elsewheres: {}".format(list(orphans))
def _check_orphans_no_tips(cursor):
"""
    Finds participants
    * without an elsewhere account attached
    * having a non-zero outstanding tip
    This should not happen, because when we remove the last elsewhere account
    in take_over we also zero out all tips.
"""
orphans_with_tips = cursor.all("""
WITH valid_tips AS (SELECT * FROM current_tips WHERE amount > 0)
SELECT username
FROM (SELECT tipper AS username FROM valid_tips
UNION
SELECT tippee AS username FROM valid_tips) foo
WHERE NOT EXISTS (SELECT 1 FROM elsewhere WHERE participant=username)
""")
assert len(orphans_with_tips) == 0, orphans_with_tips
def _check_paydays_volumes(cursor):
"""
Recalculate *_volume fields in paydays table using exchanges table.
"""
if cursor.one("select exists (select * from paydays where ts_end < ts_start) as running"):
# payday is running
return
charge_volume = cursor.all("""
select * from (
select id, ts_start, charge_volume, (
select coalesce(sum(amount+fee), 0)
from exchanges
where timestamp > ts_start
and timestamp < ts_end
and amount > 0
and recorder is null
and (status is null or status <> 'failed')
) as ref
from paydays
order by id
) as foo
where charge_volume != ref
""")
assert len(charge_volume) == 0
charge_fees_volume = cursor.all("""
select * from (
select id, ts_start, charge_fees_volume, (
select coalesce(sum(fee), 0)
from exchanges
where timestamp > ts_start
and timestamp < ts_end
and amount > 0
and recorder is null
and (status is null or status <> 'failed')
) as ref
from paydays
order by id
) as foo
where charge_fees_volume != ref
""")
assert len(charge_fees_volume) == 0
ach_volume = cursor.all("""
select * from (
select id, ts_start, ach_volume, (
select coalesce(sum(amount), 0)
from exchanges
where timestamp > ts_start
and timestamp < ts_end
and amount < 0
and recorder is null
) as ref
from paydays
order by id
) as foo
where ach_volume != ref
""")
assert len(ach_volume) == 0
ach_fees_volume = cursor.all("""
select * from (
select id, ts_start, ach_fees_volume, (
select coalesce(sum(fee), 0)
from exchanges
where timestamp > ts_start
and timestamp < ts_end
and amount < 0
and recorder is null
) as ref
from paydays
order by id
) as foo
where ach_fees_volume != ref
""")
assert len(ach_fees_volume) == 0
def add_event(c, type, payload):
SQL = """
INSERT INTO events (type, payload)
VALUES (%s, %s)
"""
c.run(SQL, (type, psycopg2.extras.Json(payload)))
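# Usage sketch (the payload shape is illustrative; it is stored as JSON):
# add_event(db, 'participant', {'id': 123, 'action': 'set', 'values': {'goal': '1.00'}})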
|
"""
This module contains functions dealing with platform information.
"""
import OvfLibvirt
from locale import getlocale, getdefaultlocale
from time import timezone
def virtTypeIsAvailable(virtType):
    """
    Check if the given virt type is available on this system.
    @type virtType: String
    @param virtType: Platform type to check
    @rtype: Boolean
    @return: Indication of whether the type is available
    """
    # placeholder implementation: any non-empty type is treated as available
    if virtType:
        return True
    return False
def getVsSystemType(vs):
"""
This function gets the list of system types for the virtual system and
selects one based on the libvirt capabilities. It will select the
first type in the list that is present on the system.
    @type vs: DOM node
    @param vs: Virtual System node
@rtype: String
@return: Platform type for Virtual System
"""
virtTypes = OvfLibvirt.getOvfSystemType(vs)
for virtType in virtTypes:
# check if this virtType is available
if virtTypeIsAvailable(virtType):
return virtType
return None
def getPlatformDict(vs, virtPlatform=None):
    """
    Get the platform information.
    @type vs: DOM node
    @param vs: Virtual System node
    @type virtPlatform: String
    @param virtPlatform: Virtual Platform type. If None, it will be taken from vs.
    @rtype: Dictionary
    @return: Dictionary containing platform information for the virtual
             system; the contents are defined by the OVF specification for
             the environment.
    """
retDict = {}
if not virtPlatform:
virtPlatform = getVsSystemType(vs)
retDict['Kind'] = virtPlatform
# We could possibly look up the version and vendor here
# gather the details of the platform
(langCode, encoding) = getlocale()
    if langCode is None:
(langCode, encoding) = getdefaultlocale()
retDict['Locale'] = langCode
retDict['Timezone'] = timezone
return retDict
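# Example result (illustrative; 'Timezone' is time.timezone, the offset west of
# UTC in seconds): {'Kind': 'hvm', 'Locale': 'en_US', 'Timezone': 18000}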
|
'''
Created on Jul 1, 2014
@author: anroco
I have a list in Python and I want to reverse its elements, i.e. the last one
becomes the first. How can I do that?
'''
lista = [9, 2, 5, 10, 9, 1, 3]
print (lista)
lista.reverse()
print (lista)
lista = ['abc', 'a', 'bcd', 'c', 'bb', 'abcd']
print (lista)
lista.reverse()
print (lista)
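# list.reverse() reverses in place and returns None. Alternatives that return a
# new list and leave the original untouched:
# invertida = lista[::-1]
# invertida = list(reversed(lista))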
|
import os
import glob
import argparse
import subprocess
def expand_path(path):
return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))
def is_file(path):
if not path:
return False
if not os.path.isfile(path):
return False
return True
def arg_is_file(path):
    if not is_file(path):
        msg = '{0!r} is not a file'.format(path)
        raise argparse.ArgumentTypeError(msg)
    return expand_path(path)
def run_jmodeltest(name):
jmodel_proc=subprocess.Popen('java -jar ~/phylo_tools/jmodeltest-2.1.5/jModelTest.jar -d '+str(name)+' -s 3 -f -i -g 4 -BIC -c 0.95 > '+str(name)+'.results.txt', shell=True, executable='/bin/bash')
jmodel_proc.wait()
def get_models(f, gene_name, out):
    with open(f) as fl:
        for line in fl:
            line = line.strip()
            if "the 95% confidence interval" in line:
                model = line.split(': ')[1]
                out.write(str(gene_name) + '\t' + str(model) + '\n')
def main():
    for f in glob.glob('*.nex'):
        run_jmodeltest(f)
    with open('models.txt', 'w') as out:
        for f in glob.glob('*.results.txt'):
            gene_name = f.split('.')[0]
            get_models(f, gene_name, out)
''' description = ('This program will run jModelTest on a single file or set '
'of files in nexus format. User can choose the set of models'
'and type of summary using flags. The standard 24 models used'
'in MrBayes and BIC summary with 95% credible set are defaults.')
FILE_FORMATS = ['nex']
parser = argparse.ArgumentParser(description = description)
parser.add_argument('input_files', metavar='INPUT-SEQ-FILE',
nargs = '+',
type = arg_is_file,
help = ('Input sequence file(s) name '))
parser.add_argument('-o', '--out-format',
type = str,
choices = ['nex', 'fasta', 'phy'],
help = ('The format of the output sequence file(s). Valid options '))
parser.add_argument('-j', '--path-to-jModelTest',
type = str,
help=('The full path to the jModelTest executable'))
    parser.add_argument('-s', '--substitution-models',
            type = str,
            choices = ['3','5','7','11'],
            default = ['3'],
            help = ('Number of substitution schemes to test. Default is all GTR models "-s 3".'))
    parser.add_argument('-g', '--gamma',
            type = str,
            default = ['4'],
            help = ('Include models with rate variation among sites and number of categories (e.g., -g 8)'))
    parser.add_argument('-i', '--invar',
            type = str,
            default = ['false'],
            help = ('include models with a proportion of invariable sites (e.g., -i)'))
    args = parser.parse_args()
args = parser.parse_args()
for f in args.input_files:
in_type=os.path.splitext(f)[1]
filename=os.path.splitext(f)[0]
if in_type == '.nex' or in_type == '.nexus':
dict=in_nex(f)
elif in_type == '.fa' or in_type == '.fas' or in_type == '.fasta':
dict=in_fasta(f)
elif in_type == '.phy' or in_type == '.phylip':
dict=in_phy(f)
if args.out_format == 'nex':
out_nex(dict, filename)
elif args.out_format == 'fasta':
out_fasta(dict, filename)
elif args.out_format == 'phy':
out_phy(dict, filename)'''
if __name__ == '__main__':
main()
|
from flask import Blueprint, render_template, request,flash, redirect, url_for
from app.{resources}.models import {Resources}, {Resources}Schema
{resources} = Blueprint('{resources}', __name__, template_folder='templates')
schema = {Resources}Schema()
@{resources}.route('/' )
def {resource}_index():
{resources} = {Resources}.query.all()
results = schema.dump({resources}, many=True).data
return render_template('/{resources}/index.html', results=results)
@{resources}.route('/add' , methods=['POST', 'GET'])
def {resource}_add():
if request.method == 'POST':
#Validate form values by de-serializing the request, http://marshmallow.readthedocs.org/en/latest/quickstart.html#validation
form_errors = schema.validate(request.form.to_dict())
if not form_errors:
{resource}={Resources}({add_fields})
return add({resource}, success_url = '{resources}.{resource}_index', fail_url = '{resources}.{resource}_add')
else:
flash(form_errors)
return render_template('/{resources}/add.html')
@{resources}.route('/update/<int:id>' , methods=['POST', 'GET'])
def {resource}_update (id):
#Get {resource} by primary key:
{resource}={Resources}.query.get_or_404(id)
if request.method == 'POST':
form_errors = schema.validate(request.form.to_dict())
if not form_errors:
{update_fields}
return update({resource} , id, success_url = '{resources}.{resource}_index', fail_url = '{resources}.{resource}_update')
else:
flash(form_errors)
return render_template('/{resources}/update.html', {resource}={resource})
@{resources}.route('/delete/<int:id>' , methods=['POST', 'GET'])
def {resource}_delete (id):
{resource} = {Resources}.query.get_or_404(id)
return delete({resource}, fail_url = '{resources}.{resource}_index')
def add (data, success_url = '', fail_url = ''):
add = data.add(data)
    # data.add() returns an error message on failure, or a falsy value on success
    if not add:
flash("Add was successful")
return redirect(url_for(success_url))
else:
message=add
flash(message)
return redirect(url_for(fail_url))
def update (data, id, success_url = '', fail_url = ''):
update=data.update()
    # data.update() returns an error message on failure, or a falsy value on success
    if not update:
flash("Update was successful")
return redirect(url_for(success_url))
else:
message=update
flash(message)
return redirect(url_for(fail_url, id=id))
def delete (data, fail_url=''):
delete=data.delete(data)
    if not delete:
flash("Delete was successful")
else:
message=delete
flash(message)
return redirect(url_for(fail_url))
|
"""
/***************************************************************************
DsgTools
A QGIS plugin
Brazilian Army Cartographic Production Tools
-------------------
begin : 2019-04-26
git sha : $Format:%H$
copyright : (C) 2019 by Philipe Borba -
Cartographic Engineer @ Brazilian Army
email : borba.philipe@eb.mil.br
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from PyQt5.QtCore import QCoreApplication
from qgis.core import (QgsDataSourceUri, QgsExpression, QgsExpressionContext,
QgsExpressionContextUtils, QgsProcessing,
QgsProcessingAlgorithm,
QgsProcessingOutputMultipleLayers,
QgsProcessingParameterExpression,
QgsProcessingParameterMultipleLayers,
QgsProcessingParameterNumber,
QgsProcessingParameterString, QgsProject)
from qgis.utils import iface
class GroupLayersAlgorithm(QgsProcessingAlgorithm):
"""
Algorithm to group layers according to primitive, dataset and a category.
INPUT_LAYERS: list of QgsVectorLayer
CATEGORY_TOKEN: token used to split layer name
CATEGORY_TOKEN_INDEX: index of the split list
OUTPUT: list of outputs
"""
INPUT_LAYERS = 'INPUT_LAYERS'
CATEGORY_EXPRESSION = 'CATEGORY_EXPRESSION'
OUTPUT = 'OUTPUT'
def initAlgorithm(self, config):
"""
Parameter setting.
"""
self.addParameter(
QgsProcessingParameterMultipleLayers(
self.INPUT_LAYERS,
self.tr('Input Layers'),
QgsProcessing.TypeVector
)
)
self.addParameter(
QgsProcessingParameterExpression(
self.CATEGORY_EXPRESSION,
self.tr('Expression used to find out the category'),
defaultValue="regexp_substr(@layer_name ,'([^_]+)')"
)
)
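        # With the default expression above, a layer named e.g. "hyd_river"
        # (a hypothetical name) falls into category "hyd", i.e. the text
        # before the first underscore.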
self.addOutput(
QgsProcessingOutputMultipleLayers(
self.OUTPUT,
self.tr('Original reorganized layers')
)
)
def processAlgorithm(self, parameters, context, feedback):
"""
Here is where the processing itself takes place.
"""
inputLyrList = self.parameterAsLayerList(
parameters,
self.INPUT_LAYERS,
context
)
categoryExpression = self.parameterAsExpression(
parameters,
self.CATEGORY_EXPRESSION,
context
)
listSize = len(inputLyrList)
progressStep = 100/listSize if listSize else 0
rootNode = QgsProject.instance().layerTreeRoot()
inputLyrList.sort(key=lambda x: (x.geometryType(), x.name()))
geometryNodeDict = {
0 : self.tr('Point'),
1 : self.tr('Line'),
2 : self.tr('Polygon'),
4 : self.tr('Non spatial')
}
iface.mapCanvas().freeze(True)
for current, lyr in enumerate(inputLyrList):
if feedback.isCanceled():
break
rootDatabaseNode = self.getLayerRootNode(lyr, rootNode)
geometryNode = self.createGroup(
geometryNodeDict[lyr.geometryType()],
rootDatabaseNode
)
categoryNode = self.getLayerCategoryNode(
lyr,
geometryNode,
categoryExpression
)
lyrNode = rootNode.findLayer(lyr.id())
myClone = lyrNode.clone()
categoryNode.addChildNode(myClone)
# not thread safe, must set flag to FlagNoThreading
rootNode.removeChildNode(lyrNode)
feedback.setProgress(current*progressStep)
iface.mapCanvas().freeze(False)
return {self.OUTPUT: [i.id() for i in inputLyrList]}
def getLayerRootNode(self, lyr, rootNode):
"""
        Finds the database name of the layer and creates a node with that
        name if it does not already exist.
lyr: (QgsVectorLayer)
rootNode: (node item)
"""
uriText = lyr.dataProvider().dataSourceUri()
candidateUri = QgsDataSourceUri(uriText)
rootNodeName = candidateUri.database()
if not rootNodeName:
rootNodeName = self.getRootNodeName(uriText)
#creates database root
return self.createGroup(rootNodeName, rootNode)
def getRootNodeName(self, uriText):
"""
Gets root node name from uri according to provider type.
"""
if 'memory?' in uriText:
rootNodeName = 'memory'
elif 'dbname' in uriText:
rootNodeName = uriText.replace('dbname=', '').split(' ')[0]
elif '|' in uriText:
rootNodeName = os.path.dirname(uriText.split(' ')[0].split('|')[0])
else:
rootNodeName = 'unrecognised_format'
return rootNodeName
def getLayerCategoryNode(self, lyr, rootNode, categoryExpression):
"""
        Finds the category node based on the category expression and
        creates it if it does not already exist.
"""
exp = QgsExpression(categoryExpression)
context = QgsExpressionContext()
context.appendScopes(
QgsExpressionContextUtils.globalProjectLayerScopes(lyr)
)
if exp.hasParserError():
raise Exception(exp.parserErrorString())
if exp.hasEvalError():
raise ValueError(exp.evalErrorString())
categoryText = exp.evaluate(context)
return self.createGroup(categoryText, rootNode)
def createGroup(self, groupName, rootNode):
"""
Create group with the name groupName and parent rootNode.
"""
groupNode = rootNode.findGroup(groupName)
return groupNode if groupNode else rootNode.addGroup(groupName)
def name(self):
"""
Returns the algorithm name, used for identifying the algorithm. This
string should be fixed for the algorithm, and must not be localised.
The name should be unique within each provider. Names should contain
lowercase alphanumeric characters only and no spaces or other
formatting characters.
"""
return 'grouplayers'
def displayName(self):
"""
Returns the translated algorithm name, which should be used for any
user-visible display of the algorithm name.
"""
return self.tr('Group Layers')
def group(self):
"""
Returns the name of the group this algorithm belongs to. This string
should be localised.
"""
return self.tr('Layer Management Algorithms')
def groupId(self):
"""
Returns the unique ID of the group this algorithm belongs to. This
string should be fixed for the algorithm, and must not be localised.
The group id should be unique within each provider. Group id should
contain lowercase alphanumeric characters only and no spaces or other
formatting characters.
"""
return 'DSGTools: Layer Management Algorithms'
def tr(self, string):
"""
Translates input string.
"""
return QCoreApplication.translate('GroupLayersAlgorithm', string)
def createInstance(self):
"""
Creates an instance of this class
"""
return GroupLayersAlgorithm()
def flags(self):
"""
This process is not thread safe due to the fact that removeChildNode
method from QgsLayerTreeGroup is not thread safe.
"""
return super().flags() | QgsProcessingAlgorithm.FlagNoThreading
|
"""
Default configuration values for certmaster items when
not specified in config file.
Copyright 2008, Red Hat, Inc
see AUTHORS
This software may be freely redistributed under the terms of the GNU
general public license.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""
from config import BaseConfig, BoolOption, IntOption, Option
class CMConfig(BaseConfig):
log_level = Option('INFO')
listen_addr = Option('')
listen_port = IntOption(51235)
cadir = Option('/etc/pki/certmaster/ca')
cert_dir = Option('/etc/pki/certmaster')
certroot = Option('/var/lib/certmaster/certmaster/certs')
csrroot = Option('/var/lib/certmaster/certmaster/csrs')
cert_extension = Option('cert')
autosign = BoolOption(False)
sync_certs = BoolOption(False)
peering = BoolOption(True)
peerroot = Option('/var/lib/certmaster/peers')
hash_function = Option('sha256')
class MinionConfig(BaseConfig):
log_level = Option('INFO')
certmaster = Option('certmaster')
certmaster_port = IntOption(51235)
cert_dir = Option('/etc/pki/certmaster')
|
import forge
from forge.models.groups import Group
class Add(object):
def __init__(self,json_args,session):
        if not isinstance(json_args, dict):
            raise TypeError("JSON Arg must be dict type")
        if 'name' not in json_args or 'distribution' not in json_args:
            raise forge.ArgumentError()
self.name = json_args['name']
self.distribution = json_args['distribution']
self.session = session
def call(self):
group = Group(self.name,self.distribution)
self.session.add(group)
self.session.commit()
return {'name':self.name, 'distribution':self.distribution}
|
'A simple client for accessing api.ly.g0v.tw.'
import json
import unittest
try:
    import urllib.request as request
    import urllib.parse as urlparse
except ImportError:
    import urllib2 as request
    import urllib as urlparse
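# Py2/Py3 compatibility shim: the type checks in the tests below reference
# `unicode`, which does not exist on Python 3.
try:
    unicode
except NameError:
    unicode = str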
def assert_args(func):
    # Decorator: require that the first positional argument after `self`
    # is non-empty.
    def inner(*args):
        required_arg = args[1]
        assert len(required_arg) > 0
        return func(*args)
    return inner
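# Illustrative usage: LY_G0V_Client().fetch_bill('') fails the assertion
# above before any HTTP request is made.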
class LY_G0V_Client:
BASE_URL = 'http://api-beta.ly.g0v.tw/v0/'
# BASE_URL = 'http://api.ly.g0v.tw/v0/'
def _fetch_data(self, url_path):
URL = LY_G0V_Client.BASE_URL + url_path
try:
f = request.urlopen(URL)
r = f.read()
r = r.decode('utf-8')
return json.loads(r)
except Exception as e:
print("Failed to call " + URL)
raise e
def fetch_all_bills(self):
'Fetch all bills.'
return self._fetch_data('collections/bills')
def fetch_all_motions(self):
'Fetch all motions.'
return self._fetch_data('collections/motions')
def fetch_all_sittings(self):
'Fetch all sittings.'
return self._fetch_data('collections/sittings')
@assert_args
def fetch_bill(self, bill_id):
'Fetch metadata of a specific bill.'
return self._fetch_data('collections/bills/' + str(bill_id))
@assert_args
def fetch_bill_data(self, bill_id):
'Fetch data of a specific bill.'
assert(len(bill_id) > 0)
return self._fetch_data('collections/bills/' + str(bill_id) + '/data')
@assert_args
def fetch_motions_related_with_bill(self, bill_id):
'Fetch motions related with a specific bill.'
query = json.dumps({'bill_ref': bill_id})
query = urlparse.quote(query)
return self._fetch_data('collections/motions/?q='+query)
@assert_args
def fetch_sitting(self, sitting_id):
		'Fetch metadata of a specific sitting.'
		return self._fetch_data('collections/sittings/' + str(sitting_id))
class TestClient(unittest.TestCase):
def setUp(self):
import time
time.sleep(1)
self.client = LY_G0V_Client()
def _test_bill(self, bill):
self.assertTrue(isinstance(bill, dict), str(type(bill)))
keys = ('proposed_by', 'doc', 'abstract', 'sponsors',
'summary', 'bill_ref', 'motions', 'cosponsors',
				'bill_id')
for key in keys:
self.assertTrue(key in bill)
if isinstance(bill['doc'], dict):
self.assertTrue('pdf' in bill['doc'])
self.assertTrue('doc' in bill['doc'])
def _test_bills(self, bills):
for key in ('entries', 'paging'):
self.assertTrue(key in bills)
for key in ('l', 'sk', 'count'):
self.assertTrue(key in bills['paging'])
for bill in bills['entries']:
self._test_bill(bill)
def _test_motion(self, motion):
self.assertTrue(isinstance(motion, dict), str(type(motion)))
keys = ('result', 'resolution', 'motion_class', 'bill_id',
'agenda_item', 'bill_ref', 'tts_id',
'subitem', 'status', 'sitting_id', 'item',
'summary', 'tts_seq', 'proposed_by', 'doc')
for key in keys:
self.assertTrue(key in motion, key)
if isinstance(motion['doc'], dict):
self.assertTrue('pdf' in motion['doc'])
self.assertTrue('doc' in motion['doc'])
def _test_motions(self, motions):
self.assertTrue(isinstance(motions, dict), str(type(motions)))
for key in ('entries', 'paging'):
self.assertTrue(key in motions)
for key in ('l', 'sk', 'count'):
self.assertTrue(key in motions['paging'])
for motion in motions['entries']:
self._test_motion(motion)
def _test_data(self, data):
for key in ('related', 'content'):
self.assertTrue(key in data)
self.assertTrue(isinstance(data['related'], list))
self.assertTrue(isinstance(data['content'], list))
for item in data['content']:
content_keys = ('name', 'type', 'content', 'header')
for content_key in content_keys:
self.assertTrue(content_key in item)
self.assertTrue(len(item['name']) > 0)
self.assertTrue(isinstance(item['name'], str) or \
isinstance(item['name'], unicode))
self.assertTrue(len(item['type']) > 0)
self.assertTrue(isinstance(item['type'], str) or \
isinstance(item['type'], unicode))
self.assertTrue(len(item['content']) > 0)
self.assertTrue(isinstance(item['content'], list))
for content in item['content']:
self.assertTrue(isinstance(content, list))
for line in content:
self.assertTrue(isinstance(line, str))
self.assertTrue(len(item['header']) > 0)
self.assertTrue(isinstance(item['header'], list))
for header in item['header']:
self.assertTrue(isinstance(header, str) or \
isinstance(header, unicode))
def _test_sitting(self, sitting):
self.assertTrue(isinstance(sitting, dict), str(type(sitting)))
keys = ('dates', 'ad', 'videos', 'extra', 'motions',
'sitting', 'summary', 'session', 'committee', 'id',
'name')
for key in keys:
self.assertTrue(key in sitting, key)
def _test_sittings(self, sittings):
self.assertTrue(isinstance(sittings, dict), str(type(sittings)))
for key in ('entries', 'paging'):
self.assertTrue(key in sittings)
for key in ('l', 'sk', 'count'):
self.assertTrue(key in sittings['paging'])
for sitting in sittings['entries']:
self._test_sitting(sitting)
def test_all_bills(self):
bills = self.client.fetch_all_bills()
self._test_bills(bills)
def test_all_motions(self):
motions = self.client.fetch_all_motions()
self._test_motions(motions)
def test_all_sittings(self):
sittings = self.client.fetch_all_sittings()
self._test_sittings(sittings)
def test_fetch_bill(self):
bill = self.client.fetch_bill('1021021071000400')
self._test_bill(bill)
def test_fetch_bill_data(self):
data = self.client.fetch_bill_data('1021021071000400')
self._test_data(data)
def test_fetch_motions_related_with_bill(self):
motions = self.client.fetch_motions_related_with_bill('1021021071000400')
self._test_motions(motions)
if __name__ == '__main__':
unittest.main()
|
import io, readconf, getinfo, pastebin
import os, sys, gettext,string, pexpect,getpass
gettext.textdomain('inforevealer')
_ = gettext.gettext
__version__="0.5.1"
def askYesNo(question,default='y'):
	""" Ask a yes/no question through the console """
	if default.lower() == 'y':
		question = question + " [Y/n]"
	else:
		question = question + " [y/N]"
	ret = raw_input(question).lower()
if ret == 'y' or ret == "":
answer=True
else:
answer=False
return answer
def RunAs(category_info,gui=False):
""" Check if root is needed, if user want to be root... """
if gui: from gui import yesNoDialog
run_as='user'
if os.getuid() == 0:
#we are root
run_as='root'
else:
#check if root is needed
root_needed=False
for i in category_info:
if i.root:
root_needed=True
break
if root_needed:
#ask if the user want to substitute
question=_("""To generate a complete report, root access is needed.
Do you want to substitute user?""")
if gui:
#substitute=yesNoDialog(question=question)
				substitute=True #It seems more comfortable to remove the question
else:
#substitute=askYesNo(question)
				substitute=True #It seems more comfortable to remove the question
if substitute:
run_as="substitute"
else:
run_as="user"
else:
run_as='user'
return run_as
def CompleteReportAsRoot(run_as,tmp_configfile,gui=False):
"""Run a new instance of inforevealer with root priviledge to complete tmp_configfile"""
if gui: from gui import askPassword
if run_as == "substitute":
#find the substitute user command and run the script
if pexpect.which('su') != None:
message=_("Please, enter the root password.")
root_instance = str(pexpect.which('su')) + " - -c \'"+ os.path.abspath(sys.argv[0])+" --runfile "+ tmp_configfile+"\'"
elif pexpect.which('sudo') != None: #TODO checkme
message=_("Please, enter your user password.")
root_instance = str(pexpect.which('sudo')) + ' ' + os.path.abspath(sys.argv[0])+' --runfile '+ tmp_configfile
else:
sys.stderr.write(_("Error: No substitute user command available.\n"))
return 1
ret=""
count=0
while ret!=[' \r\n'] and count <3:
#Get password
count+=1
if gui:
password=askPassword(question=message)
else:
print(message)
password=getpass.getpass()
if password != False: #askPassword could return False
#Run the command #TODO exceptions ?
child = pexpect.spawn(root_instance)
ret=child.expect([".*:",pexpect.EOF]) #Could we do more ?
child.sendline(password)
ret = child.readlines()
if ret ==[' \r\n']: return 0
message=_("Wrong password.\nThe log will be generated without root priviledge.")
if gui:
import gtk
md = gtk.MessageDialog(None, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_INFO, gtk.BUTTONS_CLOSE, message)
md.set_title(_("Error"))
md.run()
md.destroy()
else:
print(message)
def action(category,dumpfile,configfile,tmp_configfile,verbosity,gui=False):
if gui: from gui import yesNoDialog
#####################
# Write in dumpfile
#####################
dumpfile_handler= open(dumpfile,'w')
io.print_write_header(dumpfile_handler)
dumpfile_handler.write('Category: '+ category+'\n')
category_info = readconf.LoadCategoryInfo(configfile,category)
#need/want to run commands as...
run_as = RunAs(category_info,gui)
#detect which distribution the user uses
linux_distrib=getinfo.General_info(dumpfile_handler)
# In the case of run_as='substitute'
# a configuration file is generated
# su/sudo is used to run a new instance of inforevealer in append mode
# to complete the report
tmp_configfile_handler= open(tmp_configfile,'w')
for i in category_info:
i.write(linux_distrib,verbosity,dumpfile_handler,dumpfile,run_as,tmp_configfile_handler)
tmp_configfile_handler.close()
#Use su or sudo to complete the report
dumpfile_handler.close() #the next function will modify the report, close the dumpfile
CompleteReportAsRoot(run_as,tmp_configfile,gui)
# Message to close the report
dumpfile_handler= open(dumpfile,'a')
io.write_title("You didn\'t find what you expected?",dumpfile_handler)
dumpfile_handler.write( 'Please, open a bug report on\nhttp://github.com/sciunto/inforevealer\n')
dumpfile_handler.close()
print( _("The output has been dumped in %s") %dumpfile)
|
from __future__ import print_function
import sys
from blib_util import *
def build_kfont(build_info):
for compiler_info in build_info.compilers:
build_a_project("kfont", "kfont", build_info, compiler_info, True)
if __name__ == "__main__":
cfg = cfg_from_argv(sys.argv)
bi = build_info(cfg.compiler, cfg.archs, cfg.cfg)
print("Building kfont...")
build_kfont(bi)
|
import json
import logging
import sys
import time
from webkitpy.common.system.autoinstall import AutoInstaller
from webkitpy.layout_tests.servers import http_server_base
_log = logging.getLogger(__name__)
def doc_root(port_obj):
doc_root = port_obj.get_option("wptserver_doc_root")
if doc_root is None:
return port_obj.host.filesystem.join("imported", "w3c", "web-platform-tests")
return doc_root
def base_url(port_obj):
config_wk_filepath = port_obj._filesystem.join(port_obj.layout_tests_dir(), "imported", "w3c", "resources", "config.json")
if not port_obj.host.filesystem.isfile(config_wk_filepath):
# This should only be hit by webkitpy unit tests
_log.debug("No WPT config file found")
return "http://localhost:8800/"
json_data = port_obj._filesystem.read_text_file(config_wk_filepath)
config = json.loads(json_data)
ports = config["ports"]
return "http://" + config["host"] + ":" + str(ports["http"][0]) + "/"
class WebPlatformTestServer(http_server_base.HttpServerBase):
def __init__(self, port_obj, name, pidfile=None):
http_server_base.HttpServerBase.__init__(self, port_obj)
self._output_dir = port_obj.results_directory()
self._name = name
self._log_file_name = '%s_process_log.out.txt' % (self._name)
self._wsout = None
self._process = None
self._pid_file = pidfile
if not self._pid_file:
self._pid_file = self._filesystem.join(self._runtime_path, '%s.pid' % self._name)
self._servers_file = self._filesystem.join(self._runtime_path, '%s_servers.json' % (self._name))
self._stdout_data = None
self._stderr_data = None
self._filesystem = port_obj.host.filesystem
self._layout_root = port_obj.layout_tests_dir()
self._doc_root = self._filesystem.join(self._layout_root, doc_root(port_obj))
self._resources_files_to_copy = ['testharness.css', 'testharnessreport.js']
current_dir_path = self._filesystem.abspath(self._filesystem.split(__file__)[0])
self._start_cmd = ["python", self._filesystem.join(current_dir_path, "web_platform_test_launcher.py"), self._servers_file]
self._doc_root_path = self._filesystem.join(self._layout_root, self._doc_root)
def _install_modules(self):
modules_file_path = self._filesystem.join(self._doc_root_path, "..", "resources", "web-platform-tests-modules.json")
if not self._filesystem.isfile(modules_file_path):
_log.warning("Cannot read " + modules_file_path)
return
modules = json.loads(self._filesystem.read_text_file(modules_file_path))
for module in modules:
path = module["path"]
name = path.pop()
AutoInstaller(target_dir=self._filesystem.join(self._doc_root, self._filesystem.sep.join(path))).install(url=module["url"], url_subpath=module["url_subpath"], target_name=name)
def _copy_webkit_test_files(self):
_log.debug('Copying WebKit resources files')
for f in self._resources_files_to_copy:
webkit_filename = self._filesystem.join(self._layout_root, "resources", f)
if self._filesystem.isfile(webkit_filename):
self._filesystem.copyfile(webkit_filename, self._filesystem.join(self._doc_root, "resources", f))
_log.debug('Copying WebKit web platform server config.json')
config_wk_filename = self._filesystem.join(self._layout_root, "imported", "w3c", "resources", "config.json")
if self._filesystem.isfile(config_wk_filename):
config_json = self._filesystem.read_text_file(config_wk_filename).replace("%CERTS_DIR%", self._filesystem.join(self._output_dir, "_wpt_certs"))
self._filesystem.write_text_file(self._filesystem.join(self._doc_root, "config.json"), config_json)
wpt_testharnessjs_file = self._filesystem.join(self._doc_root, "resources", "testharness.js")
layout_tests_testharnessjs_file = self._filesystem.join(self._layout_root, "resources", "testharness.js")
# FIXME: Next line to be removed once all bots have wpt_testharnessjs_file updated correctly. See https://bugs.webkit.org/show_bug.cgi?id=152257.
self._filesystem.copyfile(layout_tests_testharnessjs_file, wpt_testharnessjs_file)
if (not self._filesystem.compare(wpt_testharnessjs_file, layout_tests_testharnessjs_file)):
_log.warning("\n//////////\nWPT tests are not using the same testharness.js file as other WebKit Layout tests.\nWebKit testharness.js might need to be updated according WPT testharness.js.\n//////////\n")
def _clean_webkit_test_files(self):
_log.debug('Cleaning WPT resources files')
for f in self._resources_files_to_copy:
wpt_filename = self._filesystem.join(self._doc_root, "resources", f)
if self._filesystem.isfile(wpt_filename):
self._filesystem.remove(wpt_filename)
_log.debug('Cleaning WPT web platform server config.json')
config_wpt_filename = self._filesystem.join(self._doc_root, "config.json")
if self._filesystem.isfile(config_wpt_filename):
self._filesystem.remove(config_wpt_filename)
def _prepare_config(self):
if self._filesystem.exists(self._output_dir):
output_log = self._filesystem.join(self._output_dir, self._log_file_name)
self._wsout = self._filesystem.open_text_file_for_writing(output_log)
self._install_modules()
self._copy_webkit_test_files()
def _spawn_process(self):
self._stdout_data = None
self._stderr_data = None
if self._wsout:
self._process = self._executive.popen(self._start_cmd, cwd=self._doc_root_path, shell=False, stdin=self._executive.PIPE, stdout=self._wsout, stderr=self._wsout)
else:
self._process = self._executive.popen(self._start_cmd, cwd=self._doc_root_path, shell=False, stdin=self._executive.PIPE, stdout=self._executive.PIPE, stderr=self._executive.STDOUT)
self._filesystem.write_text_file(self._pid_file, str(self._process.pid))
# Wait a second for the server to actually start so that tests do not start until server is running.
time.sleep(1)
return self._process.pid
def _stop_running_subservers(self):
if self._filesystem.exists(self._servers_file):
try:
json_data = self._filesystem.read_text_file(self._servers_file)
started_servers = json.loads(json_data)
for server in started_servers:
if self._executive.check_running_pid(server['pid']):
_log.warning('Killing server process (protocol: %s , port: %d, pid: %d).' % (server['protocol'], server['port'], server['pid']))
self._executive.kill_process(server['pid'])
finally:
self._filesystem.remove(self._servers_file)
def stop(self):
super(WebPlatformTestServer, self).stop()
# In case of orphaned pid, kill the running subservers if any still alive.
self._stop_running_subservers()
def _stop_running_server(self):
_log.debug('Stopping %s server' % (self._name))
self._clean_webkit_test_files()
if self._process:
(self._stdout_data, self._stderr_data) = self._process.communicate(input='\n')
if self._wsout:
self._wsout.close()
self._wsout = None
if self._pid and self._executive.check_running_pid(self._pid):
_log.warning('Cannot stop %s server normally.' % (self._name))
_log.warning('Killing server launcher process (pid: %d).' % (self._pid))
self._executive.kill_process(self._pid)
self._remove_pid_file()
self._stop_running_subservers()
|
"""
OK resolveurl XBMC Addon
Copyright (C) 2016 Seberoth
Version 0.0.2
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json, urllib
from resolveurl import common
from lib import helpers
from resolveurl.resolver import ResolveUrl, ResolverError
class OKResolver(ResolveUrl):
name = "ok.ru"
domains = ['ok.ru', 'odnoklassniki.ru']
    pattern = r'(?://|\.)(ok\.ru|odnoklassniki\.ru)/(?:videoembed|video)/(\d+)'
header = {"User-Agent": common.OPERA_USER_AGENT}
qual_map = {'ultra': '2160', 'quad': '1440', 'full': '1080', 'hd': '720', 'sd': '480', 'low': '360', 'lowest': '240', 'mobile': '144'}
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
vids = self.__get_Metadata(media_id)
sources = []
for entry in vids['urls']:
quality = self.__replaceQuality(entry['name'])
sources.append((quality, entry['url']))
try: sources.sort(key=lambda x: int(x[0]), reverse=True)
except: pass
source = helpers.pick_source(sources)
source = source.encode('utf-8') + helpers.append_headers(self.header)
return source
def __replaceQuality(self, qual):
return self.qual_map.get(qual.lower(), '000')
def __get_Metadata(self, media_id):
url = "http://www.ok.ru/dk"
data = {'cmd': 'videoPlayerMetadata', 'mid': media_id}
data = urllib.urlencode(data)
html = self.net.http_POST(url, data, headers=self.header).content
json_data = json.loads(html)
if 'error' in json_data:
raise ResolverError('File Not Found or removed')
info = dict()
info['urls'] = []
for entry in json_data['videos']:
info['urls'].append(entry)
return info
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, 'http://{host}/videoembed/{media_id}')
|
from functions.str import w_str
from wtypes.control import WEvalRequired, WRaisedException, WReturnValue
from wtypes.exception import WException
from wtypes.magic_macro import WMagicMacro
from wtypes.boolean import WBoolean
class WAssert(WMagicMacro):
def call_magic_macro(self, exprs, scope):
if len(exprs) != 1:
raise Exception(
"Macro assert expected 1 argument. "
"Got {} instead.".format(len(exprs)))
expr = exprs[0]
src = w_str(expr)
def callback(_value):
if _value is WBoolean.false:
return WRaisedException(
exception=WException(f'Assertion failed: {src}'))
return WReturnValue(expr=_value)
return WEvalRequired(expr=expr, callback=callback)
|
"""
Builds out and synchronizes yum repo mirrors.
Initial support for rsync, perhaps reposync coming later.
Copyright 2006-2007, Red Hat, Inc
Michael DeHaan <mdehaan@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import os
import os.path
import time
import yaml # Howell-Clark version
import sys
HAS_YUM = True
try:
import yum
except:
HAS_YUM = False
import utils
from cexceptions import *
import traceback
import errno
from utils import _
import clogger
class RepoSync:
"""
Handles conversion of internal state to the tftpboot tree layout
"""
# ==================================================================================
def __init__(self,config,tries=1,nofail=False,logger=None):
"""
Constructor
"""
self.verbose = True
self.api = config.api
self.config = config
self.distros = config.distros()
self.profiles = config.profiles()
self.systems = config.systems()
self.settings = config.settings()
self.repos = config.repos()
self.rflags = self.settings.reposync_flags
self.tries = tries
self.nofail = nofail
self.logger = logger
if logger is None:
self.logger = clogger.Logger()
self.logger.info("hello, reposync")
# ===================================================================
def run(self, name=None, verbose=True):
"""
Syncs the current repo configuration file with the filesystem.
"""
self.logger.info("run, reposync, run!")
try:
self.tries = int(self.tries)
        except (TypeError, ValueError):
utils.die(self.logger,"retry value must be an integer")
self.verbose = verbose
report_failure = False
for repo in self.repos:
env = repo.environment
for k in env.keys():
self.logger.info("environment: %s=%s" % (k,env[k]))
if env[k] is not None:
os.putenv(k,env[k])
if name is not None and repo.name != name:
# invoked to sync only a specific repo, this is not the one
continue
elif name is None and not repo.keep_updated:
# invoked to run against all repos, but this one is off
self.logger.info("%s is set to not be updated" % repo.name)
continue
repo_mirror = os.path.join(self.settings.webdir, "repo_mirror")
repo_path = os.path.join(repo_mirror, repo.name)
mirror = repo.mirror
if not os.path.isdir(repo_path) and not repo.mirror.lower().startswith("rhn://"):
os.makedirs(repo_path)
# which may actually NOT reposync if the repo is set to not mirror locally
# but that's a technicality
for x in range(self.tries+1,1,-1):
success = False
try:
self.sync(repo)
success = True
except:
utils.log_exc(self.logger)
self.logger.warning("reposync failed, tries left: %s" % (x-2))
if not success:
report_failure = True
if not self.nofail:
utils.die(self.logger,"reposync failed, retry limit reached, aborting")
else:
self.logger.error("reposync failed, retry limit reached, skipping")
self.update_permissions(repo_path)
if report_failure:
utils.die(self.logger,"overall reposync failed, at least one repo failed to synchronize")
return True
# ==================================================================================
def sync(self, repo):
"""
Conditionally sync a repo, based on type.
"""
if repo.breed == "rhn":
return self.rhn_sync(repo)
elif repo.breed == "yum":
return self.yum_sync(repo)
#elif repo.breed == "apt":
# return self.apt_sync(repo)
elif repo.breed == "rsync":
return self.rsync_sync(repo)
else:
utils.die(self.logger,"unable to sync repo (%s), unknown or unsupported repo type (%s)" % (repo.name, repo.breed))
# ====================================================================================
def createrepo_walker(self, repo, dirname, fnames):
"""
Used to run createrepo on a copied Yum mirror.
"""
if os.path.exists(dirname) or repo['breed'] == 'rsync':
utils.remove_yum_olddata(dirname)
# add any repo metadata we can use
mdoptions = []
if os.path.isfile("%s/.origin/repomd.xml" % (dirname)):
if not HAS_YUM:
utils.die(self.logger,"yum is required to use this feature")
rmd = yum.repoMDObject.RepoMD('', "%s/.origin/repomd.xml" % (dirname))
if rmd.repoData.has_key("group"):
groupmdfile = rmd.getData("group").location[1]
mdoptions.append("-g %s" % groupmdfile)
if rmd.repoData.has_key("prestodelta"):
# need createrepo >= 0.9.7 to add deltas
if utils.check_dist() == "redhat" or utils.check_dist() == "suse":
cmd = "/usr/bin/rpmquery --queryformat=%{VERSION} createrepo"
createrepo_ver = utils.subprocess_get(self.logger, cmd)
if createrepo_ver >= "0.9.7":
mdoptions.append("--deltas")
else:
utils.die(self.logger,"this repo has presto metadata; you must upgrade createrepo to >= 0.9.7 first and then need to resync the repo through cobbler.")
blended = utils.blender(self.api, False, repo)
flags = blended.get("createrepo_flags","(ERROR: FLAGS)")
try:
# BOOKMARK
cmd = "createrepo %s %s %s" % (" ".join(mdoptions), flags, dirname)
utils.subprocess_call(self.logger, cmd)
except:
utils.log_exc(self.logger)
self.logger.error("createrepo failed.")
del fnames[:] # we're in the right place
# ====================================================================================
def rsync_sync(self, repo):
"""
Handle copying of rsync:// and rsync-over-ssh repos.
"""
repo_mirror = repo.mirror
if not repo.mirror_locally:
utils.die(self.logger,"rsync:// urls must be mirrored locally, yum cannot access them directly")
if repo.rpm_list != "" and repo.rpm_list != []:
self.logger.warning("--rpm-list is not supported for rsync'd repositories")
# FIXME: don't hardcode
dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)
spacer = ""
if not repo.mirror.startswith("rsync://") and not repo.mirror.startswith("/"):
spacer = "-e ssh"
if not repo.mirror.endswith("/"):
repo.mirror = "%s/" % repo.mirror
# FIXME: wrapper for subprocess that logs to logger
cmd = "rsync -rltDv %s --delete --exclude-from=/etc/cobbler/rsync.exclude %s %s" % (spacer, repo.mirror, dest_path)
rc = utils.subprocess_call(self.logger, cmd)
if rc !=0:
utils.die(self.logger,"cobbler reposync failed")
os.path.walk(dest_path, self.createrepo_walker, repo)
self.create_local_file(dest_path, repo)
# ====================================================================================
def rhn_sync(self, repo):
"""
Handle mirroring of RHN repos.
"""
repo_mirror = repo.mirror
# FIXME? warn about not having yum-utils. We don't want to require it in the package because
# RHEL4 and RHEL5U0 don't have it.
if not os.path.exists("/usr/bin/reposync"):
utils.die(self.logger,"no /usr/bin/reposync found, please install yum-utils")
cmd = "" # command to run
has_rpm_list = False # flag indicating not to pull the whole repo
# detect cases that require special handling
if repo.rpm_list != "" and repo.rpm_list != []:
has_rpm_list = True
# create yum config file for use by reposync
# FIXME: don't hardcode
dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)
temp_path = os.path.join(dest_path, ".origin")
if not os.path.isdir(temp_path):
# FIXME: there's a chance this might break the RHN D/L case
os.makedirs(temp_path)
# how we invoke yum-utils depends on whether this is RHN content or not.
# this is the somewhat more-complex RHN case.
# NOTE: this requires that you have entitlements for the server and you give the mirror as rhn://$channelname
if not repo.mirror_locally:
utils.die("rhn:// repos do not work with --mirror-locally=1")
if has_rpm_list:
self.logger.warning("warning: --rpm-list is not supported for RHN content")
rest = repo.mirror[6:] # everything after rhn://
cmd = "/usr/bin/reposync %s -r %s --download_path=%s" % (self.rflags, rest, "/var/www/cobbler/repo_mirror")
if repo.name != rest:
args = { "name" : repo.name, "rest" : rest }
utils.die(self.logger,"ERROR: repository %(name)s needs to be renamed %(rest)s as the name of the cobbler repository must match the name of the RHN channel" % args)
if repo.arch == "i386":
# counter-intuitive, but we want the newish kernels too
repo.arch = "i686"
if repo.arch != "":
cmd = "%s -a %s" % (cmd, repo.arch)
# now regardless of whether we're doing yumdownloader or reposync
# or whether the repo was http://, ftp://, or rhn://, execute all queued
# commands here. Any failure at any point stops the operation.
if repo.mirror_locally:
rc = utils.subprocess_call(self.logger, cmd)
# Don't die if reposync fails, it is logged
# if rc !=0:
# utils.die(self.logger,"cobbler reposync failed")
# some more special case handling for RHN.
# create the config file now, because the directory didn't exist earlier
temp_file = self.create_local_file(temp_path, repo, output=False)
# now run createrepo to rebuild the index
if repo.mirror_locally:
os.path.walk(dest_path, self.createrepo_walker, repo)
# create the config file the hosts will use to access the repository.
self.create_local_file(dest_path, repo)
# ====================================================================================
def yum_sync(self, repo):
"""
Handle copying of http:// and ftp:// yum repos.
"""
repo_mirror = repo.mirror
# warn about not having yum-utils. We don't want to require it in the package because
# RHEL4 and RHEL5U0 don't have it.
if not os.path.exists("/usr/bin/reposync"):
utils.die(self.logger,"no /usr/bin/reposync found, please install yum-utils")
cmd = "" # command to run
has_rpm_list = False # flag indicating not to pull the whole repo
# detect cases that require special handling
if repo.rpm_list != "" and repo.rpm_list != []:
has_rpm_list = True
# create yum config file for use by reposync
dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)
temp_path = os.path.join(dest_path, ".origin")
if not os.path.isdir(temp_path) and repo.mirror_locally:
# FIXME: there's a chance this might break the RHN D/L case
os.makedirs(temp_path)
# create the config file that yum will use for the copying
if repo.mirror_locally:
temp_file = self.create_local_file(temp_path, repo, output=False)
if not has_rpm_list and repo.mirror_locally:
# if we have not requested only certain RPMs, use reposync
cmd = "/usr/bin/reposync %s --config=%s --repoid=%s --download_path=%s" % (self.rflags, temp_file, repo.name, "/var/www/cobbler/repo_mirror")
if repo.arch != "":
if repo.arch == "x86":
repo.arch = "i386" # FIX potential arch errors
if repo.arch == "i386":
# counter-intuitive, but we want the newish kernels too
cmd = "%s -a i686" % (cmd)
else:
cmd = "%s -a %s" % (cmd, repo.arch)
elif repo.mirror_locally:
# create the output directory if it doesn't exist
if not os.path.exists(dest_path):
os.makedirs(dest_path)
use_source = ""
if repo.arch == "src":
use_source = "--source"
# older yumdownloader sometimes explodes on --resolvedeps
# if this happens to you, upgrade yum & yum-utils
extra_flags = self.settings.yumdownloader_flags
cmd = "/usr/bin/yumdownloader %s %s --disablerepo=* --enablerepo=%s -c %s --destdir=%s %s" % (extra_flags, use_source, repo.name, temp_file, dest_path, " ".join(repo.rpm_list))
# now regardless of whether we're doing yumdownloader or reposync
# or whether the repo was http://, ftp://, or rhn://, execute all queued
# commands here. Any failure at any point stops the operation.
if repo.mirror_locally:
rc = utils.subprocess_call(self.logger, cmd)
if rc !=0:
utils.die(self.logger,"cobbler reposync failed")
repodata_path = os.path.join(dest_path, "repodata")
if not os.path.exists("/usr/bin/wget"):
utils.die(self.logger,"no /usr/bin/wget found, please install wget")
# grab repomd.xml and use it to download any metadata we can use
cmd2 = "/usr/bin/wget -q %s/repodata/repomd.xml -O %s/repomd.xml" % (repo_mirror, temp_path)
rc = utils.subprocess_call(self.logger,cmd2)
if rc == 0:
# create our repodata directory now, as any extra metadata we're
# about to download probably lives there
if not os.path.isdir(repodata_path):
os.makedirs(repodata_path)
rmd = yum.repoMDObject.RepoMD('', "%s/repomd.xml" % (temp_path))
for mdtype in rmd.repoData.keys():
# don't download metadata files that are created by default
if mdtype not in ["primary", "primary_db", "filelists", "filelists_db", "other", "other_db"]:
mdfile = rmd.getData(mdtype).location[1]
cmd3 = "/usr/bin/wget -q %s/%s -O %s/%s" % (repo_mirror, mdfile, dest_path, mdfile)
utils.subprocess_call(self.logger,cmd3)
if rc !=0:
utils.die(self.logger,"wget failed")
# now run createrepo to rebuild the index
if repo.mirror_locally:
os.path.walk(dest_path, self.createrepo_walker, repo)
# create the config file the hosts will use to access the repository.
self.create_local_file(dest_path, repo)
# ====================================================================================
# ==================================================================================
def create_local_file(self, dest_path, repo, output=True):
"""
Creates Yum config files for use by reposync
Two uses:
(A) output=True, Create local files that can be used with yum on provisioned clients to make use of this mirror.
(B) output=False, Create a temporary file for yum to feed into yum for mirroring
"""
# the output case will generate repo configuration files which are usable
# for the installed systems. They need to be made compatible with --server-override
# which means they are actually templates, which need to be rendered by a cobbler-sync
# on per profile/system basis.
if output:
fname = os.path.join(dest_path,"config.repo")
else:
fname = os.path.join(dest_path, "%s.repo" % repo.name)
self.logger.debug("creating: %s" % fname)
if not os.path.exists(dest_path):
utils.mkdir(dest_path)
config_file = open(fname, "w+")
config_file.write("[%s]\n" % repo.name)
config_file.write("name=%s\n" % repo.name)
optenabled = False
optgpgcheck = False
if output:
if repo.mirror_locally:
line = "baseurl=http://${server}/cobbler/repo_mirror/%s\n" % (repo.name)
else:
mstr = repo.mirror
if mstr.startswith("/"):
mstr = "file://%s" % mstr
line = "baseurl=%s\n" % mstr
config_file.write(line)
# user may have options specific to certain yum plugins
# add them to the file
for x in repo.yumopts:
config_file.write("%s=%s\n" % (x, repo.yumopts[x]))
if x == "enabled":
optenabled = True
if x == "gpgcheck":
optgpgcheck = True
else:
mstr = repo.mirror
if mstr.startswith("/"):
mstr = "file://%s" % mstr
line = "baseurl=%s\n" % mstr
if self.settings.http_port not in (80, '80'):
http_server = "%s:%s" % (self.settings.server, self.settings.http_port)
else:
http_server = self.settings.server
line = line.replace("@@server@@",http_server)
config_file.write(line)
if not optenabled:
config_file.write("enabled=1\n")
config_file.write("priority=%s\n" % repo.priority)
# FIXME: potentially might want a way to turn this on/off on a per-repo basis
if not optgpgcheck:
config_file.write("gpgcheck=0\n")
config_file.close()
return fname
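        # Sketch of the output=True result (all field values below are
        # placeholders; the real ones come from the repo object and settings):
        #   [example-repo]
        #   name=example-repo
        #   baseurl=http://${server}/cobbler/repo_mirror/example-repo
        #   enabled=1
        #   priority=99
        #   gpgcheck=0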
# ==================================================================================
def update_permissions(self, repo_path):
"""
Verifies that permissions and contexts after an rsync are as expected.
Sending proper rsync flags should prevent the need for this, though this is largely
a safeguard.
"""
# all_path = os.path.join(repo_path, "*")
cmd1 = "chown -R root:apache %s" % repo_path
utils.subprocess_call(self.logger, cmd1)
cmd2 = "chmod -R 755 %s" % repo_path
utils.subprocess_call(self.logger, cmd2)
|
"""Copyright (c) 2017 abhishek-sehgal954
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import sys
import os
import re
import subprocess
import math
import numpy as np
import inkex
import simpletransform
from PIL import Image, ImageStat, ImageDraw
import simplestyle
inkex.localize()
class ordered_dithering(inkex.Effect):
def __init__(self):
"""Init the effect library and get options from gui."""
inkex.Effect.__init__(self)
self.OptionParser.add_option("-t", "--width",
action="store", type="int",
dest="width", default=200,
help="this variable will be used to resize the original selected image to a width of whatever \
you enter and height proportional to the new width, thus maintaining the aspect ratio")
self.OptionParser.add_option("--inkscape_path", action="store", type="string", dest="inkscape_path", default="", help="")
self.OptionParser.add_option("--temp_path", action="store", type="string", dest="temp_path", default="", help="")
def effect(self):
outfile = self.options.temp_path
curfile = self.args[-1]
self.exportPage(curfile,outfile)
def draw_rectangle(self,(x, y), (l,b), color, parent, id_):
style = {'stroke': 'none', 'stroke-width': '1', 'fill': color,"mix-blend-mode" : "multiply"}
attribs = {'style': simplestyle.formatStyle(style), 'x': str(x), 'y': str(y), 'width': str(l), 'height':str(b)}
if id_ is not None:
attribs.update({'id': id_})
obj = inkex.etree.SubElement(parent, inkex.addNS('rect', 'svg'), attribs)
return obj
def draw_circle(self,(x, y), r, color, parent, id_):
style = {'stroke': 'none', 'stroke-width': '1', 'fill': color,"mix-blend-mode" : "multiply"}
attribs = {'style': simplestyle.formatStyle(style), 'cx': str(x), 'cy': str(y), 'r': str(r)}
if id_ is not None:
attribs.update({'id': id_})
obj = inkex.etree.SubElement(parent, inkex.addNS('circle', 'svg'), attribs)
return obj
    def draw_ellipse(self,(x, y), (r1,r2), color, parent, id_,transform):
        style = {'stroke': 'none', 'stroke-width': '1', 'fill': color,"mix-blend-mode" : "multiply"}
        # the attributes are identical for every transform value, so no branching is needed
        attribs = {'style': simplestyle.formatStyle(style), 'cx': str(x), 'cy': str(y), 'rx': str(r1), 'ry': str(r2)}
if id_ is not None:
attribs.update({'id': id_})
obj = inkex.etree.SubElement(parent, inkex.addNS('ellipse', 'svg'), attribs)
return obj
def draw_svg(self,output,parent):
startu = 0
endu = 0
for i in range(len(output)):
for j in range(len(output[i])):
if (output[i][j]==0):
self.draw_circle((int((startu+startu+1)/2),int((endu+endu+1)/2)),1,'black',parent,'id')
#dwg.add(dwg.circle((int((startu+startu+1)/2),int((endu+endu+1)/2)),1,fill='black'))
startu = startu+2
endu = endu+2
startu = 0
#dwg.save()
    def intensity(self,arr):
        # calculates an intensity level for each pixel, binned from 0 to 9
mini = 999
maxi = 0
for i in range(len(arr)):
for j in range(len(arr[0])):
maxi = max(arr[i][j],maxi)
mini = min(arr[i][j],mini)
        level = float(maxi-mini)/float(10)
brr = [[0]*len(arr[0]) for i in range(len(arr))]
for i in range(10):
l1 = mini+level*i
l2 = l1+level
for j in range(len(arr)):
for k in range(len(arr[0])):
if(arr[j][k] >= l1 and arr[j][k] <= l2):
brr[j][k]=i
return brr
def order_dither(self,image):
arr = np.asarray(image)
brr = self.intensity(arr)
crr = [[8, 3, 4], [6, 1, 2], [7, 5, 9]]
drr = np.zeros((len(arr),len(arr[0])))
for i in range(len(arr)):
for j in range(len(arr[0])):
if(brr[i][j] > crr[i%3][j%3]):
drr[i][j] = 255
else:
drr[i][j] = 0
return drr
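    # Worked example for the 3x3 threshold matrix above: a pixel whose 0-9
    # intensity level exceeds the matrix entry at (row % 3, col % 3) becomes
    # white (255), otherwise black (0). A uniform level-5 image therefore
    # tiles into [[0, 255, 255], [0, 255, 255], [0, 0, 0]].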
def dithering(self,node,image):
if image:
basewidth = self.options.width
wpercent = (basewidth/float(image.size[0]))
hsize = int((float(image.size[1])*float(wpercent)))
image = image.resize((basewidth,hsize), Image.ANTIALIAS)
(width, height) = image.size
nodeParent = node.getparent()
nodeIndex = nodeParent.index(node)
pixel2svg_group = inkex.etree.Element(inkex.addNS('g', 'svg'))
pixel2svg_group.set('id', "%s_pixel2svg" % node.get('id'))
nodeParent.insert(nodeIndex+1, pixel2svg_group)
nodeParent.remove(node)
image = image.convert("RGBA")
pixel_data = image.load()
if image.mode == "RGBA":
for y in xrange(image.size[1]):
for x in xrange(image.size[0]):
if pixel_data[x, y][3] < 255:
pixel_data[x, y] = (255, 255, 255, 255)
image.thumbnail([image.size[0], image.size[1]], Image.ANTIALIAS)
image = image.convert('L')
self.draw_rectangle((0,0),(width,height),'white',pixel2svg_group,'id')
output = self.order_dither(image)
self.draw_svg(output,pixel2svg_group)
else:
inkex.errormsg(_("Bailing out: No supported image file or data found"))
sys.exit(1)
def exportPage(self, curfile, outfile):
command = "%s %s --export-png %s" %(self.options.inkscape_path,curfile,outfile)
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return_code = p.wait()
f = p.stdout
err = p.stderr
img = Image.open(outfile)
if (self.options.ids):
for node in self.selected.itervalues():
found_image = True
self.dithering(node,img)
def main():
e = ordered_dithering()
e.affect()
exit()
if __name__=="__main__":
main()
|
from dnfpluginscore import logger
import dnf
import os.path
class BashCompletionCache(dnf.Plugin):
name = 'generate_completion_cache'
def __init__(self, base, cli):
self.base = base
self.available_cache_file = '/var/cache/dnf/available.cache'
self.installed_cache_file = '/var/cache/dnf/installed.cache'
def _out(self, msg):
logger.debug('Completion plugin: %s', msg)
def sack(self):
''' Generate cache of available packages '''
        # We generate this cache only if the repos were just refreshed or if
        # the cache file doesn't exist
fresh = False
for repo in self.base.repos.iter_enabled():
if repo.metadata is not None and repo.metadata.fresh:
# One fresh repo is enough to cause a regen of the cache
fresh = True
break
if not os.path.exists(self.available_cache_file) or fresh:
try:
with open(self.available_cache_file, 'w') as cache_file:
self._out('Generating completion cache...')
available_packages = self.base.sack.query().available()
for package in available_packages:
cache_file.write(package.name + '\n')
except Exception as e:
self._out('Can\'t write completion cache: %s' % e)
def transaction(self):
''' Generate cache of installed packages '''
try:
with open(self.installed_cache_file, 'w') as cache_file:
installed_packages = self.base.sack.query().installed()
self._out('Generating completion cache...')
for package in installed_packages:
cache_file.write(package.name + '\n')
except Exception as e:
self._out('Can\'t write completion cache: %s' % e)
|
from django.conf.urls import *
urlpatterns = patterns('foo.views',
# Listing URL
url(r'^$', view='browse', name='foo.browse'),
# Detail URL
url(r'^(?P<slug>(?!overview\-)[\w\-\_\.\,]+)/$', view='detail', name='foo.detail'),
)
|
"""
This nagios active check parses the Hadoop HDFS web interface url:
http://<namenode>:<port>/dfsnodelist.jsp?whatNodes=LIVE
to check for active datanodes that use disk beyond the given thresholds.
The output includes performance data and is truncated if longer than 1024
chars.
Tested on: Hadoop CDH3U5
"""
__author__ = 'Joseph Herlant'
__copyright__ = 'Copyright 2014, Joseph Herlant'
__credits__ = ['Joseph Herlant']
__license__ = 'GNU GPLv3'
__version__ = '1.0.2'
__maintainer__ = 'Joseph Herlant'
__email__ = 'herlantj@gmail.com'
__status__ = 'Production'
__website__ = 'https://github.com/aerostitch/'
from mechanize import Browser
from BeautifulSoup import BeautifulSoup
import argparse, sys
if __name__ == '__main__':
# use -h argument to get help
parser = argparse.ArgumentParser(
description='A Nagios check to verify all datanodes disk usage in \
an HDFS cluster from the namenode web interface.')
parser.add_argument('-n', '--namenode', required=True,
help='hostname of the namenode of the cluster')
parser.add_argument('-p', '--port', type=int, default=50070,
help='port of the namenode http interface. \
Defaults to 50070.')
parser.add_argument('-w', '--warning', type=int, default=80,
help='warning threshold. Defaults to 80.')
parser.add_argument('-c', '--critical', type=int, default=90,
help='critical threshold. Defaults to 90.')
args = parser.parse_args()
# Get the web page from the namenode
url = "http://%s:%d/dfsnodelist.jsp?whatNodes=LIVE" % \
(args.namenode, args.port)
try:
page = Browser().open(url)
except IOError:
print 'CRITICAL: Cannot access namenode interface on %s:%d!' % \
(args.namenode, args.port)
sys.exit(2)
# parse the page
html = page.read()
soup = BeautifulSoup(html)
datanodes = soup.findAll('td', {'class' : 'name'})
pcused = soup.findAll('td', {'class' : 'pcused', 'align' : 'right'})
w_msg = ''
c_msg = ''
perfdata = ''
    for (idx, node_cell) in enumerate(datanodes):
        pct = float(pcused[idx].contents[0].strip())
        node = node_cell.findChildren('a')[0].contents[0].strip()
if pct >= args.critical:
c_msg += ' %s=%.1f%%,' % (node, pct)
perfdata += ' %s=%.1f,' % (node, pct)
elif pct >= args.warning:
w_msg += ' %s=%.1f%%,' % (node, pct)
perfdata += ' %s=%.1f,' % (node, pct)
else:
perfdata += ' %s=%.1f,' % (node, pct)
# Prints the values and exits with the nagios exit code
if len(c_msg) > 0:
print ('CRITICAL:%s%s |%s' % (c_msg, w_msg, perfdata)).strip(',')[:1024]
sys.exit(2)
elif len(w_msg) > 0:
print ('WARNING:%s |%s' % (w_msg, perfdata)).strip(',')[:1024]
sys.exit(1)
elif len(perfdata) == 0:
print 'CRITICAL: Unable to find any node data in the page.'
sys.exit(2)
else:
print ('OK |%s' % (perfdata)).strip(',')[:1024]
sys.exit(0)
|
from __future__ import absolute_import, division, print_function, unicode_literals
from django.template.response import TemplateResponse
from django.template.context import RequestContext
from asymmetricbase.jinja import jinja_env
from asymmetricbase.logging import logger #@UnusedImport
class JinjaTemplateResponse(TemplateResponse):
def resolve_template(self, template):
if isinstance(template, (list, tuple)):
return jinja_env.select_template(template)
elif isinstance(template, basestring):
return jinja_env.get_template(template)
else:
return template
def resolve_context(self, context):
context = super(JinjaTemplateResponse, self).resolve_context(context)
if isinstance(context, RequestContext):
context = jinja_env.context_to_dict(context)
return context
|
"""
* Copyright (c) 2015 BEEVC - Electronic Systems This file is part of BEESOFT
* software: you can redistribute it and/or modify it under the terms of the GNU
* General Public License as published by the Free Software Foundation, either
* version 3 of the License, or (at your option) any later version. BEESOFT is
* distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
* PARTICULAR PURPOSE. See the GNU General Public License for more details. You
* should have received a copy of the GNU General Public License along with
* BEESOFT. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = "Marcos Gomes"
__license__ = "MIT"
import json
import FileFinder
import pygame
class PrinterInfoLoader():
interfaceJson = None
lblJson = None
lblValJson = None
lblFont = None
lblFontColor = None
lblXPos = None
lblYPos = None
lblText = None
lblValFont = None
lblValFontColor = None
lblValXPos = None
displayWidth = 480
displayHeight = 320
"""*************************************************************************
Init Method
Inits current screen components
*************************************************************************"""
def __init__(self, interfaceJson, dispWidth, dispHeight):
self.displayWidth = dispWidth
self.displayHeight = dispHeight
self.interfaceJson = interfaceJson
self.lblJson = json.loads(json.dumps(self.interfaceJson['Labels']))
self.lblValJson = json.loads(json.dumps(self.interfaceJson['ValuesSettings']))
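# The dumps/loads round trip deep-copies the sub-dicts, so changes made
# here cannot mutate the caller's interfaceJson structure.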
"""
Values Labels Configuration
"X":"220",
"FontType":"Bold",
"FontSize":"12",
"FontColor":"0,0,0"
"""
self.lblValXPos = int(float(self.lblValJson['X'])*self.displayWidth)
lblValFontType = self.lblValJson['FontType']
lblValFontSize = int(float(self.lblValJson['FontSize'])*self.displayHeight)
self.lblValFont = self.GetFont(lblValFontType,lblValFontSize)
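# 'FontColor' is stored as an "R,G,B" string; split it into the three
# integer components that pygame.Color expects.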
lblValFColor = self.lblValJson['FontColor']
splitColor = lblValFColor.split(",")
self.lblValFontColor = pygame.Color(int(splitColor[0]),int(splitColor[1]),int(splitColor[2]))
"""
Load Labels Configuration
"""
self.lblText = []
self.lblXPos = []
self.lblYPos = []
self.lblFont = []
self.lblFontColor = []
for lbl in self.lblJson:
lblFontType = lbl['FontType']
lblFontSize = int(float(lbl['FontSize'])*self.displayHeight)
lblFColor = lbl['FontColor']
self.lblXPos.append(int(float(lbl['X'])*self.displayWidth))
self.lblYPos.append(int(float(lbl['Y'])*self.displayHeight))
self.lblText.append(lbl['Text'])
font = self.GetFont(lblFontType,lblFontSize)
self.lblFont.append(font)
splitColor = lblFColor.split(",")
fontColor = pygame.Color(int(splitColor[0]),int(splitColor[1]),int(splitColor[2]))
self.lblFontColor.append(fontColor)
return
"""
GetFont
"""
def GetFont(self,fontType,fontSize):
r"""
GetFont method
Receives as arguments:
fontType - Regular,Bold,Italic,Light
fontSize - font size
Returns:
pygame font object, or None if fontType is not one of the known types
"""
ff = FileFinder.FileFinder()
# Map each configured font type to its DejaVuSans variant.
fontFiles = {
"Regular": "/Fonts/DejaVuSans-Regular.ttf",
"Bold": "/Fonts/DejaVuSans-Bold.ttf",
"Italic": "/Fonts/DejaVuSans-Italic.ttf",
"Light": "/Fonts/DejaVuSans-Light.ttf",
}
font = None
if fontType in fontFiles:
font = pygame.font.Font(ff.GetAbsPath(fontFiles[fontType]),fontSize)
return font
"""
GetlblText(self)
returns the list with the label text
"""
def GetlblText(self):
return self.lblText
"""
GetlblFont
"""
def GetlblFont(self):
return self.lblFont
"""
GetlblFontColor
"""
def GetlblFontColor(self):
return self.lblFontColor
"""
GetlblXPos
"""
def GetlblXPos(self):
return self.lblXPos
"""
GetlblYPos
"""
def GetlblYPos(self):
return self.lblYPos
"""
GetlblValFont
"""
def GetlblValFont(self):
return self.lblValFont
"""
GetlblValFontColor
"""
def GetlblValFontColor(self):
return self.lblValFontColor
"""
GetlblValXPos
"""
def GetlblValXPos(self):
return self.lblValXPos
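# Minimal usage sketch (hypothetical JSON file and layout; assumes
# pygame.init() has been called so fonts can be loaded):
#
# with open("PrinterInfo.json") as f:
#     interfaceJson = json.load(f)
# loader = PrinterInfoLoader(interfaceJson, 480, 320)
# for text, x, y in zip(loader.GetlblText(), loader.GetlblXPos(), loader.GetlblYPos()):
#     print(text, x, y)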
|