gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
import numpy as np
from dolfin import tic, toc
import HiptmairSetup
import PETScIO as IO
import scipy.sparse as sp
import matplotlib.pylab as plt
import MatrixOperations as MO
import HiptmairSetup
class BaseMyPC(object):
    """Skeleton for a python-context PETSc preconditioner.

    Subclasses override apply(); every transpose/symmetric/Richardson
    variant falls back to the plain application by default, and the
    lifecycle hooks (setup/reset) default to no-ops.
    """

    def setup(self, pc):
        """Hook called when the PC is set up; no-op by default."""
        pass

    def reset(self, pc):
        """Hook called when the PC is reset; no-op by default."""
        pass

    def apply(self, pc, x, y):
        """Apply the preconditioner: y <- M^{-1} x. Must be overridden."""
        raise NotImplementedError

    def applyT(self, pc, x, y):
        """Transpose application; defaults to plain apply()."""
        self.apply(pc, x, y)

    def applyS(self, pc, x, y):
        """Symmetric application; defaults to plain apply()."""
        self.apply(pc, x, y)

    def applySL(self, pc, x, y):
        """Symmetric-left application; delegates to applyS()."""
        self.applyS(pc, x, y)

    def applySR(self, pc, x, y):
        """Symmetric-right application; delegates to applyS()."""
        self.applyS(pc, x, y)

    def applyRich(self, pc, x, y, w, tols):
        """Richardson-iteration application; defaults to plain apply()."""
        self.apply(pc, x, y)
class Matrix(object):
    """Skeleton for a python-context PETSc shell matrix.

    Provides no-op lifecycle hooks; subclasses implement mult() and
    friends as needed.
    """

    def __init__(self):
        """No state to initialise in the base class."""
        pass

    def create(self, mat):
        """Hook called when the shell matrix is created; no-op by default."""
        pass

    def destroy(self, mat):
        """Hook called when the shell matrix is destroyed; no-op by default."""
        pass
class InnerOuterMAGNETICinverse(BaseMyPC):
    """Block preconditioner for a four-field coupled system with unknowns
    ordered (velocity u, pressure p, magnetic field b, multiplier r).

    Every inner sub-solve is configured as a direct solve (preonly + LU)
    in setUp(). The pressure block uses a PCD-style composition
    Q^{-1} Fp A^{-1} built from kspA, Fp and kspQ.
    NOTE(review): the PCD interpretation is inferred from the operator
    names (Fp, kspA, kspQ) — confirm against the assembling code.
    """

    def __init__(self, W, kspF, kspA, kspQ, Fp, kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol):
        # W: list of four function spaces; W[i].dim() fixes the index-set
        # offsets built below.
        self.W = W
        self.kspF = kspF            # velocity block solver
        self.kspA = kspA            # pressure Laplacian solver (PCD)
        self.kspQ = kspQ            # pressure mass-matrix solver (PCD)
        self.Fp = Fp                # pressure convection-diffusion operator (PCD)
        self.kspScalar = kspScalar  # scalar solver used for the multiplier block
        self.kspCGScalar = kspCGScalar  # stored but not used in this LU variant
        self.kspVector = kspVector  # configured in setUp() but unused in apply()
        self.HiptmairIts = 0        # counters reported by ITS()
        self.CGits = 0
        self.P = P                  # Hiptmair interpolation (unused in this LU variant)
        self.G = G                  # discrete gradient (unused in this LU variant)
        self.AA = A                 # magnetic (curl-curl) operator, solved via kspMX
        self.tol = Hiptmairtol      # Hiptmair tolerance (unused in this LU variant)
        # Contiguous index sets carving the monolithic vector into u/p/b/r.
        self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
        self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim(), self.W[0].dim()+self.W[1].dim()))
        self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
                                                   self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
        self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
                                                   self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))

    def create(self, pc):
        # Nothing to allocate at creation time.
        print "Create"

    def setUp(self, pc):
        """Extract the coupling blocks from the operator and configure every
        inner KSP as a direct (preonly + LU) solve."""
        A, P = pc.getOperators()
        print A.size
        if A.type == 'python':
            # Matrix-free shell operator: fetch coupling blocks from its context.
            self.Ct = A.getPythonContext().getMatrix("Ct")
            self.Bt = A.getPythonContext().getMatrix("Bt")
        else:
            # Assembled operator: slice the (b,u) and (p,u) coupling blocks.
            self.Ct = A.getSubMatrix(self.b_is, self.u_is)
            self.Bt = A.getSubMatrix(self.p_is, self.u_is)
        self.kspA.setType('preonly')
        self.kspA.getPC().setType('lu')
        self.kspA.setFromOptions()
        self.kspA.setPCSide(0)
        self.kspQ.setType('preonly')
        self.kspQ.getPC().setType('lu')
        self.kspQ.setFromOptions()
        self.kspQ.setPCSide(0)
        self.kspScalar.setType('preonly')
        self.kspScalar.getPC().setType('lu')
        self.kspScalar.setFromOptions()
        self.kspScalar.setPCSide(0)
        # Direct solver for the magnetic (curl-curl) block AA.
        kspMX = PETSc.KSP()
        kspMX.create(comm=PETSc.COMM_WORLD)
        pcMX = kspMX.getPC()
        kspMX.setType('preonly')
        pcMX.setType('lu')
        OptDB = PETSc.Options()  # NOTE(review): created but never used
        kspMX.setOperators(self.AA, self.AA)
        self.kspMX = kspMX
        self.kspVector.setType('preonly')
        self.kspVector.getPC().setType('lu')
        self.kspVector.setFromOptions()
        self.kspVector.setPCSide(0)
        print "setup"

    def apply(self, pc, x, y):
        """Apply the block preconditioner: y = P^{-1} x, one field at a time."""
        # Multiplier block: xr = L^{-1} br.
        br = x.getSubVector(self.r_is)
        xr = br.duplicate()
        self.kspScalar.solve(br, xr)
        # Pressure block (PCD-style): xp = Q^{-1} Fp A^{-1} x_p.
        x2 = x.getSubVector(self.p_is)
        y2 = x2.duplicate()
        y3 = x2.duplicate()
        xp = x2.duplicate()
        self.kspA.solve(x2, y2)
        self.Fp.mult(y2, y3)
        self.kspQ.solve(y3, xp)
        # Magnetic block: xb = AA^{-1} bb via the LU solver built in setUp().
        bb = x.getSubVector(self.b_is)
        xb = bb.duplicate()
        self.kspMX.solve(bb, xb)
        # Velocity block: fold in the coupling terms, then solve with F.
        bu1 = x.getSubVector(self.u_is)
        bu2 = bu1.duplicate()
        bu4 = bu1.duplicate()
        self.Bt.multTranspose(xp, bu2)
        self.Ct.multTranspose(xb, bu4)
        XX = bu1.duplicate()
        xu = XX.duplicate()
        # NOTE(review): the +bu2 here together with the -xp in the output
        # encodes a sign convention; confirm against the derivation.
        self.kspF.solve(bu1-bu4+bu2, xu)
        # Reassemble the monolithic result in (u, p, b, r) order.
        y.array = (np.concatenate([xu.array, -xp.array, xb.array, xr.array]))

    def ITS(self):
        # NOTE(review): CGtime and HiptmairTime are never assigned in this
        # class, so calling ITS() would raise AttributeError — verify callers.
        return self.CGits, self.HiptmairIts, self.CGtime, self.HiptmairTime
class InnerOuterMAGNETICapprox(BaseMyPC):
    """Inexact variant of InnerOuterMAGNETICinverse: identical block
    structure and field ordering (u, p, b, r), but the magnetic block is
    solved with a Hiptmair multigrid sweep (HiptmairSetup.HiptmairApply)
    instead of a direct LU solve, and the inner KSPs are left with
    whatever configuration the caller supplied.
    """

    def __init__(self, W, kspF, kspA, kspQ, Fp, kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol):
        # W: list of four function spaces; W[i].dim() fixes the index-set
        # offsets built below.
        self.W = W
        self.kspF = kspF            # velocity block solver
        self.kspA = kspA            # pressure Laplacian solver (PCD)
        self.kspQ = kspQ            # pressure mass-matrix solver (PCD)
        self.Fp = Fp                # pressure convection-diffusion operator (PCD)
        self.kspScalar = kspScalar  # scalar solver (multiplier block + Hiptmair)
        self.kspCGScalar = kspCGScalar  # stored but not used in apply()
        self.kspVector = kspVector  # vector solver used inside HiptmairApply
        self.HiptmairIts = 0        # counters reported by ITS()
        self.CGits = 0
        self.P = P                  # Hiptmair interpolation operator
        self.G = G                  # discrete gradient used by HiptmairApply
        self.AA = A                 # magnetic (curl-curl) operator
        self.tol = Hiptmairtol      # tolerance for the Hiptmair solve
        # Contiguous index sets carving the monolithic vector into u/p/b/r.
        self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
        self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim(), self.W[0].dim()+self.W[1].dim()))
        self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
                                                   self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
        self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
                                                   self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))

    def create(self, pc):
        # Nothing to allocate at creation time.
        print "Create"

    def setUp(self, pc):
        """Extract the coupling blocks from the operator; inner solver
        configuration is left to the caller in this inexact variant."""
        A, P = pc.getOperators()
        print A.size
        if A.type == 'python':
            # Matrix-free shell operator: fetch coupling blocks from its context.
            self.Ct = A.getPythonContext().getMatrix("Ct")
            self.Bt = A.getPythonContext().getMatrix("Bt")
        else:
            # Assembled operator: slice the (b,u) and (p,u) coupling blocks.
            self.Ct = A.getSubMatrix(self.b_is, self.u_is)
            self.Bt = A.getSubMatrix(self.p_is, self.u_is)
        print "setup"

    def apply(self, pc, x, y):
        """Apply the block preconditioner: y = P^{-1} x, one field at a time."""
        # Multiplier block: xr = L^{-1} br.
        br = x.getSubVector(self.r_is)
        xr = br.duplicate()
        self.kspScalar.solve(br, xr)
        # Pressure block (PCD-style): xp = Q^{-1} Fp A^{-1} x_p.
        x2 = x.getSubVector(self.p_is)
        y2 = x2.duplicate()
        y3 = x2.duplicate()
        xp = x2.duplicate()
        self.kspA.solve(x2, y2)
        self.Fp.mult(y2, y3)
        self.kspQ.solve(y3, xp)
        # Magnetic block: approximate AA^{-1} bb with a Hiptmair sweep.
        bb = x.getSubVector(self.b_is)
        xb = bb.duplicate()
        xb, its, self.HiptmairTime = HiptmairSetup.HiptmairApply(self.AA, bb, self.kspScalar, self.kspVector, self.G, self.P, self.tol)
        # Velocity block: fold in the coupling terms, then solve with F.
        bu1 = x.getSubVector(self.u_is)
        bu2 = bu1.duplicate()
        bu4 = bu1.duplicate()
        self.Bt.multTranspose(xp, bu2)
        self.Ct.multTranspose(xb, bu4)
        XX = bu1.duplicate()
        xu = XX.duplicate()
        # NOTE(review): the +bu2 here together with the -xp in the output
        # encodes a sign convention; confirm against the derivation.
        self.kspF.solve(bu1-bu4+bu2, xu)
        # Reassemble the monolithic result in (u, p, b, r) order.
        y.array = (np.concatenate([xu.array, -xp.array, xb.array, xr.array]))

    def ITS(self):
        # NOTE(review): CGtime is never assigned in this class, so calling
        # ITS() would raise AttributeError — verify callers.
        return self.CGits, self.HiptmairIts, self.CGtime, self.HiptmairTime
class InnerOuter(BaseMyPC):
    """Block preconditioner variant with field ordering (u, b, p, r) —
    note the magnetic and pressure blocks are SWAPPED relative to the
    InnerOuterMAGNETIC* classes above, and the output carries xp with no
    sign flip. Coupling blocks are sliced from the full assembled
    operator AA in create() rather than from pc.getOperators().
    """

    def __init__(self, AA, W, kspF, kspA, kspQ, Fp, kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol, F):
        self.W = W
        self.kspF = kspF            # velocity block solver
        self.kspA = kspA            # pressure Laplacian solver (PCD)
        self.kspQ = kspQ            # pressure mass-matrix solver (PCD)
        self.Fp = Fp                # pressure convection-diffusion operator (PCD)
        self.kspScalar = kspScalar  # scalar solver (multiplier block + Hiptmair)
        self.kspCGScalar = kspCGScalar  # stored but not used in apply()
        self.kspVector = kspVector  # vector solver used inside HiptmairApply
        self.HiptmairIts = 0        # counters reported by ITS()
        self.CGits = 0
        self.F = F                  # stored but not used in apply()
        self.A = AA                 # full assembled operator; sliced in create()
        self.P = P                  # Hiptmair interpolation operator
        self.G = G                  # discrete gradient used by HiptmairApply
        self.AA = A                 # magnetic (curl-curl) operator
        self.tol = Hiptmairtol      # tolerance for the Hiptmair solve
        # Contiguous index sets in u, b, p, r order (b before p here).
        self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
        self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim(), self.W[0].dim()+self.W[1].dim()))
        self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
                                                   self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
        self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
                                                   self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))

    def create(self, pc):
        print "Create"
        # Coupling blocks sliced from the full operator; note this happens
        # in create(), not setUp(), unlike the classes above.
        self.Dt = self.A.getSubMatrix(self.b_is, self.r_is)  # (b, r) coupling
        self.Bt = self.A.getSubMatrix(self.u_is, self.p_is)  # (u, p) coupling

    def setUp(self, pc):
        # No per-setup work: blocks were extracted in create().
        A, P = pc.getOperators()
        print A.size
        print "setup"

    def apply(self, pc, x, y):
        """Apply the block preconditioner: y = P^{-1} x, one field at a time."""
        # Multiplier block: xr = L^{-1} br.
        br = x.getSubVector(self.r_is)
        xr = br.duplicate()
        self.kspScalar.solve(br, xr)
        # Pressure block (PCD-style): xp = Q^{-1} Fp A^{-1} x_p.
        x2 = x.getSubVector(self.p_is)
        y2 = x2.duplicate()
        y3 = x2.duplicate()
        xp = x2.duplicate()
        self.kspA.solve(x2, y2)
        self.Fp.mult(y2, y3)
        self.kspQ.solve(y3, xp)
        # Magnetic block: correct the RHS by the multiplier coupling, then
        # approximate AA^{-1} with a Hiptmair sweep.
        bb = x.getSubVector(self.b_is)
        bb = bb - self.Dt*xr
        xb = bb.duplicate()
        xb, its, self.HiptmairTime = HiptmairSetup.HiptmairApply(self.AA, bb, self.kspScalar, self.kspVector, self.G, self.P, self.tol)
        # Velocity block: subtract the pressure coupling, then solve with F.
        bu1 = x.getSubVector(self.u_is)
        bu2 = self.Bt*xp
        XX = bu1.duplicate()
        xu = XX.duplicate()
        self.kspF.solve(bu1-bu2, xu)
        # Reassemble in (u, b, p, r) order; NOTE(review): unlike the classes
        # above, xp is not negated here — confirm the intended convention.
        y.array = (np.concatenate([xu.array, xb.array, xp.array, xr.array]))

    def ITS(self):
        # NOTE(review): CGtime is never assigned in this class, so calling
        # ITS() would raise AttributeError — verify callers.
        return self.CGits, self.HiptmairIts, self.CGtime, self.HiptmairTime
class P(Matrix):
    """Matrix-free PETSc shell operator applying a block preconditioner
    matrix P (not its inverse) to a monolithic (u, b, p, r) vector.

    Sub-blocks are sliced from the assembled operator in create();
    mult() applies the block rows:
        u-row: F u + Bt p + Ct b
        p-row: -L Fp^{-1} (Mass p)
        b-row: C u + M b
        r-row: A r
    """

    def __init__(self, Fspace, P, Mass, L, F, M):
        self.Fspace = Fspace  # mixed function space; index sets built in create()
        self.P = P            # assembled block operator; sub-blocks sliced in create()
        self.Mass = Mass      # pressure mass matrix
        self.L = L            # pressure Laplacian
        self.kspFp = F        # solver for the pressure convection-diffusion operator
        self.M = M            # magnetic block operator

    def create(self, A):
        # Index sets for the (u, b, p, r) sub-blocks of the mixed space.
        self.IS = MO.IndexSet(self.Fspace)
        self.F = self.P.getSubMatrix(self.IS[0], self.IS[0])   # velocity block
        self.Bt = self.P.getSubMatrix(self.IS[0], self.IS[2])  # (u, p) coupling
        self.Ct = self.P.getSubMatrix(self.IS[0], self.IS[1])  # (u, b) coupling
        self.C = self.P.getSubMatrix(self.IS[1], self.IS[0])   # (b, u) coupling
        self.A = self.P.getSubMatrix(self.IS[3], self.IS[3])   # multiplier block
        print(13333)  # debug marker (parenthesized: valid in Py2 and Py3)

    def mult(self, A, x, y):
        """y <- P * x, applied block row by block row."""
        print('multi apply')
        print(333)
        u = x.getSubVector(self.IS[0])
        p = x.getSubVector(self.IS[2])
        b = x.getSubVector(self.IS[1])
        r = x.getSubVector(self.IS[3])
        FQp = p.duplicate()
        uOut = self.F*u + self.Bt*p + self.Ct*b
        Qp = self.Mass*p
        self.kspFp.solve(Qp, FQp)  # FQp = Fp^{-1} Mass p
        pOut = -self.L*FQp
        bOut = self.C*u + self.M*b
        rOut = self.A*r
        # Reassemble the monolithic result in (u, b, p, r) order.
        y.array = (np.concatenate([uOut.array, bOut.array, pOut.array, rOut.array]))
        print("$$$$$$$/$$$$$$$$")

    def multTranspose(self, A, x, y):
        "y <- A' * x"
        # BUG FIX: was self.mult(x, y), which dropped the mat argument and
        # raised TypeError (mult takes (self, A, x, y)). The operator is
        # treated as symmetric, so transpose application reuses mult().
        self.mult(A, x, y)
| |
import decimal
import json
import unittest
import uuid
from django import forms
from django.core import exceptions, serializers, validators
from django.core.management import call_command
from django.db import IntegrityError, connection, models
from django.test import TransactionTestCase, override_settings
from django.test.utils import isolate_apps
from django.utils import timezone
from . import PostgreSQLTestCase
from .models import (
ArrayFieldSubclass, CharArrayModel, DateTimeArrayModel, IntegerArrayModel,
NestedIntegerArrayModel, NullableIntegerArrayModel, OtherTypesArrayModel,
PostgreSQLModel,
)
try:
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.forms import SimpleArrayField, SplitArrayField
except ImportError:
pass
class TestSaveLoad(PostgreSQLTestCase):
    """Round-trip ArrayField values through save()/get() for a variety of
    base-field types, checking the loaded value equals the saved one."""

    def test_integer(self):
        instance = IntegerArrayModel(field=[1, 2, 3])
        instance.save()
        loaded = IntegerArrayModel.objects.get()
        self.assertEqual(instance.field, loaded.field)

    def test_char(self):
        instance = CharArrayModel(field=['hello', 'goodbye'])
        instance.save()
        loaded = CharArrayModel.objects.get()
        self.assertEqual(instance.field, loaded.field)

    def test_dates(self):
        # datetime, date and time arrays all round-trip intact.
        instance = DateTimeArrayModel(
            datetimes=[timezone.now()],
            dates=[timezone.now().date()],
            times=[timezone.now().time()],
        )
        instance.save()
        loaded = DateTimeArrayModel.objects.get()
        self.assertEqual(instance.datetimes, loaded.datetimes)
        self.assertEqual(instance.dates, loaded.dates)
        self.assertEqual(instance.times, loaded.times)

    def test_tuples(self):
        # Tuples are accepted on save but load back as lists, hence
        # assertSequenceEqual rather than assertEqual.
        instance = IntegerArrayModel(field=(1,))
        instance.save()
        loaded = IntegerArrayModel.objects.get()
        self.assertSequenceEqual(instance.field, loaded.field)

    def test_integers_passed_as_strings(self):
        # This checks that get_prep_value is deferred properly
        instance = IntegerArrayModel(field=['1'])
        instance.save()
        loaded = IntegerArrayModel.objects.get()
        self.assertEqual(loaded.field, [1])

    def test_default_null(self):
        instance = NullableIntegerArrayModel()
        instance.save()
        loaded = NullableIntegerArrayModel.objects.get(pk=instance.pk)
        self.assertEqual(loaded.field, None)
        self.assertEqual(instance.field, loaded.field)

    def test_null_handling(self):
        # Nullable field round-trips None; non-nullable field raises on save.
        instance = NullableIntegerArrayModel(field=None)
        instance.save()
        loaded = NullableIntegerArrayModel.objects.get()
        self.assertEqual(instance.field, loaded.field)
        instance = IntegerArrayModel(field=None)
        with self.assertRaises(IntegrityError):
            instance.save()

    def test_nested(self):
        instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]])
        instance.save()
        loaded = NestedIntegerArrayModel.objects.get()
        self.assertEqual(instance.field, loaded.field)

    def test_other_array_types(self):
        # inet, uuid and numeric base fields.
        instance = OtherTypesArrayModel(
            ips=['192.168.0.1', '::1'],
            uuids=[uuid.uuid4()],
            decimals=[decimal.Decimal(1.25), 1.75],
        )
        instance.save()
        loaded = OtherTypesArrayModel.objects.get()
        self.assertEqual(instance.ips, loaded.ips)
        self.assertEqual(instance.uuids, loaded.uuids)
        self.assertEqual(instance.decimals, loaded.decimals)

    def test_model_set_on_base_field(self):
        # Both the ArrayField and its base_field report the owning model.
        instance = IntegerArrayModel()
        field = instance._meta.get_field('field')
        self.assertEqual(field.model, IntegerArrayModel)
        self.assertEqual(field.base_field.model, IntegerArrayModel)
class TestQuerying(PostgreSQLTestCase):
    """Exercise ArrayField lookups (exact, contains, overlap, index,
    slice, len, ...) against a fixed set of nullable integer arrays.

    The tests slice self.objs by position, so the creation order in
    setUp() is significant.
    """

    def setUp(self):
        self.objs = [
            NullableIntegerArrayModel.objects.create(field=[1]),
            NullableIntegerArrayModel.objects.create(field=[2]),
            NullableIntegerArrayModel.objects.create(field=[2, 3]),
            NullableIntegerArrayModel.objects.create(field=[20, 30, 40]),
            NullableIntegerArrayModel.objects.create(field=None),
        ]

    def test_exact(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__exact=[1]),
            self.objs[:1]
        )

    def test_exact_charfield(self):
        instance = CharArrayModel.objects.create(field=['text'])
        self.assertSequenceEqual(
            CharArrayModel.objects.filter(field=['text']),
            [instance]
        )

    def test_exact_nested(self):
        instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
        self.assertSequenceEqual(
            NestedIntegerArrayModel.objects.filter(field=[[1, 2], [3, 4]]),
            [instance]
        )

    def test_isnull(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__isnull=True),
            self.objs[-1:]
        )

    def test_gt(self):
        # Array comparisons are lexicographic (PostgreSQL semantics).
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__gt=[0]),
            self.objs[:4]
        )

    def test_lt(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__lt=[2]),
            self.objs[:1]
        )

    def test_in(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]),
            self.objs[:2]
        )

    def test_contained_by(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]),
            self.objs[:2]
        )

    def test_contains(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__contains=[2]),
            self.objs[1:3]
        )

    def test_contains_charfield(self):
        # Regression for #22907
        self.assertSequenceEqual(
            CharArrayModel.objects.filter(field__contains=['text']),
            []
        )

    def test_contained_by_charfield(self):
        self.assertSequenceEqual(
            CharArrayModel.objects.filter(field__contained_by=['text']),
            []
        )

    def test_overlap_charfield(self):
        self.assertSequenceEqual(
            CharArrayModel.objects.filter(field__overlap=['text']),
            []
        )

    def test_index(self):
        # field__0 indexes the first element (0-based lookup syntax).
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__0=2),
            self.objs[1:3]
        )

    def test_index_chained(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__0__lt=3),
            self.objs[0:3]
        )

    def test_index_nested(self):
        instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
        self.assertSequenceEqual(
            NestedIntegerArrayModel.objects.filter(field__0__0=1),
            [instance]
        )

    @unittest.expectedFailure
    def test_index_used_on_nested_data(self):
        # Known limitation: indexing a nested array with a list value.
        instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
        self.assertSequenceEqual(
            NestedIntegerArrayModel.objects.filter(field__0=[1, 2]),
            [instance]
        )

    def test_overlap(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__overlap=[1, 2]),
            self.objs[0:3]
        )

    def test_len(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__len__lte=2),
            self.objs[0:3]
        )

    def test_len_empty_array(self):
        obj = NullableIntegerArrayModel.objects.create(field=[])
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__len=0),
            [obj]
        )

    def test_slice(self):
        # field__M_N takes the slice [M:N] of the array.
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__0_1=[2]),
            self.objs[1:3]
        )
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__0_2=[2, 3]),
            self.objs[2:3]
        )

    @unittest.expectedFailure
    def test_slice_nested(self):
        # Known limitation: slicing inside a nested array.
        instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
        self.assertSequenceEqual(
            NestedIntegerArrayModel.objects.filter(field__0__0_1=[1]),
            [instance]
        )

    def test_usage_in_subquery(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(
                id__in=NullableIntegerArrayModel.objects.filter(field__len=3)
            ),
            [self.objs[3]]
        )
class TestDateTimeExactQuerying(PostgreSQLTestCase):
    """Exact-match filtering on datetime, date and time array fields."""

    def setUp(self):
        moment = timezone.now()
        self.datetimes = [moment]
        self.dates = [moment.date()]
        self.times = [moment.time()]
        created = DateTimeArrayModel.objects.create(
            datetimes=self.datetimes,
            dates=self.dates,
            times=self.times,
        )
        self.objs = [created]

    def test_exact_datetimes(self):
        matches = DateTimeArrayModel.objects.filter(datetimes=self.datetimes)
        self.assertSequenceEqual(matches, self.objs)

    def test_exact_dates(self):
        matches = DateTimeArrayModel.objects.filter(dates=self.dates)
        self.assertSequenceEqual(matches, self.objs)

    def test_exact_times(self):
        matches = DateTimeArrayModel.objects.filter(times=self.times)
        self.assertSequenceEqual(matches, self.objs)
class TestOtherTypesExactQuerying(PostgreSQLTestCase):
    """Exact-match filtering on inet, uuid and numeric array fields."""

    def setUp(self):
        self.ips = ['192.168.0.1', '::1']
        self.uuids = [uuid.uuid4()]
        self.decimals = [decimal.Decimal(1.25), 1.75]
        created = OtherTypesArrayModel.objects.create(
            ips=self.ips,
            uuids=self.uuids,
            decimals=self.decimals,
        )
        self.objs = [created]

    def test_exact_ip_addresses(self):
        matches = OtherTypesArrayModel.objects.filter(ips=self.ips)
        self.assertSequenceEqual(matches, self.objs)

    def test_exact_uuids(self):
        matches = OtherTypesArrayModel.objects.filter(uuids=self.uuids)
        self.assertSequenceEqual(matches, self.objs)

    def test_exact_decimals(self):
        matches = OtherTypesArrayModel.objects.filter(decimals=self.decimals)
        self.assertSequenceEqual(matches, self.objs)
@isolate_apps('postgres_tests')
class TestChecks(PostgreSQLTestCase):
    """System checks emitted by ArrayField (postgres.E001/E002)."""

    def test_field_checks(self):
        class MyModel(PostgreSQLModel):
            field = ArrayField(models.CharField())
        model = MyModel()
        errors = model.check()
        self.assertEqual(len(errors), 1)
        # The inner CharField is missing a max_length.
        self.assertEqual(errors[0].id, 'postgres.E001')
        self.assertIn('max_length', errors[0].msg)

    def test_invalid_base_fields(self):
        # Relational base fields are not allowed inside an ArrayField.
        class MyModel(PostgreSQLModel):
            field = ArrayField(models.ManyToManyField('postgres_tests.IntegerArrayModel'))
        model = MyModel()
        errors = model.check()
        self.assertEqual(len(errors), 1)
        self.assertEqual(errors[0].id, 'postgres.E002')

    def test_nested_field_checks(self):
        """
        Nested ArrayFields are permitted.
        """
        class MyModel(PostgreSQLModel):
            field = ArrayField(ArrayField(models.CharField()))
        model = MyModel()
        errors = model.check()
        self.assertEqual(len(errors), 1)
        # The inner CharField is missing a max_length.
        self.assertEqual(errors[0].id, 'postgres.E001')
        self.assertIn('max_length', errors[0].msg)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific tests")
class TestMigrations(TransactionTestCase):
    """deconstruct() round-trips and schema migrations involving ArrayField."""

    available_apps = ['postgres_tests']

    def test_deconstruct(self):
        # A field rebuilt from its deconstruction keeps the base field type.
        field = ArrayField(models.IntegerField())
        name, path, args, kwargs = field.deconstruct()
        new = ArrayField(*args, **kwargs)
        self.assertEqual(type(new.base_field), type(field.base_field))

    def test_deconstruct_with_size(self):
        field = ArrayField(models.IntegerField(), size=3)
        name, path, args, kwargs = field.deconstruct()
        new = ArrayField(*args, **kwargs)
        self.assertEqual(new.size, field.size)

    def test_deconstruct_args(self):
        # Base-field options (max_length) survive deconstruction.
        field = ArrayField(models.CharField(max_length=20))
        name, path, args, kwargs = field.deconstruct()
        new = ArrayField(*args, **kwargs)
        self.assertEqual(new.base_field.max_length, field.base_field.max_length)

    def test_subclass_deconstruct(self):
        # deconstruct() reports the concrete class's import path.
        field = ArrayField(models.IntegerField())
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, 'django.contrib.postgres.fields.ArrayField')
        field = ArrayFieldSubclass()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, 'postgres_tests.models.ArrayFieldSubclass')

    @override_settings(MIGRATION_MODULES={
        "postgres_tests": "postgres_tests.array_default_migrations",
    })
    def test_adding_field_with_default(self):
        # See #22962
        table_name = 'postgres_tests_integerarraydefaultmodel'
        with connection.cursor() as cursor:
            self.assertNotIn(table_name, connection.introspection.table_names(cursor))
        call_command('migrate', 'postgres_tests', verbosity=0)
        with connection.cursor() as cursor:
            self.assertIn(table_name, connection.introspection.table_names(cursor))
        call_command('migrate', 'postgres_tests', 'zero', verbosity=0)
        with connection.cursor() as cursor:
            self.assertNotIn(table_name, connection.introspection.table_names(cursor))

    @override_settings(MIGRATION_MODULES={
        "postgres_tests": "postgres_tests.array_index_migrations",
    })
    def test_adding_arrayfield_with_index(self):
        """
        ArrayField shouldn't have varchar_patterns_ops or text_patterns_ops indexes.
        """
        table_name = 'postgres_tests_chartextarrayindexmodel'
        call_command('migrate', 'postgres_tests', verbosity=0)
        with connection.cursor() as cursor:
            like_constraint_field_names = [
                c.rsplit('_', 2)[0][len(table_name) + 1:]
                for c in connection.introspection.get_constraints(cursor, table_name)
                if c.endswith('_like')
            ]
        # Only the CharField should have a LIKE index.
        self.assertEqual(like_constraint_field_names, ['char2'])
        with connection.cursor() as cursor:
            indexes = connection.introspection.get_indexes(cursor, table_name)
        # All fields should have regular indexes.
        self.assertIn('char', indexes)
        self.assertIn('char2', indexes)
        self.assertIn('text', indexes)
        call_command('migrate', 'postgres_tests', 'zero', verbosity=0)
        with connection.cursor() as cursor:
            self.assertNotIn(table_name, connection.introspection.table_names(cursor))
class TestSerialization(PostgreSQLTestCase):
    """Round-trip an ArrayField value through the JSON serializer."""

    test_data = (
        '[{"fields": {"field": "[\\"1\\", \\"2\\", null]"}, "model": "postgres_tests.integerarraymodel", "pk": null}]'
    )

    def test_dumping(self):
        dumped = serializers.serialize('json', [IntegerArrayModel(field=[1, 2, None])])
        self.assertEqual(json.loads(dumped), json.loads(self.test_data))

    def test_loading(self):
        deserialized = list(serializers.deserialize('json', self.test_data))
        instance = deserialized[0].object
        self.assertEqual(instance.field, [1, 2, None])
class TestValidation(PostgreSQLTestCase):
    """Model-field validation: item errors, size limits, nested arrays,
    and propagation of base-field error params."""

    def test_unbounded(self):
        # None is rejected per-item when the base field is not nullable.
        field = ArrayField(models.IntegerField())
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean([1, None], None)
        self.assertEqual(cm.exception.code, 'item_invalid')
        self.assertEqual(
            cm.exception.message % cm.exception.params,
            'Item 1 in the array did not validate: This field cannot be null.'
        )

    def test_blank_true(self):
        field = ArrayField(models.IntegerField(blank=True, null=True))
        # This should not raise a validation error
        field.clean([1, None], None)

    def test_with_size(self):
        field = ArrayField(models.IntegerField(), size=3)
        field.clean([1, 2, 3], None)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean([1, 2, 3, 4], None)
        self.assertEqual(cm.exception.messages[0], 'List contains 4 items, it should contain no more than 3.')

    def test_nested_array_mismatch(self):
        # Inner arrays must all have the same length.
        field = ArrayField(ArrayField(models.IntegerField()))
        field.clean([[1, 2], [3, 4]], None)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean([[1, 2], [3, 4, 5]], None)
        self.assertEqual(cm.exception.code, 'nested_array_mismatch')
        self.assertEqual(cm.exception.messages[0], 'Nested arrays must have the same length.')

    def test_with_base_field_error_params(self):
        # The base field's error params (limit_value, show_value, ...) are
        # merged into the item-level ValidationError.
        field = ArrayField(models.CharField(max_length=2))
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(['abc'], None)
        self.assertEqual(len(cm.exception.error_list), 1)
        exception = cm.exception.error_list[0]
        self.assertEqual(
            exception.message,
            'Item 0 in the array did not validate: Ensure this value has at most 2 characters (it has 3).'
        )
        self.assertEqual(exception.code, 'item_invalid')
        self.assertEqual(exception.params, {'nth': 0, 'value': 'abc', 'limit_value': 2, 'show_value': 3})

    def test_with_validators(self):
        # Validators attached to the base field run per item.
        field = ArrayField(models.IntegerField(validators=[validators.MinValueValidator(1)]))
        field.clean([1, 2], None)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean([0], None)
        self.assertEqual(len(cm.exception.error_list), 1)
        exception = cm.exception.error_list[0]
        self.assertEqual(
            exception.message,
            'Item 0 in the array did not validate: Ensure this value is greater than or equal to 1.'
        )
        self.assertEqual(exception.code, 'item_invalid')
        self.assertEqual(exception.params, {'nth': 0, 'value': 0, 'limit_value': 1, 'show_value': 0})
class TestSimpleFormField(PostgreSQLTestCase):
    """SimpleArrayField form field: parsing delimited strings, per-item
    validation, delimiters, nesting, and length constraints."""

    def test_valid(self):
        field = SimpleArrayField(forms.CharField())
        value = field.clean('a,b,c')
        self.assertEqual(value, ['a', 'b', 'c'])

    def test_to_python_fail(self):
        field = SimpleArrayField(forms.IntegerField())
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('a,b,9')
        self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a whole number.')

    def test_validate_fail(self):
        # The trailing comma yields an empty third item, which fails the
        # required CharField.
        field = SimpleArrayField(forms.CharField(required=True))
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('a,b,')
        self.assertEqual(cm.exception.messages[0], 'Item 2 in the array did not validate: This field is required.')

    def test_validate_fail_base_field_error_params(self):
        # Two of the three items exceed max_length; both errors are reported
        # with the base field's params merged in.
        field = SimpleArrayField(forms.CharField(max_length=2))
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('abc,c,defg')
        errors = cm.exception.error_list
        self.assertEqual(len(errors), 2)
        first_error = errors[0]
        self.assertEqual(
            first_error.message,
            'Item 0 in the array did not validate: Ensure this value has at most 2 characters (it has 3).'
        )
        self.assertEqual(first_error.code, 'item_invalid')
        self.assertEqual(first_error.params, {'nth': 0, 'value': 'abc', 'limit_value': 2, 'show_value': 3})
        second_error = errors[1]
        self.assertEqual(
            second_error.message,
            'Item 2 in the array did not validate: Ensure this value has at most 2 characters (it has 4).'
        )
        self.assertEqual(second_error.code, 'item_invalid')
        self.assertEqual(second_error.params, {'nth': 2, 'value': 'defg', 'limit_value': 2, 'show_value': 4})

    def test_validators_fail(self):
        field = SimpleArrayField(forms.RegexField('[a-e]{2}'))
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('a,bc,de')
        self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a valid value.')

    def test_delimiter(self):
        field = SimpleArrayField(forms.CharField(), delimiter='|')
        value = field.clean('a|b|c')
        self.assertEqual(value, ['a', 'b', 'c'])

    def test_delimiter_with_nesting(self):
        # Outer field splits on '|', inner field on the default ','.
        field = SimpleArrayField(SimpleArrayField(forms.CharField()), delimiter='|')
        value = field.clean('a,b|c,d')
        self.assertEqual(value, [['a', 'b'], ['c', 'd']])

    def test_prepare_value(self):
        field = SimpleArrayField(forms.CharField())
        value = field.prepare_value(['a', 'b', 'c'])
        self.assertEqual(value, 'a,b,c')

    def test_max_length(self):
        field = SimpleArrayField(forms.CharField(), max_length=2)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('a,b,c')
        self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no more than 2.')

    def test_min_length(self):
        field = SimpleArrayField(forms.CharField(), min_length=4)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('a,b,c')
        self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no fewer than 4.')

    def test_required(self):
        field = SimpleArrayField(forms.CharField(), required=True)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('')
        self.assertEqual(cm.exception.messages[0], 'This field is required.')

    def test_model_field_formfield(self):
        # ArrayField.formfield() produces a SimpleArrayField wrapping the
        # equivalent form base field.
        model_field = ArrayField(models.CharField(max_length=27))
        form_field = model_field.formfield()
        self.assertIsInstance(form_field, SimpleArrayField)
        self.assertIsInstance(form_field.base_field, forms.CharField)
        self.assertEqual(form_field.base_field.max_length, 27)

    def test_model_field_formfield_size(self):
        # The model field's size becomes the form field's max_length.
        model_field = ArrayField(models.CharField(max_length=27), size=4)
        form_field = model_field.formfield()
        self.assertIsInstance(form_field, SimpleArrayField)
        self.assertEqual(form_field.max_length, 4)
class TestSplitFormField(PostgreSQLTestCase):
    """SplitArrayField form field: one sub-widget per item, required
    handling, trailing-null removal, rendering, and per-item errors."""

    def test_valid(self):
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(), size=3)
        data = {'array_0': 'a', 'array_1': 'b', 'array_2': 'c'}
        form = SplitForm(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data, {'array': ['a', 'b', 'c']})

    def test_required(self):
        # An entirely empty submission fails the field-level required check.
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(), required=True, size=3)
        data = {'array_0': '', 'array_1': '', 'array_2': ''}
        form = SplitForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {'array': ['This field is required.']})

    def test_remove_trailing_nulls(self):
        # Empty values at the end are stripped; interior ones are kept.
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(required=False), size=5, remove_trailing_nulls=True)
        data = {'array_0': 'a', 'array_1': '', 'array_2': 'b', 'array_3': '', 'array_4': ''}
        form = SplitForm(data)
        self.assertTrue(form.is_valid(), form.errors)
        self.assertEqual(form.cleaned_data, {'array': ['a', '', 'b']})

    def test_required_field(self):
        # A single missing item produces an item-level error message.
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(), size=3)
        data = {'array_0': 'a', 'array_1': 'b', 'array_2': ''}
        form = SplitForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {'array': ['Item 2 in the array did not validate: This field is required.']})

    def test_invalid_integer(self):
        msg = 'Item 1 in the array did not validate: Ensure this value is less than or equal to 100.'
        with self.assertRaisesMessage(exceptions.ValidationError, msg):
            SplitArrayField(forms.IntegerField(max_value=100), size=2).clean([0, 101])

    def test_rendering(self):
        # assertHTMLEqual normalizes whitespace, so only the structure of
        # the expected markup matters here.
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(), size=3)
        self.assertHTMLEqual(str(SplitForm()), '''
            <tr>
                <th><label for="id_array_0">Array:</label></th>
                <td>
                    <input id="id_array_0" name="array_0" type="text" />
                    <input id="id_array_1" name="array_1" type="text" />
                    <input id="id_array_2" name="array_2" type="text" />
                </td>
            </tr>
        ''')

    def test_invalid_char_length(self):
        field = SplitArrayField(forms.CharField(max_length=2), size=3)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean(['abc', 'c', 'defg'])
        self.assertEqual(cm.exception.messages, [
            'Item 0 in the array did not validate: Ensure this value has at most 2 characters (it has 3).',
            'Item 2 in the array did not validate: Ensure this value has at most 2 characters (it has 4).',
        ])
| |
import time
import argparse
import numpy as np
import tensorflow as tf
import reader
import model
import pickle
import os
def parsing_args(argv=None):
    """Build the command-line parser and parse arguments.

    Args:
        argv: optional list of argument strings. Defaults to None, in which
            case argparse falls back to sys.argv[1:] -- fully backward
            compatible with existing callers, while allowing programmatic
            use (e.g. parsing_args([])).

    Returns:
        argparse.Namespace holding all option values.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type=str, default='train',
                        help='train or test')
    parser.add_argument('--init_from', type=str, default=None,
                        help='init model path')
    parser.add_argument('--init_method', type=str, default=None,
                        help='lstm/att init from lstm or full model')
    parser.add_argument('--word_vector_path', type=str, default=None,
                        help='pretrain word2vector model')
    parser.add_argument('--data_dir', type=str, default=None,
                        help='data directory containing train valid test data')
    parser.add_argument('--save', type=str, default=None,
                        help='directory to store checkpointed models')
    parser.add_argument('--model_result', type=str, default=None,
                        help='save model result')
    parser.add_argument('--att_file', type=str, default=None,
                        help='file storing attention weights for analysis')
    parser.add_argument('--rnn_size', type=int, default=300,
                        help='size of LSTM internal state')
    parser.add_argument('--emb_size', type=int, default=300,
                        help='word embedding size')
    parser.add_argument('--num_layers', type=int, default=1,
                        help='number of layers in the RNN')
    parser.add_argument('--batch_size', type=int, default=20,
                        help='minibatch size')
    parser.add_argument('--max_seq_length', type=int, default=60,
                        help='max number of timesteps to unroll during BPTT')
    parser.add_argument('--min_seq_length', type=int, default=0,
                        help='min number of timesteps to unroll during BPTT')
    parser.add_argument('--max_epochs', type=int, default=50,
                        help='number of full passes through the training data')
    parser.add_argument('--dropout', type=float, default=1,
                        help='dropout for regularization, neuron keep probabitity. 1 = no dropout')
    parser.add_argument('--max_grad_norm', type=float, default=5.,
                        help='clip gradients at this value')
    parser.add_argument('--entropy_reg', type=float, default=0.1,
                        help='entropy regulizar')
    parser.add_argument('--learning_rate', type=float, default=1.0,
                        help='learning rate')
    parser.add_argument('--init_scale', type=float, default=0.1,
                        help='initialization scale')
    parser.add_argument('--decay_rate', type=float, default=0.5,
                        help='decay rate')
    parser.add_argument('--learning_rate_decay_after', type=int, default=10,
                        help='in number of epochs, when to start decaying the learning rate')
    parser.add_argument('--gpu_id', type=float, default=0,
                        help='% of gpu memory to be allocated to this process. Default is 66.6%')
    parser.add_argument('--print_every', type=int, default=200,
                        help='how many steps/minibatches between printing out the loss')
    args = parser.parse_args(argv)
    return args
def run_epoch_training(sess, all_op, data, lr, dropout, print_every):
    # Runs one full training pass over `data` and returns the epoch's
    # word-level perplexity exp(total loss / total words).
    # NOTE: written for Python 2 (print statements below).
    start_time = time.time()
    nbatch = data.get_batch_number()
    total_words_num = 0
    total_cost = 0
    # Fetch both the train op (for its side effect) and the summed loss.
    fetches = {}
    fetches['train'] = all_op['train']
    fetches['total_label_loss'] = all_op['total_label_loss']
    for idx in range(nbatch):
        x, y = data.get_data(idx)
        feed_dict = {
            all_op['input_data']:x,
            all_op['labels']:y,
            all_op['learning_rate']:lr,
            all_op['dropout']:dropout
            }
        result = sess.run(fetches,feed_dict=feed_dict)
        # `total_label_loss` is the batch's summed (not averaged) loss;
        # x.size counts every token in the batch, so the ratio below is
        # the per-word cross entropy.
        total_cost += result['total_label_loss']
        total_words_num += x.size
        # Periodic progress report with the current batch's perplexity.
        if (idx+1)%print_every == 0:
            print (idx+1), '/', nbatch, ': ', 'perplexity: ', np.exp(result['total_label_loss']/x.size)
    total_perplexity = np.exp(total_cost/total_words_num)
    print 'training perplexity in this epoch: ' , total_perplexity
    print 'epoch training time: ', (time.time() - start_time)
    return total_perplexity
def evaluating(sess, all_op, data):
    """Run one evaluation pass over `data` (dropout off) and return its
    word-level perplexity exp(total loss / total words)."""
    cumulative_loss = 0
    word_count = 0
    # Only the summed label loss is needed; no train op is run.
    wanted = {'total_label_loss': all_op['total_label_loss']}
    for batch_idx in range(data.get_batch_number()):
        inputs, targets = data.get_data(batch_idx)
        feeds = {
            all_op['input_data']: inputs,
            all_op['labels']: targets,
            all_op['dropout']: 1,
        }
        outcome = sess.run(wanted, feed_dict=feeds)
        cumulative_loss += outcome['total_label_loss']
        word_count += inputs.size
    return np.exp(cumulative_loss / word_count)
def train(args):
    """Train the language model, tracking train/valid/test perplexities.

    Side effects: checkpoints to args.save whenever validation perplexity
    improves, and pickles the per-epoch perplexity history to
    args.model_result after every epoch.
    """
    # Read the three data splits with identical batching settings.
    train_data = reader.data(data_dir=args.data_dir,
                             batch_size=args.batch_size,
                             min_seq_length=args.min_seq_length,
                             max_seq_length=args.max_seq_length,
                             min_count=0)
    train_data.load('train')
    valid_data = reader.data(data_dir=args.data_dir,
                             batch_size=args.batch_size,
                             min_seq_length=args.min_seq_length,
                             max_seq_length=args.max_seq_length,
                             min_count=0)
    valid_data.load('valid')
    test_data = reader.data(data_dir=args.data_dir,
                            batch_size=args.batch_size,
                            min_seq_length=args.min_seq_length,
                            max_seq_length=args.max_seq_length,
                            min_count=0)
    test_data.load('test')
    # Bail out early if a requested init checkpoint is missing.
    if args.init_from:
        if not os.path.isfile(args.init_from):
            print('init file not found')
            # BUG FIX: os.exit() does not exist (AttributeError); use
            # os._exit(1) to abort the process as originally intended.
            os._exit(1)
    # Placeholders needed for training.
    input_data_ph = tf.placeholder(tf.int32, [None, None])
    labels_ph = tf.placeholder(tf.int32, [None, None])
    learning_rate_ph = tf.placeholder(tf.float32, [])
    dropout_ph = tf.placeholder(tf.float32, [])
    # Build the model graph.
    vocab_size = train_data.vocab_size
    default_initializer = tf.random_uniform_initializer(-args.init_scale,
                                                        args.init_scale)
    with tf.variable_scope('model', initializer=default_initializer):
        logits, pretrain_list, output_linear_list, common_var, ten_var = model.inference(
            input_x=input_data_ph,
            embedding_dim=args.emb_size,
            lstm_hidden_dim_1=args.rnn_size,
            vocab_size=vocab_size,
            dropout=dropout_ph)
        total_label_loss, loss = model.loss(logits=logits, labels=labels_ph)
        train_op = model.training(loss, learning_rate_ph, args.max_grad_norm, common_var, ten_var)
    all_op = {'input_data': input_data_ph,
              'labels': labels_ph,
              'learning_rate': learning_rate_ph,
              'dropout': dropout_ph,
              'total_label_loss': total_label_loss,
              'train': train_op}
    # Build restore ops for warm-starting from a previous model.
    if args.init_from:
        if args.init_method == 'lstm':
            # Restore only the LSTM weights; the attention output layer is
            # initialised from the LSTM's output projection (W stacked twice).
            with tf.variable_scope('model'):
                with tf.variable_scope('output_lstm1_linear'):
                    lstm_linear_W = tf.get_variable('W', [args.rnn_size, vocab_size])
                    lstm_linear_b = tf.get_variable('b', [vocab_size], initializer=tf.constant_initializer(0.0))
                pretrain_list += [lstm_linear_W, lstm_linear_b]
                init_att_W = output_linear_list[0].assign(tf.concat(0, [lstm_linear_W, lstm_linear_W]))
                init_att_b = output_linear_list[1].assign(lstm_linear_b)
                saver_restore = tf.train.Saver(pretrain_list)
        else:
            saver_restore = tf.train.Saver()
    # Optionally seed the embedding matrix from a pretrained word2vec model.
    if args.word_vector_path:
        emb_matrix = pretrain_list[0]
        pretrain_emb = emb_matrix.assign(train_data.generate_word_embedding_matrix(args.word_vector_path))
    global_step = tf.Variable(0, name='global_step', trainable=False)
    init = tf.initialize_all_variables()
    saver_save = tf.train.Saver()
    training_process_perplexity = {'train': [], 'valid': [], 'test': [], 'best_val_test': []}
    file_name = 'rnn_size' + str(args.rnn_size)
    with tf.Session() as sess:
        sess.run(init)
        # Apply pretrained embedding / model weights after variable init.
        if args.word_vector_path:
            sess.run(pretrain_emb)
        if args.init_from:
            if args.init_method == 'lstm':
                saver_restore.restore(sess, args.init_from)
                sess.run(init_att_W)
                sess.run(init_att_b)
            else:
                saver_restore.restore(sess, args.init_from)
        # Training loop with step-wise learning-rate decay.
        best_val_perplexity = np.inf
        best_val_test_perplexity = np.inf
        for i in range(args.max_epochs):
            lr_decay = args.decay_rate ** max(i + 1 - args.learning_rate_decay_after, 0.0)
            learning_rate = args.learning_rate * lr_decay
            print("Epoch: %d Learning rate: %.3f" % (i + 1, learning_rate))
            training_perplexity = run_epoch_training(sess, all_op, train_data,
                                                     learning_rate, args.dropout, args.print_every)
            print("Epoch: %d Train Perplexity: %.3f" % (i + 1, training_perplexity))
            # Re-evaluate the training split without dropout for comparison.
            test_training_perplexity = evaluating(sess, all_op, train_data)
            print("Epoch: %d test training Perplexity: %.3f" % (i + 1,
                                                                test_training_perplexity))
            val_perplexity = evaluating(sess, all_op, valid_data)
            print("Epoch: %d validation Perplexity: %.3f" % (i + 1, val_perplexity))
            # Peek at test perplexity for reporting (model selection uses valid).
            test_perplexity = evaluating(sess, all_op, test_data)
            print("Epoch: %d peeking testing Perplexity: %.3f" % (i + 1, test_perplexity))
            if val_perplexity < best_val_perplexity:
                best_val_perplexity = val_perplexity
                best_val_test_perplexity = test_perplexity
                # Checkpoint only on validation improvement.
                saver_save.save(sess, os.path.join(args.save, file_name), global_step=global_step)
            print("So far best val testing Perplexity: %.3f" % (best_val_test_perplexity))
            training_process_perplexity['train'].append(test_training_perplexity)
            training_process_perplexity['valid'].append(val_perplexity)
            training_process_perplexity['test'].append(test_perplexity)
            training_process_perplexity['best_val_test'].append(best_val_test_perplexity)
            # Persist the history every epoch so partial runs are recoverable.
            with open(os.path.join(args.model_result, file_name), 'wb') as f:
                pickle.dump(training_process_perplexity, f)
def test(args):
    """Evaluate a saved checkpoint on the test split and print its perplexity."""
    test_data = reader.data(data_dir=args.data_dir,
                            batch_size=args.batch_size,
                            min_seq_length=args.min_seq_length,
                            max_seq_length=args.max_seq_length,
                            # BUG FIX: the argument parser never defines
                            # --min_count, so args.min_count raised
                            # AttributeError; fall back to 0 as train() does.
                            min_count=getattr(args, 'min_count', 0))
    test_data.load('test')
    # Bail out early if the requested checkpoint is missing.
    if args.init_from:
        if not os.path.isfile(args.init_from):
            print('init file not found')
            # BUG FIX: os.exit() does not exist (AttributeError); use
            # os._exit(1) to abort as originally intended.
            os._exit(1)
    # Placeholders (learning rate is unused at test time but kept for
    # interface parity with train()).
    input_data_ph = tf.placeholder(tf.int32, [None, None])
    labels_ph = tf.placeholder(tf.int32, [None, None])
    learning_rate_ph = tf.placeholder(tf.float32, [])
    dropout_ph = tf.placeholder(tf.float32, [])
    # Build the model graph.
    vocab_size = test_data.vocab_size
    # NOTE(review): train() unpacks five values from model.inference();
    # confirm this three-value unpacking matches the current model API.
    logits, pretrain_list, output_linear_list = model.inference(input_x=input_data_ph,
                                                                embedding_dim=args.emb_size,
                                                                lstm_hidden_dim_1=args.rnn_size,
                                                                vocab_size=vocab_size,
                                                                dropout=dropout_ph)
    total_label_loss, loss = model.loss(logits=logits, labels=labels_ph)
    all_op = {'input_data': input_data_ph,
              'labels': labels_ph,
              'learning_rate': learning_rate_ph,
              'dropout': dropout_ph,
              'total_label_loss': total_label_loss}
    if args.init_from:
        saver_restore = tf.train.Saver()
    # Restore the checkpoint and evaluate.
    init = tf.initialize_all_variables()
    with tf.Session() as sess:
        sess.run(init)
        saver_restore.restore(sess, args.init_from)
        test_perplexity = evaluating(sess, all_op, test_data)
        print("Testing Perplexity: %.3f" % (test_perplexity))
if __name__ == "__main__":
    # Dispatch on --mode: anything other than 'train' runs evaluation only.
    cli_args = parsing_args()
    runner = train if cli_args.mode == 'train' else test
    runner(cli_args)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Geometric distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
class Geometric(distribution.Distribution):
  """Geometric distribution.

  The Geometric distribution is parameterized by p, the probability of a
  positive event. It represents the probability that in k + 1 Bernoulli trials,
  the first k trials failed, before seeing a success.

  The pmf of this distribution is:

  #### Mathematical Details

  ```none
  pmf(k; p) = (1 - p)**k * p
  ```

  where:

  * `p` is the success probability, `0 < p <= 1`, and,
  * `k` is a non-negative integer.
  """

  def __init__(self,
               logits=None,
               probs=None,
               validate_args=False,
               allow_nan_stats=True,
               name="Geometric"):
    """Construct Geometric distributions.

    Args:
      logits: Floating-point `Tensor` with shape `[B1, ..., Bb]` where `b >= 0`
        indicates the number of batch dimensions. Each entry represents logits
        for the probability of success for independent Geometric distributions
        and must be in the range `(-inf, inf]`. Only one of `logits` or `probs`
        should be specified.
      probs: Positive floating-point `Tensor` with shape `[B1, ..., Bb]`
        where `b >= 0` indicates the number of batch dimensions. Each entry
        represents the probability of success for independent Geometric
        distributions and must be in the range `(0, 1]`. Only one of `logits`
        or `probs` should be specified.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
    parameters = dict(locals())
    with ops.name_scope(name, values=[logits, probs]) as name:
      # Exactly one of logits/probs is given; the helper derives the other.
      self._logits, self._probs = distribution_util.get_logits_and_probs(
          logits, probs, validate_args=validate_args, name=name)

      # Optionally assert 0 < probs before exposing it under a stable name.
      with ops.control_dependencies(
          [check_ops.assert_positive(self._probs)] if validate_args else []):
        self._probs = array_ops.identity(self._probs, name="probs")
    super(Geometric, self).__init__(
        dtype=self._probs.dtype,
        reparameterization_type=distribution.NOT_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._probs, self._logits],
        name=name)

  @property
  def logits(self):
    """Log-odds of a `1` outcome (vs `0`)."""
    return self._logits

  @property
  def probs(self):
    """Probability of a `1` outcome (vs `0`)."""
    return self._probs

  def _batch_shape_tensor(self):
    # Batch shape follows the (broadcast) shape of the probs parameter.
    return array_ops.shape(self._probs)

  def _batch_shape(self):
    return self.probs.get_shape()

  def _event_shape_tensor(self):
    # Scalar distribution: empty event shape.
    return array_ops.constant([], dtype=dtypes.int32)

  def _event_shape(self):
    return tensor_shape.scalar()

  def _sample_n(self, n, seed=None):
    # Uniform variates must be sampled from the open-interval `(0, 1)` rather
    # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
    # because it is the smallest, positive, "normal" number. A "normal" number
    # is such that the mantissa has an implicit leading 1. Normal, positive
    # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
    # this case, a subnormal number (i.e., np.nextafter) can cause us to sample
    # 0.
    sampled = random_ops.random_uniform(
        array_ops.concat([[n], array_ops.shape(self._probs)], 0),
        minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
        maxval=1.,
        seed=seed,
        dtype=self.dtype)
    # Inverse-transform sampling: floor(log(U) / log(1 - p)).
    return math_ops.floor(
        math_ops.log(sampled) / math_ops.log1p(-self.probs))

  def _cdf(self, x):
    if self.validate_args:
      x = distribution_util.embed_check_nonnegative_integer_form(x)
    else:
      # Whether or not x is integer-form, the following is well-defined.
      # However, scipy takes the floor, so we do too.
      x = math_ops.floor(x)
    # Broadcast x against the batch shape of probs.
    x *= array_ops.ones_like(self.probs)
    # CDF(k) = 1 - (1 - p)**(k + 1), expressed via expm1/log1p for accuracy;
    # zero below the support.
    return array_ops.where(
        x < 0.,
        array_ops.zeros_like(x),
        -math_ops.expm1((1. + x) * math_ops.log1p(-self.probs)))

  def _log_prob(self, x):
    if self.validate_args:
      x = distribution_util.embed_check_nonnegative_integer_form(x)
    else:
      # For consistency with cdf, we take the floor.
      x = math_ops.floor(x)
    x *= array_ops.ones_like(self.probs)
    probs = self.probs * array_ops.ones_like(x)
    # At x == 0 the x * log1p(-p) term is multiplied by 0; substitute p = 0
    # there so log1p(-p) cannot produce -inf (e.g. when p == 1) and poison
    # the product with NaN.
    safe_domain = array_ops.where(
        math_ops.equal(x, 0.),
        array_ops.zeros_like(probs),
        probs)
    return x * math_ops.log1p(-safe_domain) + math_ops.log(probs)

  def _entropy(self):
    probs = self._probs
    if self.validate_args:
      probs = control_flow_ops.with_dependencies(
          [check_ops.assert_less(
              probs,
              constant_op.constant(1., probs.dtype),
              message="Entropy is undefined when logits = inf or probs = 1.")],
          probs)
    # Claim: entropy(p) = softplus(s)/p - s
    # where s=logits and p=probs.
    #
    # Proof:
    #
    # entropy(p)
    # := -[(1-p)log(1-p) + plog(p)]/p
    # = -[log(1-p) + plog(p/(1-p))]/p
    # = -[-softplus(s) + ps]/p
    # = softplus(s)/p - s
    #
    # since,
    # log[1-sigmoid(s)]
    # = log[1/(1+exp(s)]
    # = -log[1+exp(s)]
    # = -softplus(s)
    #
    # using the fact that,
    # 1-sigmoid(s) = sigmoid(-s) = 1/(1+exp(s))
    return nn.softplus(self.logits) / probs - self.logits

  def _mean(self):
    # E[X] = (1 - p) / p = exp(-logits), since sigmoid(logits) = p.
    return math_ops.exp(-self.logits)

  def _variance(self):
    # Var[X] = (1 - p) / p**2 = mean / p.
    return self._mean() / self.probs

  def _mode(self):
    # The pmf (1 - p)**k * p is maximized at k = 0 for all valid p.
    return array_ops.zeros(self.batch_shape_tensor(), dtype=self.dtype)
| |
# nuke imports
import nuke
import nukescripts
# python imports
import os
# used in tandem with floating windows in the node graph.
# Assigns a 'Close all' method to the alt+` hotkey combo so
# you don't need to click close on all of them.
def closeAllNodes():
    """
    Closes all open nodes.
    """
    # Hide every node's floating control panel in one sweep.
    for openNode in nuke.allNodes():
        openNode.hideControlPanel()
nuke.menu('Node Graph').addCommand('FloatingTools/aldmbmtl.toolbox/Python/HatfieldKit/Close all nodes', closeAllNodes, 'alt+`')
# the default auto place fuction will always apply the snapping
# to all nodes. This version will only do it to the selected nodes
# if there are any. If nothing is selected, it reverts to the old
# method of snapping all nodes.
def smartSnap():
    """Autoplace-snap only the selected nodes; with no selection, snap all."""
    # An empty selection falls through to every node in the graph.
    targets = nuke.selectedNodes() or nuke.allNodes()
    for target in targets:
        nuke.autoplaceSnap(target)
nuke.menu('Node Graph').addCommand('Autoplace', smartSnap, shortcut='\\')
# creates a dialog box that points to a read node in the comp. The read node that is selected
# will be hooked to a postage stamp. Really useful for huge comps.
def createReadLink():
    """Prompt for a Read node and wire a hidden-input PostageStamp to it."""
    # Deselect everything while collecting "<name> <basename>" choices.
    choices = []
    for candidate in nuke.allNodes():
        candidate.setSelected(False)
        if candidate.Class() == 'Read':
            choices.append(candidate.name() + ' ' + os.path.basename(candidate['file'].value()))
    picked = nuke.choice('Create link to Read...', 'Pick a read', choices)
    if picked is None:
        # Dialog cancelled.
        return
    sourceRead = nuke.toNode(choices[picked].split(' ')[0])
    stamp = nuke.createNode('PostageStamp', inpanel=False)
    stamp['label'].setValue('[basename [value [topnode].file]]')
    stamp['hide_input'].setValue(True)
    stamp.setInput(0, sourceRead)
nuke.menu('Node Graph').addCommand('FloatingTools/aldmbmtl.toolbox/Python/HatfieldKit/Link to Read', createReadLink, shortcut='l')
# Just for shutting off callbacks if a studio overly loaded one up
# and is actively slowing shit down.
class CallbackManager(nukescripts.PythonPanel):
    """Panel listing every registered Nuke callback with a Remove button.

    Each button's PyScript command pops the callback out of the relevant
    registry dict, letting you disable a slow studio callback at runtime.
    The panel is a snapshot: reopen it to see current registrations.
    """
    def __init__(self):
        nukescripts.PythonPanel.__init__(self, 'Callback Manager')

        # onCreates section.  NOTE: unlike the others, this remove command
        # also prints the remaining registry (looks like a debug leftover).
        self.addKnob(nuke.Text_Knob('onCreates', 'onCreates'))
        for key in nuke.onCreates.keys():
            func = str(list(nuke.onCreates[key][0])[0])
            command = 'nuke.onCreates.pop("%s") ; print nuke.onCreates' % (key)
            commandKnob = nuke.PyScript_Knob('killCommand', 'Remove: ' + func)
            commandKnob.setCommand(command)
            # 0x00001000: knob flag forcing the button onto a new line
            # (presumably STARTLINE -- confirm against the nuke docs).
            commandKnob.setFlag(0x00001000)
            self.addKnob(commandKnob)

        # onDestroys section.
        self.addKnob(nuke.Text_Knob('onDestroys', 'onDestroys'))
        for key in nuke.onDestroys.keys():
            func = str(list(nuke.onDestroys[key][0])[0])
            command = 'nuke.onDestroys.pop("%s")' % (key)
            commandKnob = nuke.PyScript_Knob('killCommand', 'Remove: ' + func)
            commandKnob.setCommand(command)
            commandKnob.setFlag(0x00001000)
            self.addKnob(commandKnob)

        # onScriptCloses section.
        self.addKnob(nuke.Text_Knob('onScriptCloses', 'onScriptCloses'))
        for key in nuke.onScriptCloses.keys():
            func = str(list(nuke.onScriptCloses[key][0])[0])
            command = 'nuke.onScriptCloses.pop("%s")' % (key)
            commandKnob = nuke.PyScript_Knob('killCommand', 'Remove: ' + func)
            commandKnob.setCommand(command)
            commandKnob.setFlag(0x00001000)
            self.addKnob(commandKnob)

        # onScriptLoads section.
        self.addKnob(nuke.Text_Knob('onScriptLoads', 'onScriptLoads'))
        for key in nuke.onScriptLoads.keys():
            func = str(list(nuke.onScriptLoads[key][0])[0])
            command = 'nuke.onScriptLoads.pop("%s")' % (key)
            commandKnob = nuke.PyScript_Knob('killCommand', 'Remove: ' + func)
            commandKnob.setCommand(command)
            commandKnob.setFlag(0x00001000)
            self.addKnob(commandKnob)

        # onScriptSaves section.
        self.addKnob(nuke.Text_Knob('onScriptSaves', 'onScriptSaves'))
        for key in nuke.onScriptSaves.keys():
            func = str(list(nuke.onScriptSaves[key][0])[0])
            command = 'nuke.onScriptSaves.pop("%s")' % (key)
            commandKnob = nuke.PyScript_Knob('killCommand', 'Remove: ' + func)
            commandKnob.setCommand(command)
            commandKnob.setFlag(0x00001000)
            self.addKnob(commandKnob)

        # knobChangeds section.
        self.addKnob(nuke.Text_Knob('knobChangeds', 'knobChangeds'))
        for key in nuke.knobChangeds.keys():
            func = str(list(nuke.knobChangeds[key][0])[0])
            command = 'nuke.knobChangeds.pop("%s")' % (key)
            commandKnob = nuke.PyScript_Knob('killCommand', 'Remove: ' + func)
            commandKnob.setCommand(command)
            commandKnob.setFlag(0x00001000)
            self.addKnob(commandKnob)
nuke.menu('Nuke').addCommand('FloatingTools/aldmbmtl.toolbox/Python/HatfieldKit/Callback Manager', 'import HatfieldKit ; HatfieldKit.CallbackManager().show()')
# Breaks out a single layer to rgb and then shuffles it back into the source node.
def breakOutLayer():
    """
    Breaks out a layer to RGB and then shuffles it back in.

    BUG FIX: nuke.choice returns the 0-based index of the picked option, so
    the first layer returned 0, which the old `if index:` check treated the
    same as a cancelled dialog -- the first layer could never be broken out.
    Cancelling returns None, so test for None explicitly.
    :return:
    """
    node = nuke.selectedNode()
    # Collect the unique layer names present on the selected node,
    # preserving channel order.
    layers = []
    for channel in node.channels():
        layer = channel.split('.')[0]
        if layer not in layers:
            layers.append(layer)
    index = nuke.choice('Break out layer...', 'Layer', layers)
    if index is not None:
        # Dot anchor under the source, Shuffle pulling the layer into rgb,
        # then a ShuffleCopy merging it back into the stream.
        anchor = nuke.nodes.Dot(xpos=node.xpos() + 34, ypos=node.ypos() + 150)
        anchor.setInput(0, node)
        shuffle = nuke.nodes.Shuffle(xpos=node.xpos() + 250, ypos=anchor.ypos() - 4)
        shuffle['in'].setValue(layers[index])
        shuffle.setInput(0, anchor)
        pipeAnchor = nuke.nodes.Dot(xpos=node.xpos() + 250 + 34, ypos=node.ypos() + 500)
        pipeAnchor.setInput(0, shuffle)
        shuffleCopy = nuke.nodes.ShuffleCopy(red='red', green='green', blue='blue', xpos=node.xpos(),
                                             ypos=node.ypos() + 500 - 4)
        shuffleCopy['out'].setValue(layers[index])
        shuffleCopy.setInput(0, anchor)
        shuffleCopy.setInput(1, pipeAnchor)
nuke.menu('Node Graph').addCommand('FloatingTools/aldmbmtl.toolbox/Python/HatfieldKit/Break out layer', breakOutLayer, 'ctrl+b')
"""
Hatfield Node Kisser. Simulates the node kissing function from Flame.
"""
# this is assigned to the 'shift+z' key stroke
def node_kisser():
    """Flame-style node 'kissing': connect the selected node's next free
    input to the nearest node above it within a small pixel threshold."""
    try:
        selNode = nuke.selectedNode()
    except ValueError:
        # Nothing selected -- nuke.selectedNode raises ValueError.
        return
    xpos = selNode['xpos'].value()
    ypos = selNode['ypos'].value()
    # Find the first unconnected, non-optional input; remember nodes that
    # are already wired in so we never re-kiss them.
    connectedNodes = []
    input_int = 0  # BUG FIX: was unbound (NameError) when maxInputs() == 0
    for input_int in xrange(0, selNode.maxInputs()):
        pingedInput = selNode.input(input_int)
        if pingedInput is None:
            if input_int == selNode.optionalInput():
                continue
            else:
                break
        else:
            connectedNodes.append(pingedInput)
    possible_nodes = {}
    point = xpos + ypos
    for node in nuke.allNodes():
        if node == selNode or node in connectedNodes:
            continue
        thresh_range = 50
        ythresh_range = 100
        node_xpos = node['xpos'].value()
        node_ypos = node['ypos'].value()
        # Only consider nodes above the selection (inputs come from above)
        # and within the x/y pixel thresholds; keyed by x+y distance.
        if node_ypos <= ypos:
            if abs(node_ypos - ypos) <= ythresh_range:
                # left
                if node_xpos <= xpos:
                    if abs(node_xpos - xpos) <= thresh_range:
                        possible_nodes[abs((node_xpos + node_ypos) - point)] = node
                # right
                if node_xpos >= xpos:
                    if abs(node_xpos - xpos) <= thresh_range:
                        possible_nodes[abs((node_xpos + node_ypos) - point)] = node
    # BUG FIX: the old bare try/except silently swallowed *all* errors
    # (including real ones from setInput); check for candidates explicitly
    # and connect the closest one, if any.
    keys = sorted(possible_nodes)
    if keys:
        selNode.setInput(input_int, possible_nodes[keys[0]])
nuke.menu('Node Graph').addCommand('FloatingTools/aldmbmtl.toolbox/Python/HatfieldKit/Kiss', node_kisser, 'shift+z')
| |
#!/usr/bin/env python
"""
Copyright (c) 2014-2022 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
from __future__ import print_function # Requires: Python >= 2.6
import sys
sys.dont_write_bytecode = True
import cProfile
import inspect
import math
import mmap
import optparse
import os
import platform
import re
import socket
import subprocess
import struct
import threading
import time
import traceback
import warnings
from core.addr import inet_ntoa6
from core.addr import addr_port
from core.attribdict import AttribDict
from core.common import check_connection
from core.common import check_sudo
from core.common import check_whitelisted
from core.common import get_ex_message
from core.common import get_text
from core.common import is_local
from core.common import load_trails
from core.common import patch_parser
from core.compat import xrange
from core.datatype import LRUDict
from core.enums import BLOCK_MARKER
from core.enums import CACHE_TYPE
from core.enums import PROTO
from core.enums import TRAIL
from core.log import create_log_directory
from core.log import flush_condensed_events
from core.log import get_error_log_handle
from core.log import log_error
from core.log import log_event
from core.parallel import worker
from core.parallel import write_block
from core.settings import config
from core.settings import CAPTURE_TIMEOUT
from core.settings import CHECK_CONNECTION_MAX_RETRIES
from core.settings import CONFIG_FILE
from core.settings import CONSONANTS
from core.settings import DLT_OFFSETS
from core.settings import DNS_EXHAUSTION_THRESHOLD
from core.settings import GENERIC_SINKHOLE_REGEX
from core.settings import HOMEPAGE
from core.settings import HOURLY_SECS
from core.settings import HTTP_TIME_FORMAT
from core.settings import IGNORE_DNS_QUERY_SUFFIXES
from core.settings import IPPROTO_LUT
from core.settings import IS_WIN
from core.settings import LOCALHOST_IP
from core.settings import LOCAL_SUBDOMAIN_LOOKUPS
from core.settings import MAX_CACHE_ENTRIES
from core.settings import MMAP_ZFILL_CHUNK_LENGTH
from core.settings import NAME
from core.settings import NO_SUCH_NAME_COUNTERS
from core.settings import NO_SUCH_NAME_PER_HOUR_THRESHOLD
from core.settings import INFECTION_SCANNING_THRESHOLD
from core.settings import PORT_SCANNING_THRESHOLD
from core.settings import POTENTIAL_INFECTION_PORTS
from core.settings import read_config
from core.settings import REGULAR_SENSOR_SLEEP_TIME
from core.settings import SNAP_LEN
from core.settings import SUSPICIOUS_CONTENT_TYPES
from core.settings import SUSPICIOUS_DIRECT_DOWNLOAD_EXTENSIONS
from core.settings import SUSPICIOUS_DIRECT_IP_URL_REGEX
from core.settings import SUSPICIOUS_DOMAIN_CONSONANT_THRESHOLD
from core.settings import SUSPICIOUS_DOMAIN_ENTROPY_THRESHOLD
from core.settings import SUSPICIOUS_DOMAIN_LENGTH_THRESHOLD
from core.settings import SUSPICIOUS_HTTP_PATH_REGEXES
from core.settings import SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION
from core.settings import SUSPICIOUS_HTTP_REQUEST_REGEXES
from core.settings import SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS
from core.settings import SUSPICIOUS_PROXY_PROBE_PRE_CONDITION
from core.settings import SUSPICIOUS_UA_REGEX
from core.settings import VALID_DNS_NAME_REGEX
from core.settings import trails
from core.settings import VERSION
from core.settings import WEB_SCANNING_THRESHOLD
from core.settings import WHITELIST
from core.settings import WHITELIST_DIRECT_DOWNLOAD_KEYWORDS
from core.settings import WHITELIST_LONG_DOMAIN_NAME_KEYWORDS
from core.settings import WHITELIST_HTTP_REQUEST_PATHS
from core.settings import WHITELIST_UA_REGEX
from core.update import update_ipcat
from core.update import update_trails
from thirdparty import six
from thirdparty.six.moves import urllib as _urllib
warnings.filterwarnings(action="ignore", category=DeprecationWarning) # NOTE: https://github.com/helpsystems/pcapy/pull/67/files
# --- Module-level capture/detection state (shared by the capture and
# --- worker code; several entries are guarded by locks in _locks).
_buffer = None
_caps = []
# Per-second connection tracking used by the port/infection scanning
# heuristics in _process_packet.
_connect_sec = 0
_connect_src_dst = {}
_connect_src_details = {}
# Per-second HTTP path tracking used by the web-scanning heuristic.
_path_src_dst = {}
_path_src_dst_details = {}
_count = 0
_locks = AttribDict()
_multiprocessing = None
_n = None
# Bounded caches keyed by (CACHE_TYPE, value) tuples.
_result_cache = LRUDict(MAX_CACHE_ENTRIES)
_local_cache = LRUDict(MAX_CACHE_ENTRIES)
# Last-seen SYN/UDP bookkeeping (deduplicates repeated log events).
_last_syn = None
_last_logged_syn = None
_last_udp = None
_last_logged_udp = None
_done_count = 0
_done_lock = threading.Lock()
# DNS subdomain tracking for the exhaustion heuristic.
_subdomains = {}
_subdomains_sec = None
_dns_exhausted_domains = set()

class _set(set):
    # Trivial set subclass -- presumably used to mark/distinguish certain
    # trail containers elsewhere in the codebase (can't tell from here).
    pass

# pcapy is the only hard third-party requirement for packet capture;
# fail fast with platform-specific install instructions if it's missing.
try:
    import pcapy
except ImportError:
    if IS_WIN:
        exit("[!] please install 'WinPcap' (e.g. 'http://www.winpcap.org/install/') and Pcapy (e.g. 'https://breakingcode.wordpress.com/?s=pcapy')")
    else:
        msg = "[!] please install 'Pcapy' (e.g. 'sudo pip%s install pcapy-ng')" % ('3' if six.PY3 else '2')
        exit(msg)
def _check_domain_member(query, domains):
    """Return True if `query` or any of its parent domains is in `domains`.

    e.g. "a.b.example.com" matches a `domains` entry of "example.com".
    """
    labels = query.lower().split('.')
    return any('.'.join(labels[i:]) in domains for i in range(len(labels)))
def _check_domain_whitelisted(query):
    """Cached check of whether `query`'s hostname part is whitelisted."""
    cache_key = (CACHE_TYPE.DOMAIN_WHITELISTED, query)
    cached = _result_cache.get(cache_key)
    if cached is None:
        # Keep only the leading hostname-ish run of characters before testing.
        hostname = re.split(r"(?i)[^A-Z0-9._-]", query or "")[0]
        cached = _check_domain_member(hostname, WHITELIST)
        _result_cache[cache_key] = cached
    return cached
def _check_domain(query, sec, usec, src_ip, src_port, dst_ip, dst_port, proto, packet=None):
    # Checks a DNS query name against known trails and heuristics, logging an
    # event on a hit. A negative verdict is cached so repeated lookups of the
    # same benign name short-circuit at the top.
    if query:
        query = query.lower()
        # Strip any port suffix (e.g. "host:80").
        if ':' in query:
            query = query.split(':', 1)[0]

    if query.replace('.', "").isdigit():  # IP address
        return

    # Previously determined benign -- nothing to do.
    if _result_cache.get((CACHE_TYPE.DOMAIN, query)) is False:
        return

    result = False
    if re.search(VALID_DNS_NAME_REGEX, query) is not None and not _check_domain_whitelisted(query):
        parts = query.split('.')

        # Special-case subdomain lookup service.
        if query.endswith(".ip-adress.com"):  # Reference: https://www.virustotal.com/gui/domain/ip-adress.com/relations
            _ = '.'.join(parts[:-2])
            trail = "%s(.ip-adress.com)" % _
            if _ in trails:
                result = True
                log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[_][0], trails[_][1]), packet)

        # Exact/parent-domain match against known trails, most-specific first.
        if not result:
            for i in xrange(0, len(parts)):
                domain = '.'.join(parts[i:])
                if domain in trails:
                    if domain == query:
                        trail = domain
                    else:
                        # Annotate the matched suffix: "(sub.)domain".
                        _ = ".%s" % domain
                        trail = "(%s)%s" % (query[:-len(_)], _)

                    # Skip infrastructure hostnames of sinkholed/suspicious
                    # domains (e.g. ns2.nobel.su) and bare/www lookups of
                    # dynamic-DNS / free-web hosts (e.g. noip.com).
                    if not (re.search(r"(?i)\A([rd]?ns|nf|mx|nic)\d*\.", query) and any(_ in trails.get(domain, " ")[0] for _ in ("suspicious", "sinkhole"))):  # e.g. ns2.nobel.su
                        if not ((query == trail or parts[0] == "www") and any(_ in trails.get(domain, " ")[0] for _ in ("dynamic", "free web"))):  # e.g. noip.com
                            result = True
                            log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[domain][0], trails[domain][1]), packet)

                    break

        # Heuristic: unusually long first label (likely DGA/tunneling).
        if not result and config.USE_HEURISTICS:
            if len(parts[0]) > SUSPICIOUS_DOMAIN_LENGTH_THRESHOLD and '-' not in parts[0]:
                trail = None
                if len(parts) > 2:
                    trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
                elif len(parts) == 2:
                    trail = "(%s).%s" % (parts[0], parts[1])
                else:
                    trail = query
                if trail and not any(_ in trail for _ in WHITELIST_LONG_DOMAIN_NAME_KEYWORDS):
                    result = True
                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, "long domain (suspicious)", "(heuristic)"), packet)

        # Regex-based trails (compiled into trails._regex); recover which
        # alternative matched to look up its info, then annotate the
        # non-matching prefix/suffix in parentheses.
        if not result and trails._regex:
            match = re.search(trails._regex, query)
            if match:
                group, trail = [_ for _ in match.groupdict().items() if _[1] is not None][0]
                candidate = trails._regex.split("(?P<")[int(group[1:]) + 1]
                candidate = candidate.split('>', 1)[-1].rstrip('|')[:-1]
                if candidate in trails:
                    result = True
                    trail = match.group(0)
                    prefix, suffix = query[:match.start()], query[match.end():]
                    if prefix:
                        trail = "(%s)%s" % (prefix, trail)
                    if suffix:
                        trail = "%s(%s)" % (trail, suffix)
                    trail = trail.replace(".)", ").")
                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[candidate][0], trails[candidate][1]), packet)

        # Tor2web-style lookups: match the ".onion" part against trails.
        if not result and ".onion." in query:
            trail = re.sub(r"(\.onion)(\..*)", r"\1(\2)", query)
            _ = trail.split('(')[0]
            if _ in trails:
                result = True
                log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[_][0], trails[_][1]), packet)

    # Cache the negative verdict to skip future work for this name.
    if result is False:
        _result_cache[(CACHE_TYPE.DOMAIN, query)] = False
def _get_local_prefix():
    # Infers the local network prefix from observed connection sources:
    # takes each source IP seen in _connect_src_dst, strips the trailing
    # two dotted groups, and picks the most common remainder
    # (e.g. "192.168."). Falls back to the last cached answer, then '_'.
    _sources = set(_.split('~')[0] for _ in _connect_src_dst.keys())
    _candidates = [re.sub(r"\d+\.\d+\Z", "", _) for _ in _sources]
    # Sort (count, prefix) pairs descending so the most frequent wins.
    _ = sorted(((_candidates.count(_), _) for _ in set(_candidates)), reverse=True)
    result = _[0][1] if _ else ""
    if result:
        _result_cache[(CACHE_TYPE.LOCAL_PREFIX, "")] = result
    else:
        # No sources this interval -- reuse the previously computed prefix.
        result = _result_cache.get((CACHE_TYPE.LOCAL_PREFIX, ""))
    return result or '_'
def _process_packet(packet, sec, usec, ip_offset):
    """
    Processes single (raw) IP layer data

    Dissects the IP packet located at `ip_offset` inside `packet` (captured at
    time `sec`.`usec`), matches addresses/ports/domains/URLs against the loaded
    `trails` feed and optional heuristic checks, and reports hits via log_event()
    """

    global _connect_sec
    global _last_syn
    global _last_logged_syn
    global _last_udp
    global _last_logged_udp
    global _subdomains_sec

    try:
        if config.USE_HEURISTICS:
            # snapshot and advance the "current second" marker used for the
            # periodic (once-per-second) scanning sweeps below
            if _locks.connect_sec:
                _locks.connect_sec.acquire()

            connect_sec = _connect_sec
            _connect_sec = sec

            if _locks.connect_sec:
                _locks.connect_sec.release()

            # new second started: evaluate and reset accumulated counters
            if sec > connect_sec:
                for key in _connect_src_dst:
                    _src_ip, _dst = key.split('~')
                    # non-digit destination part -> "<src>~<dst_ip>" key holding distinct ports
                    if not _dst.isdigit() and len(_connect_src_dst[key]) > PORT_SCANNING_THRESHOLD:
                        if not check_whitelisted(_src_ip):
                            _dst_ip = _dst
                            for _ in _connect_src_details[key]:
                                log_event((sec, usec, _src_ip, _[2], _dst_ip, _[3], PROTO.TCP, TRAIL.IP, _src_ip, "potential port scanning", "(heuristic)"), packet)
                    # digit destination part -> "<src>~<dst_port>" key holding distinct hosts
                    elif len(_connect_src_dst[key]) > INFECTION_SCANNING_THRESHOLD:
                        _dst_port = _dst
                        _dst_ip = [_[-1] for _ in _connect_src_details[key]]
                        _src_port = [_[-2] for _ in _connect_src_details[key]]
                        # only report when each destination host was contacted exactly once
                        if len(_dst_ip) == len(set(_dst_ip)):
                            if _src_ip.startswith(_get_local_prefix()):
                                log_event((sec, usec, _src_ip, _src_port[0], _dst_ip[0], _dst_port, PROTO.TCP, TRAIL.PORT, _dst_port, "potential infection", "(heuristic)"), packet)

                _connect_src_dst.clear()
                _connect_src_details.clear()

                # evaluate accumulated per-(src, dst) HTTP path counters (web scanning)
                for key in _path_src_dst:
                    if len(_path_src_dst[key]) > WEB_SCANNING_THRESHOLD:
                        _src_ip, _dst_ip = key.split('~')
                        _sec, _usec, _src_port, _dst_port, _path = _path_src_dst_details[key].pop()
                        log_event((_sec, _usec, _src_ip, _src_port, _dst_ip, _dst_port, PROTO.TCP, TRAIL.PATH, "*", "potential web scanning", "(heuristic)"), packet)

                _path_src_dst.clear()
                _path_src_dst_details.clear()

        # IP header dissection (version, protocol, source/destination addresses)
        ip_data = packet[ip_offset:]
        ip_version = ord(ip_data[0:1]) >> 4
        localhost_ip = LOCALHOST_IP[ip_version]

        if ip_version == 0x04:  # IPv4
            ip_header = struct.unpack("!BBHHHBBH4s4s", ip_data[:20])
            fragment_offset = ip_header[4] & 0x1fff
            # non-first fragments carry no usable transport header
            if fragment_offset != 0:
                return
            iph_length = (ip_header[0] & 0xf) << 2
            protocol = ip_header[6]
            src_ip = socket.inet_ntoa(ip_header[8])
            dst_ip = socket.inet_ntoa(ip_header[9])
        elif ip_version == 0x06:  # IPv6
            # Reference: http://chrisgrundemann.com/index.php/2012/introducing-ipv6-understanding-ipv6-addresses/
            ip_header = struct.unpack("!BBHHBB16s16s", ip_data[:40])
            iph_length = 40
            protocol = ip_header[4]
            src_ip = inet_ntoa6(ip_header[6])
            dst_ip = inet_ntoa6(ip_header[7])
        else:
            return

        if protocol == socket.IPPROTO_TCP:  # TCP
            src_port, dst_port, _, _, doff_reserved, flags = struct.unpack("!HHLLBB", ip_data[iph_length:iph_length + 14])

            # non-SYN packets matching trails are passed to plugins only (no log writes)
            if flags != 2 and config.plugin_functions:
                if dst_ip in trails:
                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, dst_ip, trails[dst_ip][0], trails[dst_ip][1]), packet, skip_write=True)
                elif src_ip in trails and dst_ip != localhost_ip:
                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet, skip_write=True)

            if flags == 2:  # SYN set (only)
                _ = _last_syn
                _last_syn = (sec, src_ip, src_port, dst_ip, dst_port)
                if _ == _last_syn:  # skip bursts
                    return

                # destination IP (or ip:port) found in trails
                if dst_ip in trails or addr_port(dst_ip, dst_port) in trails:
                    _ = _last_logged_syn
                    _last_logged_syn = _last_syn
                    if _ != _last_logged_syn:
                        trail = addr_port(dst_ip, dst_port)
                        if trail not in trails:
                            trail = dst_ip
                        # skip inbound-only ("attacker") trails and off-port parking site hits
                        if not any(_ in trails[trail][0] for _ in ("attacker",)) and not ("parking site" in trails[trail][0] and dst_port not in (80, 443)):
                            log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP if ':' not in trail else TRAIL.IPORT, trail, trails[trail][0], trails[trail][1]), packet)

                # source IP (or ip:port) found in trails
                elif (src_ip in trails or addr_port(src_ip, src_port) in trails) and dst_ip != localhost_ip:
                    _ = _last_logged_syn
                    _last_logged_syn = _last_syn
                    if _ != _last_logged_syn:
                        trail = addr_port(src_ip, src_port)
                        if trail not in trails:
                            trail = src_ip
                        if not any(_ in trails[trail][0] for _ in ("malware",)):
                            log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP if ':' not in trail else TRAIL.IPORT, trail, trails[trail][0], trails[trail][1]), packet)

                if config.USE_HEURISTICS:
                    if dst_ip != localhost_ip:
                        # track distinct destination ports per (src, dst_ip) - port scanning
                        key = "%s~%s" % (src_ip, dst_ip)
                        if key not in _connect_src_dst:
                            _connect_src_dst[key] = set()
                            _connect_src_details[key] = set()
                        _connect_src_dst[key].add(dst_port)
                        _connect_src_details[key].add((sec, usec, src_port, dst_port))

                        # track distinct destination hosts per (src, dst_port) - infection spreading
                        if dst_port in POTENTIAL_INFECTION_PORTS:
                            key = "%s~%s" % (src_ip, dst_port)
                            if key not in _connect_src_dst:
                                _connect_src_dst[key] = set()
                                _connect_src_details[key] = set()
                            _connect_src_dst[key].add(dst_ip)
                            _connect_src_details[key].add((sec, usec, src_port, dst_ip))

            else:
                # non-SYN packet: inspect the TCP payload (HTTP request/response)
                tcph_length = doff_reserved >> 4
                h_size = iph_length + (tcph_length << 2)
                tcp_data = get_text(ip_data[h_size:])

                if tcp_data.startswith("HTTP/"):
                    # HTTP response: sinkhole / seized-domain / content-type checks
                    match = re.search(GENERIC_SINKHOLE_REGEX, tcp_data[:2000])
                    if match:
                        trail = match.group(0)
                        log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "sinkhole response (malware)", "(heuristic)"), packet)
                    else:
                        index = tcp_data.find("<title>")
                        if index >= 0:
                            title = tcp_data[index + len("<title>"):tcp_data.find("</title>", index)]
                            if re.search(r"domain name has been seized by|Domain Seized|Domain Seizure", title):
                                log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, title, "seized domain (suspicious)", "(heuristic)"), packet)

                    content_type = None
                    first_index = tcp_data.find("\r\nContent-Type:")
                    if first_index >= 0:
                        first_index = first_index + len("\r\nContent-Type:")
                        last_index = tcp_data.find("\r\n", first_index)
                        if last_index >= 0:
                            content_type = tcp_data[first_index:last_index].strip().lower()

                    if content_type and content_type in SUSPICIOUS_CONTENT_TYPES:
                        log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, content_type, "content type (suspicious)", "(heuristic)"), packet)

                # HTTP request: parse request line ("<method> <path> HTTP/x.y")
                method, path = None, None

                if " HTTP/" in tcp_data:
                    index = tcp_data.find("\r\n")
                    if index >= 0:
                        line = tcp_data[:index]
                        if line.count(' ') == 2 and " HTTP/" in line:
                            method, path, _ = line.split(' ')

                if method and path:
                    post_data = None
                    host = dst_ip
                    first_index = tcp_data.find("\r\nHost:")
                    path = path.lower()

                    if first_index >= 0:
                        # Host header present: extract and normalize it
                        first_index = first_index + len("\r\nHost:")
                        last_index = tcp_data.find("\r\n", first_index)
                        if last_index >= 0:
                            host = tcp_data[first_index:last_index]
                            host = host.strip().lower()
                            if host.endswith(":80"):
                                host = host[:-3]
                            if host and host[0].isalpha() and dst_ip in trails:
                                log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, "%s (%s)" % (dst_ip, host.split(':')[0]), trails[dst_ip][0], trails[dst_ip][1]), packet)
                            elif re.search(r"\A\d+\.[0-9.]+\Z", host or "") and re.search(SUSPICIOUS_DIRECT_IP_URL_REGEX, "%s%s" % (host, path)):
                                # direct-IP URL request (common for IoT malware downloads)
                                if not dst_ip.startswith(_get_local_prefix()):
                                    trail = "(%s)%s" % (host, path)
                                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "potential iot-malware download (suspicious)", "(heuristic)"), packet)
                                    return
                            elif config.CHECK_HOST_DOMAINS:
                                _check_domain(host, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
                    elif config.USE_HEURISTICS and config.CHECK_MISSING_HOST:
                        log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, "%s%s" % (host, path), "missing host header (suspicious)", "(heuristic)"), packet)

                    # request body (if any) starts after the blank line
                    index = tcp_data.find("\r\n\r\n")
                    if index >= 0:
                        post_data = tcp_data[index + 4:]

                    url = None
                    if config.USE_HEURISTICS and path.startswith('/'):
                        # record first path segment per (src, dst) pair for web scanning sweep
                        _path = path.split('/')[1]
                        key = "%s~%s" % (src_ip, dst_ip)
                        if key not in _path_src_dst:
                            _path_src_dst[key] = set()
                        _path_src_dst[key].add(_path)
                        if key not in _path_src_dst_details:
                            _path_src_dst_details[key] = set()
                        _path_src_dst_details[key].add((sec, usec, src_port, dst_port, path))
                    elif config.USE_HEURISTICS and dst_port == 80 and path.startswith("http://") and any(_ in path for _ in SUSPICIOUS_PROXY_PROBE_PRE_CONDITION) and not _check_domain_whitelisted(path.split('/')[2]):
                        # absolute-URI request to port 80 - open proxy probing
                        trail = re.sub(r"(http://[^/]+/)(.+)", r"\g<1>(\g<2>)", path)
                        trail = re.sub(r"(http://)([^/(]+)", lambda match: "%s%s" % (match.group(1), match.group(2).split(':')[0].rstrip('.')), trail)
                        log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "potential proxy probe (suspicious)", "(heuristic)"), packet)
                        return
                    elif "://" in path:
                        # proxy-style absolute URI: re-split into host and path
                        unquoted_path = _urllib.parse.unquote(path)
                        key = "code execution"
                        if key not in _local_cache:
                            _local_cache[key] = next(_[1] for _ in SUSPICIOUS_HTTP_REQUEST_REGEXES if "code execution" in _[0])
                        if re.search(_local_cache[key], unquoted_path, re.I) is None:  # NOTE: to prevent malware domain FPs in case of outside scanners
                            url = path.split("://", 1)[1]
                            if '/' not in url:
                                url = "%s/" % url
                            host, path = url.split('/', 1)
                            if host.endswith(":80"):
                                host = host[:-3]
                            path = "/%s" % path
                            proxy_domain = host.split(':')[0]
                            _check_domain(proxy_domain, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
                    elif method == "CONNECT":
                        # HTTPS tunneling request: path holds the target host
                        if '/' in path:
                            host, path = path.split('/', 1)
                            path = "/%s" % path
                        else:
                            host, path = path, '/'
                        if host.endswith(":80"):
                            host = host[:-3]
                        url = "%s%s" % (host, path)
                        proxy_domain = host.split(':')[0]
                        _check_domain(proxy_domain, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)

                    if url is None:
                        url = "%s%s" % (host, path)

                    if config.USE_HEURISTICS:
                        # User-Agent inspection (cached per distinct UA string)
                        user_agent, result = None, None

                        first_index = tcp_data.find("\r\nUser-Agent:")
                        if first_index >= 0:
                            first_index = first_index + len("\r\nUser-Agent:")
                            last_index = tcp_data.find("\r\n", first_index)
                            if last_index >= 0:
                                user_agent = tcp_data[first_index:last_index]
                                user_agent = _urllib.parse.unquote(user_agent).strip()

                        if user_agent:
                            result = _result_cache.get((CACHE_TYPE.USER_AGENT, user_agent))
                            if result is None:
                                if re.search(WHITELIST_UA_REGEX, user_agent, re.I) is None:
                                    match = re.search(SUSPICIOUS_UA_REGEX, user_agent)
                                    if match:
                                        def _(value):
                                            # escape parentheses used as trail markers
                                            return value.rstrip('\\').replace('(', "\\(").replace(')', "\\)")

                                        parts = user_agent.split(match.group(0), 1)

                                        if len(parts) > 1 and parts[0] and parts[-1]:
                                            result = _result_cache[(CACHE_TYPE.USER_AGENT, user_agent)] = "%s (%s)" % (_(match.group(0)), _(user_agent))
                                        else:
                                            result = _result_cache[(CACHE_TYPE.USER_AGENT, user_agent)] = _(match.group(0)).join(("(%s)" if part else "%s") % _(part) for part in parts)

                                if not result:
                                    _result_cache[(CACHE_TYPE.USER_AGENT, user_agent)] = False

                            if result:
                                log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.UA, result, "user agent (suspicious)", "(heuristic)"), packet)

                    if not _check_domain_whitelisted(host):
                        # build list of URL variants ("checks") to look up in trails
                        path = path.replace("//", '/')

                        unquoted_path = _urllib.parse.unquote(path)
                        unquoted_post_data = _urllib.parse.unquote(post_data or "")

                        checks = [path.rstrip('/')]

                        if '?' in path:
                            checks.append(path.split('?')[0].rstrip('/'))

                            if '=' in path:
                                checks.append(path[:path.index('=') + 1])

                            # variant with parameter values stripped
                            _ = re.sub(r"(\w+=)[^&=]+", r"\g<1>", path)
                            if _ not in checks:
                                checks.append(_)
                                if _.count('/') > 1:
                                    checks.append("/%s" % _.split('/')[-1])
                        elif post_data:
                            checks.append("%s?%s" % (path, unquoted_post_data.lower()))

                        if checks[-1].count('/') > 1:
                            checks.append(checks[-1][:checks[-1].rfind('/')])
                            checks.append(checks[0][checks[0].rfind('/'):].split('?')[0])

                        # try each variant both bare and prefixed with the host
                        for check in filter(None, checks):
                            for _ in ("", host):
                                check = "%s%s" % (_, check)
                                if check in trails:
                                    if '?' not in path and '?' in check and post_data:
                                        trail = "%s(%s \\(%s %s\\))" % (host, path, method, post_data.strip())
                                        log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, trails[check][0], trails[check][1]))
                                    else:
                                        parts = url.split(check)
                                        other = ("(%s)" % _ if _ else _ for _ in parts)
                                        trail = check.join(other)
                                        log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, trails[check][0], trails[check][1]))

                                    return

                        # bare "<host>/" entry in trails
                        if "%s/" % host in trails:
                            trail = "%s/" % host
                            log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, trails[trail][0], trails[trail][1]))
                            return

                        if config.USE_HEURISTICS:
                            # append forwarded client IP (if any) to the reported source
                            match = re.search(r"\b(CF-Connecting-IP|True-Client-IP|X-Forwarded-For):\s*([0-9.]+)".encode(), packet, re.I)
                            if match:
                                src_ip = "%s,%s" % (src_ip, match.group(1))

                            # force-encode characters that would break trail/log formatting
                            for char in SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS:
                                replacement = SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS[char]
                                path = path.replace(char, replacement)
                                if post_data:
                                    post_data = post_data.replace(char, replacement)

                            if not any(_ in unquoted_path.lower() for _ in WHITELIST_HTTP_REQUEST_PATHS):
                                # suspicious request path (cached per distinct path)
                                if any(_ in unquoted_path for _ in SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION):
                                    found = _result_cache.get((CACHE_TYPE.PATH, unquoted_path))
                                    if found is None:
                                        for desc, regex in SUSPICIOUS_HTTP_REQUEST_REGEXES:
                                            if re.search(regex, unquoted_path, re.I | re.DOTALL):
                                                found = desc
                                                break
                                        _result_cache[(CACHE_TYPE.PATH, unquoted_path)] = found or ""
                                    if found and not ("data leakage" in found and is_local(dst_ip)):
                                        trail = "%s(%s)" % (host, path)
                                        log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "%s (suspicious)" % found, "(heuristic)"), packet)
                                        return

                                # suspicious POST body (cached per distinct body)
                                if any(_ in unquoted_post_data for _ in SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION):
                                    found = _result_cache.get((CACHE_TYPE.POST_DATA, unquoted_post_data))
                                    if found is None:
                                        for desc, regex in SUSPICIOUS_HTTP_REQUEST_REGEXES:
                                            if re.search(regex, unquoted_post_data, re.I | re.DOTALL):
                                                found = desc
                                                break
                                        _result_cache[(CACHE_TYPE.POST_DATA, unquoted_post_data)] = found or ""
                                    if found:
                                        trail = "%s(%s \\(%s %s\\))" % (host, path, method, post_data.strip())
                                        log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "%s (suspicious)" % found, "(heuristic)"), packet)
                                        return

                            if '.' in path:
                                # suspicious filename / direct download by extension
                                _ = _urllib.parse.urlparse("http://%s" % url)  # dummy scheme
                                path = path.lower()
                                filename = _.path.split('/')[-1]
                                name, extension = os.path.splitext(filename)
                                trail = "%s(%s)" % (host, path)
                                if extension in SUSPICIOUS_DIRECT_DOWNLOAD_EXTENSIONS and not is_local(dst_ip) and not any(_ in path for _ in WHITELIST_DIRECT_DOWNLOAD_KEYWORDS) and '=' not in _.query and len(name) < 10:
                                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "direct %s download (suspicious)" % extension, "(heuristic)"), packet)
                                else:
                                    for desc, regex in SUSPICIOUS_HTTP_PATH_REGEXES:
                                        if re.search(regex, filename, re.I):
                                            log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "%s (suspicious)" % desc, "(heuristic)"), packet)
                                            break

        elif protocol == socket.IPPROTO_UDP:  # UDP
            _ = ip_data[iph_length:iph_length + 4]
            if len(_) < 4:
                return

            src_port, dst_port = struct.unpack("!HH", _)

            _ = _last_udp
            _last_udp = (sec, src_ip, src_port, dst_ip, dst_port)
            if _ == _last_udp:  # skip bursts
                return

            if src_port != 53 and dst_port != 53:  # not DNS
                if dst_ip in trails:
                    trail = dst_ip
                elif src_ip in trails:
                    trail = src_ip
                else:
                    trail = None

                if trail:
                    _ = _last_logged_udp
                    _last_logged_udp = _last_udp
                    if _ != _last_logged_udp:
                        if not any(_ in trails[trail][0] for _ in ("malware",)):
                            log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, trail, trails[trail][0], trails[trail][1]), packet)

            else:
                # DNS packet (either direction)
                dns_data = ip_data[iph_length + 8:]

                # Reference: http://www.ccs.neu.edu/home/amislove/teaching/cs4700/fall09/handouts/project1-primer.pdf
                if len(dns_data) > 6:
                    qdcount = struct.unpack("!H", dns_data[4:6])[0]
                    if qdcount > 0:
                        # decode the (first) QNAME label sequence into a dotted name
                        offset = 12
                        query = ""

                        while len(dns_data) > offset:
                            length = ord(dns_data[offset:offset + 1])
                            if not length:
                                query = query[:-1]
                                break
                            query += get_text(dns_data[offset + 1:offset + length + 1]) + '.'
                            offset += length + 1

                        query = query.lower()

                        if not query or re.search(VALID_DNS_NAME_REGEX, query) is None or any(_ in query for _ in (".intranet.",)) or query.split('.')[-1] in IGNORE_DNS_QUERY_SUFFIXES:
                            return

                        parts = query.split('.')

                        if ord(dns_data[2:3]) & 0xfa == 0x00:  # standard query (both recursive and non-recursive)
                            type_, class_ = struct.unpack("!HH", dns_data[offset + 1:offset + 5])

                            if len(parts) > 2:
                                # registered domain: keep three labels for short SLDs (e.g. "co.uk")
                                if len(parts) > 3 and len(parts[-2]) <= 3:
                                    domain = '.'.join(parts[-3:])
                                else:
                                    domain = '.'.join(parts[-2:])

                                if not _check_domain_whitelisted(domain):  # e.g. <hash>.hashserver.cs.trendmicro.com
                                    # hourly reset of per-domain subdomain tracking
                                    if (sec - (_subdomains_sec or 0)) > HOURLY_SECS:
                                        _subdomains.clear()
                                        _dns_exhausted_domains.clear()
                                        _subdomains_sec = sec

                                    subdomains = _subdomains.get(domain)

                                    if not subdomains:
                                        subdomains = _subdomains[domain] = _set()
                                        subdomains._start = sec

                                    if not re.search(r"\A\d+\-\d+\-\d+\-\d+\Z", parts[0]):
                                        # sliding 60s window of distinct subdomains per domain
                                        if sec - subdomains._start > 60:
                                            subdomains._start = sec
                                            subdomains.clear()
                                        elif len(subdomains) < DNS_EXHAUSTION_THRESHOLD:
                                            subdomains.add('.'.join(parts[:-2]))
                                        else:
                                            trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
                                            if re.search(r"bl\b", trail) is None:  # generic check for DNSBLs
                                                if not any(_ in subdomains for _ in LOCAL_SUBDOMAIN_LOOKUPS):  # generic check for local DNS resolutions
                                                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "potential dns exhaustion (suspicious)", "(heuristic)"), packet)
                                                    _dns_exhausted_domains.add(domain)
                                                    return

                            # Reference: http://en.wikipedia.org/wiki/List_of_DNS_record_types
                            if type_ not in (12, 28) and class_ == 1:  # Type not in (PTR, AAAA), Class IN
                                if addr_port(dst_ip, dst_port) in trails:
                                    trail = addr_port(dst_ip, dst_port)
                                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IPORT, "%s (%s)" % (dst_ip, query), trails[trail][0], trails[trail][1]), packet)
                                elif dst_ip in trails:
                                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, "%s (%s)" % (dst_ip, query), trails[dst_ip][0], trails[dst_ip][1]), packet)
                                elif src_ip in trails:
                                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet)

                                _check_domain(query, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, packet)

                        elif config.USE_HEURISTICS:
                            if ord(dns_data[2:3]) & 0x80:  # standard response
                                if ord(dns_data[3:4]) == 0x80:  # recursion available, no error
                                    # walk answer records looking for the first Type A answer
                                    _ = offset + 5
                                    try:
                                        while _ < len(dns_data):
                                            if ord(dns_data[_:_ + 1]) & 0xc0 != 0 and dns_data[_ + 2] == "\00" and dns_data[_ + 3] == "\x01":  # Type A
                                                break
                                            else:
                                                _ += 12 + struct.unpack("!H", dns_data[_ + 10: _ + 12])[0]

                                        _ = dns_data[_ + 12:_ + 16]
                                        if _:
                                            answer = socket.inet_ntoa(_)
                                            if answer in trails and not _check_domain_whitelisted(query):
                                                _ = trails[answer]
                                                if "sinkhole" in _[0]:
                                                    trail = "(%s).%s" % ('.'.join(parts[:-1]), '.'.join(parts[-1:]))
                                                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "sinkholed by %s (malware)" % _[0].split(" ")[1], "(heuristic)"), packet)  # (e.g. kitro.pl, devomchart.com, jebena.ananikolic.su, vuvet.cn)
                                                elif "parking" in _[0]:
                                                    trail = "(%s).%s" % ('.'.join(parts[:-1]), '.'.join(parts[-1:]))
                                                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "parked site (suspicious)", "(heuristic)"), packet)
                                    except IndexError:
                                        pass

                                elif ord(dns_data[3:4]) == 0x83:  # recursion available, no such name
                                    if '.'.join(parts[-2:]) not in _dns_exhausted_domains and not _check_domain_whitelisted(query) and not _check_domain_member(query, trails):
                                        if parts[-1].isdigit():
                                            return

                                        if not (len(parts) > 4 and all(_.isdigit() and int(_) < 256 for _ in parts[:4])):  # generic check for DNSBL IP lookups
                                            if not is_local(dst_ip):  # prevent FPs caused by local queries
                                                # per-hour NXDOMAIN counters (exact query and wildcarded domain)
                                                for _ in filter(None, (query, "*.%s" % '.'.join(parts[-2:]) if query.count('.') > 1 else None)):
                                                    if _ not in NO_SUCH_NAME_COUNTERS or NO_SUCH_NAME_COUNTERS[_][0] != sec // 3600:
                                                        NO_SUCH_NAME_COUNTERS[_] = [sec // 3600, 1, set()]
                                                    else:
                                                        NO_SUCH_NAME_COUNTERS[_][1] += 1
                                                        NO_SUCH_NAME_COUNTERS[_][2].add(query)

                                                    if NO_SUCH_NAME_COUNTERS[_][1] > NO_SUCH_NAME_PER_HOUR_THRESHOLD:
                                                        if _.startswith("*."):
                                                            trail = "%s%s" % ("(%s)" % ','.join(item.replace(_[1:], "") for item in NO_SUCH_NAME_COUNTERS[_][2]), _[1:])
                                                            if not any(subdomain in trail for subdomain in LOCAL_SUBDOMAIN_LOOKUPS):  # generic check for local DNS resolutions
                                                                log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "excessive no such domain (suspicious)", "(heuristic)"), packet)
                                                            for item in NO_SUCH_NAME_COUNTERS[_][2]:
                                                                try:
                                                                    del NO_SUCH_NAME_COUNTERS[item]
                                                                except KeyError:
                                                                    pass
                                                        else:
                                                            log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, _, "excessive no such domain (suspicious)", "(heuristic)"), packet)
                                                            try:
                                                                del NO_SUCH_NAME_COUNTERS[_]
                                                            except KeyError:
                                                                pass

                                                        break

                                            # DGA-style checks for bare "<label>.<tld>" NXDOMAINs
                                            if len(parts) == 2 and parts[0] and '-' not in parts[0]:
                                                part = parts[0]
                                                trail = "(%s).%s" % (parts[0], parts[1])
                                                result = _result_cache.get(part)

                                                if result is None:
                                                    # Reference: https://github.com/exp0se/dga_detector
                                                    probabilities = (float(part.count(c)) / len(part) for c in set(_ for _ in part))
                                                    entropy = -sum(p * math.log(p) / math.log(2.0) for p in probabilities)
                                                    if entropy > SUSPICIOUS_DOMAIN_ENTROPY_THRESHOLD:
                                                        result = "entropy threshold no such domain (suspicious)"

                                                    if not result:
                                                        if sum(_ in CONSONANTS for _ in part) > SUSPICIOUS_DOMAIN_CONSONANT_THRESHOLD:
                                                            result = "consonant threshold no such domain (suspicious)"

                                                    _result_cache[part] = result or False

                                                if result:
                                                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, result, "(heuristic)"), packet)

        elif protocol in IPPROTO_LUT:  # non-TCP/UDP (e.g. ICMP)
            if protocol == socket.IPPROTO_ICMP:
                if ord(ip_data[iph_length:iph_length + 1]) != 0x08:  # Non-echo request
                    return
            elif protocol == socket.IPPROTO_ICMPV6:
                if ord(ip_data[iph_length:iph_length + 1]) != 0x80:  # Non-echo request
                    return

            if dst_ip in trails:
                log_event((sec, usec, src_ip, '-', dst_ip, '-', IPPROTO_LUT[protocol], TRAIL.IP, dst_ip, trails[dst_ip][0], trails[dst_ip][1]), packet)
            elif src_ip in trails:
                log_event((sec, usec, src_ip, '-', dst_ip, '-', IPPROTO_LUT[protocol], TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet)

    except struct.error:
        # truncated/malformed packet - silently ignored
        pass

    except Exception:
        if config.SHOW_DEBUG:
            traceback.print_exc()
def init():
    """
    Performs sensor initialization

    Loads/schedules trail updates, validates configuration, registers plugins,
    opens capture handles (live interfaces or pcap files) and optionally spawns
    worker processes and sets CPU affinity
    """

    global _multiprocessing

    # multiprocessing is used only when more than one process is configured
    # (and not when profiling)
    try:
        import multiprocessing

        if config.PROCESS_COUNT > 1 and not config.profile:
            _multiprocessing = multiprocessing
    except (ImportError, OSError, NotImplementedError):
        pass

    def update_timer():
        # (re)loads trails - online when possible, offline otherwise - and
        # reschedules itself via threading.Timer
        retries = 0
        if not config.offline:
            while retries < CHECK_CONNECTION_MAX_RETRIES and not check_connection():
                sys.stdout.write("[!] can't update because of lack of Internet connection (waiting..." if not retries else '.')
                sys.stdout.flush()
                time.sleep(10)
                retries += 1

            if retries:
                print(")")

        if config.offline or retries == CHECK_CONNECTION_MAX_RETRIES:
            if retries == CHECK_CONNECTION_MAX_RETRIES:
                print("[x] going to continue without online update")
            _ = update_trails(offline=True)
        else:
            _ = update_trails()
            update_ipcat()

        if _:
            trails.clear()
            trails.update(_)
        elif not trails:
            _ = load_trails()
            trails.update(_)

        # combine regex-like "static" trails into one alternation regex with
        # named groups (capped by Python's historical 100-group limit)
        _regex = ""
        for trail in trails:
            if "static" in trails[trail][1]:
                if re.search(r"[\].][*+]|\[[a-z0-9_.\-]+\]", trail, re.I):
                    try:
                        re.compile(trail)
                    except re.error:
                        pass
                    else:
                        if re.escape(trail) != trail:
                            index = _regex.count("(?P<g")
                            if index < 100:  # Reference: https://stackoverflow.com/questions/478458/python-regular-expressions-with-more-than-100-groups
                                _regex += "|(?P<g%s>%s)" % (index, trail)

        trails._regex = _regex.strip('|')

        thread = threading.Timer(config.UPDATE_PERIOD, update_timer)
        thread.daemon = True
        thread.start()

    create_log_directory()
    get_error_log_handle()

    msg = "[i] using '%s' for trail storage" % config.TRAILS_FILE
    if os.path.isfile(config.TRAILS_FILE):
        mtime = time.gmtime(os.path.getmtime(config.TRAILS_FILE))
        msg += " (last modification: '%s')" % time.strftime(HTTP_TIME_FORMAT, mtime)

    print(msg)

    # initial (blocking) trails load; subsequent runs are timer-scheduled
    update_timer()

    if not config.DISABLE_CHECK_SUDO and check_sudo() is False:
        exit("[!] please run '%s' with root privileges" % __file__)

    if config.plugins:
        # locate, import and validate each configured plugin script
        config.plugin_functions = []
        for plugin in re.split(r"[,;]", config.plugins):
            plugin = plugin.strip()
            found = False

            for _ in (plugin, os.path.join("plugins", plugin), os.path.join("plugins", "%s.py" % plugin)):
                if os.path.isfile(_):
                    plugin = _
                    found = True
                    break

            if not found:
                exit("[!] plugin script '%s' not found" % plugin)
            else:
                dirname, filename = os.path.split(plugin)
                dirname = os.path.abspath(dirname)
                if not os.path.exists(os.path.join(dirname, '__init__.py')):
                    exit("[!] empty file '__init__.py' required inside directory '%s'" % dirname)

                if not filename.endswith(".py"):
                    exit("[!] plugin script '%s' should have an extension '.py'" % filename)

                if dirname not in sys.path:
                    sys.path.insert(0, dirname)

                try:
                    module = __import__(filename[:-3])
                except (ImportError, SyntaxError) as msg:
                    exit("[!] unable to import plugin script '%s' (%s)" % (filename, msg))

                found = False

                for name, function in inspect.getmembers(module, inspect.isfunction):
                    # NOTE(review): set(("event_tuple', 'packet")) is a set of the
                    # CHARACTERS of one oddly-quoted string, not of the two argument
                    # names - looks like a mangled literal; verify intended check
                    if name == "plugin" and not set(inspect.getargspec(function).args) & set(("event_tuple', 'packet")):
                        found = True
                        config.plugin_functions.append(function)
                        function.__name__ = module.__name__

                if not found:
                    exit("[!] missing function 'plugin(event_tuple, packet)' in plugin script '%s'" % filename)

    if config.pcap_file:
        # offline mode: open each provided pcap file
        for _ in config.pcap_file.split(','):
            _caps.append(pcapy.open_offline(_))
    else:
        # live mode: open each configured monitoring interface
        interfaces = set(_.strip() for _ in config.MONITOR_INTERFACE.split(','))

        if (config.MONITOR_INTERFACE or "").lower() == "any":
            if IS_WIN or "any" not in pcapy.findalldevs():
                print("[x] virtual interface 'any' missing. Replacing it with all interface names")
                interfaces = pcapy.findalldevs()
            else:
                print("[?] in case of any problems with packet capture on virtual interface 'any', please put all monitoring interfaces to promiscuous mode manually (e.g. 'sudo ifconfig eth0 promisc')")

        for interface in interfaces:
            if interface.lower() != "any" and re.sub(r"(?i)\Anetmap:", "", interface) not in pcapy.findalldevs():
                hint = "[?] available interfaces: '%s'" % ",".join(pcapy.findalldevs())
                exit("[!] interface '%s' not found\n%s" % (interface, hint))

            print("[i] opening interface '%s'" % interface)
            try:
                _caps.append(pcapy.open_live(interface, SNAP_LEN, True, CAPTURE_TIMEOUT))
            except (socket.error, pcapy.PcapError):
                if "permitted" in str(sys.exc_info()[1]):
                    exit("[!] permission problem occurred ('%s')" % sys.exc_info()[1])
                elif "No such device" in str(sys.exc_info()[1]):
                    exit("[!] no such device '%s'" % interface)
                else:
                    raise

    # sanity checks of "host:port"-style configuration values
    if config.LOG_SERVER and ':' not in config.LOG_SERVER:
        exit("[!] invalid configuration value for 'LOG_SERVER' ('%s')" % config.LOG_SERVER)

    if config.SYSLOG_SERVER and not len(config.SYSLOG_SERVER.split(':')) == 2:
        exit("[!] invalid configuration value for 'SYSLOG_SERVER' ('%s')" % config.SYSLOG_SERVER)

    if config.LOGSTASH_SERVER and not len(config.LOGSTASH_SERVER.split(':')) == 2:
        exit("[!] invalid configuration value for 'LOGSTASH_SERVER' ('%s')" % config.LOGSTASH_SERVER)

    if config.REMOTE_SEVERITY_REGEX:
        try:
            re.compile(config.REMOTE_SEVERITY_REGEX)
        except re.error:
            exit("[!] invalid configuration value for 'REMOTE_SEVERITY_REGEX' ('%s')" % config.REMOTE_SEVERITY_REGEX)

    if config.CAPTURE_FILTER:
        # apply BPF filter to every opened capture handle (best effort)
        print("[i] setting capture filter '%s'" % config.CAPTURE_FILTER)
        for _cap in _caps:
            try:
                _cap.setfilter(config.CAPTURE_FILTER)
            except:
                pass

    if _multiprocessing:
        _init_multiprocessing()

    if not IS_WIN and not config.DISABLE_CPU_AFFINITY:
        # best-effort CPU affinity setup via external 'schedtool'
        try:
            try:
                mod = int(subprocess.check_output("grep -c ^processor /proc/cpuinfo", stderr=subprocess.STDOUT, shell=True).strip())
                used = subprocess.check_output("for pid in $(ps aux | grep python | grep sensor.py | grep -E -o 'root[ ]*[0-9]*' | tr -d '[:alpha:] '); do schedtool $pid; done | grep -E -o 'AFFINITY .*' | cut -d ' ' -f 2 | grep -v 0xf", stderr=subprocess.STDOUT, shell=True).strip().split('\n')
                max_used = max(int(_, 16) for _ in used)
                affinity = max(1, (max_used << 1) % 2 ** mod)
            except:
                affinity = 1
            p = subprocess.Popen("schedtool -n -2 -M 2 -p 10 -a 0x%02x %d" % (affinity, os.getpid()), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            _, stderr = p.communicate()
            # NOTE(review): under Python 3 communicate() returns bytes here, so
            # '"not found" in stderr' would raise TypeError (swallowed by the
            # outer bare except) - verify intended behavior
            if "not found" in stderr:
                msg, _ = "[?] please install 'schedtool' for better CPU scheduling", platform.linux_distribution()[0].lower()
                for distro, install in {("fedora", "centos"): "sudo yum install schedtool", ("debian", "ubuntu"): "sudo apt-get install schedtool"}.items():
                    if _ in distro:
                        msg += " (e.g. '%s')" % install
                        break
                print(msg)
        except:
            pass
def _init_multiprocessing():
    """
    Inits worker processes used in multiprocessing mode

    Allocates a shared mmap ring buffer for captured packets, a shared block
    counter and PROCESS_COUNT - 1 daemon worker processes running worker()
    """

    global _buffer
    global _multiprocessing
    global _n

    if _multiprocessing:
        print("[i] preparing capture buffer...")
        try:
            _buffer = mmap.mmap(-1, config.CAPTURE_BUFFER)  # http://www.alexonlinux.com/direct-io-in-python

            # zero-fill the whole buffer chunk by chunk, then rewind
            _ = b"\x00" * MMAP_ZFILL_CHUNK_LENGTH
            for i in xrange(config.CAPTURE_BUFFER // MMAP_ZFILL_CHUNK_LENGTH):
                _buffer.write(_)
            _buffer.seek(0)
        except KeyboardInterrupt:
            raise
        except:
            exit("[!] unable to allocate network capture buffer. Please adjust value of 'CAPTURE_BUFFER'")

        # shared (unlocked) counter of blocks written into the buffer
        _n = _multiprocessing.Value('L', lock=False)

        try:
            for i in xrange(config.PROCESS_COUNT - 1):
                process = _multiprocessing.Process(target=worker, name=str(i), args=(_buffer, _n, i, config.PROCESS_COUNT - 1, _process_packet))
                process.daemon = True
                process.start()
        except TypeError:  # Note: https://github.com/stamparm/maltrail/issues/11823
            # fall back to single-process operation
            _buffer = None
            _multiprocessing = None
        else:
            print("[i] created %d more processes (out of total %d)" % (config.PROCESS_COUNT - 1, config.PROCESS_COUNT))
def monitor():
    """
    Sniffs/monitors given capturing interface

    Runs one capture loop per opened handle (threaded when there are several),
    dispatching each packet either to the shared multiprocessing buffer or
    directly to _process_packet(), and cleans up workers on exit
    """

    print("[^] running...")

    def packet_handler(datalink, header, packet):
        # determines the IP layer offset for the given datalink type and
        # forwards the packet for processing
        global _count

        ip_offset = None
        try:
            dlt_offset = DLT_OFFSETS[datalink]
        except KeyError:
            log_error("Received unexpected datalink (%d)" % datalink, single=True)
            return

        try:
            if datalink == pcapy.DLT_RAW:
                ip_offset = dlt_offset
            elif datalink == pcapy.DLT_PPP:
                if packet[2:4] in (b"\x00\x21", b"\x00\x57"):  # (IPv4, IPv6)
                    ip_offset = dlt_offset
            elif datalink == pcapy.DLT_NULL:
                if packet[0:4] in (b"\x02\x00\x00\x00", b"\x23\x00\x00\x00"):  # (IPv4, IPv6)
                    ip_offset = dlt_offset
            elif dlt_offset >= 2:
                # Ethernet-like frame: honor an optional 802.1Q VLAN tag
                if packet[dlt_offset - 2:dlt_offset] == b"\x81\x00":  # VLAN
                    dlt_offset += 4
                if packet[dlt_offset - 2:dlt_offset] in (b"\x08\x00", b"\x86\xdd"):  # (IPv4, IPv6)
                    ip_offset = dlt_offset
        except IndexError:
            pass

        if ip_offset is None:
            return

        try:
            if six.PY3:  # https://github.com/helpsystems/pcapy/issues/37#issuecomment-530795813
                sec, usec = [int(_) for _ in ("%.6f" % time.time()).split('.')]
            else:
                sec, usec = header.getts()

            if _multiprocessing:
                # hand packet over to worker processes via the shared buffer
                block = struct.pack("=III", sec, usec, ip_offset) + packet

                if _locks.count:
                    _locks.count.acquire()

                write_block(_buffer, _count, block)
                _n.value = _count = _count + 1

                if _locks.count:
                    _locks.count.release()
            else:
                _process_packet(packet, sec, usec, ip_offset)
        except socket.timeout:
            pass

    try:
        def _(_cap):
            # capture loop for a single handle; increments _done_count when an
            # offline pcap source is exhausted
            global _done_count

            datalink = _cap.datalink()

            #
            # NOTE: currently an issue with pcapy-png and loop()
            #
            # if six.PY3 and not config.pcap_file:  # https://github.com/helpsystems/pcapy/issues/37#issuecomment-530795813
            #     def _loop_handler(header, packet):
            #         packet_handler(datalink, header, packet)
            #
            #     _cap.loop(-1, _loop_handler)
            # else:
            while True:
                success = False
                try:
                    (header, packet) = _cap.next()
                    if header is not None:
                        success = True
                        packet_handler(datalink, header, packet)
                    elif config.pcap_file:
                        with _done_lock:
                            _done_count += 1
                        break
                except (pcapy.PcapError, socket.timeout):
                    pass

                if not success:
                    time.sleep(REGULAR_SENSOR_SLEEP_TIME)

        if config.profile and len(_caps) == 1:
            print("[=] will store profiling results to '%s'..." % config.profile)
            _(_caps[0])
        else:
            if len(_caps) > 1:
                # locks are only needed when several capture threads are running
                if _multiprocessing:
                    _locks.count = threading.Lock()
                _locks.connect_sec = threading.Lock()

            for _cap in _caps:
                threading.Thread(target=_, args=(_cap,)).start()

            # wait until all pcap sources are done (or forever in live mode)
            while _caps and not _done_count == (config.pcap_file or "").count(',') + 1:
                time.sleep(1)

            if not config.pcap_file:
                print("[i] all capturing interfaces closed")
    except SystemError as ex:
        if "error return without" in str(ex):
            print("\r[x] stopping (Ctrl-C pressed)")
        else:
            raise
    except KeyboardInterrupt:
        print("\r[x] stopping (Ctrl-C pressed)")
    finally:
        print("\r[i] cleaning up...")

        if _multiprocessing:
            try:
                # signal each worker to finish via END marker blocks
                for _ in xrange(config.PROCESS_COUNT - 1):
                    write_block(_buffer, _n.value, b"", BLOCK_MARKER.END)
                    _n.value = _n.value + 1
                while _multiprocessing.active_children():
                    time.sleep(REGULAR_SENSOR_SLEEP_TIME)
            except KeyboardInterrupt:
                pass

        if config.pcap_file:
            flush_condensed_events(True)
def main():
    """Sensor entry point: pre-process argv, parse options, then run monitor().

    Mutates sys.argv in place before optparse sees it:
      -q    redirects stdout to the null device immediately
      -i    folds any file paths following the interface value into a single
            comma-separated argument (so multiple pcap files can be given)
    """
    # NOTE: xrange/print() mix suggests a py2/py3 compat layer defined
    # elsewhere in this file — TODO confirm.
    for i in xrange(1, len(sys.argv)):
        if sys.argv[i] == "-q":
            sys.stdout = open(os.devnull, 'w')
        if sys.argv[i] == "-i":
            # Collapse trailing file arguments into the "-i" value itself;
            # consumed entries are blanked (not removed) to keep indices stable.
            for j in xrange(i + 2, len(sys.argv)):
                value = sys.argv[j]
                if os.path.isfile(value):
                    sys.argv[i + 1] += ",%s" % value
                    sys.argv[j] = ''
                else:
                    break
    print("%s (sensor) #v%s {%s}\n" % (NAME, VERSION, HOMEPAGE))
    if "--version" in sys.argv:
        raise SystemExit
    parser = optparse.OptionParser(version=VERSION)
    parser.add_option("-c", dest="config_file", default=CONFIG_FILE, help="configuration file (default: '%s')" % os.path.split(CONFIG_FILE)[-1])
    parser.add_option("-r", dest="pcap_file", help="pcap file for offline analysis")
    parser.add_option("-p", dest="plugins", help="plugin(s) to be used per event")
    parser.add_option("-q", "--quiet", dest="quiet", action="store_true", help="turn off regular output")
    parser.add_option("--console", dest="console", action="store_true", help="print events to console")
    parser.add_option("--offline", dest="offline", action="store_true", help="disable (online) trail updates")
    parser.add_option("--debug", dest="debug", action="store_true", help=optparse.SUPPRESS_HELP)
    parser.add_option("--profile", dest="profile", help=optparse.SUPPRESS_HELP)
    patch_parser(parser)
    options, _ = parser.parse_args()
    print("[*] starting @ %s\n" % time.strftime("%X /%Y-%m-%d/"))
    read_config(options.config_file)
    # Command-line string/bool options override values loaded from the config
    # file (copied into the global `config` object).
    for option in dir(options):
        if isinstance(getattr(options, option), (six.string_types, bool)) and not option.startswith('_'):
            config[option] = getattr(options, option)
    if options.debug:
        # Debug mode: single process, console output, verbose diagnostics.
        config.console = True
        config.PROCESS_COUNT = 1
        config.SHOW_DEBUG = True
    if options.pcap_file:
        if options.pcap_file == '-':
            print("[i] using STDIN")
        else:
            # Validate every pcap path up front before capture starts.
            for _ in options.pcap_file.split(','):
                if not os.path.isfile(_):
                    exit("[!] missing pcap file '%s'" % _)
            print("[i] using pcap file(s) '%s'" % options.pcap_file)
    if not config.DISABLE_CHECK_SUDO and not check_sudo():
        exit("[!] please run '%s' with root privileges" % __file__)
    try:
        init()
        if config.profile:
            # Truncate/create the profile output file, then run under cProfile.
            open(config.profile, "w+b").write("")
            cProfile.run("monitor()", config.profile)
        else:
            monitor()
    except KeyboardInterrupt:
        print("\r[x] stopping (Ctrl-C pressed)")
if __name__ == "__main__":
    # Top-level error boundary: translate any failure mode into an exit code
    # and a human-readable message, then hard-exit.
    code = 0
    try:
        main()
    except SystemExit as ex:
        # exit("message") paths land here; a non-trivial message means failure.
        if isinstance(get_ex_message(ex), six.string_types) and get_ex_message(ex).strip('0'):
            print(get_ex_message(ex))
            code = 1
    except IOError:
        # presumably broken stdout/stderr pipe — see linked hint
        log_error("\n\n[!] session abruptly terminated\n[?] (hint: \"https://stackoverflow.com/a/20997655\")")
        code = 1
    except Exception:
        # Unexpected crash: log full traceback and ask the user to report it.
        msg = "\r[!] unhandled exception occurred ('%s')" % sys.exc_info()[1]
        msg += "\n[x] please report the following details at 'https://github.com/stamparm/maltrail/issues':\n---\n'%s'\n---" % traceback.format_exc()
        log_error("\n\n%s" % msg.replace("\r", ""))
        print(msg)
        code = 1
    finally:
        if not any(_ in sys.argv for _ in ("--version", "-h", "--help")):
            print("\n[*] ending @ %s" % time.strftime("%X /%Y-%m-%d/"))
        # os._exit skips interpreter cleanup — immediate termination even if
        # capture threads/processes are still alive.
        os._exit(code)
| |
#!/usr/bin/env python3
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2018 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
import struct
import re
import os
import os.path
import sys
import hashlib
import datetime
import time
from collections import namedtuple
from binascii import hexlify, unhexlify
settings = {}
def hex_switchEndian(s):
    """Reverse the byte order of a hex string (two hex chars per byte)."""
    pairs = []
    for pos in range(0, len(s), 2):
        pairs.append(s[pos:pos + 2].encode())
    pairs.reverse()
    return b''.join(pairs).decode()
def uint32(x):
    """Truncate *x* to its low 32 bits (unsigned)."""
    mask = 0xffffffff
    return x & mask
def bytereverse(x):
    """Swap the byte order of the 32-bit word *x*."""
    b0 = x << 24
    b1 = (x << 8) & 0x00ff0000
    b2 = (x >> 8) & 0x0000ff00
    b3 = x >> 24
    return (b0 | b1 | b2 | b3) & 0xffffffff
def bufreverse(in_buf):
    """Byte-swap every 32-bit word of *in_buf* in place-order."""
    out_words = []
    for pos in range(0, len(in_buf), 4):
        (word,) = struct.unpack('@I', in_buf[pos:pos + 4])
        # inline 32-bit byte swap (same math as bytereverse())
        swapped = ((word << 24) | ((word << 8) & 0x00ff0000) |
                   ((word >> 8) & 0x0000ff00) | (word >> 24)) & 0xffffffff
        out_words.append(struct.pack('@I', swapped))
    return b''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 32-bit words in *in_buf*."""
    words = [in_buf[pos:pos + 4] for pos in range(0, len(in_buf), 4)]
    return b''.join(reversed(words))
def calc_hdr_hash(blk_hdr):
    """Return the double-SHA256 digest of the raw block header bytes."""
    first_round = hashlib.sha256(blk_hdr).digest()
    return hashlib.sha256(first_round).digest()
def calc_hash_str(blk_hdr):
    """Return the block hash of *blk_hdr* as the conventional hex string.

    Double-SHA256 the header, then swap bytes within each word and reverse
    the word order to get display (big-endian) orientation.
    """
    raw = wordreverse(bufreverse(calc_hdr_hash(blk_hdr)))
    return hexlify(raw).decode('utf-8')
def get_blk_dt(blk_hdr):
    """Extract (month-start datetime, raw nTime) from a block header.

    nTime lives at byte offset 68 of the 80-byte header, little-endian.
    NOTE(review): fromtimestamp() uses the local timezone — month
    boundaries therefore depend on where the script runs.
    """
    (n_time,) = struct.unpack("<I", blk_hdr[68:68 + 4])
    stamp = datetime.datetime.fromtimestamp(n_time)
    month_start = datetime.datetime(stamp.year, stamp.month, 1)
    return (month_start, n_time)
# When getting the list of block hashes, undo any byte reversals.
def get_block_hashes(settings):
    """Read the ordered block-hash list from settings['hashlist'].

    Each line holds one hex hash.  When settings['rev_hash_bytes'] is the
    string 'true', every hash is byte-reversed as it is read.  Returns the
    hashes as a list in file (height) order.
    """
    blkindex = []
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(settings['hashlist'], "r", encoding="utf8") as f:
        reverse = settings['rev_hash_bytes'] == 'true'
        for line in f:
            line = line.rstrip()
            if reverse:
                line = hex_switchEndian(line)
            blkindex.append(line)
    print("Read " + str(len(blkindex)) + " hashes")
    return blkindex
# The block map shouldn't give or receive byte-reversed hashes.
def mkblockmap(blkindex):
    """Build a {block hash: height} map from the ordered hash list."""
    return {blk_hash: height for height, blk_hash in enumerate(blkindex)}
# Block header and extent on disk: input file number (fn), byte offset of
# the block payload within that file (taken just after the 80-byte header
# is read), the raw 8-byte magic+length prefix (inhdr), the header bytes
# (blkhdr), and the payload size excluding the header (size).
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
    """Copy blocks from input blk*.dat files into height-ordered output.

    Blocks are read sequentially from the input files.  A block that
    appears on disk before its predecessors is parked as an extent (plus
    an in-memory cache bounded by settings['out_of_order_cache_sz'])
    until the chain catches up to it.
    """

    def __init__(self, settings, blkindex, blkmap):
        self.settings = settings
        self.blkindex = blkindex  # block hashes in height order
        self.blkmap = blkmap      # block hash -> height
        self.inFn = 0             # current input file number
        self.inF = None           # current input file handle
        self.outFn = 0            # current output file number
        self.outsz = 0            # bytes written to the current output file
        self.outF = None
        self.outFname = None
        self.blkCountIn = 0       # blocks read from input
        self.blkCountOut = 0      # blocks written to output
        self.lastDate = datetime.datetime(2000, 1, 1)
        self.highTS = 1408893517 - 315360000  # highest block timestamp seen
        self.timestampSplit = False
        self.fileOutput = True    # single output file vs. blk*.dat directory
        self.setFileTime = False
        self.maxOutSz = settings['max_out_sz']
        if 'output' in settings:
            self.fileOutput = False
        if settings['file_timestamp'] != 0:
            self.setFileTime = True
        if settings['split_timestamp'] != 0:
            self.timestampSplit = True
        # Extents and cache for out-of-order blocks
        self.blockExtents = {}
        self.outOfOrderData = {}
        self.outOfOrderSize = 0  # running total size for items in outOfOrderData

    def writeBlock(self, inhdr, blk_hdr, rawblock):
        """Append one block (magic+length prefix, header, payload) to the
        output, rotating output files on size and month boundaries."""
        blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
        if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
            # Size-based rotation (directory-output mode only).
            self.outF.close()
            if self.setFileTime:
                os.utime(self.outFname, (int(time.time()), self.highTS))
            self.outF = None
            self.outFname = None
            self.outFn = self.outFn + 1
            self.outsz = 0

        (blkDate, blkTS) = get_blk_dt(blk_hdr)
        if self.timestampSplit and (blkDate > self.lastDate):
            # Month-based rotation when split_timestamp is enabled.
            print("New month " + blkDate.strftime("%Y-%m") + " @ " + self.hash_str)
            self.lastDate = blkDate
            if self.outF:
                self.outF.close()
                if self.setFileTime:
                    os.utime(self.outFname, (int(time.time()), self.highTS))
                self.outF = None
                self.outFname = None
                self.outFn = self.outFn + 1
                self.outsz = 0

        if not self.outF:
            if self.fileOutput:
                self.outFname = self.settings['output_file']
            else:
                self.outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
            print("Output file " + self.outFname)
            self.outF = open(self.outFname, "wb")

        self.outF.write(inhdr)
        self.outF.write(blk_hdr)
        self.outF.write(rawblock)
        self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)

        self.blkCountOut = self.blkCountOut + 1
        if blkTS > self.highTS:
            self.highTS = blkTS

        if (self.blkCountOut % 1000) == 0:
            print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
                  (self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))

    def inFileName(self, fn):
        """Path of input file number *fn* inside the input directory."""
        return os.path.join(self.settings['input'], "blk%05d.dat" % fn)

    def fetchBlock(self, extent):
        '''Fetch block contents from disk given extents'''
        with open(self.inFileName(extent.fn), "rb") as f:
            f.seek(extent.offset)
            return f.read(extent.size)

    def copyOneBlock(self):
        '''Find the next block to be written in the input, and copy it to the output.'''
        extent = self.blockExtents.pop(self.blkCountOut)
        if self.blkCountOut in self.outOfOrderData:
            # If the data is cached, use it from memory and remove from the cache
            rawblock = self.outOfOrderData.pop(self.blkCountOut)
            self.outOfOrderSize -= len(rawblock)
        else:  # Otherwise look up data on disk
            rawblock = self.fetchBlock(extent)
        self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)

    def run(self):
        """Main loop: scan the input files, emitting blocks in height order."""
        while self.blkCountOut < len(self.blkindex):
            if not self.inF:
                fname = self.inFileName(self.inFn)
                print("Input file " + fname)
                try:
                    self.inF = open(fname, "rb")
                except IOError:
                    print("Premature end of block data")
                    return

            inhdr = self.inF.read(8)
            # EOF or zero padding at the end of a blk file: advance to the
            # next input file.  inhdr[0:1] keeps the comparison in bytes —
            # indexing a bytes object yields an int on Python 3, so the
            # original `inhdr[0] == "\0"` never matched.
            if (not inhdr or (inhdr[0:1] == b"\0")):
                self.inF.close()
                self.inF = None
                self.inFn = self.inFn + 1
                continue

            inMagic = inhdr[:4]
            if (inMagic != self.settings['netmagic']):
                print("Invalid magic: " + hexlify(inMagic).decode('utf-8'))
                return
            inLenLE = inhdr[4:]
            su = struct.unpack("<I", inLenLE)
            inLen = su[0] - 80  # length without header
            blk_hdr = self.inF.read(80)
            inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)

            self.hash_str = calc_hash_str(blk_hdr)
            # Fixed: the original consulted the module-global `blkmap` and
            # `settings` here instead of the instance's own copies.
            if not self.hash_str in self.blkmap:
                # Because blocks can be written to files out-of-order as of 0.10, the script
                # may encounter blocks it doesn't know about. Treat as debug output.
                if self.settings['debug_output'] == 'true':
                    print("Skipping unknown block " + self.hash_str)
                self.inF.seek(inLen, os.SEEK_CUR)
                continue

            blkHeight = self.blkmap[self.hash_str]
            self.blkCountIn += 1

            if self.blkCountOut == blkHeight:
                # If in-order block, just copy
                rawblock = self.inF.read(inLen)
                self.writeBlock(inhdr, blk_hdr, rawblock)

                # See if we can catch up to prior out-of-order blocks
                while self.blkCountOut in self.blockExtents:
                    self.copyOneBlock()
            else:  # If out-of-order, skip over block data for now
                self.blockExtents[blkHeight] = inExtent
                if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
                    # If there is space in the cache, read the data
                    # Reading the data in file sequence instead of seeking and fetching it later is preferred,
                    # but we don't want to fill up memory
                    self.outOfOrderData[blkHeight] = self.inF.read(inLen)
                    self.outOfOrderSize += inLen
                else:  # If no space in cache, seek forward
                    self.inF.seek(inLen, os.SEEK_CUR)

        print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-data.py CONFIG-FILE")
        sys.exit(1)

    # Parse the key=value config file.  Raw strings for the regexes:
    # '\s'/'\w' in plain literals raise SyntaxWarning on modern Python.
    # 'with' ensures the handle is closed on any parse error.
    with open(sys.argv[1], encoding="utf8") as f:
        for line in f:
            # skip comment lines
            if re.search(r'^\s*#', line):
                continue
            # parse key=value lines
            m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
            if m is None:
                continue
            settings[m.group(1)] = m.group(2)

    # Force hash byte format setting to be lowercase to make comparisons easier.
    # Also place upfront in case any settings need to know about it.
    if 'rev_hash_bytes' not in settings:
        settings['rev_hash_bytes'] = 'false'
    settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()

    # Defaults (mainnet magic / genesis, 1 GB output files, 100 MB cache).
    if 'netmagic' not in settings:
        settings['netmagic'] = 'f9beb4d9'
    if 'genesis' not in settings:
        settings['genesis'] = '000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f'
    if 'input' not in settings:
        settings['input'] = 'input'
    if 'hashlist' not in settings:
        settings['hashlist'] = 'hashlist.txt'
    if 'file_timestamp' not in settings:
        settings['file_timestamp'] = 0
    if 'split_timestamp' not in settings:
        settings['split_timestamp'] = 0
    if 'max_out_sz' not in settings:
        settings['max_out_sz'] = 1000 * 1000 * 1000
    if 'out_of_order_cache_sz' not in settings:
        settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
    if 'debug_output' not in settings:
        settings['debug_output'] = 'false'

    # Normalize types: config values arrive as strings.
    settings['max_out_sz'] = int(settings['max_out_sz'])
    settings['split_timestamp'] = int(settings['split_timestamp'])
    settings['file_timestamp'] = int(settings['file_timestamp'])
    settings['netmagic'] = unhexlify(settings['netmagic'].encode('utf-8'))
    settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
    settings['debug_output'] = settings['debug_output'].lower()

    if 'output_file' not in settings and 'output' not in settings:
        print("Missing output file / directory")
        sys.exit(1)

    blkindex = get_block_hashes(settings)
    blkmap = mkblockmap(blkindex)

    # Block hash map won't be byte-reversed. Neither should the genesis hash.
    if not settings['genesis'] in blkmap:
        print("Genesis block not found in hashlist")
    else:
        BlockDataCopier(settings, blkindex, blkmap).run()
| |
#!/usr/bin/env python3
#
# Copyright (c) 2016 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Light Virtual Network Function."""
import types
import time
from empower.main import RUNTIME
import empower.logger
LOG = empower.logger.get_logger()
# add lvnf message sent, no status received
PROCESS_SPAWNING = "spawning"
# add lvnf message sent, status received (process is running)
PROCESS_RUNNING = "running"
# del lvnf message sent, no status received
PROCESS_STOPPING = "stopping"
# del lvnf message sent, status
PROCESS_STOPPED = "stopped"
# del lvnf message sent, no status received yet
PROCESS_MIGRATING_STOP = "migrating_stop"
# add lvnf message sent, no status received yet
PROCESS_MIGRATING_START = "migrating_start"
class LVNF(object):
    """A Light Virtual Network Function.

    An LVNF is an instance of an Image.  Each Image consists of a Click
    script implementing the specific VNF; the EmPOWER Agent generates the
    boilerplate (one port)::

        in_0 :: FromHost(vnf-br0-0-0);
        out_0 :: ToHost(vnf-br0-0-0);

    vnf-<bridge>-<k>-<n> is the virtual interface created by the agent:
    <bridge> is the OVS bridge, <k> a per-agent VNF counter, <n> the
    virtual port id.  VNF authors use 'in_0'/'out_0' as their script's
    inputs/outputs, e.g.::

        in_0 -> null::Null() -> out_0

    After creation an LVNF is not automatically spawned in a CPP:
    assign the ``cpp`` attribute to (re)deploy it.  If the LVNF was
    already deployed it is first undeployed from the original CPP.
    Setting ``cpp`` is asynchronous — poll ``state`` or register a
    callback for the LVNF status event.  Unlike LVAPs, an LVNF is bound
    to exactly one tenant; the runtime keeps no global LVNF list.

    Attributes:
        cpp: Pointer to the CPP hosting this LVNF (CPP)
        lvnf_id: The lvnf id (UUID)
        tenant_id: The Tenant id (UUID)
        image: The Image used by this LVNF (Image)
        ports: The virtual ports supported by this LVNF (Map)
        returncode: The Click process return code, only if stopped (Integer)
        state: The status of the process (see the PROCESS_* constants)
    """

    def __init__(self, lvnf_id, tenant_id, image, cpp):
        self.lvnf_id = lvnf_id
        self.tenant_id = tenant_id
        self.image = image
        self.ports = {}
        self.returncode = None
        self.context = None
        self.__state = None            # current lifecycle state (PROCESS_*)
        self.__cpp = cpp
        self.__target_cpp = None       # destination CPP during a migration
        self.__migration_timer = None  # wall-clock start of a migration
        self.__creation_timer = None   # wall-clock start of the first spawn
        self.__chains = []             # (lvap, rule, out_port, in_port) saved across migration

    def start(self):
        """Spawn LVNF."""
        tenant = RUNTIME.tenants[self.tenant_id]
        if self.lvnf_id in tenant.lvnfs:
            # fixed: id is now interpolated into the message (the original
            # passed it as a stray second KeyError argument)
            raise KeyError("Already defined %s" % self.lvnf_id)
        tenant.lvnfs[self.lvnf_id] = self
        self.state = PROCESS_SPAWNING

    def stop(self):
        """Remove LVNF."""
        self.state = PROCESS_STOPPING

    @property
    def state(self):
        """Return the current lifecycle state."""
        return self.__state

    @state.setter
    def state(self, state):
        """Move to *state* by dispatching to the matching transition handler.

        Handlers are named ``_<from>_<to>`` (``_none_<to>`` for the first
        transition) and are responsible for updating ``self.__state``.

        Raises:
            IOError: if no handler exists for the requested transition.
        """
        LOG.info("LVNF %s transition %s->%s", self.lvnf_id, self.state, state)
        if self.state:
            method = "_%s_%s" % (self.state, state)
        else:
            method = "_none_%s" % state
        if hasattr(self, method):
            callback = getattr(self, method)
            callback()
            return
        raise IOError("Invalid transition %s -> %s" % (self.state, state))

    def _running_spawning(self):
        # NOTE(review): respawn request while running is treated as a
        # migration stop — confirm this is the intended redeploy path.
        # set new state
        self.__state = PROCESS_MIGRATING_STOP
        # remove lvnf
        self.cpp.connection.send_del_lvnf(self.lvnf_id)

    def _running_stopping(self):
        # set new state
        self.__state = PROCESS_STOPPING
        # send LVNF del message
        self.cpp.connection.send_del_lvnf(self.lvnf_id)

    def _stopping_stopped(self):
        # set new state
        self.__state = PROCESS_STOPPED

    def _spawning_running(self):
        delta = int((time.time() - self.__creation_timer) * 1000)
        LOG.info("LVNF %s started in %sms", self.lvnf_id, delta)
        self.__state = PROCESS_RUNNING

    def _spawning_stopped(self):
        self.__state = PROCESS_STOPPED

    def _running_migrating_stop(self):
        # set time
        self.__migration_timer = time.time()
        # set new state
        self.__state = PROCESS_MIGRATING_STOP
        # remove lvnf
        self.cpp.connection.send_del_lvnf(self.lvnf_id)
        # look for LVAPs that point to this LVNF and save their chains so
        # they can be restored after migration completes
        self.__chains = []
        for lvap in RUNTIME.lvaps.values():
            for out_port in lvap.ports:
                for rule in list(lvap.ports[out_port].next):
                    LOG.info("rule lvnf %s", rule)
                    v_port = lvap.ports[out_port].next[rule]
                    in_port = v_port.virtual_port_id
                    if v_port in self.ports.values():
                        save = (lvap, rule, out_port, in_port)
                        self.__chains.append(save)
                        del lvap.ports[0].next[rule]

    def _migrating_stop_migrating_start(self):
        # set new cpp
        self.cpp = self.__target_cpp
        # set new state
        self.__state = PROCESS_MIGRATING_START
        # add lvnf
        self.cpp.connection.send_add_lvnf(self.image, self.lvnf_id,
                                          self.tenant_id, self.context)

    def _migrating_start_running(self):
        self.__state = PROCESS_RUNNING
        delta = int((time.time() - self.__migration_timer) * 1000)
        LOG.info("LVNF %s migration took %sms", self.lvnf_id, delta)
        # re-attach the LVAP chains saved in _running_migrating_stop
        LOG.info("Restoring chains")
        for chain in self.__chains:
            vnf = chain[0]
            rule = chain[1]
            out_port = chain[2]
            in_port = chain[3]
            LOG.info("LVAP %s port [%u] next [%s] -> %u", vnf.addr,
                     out_port, rule, in_port)
            vnf.ports[out_port].next[rule] = self.ports[in_port]
        self.__chains = []

    def _none_spawning(self):
        # set timer
        self.__creation_timer = time.time()
        # set new state
        self.__state = PROCESS_SPAWNING
        # send LVNF add message
        self.cpp.connection.send_add_lvnf(self.image,
                                          self.lvnf_id,
                                          self.tenant_id)

    def _none_running(self):
        self.__state = PROCESS_RUNNING

    @property
    def cpp(self):
        """Return the CPP."""
        return self.__cpp

    @cpp.setter
    def cpp(self, cpp):
        """Set the CPP, triggering a migration if the LVNF is running.

        Raises:
            IOError: when the LVNF is in a state from which the CPP
                cannot be changed.
        """
        if self.state == PROCESS_RUNNING:
            # save target cpp
            self.__target_cpp = cpp
            # move to new state
            self.state = PROCESS_MIGRATING_STOP
        elif self.state == PROCESS_MIGRATING_STOP:
            # set cpp
            self.__cpp = cpp
            self.__target_cpp = None
        else:
            # fixed: the original built this exception without raising it,
            # silently ignoring invalid CPP assignments
            raise IOError("Setting CPP on invalid state: %s" % self.state)

    def to_dict(self):
        """Return a JSON-serializable dictionary representing the Poll."""
        return {'lvnf_id': self.lvnf_id,
                'image': self.image,
                'tenant_id': self.tenant_id,
                'cpp': self.cpp,
                'state': self.state,
                'returncode': self.returncode,
                'ports': self.ports}

    def __eq__(self, other):
        if isinstance(other, LVNF):
            return self.lvnf_id == other.lvnf_id
        return False

    def __str__(self):
        return "LVNF %s (nb_ports=%u)\n%s" % \
            (self.lvnf_id, self.image.nb_ports, self.image.vnf)
| |
import unittest
import os
from jira.client import JIRA
from jira.exceptions import JIRAError
from jira.resources import Resource, cls_for_resource, Issue, Project, Role
TEST_ROOT = os.path.dirname(__file__)
TEST_ICON_PATH = os.path.join(TEST_ROOT, 'icon.png')
TEST_ATTACH_PATH = os.path.join(TEST_ROOT, '__init__.py')
OAUTH = True
CONSUMER_KEY = 'oauth-consumer'
KEY_CERT_FILE = '/home/bspeakmon/src/atlassian-oauth-examples/rsa.pem'
KEY_CERT_DATA = None
with open(KEY_CERT_FILE, 'r') as cert:
KEY_CERT_DATA = cert.read()
def get_jira_admin_auth():
    """JIRA client authenticated as the 'admin' user (OAuth when enabled)."""
    if not OAUTH:
        return JIRA(basic_auth=('admin', 'admin'))
    oauth_dict = {
        'access_token': 'hTxcwsbUQiFuFALf7KZHDaeAJIo3tLUK',
        'access_token_secret': 'aNCLQFP3ORNU6WY7HQISbqbhf0UudDAf',
        'consumer_key': CONSUMER_KEY,
        'key_cert': KEY_CERT_DATA,
    }
    return JIRA(oauth=oauth_dict)
def get_jira_sysadmin_auth():
    """JIRA client for the 'eviladmin' (system administrator) user."""
    if not OAUTH:
        return JIRA(basic_auth=('eviladmin', 'eviladmin'))
    oauth_dict = {
        'access_token': '4ul1ETSFo7ybbIxAxzyRal39cTrwEGFv',
        'access_token_secret': 'K83jBZnjnuVRcfjBflrKyThJa0KSjSs2',
        'consumer_key': CONSUMER_KEY,
        'key_cert': KEY_CERT_DATA,
    }
    return JIRA(oauth=oauth_dict)
def get_jira_schlub_auth():
    """JIRA client for the unprivileged 'fred' user."""
    if not OAUTH:
        return JIRA(basic_auth=('fred', 'fred'))
    oauth_dict = {
        'access_token': 'ZVDgYDyIQqJY8IFlQ446jZaURIz5ECiB',
        'access_token_secret': '5WbLBybPDg1lqqyFjyXSCsCtAWTwz1eD',
        'consumer_key': CONSUMER_KEY,
        'key_cert': KEY_CERT_DATA,
    }
    return JIRA(oauth=oauth_dict)
def find_by_key(seq, key):
    """Return the first mapping in *seq* whose 'key' entry equals *key*.

    Returns None when no item matches.
    """
    return next((item for item in seq if item['key'] == key), None)
def find_by_id(seq, id):
    """Return the first item in *seq* whose ``id`` attribute equals *id*.

    Returns None when no item matches.
    """
    for candidate in seq:
        if candidate.id == id:
            return candidate
    return None
class UniversalResourceTests(unittest.TestCase):
    """Integration tests for the universal resource finder (JIRA.find).

    Requires a live JIRA instance at localhost:2990 with fixture issue
    BULK-1 present.
    """
    def setUp(self):
        self.jira = get_jira_admin_auth()
    def test_universal_find_existing_resource(self):
        # find() on a known path should yield the same resource as issue()
        resource = self.jira.find('issue/{0}', 'BULK-1')
        issue = self.jira.issue('BULK-1')
        self.assertEqual(resource.self, issue.self)
        self.assertEqual(resource.key, issue.key)
    def test_universal_find_custom_resource(self):
        resource = Resource('nope/{0}', self.jira._options, None)  # don't need an actual session
        self.assertEqual('http://localhost:2990/jira/rest/api/2/nope/666', resource._url(('666',)))
    def test_find_invalid_resource_raises_exception(self):
        with self.assertRaises(JIRAError) as cm:
            self.jira.find('woopsydoodle/{0}', '666')
        ex = cm.exception
        self.assertEqual(ex.status_code, 404)
        self.assertIsNotNone(ex.text)
        self.assertEqual(ex.url, 'http://localhost:2990/jira/rest/api/2/woopsydoodle/666')
    def test_verify_works_with_https(self):
        # success is simply that construction doesn't raise
        self.jira = JIRA(options={'server': 'https://jira.atlassian.com'})
    def test_verify_fails_without_https(self):
        # we need a server that doesn't do https
        self.jira = JIRA(options={'server': 'https://www.yahoo.com'})
        self.assertRaises(JIRAError, self.jira.issue, 'BULK-1')
class ResourceTests(unittest.TestCase):
    """Unit tests for URL-to-resource-class resolution (no server needed)."""
    def setUp(self):
        pass
    def test_cls_for_resource(self):
        self.assertEqual(cls_for_resource('https://jira.atlassian.com/rest/api/2/issue/JRA-1330'), Issue)
        self.assertEqual(cls_for_resource('http://localhost:2990/jira/rest/api/2/project/BULK'), Project)
        self.assertEqual(cls_for_resource('http://imaginary-jira.com/rest/api/2/project/IMG/role/10002'), Role)
        # unrecognized URLs fall back to the generic Resource class
        self.assertEqual(cls_for_resource('http://customized-jira.com/rest/plugin-resource/4.5/json/getMyObject'), Resource)
class ApplicationPropertiesTests(unittest.TestCase):
    """Integration tests for reading/writing JIRA application properties."""
    def setUp(self):
        # this user has jira-system-administrators membership
        self.jira = get_jira_sysadmin_auth()
    def test_application_properties(self):
        props = self.jira.application_properties()
        self.assertEqual(len(props), 12)
    def test_application_property(self):
        clone_prefix = self.jira.application_properties(key='jira.clone.prefix')
        self.assertEqual(clone_prefix['value'], 'CLONE -')
    def test_set_application_property(self):
        # change the property, verify, then restore the original value
        prop = 'jira.clone.prefix'
        self.jira.set_application_property(prop, 'TCLONE -')
        self.assertEqual(self.jira.application_properties(key=prop)['value'], 'TCLONE -')
        self.jira.set_application_property(prop, 'CLONE -')
        self.assertEqual(self.jira.application_properties(key=prop)['value'], 'CLONE -')
    def test_setting_bad_property_raises(self):
        prop = 'random.nonexistent.property'
        self.assertRaises(JIRAError, self.jira.set_application_property, prop, '666')
class AttachmentTests(unittest.TestCase):
    """Integration tests for issue attachments (fixture issue BULK-3)."""
    def setUp(self):
        self.jira = get_jira_admin_auth()
    def test_attachment(self):
        attachment = self.jira.attachment('10030')
        self.assertEqual(attachment.filename, 'AdditionalPylons.jpg')
        self.assertEqual(attachment.size, 110787)
    def test_attachment_meta(self):
        meta = self.jira.attachment_meta()
        self.assertTrue(meta['enabled'])
        self.assertEqual(meta['uploadLimit'], 10485760)
    def test_add_attachment(self):
        # NOTE(review): open(TEST_ATTACH_PATH) handles are never closed here
        issue = self.jira.issue('BULK-3')
        attach_count = len(issue.fields.attachment)
        attachment = self.jira.add_attachment(issue, open(TEST_ATTACH_PATH))
        self.assertIsNotNone(attachment)
        self.assertEqual(len(self.jira.issue('BULK-3').fields.attachment), attach_count + 1)
    def test_delete(self):
        # add an attachment, delete it, and verify the count is restored
        attach_count = len(self.jira.issue('BULK-3').fields.attachment)
        attachment = self.jira.add_attachment('BULK-3', open(TEST_ATTACH_PATH))
        self.assertEqual(len(self.jira.issue('BULK-3').fields.attachment), attach_count + 1)
        attachment.delete()
        self.assertEqual(len(self.jira.issue('BULK-3').fields.attachment), attach_count)
class ComponentTests(unittest.TestCase):
    """Integration tests for project components (fixture project BULK)."""
    def setUp(self):
        self.jira = get_jira_admin_auth()
    def test_component(self):
        component = self.jira.component('10003')
        self.assertEqual(component.name, 'Bacon')
    def test_create_component(self):
        # create a component, check every attribute round-trips, then clean up
        bulk_proj = self.jira.project('BULK')
        component = self.jira.create_component('Test Component', bulk_proj, description='testing!!', leadUserName='fred',
                                               assigneeType='PROJECT_LEAD', isAssigneeTypeValid=False)
        self.assertEqual(component.name, 'Test Component')
        self.assertEqual(component.description, 'testing!!')
        self.assertEqual(component.lead.name, 'fred')
        self.assertEqual(component.assigneeType, 'PROJECT_LEAD')
        self.assertTrue(component.isAssigneeTypeValid)
        component.delete()
    def test_component_count_related_issues(self):
        issue_count = self.jira.component_count_related_issues('10002')
        self.assertEqual(issue_count, 9)
    def test_update(self):
        component = self.jira.create_component('To be updated', 'BULK', description='stand by!', leadUserName='admin')
        component.update(name='Updated!', description='It is done.', leadUserName='fred')
        self.assertEqual(component.name, 'Updated!')
        self.assertEqual(component.description, 'It is done.')
        self.assertEqual(component.lead.name, 'fred')
        component.delete()
    def test_delete(self):
        component = self.jira.create_component('To be deleted', 'BULK', description='not long for this world')
        id = component.id
        component.delete()
        self.assertRaises(JIRAError, self.jira.component, id)
class CustomFieldOptionTests(unittest.TestCase):
    """Integration test for fetching a custom field option by id."""
    def setUp(self):
        self.jira = get_jira_admin_auth()
    def test_custom_field_option(self):
        option = self.jira.custom_field_option('10010')
        self.assertEqual(option.value, 'Mehemet')
class DashboardTests(unittest.TestCase):
    """Integration tests for dashboard listing/filtering/pagination."""
    def setUp(self):
        self.jira = get_jira_sysadmin_auth()
    def test_dashboards(self):
        dashboards = self.jira.dashboards()
        self.assertEqual(len(dashboards), 3)
    def test_dashboards_filter(self):
        # 'my' restricts the list to dashboards owned by the current user
        dashboards = self.jira.dashboards(filter='my')
        self.assertEqual(len(dashboards), 1)
        self.assertEqual(dashboards[0].id, '10031')
    def test_dashboards_startAt(self):
        # only one of three dashboards remains past offset 2
        dashboards = self.jira.dashboards(startAt=2, maxResults=2)
        self.assertEqual(len(dashboards), 1)
    def test_dashboards_maxResults(self):
        dashboards = self.jira.dashboards(maxResults=1)
        self.assertEqual(len(dashboards), 1)
    def test_dashboard(self):
        dashboard = self.jira.dashboard('10031')
        self.assertEqual(dashboard.id, '10031')
        self.assertEqual(dashboard.name, 'Evil\'O\'Administrator\'s "Funny DB"')
class FieldsTests(unittest.TestCase):
    """Integration test for listing all issue fields."""
    def setUp(self):
        self.jira = get_jira_admin_auth()
    def test_fields(self):
        fields = self.jira.fields()
        self.assertEqual(len(fields), 63)
class FilterTests(unittest.TestCase):
    """Integration tests for saved filters."""
    def setUp(self):
        self.jira = get_jira_admin_auth()
    def test_filter(self):
        filter = self.jira.filter('10016')
        self.assertEqual(filter.name, 'Bugs')
        self.assertEqual(filter.owner.name, 'admin')
    def test_favourite_filters(self):
        filters = self.jira.favourite_filters()
        self.assertEqual(len(filters), 1)
class GroupsTest(unittest.TestCase):
    """Integration tests for group picker queries."""
    def setUp(self):
        self.jira = get_jira_admin_auth()
    def test_groups(self):
        groups = self.jira.groups()
        self.assertEqual(groups['total'], 7)
    def test_groups_with_query(self):
        # substring query narrows the result set
        groups = self.jira.groups('jira-')
        self.assertEqual(groups['total'], 4)
    def test_groups_with_exclude(self):
        groups = self.jira.groups('jira-', exclude='jira-system-administrators')
        self.assertEqual(groups['total'], 3)
class IssueTests(unittest.TestCase):
def setUp(self):
self.jira = get_jira_admin_auth()
def test_issue(self):
issue = self.jira.issue('BULK-1')
self.assertEqual(issue.key, 'BULK-1')
self.assertEqual(issue.fields.summary, 'Version 2.0 Bacon issue')
def test_issue_field_limiting(self):
issue = self.jira.issue('BULK-2', fields='summary,comment')
self.assertEqual(issue.fields.summary, 'Version 1.1.1 cheese issue')
self.assertEqual(issue.fields.comment.total, 4)
self.assertFalse(hasattr(issue.fields, 'reporter'))
self.assertFalse(hasattr(issue.fields, 'progress'))
def test_issue_expandos(self):
issue = self.jira.issue('BULK-3', expand=('editmeta', 'schema'))
self.assertTrue(hasattr(issue, 'editmeta'))
self.assertTrue(hasattr(issue, 'schema'))
self.assertFalse(hasattr(issue, 'changelog'))
def test_create_issue_with_fieldargs(self):
issue = self.jira.create_issue(project={'key': 'BULK'}, summary='Test issue created',
description='blahery', issuetype={'name': 'Bug'}, customfield_10540={'key': 'XSS'})
self.assertEqual(issue.fields.summary, 'Test issue created')
self.assertEqual(issue.fields.description, 'blahery')
self.assertEqual(issue.fields.issuetype.name, 'Bug')
self.assertEqual(issue.fields.project.key, 'BULK')
self.assertEqual(issue.fields.customfield_10540.key, 'XSS')
def test_create_issue_with_fielddict(self):
fields = {
'project': {
'key': 'BULK'
},
'summary': 'Issue created from field dict',
'description': "Microphone #1, is not this a lot of fun",
'issuetype': {
'name': 'Task'
},
'customfield_10540': {
'key': 'DOC'
},
'priority': {
'name': 'Critical'
}
}
issue = self.jira.create_issue(fields=fields)
self.assertEqual(issue.fields.summary, 'Issue created from field dict')
self.assertEqual(issue.fields.description, "Microphone #1, is not this a lot of fun")
self.assertEqual(issue.fields.issuetype.name, 'Task')
self.assertEqual(issue.fields.project.key, 'BULK')
self.assertEqual(issue.fields.customfield_10540.key, 'DOC')
self.assertEqual(issue.fields.priority.name, 'Critical')
def test_create_issue_without_prefetch(self):
issue = self.jira.create_issue(prefetch=False, project={'key': 'BULK'}, summary='Test issue created',
description='blahery', issuetype={'name': 'Bug'}, customfield_10540={'key': 'XSS'})
self.assertTrue(hasattr(issue, 'self'))
self.assertFalse(hasattr(issue, 'fields'))
def test_update_with_fieldargs(self):
issue = self.jira.create_issue(project={'key': 'BULK'}, summary='Test issue for updating',
description='Will be updated shortly', issuetype={'name': 'Bug'}, customfield_10540={'key': 'XSS'})
issue.update(summary='Updated summary', description='Now updated', issuetype={'name': 'Improvement'})
self.assertEqual(issue.fields.summary, 'Updated summary')
self.assertEqual(issue.fields.description, 'Now updated')
self.assertEqual(issue.fields.issuetype.name, 'Improvement')
self.assertEqual(issue.fields.customfield_10540.key, 'XSS')
self.assertEqual(issue.fields.project.key, 'BULK')
def test_update_with_fielddict(self):
issue = self.jira.create_issue(project={'key': 'BULK'}, summary='Test issue for updating',
description='Will be updated shortly', issuetype={'name': 'Bug'}, customfield_10540={'key': 'XSS'})
fields = {
'summary': 'Issue is updated',
'description': "it sure is",
'issuetype': {
'name': 'Task'
},
'customfield_10540': {
'key': 'DOC'
},
'priority': {
'name': 'Critical'
}
}
issue.update(fields=fields)
self.assertEqual(issue.fields.summary, 'Issue is updated')
self.assertEqual(issue.fields.description, 'it sure is')
self.assertEqual(issue.fields.issuetype.name, 'Task')
self.assertEqual(issue.fields.customfield_10540.key, 'DOC')
self.assertEqual(issue.fields.priority.name, 'Critical')
def test_delete(self):
issue = self.jira.create_issue(project={'key': 'BULK'}, summary='Test issue created',
description='Not long for this world', issuetype={'name': 'Bug'}, customfield_10540={'key': 'XSS'})
key = issue.key
issue.delete()
self.assertRaises(JIRAError, self.jira.issue, key)
def test_createmeta(self):
meta = self.jira.createmeta()
self.assertEqual(len(meta['projects']), 12)
xss_proj = find_by_key(meta['projects'], 'XSS')
self.assertEqual(len(xss_proj['issuetypes']), 12)
def test_createmeta_filter_by_projectkey_and_name(self):
meta = self.jira.createmeta(projectKeys='BULK', issuetypeNames='Bug')
self.assertEqual(len(meta['projects']), 1)
self.assertEqual(len(meta['projects'][0]['issuetypes']), 1)
def test_createmeta_filter_by_projectkeys_and_name(self):
meta = self.jira.createmeta(projectKeys=('BULK', 'XSS'), issuetypeNames='Improvement')
self.assertEqual(len(meta['projects']), 2)
for project in meta['projects']:
self.assertEqual(len(project['issuetypes']), 1)
def test_createmeta_filter_by_id(self):
meta = self.jira.createmeta(projectIds=('10001', '10040'), issuetypeIds=('3', '4', '5'))
self.assertEqual(len(meta['projects']), 2)
for project in meta['projects']:
self.assertEqual(len(project['issuetypes']), 3)
def test_createmeta_expando(self):
# limit to SCR project so the call returns promptly
meta = self.jira.createmeta(projectKeys=('SCR'), expand=('projects.issuetypes.fields'))
self.assertTrue('fields' in meta['projects'][0]['issuetypes'][0])
def test_assign_issue(self):
self.assertIsNone(self.jira.assign_issue('BULK-1', 'eviladmin'))
self.assertEqual(self.jira.issue('BULK-1').fields.assignee.name, 'eviladmin')
self.assertIsNone(self.jira.assign_issue('BULK-1', 'admin'))
self.assertEqual(self.jira.issue('BULK-1').fields.assignee.name, 'admin')
def test_assign_issue_with_issue_obj(self):
issue = self.jira.issue('BULK-1')
self.assertIsNone(self.jira.assign_issue(issue, 'eviladmin'))
self.assertEqual(self.jira.issue('BULK-1').fields.assignee.name, 'eviladmin')
self.assertIsNone(self.jira.assign_issue(issue, 'admin'))
self.assertEqual(self.jira.issue('BULK-1').fields.assignee.name, 'admin')
def test_assign_to_bad_issue_raises(self):
self.assertRaises(JIRAError, self.jira.assign_issue, 'NOPE-1', 'notauser')
def test_comments(self):
comments = self.jira.comments('BULK-1')
self.assertGreaterEqual(len(comments), 29)
comments = self.jira.comments('BULK-2')
self.assertGreaterEqual(len(comments), 4)
def test_comments_with_issue_obj(self):
issue = self.jira.issue('BULK-1')
self.assertGreaterEqual(len(self.jira.comments(issue)), 29)
issue = self.jira.issue('BULK-2')
self.assertGreaterEqual(len(self.jira.comments(issue)), 4)
def test_comment(self):
comment = self.jira.comment('BULK-1', '10072')
self.assertTrue(comment.body.startswith('Mr. Bennet was so odd a mixture of quick parts'))
def test_comment_with_issue_obj(self):
issue = self.jira.issue('BULK-1')
comment = self.jira.comment(issue, '10072')
self.assertTrue(comment.body.startswith('Mr. Bennet was so odd a mixture of quick parts'))
def test_add_comment(self):
comment = self.jira.add_comment('BULK-3', 'a test comment!',
visibility={'type': 'role', 'value': 'Administrators'})
self.assertEqual(comment.body, 'a test comment!')
self.assertEqual(comment.visibility.type, 'role')
self.assertEqual(comment.visibility.value, 'Administrators')
def test_add_comment_with_issue_obj(self):
issue = self.jira.issue('BULK-3')
comment = self.jira.add_comment(issue, 'a test comment!',
visibility={'type': 'role', 'value': 'Administrators'})
self.assertEqual(comment.body, 'a test comment!')
self.assertEqual(comment.visibility.type, 'role')
self.assertEqual(comment.visibility.value, 'Administrators')
def test_update_comment(self):
comment = self.jira.add_comment('BULK-3', 'updating soon!')
comment.update(body='updated now!', visibility={'type': 'role', 'value': 'Administrators'})
self.assertEqual(comment.body, 'updated now!')
self.assertEqual(comment.visibility.type, 'role')
self.assertEqual(comment.visibility.value, 'Administrators')
def test_delete_comment(self):
comment = self.jira.add_comment('BULK-3', 'To be deleted!')
id = comment.id
comment.delete()
self.assertRaises(JIRAError, self.jira.comment, id, '')
def test_editmeta(self):
meta = self.jira.editmeta('BULK-1')
self.assertEqual(len(meta['fields']), 38)
self.assertTrue('customfield_10642' in meta['fields'])
self.assertTrue('customfield_10240' in meta['fields'])
def test_editmeta_with_issue_obj(self):
issue = self.jira.issue('BULK-1')
meta = self.jira.editmeta(issue)
self.assertEqual(len(meta['fields']), 38)
self.assertTrue('customfield_10642' in meta['fields'])
self.assertTrue('customfield_10240' in meta['fields'])
def test_remote_links(self):
links = self.jira.remote_links('QA-44')
self.assertEqual(len(links), 1)
links = self.jira.remote_links('BULK-1')
self.assertEqual(len(links), 0)
def test_remote_links_with_issue_obj(self):
issue = self.jira.issue('QA-44')
links = self.jira.remote_links(issue)
self.assertEqual(len(links), 1)
issue = self.jira.issue('BULK-1')
links = self.jira.remote_links(issue)
self.assertEqual(len(links), 0)
def test_remote_link(self):
link = self.jira.remote_link('QA-44', '10000')
self.assertEqual(link.id, 10000)
self.assertTrue(hasattr(link, 'globalId'))
self.assertTrue(hasattr(link, 'relationship'))
def test_remote_link_with_issue_obj(self):
issue = self.jira.issue('QA-44')
link = self.jira.remote_link(issue, '10000')
self.assertEqual(link.id, 10000)
self.assertTrue(hasattr(link, 'globalId'))
self.assertTrue(hasattr(link, 'relationship'))
def test_add_remote_link(self):
link = self.jira.add_remote_link('BULK-3', globalId='python-test:story.of.horse.riding',
object={'url': 'http://google.com', 'title': 'googlicious!'},
application={'name': 'far too silly', 'type': 'sketch'}, relationship='mousebending')
# creation response doesn't include full remote link info, so we fetch it again using the new internal ID
link = self.jira.remote_link('BULK-3', link.id)
self.assertEqual(link.application.name, 'far too silly')
self.assertEqual(link.application.type, 'sketch')
self.assertEqual(link.object.url, 'http://google.com')
self.assertEqual(link.object.title, 'googlicious!')
self.assertEqual(link.relationship, 'mousebending')
self.assertEqual(link.globalId, 'python-test:story.of.horse.riding')
def test_add_remote_link_with_issue_obj(self):
issue = self.jira.issue('BULK-3')
link = self.jira.add_remote_link(issue, globalId='python-test:story.of.horse.riding',
object={'url': 'http://google.com', 'title': 'googlicious!'},
application={'name': 'far too silly', 'type': 'sketch'}, relationship='mousebending')
# creation response doesn't include full remote link info, so we fetch it again using the new internal ID
link = self.jira.remote_link(issue, link.id)
self.assertEqual(link.application.name, 'far too silly')
self.assertEqual(link.application.type, 'sketch')
self.assertEqual(link.object.url, 'http://google.com')
self.assertEqual(link.object.title, 'googlicious!')
self.assertEqual(link.relationship, 'mousebending')
self.assertEqual(link.globalId, 'python-test:story.of.horse.riding')
def test_update_remote_link(self):
link = self.jira.add_remote_link('BULK-3', globalId='python-test:story.of.horse.riding',
object={'url': 'http://google.com', 'title': 'googlicious!'},
application={'name': 'far too silly', 'type': 'sketch'}, relationship='mousebending')
# creation response doesn't include full remote link info, so we fetch it again using the new internal ID
link = self.jira.remote_link('BULK-3', link.id)
link.update(object={'url': 'http://yahoo.com', 'title': 'yahooery'}, globalId='python-test:updated.id',
relationship='cheesing')
self.assertEqual(link.globalId, 'python-test:updated.id')
self.assertEqual(link.relationship, 'cheesing')
self.assertEqual(link.object.url, 'http://yahoo.com')
self.assertEqual(link.object.title, 'yahooery')
link.delete()
def test_delete_remove_link(self):
link = self.jira.add_remote_link('BULK-3', globalId='python-test:story.of.horse.riding',
object={'url': 'http://google.com', 'title': 'googlicious!'},
application={'name': 'far too silly', 'type': 'sketch'}, relationship='mousebending')
id = link.id
link.delete()
self.assertRaises(JIRAError, self.jira.remote_link, 'BULK-3', id)
def test_transitions(self):
transitions = self.jira.transitions('BULK-2')
self.assertEqual(len(transitions), 2)
def test_transitions_with_issue_obj(self):
issue = self.jira.issue('BULK-2')
transitions = self.jira.transitions(issue)
self.assertEqual(len(transitions), 2)
def test_transition(self):
transition = self.jira.transitions('BULK-2', '701')
self.assertEqual(transition[0]['name'], 'Close Issue')
def test_transition_expand(self):
transition = self.jira.transitions('BULK-2', '701', expand=('transitions.fields'))
self.assertTrue('fields' in transition[0])
def test_transition_issue_with_fieldargs(self):
issue = self.jira.create_issue(project={'key': 'BULK'}, summary='Test issue for transition created',
description='blahery', issuetype={'name': 'Bug'}, customfield_10540={'key': 'XSS'})
self.jira.transition_issue(issue.key, '2', assignee={'name': 'fred'})
issue = self.jira.issue(issue.key)
self.assertEqual(issue.fields.assignee.name, 'fred')
self.assertEqual(issue.fields.status.id, '6') # issue now 'Closed'
def test_transition_issue_obj_with_fieldargs(self):
issue = self.jira.create_issue(project={'key': 'BULK'}, summary='Test issue for transition created',
description='blahery', issuetype={'name': 'Bug'}, customfield_10540={'key': 'XSS'})
self.jira.transition_issue(issue, '2', assignee={'name': 'fred'})
issue = self.jira.issue(issue.key)
self.assertEqual(issue.fields.assignee.name, 'fred')
self.assertEqual(issue.fields.status.id, '6') # issue now 'Closed'
def test_transition_issue_with_fielddict(self):
issue = self.jira.create_issue(project={'key': 'BULK'}, summary='Test issue for transition created',
description='blahery', issuetype={'name': 'Bug'}, customfield_10540={'key': 'XSS'})
fields = {
'assignee': {
'name': 'fred'
}
}
self.jira.transition_issue(issue.key, '5', fields=fields)
issue = self.jira.issue(issue.key)
self.assertEqual(issue.fields.assignee.name, 'fred')
self.assertEqual(issue.fields.status.id, '5') # issue now 'Resolved'
@unittest.skip('test data doesn\'t support voting')
def test_votes(self):
votes = self.jira.votes('BULK-1')
self.assertEqual(votes.votes, 5)
@unittest.skip('test data doesn\'t support voting')
def test_votes_with_issue_obj(self):
issue = self.jira.issue('BULK-1')
votes = self.jira.votes(issue)
self.assertEqual(votes.votes, 5)
@unittest.skip('test data doesn\'t support voting')
def test_add_vote(self):
votes = self.jira.votes('QA-44')
self.assertEqual(votes.votes, 0)
self.jira.add_vote('QA-44')
votes = self.jira.votes('QA-44')
self.assertEqual(votes.votes, 1)
@unittest.skip('test data doesn\'t support voting')
def test_add_vote_with_issue_obj(self):
issue = self.jira.issue('QA-44')
votes = self.jira.votes(issue)
self.assertEqual(votes.votes, 0)
self.jira.add_vote(issue)
votes = self.jira.votes(issue)
self.assertEqual(votes.votes, 1)
@unittest.skip('test data doesn\'t support voting')
def test_remove_vote(self):
votes = self.jira.votes('QA-44')
self.assertEqual(votes.votes, 1)
self.jira.remove_vote('QA-44')
votes = self.jira.votes('QA-44')
self.assertEqual(votes.votes, 0)
@unittest.skip('test data doesn\'t support voting')
def test_remove_vote(self):
issue = self.jira.issue('QA-44')
votes = self.jira.votes(issue)
self.assertEqual(votes.votes, 1)
self.jira.remove_vote(issue)
votes = self.jira.votes(issue)
self.assertEqual(votes.votes, 0)
@unittest.skip('test data doesn\'t support watching')
def test_watchers(self):
watchers = self.jira.watchers('BULK-1')
self.assertEqual(watchers.watchCount, 18)
@unittest.skip('test data doesn\'t support watching')
def test_watchers_with_issue_obj(self):
issue = self.jira.issue('BULK-1')
watchers = self.jira.watchers(issue)
self.assertEqual(watchers.watchCount, 18)
@unittest.skip('test data doesn\'t support watching')
def test_add_watcher(self):
self.assertEqual(self.jira.watchers('QA-44').watchCount, 0)
self.jira.add_watcher('QA-44', 'fred')
self.assertEqual(self.jira.watchers('QA-44').watchCount, 1)
@unittest.skip('test data doesn\'t support watching')
def test_remove_watcher(self):
self.assertEqual(self.jira.watchers('QA-44').watchCount, 1)
self.jira.remove_watcher('QA-44', 'fred')
self.assertEqual(self.jira.watchers('QA-44').watchCount, 0)
@unittest.skip('test data doesn\'t support watching')
def test_add_watcher_with_issue_obj(self):
issue = self.jira.issue('QA-44')
self.assertEqual(self.jira.watchers(issue).watchCount, 0)
self.jira.add_watcher(issue, 'fred')
self.assertEqual(self.jira.watchers(issue).watchCount, 1)
@unittest.skip('test data doesn\'t support watching')
def test_remove_watcher_with_issue_obj(self):
issue = self.jira.issue('QA-44')
self.assertEqual(self.jira.watchers(issue).watchCount, 1)
self.jira.remove_watcher(issue, 'fred')
self.assertEqual(self.jira.watchers(issue).watchCount, 0)
def test_worklogs(self):
worklogs = self.jira.worklogs('BULK-1')
self.assertEqual(len(worklogs), 6)
def test_worklogs_with_issue_obj(self):
issue = self.jira.issue('BULK-1')
worklogs = self.jira.worklogs(issue)
self.assertEqual(len(worklogs), 6)
def test_worklog(self):
worklog = self.jira.worklog('BULK-1', '10045')
self.assertEqual(worklog.author.name, 'admin')
self.assertEqual(worklog.timeSpent, '4d')
def test_worklog_with_issue_obj(self):
issue = self.jira.issue('BULK-1')
worklog = self.jira.worklog(issue, '10045')
self.assertEqual(worklog.author.name, 'admin')
self.assertEqual(worklog.timeSpent, '4d')
def test_add_worklog(self):
worklog_count = len(self.jira.worklogs('BULK-2'))
worklog = self.jira.add_worklog('BULK-2', '2h')
self.assertIsNotNone(worklog)
self.assertEqual(len(self.jira.worklogs('BULK-2')), worklog_count + 1)
worklog.delete()
def test_add_worklog_with_issue_obj(self):
issue = self.jira.issue('BULK-2')
worklog_count = len(self.jira.worklogs(issue))
worklog = self.jira.add_worklog(issue, '2h')
self.assertIsNotNone(worklog)
self.assertEqual(len(self.jira.worklogs(issue)), worklog_count + 1)
worklog.delete()
def test_update_worklog(self):
worklog = self.jira.add_worklog('BULK-2', '3h')
worklog.update(comment='Updated comment!', timeSpent='1h')
self.assertEqual(worklog.comment, 'Updated comment!')
self.assertEqual(worklog.timeSpent, '1h')
worklog.delete()
def test_delete_worklog(self):
issue = self.jira.issue('BULK-2', fields='worklog,timetracking')
rem_estimate = issue.fields.timetracking.remainingEstimate
worklog = self.jira.add_worklog('BULK-2', '4h')
worklog.delete()
issue = self.jira.issue('BULK-2', fields='worklog,timetracking')
self.assertEqual(issue.fields.timetracking.remainingEstimate, rem_estimate)
class IssueLinkTests(unittest.TestCase):
def setUp(self):
self.jira = get_jira_admin_auth()
def test_issue_link(self):
link = self.jira.issue_link('10220')
self.assertEqual(link.id, '10220')
self.assertEqual(link.inwardIssue.id, '10924')
def test_create_issue_link(self):
self.jira.create_issue_link('Duplicate', 'BULK-1', 'BULK-2',
comment={'body': 'Link comment!', 'visibility': {'type': 'role', 'value': 'Administrators'}})
def test_create_issue_link_with_issue_objs(self):
inwardIssue = self.jira.issue('BULK-1')
outwardIssue = self.jira.issue('BULK-2')
self.jira.create_issue_link('Duplicate', inwardIssue, outwardIssue,
comment={'body': 'Link comment!', 'visibility': {'type': 'role', 'value': 'Administrators'}})
@unittest.skip("Creating an issue link doesn't return its ID, so can't easily test delete")
def test_delete_issue_link(self):
pass
class IssueLinkTypeTests(unittest.TestCase):
def setUp(self):
self.jira = get_jira_admin_auth()
def test_issue_link_types(self):
link_types = self.jira.issue_link_types()
self.assertEqual(len(link_types), 4)
duplicate = find_by_id(link_types, '10001')
self.assertEqual(duplicate.name, 'Duplicate')
def test_issue_link_type(self):
link_type = self.jira.issue_link_type('10002')
self.assertEqual(link_type.id, '10002')
self.assertEqual(link_type.name, 'Very long one')
class IssueTypesTests(unittest.TestCase):
def setUp(self):
self.jira = get_jira_admin_auth()
def test_issue_types(self):
types = self.jira.issue_types()
self.assertEqual(len(types), 12)
unq_issues = find_by_id(types, '6')
self.assertEqual(unq_issues.name, 'UNQ-ISSUES')
def test_issue_type(self):
type = self.jira.issue_type('4')
self.assertEqual(type.id, '4')
self.assertEqual(type.name, 'Improvement')
class MyPermissionsTests(unittest.TestCase):
def setUp(self):
self.jira = get_jira_schlub_auth()
def test_my_permissions(self):
perms = self.jira.my_permissions()
self.assertEqual(len(perms['permissions']), 38)
def test_my_permissions_by_project(self):
perms = self.jira.my_permissions(projectKey='BULK')
self.assertEqual(len(perms['permissions']), 38)
perms = self.jira.my_permissions(projectId='10031')
self.assertEqual(len(perms['permissions']), 38)
def test_my_permissions_by_issue(self):
perms = self.jira.my_permissions(issueKey='BLUK-7')
self.assertEqual(len(perms['permissions']), 38)
perms = self.jira.my_permissions(issueId='11021')
self.assertEqual(len(perms['permissions']), 38)
class PrioritiesTests(unittest.TestCase):
def setUp(self):
self.jira = get_jira_admin_auth()
def test_priorities(self):
priorities = self.jira.priorities()
self.assertEqual(len(priorities), 5)
def test_priority(self):
priority = self.jira.priority('2')
self.assertEqual(priority.id, '2')
self.assertEqual(priority.name, 'Critical')
class ProjectTests(unittest.TestCase):
    """Tests for project resources: listing, avatars, components, versions and roles."""
    def setUp(self):
        self.jira = get_jira_admin_auth()
    def test_projects(self):
        """The test instance defines exactly 12 projects."""
        projects = self.jira.projects()
        self.assertEqual(len(projects), 12)
    def test_project(self):
        """A project can be fetched by key and exposes id and name."""
        project = self.jira.project('BOOK')
        self.assertEqual(project.id, '10540')
        self.assertEqual(project.name, 'Book Request')
    def test_project_avatars(self):
        """project_avatars lists both custom and system avatars."""
        avatars = self.jira.project_avatars('BULK')
        self.assertEqual(len(avatars['custom']), 1)
        self.assertEqual(len(avatars['system']), 12)
    def test_project_avatars_with_project_obj(self):
        """project_avatars also accepts a Project resource instead of a key."""
        project = self.jira.project('BULK')
        avatars = self.jira.project_avatars(project)
        self.assertEqual(len(avatars['custom']), 1)
        self.assertEqual(len(avatars['system']), 12)
    def test_create_project_avatar(self):
        # Tests the end-to-end project avatar creation process: upload as temporary, confirm after cropping,
        # and selection.
        project = self.jira.project('XSS')
        size = os.path.getsize(TEST_ICON_PATH)
        filename = os.path.basename(TEST_ICON_PATH)
        with open(TEST_ICON_PATH, "rb") as icon:
            props = self.jira.create_temp_project_avatar(project, filename, size, icon.read())
        self.assertIn('cropperOffsetX', props)
        self.assertIn('cropperOffsetY', props)
        self.assertIn('cropperWidth', props)
        self.assertTrue(props['needsCropping'])
        # accept the uploaded image as-is, skipping the cropping step
        props['needsCropping'] = False
        avatar_props = self.jira.confirm_project_avatar(project, props)
        self.assertIn('id', avatar_props)
        self.jira.set_project_avatar('XSS', avatar_props['id'])
    def test_delete_project_avatar(self):
        """A temporary avatar confirmed with auto_confirm can be deleted by key."""
        size = os.path.getsize(TEST_ICON_PATH)
        filename = os.path.basename(TEST_ICON_PATH)
        with open(TEST_ICON_PATH, "rb") as icon:
            props = self.jira.create_temp_project_avatar('XSS', filename, size, icon.read(), auto_confirm=True)
        self.jira.delete_project_avatar('XSS', props['id'])
    def test_delete_project_avatar_with_project_obj(self):
        """delete_project_avatar also accepts a Project resource."""
        project = self.jira.project('XSS')
        size = os.path.getsize(TEST_ICON_PATH)
        filename = os.path.basename(TEST_ICON_PATH)
        with open(TEST_ICON_PATH, "rb") as icon:
            props = self.jira.create_temp_project_avatar(project, filename, size, icon.read(), auto_confirm=True)
        self.jira.delete_project_avatar(project, props['id'])
    def test_set_project_avatar(self):
        """set_project_avatar selects an avatar, by key or Project resource."""
        def find_selected_avatar(avatars):
            """Return the system avatar currently flagged as selected."""
            for avatar in avatars['system']:
                if avatar['isSelected']:
                    return avatar
            else:
                # NOTE(review): a bare `raise` with no active exception raises
                # RuntimeError rather than re-raising anything — presumably a
                # deliberate "fail loudly if nothing is selected"; confirm intent
                # before refactoring.
                raise
        self.jira.set_project_avatar('XSS', '10000')
        avatars = self.jira.project_avatars('XSS')
        self.assertEqual(find_selected_avatar(avatars)['id'], '10000')
        project = self.jira.project('XSS')
        self.jira.set_project_avatar(project, '10001')
        avatars = self.jira.project_avatars(project)
        self.assertEqual(find_selected_avatar(avatars)['id'], '10001')
    def test_project_components(self):
        """Components are listed by project key and findable by id."""
        components = self.jira.project_components('BULK')
        self.assertGreaterEqual(len(components), 2)
        bacon = find_by_id(components, '10003')
        self.assertEqual(bacon.id, '10003')
        self.assertEqual(bacon.name, 'Bacon')
    def test_project_components_with_project_obj(self):
        """project_components also accepts a Project resource."""
        project = self.jira.project('BULK')
        components = self.jira.project_components(project)
        self.assertGreaterEqual(len(components), 2)
        bacon = find_by_id(components, '10003')
        self.assertEqual(bacon.id, '10003')
        self.assertEqual(bacon.name, 'Bacon')
    def test_project_versions(self):
        """Versions are listed by project key and findable by id."""
        versions = self.jira.project_versions('BULK')
        self.assertGreaterEqual(len(versions), 6)
        love = find_by_id(versions, '10012')
        self.assertEqual(love.id, '10012')
        self.assertEqual(love.name, 'I love versions')
    def test_project_versions_with_project_obj(self):
        """project_versions also accepts a Project resource."""
        project = self.jira.project('BULK')
        versions = self.jira.project_versions(project)
        self.assertGreaterEqual(len(versions), 6)
        love = find_by_id(versions, '10012')
        self.assertEqual(love.id, '10012')
        self.assertEqual(love.name, 'I love versions')
    def test_project_roles(self):
        """Four roles exist on the XSS project, including 'Users'."""
        roles = self.jira.project_roles('XSS')
        self.assertEqual(len(roles), 4)
        self.assertIn('Users', roles)
    def test_project_roles_with_project_obj(self):
        """project_roles also accepts a Project resource."""
        project = self.jira.project('XSS')
        roles = self.jira.project_roles(project)
        self.assertEqual(len(roles), 4)
        self.assertIn('Users', roles)
    def test_project_role(self):
        """A single role can be fetched by project key and role id."""
        role = self.jira.project_role('XSS', '10010')
        self.assertEqual(role.id, 10010)
        self.assertEqual(role.name, 'Doco Team')
    def test_project_role_with_project_obj(self):
        """project_role also accepts a Project resource."""
        project = self.jira.project('XSS')
        role = self.jira.project_role(project, '10010')
        self.assertEqual(role.id, 10010)
        self.assertEqual(role.name, 'Doco Team')
    def test_update_project_role(self):
        """Role.update replaces the role's users and groups."""
        role = self.jira.project_role('XSS', '10010')
        role.update(users='fred', groups=['jira-developers', 'jira-users'])
        self.assertEqual(role.actors[0].name, 'fred')
class ResolutionTests(unittest.TestCase):
def setUp(self):
self.jira = get_jira_admin_auth()
def test_resolutions(self):
resolutions = self.jira.resolutions()
self.assertEqual(len(resolutions), 5)
def test_resolution(self):
resolution = self.jira.resolution('2')
self.assertEqual(resolution.id, '2')
self.assertEqual(resolution.name, 'Won\'t Fix')
class SearchTests(unittest.TestCase):
def setUp(self):
self.jira = get_jira_admin_auth()
def test_search_issues(self):
issues = self.jira.search_issues('project=BULK')
self.assertEqual(len(issues), 50) # default maxResults
for issue in issues:
self.assertTrue(issue.key.startswith('BULK'))
def test_search_issues_maxResults(self):
issues = self.jira.search_issues('project=XSS', maxResults=10)
self.assertEqual(len(issues), 10)
def test_search_issues_startAt(self):
issues = self.jira.search_issues('project=BULK', startAt=90, maxResults=500)
self.assertGreaterEqual(len(issues), 12) # all but 12 issues in BULK
def test_search_issues_field_limiting(self):
issues = self.jira.search_issues('key=BULK-1', fields='summary,comment')
self.assertTrue(hasattr(issues[0].fields, 'summary'))
self.assertTrue(hasattr(issues[0].fields, 'comment'))
self.assertFalse(hasattr(issues[0].fields, 'reporter'))
self.assertFalse(hasattr(issues[0].fields, 'progress'))
@unittest.skip('Skipping until I know how to handle the expandos')
def test_search_issues_expandos(self):
issues = self.jira.search_issues('key=BULK-1', expand=('names'))
self.assertTrue(hasattr(issues[0], 'names'))
self.assertFalse(hasattr(issues[0], 'schema'))
class SecurityLevelTests(unittest.TestCase):
def setUp(self):
self.jira = get_jira_admin_auth()
def test_security_level(self):
sec_level = self.jira.security_level('10001')
self.assertEqual(sec_level.id, '10001')
self.assertEqual(sec_level.name, 'eee')
class ServerInfoTests(unittest.TestCase):
def setUp(self):
self.jira = get_jira_admin_auth()
def test_server_info(self):
server_info = self.jira.server_info()
self.assertIn('baseUrl', server_info)
self.assertIn('version', server_info)
class StatusTests(unittest.TestCase):
def setUp(self):
self.jira = get_jira_admin_auth()
def test_statuses(self):
stati = self.jira.statuses()
self.assertEqual(len(stati), 20)
def test_status(self):
status = self.jira.status('10004')
self.assertEqual(status.id, '10004')
self.assertEqual(status.name, '5555')
class UserTests(unittest.TestCase):
def setUp(self):
self.jira = get_jira_admin_auth()
def test_user(self):
user = self.jira.user('fred')
self.assertEqual(user.name, 'fred')
self.assertEqual(user.emailAddress, 'fred@example.com')
def test_search_assignable_users_for_projects(self):
users = self.jira.search_assignable_users_for_projects('fred', 'BULK,XSS')
self.assertEqual(len(users), 3)
usernames = map(lambda user: user.name, users)
self.assertIn('fred', usernames)
self.assertIn('fred2', usernames)
self.assertIn('fred&george', usernames)
def test_search_assignable_users_for_projects_maxResults(self):
users = self.jira.search_assignable_users_for_projects('fred', 'BULK,XSS', maxResults=1)
self.assertEqual(len(users), 1)
def test_search_assignable_users_for_projects_startAt(self):
users = self.jira.search_assignable_users_for_projects('fred', 'BULK,XSS', startAt=1)
self.assertEqual(len(users), 2)
def test_search_assignable_users_for_issues_by_project(self):
users = self.jira.search_assignable_users_for_issues('b', project='DMN')
self.assertEqual(len(users), 2)
usernames = map(lambda user: user.name, users)
self.assertIn('admin', usernames)
self.assertIn('aaa', usernames)
def test_search_assignable_users_for_issues_by_project_maxResults(self):
users = self.jira.search_assignable_users_for_issues('b', project='DMN', maxResults=1)
self.assertEqual(len(users), 1)
def test_search_assignable_users_for_issues_by_project_startAt(self):
users = self.jira.search_assignable_users_for_issues('b', project='DMN', startAt=1)
self.assertEqual(len(users), 1)
def test_search_assignable_users_for_issues_by_issue(self):
users = self.jira.search_assignable_users_for_issues('b', issueKey='BULK-1')
self.assertEqual(len(users), 4)
usernames = map(lambda user: user.name, users)
self.assertIn('admin', usernames)
self.assertIn('aaa', usernames)
self.assertIn('hamish', usernames)
self.assertIn('veenu', usernames)
def test_search_assignable_users_for_issues_by_issue_maxResults(self):
users = self.jira.search_assignable_users_for_issues('b', issueKey='BULK-1', maxResults=2)
self.assertEqual(len(users), 2)
def test_search_assignable_users_for_issues_by_issue_startAt(self):
users = self.jira.search_assignable_users_for_issues('b', issueKey='BULK-1', startAt=2)
self.assertEqual(len(users), 2)
def test_user_avatars(self):
avatars = self.jira.user_avatars('fred')
self.assertEqual(len(avatars['system']), 24)
self.assertEqual(len(avatars['custom']), 0)
def test_create_user_avatar(self):
# Tests the end-to-end user avatar creation process: upload as temporary, confirm after cropping,
# and selection.
size = os.path.getsize(TEST_ICON_PATH)
filename = os.path.basename(TEST_ICON_PATH)
with open(TEST_ICON_PATH, "rb") as icon:
props = self.jira.create_temp_user_avatar('admin', filename, size, icon.read())
self.assertIn('cropperOffsetX', props)
self.assertIn('cropperOffsetY', props)
self.assertIn('cropperWidth', props)
self.assertTrue(props['needsCropping'])
props['needsCropping'] = False
avatar_props = self.jira.confirm_user_avatar('admin', props)
self.assertIn('id', avatar_props)
self.assertEqual(avatar_props['owner'], 'admin')
self.jira.set_user_avatar('admin', avatar_props['id'])
def test_set_user_avatar(self):
def find_selected_avatar(avatars):
for avatar in avatars['system']:
if avatar['isSelected']:
return avatar
else:
raise
self.jira.set_user_avatar('fred', '10070')
avatars = self.jira.user_avatars('fred')
self.assertEqual(find_selected_avatar(avatars)['id'], '10070')
self.jira.set_user_avatar('fred', '10071')
avatars = self.jira.user_avatars('fred')
self.assertEqual(find_selected_avatar(avatars)['id'], '10071')
def test_delete_user_avatar(self):
size = os.path.getsize(TEST_ICON_PATH)
filename = os.path.basename(TEST_ICON_PATH)
with open(TEST_ICON_PATH, "rb") as icon:
props = self.jira.create_temp_user_avatar('admin', filename, size, icon.read(), auto_confirm=True)
self.jira.delete_user_avatar('admin', props['id'])
def test_search_users(self):
users = self.jira.search_users('f')
self.assertEqual(len(users), 3)
usernames = map(lambda user: user.name, users)
self.assertIn('fred&george', usernames)
self.assertIn('fred', usernames)
self.assertIn('fred2', usernames)
def test_search_users_maxResults(self):
users = self.jira.search_users('f', maxResults=2)
self.assertEqual(len(users), 2)
def test_search_users_startAt(self):
users = self.jira.search_users('f', startAt=2)
self.assertEqual(len(users), 1)
def test_search_allowed_users_for_issue_by_project(self):
users = self.jira.search_allowed_users_for_issue('w', projectKey='EVL')
self.assertEqual(len(users), 5)
def test_search_allowed_users_for_issue_by_issue(self):
users = self.jira.search_allowed_users_for_issue('b', issueKey='BULK-1')
self.assertEqual(len(users), 4)
def test_search_allowed_users_for_issue_maxResults(self):
users = self.jira.search_allowed_users_for_issue('w', projectKey='EVL', maxResults=2)
self.assertEqual(len(users), 2)
def test_search_allowed_users_for_issue_startAt(self):
users = self.jira.search_allowed_users_for_issue('w', projectKey='EVL', startAt=4)
self.assertEqual(len(users), 1)
class VersionTests(unittest.TestCase):
def setUp(self):
self.jira = get_jira_admin_auth()
def test_create_version(self):
version = self.jira.create_version('new version 1', 'BULK', releaseDate='2013-03-11',
description='test version!')
self.assertEqual(version.name, 'new version 1')
self.assertEqual(version.description, 'test version!')
self.assertEqual(version.releaseDate, '2013-03-11')
version.delete()
def test_create_version_with_project_obj(self):
project = self.jira.project('BULK')
version = self.jira.create_version('new version 1', project, releaseDate='2013-03-11',
description='test version!')
self.assertEqual(version.name, 'new version 1')
self.assertEqual(version.description, 'test version!')
self.assertEqual(version.releaseDate, '2013-03-11')
version.delete()
def test_update(self):
version = self.jira.create_version('update version 1', 'BULK', releaseDate='2013-03-11',
description='to be updated!')
version.update(name='updated version name', description='updated!')
self.assertEqual(version.name, 'updated version name')
self.assertEqual(version.description, 'updated!')
version.delete()
def test_delete(self):
version = self.jira.create_version('To be deleted', 'BULK', releaseDate='2013-03-11',
description='not long for this world')
id = version.id
version.delete()
self.assertRaises(JIRAError, self.jira.version, id)
def test_move_version(self):
self.jira.move_version('10004', after=self.jira._get_url('version/10011'))
self.jira.move_version('10004', position='Later')
# trying to move a version in a different project should fail
self.assertRaises(JIRAError, self.jira.move_version, '10003', self.jira._get_url('version/10011'))
def test_version(self):
version = self.jira.version('10003')
self.assertEqual(version.id, '10003')
self.assertEqual(version.name, '2.0')
@unittest.skip('Versions don\'t seem to need expandos')
def test_version_expandos(self):
pass
def test_version_count_related_issues(self):
counts = self.jira.version_count_related_issues('10003')
self.assertEqual(counts['issuesFixedCount'], 1)
self.assertEqual(counts['issuesAffectedCount'], 1)
def test_version_count_unresolved_issues(self):
self.assertEqual(self.jira.version_count_unresolved_issues('10004'), 4)
class SessionTests(unittest.TestCase):
    """Tests for the authenticated-session resource."""
    def setUp(self):
        self.jira = get_jira_admin_auth()
    def test_session(self):
        """session() returns the currently authenticated user."""
        user = self.jira.session()
        self.assertEqual(user.name, 'admin')
    def test_session_with_no_logged_in_user_raises(self):
        """An anonymous client has no session to fetch."""
        anon_jira = JIRA()
        self.assertRaises(JIRAError, anon_jira.session)
    @unittest.expectedFailure
    def test_kill_session(self):
        # After killing the session, fetching it again should fail
        # (currently marked expectedFailure).
        self.jira.kill_session()
        self.jira.session()
class WebsudoTests(unittest.TestCase):
    """Tests for killing websudo (elevated-privilege) sessions."""
    def setUp(self):
        self.jira = get_jira_admin_auth()
    def test_kill_websudo(self):
        """An authenticated client can drop its websudo session."""
        self.jira.kill_websudo()
    def test_kill_websudo_without_login_raises(self):
        """Killing websudo without being logged in raises JIRAError."""
        anon_jira = JIRA()
        self.assertRaises(JIRAError, anon_jira.kill_websudo)
| |
# -*- coding: utf-8 -*-
"""
tests.cache
~~~~~~~~~~~
Tests the cache system
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import os
import random
from werkzeug.contrib import cache
try:
import redis
except ImportError:
redis = None
try:
import pylibmc as memcache
except ImportError:
try:
from google.appengine.api import memcache
except ImportError:
try:
import memcache
except ImportError:
memcache = None
class CacheTests(object):
    """Mixin of generic tests every werkzeug cache backend must pass.

    Subclasses supply a ``make_cache`` fixture returning a cache factory;
    backends with their own clock set ``_can_use_fast_sleep = False``.
    """
    # True when expiry can be faked by monkeypatching cache.time().
    _can_use_fast_sleep = True
    @pytest.fixture
    def make_cache(self):
        '''Return a cache class or factory.'''
        raise NotImplementedError()
    @pytest.fixture
    def fast_sleep(self, monkeypatch):
        '''Return a sleep(delta) callable; fakes the clock when possible.'''
        if self._can_use_fast_sleep:
            def sleep(delta):
                # Capture the *current* cache.time (possibly already patched)
                # so repeated sleep() calls accumulate their deltas.
                orig_time = cache.time
                monkeypatch.setattr(cache, 'time', lambda: orig_time() + delta)
            return sleep
        else:
            # External backends keep their own clock; really sleep.
            import time
            return time.sleep
    @pytest.fixture
    def c(self, make_cache):
        '''Return a cache instance.'''
        return make_cache()
    def test_generic_get_dict(self, c):
        assert c.set('a', 'a')
        assert c.set('b', 'b')
        d = c.get_dict('a', 'b')
        assert 'a' in d
        assert 'a' == d['a']
        assert 'b' in d
        assert 'b' == d['b']
    def test_generic_set_get(self, c):
        for i in range(3):
            assert c.set(str(i), i * i)
        for i in range(3):
            result = c.get(str(i))
            assert result == i * i, result
    def test_generic_get_set(self, c):
        assert c.set('foo', ['bar'])
        assert c.get('foo') == ['bar']
    def test_generic_get_many(self, c):
        assert c.set('foo', ['bar'])
        assert c.set('spam', 'eggs')
        assert list(c.get_many('foo', 'spam')) == [['bar'], 'eggs']
    def test_generic_set_many(self, c):
        assert c.set_many({'foo': 'bar', 'spam': ['eggs']})
        assert c.get('foo') == 'bar'
        assert c.get('spam') == ['eggs']
    def test_generic_expire(self, c, fast_sleep):
        '''Entries disappear after their timeout elapses.'''
        assert c.set('foo', 'bar', 1)
        fast_sleep(5)
        assert c.get('foo') is None
    def test_generic_add(self, c):
        # sanity check that add() works like set()
        assert c.add('foo', 'bar')
        assert c.get('foo') == 'bar'
        assert not c.add('foo', 'qux')
        assert c.get('foo') == 'bar'
    def test_generic_delete(self, c):
        assert c.add('foo', 'bar')
        assert c.get('foo') == 'bar'
        assert c.delete('foo')
        assert c.get('foo') is None
    def test_generic_delete_many(self, c):
        assert c.add('foo', 'bar')
        assert c.add('spam', 'eggs')
        assert c.delete_many('foo', 'spam')
        assert c.get('foo') is None
        assert c.get('spam') is None
    def test_generic_inc_dec(self, c):
        assert c.set('foo', 1)
        assert c.inc('foo') == c.get('foo') == 2
        assert c.dec('foo') == c.get('foo') == 1
        assert c.delete('foo')
    def test_generic_true_false(self, c):
        # Some backends coerce booleans to ints; accept either.
        assert c.set('foo', True)
        assert c.get('foo') in (True, 1)
        assert c.set('bar', False)
        assert c.get('bar') in (False, 0)
    def test_generic_no_timeout(self, c, fast_sleep):
        # Timeouts of zero should cause the cache to never expire
        c.set('foo', 'bar', 0)
        fast_sleep(random.randint(1, 5))
        assert c.get('foo') == 'bar'
    def test_generic_timeout(self, c, fast_sleep):
        # Check that cache expires when the timeout is reached
        timeout = random.randint(1, 5)
        c.set('foo', 'bar', timeout)
        assert c.get('foo') == 'bar'
        # sleep a bit longer than timeout to ensure there are no
        # race conditions
        fast_sleep(timeout + 5)
        assert c.get('foo') is None
    def test_generic_has(self, c):
        # has() may return bool or int depending on backend; accept either.
        assert c.has('foo') in (False, 0)
        assert c.has('spam') in (False, 0)
        assert c.set('foo', 'bar')
        assert c.has('foo') in (True, 1)
        assert c.has('spam') in (False, 0)
        c.delete('foo')
        assert c.has('foo') in (False, 0)
        assert c.has('spam') in (False, 0)
class TestSimpleCache(CacheTests):
    """Generic cache tests against the in-memory SimpleCache."""
    @pytest.fixture
    def make_cache(self):
        '''Return the SimpleCache class itself as the factory.'''
        return cache.SimpleCache
    def test_purge(self):
        '''Overflowing the threshold evicts old entries.'''
        c = cache.SimpleCache(threshold=2)
        c.set('a', 'a')
        c.set('b', 'b')
        c.set('c', 'c')
        c.set('d', 'd')
        # Cache purges old items *before* it sets new ones.
        assert len(c._cache) == 3
class TestFileSystemCache(CacheTests):
    """Generic cache tests against FileSystemCache, plus pruning/clearing."""
    @pytest.fixture
    def make_cache(self, tmpdir):
        '''Return a factory binding the cache to a per-test temp dir.'''
        return lambda **kw: cache.FileSystemCache(cache_dir=str(tmpdir), **kw)
    def test_filesystemcache_prune(self, make_cache):
        '''Writing past the threshold prunes the on-disk file count.'''
        THRESHOLD = 13
        c = make_cache(threshold=THRESHOLD)
        for i in range(2 * THRESHOLD):
            assert c.set(str(i), i)
        cache_files = os.listdir(c._path)
        assert len(cache_files) <= THRESHOLD
    def test_filesystemcache_clear(self, c):
        '''clear() removes every cache file from disk.'''
        assert c.set('foo', 'bar')
        cache_files = os.listdir(c._path)
        assert len(cache_files) == 1
        assert c.clear()
        cache_files = os.listdir(c._path)
        assert len(cache_files) == 0
# Don't use pytest marker
# https://bitbucket.org/hpk42/pytest/issue/568
if redis is not None:
    class TestRedisCache(CacheTests):
        """Generic cache tests against a live redis-server (via xprocess)."""
        # Redis keeps its own clock, so cache.time cannot be faked.
        _can_use_fast_sleep = False
        @pytest.fixture(params=[
            ([], dict()),
            ([redis.Redis()], dict()),
            ([redis.StrictRedis()], dict())
        ])
        def make_cache(self, xprocess, request):
            '''Return a factory yielding a RedisCache backed by a live server.'''
            def preparefunc(cwd):
                return 'server is now ready', ['redis-server']
            xprocess.ensure('redis_server', preparefunc)
            args, kwargs = request.param
            c = cache.RedisCache(*args, key_prefix='werkzeug-test-case:',
                                 **kwargs)
            request.addfinalizer(c.clear)
            return lambda: c
        def test_compat(self, c):
            # Values written directly through the raw client come back as
            # bytes, or coerced to int when they look numeric.
            assert c._client.set(c.key_prefix + 'foo', 'Awesome')
            assert c.get('foo') == b'Awesome'
            assert c._client.set(c.key_prefix + 'foo', '42')
            assert c.get('foo') == 42
# Don't use pytest marker
# https://bitbucket.org/hpk42/pytest/issue/568
if memcache is not None:
    class TestMemcachedCache(CacheTests):
        """Generic cache tests against a live memcached (via xprocess)."""
        # memcached keeps its own clock, so cache.time cannot be faked.
        _can_use_fast_sleep = False
        @pytest.fixture
        def make_cache(self, xprocess, request):
            '''Return a factory yielding a MemcachedCache backed by a live server.'''
            def preparefunc(cwd):
                return '', ['memcached']
            xprocess.ensure('memcached', preparefunc)
            c = cache.MemcachedCache(key_prefix='werkzeug-test-case:')
            request.addfinalizer(c.clear)
            return lambda: c
        def test_compat(self, c):
            # Values written through the raw client are readable via get().
            assert c._client.set(c.key_prefix + 'foo', 'bar')
            assert c.get('foo') == 'bar'
        def test_huge_timeouts(self, c):
            # Timeouts greater than epoch are interpreted as POSIX timestamps
            # (i.e. not relative to now, but relative to epoch)
            # NOTE: the redundant local `import random` was removed; the
            # module already imports random at the top of the file.
            epoch = 2592000
            timeout = epoch + random.random() * 100
            c.set('foo', 'bar', timeout)
            assert c.get('foo') == 'bar'
def _running_in_uwsgi():
try:
import uwsgi # NOQA
except ImportError:
return False
else:
return True
@pytest.mark.skipif(not _running_in_uwsgi(),
                    reason="uWSGI module can't be imported outside of uWSGI")
class TestUWSGICache(CacheTests):
    """Generic cache tests against the uWSGI cache (only under uWSGI)."""
    # The uWSGI cache uses its own clock; cannot fake time.
    _can_use_fast_sleep = False
    @pytest.fixture
    def make_cache(self, xprocess, request):
        '''Return a factory yielding the shared UWSGICache instance.'''
        c = cache.UWSGICache(cache='werkzeugtest')
        request.addfinalizer(c.clear)
        return lambda: c
| |
from __future__ import print_function
import os
import sys
import argparse
import requests
import random
import json
from typtop.dbaccess import (
UserTypoDB,
call_check, is_user,
get_time, get_machine_id
)
from typtop.config import (
NUMBER_OF_ENTRIES_TO_ALLOW_TYPO_LOGIN,
WARM_UP_CACHE, VERSION, DISTRO, BINDIR, first_msg,
LOG_DIR, DB_NAME, TEST,
SEC_DB_PATH)
from typtop.validate_parent import is_valid_parent
import subprocess
# Target username override; empty means "derive from the login user".
USER = ""
ALLOW_TYPO_LOGIN = True
# To deal with python3 mess
def _raw_input(prompt):
if sys.version_info >= (3,):
result = input(prompt)
else:
result = raw_input(prompt)
return result
class AbortSettings(RuntimeError):
    """Raised when a settings change is aborted (user declined, or a
    privilege check failed)."""
    pass
def _get_login_user():
    """Return the username of the (first) logged-in user, per ``who``.

    ``universal_newlines=True`` decodes stdout to ``str`` on Python 3;
    previously the function returned ``bytes`` there, which corrupted every
    downstream ``"{}".format(user)`` message.

    NOTE(review): still raises IndexError when ``who`` prints nothing
    (e.g. inside a container) — TODO confirm intended behavior.
    """
    pp = subprocess.Popen('who', stdout=subprocess.PIPE,
                          universal_newlines=True)
    output = pp.stdout.read()
    first_line = output.splitlines()[0]
    user = first_line.split()[0]
    return user
def _get_username():
    """Return the user whose typtop settings should be changed.

    Prefers the module-level USER override; otherwise uses the login user,
    asking for confirmation first when running as root (where the login
    user and the effective user typically differ).

    Raises:
        AbortSettings: if the root user answers "n" at the prompt.
    """
    # works around deriving the right user when invoked via sudo —
    # presumably why the login user, not getuid(), is consulted (verify)
    if USER:
        print("Designated user: {}".format(USER))
        return USER
    uid = os.getuid()
    is_root = uid == 0
    user = _get_login_user()
    if is_root:
        r = _raw_input("Setting will be done for login user: {}.\n"
                       "Please confirm. (Yn) ".format(user))
        abort = r and r.lower() == "n"
        if abort:
            raise AbortSettings()
    else:
        print("Designated user: {}".format(user))
    return user
def _get_typoDB():
    """Return an initialized UserTypoDB for the designated user, or None.

    Prints a hint and returns None when the DB cannot be opened at all;
    raises when the DB opens but was never initialized via --init.
    """
    user = _get_username()
    try:
        typoDB = UserTypoDB(user)
    except Exception as e:
        # Most likely cause: --init was never run for this user.
        print(
            "It seems you have not initialized the db. Try running"\
            " \"sudo {} --init\" to initialize the db.\nThe error "\
            "I ran into is the following:\n{}"
            .format(sys.argv[0], e)
        )
        return None
    if not typoDB.is_typtop_init():
        raise Exception("{}:{} not initiated".format(
            str(typoDB),
            typoDB.get_db_path())
        )
    return typoDB
def root_only_operation():
    """Abort with AbortSettings unless the process is running as root."""
    if os.getuid() == 0:
        return
    print("ERROR!! You need root privilege to run this operation")
    raise AbortSettings
def call_update():
    """Upgrade typtop via pip when an update is available, then re-init.

    Runs an inline shell script through os.system; assumes pip is on PATH.
    """
    cmd = """export PIP_FORMAT=columns;
    pip list --outdated|grep typtop;
    if [ "$?" = "0" ]; then
       pip -q uninstall --yes typtop zxcvbn
       pip install -U --ignore-installed typtop && typtops.py --init
    else
       echo "Already up-to-date! No need to update."
    fi
    """
    os.system(cmd)
# Location of this module; the bundled server TLS certificate lives beside it.
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
CERT_FILE = os.path.join(THIS_FOLDER, 'typtopserver.crt')
def call_send_logs(args):
    """Send unsent logs for one user, or for every user under SEC_DB_PATH.

    args.send_log is ``[user]`` or ``[user, 'force']``; 'force' bypasses the
    rate limiting in send_logs(). The special user 'all' expands to every
    per-user directory that corresponds to a real account.
    """
    user = args.send_log[0]
    users = [user]
    # simplified from `True if (...) else False`
    force = len(args.send_log) > 1 and args.send_log[1] == 'force'
    if user == 'all':  # run for all users
        users = [
            d for d in os.listdir(SEC_DB_PATH)
            if os.path.isdir(os.path.join(SEC_DB_PATH, d)) and is_user(d)
        ]
    for user in users:
        typo_db = UserTypoDB(user)
        send_logs(typo_db, force)
def send_logs(typo_db, force=False):
    """Upload this user's unsent logs to the research server, then rotate.

    Skips entirely when the DB says no send is due (unless force=True).
    On a 200 response the sent logs are deleted locally; connection errors
    are logged and the logs are retried next time. Always truncates the
    log file to its last 500 lines before returning.
    """
    need_to_send, iter_data = typo_db.get_last_unsent_logs_iter(force)
    logger = typo_db.logger
    if not need_to_send:
        logger.info("No need to send logs now.")
        return
    list_of_logs = list(iter_data)
    install_id = str(typo_db.get_installation_id())
    dbdata = json.dumps(list_of_logs)
    url = 'https://ec2-54-209-30-18.compute-1.amazonaws.com/submit'
    sent_successfully = False
    try:
        r = requests.post(
            url,
            data=dict(
                # urlsafe-base64 does not have '#'
                uid=install_id.strip() + '#' + str(VERSION),
                data=dbdata,
                test=int(TEST),
            ),
            allow_redirects=True,
            verify=CERT_FILE
        )
        sent_successfully = (r.status_code == 200)
        logger.info("Sent logs status {} ({}) (sent_successfully={})"
                    .format(r.status_code, r.text, sent_successfully))
    except requests.exceptions.ConnectionError as e:
        # Best effort: keep the logs and retry on the next invocation.
        logger.info(e)
    # deletes the logs that we have sent
    if sent_successfully:
        typo_db.update_last_log_sent_time(
            sent_time=get_time(),
            delete_old_logs=True
        )
    # truncate log file to last 200 lines and look for update if available
    # NOTE(review): the shell below keeps 500 lines, not 200 as the comment
    # above says — confirm which is intended.
    # if random.randint(0, 100) <= 20:
    #     call_update()
    cmd = """
    tail -n500 {0}/{1}.log > /tmp/t.log && mv /tmp/t.log {0}/{1}.log;
    """.format(LOG_DIR, DB_NAME)
    subprocess.Popen(cmd, shell=True)
def initiate_typodb():
    """Download this release's source bundle and run the platform-specific
    native installer. Root only.

    Removed a dead ``if False: pass / else:`` wrapper around the whole body,
    and the unused ``branch`` variable (it was passed to format() but the
    template has no ``{branch}`` placeholder).
    """
    root_only_operation()
    subdir, download_bin, makecmd = '', '', ''
    if DISTRO == 'darwin':
        # TODO: Cleanup this directories. e.g., pam_opendirectory
        subdir = 'csrcs/osx/prebuilt'
        download_bin = "curl -LO"
        makecmd = './install.sh'
    elif DISTRO in ('debian', 'fedora', 'arch'):
        subdir = 'csrcs/linux/'
        download_bin = "wget"
        makecmd = "make && make install"
    download_url = "https://github.com/rchatterjee/pam-typopw/archive/"\
                   "{0}.zip".format(VERSION)
    cmd = """
    cd /tmp/ && {download_bin} {download_url} && unzip -qq -o {version}.zip \
    && cd pam-typopw-{version}/{subdir} && {makecmd};
    cd /tmp && rm -rf {version}.zip pam-typopw*
    """.format(subdir=subdir, download_url=download_url,
               download_bin=download_bin, makecmd=makecmd, version=VERSION)
    os.system(cmd)
common_auth = { # Not used
    # Per-distro path of the PAM stack file that would need editing.
    'debian': '/etc/pam.d/common-auth',
    'fedora': '/etc/pam.d/system-auth',
    'darwin': '',
    'arch' : '/etc/pam.d/system-auth',
}[DISTRO]
def uninstall_pam_typtop():
    """Run the bundled typtop uninstall shell script. Root only."""
    # Last try to send logs
    root_only_operation()
    typtop_uninstall_script = BINDIR + '/typtop-uninstall.sh'
    print(DISTRO)
    subprocess.call(typtop_uninstall_script)
# Command-line interface definition; each flag maps to one action in main().
parser = argparse.ArgumentParser("typtop ")
# parser.add_argument(
#     "--user",
#     help="To set the username. Otherwise login user will be the target"
# )
parser.add_argument(
    "--init", action="store_true",
    help="To initialize the DB. You have to run this once you install pam_typtop"
)
parser.add_argument(
    "--allowtypo", type=str.lower, choices=['yes','no'],
    help='Allow login with typos of the password'
)
parser.add_argument(
    "--allowupload", type=str.lower, choices=['yes', 'no'],
    help="Allow uploading the non-sensitive anonymous "\
    "data into the server for research purposes."
)
parser.add_argument("--id", action="store_true", help="Get Installation id")
# parser.add_argument(
#     "--installid", action="store_true",
#     help="Prints the installation id, which you have to submit while filling up the google form"
# )
parser.add_argument(
    "--send-log", nargs="*", type=str, action="store",
    metavar=("user", "force"),
    help="Send the logs to the server"
)
parser.add_argument(
    "--status", action="store", nargs="+",
    metavar="user",
    help="Prints current states of the typo-tolerance."\
    "Needs a username as argument."
)
parser.add_argument(
    "--uninstall", action="store_true",
    help="Uninstall TypToP from your machine. Will delete all the data related to TypTop too."
)
# parser.add_argument(
#     "--reinit", action="store_true",
#     help="To re-initiate the DB, especially after the user's pw has changed"
# )
parser.add_argument(
    "--update", action="store_true",
    help="Updates TypTop to the latest released version"
)
parser.add_argument(
    "--check", action="store", nargs=3,
    help="(INTERNAL FUNCTION. PLEASE DON'T CALL THIS.)"
)
parser.add_argument(
    "--debug", action="store_true",
    help="Prepare report for debugging"
)
parser.add_argument(
    "--version", action="store_true",
    help="What is the version of TypTop"
)
def main():
    """Entry point of the typtop CLI: parse flags and dispatch each action.

    Fixes over the previous revision:
    * ``parser.print_help()`` is no longer wrapped in ``print()`` (which
      appended a spurious "None" line — print_help writes the text itself);
    * ``users.append(...)`` replaces ``users.add(...)`` — ``args.status`` is
      a list, so ``.add`` raised AttributeError;
    * the ``pip show numpy`` subprocess uses ``universal_newlines=True`` so
      its stdout lines are ``str`` on Python 3 (``split(': ', 1)`` failed on
      bytes before).
    """
    args = parser.parse_args()
    if len(sys.argv) <= 1:
        parser.print_help()
        sys.exit(0)
    # ITS IMPORTANT THIS ONE WILL BE FIRST
    # if args.user:
    #     global USER
    #     USER = args.user
    #     print("User settings have been set to {}".format(USER))
    try:
        # root_only_operation()
        if args.allowtypo:
            typoDB = _get_typoDB()
            if args.allowtypo == "no":
                typoDB.allow_login(False)
                print(
                    """
        Turning OFF login with typos. The software will still monitor your
        typos and build cache of popular typos. You can switch on this
        whenever you want.
                    """)  # :{}".format(typoDB.is_allowed_login())
            elif args.allowtypo == "yes":
                print("Turning ON login with typos...",)
                typoDB.allow_login(True)
        if args.allowupload:
            typoDB = _get_typoDB()
            if args.allowupload == "yes":
                typoDB.allow_upload(True)
                print("Uploading data is enabled. You are awesome. Thanks!!")
            elif args.allowupload == "no":
                typoDB.allow_upload(False)
                print("Uploading data is disabled. :( :'( :-(!")
                print("Thanks for using the software anyway.")
        if args.init:
            print(first_msg, file=sys.stderr)
            print("Initializing the typo database..")
            initiate_typodb()
        # if args.reinit:
        #     print("RE-initiating pam_typtop")
        #     initiate_typodb(RE_INIT=True)
        if args.status:
            users = args.status
            if not users:
                # args.status is a list: use append (old code used set.add)
                users.append(_get_username())
            for user in users:
                typoDB = UserTypoDB(user)
                print("\n** TYPO-TOLERANCE STATUS **\n")
                print(">> User: {}".format(user))
                print("\tLogin with typos: {}".format(typoDB.is_allowed_login()))
                print("\tParticipate in the study: {}"\
                      .format(typoDB.is_allowed_upload()))
                print("\tIs enough logins to allow typos: {}"\
                      .format(typoDB.check_login_count(update=False)))
                print("\tInstall Id: {}".format(typoDB.get_installation_id().strip()))
                print("\tSoftware Version: {}".format(VERSION))
                print("\tNum entries before typo-login allowed: {}".format(NUMBER_OF_ENTRIES_TO_ALLOW_TYPO_LOGIN))
                print("\tWarmup cache: {}".format(WARM_UP_CACHE))
        if args.uninstall:
            r = _raw_input("Uninstalling pam_typtop. Will delete all the "
                           "databases.\nPlease confirm. (yN)")
            if r and r.lower() == "y":
                uninstall_pam_typtop()
        if args.update:  # delete all old data
            call_update()
        if args.id:
            print("Install-id:", get_machine_id())
        if args.debug:
            p = subprocess.Popen(
                'pip show numpy', shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                universal_newlines=True
            )
            numpypath = ''
            for l in p.stdout:
                l = l.strip().split(': ', 1)
                if l[0] == 'Location':
                    numpypath = l[1]
                    break
            print("Is numpy in path: {}".format(numpypath in sys.path))
            proc = subprocess.Popen(
                # TODO: Add numpy path constraint
                """
    set -x
    set -u
    users
    # su $USER -c "which su"
    # <enter correct password>
    # if [ $? -neq 0 ]; then exit; else "echo password incorrect"; fi
    typtop --status $USER
    sudo ls -altrh /usr/local/etc/typtop.d/$USER/typtop.json
    ls -altrh $(which su) $(which typtop)
    python -c "import pwd; print pwd.getpwnam('$USER')"
    tail -n50 /var/log/typtop.log
    """, shell=True,
                stdout=subprocess.PIPE,
                # stderr=subprocess.STDOUT
                stderr=sys.stdout.fileno()
            )
            print(proc.stdout.read())
        if args.check:
            # ensure the parent is pam_opendirectory_typo.so
            assert is_valid_parent()
            failed, user, pw = args.check
            ret = call_check(failed, user, pw)
            sys.stdout.write(str(ret))
            # if ret==0:
            #     p = subprocess.Popen([SEND_LOGS_SCRIPT, user])
        if args.send_log:
            call_send_logs(args)
        if args.version:
            print("Typtop#{}".format(VERSION))
    except AbortSettings:
        print("Settings' change had been aborted.")
if __name__ == '__main__':
    main()
| |
# Copyright (c) 2014 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo_config import cfg
from oslo_log import log as logging
import pkg_resources as pkg
import six
from sahara import context
from sahara import exceptions as exc
from sahara.i18n import _
from sahara.i18n import _LE
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.plugins import exceptions as ex
from sahara.plugins.hdp import clusterspec as cs
from sahara.plugins.hdp import configprovider as cfgprov
from sahara.plugins.hdp.versions import abstractversionhandler as avm
from sahara.plugins.hdp.versions.version_2_0_6 import edp_engine
from sahara.plugins.hdp.versions.version_2_0_6 import services
from sahara.utils import cluster_progress_ops as cpo
from sahara.utils import poll_utils
from sahara import version
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class VersionHandler(avm.AbstractVersionHandler):
    """Version handler for HDP 2.0.6 clusters provisioned through Ambari."""
    # Lazily-created, cached collaborators.
    config_provider = None
    version = None
    client = None
    def _set_version(self, version):
        # Record the plugin version string for later get_version() calls.
        self.version = version
    def _get_config_provider(self):
        """Return the ConfigurationProvider, building it on first use from
        the bundled ambari-config-resource.json."""
        if self.config_provider is None:
            self.config_provider = cfgprov.ConfigurationProvider(
                json.load(pkg.resource_stream(
                    version.version_info.package,
                    'plugins/hdp/versions/version_2_0_6/resources/'
                    'ambari-config-resource.json')),
                hadoop_version='2.0.6')
        return self.config_provider
    def get_version(self):
        """Return the version string set via _set_version()."""
        return self.version
    def get_ambari_client(self):
        """Return a cached AmbariClient bound to this handler."""
        if not self.client:
            self.client = AmbariClient(self)
        return self.client
    def get_config_items(self):
        """Return all configuration items known to the config provider."""
        return self._get_config_provider().get_config_items()
    def get_applicable_target(self, name):
        """Return the configuration target applicable to property *name*."""
        return self._get_config_provider().get_applicable_target(name)
    def get_cluster_spec(self, cluster, user_inputs,
                         scaled_groups=None, cluster_template=None):
        """Build a ClusterSpec either from an explicit template or from the
        default configuration merged with the user's inputs.

        Datanode-count validation only runs on the non-template path.
        """
        if cluster_template:
            cluster_spec = cs.ClusterSpec(cluster_template, '2.0.6')
        else:
            cluster_spec = self.get_default_cluster_configuration()
            cluster_spec.create_operational_config(
                cluster, user_inputs, scaled_groups)
            cs.validate_number_of_datanodes(
                cluster, scaled_groups, self.get_config_items())
        return cluster_spec
    def get_default_cluster_configuration(self):
        """Return a ClusterSpec built from the bundled default template."""
        return cs.ClusterSpec(self._get_default_cluster_template(), '2.0.6')
    def _get_default_cluster_template(self):
        # Raw text of the packaged default-cluster template.
        return pkg.resource_string(
            version.version_info.package,
            'plugins/hdp/versions/version_2_0_6/resources/'
            'default-cluster.template')
    def get_node_processes(self):
        """Map each default service name to the list of its component names."""
        node_processes = {}
        for service in self.get_default_cluster_configuration().services:
            components = []
            for component in service.components:
                components.append(component.name)
            node_processes[service.name] = components
        return node_processes
    def install_swift_integration(self, servers):
        """Install swift integration on every server, reporting progress."""
        if servers:
            cpo.add_provisioning_step(
                servers[0].cluster_id, _("Install swift integration"),
                len(servers))
            for server in servers:
                server.install_swift_integration()
    def get_services_processor(self):
        """Return the services module for this HDP version."""
        return services
    def get_edp_engine(self, cluster, job_type):
        """Return an EDP engine for *job_type*, or None when unsupported."""
        if job_type in edp_engine.EdpOozieEngine.get_supported_job_types():
            return edp_engine.EdpOozieEngine(cluster)
        return None
    def get_edp_job_types(self):
        """Return the EDP job types supported by the Oozie engine."""
        return edp_engine.EdpOozieEngine.get_supported_job_types()
    def get_edp_config_hints(self, job_type):
        """Return possible job configs for *job_type* (EDP hints)."""
        return edp_engine.EdpOozieEngine.get_possible_job_config(job_type)
    def get_open_ports(self, node_group):
        """Return the TCP ports to open for the node group's processes."""
        ports = [8660]  # for Ganglia
        # Well-known HDP service ports, keyed by process name.
        ports_map = {
            'AMBARI_SERVER': [8080, 8440, 8441],
            'NAMENODE': [50070, 50470, 8020, 9000],
            'DATANODE': [50075, 50475, 50010, 8010],
            'SECONDARY_NAMENODE': [50090],
            'HISTORYSERVER': [19888],
            'RESOURCEMANAGER': [8025, 8041, 8050, 8088],
            'NODEMANAGER': [45454],
            'HIVE_SERVER': [10000],
            'HIVE_METASTORE': [9083],
            'HBASE_MASTER': [60000, 60010],
            'HBASE_REGIONSERVER': [60020, 60030],
            'WEBHCAT_SERVER': [50111],
            'GANGLIA_SERVER': [8661, 8662, 8663, 8651],
            'MYSQL_SERVER': [3306],
            'OOZIE_SERVER': [11000, 11001],
            'ZOOKEEPER_SERVER': [2181, 2888, 3888],
            'NAGIOS_SERVER': [80]
        }
        for process in node_group.node_processes:
            if process in ports_map:
                ports.extend(ports_map[process])
        return ports
class AmbariClient(object):
    def __init__(self, handler):
        # add an argument for neutron discovery
        # handler: the owning VersionHandler (used for version lookup and
        # swift-integration installs).
        self.handler = handler
def _get_http_session(self, host, port):
return host.remote().get_http_client(port)
def _get_standard_headers(self):
return {"X-Requested-By": "sahara"}
def _post(self, url, ambari_info, data=None):
session = self._get_http_session(ambari_info.host, ambari_info.port)
return session.post(url, data=data,
auth=(ambari_info.user, ambari_info.password),
headers=self._get_standard_headers())
    def _delete(self, url, ambari_info):
        """Issue an authenticated HTTP DELETE to the Ambari server."""
        session = self._get_http_session(ambari_info.host, ambari_info.port)
        return session.delete(url,
                              auth=(ambari_info.user, ambari_info.password),
                              headers=self._get_standard_headers())
    def _put(self, url, ambari_info, data=None):
        """Issue an authenticated HTTP PUT to the Ambari server."""
        session = self._get_http_session(ambari_info.host, ambari_info.port)
        auth = (ambari_info.user, ambari_info.password)
        return session.put(url, data=data, auth=auth,
                           headers=self._get_standard_headers())
    def _get(self, url, ambari_info):
        """Issue an authenticated HTTP GET to the Ambari server."""
        session = self._get_http_session(ambari_info.host, ambari_info.port)
        return session.get(url, auth=(ambari_info.user, ambari_info.password),
                           headers=self._get_standard_headers())
    def _add_cluster(self, ambari_info, name):
        """Create the named cluster in Ambari with this handler's HDP stack.

        Raises HadoopProvisionError unless Ambari answers 201 Created.
        """
        add_cluster_url = 'http://{0}/api/v1/clusters/{1}'.format(
            ambari_info.get_address(), name)
        result = self._post(add_cluster_url, ambari_info,
                            data='{"Clusters": {"version" : "HDP-' +
                            self.handler.get_version() + '"}}')
        if result.status_code != 201:
            LOG.error(_LE('Create cluster command failed. {result}').format(
                result=result.text))
            raise ex.HadoopProvisionError(
                _('Failed to add cluster: %s') % result.text)
    @cpo.event_wrapper(True, step=_("Add configurations to cluster"),
                       param=('ambari_info', 2))
    def _add_configurations_to_cluster(
            self, cluster_spec, ambari_info, name):
        """Push the cluster spec's configurations to Ambari one by one.

        Skips entirely when every deployed config already exists on the
        cluster; for core-site/global an existing config is re-pushed with
        an incremented 'v<N>' tag, other existing configs are left alone.
        """
        existing_config_url = ('http://{0}/api/v1/clusters/{1}?fields='
                               'Clusters/desired_configs'.format(
                                   ambari_info.get_address(), name))
        result = self._get(existing_config_url, ambari_info)
        json_result = json.loads(result.text)
        existing_configs = json_result['Clusters']['desired_configs']
        configs = cluster_spec.get_deployed_configurations()
        # 'ambari' config is never pushed to the cluster itself.
        if 'ambari' in configs:
            configs.remove('ambari')
        if len(configs) == len(existing_configs):
            # nothing to do
            return
        config_url = 'http://{0}/api/v1/clusters/{1}'.format(
            ambari_info.get_address(), name)
        body = {}
        clusters = {}
        version = 1
        body['Clusters'] = clusters
        for config_name in configs:
            if config_name in existing_configs:
                if config_name == 'core-site' or config_name == 'global':
                    # bump the version tag past the currently deployed one
                    existing_version = (existing_configs[config_name]['tag']
                                        .lstrip('v'))
                    version = int(existing_version) + 1
                else:
                    continue
            config_body = {}
            clusters['desired_config'] = config_body
            config_body['type'] = config_name
            config_body['tag'] = 'v%s' % version
            config_body['properties'] = (
                cluster_spec.configurations[config_name])
            result = self._put(config_url, ambari_info, data=json.dumps(body))
            if result.status_code != 200:
                LOG.error(
                    _LE('Set configuration command failed. {result}').format(
                        result=result.text))
                raise ex.HadoopProvisionError(
                    _('Failed to set configurations on cluster: %s')
                    % result.text)
    @cpo.event_wrapper(
        True, step=_("Add services to cluster"), param=('ambari_info', 2))
    def _add_services_to_cluster(self, cluster_spec, ambari_info, name):
        """Register every deployed, Ambari-managed service with the cluster.

        409 Conflict (already registered) is tolerated; anything else
        besides 201 raises HadoopProvisionError.
        """
        services = cluster_spec.services
        add_service_url = 'http://{0}/api/v1/clusters/{1}/services/{2}'
        for service in services:
            # Make sure the service is deployed and is managed by Ambari
            if service.deployed and service.ambari_managed:
                result = self._post(add_service_url.format(
                    ambari_info.get_address(), name, service.name),
                    ambari_info)
                if result.status_code not in [201, 409]:
                    LOG.error(
                        _LE('Create service command failed. {result}').format(
                            result=result.text))
                    raise ex.HadoopProvisionError(
                        _('Failed to add services to cluster: %s')
                        % result.text)
    @cpo.event_wrapper(
        True, step=_("Add components to services"), param=('ambari_info', 2))
    def _add_components_to_services(self, cluster_spec, ambari_info, name):
        """Register each component of every deployed, Ambari-managed service.

        As with services, 409 Conflict is tolerated; other non-201 answers
        raise HadoopProvisionError.
        """
        add_component_url = ('http://{0}/api/v1/clusters/{1}/services/{'
                             '2}/components/{3}')
        for service in cluster_spec.services:
            # Make sure the service is deployed and is managed by Ambari
            if service.deployed and service.ambari_managed:
                for component in service.components:
                    result = self._post(add_component_url.format(
                        ambari_info.get_address(), name, service.name,
                        component.name),
                        ambari_info)
                    if result.status_code not in [201, 409]:
                        LOG.error(
                            _LE('Create component command failed. {result}')
                            .format(result=result.text))
                        raise ex.HadoopProvisionError(
                            _('Failed to add components to services: %s')
                            % result.text)
    @cpo.event_wrapper(
        True, step=_("Add host and components"), param=('ambari_info', 3))
    def _add_hosts_and_components(
            self, cluster_spec, servers, ambari_info, name):
        """Register each server with the cluster and attach the components
        of its node group, skipping AMBARI_* and HUE_* components.

        Raises HadoopProvisionError on any non-201 answer.
        """
        add_host_url = 'http://{0}/api/v1/clusters/{1}/hosts/{2}'
        add_host_component_url = ('http://{0}/api/v1/clusters/{1}'
                                  '/hosts/{2}/host_components/{3}')
        for host in servers:
            hostname = host.instance.fqdn().lower()
            result = self._post(
                add_host_url.format(ambari_info.get_address(), name, hostname),
                ambari_info)
            if result.status_code != 201:
                LOG.error(
                    _LE('Create host command failed. {result}').format(
                        result=result.text))
                raise ex.HadoopProvisionError(
                    _('Failed to add host: %s') % result.text)
            node_group_name = host.node_group.name
            # TODO(jspeidel): ensure that node group exists
            node_group = cluster_spec.node_groups[node_group_name]
            for component in node_group.components:
                # Don't add any AMBARI or HUE components
                # TODO(rlevas): Pragmatically determine if component is
                #   managed by Ambari
                if (component.find('AMBARI') != 0
                        and component.find('HUE') != 0):
                    result = self._post(add_host_component_url.format(
                        ambari_info.get_address(), name, hostname, component),
                        ambari_info)
                    if result.status_code != 201:
                        LOG.error(
                            _LE('Create host_component command failed. '
                                '{result}').format(result=result.text))
                        raise ex.HadoopProvisionError(
                            _('Failed to add host component: %s')
                            % result.text)
    @cpo.event_wrapper(
        True, step=_("Install services"), param=('ambari_info', 2))
    def _install_services(self, cluster_name, ambari_info):
        """Ask Ambari to install every service still in the INIT state.

        Blocks polling the asynchronous request until it completes and then
        finalizes Ambari's cluster state; raises HadoopProvisionError on
        failure. A 200 answer (nothing to install) is treated as success.
        """
        ambari_address = ambari_info.get_address()
        install_url = ('http://{0}/api/v1/clusters/{'
                       '1}/services?ServiceInfo/state=INIT'.format(
                           ambari_address, cluster_name))
        body = ('{"RequestInfo" : { "context" : "Install all services" },'
                '"Body" : {"ServiceInfo": {"state" : "INSTALLED"}}}')
        result = self._put(install_url, ambari_info, data=body)
        if result.status_code == 202:
            json_result = json.loads(result.text)
            request_id = json_result['Requests']['id']
            success = self._wait_for_async_request(self._get_async_request_uri(
                ambari_info, cluster_name, request_id),
                ambari_info)
            if success:
                LOG.info(_LI("Install of Hadoop stack successful."))
                self._finalize_ambari_state(ambari_info)
            else:
                LOG.error(_LE('Install command failed.'))
                raise ex.HadoopProvisionError(
                    _('Installation of Hadoop stack failed.'))
        elif result.status_code != 200:
            LOG.error(
                _LE('Install command failed. {result}').format(
                    result=result.text))
            raise ex.HadoopProvisionError(
                _('Installation of Hadoop stack failed.'))
def _get_async_request_uri(self, ambari_info, cluster_name, request_id):
return ('http://{0}/api/v1/clusters/{1}/requests/{'
'2}/tasks?fields=Tasks/status'.format(
ambari_info.get_address(), cluster_name,
request_id))
# Returns the top-level requests API URI
def _get_command_request_uri(self, ambari_info, cluster_name):
return ('http://{0}/api/v1/clusters/{1}/requests'.format(
ambari_info.get_address(), cluster_name))
    def _wait_for_async_request(self, request_url, ambari_info):
        """Poll an Ambari request's tasks until they all finish.

        Returns True when every task reaches COMPLETED; returns False as
        soon as any task is FAILED, ABORTED or TIMEDOUT. Sleeps 5 seconds
        between polls.
        """
        started = False
        while not started:
            result = self._get(request_url, ambari_info)
            LOG.debug('Async request url: {url} response:\n{response}'.format(
                url=request_url, response=result.text))
            json_result = json.loads(result.text)
            # Assume done until an unfinished task proves otherwise.
            started = True
            for items in json_result['items']:
                status = items['Tasks']['status']
                if (status == 'FAILED' or status == 'ABORTED' or
                        status == 'TIMEDOUT'):
                    return False
                else:
                    if status != 'COMPLETED':
                        started = False
            context.sleep(5)
        return started
    def _finalize_ambari_state(self, ambari_info):
        """Mark the cluster as started in Ambari's persisted UI state.

        Raises HadoopProvisionError unless Ambari answers 201 or 202.
        """
        persist_state_uri = 'http://{0}/api/v1/persist'.format(
            ambari_info.get_address())
        # this post data has non-standard format because persist
        # resource doesn't comply with Ambari API standards
        persist_data = ('{ "CLUSTER_CURRENT_STATUS":'
                        '"{\\"clusterState\\":\\"CLUSTER_STARTED_5\\"}" }')
        result = self._post(persist_state_uri, ambari_info, data=persist_data)
        if result.status_code != 201 and result.status_code != 202:
            LOG.warning(_LW('Ambari cluster state not finalized. {result}').
                        format(result=result.text))
            raise ex.HadoopProvisionError(
                _('Unable to finalize Ambari state.'))
        LOG.info(_LI('Ambari cluster state finalized.'))
    @cpo.event_wrapper(
        True, step=_("Start services"), param=('ambari_info', 3))
    def start_services(self, cluster_name, cluster_spec, ambari_info):
        """Transition every INSTALLED service to STARTED and wait for it.

        Fires service-start notifications first, then blocks on the
        asynchronous request; raises HadoopProvisionError on failure. A 200
        answer (nothing to start) is treated as success.
        """
        start_url = ('http://{0}/api/v1/clusters/{1}/services?ServiceInfo/'
                     'state=INSTALLED'.format(
                         ambari_info.get_address(), cluster_name))
        body = ('{"RequestInfo" : { "context" : "Start all services" },'
                '"Body" : {"ServiceInfo": {"state" : "STARTED"}}}')
        self._fire_service_start_notifications(
            cluster_name, cluster_spec, ambari_info)
        result = self._put(start_url, ambari_info, data=body)
        if result.status_code == 202:
            json_result = json.loads(result.text)
            request_id = json_result['Requests']['id']
            success = self._wait_for_async_request(
                self._get_async_request_uri(ambari_info, cluster_name,
                                            request_id), ambari_info)
            if success:
                LOG.info(
                    _LI("Successfully started Hadoop cluster '{name}'.")
                    .format(name=cluster_name))
                LOG.info(_LI('Cluster name: {cluster_name}, Ambari server '
                             'address: {server_address}').format(
                                 cluster_name=cluster_name,
                                 server_address=ambari_info.get_address()))
            else:
                LOG.error(_LE('Failed to start Hadoop cluster.'))
                raise ex.HadoopProvisionError(
                    _('Start of Hadoop services failed.'))
        elif result.status_code != 200:
            LOG.error(
                _LE('Start command failed. Status: {status}, response: '
                    '{response}').format(status=result.status_code,
                                         response=result.text))
            raise ex.HadoopProvisionError(
                _('Start of Hadoop services failed.'))
    def _exec_ambari_command(self, ambari_info, body, cmd_uri):
        """PUT an arbitrary state-change command and wait for it to finish.

        Expects Ambari to answer 202 Accepted with an 'href' pointing at the
        request; raises HadoopProvisionError on any other answer or when the
        asynchronous request ultimately fails.
        """
        LOG.debug('PUT URI: {uri}'.format(uri=cmd_uri))
        result = self._put(cmd_uri, ambari_info, data=body)
        if result.status_code == 202:
            LOG.debug(
                'PUT response: {result}'.format(result=result.text))
            json_result = json.loads(result.text)
            href = json_result['href'] + '/tasks?fields=Tasks/status'
            success = self._wait_for_async_request(href, ambari_info)
            if success:
                LOG.info(
                    _LI("Successfully changed state of Hadoop components "))
            else:
                LOG.error(_LE('Failed to change state of Hadoop components'))
                raise ex.HadoopProvisionError(
                    _('Failed to change state of Hadoop components'))
        else:
            LOG.error(
                _LE('Command failed. Status: {status}, response: '
                    '{response}').format(status=result.status_code,
                                         response=result.text))
            raise ex.HadoopProvisionError(_('Hadoop/Ambari command failed.'))
def _get_host_list(self, servers):
host_list = [server.instance.fqdn().lower() for server in servers]
return ",".join(host_list)
    def _install_and_start_components(self, cluster_name, servers,
                                      ambari_info, cluster_spec):
        """Install, then start, Hadoop components on the given servers.

        Used when scaling a cluster: new hosts first get their components
        installed (plus Swift integration), then started.
        """
        auth = (ambari_info.user, ambari_info.password)
        self._install_components(ambari_info, auth, cluster_name, servers)
        self.handler.install_swift_integration(servers)
        self._start_components(ambari_info, auth, cluster_name,
                               servers, cluster_spec)
    def _install_components(self, ambari_info, auth, cluster_name, servers):
        """Install all INIT-state host components on the given servers.

        Issues one bulk Ambari state change moving every host component
        still in the INIT state on the listed hosts to INSTALLED.
        """
        # query for the host components on the given hosts that are in the
        # INIT state
        # TODO(jspeidel): provide request context
        body = '{"HostRoles": {"state" : "INSTALLED"}}'
        install_uri = ('http://{0}/api/v1/clusters/{'
                       '1}/host_components?HostRoles/state=INIT&'
                       'HostRoles/host_name.in({2})'.format(
                           ambari_info.get_address(), cluster_name,
                           self._get_host_list(servers)))
        self._exec_ambari_command(ambari_info, body, install_uri)
        LOG.info(_LI('Started Hadoop components while scaling up'))
        LOG.info(_LI('Cluster name {cluster_name}, Ambari server ip {ip}')
                 .format(cluster_name=cluster_name,
                         ip=ambari_info.get_address()))
def _start_components(self, ambari_info, auth, cluster_name, servers,
cluster_spec):
# query for all the host components in the INSTALLED state,
# then get a list of the client services in the list
installed_uri = ('http://{0}/api/v1/clusters/{'
'1}/host_components?HostRoles/state=INSTALLED&'
'HostRoles/host_name.in({2})'.format(
ambari_info.get_address(), cluster_name,
self._get_host_list(servers)))
result = self._get(installed_uri, ambari_info)
if result.status_code == 200:
LOG.debug(
'GET response: {result}'.format(result=result.text))
json_result = json.loads(result.text)
items = json_result['items']
client_set = cluster_spec.get_components_for_type('CLIENT')
inclusion_list = list(set([x['HostRoles']['component_name']
for x in items
if x['HostRoles']['component_name']
not in client_set]))
# query and start all non-client components on the given set of
# hosts
# TODO(jspeidel): Provide request context
body = '{"HostRoles": {"state" : "STARTED"}}'
start_uri = ('http://{0}/api/v1/clusters/{'
'1}/host_components?HostRoles/state=INSTALLED&'
'HostRoles/host_name.in({2})'
'&HostRoles/component_name.in({3})'.format(
ambari_info.get_address(), cluster_name,
self._get_host_list(servers),
",".join(inclusion_list)))
self._exec_ambari_command(ambari_info, body, start_uri)
else:
raise ex.HadoopProvisionError(
_('Unable to determine installed service '
'components in scaled instances. status'
' code returned = {0}').format(result.status))
    def _check_host_registrations(self, num_hosts, ambari_info):
        """Polling predicate: have at least ``num_hosts`` agents registered?

        Returns True once Ambari reports ``num_hosts`` (or more) registered
        hosts, False otherwise.  Any failure (server not yet reachable,
        malformed response) is treated as "not yet" rather than an error,
        since this is called repeatedly by a poll loop.
        """
        url = 'http://{0}/api/v1/hosts'.format(ambari_info.get_address())
        try:
            result = self._get(url, ambari_info)
            json_result = json.loads(result.text)
            LOG.debug('Registered Hosts: {current_number} '
                      'of {final_number}'.format(
                          current_number=len(json_result['items']),
                          final_number=num_hosts))
            for hosts in json_result['items']:
                LOG.debug('Registered Host: {host}'.format(
                    host=hosts['Hosts']['host_name']))
            return result and len(json_result['items']) >= num_hosts
        except Exception:
            # Deliberate best-effort: the Ambari server may simply not be
            # up yet; report "not registered" and let the poller retry.
            LOG.debug('Waiting to connect to ambari server')
            return False
    @cpo.event_wrapper(True, step=_("Wait for all Ambari agents to register"),
                       param=('ambari_info', 2))
    def wait_for_host_registrations(self, num_hosts, ambari_info):
        """Block until ``num_hosts`` Ambari agents have registered.

        Polls ``_check_host_registrations`` every 5 seconds, bounded by the
        HOST_REGISTRATIONS_TIMEOUT plugin option.
        """
        cluster = ambari_info.get_cluster()
        poll_utils.plugin_option_poll(
            cluster, self._check_host_registrations,
            cfgprov.HOST_REGISTRATIONS_TIMEOUT,
            _("Wait for host registrations"), 5, {
                'num_hosts': num_hosts, 'ambari_info': ambari_info})
    def update_ambari_admin_user(self, password, ambari_info):
        """Change the Ambari 'admin' user's password.

        Authenticates with the current password held in ``ambari_info`` and
        sets the new one via the users REST endpoint.

        :raises ex.HadoopProvisionError: if the update is not acknowledged
            with HTTP 200
        """
        old_pwd = ambari_info.password
        user_url = 'http://{0}/api/v1/users/admin'.format(
            ambari_info.get_address())
        update_body = ('{{"Users":{{"roles":"admin","password":"{0}",'
                       '"old_password":"{1}"}} }}'.format(password, old_pwd))
        result = self._put(user_url, ambari_info, data=update_body)
        if result.status_code != 200:
            raise ex.HadoopProvisionError(_('Unable to update Ambari admin '
                                            'user credentials: {0}').format(
                result.text))
    def add_ambari_user(self, user, ambari_info):
        """Create a new Ambari user with the roles listed in user.groups.

        :param user: object with ``name``, ``password`` and ``groups``
        :raises ex.HadoopProvisionError: if creation is not acknowledged
            with HTTP 201 (created)
        """
        user_url = 'http://{0}/api/v1/users/{1}'.format(
            ambari_info.get_address(), user.name)
        # Roles are sent as a single comma-separated string.
        create_body = ('{{"Users":{{"password":"{0}","roles":"{1}"}} }}'.
                       format(user.password, '%s' % ','.
                              join(map(str, user.groups))))
        result = self._post(user_url, ambari_info, data=create_body)
        if result.status_code != 201:
            raise ex.HadoopProvisionError(
                _('Unable to create Ambari user: {0}').format(result.text))
    def delete_ambari_user(self, user_name, ambari_info):
        """Delete the named Ambari user.

        :raises ex.HadoopProvisionError: if deletion is not acknowledged
            with HTTP 200
        """
        user_url = 'http://{0}/api/v1/users/{1}'.format(
            ambari_info.get_address(), user_name)
        result = self._delete(user_url, ambari_info)
        if result.status_code != 200:
            raise ex.HadoopProvisionError(
                _('Unable to delete Ambari user: %(user_name)s'
                  ' : %(text)s') %
                {'user_name': user_name, 'text': result.text})
    def configure_scaled_cluster_instances(self, name, cluster_spec,
                                           num_hosts, ambari_info):
        """Configure newly added instances during a scale-up.

        Waits for all agents to register, then pushes configurations,
        services and components to the cluster and installs the services.
        The order of these calls matters: configs must exist before
        services/components reference them.
        """
        self.wait_for_host_registrations(num_hosts, ambari_info)
        self._add_configurations_to_cluster(
            cluster_spec, ambari_info, name)
        self._add_services_to_cluster(
            cluster_spec, ambari_info, name)
        self._add_components_to_services(
            cluster_spec, ambari_info, name)
        self._install_services(name, ambari_info)
    def start_scaled_cluster_instances(self, name, cluster_spec, servers,
                                       ambari_info):
        """Start services on a scaled cluster and bring up the new hosts."""
        self.start_services(name, cluster_spec, ambari_info)
        self._add_hosts_and_components(
            cluster_spec, servers, ambari_info, name)
        self._install_and_start_components(
            name, servers, ambari_info, cluster_spec)
    @cpo.event_wrapper(
        True, step=_("Decommission nodes"), param=('cluster', 1))
    def decommission_cluster_instances(self, cluster, clusterspec, instances,
                                       ambari_info):
        """Decommission the HDFS DataNodes running on ``instances``.

        Builds a NAMENODE DECOMMISSION request excluding the instances'
        hosts, posts it to Ambari, then polls the NameNode's LiveNodes
        metric until every host reports adminState 'Decommissioned' (or
        the DECOMMISSIONING_TIMEOUT plugin option expires).

        :raises ex.DecommissionError: if Ambari rejects the request
        """
        request_uri = self._get_command_request_uri(ambari_info, cluster.name)
        hosts_to_decommission = []
        # Decommission HDFS datanodes to avoid loss of data
        # during decommissioning process
        for instance in instances:
            ng_name = instance.node_group.name
            if "DATANODE" in clusterspec.node_groups[ng_name].components:
                # determine the instances that include HDFS support
                hosts_to_decommission.append(instance.fqdn())
        LOG.debug('AmbariClient: hosts_to_decommission = {hosts}'.format(
            hosts=str(hosts_to_decommission)))
        # template for request body
        body_header = ('{"RequestInfo" : { "context": "Decommission DataNode",'
                       ' "command" : "DECOMMISSION", "service_name" : "HDFS",'
                       ' "component_name" : "NAMENODE", '
                       ' "parameters" : { "slave_type" : "DATANODE", ')
        excluded_hosts_request = '"excluded_hosts" : "{0}"'
        # generate comma-separated list of hosts to de-commission
        list_of_hosts = ",".join(hosts_to_decommission)
        LOG.debug('AmbariClient: list_of_hosts = {hosts}'.format(
            hosts=list_of_hosts))
        # create the request body
        request_body = (
            body_header +
            excluded_hosts_request.format(list_of_hosts)
            + '}}'
            + ', "Requests/resource_filters":[{"service_name":"HDFS",'
            '"component_name":"NAMENODE"}]}')
        LOG.debug('AmbariClient: about to make decommission request, uri = '
                  '{uri}'.format(uri=request_uri))
        LOG.debug('AmbariClient: about to make decommission request, '
                  'request body = {body}'.format(body=request_body))
        # ask Ambari to decommission the datanodes
        result = self._post(request_uri, ambari_info, request_body)
        if result.status_code != 202:
            LOG.error(_LE('AmbariClient: error while making decommission post '
                          'request. Error is = {result}').format(
                result=result.text))
            raise ex.DecommissionError(
                _('An error occurred while trying to '
                  'decommission the DataNode instances that are '
                  'being shut down. '
                  'Please consult the Ambari server logs on the '
                  'master node for '
                  'more information about the failure.'))
        else:
            LOG.info(_LI('AmbariClient: decommission post request succeeded!'))
        status_template = ('http://{0}/api/v1/clusters/{1}/hosts/{2}/'
                           'host_components/{3}')
        # find the host that the NameNode is deployed on
        name_node_host = clusterspec.determine_component_hosts(
            'NAMENODE').pop()
        status_request = status_template.format(
            ambari_info.get_address(),
            cluster.name, name_node_host.fqdn(),
            'NAMENODE')
        LOG.debug('AmbariClient: about to make decommission status request,'
                  'uri = {uri}'.format(uri=status_request))
        # Poll until every host in hosts_to_decommission has been removed
        # from the list by process_decommission (i.e. is Decommissioned).
        poll_utils.plugin_option_poll(
            ambari_info.get_cluster(),
            self.process_decommission,
            cfgprov.DECOMMISSIONING_TIMEOUT, _("Decommission nodes"), 5,
            {'status_request': status_request, 'ambari_info': ambari_info,
             'hosts_to_decommission': hosts_to_decommission})
    def process_decommission(self, status_request, ambari_info,
                             hosts_to_decommission):
        """Polling predicate for DataNode decommissioning.

        Queries the NameNode's LiveNodes metric and removes every host that
        reports adminState 'Decommissioned' from ``hosts_to_decommission``
        (the list is mutated in place and shared across poll iterations).
        Returns True only once the list is empty at entry; otherwise returns
        False so the poller keeps retrying.
        """
        if len(hosts_to_decommission) == 0:
            # Nothing for decommissioning
            return True
        LOG.debug('AmbariClient: number of hosts waiting for '
                  'decommissioning to complete = {count}'.format(
                      count=str(len(hosts_to_decommission))))
        result = self._get(status_request, ambari_info)
        if result.status_code != 200:
            # Non-fatal: log and let the next poll iteration retry.
            LOG.error(_LE('AmbariClient: error in making decommission '
                          'status request, error = {result}').format(
                result=result.text))
        else:
            LOG.info(_LI('AmbariClient: decommission status request ok, '
                         'result = {result}').format(result=result.text))
            json_result = json.loads(result.text)
            live_nodes = (
                json_result['metrics']['dfs']['namenode']['LiveNodes'])
            # parse out the map of live hosts associated with the NameNode
            # (LiveNodes is itself a JSON-encoded string inside the result)
            json_result_nodes = json.loads(live_nodes)
            for node, val in six.iteritems(json_result_nodes):
                admin_state = val['adminState']
                if admin_state == 'Decommissioned':
                    LOG.debug('AmbariClient: node = {node} is '
                              'now in adminState = {admin_state}'.format(
                                  node=node, admin_state=admin_state))
                    # remove from list, to track which nodes
                    # are now in Decommissioned state
                    hosts_to_decommission.remove(node)
        return False
    def provision_cluster(self, cluster_spec, servers, ambari_info, name):
        """Provision a new cluster end to end.

        Creates the cluster, pushes configurations, services, components
        and hosts, installs the services, then installs Swift integration
        on the servers.  Call order matters.
        """
        self._add_cluster(ambari_info, name)
        self._add_configurations_to_cluster(cluster_spec, ambari_info, name)
        self._add_services_to_cluster(cluster_spec, ambari_info, name)
        self._add_components_to_services(cluster_spec, ambari_info, name)
        self._add_hosts_and_components(
            cluster_spec, servers, ambari_info, name)
        self._install_services(name, ambari_info)
        self.handler.install_swift_integration(servers)
    def cleanup(self, ambari_info):
        """Close the cached HTTP session to the Ambari server, if any."""
        try:
            ambari_info.host.remote().close_http_session(ambari_info.port)
        except exc.NotFoundException:
            # No session was cached for this port; nothing to clean up.
            LOG.debug("HTTP session is not cached")
def _get_services_in_state(self, cluster_name, ambari_info, state):
services_url = ('http://{0}/api/v1/clusters/{1}/services?'
'ServiceInfo/state.in({2})'.format(
ambari_info.get_address(), cluster_name, state))
result = self._get(services_url, ambari_info)
json_result = json.loads(result.text)
services = []
for service in json_result['items']:
services.append(service['ServiceInfo']['service_name'])
return services
    def _fire_service_start_notifications(self, cluster_name,
                                          cluster_spec, ambari_info):
        """Run pre-start hooks for deployed services not yet STARTED.

        For every deployed service that Ambari does not already report as
        STARTED, invoke its ``pre_service_start`` hook.
        """
        started_services = self._get_services_in_state(
            cluster_name, ambari_info, 'STARTED')
        for service in cluster_spec.services:
            if service.deployed and service.name not in started_services:
                service.pre_service_start(cluster_spec, ambari_info,
                                          started_services)
def setup_hdfs_ha(self, cluster_spec, servers, ambari_info, name):
# Get HA cluster map
hac = self._hdfs_ha_cluster_map(cluster_spec, servers,
ambari_info, name)
# start active namenode in order to format and save namesapce
self._hdfs_ha_update_host_component(hac, hac['nn_active'],
'NAMENODE', 'STARTED')
hac['server_active'].set_namenode_safemode(hac['java_home'])
hac['server_active'].save_namenode_namespace(hac['java_home'])
# shutdown active namenode
self._hdfs_ha_update_host_component(hac, hac['nn_active'],
'NAMENODE', 'INSTALLED')
# Install HDFS_CLIENT on namenodes, to be used later for updating
# HDFS configs
if hac['nn_active'] not in hac['hdfsc_hosts']:
self._hdfs_ha_add_host_component(hac, hac['nn_active'],
'HDFS_CLIENT')
if hac['nn_standby'] not in hac['hdfsc_hosts']:
self._hdfs_ha_add_host_component(hac, hac['nn_standby'],
'HDFS_CLIENT')
# start the journal_nodes
for jn in hac['jn_hosts']:
self._hdfs_ha_update_host_component(hac, jn,
'JOURNALNODE', 'STARTED')
# disable any secondary namnodes
for snn in hac['snn_hosts']:
self._hdfs_ha_update_host_component(hac, snn,
'SECONDARY_NAMENODE',
'DISABLED')
# get hdfs-site config tag
hdfs_site_tag = self._hdfs_ha_get_config_tag(hac, 'hdfs-site')
# get hdfs-site config
hdfs_site = self._hdfs_ha_get_config(hac, 'hdfs-site', hdfs_site_tag)
# update hdfs-site with HDFS HA properties
hdfs_site_ha = self._hdfs_ha_update_hdfs_site(hac, hdfs_site)
# put new hdfs-site config
self._hdfs_ha_put_config(hac, 'hdfs-site', hac['config_ver'],
hdfs_site_ha)
# get core-site tag
core_site_tag = self._hdfs_ha_get_config_tag(hac, 'core-site')
# get core-site config
core_site = self._hdfs_ha_get_config(hac, 'core-site', core_site_tag)
# update core-site with HDFS HA properties
core_site_ha = self._hdfs_ha_update_core_site(hac, core_site)
# put new HA core-site config
self._hdfs_ha_put_config(hac, 'core-site', hac['config_ver'],
core_site_ha)
# update hbase-site if Hbase is installed
if hac['hbase_hosts']:
hbase_site_tag = self._hdfs_ha_get_config_tag(hac, 'hbase-site')
hbase_site = self._hdfs_ha_get_config(hac, 'hbase-site',
hbase_site_tag)
hbase_site_ha = self._hdfs_ha_update_hbase_site(hac, hbase_site)
self._hdfs_ha_put_config(hac, 'hbase_site', hac['config_ver'],
hbase_site_ha)
# force the deployment of HDFS HA configs on namenodes by re-installing
# hdfs-client
self._hdfs_ha_update_host_component(hac, hac['nn_active'],
'HDFS_CLIENT', 'INSTALLED')
self._hdfs_ha_update_host_component(hac, hac['nn_standby'],
'HDFS_CLIENT', 'INSTALLED')
# initialize shared edits on the active namenode
hac['server_active'].initialize_shared_edits(hac['java_home'])
# start zookeeper servers
for zk in hac['zk_hosts']:
self._hdfs_ha_update_host_component(hac, zk,
'ZOOKEEPER_SERVER', 'STARTED')
# start active namenode
self._hdfs_ha_update_host_component(hac, hac['nn_active'],
'NAMENODE', 'STARTED')
# setup active namenode automatic failover
hac['server_active'].format_zookeeper_fc(hac['java_home'])
# format standby namenode
hac['server_standby'].bootstrap_standby_namenode(hac['java_home'])
# start namenode process on standby namenode
self._hdfs_ha_update_host_component(hac, hac['nn_standby'],
'NAMENODE', 'STARTED')
# add, install and start ZKFC on namenodes for automatic fail-over
for nn in hac['nn_hosts']:
self._hdfs_ha_add_host_component(hac, nn, 'ZKFC')
self._hdfs_ha_update_host_component(hac, nn, 'ZKFC', 'INSTALLED')
self._hdfs_ha_update_host_component(hac, nn, 'ZKFC', 'STARTED')
# delete any secondary namenodes
for snn in hac['snn_hosts']:
self._hdfs_ha_delete_host_component(hac, snn, 'SECONDARY_NAMENODE')
# stop journalnodes and namenodes before terminating
# not doing so causes warnings in Ambari for stale config
for jn in hac['jn_hosts']:
self._hdfs_ha_update_host_component(hac, jn, 'JOURNALNODE',
'INSTALLED')
for nn in hac['nn_hosts']:
self._hdfs_ha_update_host_component(hac, nn, 'NAMENODE',
'INSTALLED')
# install httpfs and write temp file if HUE is installed
if hac['hue_host']:
self._hdfs_ha_setup_hue(hac)
    def _hdfs_ha_cluster_map(self, cluster_spec, servers, ambari_info, name):
        """Build the dict of HA-related cluster facts used by setup_hdfs_ha.

        Collects host lists per component, default ports, JAVA_HOME and the
        server objects for the two NameNodes (and HUE, if deployed).
        Assumes the cluster has at least two NAMENODE hosts -- index [1]
        below raises IndexError otherwise.
        """
        hacluster = {}
        hacluster['name'] = name
        # All HA configs are pushed under this new version tag.
        hacluster['config_ver'] = 'v2'
        # set JAVA_HOME
        global_config = cluster_spec.configurations.get('global', None)
        global_config_jh = (global_config.get('java64_home', None) or
                            global_config.get('java_home', None) if
                            global_config else None)
        hacluster['java_home'] = global_config_jh or '/opt/jdk1.6.0_31'
        # set namenode ports
        hacluster['nn_rpc'] = '8020'
        hacluster['nn_ui'] = '50070'
        hacluster['ambari_info'] = ambari_info
        # get host lists
        hacluster['nn_hosts'] = [x.fqdn().lower() for x in
                                 cluster_spec.determine_component_hosts(
                                     'NAMENODE')]
        hacluster['snn_hosts'] = [x.fqdn().lower() for x in
                                  cluster_spec.determine_component_hosts(
                                      'SECONDARY_NAMENODE')]
        hacluster['jn_hosts'] = [x.fqdn().lower() for x in
                                 cluster_spec.determine_component_hosts(
                                     'JOURNALNODE')]
        hacluster['zk_hosts'] = [x.fqdn().lower() for x in
                                 cluster_spec.determine_component_hosts(
                                     'ZOOKEEPER_SERVER')]
        hacluster['hdfsc_hosts'] = [x.fqdn().lower() for x in
                                    cluster_spec.determine_component_hosts(
                                        'HDFS_CLIENT')]
        hacluster['hbase_hosts'] = [x.fqdn().lower() for x in
                                    cluster_spec.determine_component_hosts(
                                        'HBASE_MASTER')]
        hacluster['hue_host'] = [x.fqdn().lower() for x in
                                 cluster_spec.determine_component_hosts('HUE')]
        # get servers for remote command execution
        # consider hacluster['nn_hosts'][0] as active namenode
        hacluster['nn_active'] = hacluster['nn_hosts'][0]
        hacluster['nn_standby'] = hacluster['nn_hosts'][1]
        # get the 2 namenode servers and hue server
        for server in servers:
            if server.instance.fqdn().lower() == hacluster['nn_active']:
                hacluster['server_active'] = server
            if server.instance.fqdn().lower() == hacluster['nn_standby']:
                hacluster['server_standby'] = server
            if hacluster['hue_host']:
                if server.instance.fqdn().lower() == hacluster['hue_host'][0]:
                    hacluster['server_hue'] = server
        return hacluster
    def _hdfs_ha_delete_host_component(self, hac, host, component):
        """Delete a host component via the Ambari REST API.

        :raises ex.NameNodeHAConfigurationError: on any non-200 response
        """
        delete_service_component_url = ('http://{0}/api/v1/clusters/{1}/hosts'
                                        '/{2}/host_components/{3}').format(
            hac['ambari_info'].get_address(),
            hac['name'], host, component)
        result = self._delete(delete_service_component_url, hac['ambari_info'])
        if result.status_code != 200:
            LOG.error(_LE('Configuring HDFS HA failed. {result}').format(
                result=result.text))
            raise ex.NameNodeHAConfigurationError(
                _('Configuring HDFS HA failed. %s') % result.text)
    def _hdfs_ha_add_host_component(self, hac, host, component):
        """Add a host component via the Ambari REST API.

        :raises ex.NameNodeHAConfigurationError: unless Ambari answers
            201 (created)
        """
        add_host_component_url = ('http://{0}/api/v1/clusters/{1}'
                                  '/hosts/{2}/host_components/{3}').format(
            hac['ambari_info'].get_address(),
            hac['name'], host, component)
        result = self._post(add_host_component_url, hac['ambari_info'])
        if result.status_code != 201:
            LOG.error(_LE('Configuring HDFS HA failed. {result}').format(
                result=result.text))
            raise ex.NameNodeHAConfigurationError(
                _('Configuring HDFS HA failed. %s') % result.text)
    def _hdfs_ha_update_host_component(self, hac, host, component, state):
        """Move a host component to ``state`` and wait for completion.

        A 202 response means Ambari queued an asynchronous request, which
        is then polled to completion; a 200 response means the change was
        applied synchronously (or was a no-op).

        :raises ex.NameNodeHAConfigurationError: if the request is rejected
            or the asynchronous operation fails
        """
        update_host_component_url = ('http://{0}/api/v1/clusters/{1}'
                                     '/hosts/{2}/host_components/{3}').format(
            hac['ambari_info'].get_address(),
            hac['name'], host, component)
        component_state = {"HostRoles": {"state": state}}
        body = json.dumps(component_state)
        result = self._put(update_host_component_url,
                           hac['ambari_info'], data=body)
        if result.status_code == 202:
            json_result = json.loads(result.text)
            request_id = json_result['Requests']['id']
            success = self._wait_for_async_request(self._get_async_request_uri(
                hac['ambari_info'], hac['name'], request_id),
                hac['ambari_info'])
            if success:
                LOG.info(_LI("HDFS-HA: Host component updated successfully: "
                             "{host} {component}").format(host=host,
                                                          component=component))
            else:
                LOG.error(_LE("HDFS-HA: Host component update failed: "
                              "{host} {component}").format(
                    host=host, component=component))
                raise ex.NameNodeHAConfigurationError(
                    _('Configuring HDFS HA failed. %s') % result.text)
        elif result.status_code != 200:
            LOG.error(
                _LE('Configuring HDFS HA failed. {result}').format(
                    result=result.text))
            raise ex.NameNodeHAConfigurationError(
                _('Configuring HDFS HA failed. %s') % result.text)
    def _hdfs_ha_get_config_tag(self, hac, config_name):
        """Return the tag of the first configuration of type ``config_name``.

        :raises ex.NameNodeHAConfigurationError: on any non-200 response
        """
        config_url = ('http://{0}/api/v1/clusters/{1}'
                      '/configurations?type={2}').format(
            hac['ambari_info'].get_address(), hac['name'],
            config_name)
        result = self._get(config_url, hac['ambari_info'])
        if result.status_code == 200:
            json_result = json.loads(result.text)
            items = json_result['items']
            # assumes the first listed item is the relevant config version
            return items[0]['tag']
        else:
            LOG.error(
                _LE('Configuring HDFS HA failed. {result}').format(
                    result=result.text))
            raise ex.NameNodeHAConfigurationError(
                _('Configuring HDFS HA failed. %s') % result.text)
    def _hdfs_ha_get_config(self, hac, config_name, tag):
        """Return the properties dict for a config of the given type and tag.

        :raises ex.NameNodeHAConfigurationError: on any non-200 response
        """
        config_url = ('http://{0}/api/v1/clusters/{1}'
                      '/configurations?type={2}&tag={3}').format(
            hac['ambari_info'].get_address(), hac['name'],
            config_name, tag)
        result = self._get(config_url, hac['ambari_info'])
        if result.status_code == 200:
            json_result = json.loads(result.text)
            items = json_result['items']
            return items[0]['properties']
        else:
            LOG.error(
                _LE('Configuring HDFS HA failed. {result}').format(
                    result=result.text))
            raise ex.NameNodeHAConfigurationError(
                _('Configuring HDFS HA failed. %s') % result.text)
def _hdfs_ha_put_config(self, hac, config_name, tag, properties):
config_url = ('http://{0}/api/v1/clusters/{1}').format(
hac['ambari_info'].get_address(), hac['name'])
body = {}
clusters = {}
body['Clusters'] = clusters
body['Clusters']['desired_config'] = {}
body['Clusters']['desired_config']['type'] = config_name
body['Clusters']['desired_config']['tag'] = tag
body['Clusters']['desired_config']['properties'] = properties
LOG.debug("body: {body}".format(body=body))
result = self._put(config_url, hac['ambari_info'],
data=json.dumps(body))
if result.status_code != 200:
LOG.error(
_LE('Configuring HDFS HA failed. {result}').format(
result=result.text))
raise ex.NameNodeHAConfigurationError(
_('Configuring HDFS HA failed. %s') % result.text)
def _hdfs_ha_update_hdfs_site(self, hac, hdfs_site):
hdfs_site['dfs.nameservices'] = hac['name']
hdfs_site['dfs.ha.namenodes.{0}'.format(
hac['name'])] = hac['nn_active'] + ',' + hac['nn_standby']
hdfs_site['dfs.namenode.rpc-address.{0}.{1}'.format(
hac['name'], hac['nn_active'])] = '{0}:{1}'.format(
hac['nn_active'], hac['nn_rpc'])
hdfs_site['dfs.namenode.rpc-address.{0}.{1}'.format(
hac['name'], hac['nn_standby'])] = '{0}:{1}'.format(
hac['nn_standby'], hac['nn_rpc'])
hdfs_site['dfs.namenode.http-address.{0}.{1}'.format(
hac['name'], hac['nn_active'])] = '{0}:{1}'.format(
hac['nn_active'], hac['nn_ui'])
hdfs_site['dfs.namenode.http-address.{0}.{1}'.format(
hac['name'], hac['nn_standby'])] = '{0}:{1}'.format(
hac['nn_standby'], hac['nn_ui'])
qjournal = ';'.join([x+':8485' for x in hac['jn_hosts']])
hdfs_site['dfs.namenode.shared.edits.dir'] = ('qjournal://{0}/{1}'.
format(qjournal,
hac['name']))
hdfs_site['dfs.client.failover.proxy.provider.{0}'.format(
hac['name'])] = ("org.apache.hadoop.hdfs.server.namenode.ha."
"ConfiguredFailoverProxyProvider")
hdfs_site['dfs.ha.fencing.methods'] = 'shell(/bin/true)'
hdfs_site['dfs.ha.automatic-failover.enabled'] = 'true'
return hdfs_site
def _hdfs_ha_update_core_site(self, hac, core_site):
core_site['fs.defaultFS'] = 'hdfs://{0}'.format(hac['name'])
core_site['ha.zookeeper.quorum'] = '{0}'.format(
','.join([x+':2181' for x in hac['zk_hosts']]))
# if HUE is installed add some httpfs configs
if hac['hue_host']:
core_site['hadoop.proxyuser.httpfs.groups'] = '*'
core_site['hadoop.proxyuser.httpfs.hosts'] = '*'
return core_site
def _hdfs_ha_update_hbase_site(self, hac, hbase_site):
hbase_site['hbase.rootdir'] = 'hdfs://{0}/apps/hbase/data'.format(
hac['name'])
return hbase_site
    def _hdfs_ha_setup_hue(self, hac):
        """Prepare the HUE host for HDFS HA: install httpfs, drop a marker.

        The temp file carries the nameservice name so HUE can be started
        with HDFS HA enabled later.
        """
        hac['server_hue'].install_httpfs()
        # write a temp file and
        # use it when starting HUE with HDFS HA enabled
        hac['server_hue'].write_hue_temp_file('/tmp/hueini-hdfsha',
                                              hac['name'])
| |
import pandas as pd
import json
import io
from ..core import mac_address_to_id
def _id_to_member_mapping_fill_gaps(idmap, time_bins_size='1min'):
""" Fill gaps in a idmap
Parameters
----------
idmap : id mapping object
time_bins_size : str
The size of the time bins used for resampling. Defaults to '1min'.
Returns
-------
pd.DataFrame :
idmap, after filling gaps.
"""
df = idmap.to_frame().reset_index()
df.set_index('datetime', inplace=True)
s = df.groupby(['id'])['member'].resample(time_bins_size).fillna(method='ffill')
s = s.reorder_levels((1,0)).sort_index()
return s
def legacy_id_to_member_mapping(fileobject, time_bins_size='1min', tz='US/Eastern', fill_gaps=True):
    """Creates a mapping from badge id to member, for each time bin, from proximity data file.

    Depending on the version of the logfile (and its content), it will either use the
    member_id field to generate the mapping (newer version), or calculate an id from
    the MAC address (this was the default behavior of the older version of the hubs
    and badges).

    Parameters
    ----------
    fileobject : file or iterable list of str
        The proximity data, as an iterable of JSON strings.
    time_bins_size : str
        The size of the time bins used for resampling. Defaults to '1min'.
    tz : str
        The time zone used for localization of dates. Defaults to 'US/Eastern'.
    fill_gaps : boolean
        If True, the code will ensure that a value exists for every time bin by
        filling the gaps with the last seen value.

    Returns
    -------
    pd.Series :
        A mapping from badge id to member, indexed by datetime and id.
    """
    def readfile(fileobject):
        # Warn at most once when falling back to MAC-derived ids.
        no_id_warning = False
        for line in fileobject:
            data = json.loads(line)['data']
            member_id = None
            if 'member_id' in data:
                member_id = data['member_id']
            else:
                member_id = mac_address_to_id(data['badge_address'])
                if not no_id_warning:
                    print("Warning - no id provided in data. Calculating id from MAC address")
                    no_id_warning = True
            yield (data['timestamp'],
                   member_id,
                   str(data['member']))
    df = pd.DataFrame(readfile(fileobject), columns=['timestamp', 'id', 'member'])
    # Convert the timestamp to a tz-aware datetime and convert to `tz`.
    # to_datetime(..., utc=True) already returns a UTC-localized series, so
    # the previous extra .dt.tz_localize('UTC') raised on modern pandas.
    df['datetime'] = pd.to_datetime(df['timestamp'], unit='s', utc=True) \
                       .dt.tz_convert(tz)
    del df['timestamp']
    # Group by id and resample; keep the first member seen per bin.
    # pd.Grouper replaces the removed pd.TimeGrouper.
    df = df.groupby([
        pd.Grouper(key='datetime', freq=time_bins_size),
        'id'
    ]).first()
    # Extract series
    s = df.sort_index()['member']
    # Fill in gaps, if requested to do so
    if fill_gaps:
        s = _id_to_member_mapping_fill_gaps(s, time_bins_size=time_bins_size)
    return s
def id_to_member_mapping(mapper, time_bins_size='1min', tz='US/Eastern', fill_gaps=True):
    """Creates a pd.Series mapping member numeric IDs to the string
    member key associated with them.

    If `mapper` is a DataFrame, assumes it's metadata and that IDs do not
    change mapping throughout the project, and produces a Series with only
    a member-id index.  If `mapper` is a file object, assumes the old
    version of id_map and produces a Series with a datetime and id index.

    Parameters
    ----------
    mapper : file object or pd.DataFrame
        Either a data file to read (legacy format), or a metadata dataframe,
        as downloaded from the server, mapping IDs to keys.
    time_bins_size : str
        The size of the time bins used for resampling (file input only).
    tz : str
        The time zone used for localization of dates (file input only).
    fill_gaps : boolean
        Whether to forward-fill missing time bins (file input only).

    Returns
    -------
    pd.Series :
        The ID to member key mapping.

    Raises
    ------
    ValueError
        If `mapper` is neither a file object nor a DataFrame.
    """
    # Accept Python 3 io objects as well as (on Python 2) builtin `file`
    # objects.  The original `isinstance(mapper, file)` raised NameError on
    # Python 3, and `|` was used where a boolean `or` was intended.
    is_fileobject = isinstance(mapper, io.IOBase)
    if not is_fileobject:
        try:
            is_fileobject = isinstance(mapper, file)  # noqa: F821 (Py2 only)
        except NameError:
            pass
    if is_fileobject:
        idmap = legacy_id_to_member_mapping(mapper, time_bins_size=time_bins_size, tz=tz, fill_gaps=fill_gaps)
        return idmap
    elif isinstance(mapper, pd.DataFrame):
        idmap = {row.member_id: row.member for row in mapper.itertuples()}
        return pd.DataFrame.from_dict(idmap, orient='index')[0].rename('member')
    else:
        raise ValueError("You must provide either a fileobject or metadata dataframe as the mapper.")
def voltages(fileobject, time_bins_size='1min', tz='US/Eastern', skip_errors=False):
    """Creates a Series of voltages, for each member and time bin.

    Parameters
    ----------
    fileobject : file or iterable list of str
        The proximity data, as an iterable of JSON strings.
    time_bins_size : str
        The size of the time bins used for resampling. Defaults to '1min'.
    tz : str
        The time zone used for localization of dates. Defaults to 'US/Eastern'.
    skip_errors : boolean
        If set to True, skip errors in the data file.

    Returns
    -------
    pd.Series :
        Voltages (mean per bin), indexed by datetime and member.
    """
    def readfile(fileobject, skip_errors):
        i = 0
        for line in fileobject:
            i = i + 1
            try:
                data = json.loads(line)['data']
                yield (data['timestamp'],
                       str(data['member']),
                       float(data['voltage']))
            # Catch Exception rather than a bare except so that
            # KeyboardInterrupt/SystemExit still propagate.
            except Exception:
                print("Error in line#:", i, line)
                if skip_errors:
                    continue
                else:
                    raise
    df = pd.DataFrame(readfile(fileobject, skip_errors), columns=['timestamp', 'member', 'voltage'])
    # Convert the timestamp to a tz-aware datetime and convert to `tz`.
    # to_datetime(..., utc=True) already returns a UTC-localized series, so
    # no extra tz_localize step is needed (it raised on modern pandas).
    df['datetime'] = pd.to_datetime(df['timestamp'], unit='s', utc=True) \
                       .dt.tz_convert(tz)
    del df['timestamp']
    # Group by member and resample; pd.Grouper replaces pd.TimeGrouper.
    df = df.groupby([
        pd.Grouper(key='datetime', freq=time_bins_size),
        'member'
    ]).mean()
    df.sort_index(inplace=True)
    return df['voltage']
def sample_counts(fileobject, tz='US/Eastern', keep_type=False, skip_errors=False):
    """Creates a DataFrame of sample counts, for each member and raw record.

    Parameters
    ----------
    fileobject : file or iterable list of str
        The proximity or audio data, as an iterable of JSON strings.
    tz : str
        The time zone used for localization of dates. Defaults to 'US/Eastern'.
    keep_type : boolean
        If set to True, the type of the record will be kept in the index.
    skip_errors : boolean
        If set to True, skip errors in the data file.

    Returns
    -------
    pd.DataFrame :
        Counts ('cnt' column), indexed by datetime, (type,) and member.
        Unknown record types get a count of -1.
    """
    def readfile(fileobject, skip_errors=False):
        i = 0
        for line in fileobject:
            i = i + 1
            try:
                raw_data = json.loads(line)
                data = raw_data['data']
                # Renamed from `type` to avoid shadowing the builtin.
                record_type = raw_data['type']
                if record_type == 'proximity received':
                    cnt = len(data['rssi_distances'])
                elif record_type == 'audio received':
                    cnt = len(data['samples'])
                else:
                    cnt = -1
                yield (data['timestamp'],
                       str(record_type),
                       str(data['member']),
                       int(cnt))
            # Catch Exception rather than a bare except so that
            # KeyboardInterrupt/SystemExit still propagate.
            except Exception:
                print("Error in line#:", i, line)
                if skip_errors:
                    continue
                else:
                    raise
    df = pd.DataFrame(readfile(fileobject, skip_errors), columns=['timestamp', 'type', 'member',
                                                                  'cnt'])
    # Convert the timestamp to a tz-aware datetime and convert to `tz`.
    # to_datetime(..., utc=True) already returns a UTC-localized series, so
    # no extra tz_localize step is needed (it raised on modern pandas).
    df['datetime'] = pd.to_datetime(df['timestamp'], unit='s', utc=True) \
                       .dt.tz_convert(tz)
    del df['timestamp']
    if keep_type:
        df.set_index(['datetime', 'type', 'member'], inplace=True)
    else:
        del df['type']
        df.set_index(['datetime', 'member'], inplace=True)
    df.sort_index(inplace=True)
    return df
| |
##
# Copyright (c) 2005-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import os
from twistedcaldav.stdconfig import config
from calendarserver.tools.principals import (
parseCreationArgs, matchStrings,
recordForPrincipalID, getProxies, setProxies
)
from twext.python.filepath import CachingFilePath as FilePath
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, Deferred, returnValue
from twistedcaldav.test.util import (
TestCase, StoreTestCase, CapturingProcessProtocol, ErrorOutput
)
class ManagePrincipalsTestCase(TestCase):
    def setUp(self):
        """Prepare an isolated server configuration for the spawned tool.

        Renders the test caldavd.plist template with per-test roots, writes
        it under ConfigRoot, loads it into the live config, and copies the
        accounts/resources/augments XML fixtures into DataRoot.
        """
        super(ManagePrincipalsTestCase, self).setUp()
        # # Since this test operates on proxy db, we need to assign the service:
        # calendaruserproxy.ProxyDBService = calendaruserproxy.ProxySqliteDB(os.path.abspath(self.mktemp()))
        testRoot = os.path.join(os.path.dirname(__file__), "principals")
        templateName = os.path.join(testRoot, "caldavd.plist")
        templateFile = open(templateName)
        template = templateFile.read()
        templateFile.close()
        # Use the pid so concurrently running test processes don't share a
        # database root.
        databaseRoot = os.path.abspath("_spawned_scripts_db" + str(os.getpid()))
        newConfig = template % {
            "ServerRoot": os.path.abspath(config.ServerRoot),
            "DataRoot": os.path.abspath(config.DataRoot),
            "DatabaseRoot": databaseRoot,
            "DocumentRoot": os.path.abspath(config.DocumentRoot),
            "LogRoot": os.path.abspath(config.LogRoot),
        }
        configFilePath = FilePath(os.path.join(config.ConfigRoot, "caldavd.plist"))
        configFilePath.setContent(newConfig)
        self.configFileName = configFilePath.path
        config.load(self.configFileName)
        origUsersFile = FilePath(
            os.path.join(
                os.path.dirname(__file__),
                "principals",
                "users-groups.xml"
            )
        )
        copyUsersFile = FilePath(os.path.join(config.DataRoot, "accounts.xml"))
        origUsersFile.copyTo(copyUsersFile)
        origResourcesFile = FilePath(
            os.path.join(
                os.path.dirname(__file__),
                "principals",
                "resources-locations.xml"
            )
        )
        copyResourcesFile = FilePath(os.path.join(config.DataRoot, "resources.xml"))
        origResourcesFile.copyTo(copyResourcesFile)
        origAugmentFile = FilePath(
            os.path.join(
                os.path.dirname(__file__),
                "principals",
                "augments.xml"
            )
        )
        copyAugmentFile = FilePath(os.path.join(config.DataRoot, "augments.xml"))
        origAugmentFile.copyTo(copyAugmentFile)
        # Make sure trial puts the reactor in the right state, by letting it
        # run one reactor iteration. (Ignore me, please.)
        d = Deferred()
        reactor.callLater(0, d.callback, True)
        return d
@inlineCallbacks
def runCommand(self, *additional):
"""
Run calendarserver_manage_principals, passing additional as args.
"""
sourceRoot = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
cmd = "calendarserver_manage_principals" # assumes it's on PATH
args = [cmd, "-f", self.configFileName]
args.extend(additional)
cwd = sourceRoot
deferred = Deferred()
reactor.spawnProcess(CapturingProcessProtocol(deferred, None), cmd, args, env=os.environ, path=cwd)
output = yield deferred
returnValue(output)
@inlineCallbacks
def test_help(self):
results = yield self.runCommand("--help")
self.assertTrue(results.startswith("usage:"))
@inlineCallbacks
def test_principalTypes(self):
results = yield self.runCommand("--list-principal-types")
self.assertTrue("groups" in results)
self.assertTrue("users" in results)
self.assertTrue("locations" in results)
self.assertTrue("resources" in results)
self.assertTrue("addresses" in results)
@inlineCallbacks
def test_listPrincipals(self):
results = yield self.runCommand("--list-principals=users")
for i in xrange(1, 10):
self.assertTrue("user%02d" % (i,) in results)
@inlineCallbacks
def test_search(self):
results = yield self.runCommand("--search=user")
self.assertTrue("10 matches found" in results)
for i in xrange(1, 10):
self.assertTrue("user%02d" % (i,) in results)
@inlineCallbacks
def test_addRemove(self):
results = yield self.runCommand(
"--add", "resources",
"New Resource", "newresource", "newresourceuid"
)
self.assertTrue("Added 'New Resource'" in results)
results = yield self.runCommand(
"--get-auto-schedule-mode",
"resources:newresource"
)
self.assertTrue(
results.startswith(
'Auto-schedule mode for "New Resource" newresourceuid (resource) newresource is accept if free, decline if busy'
)
)
results = yield self.runCommand("--list-principals=resources")
self.assertTrue("newresource" in results)
results = yield self.runCommand(
"--add", "resources", "New Resource",
"newresource1", "newresourceuid"
)
self.assertTrue("UID already in use: newresourceuid" in results)
results = yield self.runCommand(
"--add", "resources", "New Resource",
"newresource", "uniqueuid"
)
self.assertTrue("Record name already in use" in results)
results = yield self.runCommand("--remove", "resources:newresource")
self.assertTrue("Removed 'New Resource'" in results)
results = yield self.runCommand("--list-principals=resources")
self.assertFalse("newresource" in results)
def test_parseCreationArgs(self):
self.assertEquals(
("full name", "short name", "uid"),
parseCreationArgs(("full name", "short name", "uid"))
)
def test_matchStrings(self):
self.assertEquals("abc", matchStrings("a", ("abc", "def")))
self.assertEquals("def", matchStrings("de", ("abc", "def")))
self.assertRaises(
ValueError,
matchStrings, "foo", ("abc", "def")
)
@inlineCallbacks
def test_modifyWriteProxies(self):
results = yield self.runCommand(
"--add-write-proxy=users:user01", "locations:location01"
)
self.assertTrue(
results.startswith('Added "User 01" user01 (user) user01 as a write proxy for "Room 01" location01 (location) location01')
)
results = yield self.runCommand(
"--list-write-proxies", "locations:location01"
)
self.assertTrue("User 01" in results)
results = yield self.runCommand(
"--remove-proxy=users:user01", "locations:location01"
)
results = yield self.runCommand(
"--list-write-proxies", "locations:location01"
)
self.assertTrue(
'No write proxies for "Room 01" location01 (location) location01' in results
)
@inlineCallbacks
def test_modifyReadProxies(self):
results = yield self.runCommand(
"--add-read-proxy=users:user01", "locations:location01"
)
self.assertTrue(
results.startswith('Added "User 01" user01 (user) user01 as a read proxy for "Room 01" location01 (location) location01')
)
results = yield self.runCommand(
"--list-read-proxies", "locations:location01"
)
self.assertTrue("User 01" in results)
results = yield self.runCommand(
"--remove-proxy=users:user01", "locations:location01"
)
results = yield self.runCommand(
"--list-read-proxies", "locations:location01"
)
self.assertTrue(
'No read proxies for "Room 01" location01 (location) location01' in results
)
@inlineCallbacks
def test_autoScheduleMode(self):
results = yield self.runCommand(
"--get-auto-schedule-mode", "locations:location01"
)
self.assertTrue(
results.startswith('Auto-schedule mode for "Room 01" location01 (location) location01 is accept if free, decline if busy')
)
results = yield self.runCommand(
"--set-auto-schedule-mode=accept-if-free", "locations:location01"
)
self.assertTrue(
results.startswith('Setting auto-schedule-mode to accept if free for "Room 01" location01 (location) location01')
)
results = yield self.runCommand(
"--get-auto-schedule-mode",
"locations:location01"
)
self.assertTrue(
results.startswith('Auto-schedule mode for "Room 01" location01 (location) location01 is accept if free')
)
results = yield self.runCommand(
"--set-auto-schedule-mode=decline-if-busy", "users:user01"
)
self.assertTrue(results.startswith('Setting auto-schedule-mode for "User 01" user01 (user) user01 is not allowed.'))
try:
results = yield self.runCommand(
"--set-auto-schedule-mode=bogus",
"users:user01"
)
except ErrorOutput:
pass
else:
self.fail("Expected command failure")
class SetProxiesTestCase(StoreTestCase):
    """Tests for the setProxies/getProxies helper functions."""

    @inlineCallbacks
    def test_setProxies(self):
        """
        Read and Write proxies can be set en masse
        """
        directory = self.directory
        record = yield recordForPrincipalID(directory, "users:user01")

        readProxies, writeProxies = yield getProxies(record)
        self.assertEquals(readProxies, [])  # initially empty
        self.assertEquals(writeProxies, [])  # initially empty

        # Assign two read proxies and one write proxy in a single call.
        readProxies = [
            (yield recordForPrincipalID(directory, "users:user03")),
            (yield recordForPrincipalID(directory, "users:user04")),
        ]
        writeProxies = [
            (yield recordForPrincipalID(directory, "users:user05")),
        ]
        yield setProxies(record, readProxies, writeProxies)

        readProxies, writeProxies = yield getProxies(record)
        self.assertEquals(set([r.uid for r in readProxies]), set(["user03", "user04"]))
        self.assertEquals(set([r.uid for r in writeProxies]), set(["user05"]))

        # Using None for a proxy list indicates a no-op
        yield setProxies(record, [], None)

        readProxies, writeProxies = yield getProxies(record)
        self.assertEquals(readProxies, [])  # now empty
        self.assertEquals(set([r.uid for r in writeProxies]), set(["user05"]))  # unchanged
| |
#!/usr/bin/env python
# coding=utf-8
import unittest
from mock import Mock
from src.gene import Gene
from src.sequence import Sequence
from src.filters import *
class TestFilters(unittest.TestCase):
    """Unit tests for the length-based gene/mRNA filters.

    Each mRNA-filter test builds a Sequence of three real Gene objects named
    foo1..foo3, attaches four mock mRNAs distributed as [m0], [m1, m2], [m3],
    applies one filter, and asserts which mRNA lists survive untouched.
    The previously copy-pasted ~40-line fixtures are factored into the
    private _make_* / _attach helpers below; behavior is unchanged.
    """

    # ---------------- shared fixture builders ----------------

    def _make_seq(self):
        """Return a Sequence holding three un-flagged Gene objects foo1..foo3."""
        seq = Sequence()
        seq.genes = [Gene('foo_seq', 'geib_labs', [1, 2], '+', name)
                     for name in ('foo1', 'foo2', 'foo3')]
        for gene in seq.genes:
            gene.death_flagged = False
        return seq

    def _make_mrna(self, identifier):
        """Return a bare mock mRNA that is not death-flagged."""
        mrna = Mock()
        mrna.identifier = identifier
        mrna.death_flagged = False
        return mrna

    def _make_cds_mrna(self, identifier, length):
        """Mock mRNA whose cds.length() returns `length`; cds is None when
        `length` is None (CDS-less mRNAs must be ignored by the filters)."""
        mrna = self._make_mrna(identifier)
        if length is None:
            mrna.cds = None
        else:
            mrna.cds = Mock()
            mrna.cds.length = Mock(return_value=length)
        return mrna

    def _make_exon_mrna(self, identifier, length, feature='exon'):
        """Mock mRNA whose get_shortest_/get_longest_<feature> return `length`.

        When `length` is None, mrna.exon is set to None instead (exon-less
        mRNAs must be ignored).  The intron filters also key off the `exon`
        attribute, which is why it is set for feature='intron' as well.
        """
        mrna = self._make_mrna(identifier)
        if length is None:
            mrna.exon = None
        else:
            mrna.exon = Mock()
            setattr(mrna, 'get_shortest_' + feature, Mock(return_value=length))
            setattr(mrna, 'get_longest_' + feature, Mock(return_value=length))
        return mrna

    def _attach(self, seq, mrnas):
        """Distribute four mRNAs over the three genes as [0], [1, 2], [3]."""
        seq.genes[0].mrnas = [mrnas[0]]
        seq.genes[1].mrnas = [mrnas[1], mrnas[2]]
        seq.genes[2].mrnas = [mrnas[3]]

    def _make_gene(self, identifier, length):
        """Return an un-flagged mock gene whose length() returns `length`."""
        gene = Mock(identifier=identifier)
        gene.death_flagged = False
        gene.length = Mock(return_value=length)
        return gene

    # ---------------- CDS length filters ----------------

    def test_min_cds_length_filter(self):
        cds_length = MinCDSLengthFilter(30)
        seq = self._make_seq()
        mrnas = [self._make_cds_mrna('foo1-RA', 20),
                 self._make_cds_mrna('foo2-RA', None),
                 self._make_cds_mrna('foo2-RB', 30),
                 self._make_cds_mrna('foo3-RA', 40)]
        self._attach(seq, mrnas)
        # Apply the filter
        cds_length.apply(seq)
        self.assertEqual(len(seq.genes), 3)
        # mRNAs at/above the minimum, or with no CDS at all, are kept.
        self.assertEqual(seq.genes[1].mrnas, [mrnas[1], mrnas[2]])
        self.assertEqual(seq.genes[2].mrnas, [mrnas[3]])

    def test_max_cds_length_filter(self):
        cds_length = MaxCDSLengthFilter(100)
        seq = self._make_seq()
        mrnas = [self._make_cds_mrna('foo1-RA', 90),
                 self._make_cds_mrna('foo2-RA', None),
                 self._make_cds_mrna('foo2-RB', 100),
                 self._make_cds_mrna('foo3-RA', 110)]
        self._attach(seq, mrnas)
        # Apply the filter
        cds_length.apply(seq)
        self.assertEqual(len(seq.genes), 3)
        # mRNAs at/below the maximum, or with no CDS at all, are kept.
        self.assertEqual(seq.genes[0].mrnas, [mrnas[0]])
        self.assertEqual(seq.genes[1].mrnas, [mrnas[1], mrnas[2]])

    # ---------------- exon length filters ----------------

    def test_min_exon_length_filter(self):
        exon_length = MinExonLengthFilter(30)
        seq = self._make_seq()
        mrnas = [self._make_exon_mrna('foo1-RA', 20),
                 self._make_exon_mrna('foo2-RA', 30),
                 self._make_exon_mrna('foo2-RB', None),
                 self._make_exon_mrna('foo3-RA', 40)]
        self._attach(seq, mrnas)
        # Apply the filter
        exon_length.apply(seq)
        self.assertEqual(len(seq.genes), 3)
        self.assertEqual(seq.genes[1].mrnas, [mrnas[1], mrnas[2]])
        self.assertEqual(seq.genes[2].mrnas, [mrnas[3]])

    def test_max_exon_length_filter(self):
        exon_length = MaxExonLengthFilter(30)
        seq = self._make_seq()
        mrnas = [self._make_exon_mrna('foo1-RA', 20),
                 self._make_exon_mrna('foo2-RA', 30),
                 self._make_exon_mrna('foo2-RB', None),
                 self._make_exon_mrna('foo3-RA', 40)]
        self._attach(seq, mrnas)
        # Apply the filter
        exon_length.apply(seq)
        self.assertEqual(len(seq.genes), 3)
        self.assertEqual(seq.genes[0].mrnas, [mrnas[0]])
        self.assertEqual(seq.genes[1].mrnas, [mrnas[1], mrnas[2]])

    # ---------------- intron length filters ----------------

    def test_min_intron_length_filter(self):
        intron_length = MinIntronLengthFilter(30)
        seq = self._make_seq()
        mrnas = [self._make_exon_mrna('foo1-RA', 20, feature='intron'),
                 self._make_exon_mrna('foo2-RA', 30, feature='intron'),
                 self._make_exon_mrna('foo2-RB', None, feature='intron'),
                 self._make_exon_mrna('foo3-RA', 40, feature='intron')]
        self._attach(seq, mrnas)
        # Apply the filter
        intron_length.apply(seq)
        self.assertEqual(len(seq.genes), 3)
        self.assertEqual(seq.genes[1].mrnas, [mrnas[1], mrnas[2]])
        self.assertEqual(seq.genes[2].mrnas, [mrnas[3]])

    def test_min_intron_length_filter_doesnt_remove_single_exon_mrnas(self):
        intron_length = MinIntronLengthFilter(30)
        seq = Sequence()
        gene, mrna, exon = Mock(), Mock(), Mock()
        gene.death_flagged = False
        seq.genes = [gene]
        gene.mrnas = [mrna]
        mrna.identifier = "badmrna"
        mrna.get_shortest_intron.return_value = 0
        mrna.death_flagged = False
        mrna.exon = exon
        self.assertEquals(1, len(seq.genes[0].mrnas))
        intron_length.apply(seq)
        # Filter shouldn't remove mRNA based on a shortest_intron = 0 return value
        self.assertEquals(1, len(seq.genes[0].mrnas))
        assert not gene.remove_mrna.called

    def test_max_intron_length_filter(self):
        intron_length = MaxIntronLengthFilter(30)
        seq = self._make_seq()
        mrnas = [self._make_exon_mrna('foo1-RA', 20, feature='intron'),
                 self._make_exon_mrna('foo2-RA', 30, feature='intron'),
                 self._make_exon_mrna('foo2-RB', None, feature='intron'),
                 self._make_exon_mrna('foo3-RA', 40, feature='intron')]
        self._attach(seq, mrnas)
        # Apply the filter
        intron_length.apply(seq)
        self.assertEqual(len(seq.genes), 3)
        self.assertEqual(seq.genes[0].mrnas, [mrnas[0]])
        self.assertEqual(seq.genes[1].mrnas, [mrnas[1], mrnas[2]])

    # ---------------- gene length filters ----------------

    def test_min_gene_length_filter(self):
        gene_length_range = MinGeneLengthFilter(30)
        seq = Sequence()
        genes = [self._make_gene('foo1', 20),
                 self._make_gene('foo2', 30),
                 self._make_gene('foo3', 40)]
        seq.genes = list(genes)
        # Apply the filter
        gene_length_range.apply(seq)
        # Genes shorter than the minimum are dropped from the sequence.
        self.assertEqual(seq.genes, [genes[1], genes[2]])

    def test_max_gene_length_filter(self):
        gene_length_range = MaxGeneLengthFilter(30)
        seq = Sequence()
        genes = [self._make_gene('foo1', 20),
                 self._make_gene('foo2', 30),
                 self._make_gene('foo3', 40)]
        seq.genes = list(genes)
        # Apply the filter
        gene_length_range.apply(seq)
        # Genes longer than the maximum are dropped from the sequence.
        self.assertEqual(seq.genes, [genes[0], genes[1]])
def suite():
    """Assemble every TestFilters case into a single TestSuite."""
    return unittest.TestSuite([unittest.makeSuite(TestFilters)])
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for the artifact stages."""
from __future__ import print_function
import json
import os
import sys
import mock
from chromite.cbuildbot import cbuildbot_unittest
from chromite.cbuildbot import commands
from chromite.lib import constants
from chromite.lib import failures_lib
from chromite.cbuildbot import prebuilts
from chromite.lib import results_lib
from chromite.cbuildbot.stages import artifact_stages
from chromite.cbuildbot.stages import build_stages_unittest
from chromite.cbuildbot.stages import generic_stages_unittest
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import osutils
from chromite.lib import parallel
from chromite.lib import parallel_unittest
from chromite.lib import partial_mock
from chromite.lib import path_util
from chromite.lib.buildstore import FakeBuildStore
from chromite.cbuildbot.stages.generic_stages_unittest import patch
from chromite.cbuildbot.stages.generic_stages_unittest import patches
# pylint: disable=too-many-ancestors
class ArchiveStageTest(generic_stages_unittest.AbstractStageTestCase,
                       cbuildbot_unittest.SimpleBuilderTestCase):
  """Exercise ArchiveStage functionality."""

  # pylint: disable=protected-access

  RELEASE_TAG = ''
  VERSION = '3333.1.0'

  def _PatchDependencies(self):
    """Patch dependencies of ArchiveStage.PerformStage()."""
    to_patch = [(parallel, 'RunParallelSteps'), (commands, 'PushImages'),
                (commands, 'UploadArchivedFile')]
    self.AutoPatch(to_patch)

  def setUp(self):
    self._PatchDependencies()
    self._Prepare()
    self.buildstore = FakeBuildStore()

  # Our API here is not great when it comes to kwargs passing.
  def _Prepare(self, bot_id=None, **kwargs):  # pylint: disable=arguments-differ
    """Prepare the run with symbol upload and image push forced on."""
    extra_config = {'upload_symbols': True, 'push_image': True}
    super(ArchiveStageTest, self)._Prepare(
        bot_id, extra_config=extra_config, **kwargs)

  def ConstructStage(self):
    """Build an ArchiveStage wired to this run's archive path."""
    self._run.GetArchive().SetupArchivePath()
    return artifact_stages.ArchiveStage(self._run, self.buildstore,
                                        self._current_board)

  def testArchive(self):
    """Simple did-it-run test."""
    # TODO(davidjames): Test the individual archive steps as well.
    self.RunStage()

  # TODO(build): This test is not actually testing anything real. It confirms
  # that PushImages is not called, but the mock for RunParallelSteps already
  # prevents PushImages from being called, regardless of whether this is a
  # trybot flow.
  def testNoPushImagesForRemoteTrybot(self):
    """Test that remote trybot overrides work to disable push images."""
    self._Prepare(cmd_args=[
        '--remote-trybot', '-r', self.build_root, '--buildnumber=1234',
        'eve-release'
    ])
    self.RunStage()
    # pylint: disable=no-member
    self.assertEqual(commands.PushImages.call_count, 0)

  def ConstructStageForArchiveStep(self):
    """Stage construction for archive steps."""
    stage = self.ConstructStage()
    # Stub out the upload queue and chroot path translation so archive-step
    # tests don't touch the filesystem.
    self.PatchObject(stage._upload_queue, 'put', autospec=True)
    self.PatchObject(path_util, 'ToChrootPath', return_value='', autospec=True)
    return stage
class UploadPrebuiltsStageTest(
    generic_stages_unittest.RunCommandAbstractStageTestCase,
    cbuildbot_unittest.SimpleBuilderTestCase):
  """Tests for the UploadPrebuilts stage."""

  cmd = 'upload_prebuilts'
  RELEASE_TAG = ''

  # Our API here is not great when it comes to kwargs passing.
  def _Prepare(self, bot_id=None, **kwargs):  # pylint: disable=arguments-differ
    """Prepare the run and point self.cmd at the real upload_prebuilts path."""
    super(UploadPrebuiltsStageTest, self)._Prepare(bot_id, **kwargs)

    self.cmd = os.path.join(self.build_root, constants.CHROMITE_BIN_SUBDIR,
                            'upload_prebuilts')
    self._run.options.prebuilts = True
    self.buildstore = FakeBuildStore()

  def ConstructStage(self):
    """Build the stage for the last configured board."""
    return artifact_stages.UploadPrebuiltsStage(self._run, self.buildstore,
                                                self._run.config.boards[-1])

  def _VerifyBoardMap(self,
                      bot_id,
                      count,
                      board_map,
                      public_args=None,
                      private_args=None):
    """Verify that the prebuilts are uploaded for the specified bot.

    Args:
      bot_id: Bot to upload prebuilts for.
      count: Number of assert checks that should be performed.
      board_map: Map from slave boards to whether the bot is public.
      public_args: List of extra arguments for public boards.
      private_args: List of extra arguments for private boards.
    """
    self._Prepare(bot_id)
    self.RunStage()
    public_prefix = [self.cmd] + (public_args or [])
    private_prefix = [self.cmd] + (private_args or [])
    for board, public in board_map.items():
      # Public boards (or runs with explicit public_args) get a public
      # upload check; every board additionally gets a --private check whose
      # expectation is the inverse of its visibility.
      if public or public_args:
        public_cmd = public_prefix + ['--slave-board', board]
        self.assertCommandContains(public_cmd, expected=public)
        count -= 1
      private_cmd = private_prefix + ['--slave-board', board, '--private']
      self.assertCommandContains(private_cmd, expected=not public)
      count -= 1
    if board_map:
      self.assertCommandContains(
          [self.cmd, '--set-version',
           self._run.GetVersion()],)
      count -= 1
    # `count` is a budget of expected asserts; it must land exactly on zero.
    self.assertEqual(
        count, 0,
        'Number of asserts performed does not match (%d remaining)' % count)

  def testFullPrebuiltsUpload(self):
    """Test uploading of full builder prebuilts."""
    self._VerifyBoardMap('amd64-generic-full', 0, {})
    self.assertCommandContains([self.cmd, '--git-sync'])

  def testIncorrectCount(self):
    """Test that _VerifyBoardMap asserts when the count is wrong."""
    self.assertRaises(AssertionError, self._VerifyBoardMap,
                      'amd64-generic-full', 1, {})
class UploadDevInstallerPrebuiltsStageTest(
    generic_stages_unittest.AbstractStageTestCase,
    cbuildbot_unittest.SimpleBuilderTestCase):
  """Tests for the UploadDevInstallerPrebuilts stage."""

  RELEASE_TAG = 'RT'

  def setUp(self):
    self.upload_mock = self.PatchObject(prebuilts,
                                        'UploadDevInstallerPrebuilts')
    self._Prepare()

  # Our API here is not great when it comes to kwargs passing.
  def _Prepare(self, bot_id=None, **kwargs):  # pylint: disable=arguments-differ
    """Prepare a run configured for dev-installer prebuilt uploads."""
    super(UploadDevInstallerPrebuiltsStageTest, self)._Prepare(bot_id, **kwargs)

    self._run.options.prebuilts = True
    self._run.config['dev_installer_prebuilts'] = True
    self._run.config['binhost_bucket'] = 'gs://testbucket'
    self._run.config['binhost_key'] = 'dontcare'
    self._run.config['binhost_base_url'] = 'https://dontcare/here'
    self.buildstore = FakeBuildStore()

  def ConstructStage(self):
    """Build a DevInstallerPrebuiltsStage for the current board."""
    return artifact_stages.DevInstallerPrebuiltsStage(
        self._run, self.buildstore, self._current_board)

  def testDevInstallerUpload(self):
    """Basic sanity test testing uploads of dev installer prebuilts."""
    self.RunStage()
    # The upload helper must receive exactly the binhost settings injected
    # by _Prepare above.
    self.upload_mock.assert_called_with(
        binhost_bucket=self._run.config.binhost_bucket,
        binhost_key=self._run.config.binhost_key,
        binhost_base_url=self._run.config.binhost_base_url,
        buildroot=self.build_root,
        board=self._current_board,
        extra_args=mock.ANY)
class CPEExportStageTest(generic_stages_unittest.AbstractStageTestCase,
                         cbuildbot_unittest.SimpleBuilderTestCase):
  """Test CPEExportStage"""

  def setUp(self):
    self.CreateMockOverlay('amd64-generic')

    # Stub out archiving, parallel execution, and all shell commands so the
    # stage runs without touching the real system.
    self.StartPatcher(generic_stages_unittest.ArchivingStageMixinMock())
    self.StartPatcher(parallel_unittest.ParallelMock())

    self.rc_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
    self.rc_mock.SetDefaultCmdResult(output='')

    self.stage = None
    self.buildstore = FakeBuildStore()

  def ConstructStage(self):
    """Create a CPEExportStage instance for testing"""
    self._run.GetArchive().SetupArchivePath()
    return artifact_stages.CPEExportStage(self._run, self.buildstore,
                                          self._current_board)

  def assertBoardAttrEqual(self, attr, expected_value):
    """Assert the value of a board run |attr| against |expected_value|."""
    value = self.stage.board_runattrs.GetParallel(attr)
    self.assertEqual(expected_value, value)

  def _TestPerformStage(self):
    """Run PerformStage for the stage."""
    self._Prepare()
    self._run.attrs.release_tag = self.VERSION

    self.stage = self.ConstructStage()
    self.stage.PerformStage()

  def testCPEExport(self):
    """Test that CPEExport stage runs without syntax errors."""
    self._TestPerformStage()
class DebugSymbolsStageTest(generic_stages_unittest.AbstractStageTestCase,
                            cbuildbot_unittest.SimpleBuilderTestCase):
  """Test DebugSymbolsStage"""

  # pylint: disable=protected-access

  def setUp(self):
    self.CreateMockOverlay('amd64-generic')

    self.StartPatcher(generic_stages_unittest.ArchivingStageMixinMock())

    # Mock the four symbol/tarball commands so tests can count calls and
    # inject failures.
    self.gen_mock = self.PatchObject(commands, 'GenerateBreakpadSymbols')
    self.gen_android_mock = self.PatchObject(commands,
                                             'GenerateAndroidBreakpadSymbols')
    self.upload_mock = self.PatchObject(commands, 'UploadSymbols')
    self.tar_mock = self.PatchObject(commands, 'GenerateDebugTarball')

    self.rc_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
    self.rc_mock.SetDefaultCmdResult(output='')

    self.stage = None

  # Our API here is not great when it comes to kwargs passing.
  # pylint: disable=arguments-differ
  def _Prepare(self, extra_config=None, **kwargs):
    """Prepare this stage for testing."""
    if extra_config is None:
      extra_config = {
          'archive_build_debug': True,
          'vm_tests': True,
          'upload_symbols': True,
      }
    super(DebugSymbolsStageTest, self)._Prepare(
        extra_config=extra_config, **kwargs)
    self._run.attrs.release_tag = self.VERSION
    self.buildstore = FakeBuildStore()
  # pylint: enable=arguments-differ

  def ConstructStage(self):
    """Create a DebugSymbolsStage instance for testing"""
    self._run.GetArchive().SetupArchivePath()
    return artifact_stages.DebugSymbolsStage(self._run, self.buildstore,
                                             self._current_board)

  def assertBoardAttrEqual(self, attr, expected_value):
    """Assert the value of a board run |attr| against |expected_value|."""
    value = self.stage.board_runattrs.GetParallel(attr)
    self.assertEqual(expected_value, value)

  def _TestPerformStage(self,
                        extra_config=None,
                        create_android_symbols_archive=False):
    """Run PerformStage for the stage with the given extra config."""
    self._Prepare(extra_config=extra_config)

    # NOTE(review): a string side_effect makes successive calls return the
    # successive characters of '/my/tar/ball'; return_value was probably
    # intended — confirm. The tests below only check call counts, so both
    # behave the same here.
    self.tar_mock.side_effect = '/my/tar/ball'

    self.stage = self.ConstructStage()

    if create_android_symbols_archive:
      symbols_file = os.path.join(self.stage.archive_path,
                                  constants.ANDROID_SYMBOLS_FILE)
      osutils.Touch(symbols_file)

    try:
      self.stage.PerformStage()
    except Exception:
      # Let the stage translate the failure; callers assert on the result
      # (None on success, otherwise _HandleStageException's return value).
      return self.stage._HandleStageException(sys.exc_info())

  def testPerformStageWithSymbols(self):
    """Smoke test for an PerformStage when debugging is enabled"""
    self._TestPerformStage()
    self.assertEqual(self.gen_mock.call_count, 1)
    self.assertEqual(self.gen_android_mock.call_count, 0)
    self.assertEqual(self.upload_mock.call_count, 1)
    self.assertEqual(self.tar_mock.call_count, 2)
    self.assertBoardAttrEqual('breakpad_symbols_generated', True)
    self.assertBoardAttrEqual('debug_tarball_generated', True)

  def testPerformStageWithAndroidSymbols(self):
    """Smoke test for an PerformStage when Android symbols are available"""
    self._TestPerformStage(create_android_symbols_archive=True)
    self.assertEqual(self.gen_mock.call_count, 1)
    self.assertEqual(self.gen_android_mock.call_count, 1)
    self.assertEqual(self.upload_mock.call_count, 1)
    self.assertEqual(self.tar_mock.call_count, 2)
    self.assertBoardAttrEqual('breakpad_symbols_generated', True)
    self.assertBoardAttrEqual('debug_tarball_generated', True)

  def testPerformStageNoSymbols(self):
    """Smoke test for an PerformStage when debugging is disabled"""
    extra_config = {
        'archive_build_debug': False,
        'vm_tests': False,
        'upload_symbols': False,
    }
    result = self._TestPerformStage(extra_config)
    self.assertIsNone(result)
    # Symbols are still generated and tarred; only the upload is skipped.
    self.assertEqual(self.gen_mock.call_count, 1)
    self.assertEqual(self.gen_android_mock.call_count, 0)
    self.assertEqual(self.upload_mock.call_count, 0)
    self.assertEqual(self.tar_mock.call_count, 2)
    self.assertBoardAttrEqual('breakpad_symbols_generated', True)
    self.assertBoardAttrEqual('debug_tarball_generated', True)

  def testGenerateCrashStillNotifies(self):
    """Crashes in symbol generation should still notify external events."""

    class TestError(Exception):
      """Unique test exception"""

    self.gen_mock.side_effect = TestError('mew')
    result = self._TestPerformStage()
    self.assertIsInstance(result[0], failures_lib.InfrastructureFailure)
    self.assertEqual(self.gen_mock.call_count, 1)
    self.assertEqual(self.gen_android_mock.call_count, 0)
    self.assertEqual(self.upload_mock.call_count, 0)
    self.assertEqual(self.tar_mock.call_count, 0)
    # Board attrs must be published (as False) even though generation died.
    self.assertBoardAttrEqual('breakpad_symbols_generated', False)
    self.assertBoardAttrEqual('debug_tarball_generated', False)

  def testUploadCrashStillNotifies(self):
    """Crashes in symbol upload should still notify external events."""
    self.upload_mock.side_effect = failures_lib.BuildScriptFailure(
        cros_build_lib.RunCommandError('mew'), 'mew')
    result = self._TestPerformStage()
    self.assertIs(result[0], results_lib.Results.FORGIVEN)
    self.assertBoardAttrEqual('breakpad_symbols_generated', True)
    self.assertBoardAttrEqual('debug_tarball_generated', True)

  def testUploadCrashUploadsList(self):
    """A crash in symbol upload should still post the failed list file."""
    self.upload_mock.side_effect = failures_lib.BuildScriptFailure(
        cros_build_lib.RunCommandError('mew'), 'mew')
    self._Prepare()
    stage = self.ConstructStage()

    with mock.patch.object(os.path, 'exists') as mock_exists, \
         mock.patch.object(artifact_stages.DebugSymbolsStage,
                           'UploadArtifact') as mock_upload:
      # Pretend the failed-symbols list file exists so it gets uploaded.
      mock_exists.return_value = True
      self.assertRaises(artifact_stages.DebugSymbolsUploadException,
                        stage.UploadSymbols, stage._build_root,
                        stage._current_board)
      self.assertEqual(mock_exists.call_count, 1)
      self.assertEqual(mock_upload.call_count, 1)
class UploadTestArtifactsStageMock(
    generic_stages_unittest.ArchivingStageMixinMock):
  """Partial mock for BuildImageStage."""

  TARGET = 'chromite.cbuildbot.stages.artifact_stages.UploadTestArtifactsStage'
  ATTRS = (
      generic_stages_unittest.ArchivingStageMixinMock.ATTRS +
      ('BuildAutotestTarballs', 'BuildTastTarball',
       'BuildGuestImagesTarball'))

  def BuildAutotestTarballs(self, *args, **kwargs):
    # Run the real method with tarball creation and file discovery stubbed.
    with patches(
        patch(commands, 'BuildTarball'),
        patch(commands, 'FindFilesWithPattern', return_value=['foo.txt'])):
      self.backup['BuildAutotestTarballs'](*args, **kwargs)

  def BuildTastTarball(self, *args, **kwargs):
    # Run the real method with only BuildTarball stubbed.
    with patch(commands, 'BuildTarball'):
      self.backup['BuildTastTarball'](*args, **kwargs)

  def BuildGuestImagesTarball(self, *args, **kwargs):
    # Pass straight through to the real implementation.
    self.backup['BuildGuestImagesTarball'](*args, **kwargs)
class UploadTestArtifactsStageTest(build_stages_unittest.AllConfigsTestCase,
                                   cbuildbot_unittest.SimpleBuilderTestCase):
  """Tests UploadTestArtifactsStage."""

  def setUp(self):
    self._release_tag = None

    osutils.SafeMakedirs(os.path.join(self.build_root, 'chroot', 'tmp'))
    self.StartPatcher(UploadTestArtifactsStageMock())
    self.buildstore = FakeBuildStore()

  def ConstructStage(self):
    """Build an UploadTestArtifactsStage for the current board."""
    return artifact_stages.UploadTestArtifactsStage(self._run, self.buildstore,
                                                    self._current_board)

  def RunTestsWithBotId(self, bot_id, options_tests=True):
    """Test with the config for the specified bot_id."""
    self._Prepare(bot_id)
    self._run.options.tests = options_tests
    self._run.attrs.release_tag = '0.0.1'

    # Simulate images being ready.
    board_runattrs = self._run.GetBoardRunAttrs(self._current_board)
    board_runattrs.SetParallel('images_generated', True)

    generate_quick_provision_payloads_mock = self.PatchObject(
        commands, 'GenerateQuickProvisionPayloads')
    generate_update_payloads_mock = self.PatchObject(commands,
                                                     'GeneratePayloads')

    with parallel_unittest.ParallelMock():
      with self.RunStageWithConfig():
        # Payloads should be generated iff the config both uploads HW-test
        # artifacts and actually builds images.
        if (self._run.config.upload_hw_test_artifacts and
            self._run.config.images):
          self.assertNotEqual(generate_update_payloads_mock.call_count, 0)
          self.assertNotEqual(generate_quick_provision_payloads_mock.call_count,
                              0)
        else:
          self.assertEqual(generate_update_payloads_mock.call_count, 0)
          self.assertEqual(generate_quick_provision_payloads_mock.call_count, 0)

  def testAllConfigs(self):
    """Test all major configurations"""
    self.RunAllConfigs(self.RunTestsWithBotId)
# TODO: Delete ArchivingMock once ArchivingStage is deprecated.
class ArchivingMock(partial_mock.PartialMock):
  """Partial mock for ArchivingStage."""

  TARGET = 'chromite.cbuildbot.stages.artifact_stages.ArchivingStage'
  ATTRS = ('UploadArtifact',)

  def UploadArtifact(self, *args, **kwargs):
    """Run the real UploadArtifact with archive/upload commands stubbed."""
    with patch(commands, 'ArchiveFile', return_value='foo.txt'), \
         patch(commands, 'UploadArchivedFile'):
      self.backup['UploadArtifact'](*args, **kwargs)
# TODO: Delete ArchivingStageTest once ArchivingStage is deprecated.
class ArchivingStageTest(generic_stages_unittest.AbstractStageTestCase,
                         cbuildbot_unittest.SimpleBuilderTestCase):
  """Exercise ArchivingStage functionality."""

  RELEASE_TAG = ''

  def setUp(self):
    self.StartPatcher(ArchivingMock())
    self._Prepare()
    self.buildstore = FakeBuildStore()

  def ConstructStage(self):
    # ArchivingStage requires a parent ArchiveStage, so build one first.
    self._run.GetArchive().SetupArchivePath()
    archive_stage = artifact_stages.ArchiveStage(self._run, self.buildstore,
                                                 self._current_board)
    return artifact_stages.ArchivingStage(self._run, self.buildstore,
                                          self._current_board, archive_stage)
class GenerateSysrootStageTest(generic_stages_unittest.AbstractStageTestCase,
                               cbuildbot_unittest.SimpleBuilderTestCase):
  """Exercise GenerateSysrootStage functionality."""

  RELEASE_TAG = ''

  # pylint: disable=protected-access

  def setUp(self):
    self._Prepare()
    self.rc_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
    self.rc_mock.SetDefaultCmdResult()
    self.buildstore = FakeBuildStore()

  def ConstructStage(self):
    self._run.GetArchive().SetupArchivePath()
    return artifact_stages.GenerateSysrootStage(self._run, self.buildstore,
                                                self._current_board)

  def testGenerateSysroot(self):
    """The generated sysroot tarball should be queued for upload."""
    stage = self.ConstructStage()
    self.PatchObject(path_util, 'ToChrootPath', return_value='', autospec=True)
    self.PatchObject(stage._upload_queue, 'put', autospec=True)
    stage._GenerateSysroot()
    expected_tarball = 'sysroot_%s.tar.xz' % 'virtual_target-os'
    stage._upload_queue.put.assert_called_with([expected_tarball])
class CollectPGOProfilesStageTest(generic_stages_unittest.AbstractStageTestCase,
                                  cbuildbot_unittest.SimpleBuilderTestCase,
                                  cros_test_lib.RunCommandTestCase):
  """Exercise CollectPGOProfilesStage functionality."""

  RELEASE_TAG = ''

  # SHA embedded in the sample `clang --version` output below.
  _VALID_CLANG_VERSION_SHA = '4e8231b5cf0f5f62c7a51a857e29f5be5cb55734'
  _VALID_CLANG_VERSION_STRING = (
      'Chromium OS 10.0_pre377782_p20200113-r1 clang version 10.0.0 '
      '(/var/cache/chromeos-cache/distfiles/host/egit-src/llvm-project '
      '4e8231b5cf0f5f62c7a51a857e29f5be5cb55734)\n'
      'Target: x86_64-pc-linux-gnu\n'
      'Thread model: posix\n'
      'InstalledDir: /usr/bin\n'
  )

  # pylint: disable=protected-access

  def setUp(self):
    self._Prepare()
    self.buildstore = FakeBuildStore()

  def ConstructStage(self):
    self._run.GetArchive().SetupArchivePath()
    return artifact_stages.CollectPGOProfilesStage(self._run, self.buildstore,
                                                   self._current_board)

  def testParseLLVMHeadSHA(self):
    """The SHA should be extracted from a valid clang version string."""
    stage = self.ConstructStage()
    actual_sha = stage._ParseLLVMHeadSHA(self._VALID_CLANG_VERSION_STRING)
    self.assertEqual(actual_sha, self._VALID_CLANG_VERSION_SHA)

  def testCollectLLVMMetadataRaisesOnAnInvalidVersionString(self):
    """A version string without the SHA parenthetical must be rejected."""
    stage = self.ConstructStage()
    self.rc.AddCmdResult(partial_mock.In('equery'),
                         stdout=' - + llvm_pgo_generate :\n')
    # Replace the first line with one that lacks the repo/SHA suffix.
    valid_version_lines = self._VALID_CLANG_VERSION_STRING.splitlines()
    valid_version_lines[0] = 'clang version 8.0.1'
    self.rc.AddCmdResult(partial_mock.In('clang'),
                         stdout='\n'.join(valid_version_lines))
    with self.assertRaises(ValueError) as raised:
      stage._CollectLLVMMetadata()
    self.assertIn('version string', str(raised.exception))

  def testCollectLLVMMetadataRaisesIfClangIsntPGOGeneratedEmpty(self):
    """An empty equery USE listing means no pgo_generate flag: error."""
    stage = self.ConstructStage()
    self.rc.AddCmdResult(partial_mock.In('equery'), stdout='\n')
    self.rc.AddCmdResult(partial_mock.In('clang'),
                         stdout=self._VALID_CLANG_VERSION_STRING)
    with self.assertRaises(ValueError) as raised:
      stage._CollectLLVMMetadata()
    self.assertIn('pgo_generate flag', str(raised.exception))

  def testCollectLLVMMetadataRaisesIfClangIsntPGOGenerated(self):
    """A disabled ('- -') pgo_generate USE flag must also be an error."""
    stage = self.ConstructStage()
    self.rc.AddCmdResult(partial_mock.In('equery'),
                         stdout=' - - llvm_pgo_generate :\n')
    self.rc.AddCmdResult(partial_mock.In('clang'),
                         stdout=self._VALID_CLANG_VERSION_STRING)
    with self.assertRaises(ValueError) as raised:
      stage._CollectLLVMMetadata()
    self.assertIn('pgo_generate flag', str(raised.exception))

  def testCollectLLVMMetadataFunctionsInASimpleCase(self):
    """Happy path: metadata JSON is written and queued for upload."""
    stage = self.ConstructStage()
    self.rc.AddCmdResult(partial_mock.In('equery'),
                         stdout=' - + llvm_pgo_generate :\n')
    self.rc.AddCmdResult(partial_mock.In('clang'),
                         stdout=self._VALID_CLANG_VERSION_STRING)
    upload_queue_put = self.PatchObject(stage._upload_queue, 'put')
    stage._CollectLLVMMetadata()
    upload_queue_put.assert_called_once()
    written_file = os.path.join(stage.archive_path, 'llvm_metadata.json')
    given_metadata = json.loads(osutils.ReadFile(written_file))
    expected_metadata = {
        'head_sha': self._VALID_CLANG_VERSION_SHA,
    }
    self.assertEqual(given_metadata, expected_metadata)
    upload_queue_put.assert_called_with([written_file])

  def testCollectPGOProfiles(self):
    """Test that the sysroot generation was called correctly."""
    stage = self.ConstructStage()
    # No profiles directory
    with self.assertRaises(Exception) as msg:
      stage._CollectPGOProfiles()
    self.assertEqual('No profile directories found.', str(msg.exception))
    # Create profiles directory
    cov_path = 'build/%s/build/coverage_data' % stage._current_board
    out_cov_path = os.path.abspath(
        os.path.join(self.build_root, 'chroot', cov_path))
    os.makedirs(os.path.join(out_cov_path, 'raw_profiles'))
    # No profraw files
    with self.assertRaises(Exception) as msg:
      stage._CollectPGOProfiles()
    self.assertEqual('No profraw files found in profiles directory.',
                     str(msg.exception))
    # Create profraw file
    profraw = os.path.join(out_cov_path, 'raw_profiles', 'a.profraw')
    with open(profraw, 'a') as f:
      f.write('123')
    # Check uploading tarball; the merge command should reference the
    # chroot-relative output and profraw-list paths.
    self.PatchObject(stage._upload_queue, 'put', autospec=True)
    stage._CollectPGOProfiles()
    llvm_profdata = path_util.ToChrootPath(
        os.path.join(stage.archive_path, 'llvm.profdata'))
    profraw_list = path_util.ToChrootPath(
        os.path.join(stage.archive_path, 'profraw_list'))
    self.assertEqual(['llvm-profdata', 'merge',
                      '-output', llvm_profdata,
                      '-f', profraw_list],
                     stage._merge_cmd)
    tarball = stage.PROFDATA_TAR
    stage._upload_queue.put.assert_called_with([tarball])
| |
import json
from rest_framework import status
from django.test import TestCase, Client
from django.urls import reverse
from ..models import Temperature, HeartBeats, Breathing, BabyCrib, Movement, Noise
from ..serializers import TemperatureSerializer, HeartBeatsSerializer, BreathingSerializer, BabyCribSerializer, MovementSerializer, NoiseSerializer
# Shared Django test client used by every test case in this module.
client = Client()
class GetCurrentTemperatureTest(TestCase):
    """Test fetching the latest temperature reading via GET."""

    def setUp(self):
        Temperature.objects.create(temperature=35)

    def test_get_current_temperature(self):
        response = client.get(reverse('temperature'))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # The endpoint must serve the newest record (by datetime).
        latest = Temperature.objects.order_by('datetime').last()
        expected = TemperatureSerializer(latest).data
        self.assertEqual(response.data, expected)
class CreateNewTemperatureTest(TestCase):
    """Test class for saving a new temperature registry."""

    def setUp(self):
        # A float is accepted; an empty string must be rejected with a 400.
        self.valid_payload = {
            'temperature': 27.2
        }
        self.invalid_payload = {
            'temperature': ''
        }

    # Fixed typo in the method name: 'creat' -> 'create'.
    def test_create_valid_temperature(self):
        response = client.post(
            reverse('temperature'),
            data=json.dumps(self.valid_payload),
            content_type='application/json'
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_create_invalid_temperature(self):
        response = client.post(
            reverse('temperature'),
            data=json.dumps(self.invalid_payload),
            content_type='application/json'
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class GetCurrentHeartBeatsTest(TestCase):
    """Test fetching the latest heartbeats reading via GET."""

    def setUp(self):
        HeartBeats.objects.create(beats=70)

    def test_get_current_heartbeats(self):
        response = client.get(reverse('heartbeats'))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # The endpoint must serve the newest record (by datetime).
        latest = HeartBeats.objects.order_by('datetime').last()
        expected = HeartBeatsSerializer(latest).data
        self.assertEqual(response.data, expected)
class CreateNewHeartBeatsTest(TestCase):
    """Test class for saving a new heartbeats registry."""

    def setUp(self):
        self.valid_payload = {
            'beats': 80
        }
        self.invalid_payload = {
            'beats': ''
        }

    def _post_heartbeats(self, payload):
        # Helper: POST the payload as JSON to the heartbeats endpoint.
        return client.post(
            reverse('heartbeats'),
            data=json.dumps(payload),
            content_type='application/json'
        )

    def test_create_valid_heartbeats(self):
        response = self._post_heartbeats(self.valid_payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_create_invalid_heartbeats(self):
        response = self._post_heartbeats(self.invalid_payload)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class GetCurrentBreathingTest(TestCase):
    """ Test class for GET current breathing from API """

    def setUp(self):
        # Seeds status=1 although the POST API takes string statuses --
        # presumably the field accepts both; TODO confirm against the model.
        Breathing.objects.create(status=1)

    def test_get_current_breathing(self):
        # The endpoint must serve the newest record (by datetime).
        response = client.get(reverse('breathing'))
        breathing = Breathing.objects.order_by('datetime').last()
        serializer = BreathingSerializer(breathing)
        self.assertEqual(response.data, serializer.data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
class CreateNewBreathingTest(TestCase):
    """Test class for saving a new breathing registry."""

    def setUp(self):
        # 'breathing' is an accepted status; 'crying' is not valid for this
        # endpoint and should be rejected with a 400.
        self.valid_payload = {
            'status': 'breathing'
        }
        self.invalid_payload = {
            'status': 'crying'
        }

    def test_create_valid_breathing(self):
        response = client.post(
            reverse('breathing'),
            data=json.dumps(self.valid_payload),
            content_type='application/json'
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_create_invalid_breathing(self):
        response = client.post(
            reverse('breathing'),
            data=json.dumps(self.invalid_payload),
            content_type='application/json'
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class ControlStreamingTest(TestCase):
    """Test class for starting or stopping streaming."""

    def setUp(self):
        self.valid_payload = {
            'action': 'start'
        }
        self.invalid_payload = {
            'action': 'continue'
        }

    def _post_streaming(self, payload):
        # Helper: POST the payload as JSON to the streaming endpoint.
        return client.post(
            reverse('streaming'),
            data=json.dumps(payload),
            content_type='application/json'
        )

    def test_valid_streaming_control(self):
        response = self._post_streaming(self.valid_payload)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_invalid_streaming_control(self):
        response = self._post_streaming(self.invalid_payload)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class GetBabyCribStatusTest(TestCase):
    """ Test class to check babycrib status """

    def setUp(self):
        # A long (100s) vibration so it is still active when the tests
        # below query the movement endpoint.
        BabyCrib.objects.create(status='vibration', duration=100)

    def test_get_babycrib_status(self):
        # The view exposes the crib's status under the 'movement' key.
        response = client.get(reverse('movement'))
        babycrib = BabyCrib.objects.order_by('datetime').last()
        serializer = BabyCribSerializer(babycrib)
        self.assertEqual(response.data['movement'], serializer.data['status'])
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_still_moving_registry(self):
        # While the vibration is active the view reports is_moving=True and
        # a positive remaining_time.
        response = client.get(reverse('movement'))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['is_moving'], True)
        self.assertTrue(response.data['remaining_time'] > 0)

    def test_get_empty_babycrib_registry(self):
        # With no records at all the endpoint still answers 200, not moving.
        BabyCrib.objects.all().delete()
        response = client.get(reverse('movement'))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['is_moving'], False)
class ControlBabyCribMovementTest(TestCase):
    """Test class for controlling babycrib movement."""

    def setUp(self):
        self.valid_payload = {
            'status': 'resting',
            'duration': '0'
        }
        self.invalid_payload = {
            'status': 'stop',
            'duration': 100
        }

    def _post_movement(self, payload):
        # Helper: POST the payload as JSON to the movement endpoint.
        return client.post(
            reverse('movement'),
            data=json.dumps(payload),
            content_type='application/json'
        )

    def test_set_invalid_movement(self):
        response = self._post_movement(self.invalid_payload)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_raise_file_not_found(self):
        # A valid payload still yields a 500 here -- presumably the view
        # touches a file absent in the test environment; confirm in the view.
        response = self._post_movement(self.valid_payload)
        self.assertEqual(response.status_code,
                         status.HTTP_500_INTERNAL_SERVER_ERROR)
class GetCurrentNoiseStatusTest(TestCase):
    """ Test class for GET current crying status from API """

    def setUp(self):
        Noise.objects.create(is_crying=True)

    # Renamed from test_get_movement_status: this test exercises the noise
    # endpoint, not movement (name was a copy-paste leftover).
    def test_get_noise_status(self):
        response = client.get(reverse('noise'))
        # The endpoint must serve the newest record (by datetime).
        noise = Noise.objects.order_by('datetime').last()
        serializer = NoiseSerializer(noise)
        self.assertEqual(response.data, serializer.data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
class CreateNewNoiseTest(TestCase):
    """Test class for saving a new noise registry."""

    def setUp(self):
        self.valid_payload = {
            'is_crying': True
        }
        self.invalid_payload = {
            'is_crying': 'crying'
        }

    def _post_noise(self, payload):
        # Helper: POST the payload as JSON to the noise endpoint.
        return client.post(
            reverse('noise'),
            data=json.dumps(payload),
            content_type='application/json'
        )

    def test_create_valid_noise(self):
        response = self._post_noise(self.valid_payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_create_invalid_noise(self):
        response = self._post_noise(self.invalid_payload)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
| |
# Generated symbol-table fixture for the 'chown' binary.  The tuple field
# layouts below are inferred from the values themselves and should be
# confirmed against the tool that emitted this data (TODO).
chown_data = {
    # Imported (undefined) symbols: (GOT address, size, type?, '*.name').
    'imports' : [
        (0x220fd80, 8, 9, '*.endgrent'),
        (0x220fd88, 8, 9, '*.__uflow'),
        (0x220fd90, 8, 9, '*.getenv'),
        (0x220fd98, 8, 9, '*.abort'),
        (0x220fda0, 8, 9, '*.__errno_location'),
        (0x220fda8, 8, 9, '*.strncmp'),
        (0x220fdb0, 8, 9, '*._exit'),
        (0x220fdb8, 8, 9, '*.strcpy'),
        (0x220fdc0, 8, 9, '*.__fpending'),
        (0x220fdc8, 8, 9, '*.qsort'),
        (0x220fdd0, 8, 9, '*.fcntl'),
        (0x220fdd8, 8, 9, '*.textdomain'),
        (0x220fde0, 8, 9, '*.fclose'),
        (0x220fde8, 8, 9, '*.getpwuid'),
        (0x220fdf0, 8, 9, '*.bindtextdomain'),
        (0x220fdf8, 8, 9, '*.stpcpy'),
        (0x220fe00, 8, 9, '*.dcgettext'),
        (0x220fe08, 8, 9, '*.__ctype_get_mb_cur_max'),
        (0x220fe10, 8, 9, '*.strlen'),
        (0x220fe18, 8, 9, '*.__lxstat'),
        (0x220fe20, 8, 9, '*.openat'),
        (0x220fe28, 8, 9, '*.__stack_chk_fail'),
        (0x220fe30, 8, 9, '*.getopt_long'),
        (0x220fe38, 8, 9, '*.mbrtowc'),
        (0x220fe40, 8, 9, '*.strchr'),
        (0x220fe48, 8, 9, '*.getgrgid'),
        (0x220fe50, 8, 9, '*.__fxstatat'),
        (0x220fe58, 8, 9, '*.strrchr'),
        (0x220fe60, 8, 9, '*.lseek'),
        (0x220fe68, 8, 9, '*.__assert_fail'),
        (0x220fe70, 8, 9, '*.memset'),
        (0x220fe78, 8, 9, '*.fscanf'),
        (0x220fe80, 8, 9, '*.close'),
        (0x220fe88, 8, 9, '*.__openat_2'),
        (0x220fe90, 8, 9, '*.closedir'),
        (0x220fe98, 8, 9, '*.memcmp'),
        (0x220fea0, 8, 9, '*.fputs_unlocked'),
        (0x220fea8, 8, 9, '*.calloc'),
        (0x220feb0, 8, 9, '*.strcmp'),
        (0x220feb8, 8, 9, '*.dirfd'),
        (0x220fec0, 8, 9, '*.getpwnam'),
        (0x220fec8, 8, 9, '*.memcpy'),
        (0x220fed0, 8, 9, '*.getgrnam'),
        (0x220fed8, 8, 9, '*.fileno'),
        (0x220fee0, 8, 9, '*.__xstat'),
        (0x220fee8, 8, 9, '*.readdir'),
        (0x220fef0, 8, 9, '*.malloc'),
        (0x220fef8, 8, 9, '*.fflush'),
        (0x220ff00, 8, 9, '*.nl_langinfo'),
        (0x220ff08, 8, 9, '*.ungetc'),
        (0x220ff10, 8, 9, '*.__fxstat'),
        (0x220ff18, 8, 9, '*.endpwent'),
        (0x220ff20, 8, 9, '*.__freading'),
        (0x220ff28, 8, 9, '*.fchdir'),
        (0x220ff30, 8, 9, '*.realloc'),
        (0x220ff38, 8, 9, '*.fdopen'),
        (0x220ff40, 8, 9, '*.setlocale'),
        (0x220ff48, 8, 9, '*.__printf_chk'),
        (0x220ff50, 8, 9, '*.memmove'),
        (0x220ff58, 8, 9, '*.error'),
        (0x220ff60, 8, 9, '*.open'),
        (0x220ff68, 8, 9, '*.fseeko'),
        (0x220ff70, 8, 9, '*.fchown'),
        (0x220ff78, 8, 9, '*.fdopendir'),
        (0x220ff80, 8, 9, '*.strtoul'),
        (0x220ff88, 8, 9, '*.fstatfs'),
        (0x220ff90, 8, 9, '*.__cxa_atexit'),
        (0x220ff98, 8, 9, '*.fchownat'),
        (0x220ffa0, 8, 9, '*.exit'),
        (0x220ffa8, 8, 9, '*.fwrite'),
        (0x220ffb0, 8, 9, '*.__fprintf_chk'),
        (0x220ffb8, 8, 9, '*.mbsinit'),
        (0x220ffc0, 8, 9, '*.iswprint'),
        (0x220ffc8, 8, 9, '*.__ctype_b_loc'),
        (0x220ffd0, 8, 9, '*.free'),
        (0x220ffd8, 8, 9, '*._ITM_deregisterTMCloneTable'),
        (0x220ffe0, 8, 9, '*.__libc_start_main'),
        (0x220ffe8, 8, 9, '*.__gmon_start__'),
        (0x220fff0, 8, 9, '*._ITM_registerTMCloneTable'),
        (0x220fff8, 8, 9, '*.__cxa_finalize'),
    ],
    # Exported symbols: (address, kind -- 0 presumably func / 1 data,
    # symbol name, module name).
    'exports' : [
        (0x20027a0, 0, '__entry', 'chown'),
        (0x2008420, 0, 'fts_open', 'chown'),
        (0x20087c0, 0, 'fts_close', 'chown'),
        (0x2008950, 0, 'fts_read', 'chown'),
        (0x20090d0, 0, 'fts_set', 'chown'),
        (0x2009100, 0, 'fts_children', 'chown'),
        (0x200b0a0, 1, '_IO_stdin_used', 'chown'),
        (0x200c5e0, 1, 'quoting_style_vals', 'chown'),
        (0x200c960, 1, 'version_etc_copyright', 'chown'),
        (0x220fb20, 1, 'quoting_style_args', 'chown'),
        (0x2210010, 1, 'Version', 'chown'),
        (0x2210018, 1, 'exit_failure', 'chown'),
        (0x2210020, 1, 'quote_quoting_options', 'chown'),
        (0x2210080, 1, '__progname', 'chown'),
        (0x2210080, 1, 'program_invocation_short_name', 'chown'),
        (0x2210088, 1, 'stdout', 'chown'),
        (0x2210090, 1, 'optind', 'chown'),
        (0x22100a0, 1, 'optarg', 'chown'),
        (0x22100a8, 1, '__progname_full', 'chown'),
        (0x22100a8, 1, 'program_invocation_name', 'chown'),
        (0x22100c0, 1, 'stderr', 'chown'),
        (0x22100f8, 1, 'program_name', 'chown'),
    ],
    # Relocations: (module, offset, type?, target value).
    'relocs' : [
        ('chown', 0x20f950, 2, 0x28a0),
        ('chown', 0x20f958, 2, 0x2860),
        ('chown', 0x20f960, 2, 0xb19a),
        ('chown', 0x20f980, 2, 0xb1a4),
        ('chown', 0x20f9a0, 2, 0xb1b4),
        ('chown', 0x20f9c0, 2, 0xb1ac),
        ('chown', 0x20f9e0, 2, 0xb1b1),
        ('chown', 0x20fa00, 2, 0xb1c0),
        ('chown', 0x20fa20, 2, 0xb1c3),
        ('chown', 0x20fa40, 2, 0xb1d1),
        ('chown', 0x20fa60, 2, 0xb1d7),
        ('chown', 0x20fa80, 2, 0xb1b6),
        ('chown', 0x20faa0, 2, 0xb1de),
        ('chown', 0x20fac0, 2, 0xb1e6),
        ('chown', 0x20fae0, 2, 0xb1eb),
        ('chown', 0x20fb20, 2, 0xc141),
        ('chown', 0x20fb28, 2, 0xc149),
        ('chown', 0x20fb30, 2, 0xc14f),
        ('chown', 0x20fb38, 2, 0xc15c),
        ('chown', 0x20fb40, 2, 0xc169),
        ('chown', 0x20fb48, 2, 0xcb40),
        ('chown', 0x20fb50, 2, 0xc17d),
        ('chown', 0x20fb58, 2, 0xc162),
        ('chown', 0x20fb60, 2, 0xb13d),
        ('chown', 0x20fb68, 2, 0xc185),
        ('chown', 0x210008, 2, 0x210008),
        ('chown', 0x210010, 2, 0xc0d4),
        ('chown', 0x210060, 2, 0x210070),
        ('chown', 0x210078, 2, 0x210100),
    ],
    # Address-to-name map: (address, qualified symbol name).
    'names' : [
        (0x2001d38, 'chown.init_function'),
        (0x2001d50, 'chown.LazyLoaderTrampoline'),
        (0x2001d60, 'chown.plt_endgrent'),
        (0x2001d70, 'chown.plt___uflow'),
        (0x2001d80, 'chown.plt_getenv'),
        (0x2001d90, 'chown.plt_abort'),
        (0x2001da0, 'chown.plt___errno_location'),
        (0x2001db0, 'chown.plt_strncmp'),
        (0x2001dc0, 'chown.plt__exit'),
        (0x2001dd0, 'chown.plt_strcpy'),
        (0x2001de0, 'chown.plt___fpending'),
        (0x2001df0, 'chown.plt_qsort'),
        (0x2001e00, 'chown.plt_fcntl'),
        (0x2001e10, 'chown.plt_textdomain'),
        (0x2001e20, 'chown.plt_fclose'),
        (0x2001e30, 'chown.plt_getpwuid'),
        (0x2001e40, 'chown.plt_bindtextdomain'),
        (0x2001e50, 'chown.plt_stpcpy'),
        (0x2001e60, 'chown.plt_dcgettext'),
        (0x2001e70, 'chown.plt___ctype_get_mb_cur_max'),
        (0x2001e80, 'chown.plt_strlen'),
        (0x2001e90, 'chown.plt___lxstat'),
        (0x2001ea0, 'chown.plt_openat'),
        (0x2001eb0, 'chown.plt___stack_chk_fail'),
        (0x2001ec0, 'chown.plt_getopt_long'),
        (0x2001ed0, 'chown.plt_mbrtowc'),
        (0x2001ee0, 'chown.plt_strchr'),
        (0x2001ef0, 'chown.plt_getgrgid'),
        (0x2001f00, 'chown.plt___fxstatat'),
        (0x2001f10, 'chown.plt_strrchr'),
        (0x2001f20, 'chown.plt_lseek'),
        (0x2001f30, 'chown.plt___assert_fail'),
        (0x2001f40, 'chown.plt_memset'),
        (0x2001f50, 'chown.plt_fscanf'),
        (0x2001f60, 'chown.plt_close'),
        (0x2001f70, 'chown.plt___openat_2'),
        (0x2001f80, 'chown.plt_closedir'),
        (0x2001f90, 'chown.plt_memcmp'),
        (0x2001fa0, 'chown.plt_fputs_unlocked'),
        (0x2001fb0, 'chown.plt_calloc'),
        (0x2001fc0, 'chown.plt_strcmp'),
        (0x2001fd0, 'chown.plt_dirfd'),
        (0x2001fe0, 'chown.plt_getpwnam'),
        (0x2001ff0, 'chown.plt_memcpy'),
        (0x2002000, 'chown.plt_getgrnam'),
        (0x2002010, 'chown.plt_fileno'),
        (0x2002020, 'chown.plt___xstat'),
        (0x2002030, 'chown.plt_readdir'),
        (0x2002040, 'chown.plt_malloc'),
        (0x2002050, 'chown.plt_fflush'),
        (0x2002060, 'chown.plt_nl_langinfo'),
        (0x2002070, 'chown.plt_ungetc'),
        (0x2002080, 'chown.plt___fxstat'),
        (0x2002090, 'chown.plt_endpwent'),
        (0x20020a0, 'chown.plt___freading'),
        (0x20020b0, 'chown.plt_fchdir'),
        (0x20020c0, 'chown.plt_realloc'),
        (0x20020d0, 'chown.plt_fdopen'),
        (0x20020e0, 'chown.plt_setlocale'),
        (0x20020f0, 'chown.plt___printf_chk'),
        (0x2002100, 'chown.plt_memmove'),
        (0x2002110, 'chown.plt_error'),
        (0x2002120, 'chown.plt_open'),
        (0x2002130, 'chown.plt_fseeko'),
        (0x2002140, 'chown.plt_fchown'),
        (0x2002150, 'chown.plt_fdopendir'),
        (0x2002160, 'chown.plt_strtoul'),
        (0x2002170, 'chown.plt_fstatfs'),
        (0x2002180, 'chown.plt___cxa_atexit'),
        (0x2002190, 'chown.plt_fchownat'),
        (0x20021a0, 'chown.plt_exit'),
        (0x20021b0, 'chown.plt_fwrite'),
        (0x20021c0, 'chown.plt___fprintf_chk'),
        (0x20021d0, 'chown.plt_mbsinit'),
        (0x20021e0, 'chown.plt_iswprint'),
        (0x20021f0, 'chown.plt___ctype_b_loc'),
        (0x2002200, 'chown.plt_free'),
        (0x2002208, 'chown.plt___cxa_finalize'),
        (0x20027a0, 'chown.__entry'),
        (0x2002860, 'chown.fini_function_0'),
        (0x20028a0, 'chown.init_function_0'),
        (0x2008420, 'chown.fts_open'),
        (0x20087c0, 'chown.fts_close'),
        (0x2008950, 'chown.fts_read'),
        (0x20090d0, 'chown.fts_set'),
        (0x2009100, 'chown.fts_children'),
        (0x200b08c, 'chown.fini_function'),
        (0x200b0a0, 'chown._IO_stdin_used'),
        (0x200c5e0, 'chown.quoting_style_vals'),
        (0x200c960, 'chown.version_etc_copyright'),
        (0x220f950, 'chown.ptr_init_function_0_0220f950'),
        (0x220f958, 'chown.ptr_fini_function_0_0220f958'),
        (0x220fb20, 'chown.quoting_style_args'),
        (0x220fd80, '*.endgrent_0220fd80'),
        (0x220fd88, '*.__uflow_0220fd88'),
        (0x220fd90, '*.getenv_0220fd90'),
        (0x220fd98, '*.abort_0220fd98'),
        (0x220fda0, '*.__errno_location_0220fda0'),
        (0x220fda8, '*.strncmp_0220fda8'),
        (0x220fdb0, '*._exit_0220fdb0'),
        (0x220fdb8, '*.strcpy_0220fdb8'),
        (0x220fdc0, '*.__fpending_0220fdc0'),
        (0x220fdc8, '*.qsort_0220fdc8'),
        (0x220fdd0, '*.fcntl_0220fdd0'),
        (0x220fdd8, '*.textdomain_0220fdd8'),
        (0x220fde0, '*.fclose_0220fde0'),
        (0x220fde8, '*.getpwuid_0220fde8'),
        (0x220fdf0, '*.bindtextdomain_0220fdf0'),
        (0x220fdf8, '*.stpcpy_0220fdf8'),
        (0x220fe00, '*.dcgettext_0220fe00'),
        (0x220fe08, '*.__ctype_get_mb_cur_max_0220fe08'),
        (0x220fe10, '*.strlen_0220fe10'),
        (0x220fe18, '*.__lxstat_0220fe18'),
        (0x220fe20, '*.openat_0220fe20'),
        (0x220fe28, '*.__stack_chk_fail_0220fe28'),
        (0x220fe30, '*.getopt_long_0220fe30'),
        (0x220fe38, '*.mbrtowc_0220fe38'),
        (0x220fe40, '*.strchr_0220fe40'),
        (0x220fe48, '*.getgrgid_0220fe48'),
        (0x220fe50, '*.__fxstatat_0220fe50'),
        (0x220fe58, '*.strrchr_0220fe58'),
        (0x220fe60, '*.lseek_0220fe60'),
        (0x220fe68, '*.__assert_fail_0220fe68'),
        (0x220fe70, '*.memset_0220fe70'),
        (0x220fe78, '*.fscanf_0220fe78'),
        (0x220fe80, '*.close_0220fe80'),
        (0x220fe88, '*.__openat_2_0220fe88'),
        (0x220fe90, '*.closedir_0220fe90'),
        (0x220fe98, '*.memcmp_0220fe98'),
        (0x220fea0, '*.fputs_unlocked_0220fea0'),
        (0x220fea8, '*.calloc_0220fea8'),
        (0x220feb0, '*.strcmp_0220feb0'),
        (0x220feb8, '*.dirfd_0220feb8'),
        (0x220fec0, '*.getpwnam_0220fec0'),
        (0x220fec8, '*.memcpy_0220fec8'),
        (0x220fed0, '*.getgrnam_0220fed0'),
        (0x220fed8, '*.fileno_0220fed8'),
        (0x220fee0, '*.__xstat_0220fee0'),
        (0x220fee8, '*.readdir_0220fee8'),
        (0x220fef0, '*.malloc_0220fef0'),
        (0x220fef8, '*.fflush_0220fef8'),
        (0x220ff00, '*.nl_langinfo_0220ff00'),
        (0x220ff08, '*.ungetc_0220ff08'),
        (0x220ff10, '*.__fxstat_0220ff10'),
        (0x220ff18, '*.endpwent_0220ff18'),
        (0x220ff20, '*.__freading_0220ff20'),
        (0x220ff28, '*.fchdir_0220ff28'),
        (0x220ff30, '*.realloc_0220ff30'),
        (0x220ff38, '*.fdopen_0220ff38'),
        (0x220ff40, '*.setlocale_0220ff40'),
        (0x220ff48, '*.__printf_chk_0220ff48'),
        (0x220ff50, '*.memmove_0220ff50'),
        (0x220ff58, '*.error_0220ff58'),
        (0x220ff60, '*.open_0220ff60'),
        (0x220ff68, '*.fseeko_0220ff68'),
        (0x220ff70, '*.fchown_0220ff70'),
        (0x220ff78, '*.fdopendir_0220ff78'),
        (0x220ff80, '*.strtoul_0220ff80'),
        (0x220ff88, '*.fstatfs_0220ff88'),
        (0x220ff90, '*.__cxa_atexit_0220ff90'),
        (0x220ff98, '*.fchownat_0220ff98'),
        (0x220ffa0, '*.exit_0220ffa0'),
        (0x220ffa8, '*.fwrite_0220ffa8'),
        (0x220ffb0, '*.__fprintf_chk_0220ffb0'),
        (0x220ffb8, '*.mbsinit_0220ffb8'),
        (0x220ffc0, '*.iswprint_0220ffc0'),
        (0x220ffc8, '*.__ctype_b_loc_0220ffc8'),
        (0x220ffd0, '*.free_0220ffd0'),
        (0x220ffd8, '*._ITM_deregisterTMCloneTable_0220ffd8'),
        (0x220ffe0, '*.__libc_start_main_0220ffe0'),
        (0x220ffe8, '*.__gmon_start___0220ffe8'),
        (0x220fff0, '*._ITM_registerTMCloneTable_0220fff0'),
        (0x220fff8, '*.__cxa_finalize_0220fff8'),
        (0x2210010, 'chown.Version'),
        (0x2210018, 'chown.exit_failure'),
        (0x2210020, 'chown.quote_quoting_options'),
        (0x2210080, 'chown.program_invocation_short_name'),
        (0x2210088, 'chown.stdout'),
        (0x2210090, 'chown.optind'),
        (0x22100a0, 'chown.optarg'),
        (0x22100a8, 'chown.__progname_full'),
        (0x22100c0, 'chown.stderr'),
        (0x22100f8, 'chown.program_name'),
    ],
    # No PLT/GOT override entries for this binary.
    'pltgot' : [
    ],
    }
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPUClusterResolver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import six
from six.moves.urllib.error import URLError
from tensorflow.python.client import session
from tensorflow.python.distribute import cluster_resolver
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
# Alias the mock module bundled with tensorflow.python.platform.test.
mock = test.mock
class MockRequestClass(object):
  """Stand-in for a Cloud TPU API request; execute() reads from a dict."""

  def __init__(self, name, tpu_map):
    self._name = name
    self._tpu_map = tpu_map

  def execute(self):
    """Return the mapped entry for our resource name, or raise KeyError."""
    if self._name not in self._tpu_map:
      raise KeyError('Resource %s was not found' % self._name)
    return self._tpu_map[self._name]
class MockNodeClass(object):
  """Stand-in for the Cloud TPU API nodes() collection, backed by a dict."""

  def __init__(self, tpu_map):
    self._tpu_map = tpu_map

  def get(self, name):
    # Return a request object that resolves `name` against the map on
    # execute(), mirroring the real googleapiclient interface.
    return MockRequestClass(name, self._tpu_map)
def mock_request_compute_metadata(cls, *args, **kwargs):
  """Canned GCE metadata-server responses used by the tests below."""
  del cls, kwargs  # Unused.
  canned_responses = {
      'project/project-id': 'test-project',
      'instance/zone': 'projects/test-project/locations/us-central1-c',
      'instance/network-interfaces/0/ip': '10.128.1.2',
  }
  # Unknown metadata paths resolve to the empty string, as before.
  return canned_responses.get(args[0], '')
def mock_is_running_in_gce(cls, *args, **kwargs):
  """Patch helper: pretend we are running on a GCE VM."""
  del cls, args, kwargs  # Unused.
  return True
def mock_is_not_running_in_gce(cls, *args, **kwargs):
  """Patch helper: pretend we are NOT running on a GCE VM."""
  del cls, args, kwargs  # Unused.
  return False
def mock_running_in_gce_urlopen(cls, *args, **kwargs):
  """Fake urlopen whose response headers identify the GCE metadata server."""
  del cls, args, kwargs  # Unused.
  response = mock.MagicMock()
  response.info.return_value = {'Metadata-Flavor': 'Google'}
  return response
def mock_not_running_in_gce_urlopen(cls, *args, **kwargs):
  """Fake urlopen that fails as it would off-GCE (no metadata server)."""
  del cls, args, kwargs  # Unused.
  raise URLError(reason='Host does not exist.')
class TPUClusterResolverTest(test.TestCase):
  def _verifyClusterSpecEquality(self, cluster_spec, expected_proto):
    """Verifies that the ClusterSpec generates the correct proto.

    We are testing this four different ways to ensure that the ClusterSpec
    returned by the TPUClusterResolver behaves identically to a normal
    ClusterSpec when passed into the generic ClusterSpec libraries.

    Args:
      cluster_spec: ClusterSpec returned by the TPUClusterResolver
      expected_proto: Expected protobuf
    """
    # 1) Direct proto comparison.
    self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
    # 2) Round-trip through ClusterSpec(ClusterSpec).
    self.assertProtoEquals(
        expected_proto,
        server_lib.ClusterSpec(cluster_spec).as_cluster_def())
    # 3) Round-trip through the cluster_def proto.
    self.assertProtoEquals(expected_proto,
                           server_lib.ClusterSpec(
                               cluster_spec.as_cluster_def()).as_cluster_def())
    # 4) Round-trip through the dict representation.
    self.assertProtoEquals(expected_proto,
                           server_lib.ClusterSpec(
                               cluster_spec.as_dict()).as_cluster_def())
def mock_service_client(self, tpu_map=None):
if tpu_map is None:
tpu_map = {}
mock_locations = mock.MagicMock()
mock_locations.nodes.return_value = MockNodeClass(tpu_map)
mock_project = mock.MagicMock()
mock_project.locations.return_value = mock_locations
mock_client = mock.MagicMock()
mock_client.projects.return_value = mock_project
return mock_client
  @mock.patch.object(cluster_resolver.TPUClusterResolver,
                     '_isRunningInGCE',
                     mock_is_running_in_gce)
  def testCheckRunningInGceWithNoTpuName(self):
    """On GCE with an empty TPU name, construction must fail with a hint."""
    with self.assertRaisesRegexp(RuntimeError, '.*Google Cloud.*'):
      cluster_resolver.TPUClusterResolver(tpu='')
  @mock.patch.object(six.moves.urllib.request,
                     'urlopen',
                     mock_running_in_gce_urlopen)
  def testIsRunningInGce(self):
    """A Google-flavored metadata response means we are on GCE."""
    self.assertTrue(cluster_resolver.TPUClusterResolver._isRunningInGCE())
  @mock.patch.object(six.moves.urllib.request,
                     'urlopen',
                     mock_not_running_in_gce_urlopen)
  def testIsNotRunningInGce(self):
    """A URLError from the metadata server means we are not on GCE."""
    self.assertFalse(cluster_resolver.TPUClusterResolver._isRunningInGCE())
  @mock.patch.object(cluster_resolver.TPUClusterResolver,
                     '_requestComputeMetadata',
                     mock_request_compute_metadata)
  def testRetrieveProjectAndZoneFromMetadata(self):
    """With project/zone omitted, they should come from GCE metadata."""
    tpu_map = {
        'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
            'ipAddress': '10.1.2.3',
            'port': '8470',
            'health': 'HEALTHY'
        }
    }
    resolver = cluster_resolver.TPUClusterResolver(
        project=None,
        zone=None,
        tpu=['test-tpu-1'],
        credentials=None,
        service=self.mock_service_client(tpu_map=tpu_map),
        coordinator_name='coordinator')
    actual_cluster_spec = resolver.cluster_spec()
    # Coordinator IP comes from the mocked metadata; its port is chosen by
    # the resolver at construction time.
    expected_proto = """
    job {
      name: 'coordinator'
      tasks { key: 0 value: '10.128.1.2:%s' }
    }
    job {
      name: 'worker'
      tasks { key: 0 value: '10.1.2.3:8470' }
    }
    """ % resolver._coordinator_port
    self._verifyClusterSpecEquality(actual_cluster_spec, str(expected_proto))
    self.assertEqual(resolver.master(), 'grpc://10.1.2.3:8470')
  @mock.patch.object(cluster_resolver.TPUClusterResolver,
                     '_requestComputeMetadata',
                     mock_request_compute_metadata)
  def testRetrieveProjectAndZoneFromMetadataNoCoordinator(self):
    """Without a coordinator name, only the worker job should appear."""
    tpu_map = {
        'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
            'ipAddress': '10.1.2.3',
            'port': '8470',
            'health': 'HEALTHY'
        }
    }
    resolver = cluster_resolver.TPUClusterResolver(
        project=None,
        zone=None,
        tpu=['test-tpu-1'],
        coordinator_name=None,
        credentials=None,
        service=self.mock_service_client(tpu_map=tpu_map))
    actual_cluster_spec = resolver.cluster_spec()
    expected_proto = """
    job { name: 'worker' tasks { key: 0 value: '10.1.2.3:8470' } }
    """
    self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
    self.assertEqual(resolver.master(), 'grpc://10.1.2.3:8470')
  @mock.patch.object(cluster_resolver.TPUClusterResolver,
                     '_requestComputeMetadata',
                     mock_request_compute_metadata)
  def testUnhealthyCloudTpu(self):
    """An UNHEALTHY TPU should make cluster_spec() raise."""
    tpu_map = {
        'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
            'ipAddress': '10.1.2.3',
            'port': '8470',
            'health': 'UNHEALTHY'
        }
    }
    resolver = cluster_resolver.TPUClusterResolver(
        project=None,
        zone=None,
        tpu='test-tpu-1',
        coordinator_name=None,
        credentials=None,
        service=self.mock_service_client(tpu_map=tpu_map))
    with self.assertRaises(RuntimeError):
      resolver.cluster_spec()
  @mock.patch.object(cluster_resolver.TPUClusterResolver,
                     '_requestComputeMetadata',
                     mock_request_compute_metadata)
  def testNotReadyCloudTpu(self):
    """cluster_spec() raises RuntimeError while the TPU is still CREATING.

    Note: this node reports a 'state' field (not 'health') to model a TPU
    that has not finished provisioning.
    """
    tpu_map = {
        'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
            'ipAddress': '10.1.2.3',
            'port': '8470',
            'state': 'CREATING'
        }
    }
    resolver = cluster_resolver.TPUClusterResolver(
        project=None,
        zone=None,
        tpu='test-tpu-1',
        coordinator_name=None,
        credentials=None,
        service=self.mock_service_client(tpu_map=tpu_map))
    with self.assertRaises(RuntimeError):
      resolver.cluster_spec()
  def testSimpleSuccessfulRetrieval(self):
    """Explicit project/zone plus an explicit coordinator_address resolve
    without touching compute metadata."""
    tpu_map = {
        'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
            'ipAddress': '10.1.2.3',
            'port': '8470',
            'health': 'HEALTHY'
        }
    }
    resolver = cluster_resolver.TPUClusterResolver(
        project='test-project',
        zone='us-central1-c',
        tpu=['test-tpu-1'],
        coordinator_name='coordinator',
        coordinator_address='10.128.1.5:10203',
        credentials=None,
        service=self.mock_service_client(tpu_map=tpu_map))
    actual_cluster_spec = resolver.cluster_spec()
    expected_proto = """
    job { name: 'coordinator' tasks { key: 0 value: '10.128.1.5:10203' } }
    job { name: 'worker' tasks { key: 0 value: '10.1.2.3:8470' } }
    """
    self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
    self.assertEqual(resolver.master(), 'grpc://10.1.2.3:8470')
  def testNewNetworkEndpointFormat(self):
    """The newer 'networkEndpoints' API response shape (list of dicts with
    an integer port) is parsed the same as the legacy ipAddress/port pair."""
    tpu_map = {
        'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
            'health': 'HEALTHY',
            'networkEndpoints': [{
                'ipAddress': '10.2.3.4',
                'port': 8470,
            }]
        }
    }
    resolver = cluster_resolver.TPUClusterResolver(
        project='test-project',
        zone='us-central1-c',
        tpu='test-tpu-1',
        coordinator_name='coordinator',
        coordinator_address='10.128.1.5:10203',
        credentials=None,
        service=self.mock_service_client(tpu_map=tpu_map))
    actual_cluster_spec = resolver.cluster_spec()
    expected_proto = """
    job { name: 'coordinator' tasks { key: 0 value: '10.128.1.5:10203' } }
    job { name: 'worker' tasks { key: 0 value: '10.2.3.4:8470' } }
    """
    self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
    self.assertEqual('grpc://10.2.3.4:8470', resolver.master())
  @mock.patch.object(cluster_resolver.TPUClusterResolver,
                     '_requestComputeMetadata',
                     mock_request_compute_metadata)
  def testPodResolution(self):
    """A pod (multiple networkEndpoints) expands to one worker task per
    endpoint, in endpoint order; master() is the first endpoint."""
    tpu_map = {
        'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
            'health':
                'HEALTHY',
            'networkEndpoints': [
                {
                    'ipAddress': '10.2.3.4',
                    'port': 8470,
                },
                {
                    'ipAddress': '10.2.3.5',
                    'port': 8470,
                },
                {
                    'ipAddress': '10.2.3.6',
                    'port': 8470,
                },
                {
                    'ipAddress': '10.2.3.7',
                    'port': 8470,
                },
            ]
        }
    }
    resolver = cluster_resolver.TPUClusterResolver(
        tpu='test-tpu-1',
        credentials=None,
        service=self.mock_service_client(tpu_map=tpu_map),
        coordinator_name='coordinator')
    actual_cluster_spec = resolver.cluster_spec()
    expected_proto = """
    job {
      name: 'coordinator',
      tasks { key: 0 value: '10.128.1.2:%s'}
    }
    job {
      name: 'worker'
      tasks { key: 0 value: '10.2.3.4:8470' }
      tasks { key: 1 value: '10.2.3.5:8470' }
      tasks { key: 2 value: '10.2.3.6:8470' }
      tasks { key: 3 value: '10.2.3.7:8470' }
    }
    """ % resolver._coordinator_port
    self._verifyClusterSpecEquality(actual_cluster_spec, str(expected_proto))
    self.assertEqual(resolver.master(), 'grpc://10.2.3.4:8470')
  def testPodResolutionNoCoordinator(self):
    """Pod resolution with coordinator_name=None: only the worker job, one
    task per network endpoint."""
    tpu_map = {
        'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
            'health':
                'HEALTHY',
            'networkEndpoints': [
                {
                    'ipAddress': '10.2.3.4',
                    'port': 8470,
                },
                {
                    'ipAddress': '10.2.3.5',
                    'port': 8470,
                },
                {
                    'ipAddress': '10.2.3.6',
                    'port': 8470,
                },
                {
                    'ipAddress': '10.2.3.7',
                    'port': 8470,
                },
            ]
        }
    }
    resolver = cluster_resolver.TPUClusterResolver(
        project='test-project',
        zone='us-central1-c',
        tpu='test-tpu-1',
        coordinator_name=None,
        credentials=None,
        service=self.mock_service_client(tpu_map=tpu_map))
    actual_cluster_spec = resolver.cluster_spec()
    expected_proto = """
    job {
      name: 'worker'
      tasks { key: 0 value: '10.2.3.4:8470' }
      tasks { key: 1 value: '10.2.3.5:8470' }
      tasks { key: 2 value: '10.2.3.6:8470' }
      tasks { key: 3 value: '10.2.3.7:8470' }
    }
    """
    self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
    self.assertEqual(resolver.master(), 'grpc://10.2.3.4:8470')
  def testGetMasterNoEntries(self):
    """An empty tpu list is rejected at construction time with ValueError."""
    tpu_map = {}
    with self.assertRaises(ValueError):
      cluster_resolver.TPUClusterResolver(
          project='test-project',
          zone='us-central1-c',
          tpu=[],
          coordinator_name=None,
          credentials=None,
          service=self.mock_service_client(tpu_map=tpu_map))
  # TODO(saeta): Convert to parameterized test when included in OSS TF.
  def verifyShouldResolve(self, tpu, should_resolve):
    """Helper: assert _shouldResolve() for a given `tpu` argument.

    Args:
      tpu: the tpu constructor argument under test (string or list).
      should_resolve: expected boolean result of resolver._shouldResolve().
    """
    resolver = cluster_resolver.TPUClusterResolver(
        project='test-project',
        zone='us-central1-c',
        tpu=tpu,
        coordinator_name=None,
        credentials=None,
        service=self.mock_service_client(tpu_map={}))
    self.assertEqual(should_resolve, resolver._shouldResolve(),
                     "TPU: '%s'" % tpu)
  @mock.patch.object(cluster_resolver.TPUClusterResolver,
                     '_isRunningInGCE',
                     mock_is_not_running_in_gce)
  def testShouldResolveNoName(self):
    """An empty tpu name does not trigger Cloud API resolution."""
    self.verifyShouldResolve('', False)
def testShouldResolveLocal(self):
self.verifyShouldResolve('local', False)
def testShouldResolveGrpc(self):
self.verifyShouldResolve('grpc://10.1.2.3:8470', False)
def testShouldResolveBns(self):
self.verifyShouldResolve('/bns/foo/bar', False)
def testShouldResolveName(self):
self.verifyShouldResolve('mytpu', True)
def testShouldResolveList(self):
self.verifyShouldResolve(['myothertpu'], True)
def testShouldResolveGrpcPrefix(self):
self.verifyShouldResolve('grpctpu', True)
  def testNoCallComputeMetadata(self):
    """A /bns/ target bypasses compute metadata: master() echoes the target
    and cluster_spec() is None."""
    resolver = cluster_resolver.TPUClusterResolver(
        tpu='/bns/foo/bar')
    self.assertEqual(
        compat.as_bytes('/bns/foo/bar'), resolver.master())
    self.assertEqual(None, resolver.cluster_spec())
  def testGkeEnvironmentForDonut(self):
    """A single-endpoint KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS env var (GKE) maps
    to a one-task worker job.

    NOTE(review): the trailing `del` is not exception-safe; if an assert
    fails mid-test the env var leaks into other tests. Consider
    try/finally or self.addCleanup — confirm before changing.
    """
    os.environ['KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS'] = 'grpc://10.120.27.5:8470'
    self.assertIn('KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS', os.environ)
    self.assertTrue(cluster_resolver.TPUClusterResolver._inGke())
    self.assertEqual(
        compat.as_bytes('grpc://10.120.27.5:8470'),
        compat.as_bytes(cluster_resolver.TPUClusterResolver._gkeEndpoints()))
    resolver = cluster_resolver.TPUClusterResolver()
    self.assertEqual(
        compat.as_bytes('grpc://10.120.27.5:8470'),
        compat.as_bytes(resolver.master()))
    actual_cluster_spec = resolver.cluster_spec()
    expected_proto = """
    job {
      name: 'worker'
      tasks { key: 0 value: '10.120.27.5:8470' }
    }
    """
    self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
    del os.environ['KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS']
  def testGkeEnvironmentForPod(self):
    """A comma-separated multi-endpoint GKE env var maps to one worker task
    per endpoint, in order; master() is the first endpoint.

    NOTE(review): same non-exception-safe env cleanup as
    testGkeEnvironmentForDonut — consider try/finally or addCleanup.
    """
    os.environ['KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS'] = ('grpc://10.120.27.5:8470,'
                                                     'grpc://10.120.27.6:8470,'
                                                     'grpc://10.120.27.7:8470,'
                                                     'grpc://10.120.27.8:8470')
    self.assertIn('KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS', os.environ)
    self.assertTrue(cluster_resolver.TPUClusterResolver._inGke())
    self.assertEqual(
        compat.as_bytes('grpc://10.120.27.5:8470,'
                        'grpc://10.120.27.6:8470,'
                        'grpc://10.120.27.7:8470,'
                        'grpc://10.120.27.8:8470'),
        compat.as_bytes(cluster_resolver.TPUClusterResolver._gkeEndpoints()))
    resolver = cluster_resolver.TPUClusterResolver()
    self.assertEqual(
        compat.as_bytes('grpc://10.120.27.5:8470'),
        compat.as_bytes(resolver.master()))
    actual_cluster_spec = resolver.cluster_spec()
    expected_proto = """
    job {
      name: 'worker'
      tasks { key: 0 value: '10.120.27.5:8470' }
      tasks { key: 1 value: '10.120.27.6:8470' }
      tasks { key: 2 value: '10.120.27.7:8470' }
      tasks { key: 3 value: '10.120.27.8:8470' }
    }
    """
    self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
    del os.environ['KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS']
def testEnvironmentDiscoveryUrl(self):
os.environ['TPU_API_DISCOVERY_URL'] = 'https://{api}.internal/{apiVersion}'
self.assertEqual('https://{api}.internal/{apiVersion}',
(cluster_resolver.TPUClusterResolver.
_environmentDiscoveryUrl()))
  def testEnvironmentAndRpcDetectionForGoogle(self):
    """A /bns/ target is classified as the 'google' environment with no
    RPC layer."""
    resolver = cluster_resolver.TPUClusterResolver(
        tpu='/bns/ab/cd/ef')
    self.assertEqual(resolver.environment, 'google')
    self.assertEqual(resolver.rpc_layer, None)
  def testEnvironmentAndRpcDetectionForGrpcString(self):
    """A grpc:// target yields an empty environment and rpc_layer 'grpc'."""
    resolver = cluster_resolver.TPUClusterResolver(
        tpu='grpc://10.1.2.3:8470')
    self.assertEqual(resolver.environment, '')
    self.assertEqual(resolver.rpc_layer, 'grpc')
    self.assertEqual(resolver.master(), 'grpc://10.1.2.3:8470')
  def testOverrideTaskTypeAndIndexAndGetMaster(self):
    """master() honors both resolver attributes (task_type/task_index) and
    per-call overrides, including a custom rpc_layer scheme."""
    tpu_map = {
        'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': {
            'health':
                'HEALTHY',
            'networkEndpoints': [
                {
                    'ipAddress': '10.2.3.4',
                    'port': 8470,
                },
                {
                    'ipAddress': '10.2.3.5',
                    'port': 8470,
                },
                {
                    'ipAddress': '10.2.3.6',
                    'port': 8470,
                },
                {
                    'ipAddress': '10.2.3.7',
                    'port': 8470,
                },
            ]
        }
    }
    resolver = cluster_resolver.TPUClusterResolver(
        project='test-project',
        zone='us-central1-c',
        tpu='test-tpu-1',
        coordinator_name=None,
        credentials=None,
        service=self.mock_service_client(tpu_map=tpu_map))
    # Default: first endpoint.
    self.assertEqual(resolver.master(), 'grpc://10.2.3.4:8470')
    # Attribute-level override selects a different worker task.
    resolver.task_type = 'worker'
    resolver.task_index = 3
    self.assertEqual(resolver.master(), 'grpc://10.2.3.7:8470')
    # Call-level override takes precedence, including the rpc scheme.
    self.assertEqual(
        resolver.master(
            task_type='worker', task_index=2, rpc_layer='test'),
        'test://10.2.3.6:8470')
  def testGetDeviceDictAndCoresWithTPUs(self):
    """_get_device_dict_and_cores groups TPU core ids by task id and counts
    the total number of TPU cores across all tasks."""
    device_names = [
        '/job:tpu_worker/task:0/device:TPU:0',
        '/job:tpu_worker/task:1/device:TPU:1',
        '/job:tpu_worker/task:2/device:TPU:0',
        '/job:tpu_worker/task:3/device:TPU:1',
        '/job:tpu_worker/task:0/device:TPU:4',
        '/job:tpu_worker/task:1/device:TPU:5',
        '/job:tpu_worker/task:2/device:TPU:4',
        '/job:tpu_worker/task:3/device:TPU:5',
    ]
    device_list = [
        session._DeviceAttributes(
            name, 'TPU', 1024, 0) for name in device_names
    ]
    device_details = tpu_cluster_resolver._get_device_dict_and_cores(
        device_list)
    self.assertEqual(device_details.total_cores, 8)
    self.assertEqual(device_details.device_map,
                     {'0': ['0', '4'],
                      '1': ['1', '5'],
                      '2': ['0', '4'],
                      '3': ['1', '5']})
  def testGetDeviceDictAndCoresWithCPUsAndGPUs(self):
    """Non-TPU devices (CPU/GPU names) contribute nothing: empty map, zero
    cores."""
    device_names = [
        '/job:tpu_worker/task:0/device:CPU:0',
        '/job:tpu_worker/task:1/device:CPU:0',
        '/job:tpu_worker/task:2/device:CPU:0',
        '/job:tpu_worker/task:3/device:CPU:0',
        '/job:tpu_worker/task:0/device:GPU:1',
        '/job:tpu_worker/task:1/device:GPU:1',
        '/job:tpu_worker/task:2/device:GPU:1',
        '/job:tpu_worker/task:3/device:GPU:1',
    ]
    device_list = [
        session._DeviceAttributes(
            name, 'XLA', 1024, 0) for name in device_names
    ]
    device_dict, num_cores = tpu_cluster_resolver._get_device_dict_and_cores(
        device_list)
    self.assertEqual(num_cores, 0)
    self.assertEqual(device_dict, {})
  def testVerifySameCoreCount(self):
    """_verify_and_return_same_core_count returns the per-task core count
    when uniform, and raises RuntimeError on mismatched counts."""
    self.assertEqual(
        tpu_cluster_resolver._verify_and_return_same_core_count(
            {0: [0, 1, 2, 3, 4, 5, 6, 7]}), 8)
    self.assertEqual(
        tpu_cluster_resolver._verify_and_return_same_core_count(
            {0: [0, 1], 1: [2, 3]}), 2)
    with self.assertRaises(RuntimeError):
      tpu_cluster_resolver._verify_and_return_same_core_count(
          {0: [0], 1: [1, 2]})
  @mock.patch.object(session.BaseSession, 'list_devices')
  @mock.patch.object(cluster_resolver.TPUClusterResolver,
                     '_isRunningInGCE',
                     mock_is_not_running_in_gce)
  def testNumAcceleratorsSuccess(self, mock_list_devices):
    """num_accelerators() reports cores per task (8 cores over 4 tasks -> 2),
    using a mocked session device list."""
    device_names = [
        '/job:tpu_worker/task:0/device:TPU:0',
        '/job:tpu_worker/task:1/device:TPU:1',
        '/job:tpu_worker/task:2/device:TPU:0',
        '/job:tpu_worker/task:3/device:TPU:1',
        '/job:tpu_worker/task:0/device:TPU:4',
        '/job:tpu_worker/task:1/device:TPU:5',
        '/job:tpu_worker/task:2/device:TPU:4',
        '/job:tpu_worker/task:3/device:TPU:5',
    ]
    device_list = [
        session._DeviceAttributes(
            name, 'TPU', 1024, 0) for name in device_names
    ]
    mock_list_devices.return_value = device_list
    resolver = cluster_resolver.TPUClusterResolver(tpu='')
    self.assertEqual(resolver.num_accelerators(), 2)
  @mock.patch.object(session.BaseSession, 'list_devices')
  @mock.patch.object(cluster_resolver.TPUClusterResolver,
                     '_isRunningInGCE',
                     mock_is_not_running_in_gce)
  def testNumAcceleratorsRetryFailure(self, mock_list_devices):
    """If listing devices keeps timing out, num_accelerators() surfaces a
    RuntimeError after retries are exhausted."""
    resolver = cluster_resolver.TPUClusterResolver(tpu='')
    mock_list_devices.side_effect = errors.DeadlineExceededError(
        None, None, 'timeout')
    with self.assertRaises(RuntimeError):
      resolver.num_accelerators()
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| |
"""Configuration management setup
Some terminology:
- name
As written in config files.
- value
Value associated with a name
- key
   Name combined with its section (section.name)
- variant
A single word describing where the configuration key-value pair came from
"""
import configparser
import locale
import logging
import os
import sys
from typing import Any, Dict, Iterable, List, NewType, Optional, Tuple
from pip._internal.exceptions import (
ConfigurationError,
ConfigurationFileCouldNotBeLoaded,
)
from pip._internal.utils import appdirs
from pip._internal.utils.compat import WINDOWS
from pip._internal.utils.misc import ensure_dir, enum
RawConfigParser = configparser.RawConfigParser  # Shorthand
Kind = NewType("Kind", str)

# Platform-dependent base name of pip's configuration files.
CONFIG_BASENAME = 'pip.ini' if WINDOWS else 'pip.conf'
# PIP_VERSION / PIP_HELP are CLI flags, not configuration values.
ENV_NAMES_IGNORED = "version", "help"

# The kinds of configurations there are.
kinds = enum(
    USER="user",        # User Specific
    GLOBAL="global",    # System Wide
    SITE="site",        # [Virtual] Environment Specific
    ENV="env",          # from PIP_CONFIG_FILE
    ENV_VAR="env-var",  # from Environment Variables
)
# Precedence: later entries override earlier ones when keys collide.
OVERRIDE_ORDER = kinds.GLOBAL, kinds.USER, kinds.SITE, kinds.ENV, kinds.ENV_VAR
# Only file-backed variants may be targeted for modification.
VALID_LOAD_ONLY = kinds.USER, kinds.GLOBAL, kinds.SITE

logger = logging.getLogger(__name__)
# NOTE: Maybe use the optionx attribute to normalize keynames.
def _normalize_name(name):
# type: (str) -> str
"""Make a name consistent regardless of source (environment or file)
"""
name = name.lower().replace('_', '-')
if name.startswith('--'):
name = name[2:] # only prefer long opts
return name
def _disassemble_key(name):
# type: (str) -> List[str]
if "." not in name:
error_message = (
"Key does not contain dot separated section and key. "
"Perhaps you wanted to use 'global.{}' instead?"
).format(name)
raise ConfigurationError(error_message)
return name.split(".", 1)
def get_configuration_files():
    # type: () -> Dict[Kind, List[str]]
    """Return candidate configuration file paths, keyed by variant kind.

    USER lists the legacy (~/.pip) location before the new appdirs one,
    so the new file wins when both are loaded.
    """
    site_config_file = os.path.join(sys.prefix, CONFIG_BASENAME)
    legacy_config_file = os.path.join(
        os.path.expanduser('~'),
        'pip' if WINDOWS else '.pip',
        CONFIG_BASENAME,
    )
    new_config_file = os.path.join(
        appdirs.user_config_dir("pip"), CONFIG_BASENAME
    )
    return {
        kinds.GLOBAL: [
            os.path.join(path, CONFIG_BASENAME)
            for path in appdirs.site_config_dirs('pip')
        ],
        kinds.SITE: [site_config_file],
        kinds.USER: [legacy_config_file, new_config_file],
    }
class Configuration:
    """Handles management of configuration.

    Provides an interface to accessing and managing configuration files.

    This class provides an API that takes "section.key-name" style
    keys and stores the value associated with it as "key-name" under the
    section "section".

    This allows for a clean interface wherein both the section and the
    key-name are preserved in an easy to manage form in the configuration files
    and the data stored is also nice.
    """

    def __init__(self, isolated, load_only=None):
        # type: (bool, Optional[Kind]) -> None
        """:param isolated: skip environment variables when loading.
        :param load_only: restrict loading/editing to one file-backed kind
            (must be one of VALID_LOAD_ONLY) or None for all.
        """
        super().__init__()
        if load_only is not None and load_only not in VALID_LOAD_ONLY:
            raise ConfigurationError(
                "Got invalid value for load_only - should be one of {}".format(
                    ", ".join(map(repr, VALID_LOAD_ONLY))
                )
            )
        self.isolated = isolated
        self.load_only = load_only

        # Because we keep track of where we got the data from
        self._parsers = {
            variant: [] for variant in OVERRIDE_ORDER
        }  # type: Dict[Kind, List[Tuple[str, RawConfigParser]]]
        self._config = {
            variant: {} for variant in OVERRIDE_ORDER
        }  # type: Dict[Kind, Dict[str, Any]]
        self._modified_parsers = []  # type: List[Tuple[str, RawConfigParser]]

    def load(self):
        # type: () -> None
        """Loads configuration from configuration files and environment
        """
        self._load_config_files()
        if not self.isolated:
            self._load_environment_vars()

    def get_file_to_edit(self):
        # type: () -> Optional[str]
        """Returns the file with highest priority in configuration
        """
        assert self.load_only is not None, \
            "Need to be specified a file to be editing"
        try:
            return self._get_parser_to_modify()[0]
        except IndexError:
            return None

    def items(self):
        # type: () -> Iterable[Tuple[str, Any]]
        """Returns key-value pairs like dict.items() representing the loaded
        configuration
        """
        return self._dictionary.items()

    def get_value(self, key):
        # type: (str) -> Any
        """Get a value from the configuration.
        """
        try:
            return self._dictionary[key]
        except KeyError:
            raise ConfigurationError(f"No such key - {key}")

    def set_value(self, key, value):
        # type: (str, Any) -> None
        """Modify a value in the configuration.
        """
        self._ensure_have_load_only()

        assert self.load_only
        fname, parser = self._get_parser_to_modify()

        if parser is not None:
            section, name = _disassemble_key(key)

            # Modify the parser and the configuration
            if not parser.has_section(section):
                parser.add_section(section)
            parser.set(section, name, value)

        self._config[self.load_only][key] = value
        self._mark_as_modified(fname, parser)

    def unset_value(self, key):
        # type: (str) -> None
        """Unset a value in the configuration."""
        self._ensure_have_load_only()

        assert self.load_only
        if key not in self._config[self.load_only]:
            raise ConfigurationError(f"No such key - {key}")

        fname, parser = self._get_parser_to_modify()

        if parser is not None:
            section, name = _disassemble_key(key)
            if not (parser.has_section(section)
                    and parser.remove_option(section, name)):
                # The option was not removed.
                raise ConfigurationError(
                    "Fatal Internal error [id=1]. Please report as a bug."
                )

            # The section may be empty after the option was removed.
            if not parser.items(section):
                parser.remove_section(section)
            self._mark_as_modified(fname, parser)

        del self._config[self.load_only][key]

    def save(self):
        # type: () -> None
        """Save the current in-memory state.
        """
        self._ensure_have_load_only()

        for fname, parser in self._modified_parsers:
            logger.info("Writing to %s", fname)

            # Ensure directory exists.
            ensure_dir(os.path.dirname(fname))

            with open(fname, "w") as f:
                parser.write(f)

    #
    # Private routines
    #

    def _ensure_have_load_only(self):
        # type: () -> None
        # Guard for mutating operations: they need a single target file.
        if self.load_only is None:
            raise ConfigurationError("Needed a specific file to be modifying.")
        logger.debug("Will be working with %s variant only", self.load_only)

    @property
    def _dictionary(self):
        # type: () -> Dict[str, Any]
        """A dictionary representing the loaded configuration.
        """
        # NOTE: Dictionaries are not populated if not loaded. So, conditionals
        #       are not needed here.
        retval = {}

        # Later variants in OVERRIDE_ORDER win on key collisions.
        for variant in OVERRIDE_ORDER:
            retval.update(self._config[variant])

        return retval

    def _load_config_files(self):
        # type: () -> None
        """Loads configuration from configuration files
        """
        config_files = dict(self.iter_config_files())
        if config_files[kinds.ENV][0:1] == [os.devnull]:
            logger.debug(
                "Skipping loading configuration files due to "
                "environment's PIP_CONFIG_FILE being os.devnull"
            )
            return

        for variant, files in config_files.items():
            for fname in files:
                # If there's specific variant set in `load_only`, load only
                # that variant, not the others.
                if self.load_only is not None and variant != self.load_only:
                    logger.debug(
                        "Skipping file '%s' (variant: %s)", fname, variant
                    )
                    continue

                parser = self._load_file(variant, fname)

                # Keeping track of the parsers used
                self._parsers[variant].append((fname, parser))

    def _load_file(self, variant, fname):
        # type: (Kind, str) -> RawConfigParser
        # Parse one file and merge its sections into self._config[variant].
        logger.debug("For variant '%s', will try loading '%s'", variant, fname)
        parser = self._construct_parser(fname)

        for section in parser.sections():
            items = parser.items(section)
            self._config[variant].update(self._normalized_keys(section, items))

        return parser

    def _construct_parser(self, fname):
        # type: (str) -> RawConfigParser
        parser = configparser.RawConfigParser()

        # If there is no such file, don't bother reading it but create the
        # parser anyway, to hold the data.
        # Doing this is useful when modifying and saving files, where we don't
        # need to construct a parser.
        if os.path.exists(fname):
            try:
                parser.read(fname)
            except UnicodeDecodeError:
                # See https://github.com/pypa/pip/issues/4963
                raise ConfigurationFileCouldNotBeLoaded(
                    reason="contains invalid {} characters".format(
                        locale.getpreferredencoding(False)
                    ),
                    fname=fname,
                )
            except configparser.Error as error:
                # See https://github.com/pypa/pip/issues/4893
                raise ConfigurationFileCouldNotBeLoaded(error=error)
        return parser

    def _load_environment_vars(self):
        # type: () -> None
        """Loads configuration from environment variables
        """
        self._config[kinds.ENV_VAR].update(
            self._normalized_keys(":env:", self.get_environ_vars())
        )

    def _normalized_keys(self, section, items):
        # type: (str, Iterable[Tuple[str, Any]]) -> Dict[str, Any]
        """Normalizes items to construct a dictionary with normalized keys.

        This routine is where the names become keys and are made the same
        regardless of source - configuration files or environment.
        """
        normalized = {}
        for name, val in items:
            key = section + "." + _normalize_name(name)
            normalized[key] = val
        return normalized

    def get_environ_vars(self):
        # type: () -> Iterable[Tuple[str, str]]
        """Returns a generator with all environmental vars with prefix PIP_"""
        for key, val in os.environ.items():
            if key.startswith("PIP_"):
                name = key[4:].lower()
                if name not in ENV_NAMES_IGNORED:
                    yield name, val

    # XXX: This is patched in the tests.
    def iter_config_files(self):
        # type: () -> Iterable[Tuple[Kind, List[str]]]
        """Yields variant and configuration files associated with it.

        This should be treated like items of a dictionary.
        """
        # SMELL: Move the conditions out of this function

        # environment variables have the lowest priority
        config_file = os.environ.get('PIP_CONFIG_FILE', None)
        if config_file is not None:
            yield kinds.ENV, [config_file]
        else:
            yield kinds.ENV, []

        config_files = get_configuration_files()

        # at the base we have any global configuration
        yield kinds.GLOBAL, config_files[kinds.GLOBAL]

        # per-user configuration next
        should_load_user_config = not self.isolated and not (
            config_file and os.path.exists(config_file)
        )
        if should_load_user_config:
            # The legacy config file is overridden by the new config file
            yield kinds.USER, config_files[kinds.USER]

        # finally virtualenv configuration first trumping others
        yield kinds.SITE, config_files[kinds.SITE]

    def get_values_in_config(self, variant):
        # type: (Kind) -> Dict[str, Any]
        """Get values present in a config file"""
        return self._config[variant]

    def _get_parser_to_modify(self):
        # type: () -> Tuple[str, RawConfigParser]
        # Determine which parser to modify
        assert self.load_only
        parsers = self._parsers[self.load_only]
        if not parsers:
            # This should not happen if everything works correctly.
            raise ConfigurationError(
                "Fatal Internal error [id=2]. Please report as a bug."
            )

        # Use the highest priority parser.
        return parsers[-1]

    # XXX: This is patched in the tests.
    def _mark_as_modified(self, fname, parser):
        # type: (str, RawConfigParser) -> None
        file_parser_tuple = (fname, parser)
        if file_parser_tuple not in self._modified_parsers:
            self._modified_parsers.append(file_parser_tuple)

    def __repr__(self):
        # type: () -> str
        return f"{self.__class__.__name__}({self._dictionary!r})"
| |
#!/usr/bin/env python3
from participantCollection import ParticipantCollection
import re
import datetime
import time
import pyperclip
# EDIT ME
# Per-month knobs for the generated reddit post.
currentMonthTotalDays = 31
year = 2020
currentMonthIndex = datetime.date.today().month
currentDayOfMonthIndex = datetime.date.today().day
currentDayOfYearIndex = time.localtime().tm_yday
# TODO: testing...
# currentMonthTotalDays = 31
# currentMonthIndex = 12
# currentDayOfMonthIndex = 31
# currentDayOfYearIndex = 366
currentMonthPenultimateDayIndex = currentMonthTotalDays - 1
currentMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[currentMonthIndex]
# % 12 + 1 wraps December (12) around to January (1).
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[nextMonthIndex]
currentDayOfMonthName = {1: 'first', 2: 'second', 3: 'third', 4: 'fourth', 5: 'fifth', 6: 'sixth', 7: 'seventh', 8: 'eighth', 9: 'ninth', 10: 'tenth', 11: 'eleventh', 12: 'twelfth', 13: 'thirteenth', 14: 'fourteenth', 15: 'fifteenth', 16: 'sixteenth', 17: 'seventeenth', 18: 'eighteenth', 19: 'nineteenth', 20: 'twentieth', 21: 'twenty-first', 22: 'twenty-second', 23: 'twenty-third', 24: 'twenty-fourth', 25: 'twenty-fifth', 26: 'twenty-sixth', 27: 'twenty-seventh', 28: 'twenty-eighth', 29: 'twenty-ninth', 30: 'thirtieth', 31: 'thirty-first'}[currentDayOfMonthIndex]
currentDayOfWeekName = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}[datetime.date.today().weekday()]
# Participant data is read from the working directory by ParticipantCollection.
participants = ParticipantCollection()
numberStillIn = participants.sizeOfParticipantsWhoAreStillIn()
initialNumber = participants.size()
# NOTE(review): raises ZeroDivisionError if the collection is empty — confirm
# that an empty participant file is impossible before relying on this.
percentStillIn = int(round(100 * numberStillIn / initialNumber, 0))
def templateForParticipants():
    """Render the still-in participant list; names that have not checked in
    this month are flagged with ' ~'."""
    entries = []
    for participant in participants.participantsWhoAreStillIn():
        flag = "" if participant.hasCheckedIn else " ~"
        entries.append("/u/" + participant.name + flag + "\n\n")
    return "".join(entries)
def templateForParticipantsOnLastDayOfMonth():
    """Render the month-end participant list, split into those who checked
    in and those about to be removed for not checking in."""
    parts = [
        "These participants have checked in at least once in CURRENT_MONTH_NAME:\n",
        "\n",
    ]
    for participant in participants.participantsWhoAreStillInAndHaveCheckedIn():
        parts.append("/u/" + participant.name + "\n")
    parts.append("\n")
    parts.append("These participants have not reported a relapse, so they are still in the running, but **if they do not check in by the end of today, they will be removed from the list**:\n")
    parts.append("\n")
    for participant in participants.participantsWhoAreStillInAndHaveNotCheckedIn():
        parts.append("/u/" + participant.name + " ~\n")
    parts.append("\n")
    return "".join(parts)
def templateForJan1():
    """Build the January 1 post: challenge kickoff + late-signup announcement."""
    # first day of the challenge, and late signup grace period
    print("using templateForJan1")
    answer = ""
    print("=============================================================")
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is the very first day of the year-long Stay Clean YEAR challenge. ~~We will no longer be accepting new signups.~~ Good news! We will be be accepting late signups for the next 14 days. If you forgot to sign up for the YEAR challenge, and you've been clean for all of January, just leave a \"sign me up\" comment below, and I'll add you. Best of luck to everyone here!\n"
    answer += "\n"
    answer += "Here's how this thing works:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "Here are our **INITIAL_NUMBER** original participants:\n\n"
    answer += templateForParticipants()
    print("=============================================================")
    return answer
def templateForJan2to13():
    """Build the daily post for Jan 2-13, while late signups remain open."""
    # late signup grace period
    print("using templateForJan2to13")
    answer = ""
    print("=============================================================")
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is **day CURRENT_DAY_OF_YEAR_INDEX** of the year-long Stay Clean YEAR challenge. This is the CURRENT_DAY_OF_MONTH_NAME day of our 14 day late-signup grace period. If you forgot to sign up for the YEAR challenge, and you've been clean for all of January, just leave a \"sign me up\" comment below, and I'll add you.\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. These NUMBER_STILL_IN participants represent **CUMULATIVE_DAYS_BY_THOSE_STILL_IN pornfree days** in YEAR! That's more than **CUMULATIVE_YEARS_BY_THOSE_STILL_IN years**.\n"
    answer += "\n"
    answer += "Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    print("=============================================================")
    return answer
def templateForJan14():
    """Build the Jan 14 post: final day of the late-signup grace period."""
    # last day of late signup grace period
    print("using templateForJan14")
    answer = ""
    print("=============================================================")
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is **day CURRENT_DAY_OF_YEAR_INDEX** of the year-long Stay Clean YEAR challenge. This is the **last day** of our 14 day late-signup grace period. If you forgot to sign up for the YEAR challenge, and you've been clean for all of January, just leave a \"sign me up\" comment below, and I'll add you. After today, further signup requests will be silently ignored.\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. These NUMBER_STILL_IN participants represent **CUMULATIVE_DAYS_BY_THOSE_STILL_IN pornfree days** in YEAR! That's more than **CUMULATIVE_YEARS_BY_THOSE_STILL_IN years**.\n"
    answer += "\n"
    answer += "Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    print("=============================================================")
    return answer
def templateForJan15():
    # First day AFTER the 14-day late-signup grace period has closed.
    print("using templateForJan15")
    print("=============================================================")
    pieces = [
        "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is **day CURRENT_DAY_OF_YEAR_INDEX** of the year-long Stay Clean YEAR challenge. Our 14 day late-signup grace period is now over. If you forgot to sign up, it's too late to sign up for Stay Clean YEAR, but feel free to leave comments here anyway, and join us over on the monthly challenge thread.\n",
        "\n",
        "Guidelines:\n",
        "\n",
        "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n",
        "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n",
        "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n",
        '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n',
        "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n",
        "\n",
        "Good luck!\n",
        "\n",
        "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. These NUMBER_STILL_IN participants represent **CUMULATIVE_DAYS_BY_THOSE_STILL_IN pornfree days** in YEAR! That's more than **CUMULATIVE_YEARS_BY_THOSE_STILL_IN years**.\n",
        "\n",
        "Here is the list of participants still with the challenge:\n\n",
    ]
    post = "".join(pieces) + templateForParticipants()
    print("=============================================================")
    return post
def templateForJan16to25():
    # Ordinary mid-January update (days 16-25), after signups have closed.
    print("using templateForJan16to25")
    print("=============================================================")
    pieces = [
        "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is **day CURRENT_DAY_OF_YEAR_INDEX** of the year-long Stay Clean YEAR challenge. Keep fighting the good fight!\n",
        "\n",
        "Guidelines:\n",
        "\n",
        "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n",
        "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n",
        "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n",
        '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n',
        "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n",
        "\n",
        "Good luck!\n",
        "\n",
        "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. These NUMBER_STILL_IN participants represent **CUMULATIVE_DAYS_BY_THOSE_STILL_IN pornfree days** in YEAR! That's more than **CUMULATIVE_YEARS_BY_THOSE_STILL_IN years**.\n",
        "\n",
        "Here is the list of participants still with the challenge:\n\n",
    ]
    post = "".join(pieces) + templateForParticipants()
    print("=============================================================")
    return post
def templateForJan26to30():
    # End-of-January check-in countdown (days 26-30 of the month).
    print("using templateForJan26to30")
    print("=============================================================")
    daysRemaining = str(currentMonthTotalDays - currentDayOfMonthIndex)
    pieces = [
        "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is **day CURRENT_DAY_OF_YEAR_INDEX** of the year-long Stay Clean YEAR challenge. Keep fighting the good fight!\n",
        "\n",
        "**THE COUNTDOWN: Attention everyone!** You have " + daysRemaining + " days to make a checkin comment (if you haven't already done so in CURRENT_MONTH_NAME) to be counted as an active participant! **Otherwise your name will be REMOVED from the list** on CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS!!\n",
        "\n",
        "Guidelines:\n",
        "\n",
        "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n",
        "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n",
        "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n",
        '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n',
        "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n",
        "\n",
        "Good luck!\n",
        "\n",
        "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. These NUMBER_STILL_IN participants represent **CUMULATIVE_DAYS_BY_THOSE_STILL_IN pornfree days** in YEAR! That's more than **CUMULATIVE_YEARS_BY_THOSE_STILL_IN years**.\n",
        "\n",
        "Here is the list of participants still with the challenge:\n\n",
    ]
    post = "".join(pieces) + templateForParticipants()
    print("=============================================================")
    return post
def templateForJan31():
    # January 31: last chance to check in before the monthly prune.
    print("using templateForJan31")
    print("=============================================================")
    pieces = [
        "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is **day CURRENT_DAY_OF_YEAR_INDEX** of the year-long Stay Clean YEAR challenge. Keep fighting the good fight!\n",
        "\n",
        "**THIS IS YOUR LAST DAY TO CHECK IN** (if you haven't already done so in CURRENT_MONTH_NAME) **BEFORE YOUR NAME IS REMOVED FROM THE LIST!** Check in by posting a brief comment.\n",
        "\n",
        "Guidelines:\n",
        "\n",
        "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n",
        "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n",
        "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n",
        '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n',
        "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n",
        "\n",
        "Good luck!\n",
        "\n",
        "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**.\n\n",
    ]
    post = "".join(pieces) + templateForParticipantsOnLastDayOfMonth()
    print("=============================================================")
    return post
def templateForUltimateMinus5toPenultimateDayOfMonth():
    # Check-in countdown for the last few days of an ordinary month.
    print("using templateForUltimateMinus5toPenultimateDayOfMonth")
    print("=============================================================")
    daysRemaining = str(currentMonthTotalDays - currentDayOfMonthIndex)
    pieces = [
        "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is **day CURRENT_DAY_OF_YEAR_INDEX** of the year-long Stay Clean YEAR challenge. Keep fighting the good fight!\n",
        "\n",
        "**THE COUNTDOWN: Attention everyone!** You have " + daysRemaining + " days to make a checkin comment (if you haven't already done so in CURRENT_MONTH_NAME) to be counted as an active participant! **Otherwise your name will be REMOVED from the list** on CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS!!\n",
        "\n",
        "Guidelines:\n",
        "\n",
        "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n",
        "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n",
        "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n",
        '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n',
        "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n",
        "\n",
        "Good luck!\n",
        "\n",
        "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. These NUMBER_STILL_IN participants represent **CUMULATIVE_DAYS_BY_THOSE_STILL_IN pornfree days** in YEAR! That's more than **CUMULATIVE_YEARS_BY_THOSE_STILL_IN years**.\n",
        "\n",
        "Here is the list of participants still with the challenge:\n\n",
    ]
    post = "".join(pieces) + templateForParticipants()
    print("=============================================================")
    return post
def templateForUltimateDayOfMonth():
    # Last day of an ordinary month: final check-in warning before the prune.
    print("using templateForUltimateDayOfMonth")
    print("=============================================================")
    pieces = [
        "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is **day CURRENT_DAY_OF_YEAR_INDEX** of the year-long Stay Clean YEAR challenge. Keep fighting the good fight!\n",
        "\n",
        "**THIS IS YOUR LAST DAY TO CHECK IN** (if you haven't already done so in CURRENT_MONTH_NAME) **BEFORE YOUR NAME IS REMOVED FROM THE LIST!** Check in by posting a brief comment.\n",
        "\n",
        "Guidelines:\n",
        "\n",
        "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n",
        "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n",
        "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n",
        '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n',
        "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n",
        "\n",
        "Good luck!\n",
        "\n",
        "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. These NUMBER_STILL_IN participants represent **CUMULATIVE_DAYS_BY_THOSE_STILL_IN pornfree days** in YEAR! That's more than **CUMULATIVE_YEARS_BY_THOSE_STILL_IN years**.\n",
        "\n",
        "Here is the list of participants still with the challenge:\n\n",
    ]
    post = "".join(pieces) + templateForParticipantsOnLastDayOfMonth()
    print("=============================================================")
    return post
def templateForUltimateMinus5toPenultimateDayOfYear():
    # Check-in countdown for December 26-30, the end of the challenge year.
    print("using templateForUltimateMinus5toPenultimateDayOfYear")
    print("=============================================================")
    daysRemaining = str(currentMonthTotalDays - currentDayOfMonthIndex)
    pieces = [
        "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is **day CURRENT_DAY_OF_YEAR_INDEX** of the year-long Stay Clean YEAR challenge. Keep fighting the good fight!\n",
        "\n",
        "**THE COUNTDOWN: Attention everyone!** You have " + daysRemaining + " days to make a checkin comment (if you haven't already done so in CURRENT_MONTH_NAME) to be counted as an active participant! **Otherwise your name will be REMOVED from the list** on CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS!!\n",
        "\n",
        "Guidelines:\n",
        "\n",
        "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n",
        "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n",
        "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n",
        '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n',
        "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n",
        "\n",
        "Good luck!\n",
        "\n",
        "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. These NUMBER_STILL_IN participants represent **CUMULATIVE_DAYS_BY_THOSE_STILL_IN pornfree days** in YEAR! That's more than **CUMULATIVE_YEARS_BY_THOSE_STILL_IN years**.\n",
        "\n",
        "Here is the list of participants still with the challenge:\n\n",
    ]
    post = "".join(pieces) + templateForParticipants()
    print("=============================================================")
    return post
def templateForUltimateDayOfYear():
    # December 31: the final day of the year-long challenge (no guidelines
    # section; the congratulatory wrap-up is posted the next day).
    print("using templateForUltimateDayOfYear")
    pieces = [
        "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the very last day of the Stay Clean YEAR challenge. This is it, folks, the day we've been waiting for... the final day of the challenge. I'll be making a congratulatory post tomorrow to honor the victors.\n",
        "\n",
        "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. These NUMBER_STILL_IN participants represent **CUMULATIVE_DAYS_BY_THOSE_STILL_IN pornfree days** in YEAR! That's more than **CUMULATIVE_YEARS_BY_THOSE_STILL_IN years**.\n",
        "\n",
    ]
    return "".join(pieces) + templateForParticipantsOnLastDayOfMonth()
def templateForNormalDay():
    # Default template for any day with no special messaging.
    print("using templateForNormalDay")
    print("=============================================================")
    pieces = [
        "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, and today is **day CURRENT_DAY_OF_YEAR_INDEX** of the year-long Stay Clean YEAR challenge. Keep fighting the good fight!\n",
        "\n",
        "If you think you should still be on this list but aren't, you probably got removed for not checking in at least once per month. However, if you let me know you're still with it I'll re-add you.\n",
        "\n",
        "Guidelines:\n",
        "\n",
        "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n",
        "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n",
        "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n",
        '- Participants are required to check in once per month. If you have a "~" after your name, you have yet to check in during CURRENT_MONTH_NAME. If it is still there at the end of CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n',
        "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! And be sure to join us for the Stay Clean monthly thread!\n",
        "\n",
        "Good luck!\n",
        "\n",
        "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. These NUMBER_STILL_IN participants represent **CUMULATIVE_DAYS_BY_THOSE_STILL_IN pornfree days** in YEAR! That's more than **CUMULATIVE_YEARS_BY_THOSE_STILL_IN years**.\n",
        "\n",
        "Here is the list of participants still with the challenge:\n\n",
    ]
    post = "".join(pieces) + templateForParticipants()
    print("=============================================================")
    return post
def templateToUse():
    """Select and invoke the template function appropriate for today.

    January (days 1-31 of the year) gets special signup / grace-period
    messaging; the end of December gets year-end messaging; the last few
    days of any other month get the check-in countdown and deadline
    warnings; everything else falls through to the normal daily template.

    Relies on the module-level date globals (currentDayOfYearIndex,
    currentMonthName, currentDayOfMonthIndex, currentMonthTotalDays,
    currentMonthPenultimateDayIndex) having been initialized.

    Returns the rendered template text, still containing the ALL_CAPS
    placeholder tokens that stringToPrint() substitutes.
    """
    # Note: the old day-of-month-based dispatch (templateFor1 ... )
    # that used to be kept here as commented-out code has been removed;
    # the unreachable trailing `pass` after the exhaustive if/elif/else
    # is gone as well.
    if currentDayOfYearIndex == 1:
        return templateForJan1()
    elif 2 <= currentDayOfYearIndex <= 13:
        return templateForJan2to13()
    elif currentDayOfYearIndex == 14:
        return templateForJan14()
    elif currentDayOfYearIndex == 15:
        return templateForJan15()
    elif 16 <= currentDayOfYearIndex <= 25:
        return templateForJan16to25()
    elif 26 <= currentDayOfYearIndex <= 30:
        return templateForJan26to30()
    elif currentDayOfYearIndex == 31:
        return templateForJan31()
    elif currentMonthName == "December" and (26 <= currentDayOfMonthIndex <= 30):
        return templateForUltimateMinus5toPenultimateDayOfYear()
    elif currentMonthName == "December" and currentDayOfMonthIndex == 31:
        return templateForUltimateDayOfYear()
    elif (currentMonthPenultimateDayIndex - 4) <= currentDayOfMonthIndex <= currentMonthPenultimateDayIndex:
        return templateForUltimateMinus5toPenultimateDayOfMonth()
    elif currentDayOfMonthIndex == currentMonthTotalDays:
        return templateForUltimateDayOfMonth()
    else:
        return templateForNormalDay()
def stringToPrint():
    """Render today's template and substitute every placeholder token.

    Uses plain ``str.replace`` instead of the original ``re.sub``: the
    tokens are literal strings (no regex needed), and ``re.sub`` would
    misinterpret any backslash in a dynamic replacement value as a
    group-reference escape.

    Replacement order matters wherever one token is a substring of
    another: e.g. CUMULATIVE_YEARS_BY_THOSE_STILL_IN and
    CUMULATIVE_DAYS_BY_THOSE_STILL_IN must be replaced before the bare
    YEAR token. The order below preserves the original order exactly.

    Reads the module-level stats/date globals; returns the final post text.
    """
    answer = templateToUse()
    substitutions = [
        ('NUMBER_STILL_IN', str(numberStillIn)),
        ('INITIAL_NUMBER', str(initialNumber)),
        ('PERCENT_STILL_IN', str(percentStillIn)),
        ('CURRENT_MONTH_INDEX', str(currentMonthIndex)),
        ('CURRENT_MONTH_TOTAL_DAYS', str(currentMonthTotalDays)),
        ('CURRENT_MONTH_PENULTIMATE_DAY_INDEX', str(currentMonthPenultimateDayIndex)),
        ('CURRENT_MONTH_NAME', currentMonthName),
        ('NEXT_MONTH_INDEX', str(nextMonthIndex)),
        ('NEXT_MONTH_NAME', nextMonthName),
        ('CURRENT_DAY_OF_MONTH_INDEX', str(currentDayOfMonthIndex)),
        ('CURRENT_DAY_OF_YEAR_INDEX', str(currentDayOfYearIndex)),
        ('CURRENT_DAY_OF_MONTH_NAME', currentDayOfMonthName),
        ('CURRENT_DAY_OF_WEEK_NAME', currentDayOfWeekName),
        ('CUMULATIVE_DAYS_BY_THOSE_STILL_IN', str(currentDayOfYearIndex * numberStillIn)),
        ('CUMULATIVE_YEARS_BY_THOSE_STILL_IN', str(int(currentDayOfYearIndex * numberStillIn / 365))),
        ('YEAR', str(year)),
    ]
    for token, value in substitutions:
        answer = answer.replace(token, value)
    return answer
# Render today's post, echo it to the console between separator rules,
# and copy it to the system clipboard for pasting into Reddit.
outputString = stringToPrint()
print("=============================================================")
print(outputString)
print("=============================================================")
pyperclip.copy(outputString)
| |
from trading_ig.rest import IGService
import responses
import json
import pandas as pd
import datetime
import pytest
import re
"""
unit tests for historical prices methods
"""
class TestHistoricalPrices:
@responses.activate
def test_historical_prices_v3_defaults_happy(self):
# fetch_historical_prices v3 - default params
with open('tests/data/historic_prices.json', 'r') as file:
response_body = json.loads(file.read())
responses.add(responses.GET,
'https://demo-api.ig.com/gateway/deal/prices/MT.D.GC.Month2.IP',
headers={'CST': 'abc123', 'X-SECURITY-TOKEN': 'xyz987'},
json=response_body,
status=200)
ig_service = IGService('username', 'password', 'api_key', 'DEMO')
result = ig_service.fetch_historical_prices_by_epic(epic='MT.D.GC.Month2.IP')
prices = result['prices']
assert isinstance(result, dict)
assert isinstance(prices, pd.DataFrame)
# with no other params, default returns 10 rows at MINUTE resolution
assert prices.shape[0] == 10
assert prices.shape[1] == 13
# assert time series rows are 1 minute apart
prices['tvalue'] = prices.index
prices['delta'] = (prices['tvalue'] - prices['tvalue'].shift())
assert any(prices["delta"].dropna() == datetime.timedelta(minutes=1))
@responses.activate
def test_historical_prices_v3_datetime_happy(self):
# fetch_historical_prices v3 - between two dates, daily resolution
with open('tests/data/historic_prices_dates.json', 'r') as file:
response_body = json.loads(file.read())
responses.add(responses.GET,
'https://demo-api.ig.com/gateway/deal/prices/MT.D.GC.Month2.IP',
match_querystring=False,
headers={'CST': 'abc123', 'X-SECURITY-TOKEN': 'xyz987'},
json=response_body,
status=200)
ig_service = IGService('username', 'password', 'api_key', 'DEMO')
result = ig_service.fetch_historical_prices_by_epic(
epic='MT.D.GC.Month2.IP',
resolution='D',
start_date='2020-09-01T00:00:00',
end_date='2020-09-04T23:59:59')
prices = result['prices']
assert isinstance(result, dict)
assert isinstance(prices, pd.DataFrame)
# assert DataFrame shape
assert prices.shape[0] == 4
assert prices.shape[1] == 13
# assert time series rows are 1 day apart
prices['tvalue'] = prices.index
prices['delta'] = (prices['tvalue'] - prices['tvalue'].shift())
assert any(prices["delta"].dropna() == datetime.timedelta(days=1))
# assert default paging
assert result['metadata']['pageData']['pageSize'] == 20
assert result['metadata']['pageData']['pageNumber'] == 1
assert result['metadata']['pageData']['totalPages'] == 1
@responses.activate
def test_historical_prices_v3_num_points_happy(self):
# fetch_historical_prices v3 - number of data points, weekly resolution
with open('tests/data/historic_prices_num_points.json', 'r') as file:
response_body = json.loads(file.read())
responses.add(responses.GET,
'https://demo-api.ig.com/gateway/deal/prices/MT.D.GC.Month2.IP',
match_querystring=False,
headers={'CST': 'abc123', 'X-SECURITY-TOKEN': 'xyz987'},
json=response_body,
status=200)
ig_service = IGService('username', 'password', 'api_key', 'DEMO')
result = ig_service.fetch_historical_prices_by_epic(epic='MT.D.GC.Month2.IP',
resolution='W',
numpoints=10)
prices = result['prices']
assert isinstance(result, dict)
assert isinstance(prices, pd.DataFrame)
# assert DataFrame shape
assert prices.shape[0] == 10
assert prices.shape[1] == 13
# assert time series rows are 1 week apart
prices['tvalue'] = prices.index
prices['delta'] = (prices['tvalue'] - prices['tvalue'].shift())
assert any(prices["delta"].dropna() == datetime.timedelta(weeks=1))
@responses.activate
def test_historical_prices_v3_num_points_bad_numpoints(self):
# fetch_historical_prices v3 - number of data points, invalid numpoints
responses.add(responses.GET,
'https://demo-api.ig.com/gateway/deal/prices/MT.D.GC.Month2.IP',
match_querystring=False,
headers={'CST': 'abc123', 'X-SECURITY-TOKEN': 'xyz987'},
json={'errorCode': 'Unable to convert value=3.14159 to type= Integer int'}, # noqa
status=400)
with pytest.raises(Exception):
ig_service = IGService('username', 'password', 'api_key', 'DEMO')
result = ig_service.fetch_historical_prices_by_epic(
epic='MT.D.GC.Month2.IP',
resolution='X',
numpoints=3.14159)
assert result['errorCode'].startswith('Unable to convert value')
@responses.activate
def test_historical_prices_v3_num_points_bad_resolution(self):
# fetch_historical_prices v3 - number of data points, invalid resolution
with open('tests/data/historic_prices_num_points.json', 'r') as file:
response_body = json.loads(file.read())
responses.add(responses.GET,
'https://demo-api.ig.com/gateway/deal/prices/MT.D.GC.Month2.IP',
match_querystring=False,
headers={'CST': 'abc123', 'X-SECURITY-TOKEN': 'xyz987'},
json=response_body,
status=200)
with pytest.raises(ValueError) as excinfo:
ig_service = IGService('username', 'password', 'api_key', 'DEMO')
ig_service.fetch_historical_prices_by_epic(
epic='MT.D.GC.Month2.IP',
resolution='X',
numpoints=10)
assert "Invalid frequency" in str(excinfo.value)
@responses.activate
def test_historical_prices_v3_bad_epic(self):
# fetch_historical_prices v3 - bad epic
responses.add(responses.GET,
'https://demo-api.ig.com/gateway/deal/prices/MT.D.X.Month1.IP',
headers={'CST': 'abc123', 'X-SECURITY-TOKEN': 'xyz987'},
json={'errorCode': 'error.error.price-history.io-error'},
status=404)
with pytest.raises(Exception):
ig_service = IGService('username', 'password', 'api_key', 'DEMO')
result = ig_service.fetch_historical_prices_by_epic(epic='MT.D.X.Month1.IP')
assert result['errorCode'] == 'error.error.price-history.io-error'
@responses.activate
def test_historical_prices_v3_bad_date_format(self):
# fetch_historical_prices v3 - bad date format
responses.add(
responses.GET,
'https://demo-api.ig.com/gateway/deal/prices/MT.D.XX.Month1.IP',
headers={'CST': 'abc123', 'X-SECURITY-TOKEN': 'xyz987'},
json={'errorCode': 'Unable to parse datetime=2020/09/01T00:00:00'},
status=400)
with pytest.raises(Exception):
ig_service = IGService('username', 'password', 'api_key', 'DEMO')
result = ig_service.fetch_historical_prices_by_epic(
epic='MT.D.GC.Month2.IP',
resolution='D',
start_date='2020/09/01T00:00:00',
end_date='2020-09-04T23:59:59')
assert result['errorCode'].startswith('Unable to parse datetime')
@responses.activate
def test_historical_prices_v3_bad_date_order(self):
# fetch_historical_prices v3 - bad date order
responses.add(responses.GET,
'https://demo-api.ig.com/gateway/deal/prices/MT.D.XX.Month1.IP',
headers={'CST': 'abc123', 'X-SECURITY-TOKEN': 'xyz987'},
json={"errorCode": "error.invalid.daterange"},
status=400)
with pytest.raises(Exception):
ig_service = IGService('username', 'password', 'api_key', 'DEMO')
result = ig_service.fetch_historical_prices_by_epic(
epic='MT.D.GC.Month2.IP',
resolution='D',
start_date='2020-09-04T23:59:59',
end_date='2020/09/01T00:00:00')
assert result['errorCode'] == 'error.invalid.daterange'
@responses.activate
def test_historical_prices_by_epic_and_date_range_v1_happy(self):
# fetch_historical_prices_by_epic_and_date_range, v1 daily resolution
with open('tests/data/historic_prices_v1.json', 'r') as file:
response_body = json.loads(file.read())
responses.add(
responses.GET,
re.compile('https://demo-api.ig.com/gateway/deal/prices/.+'),
match_querystring=False,
headers={'CST': 'abc123', 'X-SECURITY-TOKEN': 'xyz987'},
json=response_body,
status=200)
ig_service = IGService('username', 'password', 'api_key', 'DEMO')
result = ig_service.fetch_historical_prices_by_epic_and_date_range(
epic='MT.D.GC.Month2.IP',
resolution='D',
start_date='2020:09:01-00:00:00',
end_date='2020:09:04-23:59:59',
version='1')
prices = result['prices']
assert isinstance(result, dict)
assert isinstance(prices, pd.DataFrame)
# assert DataFrame shape
assert prices.shape[0] == 4
assert prices.shape[1] == 13
# assert time series rows are 1 day apart
prices['tvalue'] = prices.index
prices['delta'] = (prices['tvalue'] - prices['tvalue'].shift())
assert any(prices["delta"].dropna() == datetime.timedelta(days=1))
@responses.activate
def test_historical_prices_by_epic_and_date_range_happy(self):
# fetch_historical_prices_by_epic_and_date_range, v2 daily resolution
with open('tests/data/historic_prices_v2.json', 'r') as file:
response_body = json.loads(file.read())
responses.add(
responses.GET,
re.compile('https://demo-api.ig.com/gateway/deal/prices/.+'),
match_querystring=False,
headers={'CST': 'abc123', 'X-SECURITY-TOKEN': 'xyz987'},
json=response_body,
status=200)
ig_service = IGService('username', 'password', 'api_key', 'DEMO')
result = ig_service.fetch_historical_prices_by_epic_and_date_range(
epic='MT.D.GC.Month2.IP',
resolution='D',
start_date='2020-09-01 00:00:00',
end_date='2020-09-04 23:59:59')
prices = result['prices']
assert isinstance(result, dict)
assert isinstance(prices, pd.DataFrame)
# assert DataFrame shape
assert prices.shape[0] == 10
assert prices.shape[1] == 13
# assert time series rows are 1 day apart
prices['tvalue'] = prices.index
prices['delta'] = (prices['tvalue'] - prices['tvalue'].shift())
assert any(prices["delta"].dropna() == datetime.timedelta(days=1))
@responses.activate
def test_historical_prices_by_epic_and_date_range_bad_epic(self):
    """fetch_historical_prices_by_epic_and_date_range with an unknown epic."""
    # The API answers 404 with an io-error code for unknown epics.
    responses.add(
        responses.GET,
        'https://demo-api.ig.com/gateway/deal/prices/MT.D.X.Month1.IP/DAY',
        headers={'CST': 'abc123', 'X-SECURITY-TOKEN': 'xyz987'},
        json={'errorCode': 'error.error.price-history.io-error'},
        status=404)
    with pytest.raises(Exception):
        service = IGService('username', 'password', 'api_key', 'DEMO')
        outcome = service.fetch_historical_prices_by_epic_and_date_range(
            epic='MT.D.X.Month1.IP',
            resolution='D',
            start_date='2020-09-01T00:00:00',
            end_date='2020-09-04T23:59:59')
        assert outcome['errorCode'] == 'error.error.price-history.io-error'
@responses.activate
def test_historical_prices_by_epic_and_date_range_bad_date_format(self):
    """fetch_historical_prices_by_epic_and_date_range with a malformed date."""
    # The API rejects dates it cannot parse with a 400 response.
    responses.add(
        responses.GET,
        'https://demo-api.ig.com/gateway/deal/prices/MT.D.XX.Month1.IP/DAY',
        headers={'CST': 'abc123', 'X-SECURITY-TOKEN': 'xyz987'},
        json={'errorCode': 'Unable to parse datetime=2020/09/01T00:00:00'},
        status=400)
    with pytest.raises(Exception):
        service = IGService('username', 'password', 'api_key', 'DEMO')
        outcome = service.fetch_historical_prices_by_epic_and_date_range(
            epic='MT.D.GC.Month2.IP',
            resolution='D',
            start_date='2020/09/01T00:00:00',
            end_date='2020-09-04T23:59:59')
        assert outcome['errorCode'].startswith('Unable to parse datetime')
@responses.activate
def test_historical_prices_by_epic_and_date_range_bad_date_order(self):
    """fetch_historical_prices_by_epic_and_date_range with start after end."""
    # A reversed date range is rejected by the API with a 400 response.
    responses.add(
        responses.GET,
        'https://demo-api.ig.com/gateway/deal/prices/MT.D.XX.Month1.IP/DAY',
        headers={'CST': 'abc123', 'X-SECURITY-TOKEN': 'xyz987'},
        json={"errorCode": "error.invalid.daterange"},
        status=400)
    with pytest.raises(Exception):
        service = IGService('username', 'password', 'api_key', 'DEMO')
        outcome = service.fetch_historical_prices_by_epic_and_date_range(
            epic='MT.D.GC.Month2.IP',
            resolution='D',
            start_date='2020:09:04-23:59:59',
            end_date='2020:09:01-00:00:00')
        assert outcome['errorCode'] == 'error.invalid.daterange'
@responses.activate
def test_historical_prices_by_epic_and_num_points_happy(self):
    """fetch_historical_prices_by_epic_and_num_points, daily resolution."""
    with open('tests/data/historic_prices_v2.json', 'r') as fixture_file:
        fixture = json.load(fixture_file)
    responses.add(
        responses.GET,
        'https://demo-api.ig.com/gateway/deal/prices/MT.D.GC.Month2.IP/DAY/10',
        match_querystring=False,
        headers={'CST': 'abc123', 'X-SECURITY-TOKEN': 'xyz987'},
        json=fixture,
        status=200)
    service = IGService('username', 'password', 'api_key', 'DEMO')
    result = service.fetch_historical_prices_by_epic_and_num_points(
        epic='MT.D.GC.Month2.IP',
        resolution='D',
        numpoints=10)
    assert isinstance(result, dict)
    prices = result['prices']
    assert isinstance(prices, pd.DataFrame)
    # Expected shape of the canned v2 payload.
    assert prices.shape == (10, 13)
    # Consecutive rows of the series should be one day apart.
    prices['tvalue'] = prices.index
    prices['delta'] = (prices['tvalue'] - prices['tvalue'].shift())
    assert any(prices["delta"].dropna() == datetime.timedelta(days=1))
@responses.activate
def test_historical_prices_by_epic_and_num_points_bad_epic(self):
    # fetch_historical_prices_by_epic_and_num_points - bad epic
    responses.add(
        responses.GET,
        'https://demo-api.ig.com/gateway/deal/prices/MT.D.X.Month1.IP/DAY',
        headers={'CST': 'abc123', 'X-SECURITY-TOKEN': 'xyz987'},
        json={'errorCode': 'error.error.price-history.io-error'},
        status=404)
    with pytest.raises(Exception):
        ig_service = IGService('username', 'password', 'api_key', 'DEMO')
        # Call the method this test is named for. Previously this invoked
        # fetch_historical_prices_by_epic instead, which made the
        # pytest.raises check vacuous: *any* failure (e.g. a signature or
        # URL mismatch) satisfied it, not a bad-epic error.
        result = ig_service.fetch_historical_prices_by_epic_and_num_points(
            epic='MT.D.X.Month2.IP',
            resolution='W',
            numpoints=5)
        assert result['errorCode'] == 'error.error.price-history.io-error'
@responses.activate
def test_historical_prices_by_epic_and_num_points_bad_numpoints(self):
    """fetch_historical_prices_by_epic_and_num_points with a non-int count."""
    # A float numpoints cannot be converted server-side; expect a 400.
    responses.add(
        responses.GET,
        'https://demo-api.ig.com/gateway/deal/prices/MT.D.GC.Month2.IP',
        match_querystring=False,
        headers={'CST': 'abc123', 'X-SECURITY-TOKEN': 'xyz987'},
        json={'errorCode': 'Unable to convert value=3.14159 to type= Integer int'},  # noqa
        status=400)
    with pytest.raises(Exception):
        service = IGService('username', 'password', 'api_key', 'DEMO')
        outcome = service.fetch_historical_prices_by_epic_and_num_points(
            epic='MT.D.GC.Month2.IP',
            resolution='D',
            numpoints=3.14159)
        assert outcome['errorCode'].startswith('Unable to convert value')
@responses.activate
def test_historical_prices_by_epic_and_num_points_bad_resolution(self):
    """fetch_historical_prices_by_epic_and_num_points, invalid resolution."""
    responses.add(
        responses.GET,
        'https://demo-api.ig.com/gateway/deal/prices/MT.D.GC.Month2.IP',
        match_querystring=False,
        headers={'CST': 'abc123', 'X-SECURITY-TOKEN': 'xyz987'},
        json={},
        status=200)
    # An unknown resolution code is rejected client-side with ValueError.
    with pytest.raises(ValueError) as excinfo:
        service = IGService('username', 'password', 'api_key', 'DEMO')
        service.fetch_historical_prices_by_epic_and_num_points(
            epic='MT.D.GC.Month2.IP',
            resolution='X',
            numpoints=10)
    assert "Invalid frequency" in str(excinfo.value)
| |
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for managing projects via the Cloud Resource Manager API."""
from google.cloud.exceptions import NotFound
class Project(object):
    """Projects are containers for your work on Google Cloud Platform.

    .. note::

        A :class:`Project` can also be created via
        :meth:`Client.new_project() \
        <google.cloud.resource_manager.client.Client.new_project>`

    To manage labels on a :class:`Project`::

        >>> from google.cloud import resource_manager
        >>> client = resource_manager.Client()
        >>> project = client.new_project('purple-spaceship-123')
        >>> project.labels = {'color': 'purple'}
        >>> project.labels['environment'] = 'production'
        >>> project.update()

    See
    https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects

    :type project_id: str
    :param project_id: The globally unique ID of the project.

    :type client: :class:`google.cloud.resource_manager.client.Client`
    :param client: The Client used with this project.

    :type name: str
    :param name: The display name of the project.

    :type labels: dict
    :param labels: A dictionary of labels associated with the project.
    """

    def __init__(self, project_id, client, name=None, labels=None):
        self._client = client
        self.project_id = project_id
        self.name = name
        # Server-assigned properties; populated by reload()/create().
        self.number = None
        self.labels = labels or {}
        self.status = None
        self.parent = None

    def __repr__(self):
        return '<Project: %r (%r)>' % (self.name, self.project_id)

    @classmethod
    def from_api_repr(cls, resource, client):
        """Factory: construct a project given its API representation.

        :type resource: dict
        :param resource: project resource representation returned from the API

        :type client: :class:`google.cloud.resource_manager.client.Client`
        :param client: The Client used with this project.

        :rtype: :class:`google.cloud.resource_manager.project.Project`
        :returns: The project created.
        """
        project = cls(project_id=resource['projectId'], client=client)
        project.set_properties_from_api_repr(resource)
        return project

    def set_properties_from_api_repr(self, resource):
        """Update specific properties from its API representation.

        :type resource: dict
        :param resource: project resource representation returned from the API
        """
        self.name = resource.get('name')
        self.number = resource['projectNumber']
        self.labels = resource.get('labels', {})
        self.status = resource['lifecycleState']
        # 'parent' is only present for projects that live under an
        # organization; leave the previous value untouched otherwise.
        if 'parent' in resource:
            self.parent = resource['parent']

    @property
    def full_name(self):
        """Fully-qualified name (ie, ``'projects/purple-spaceship-123'``).

        :raises ValueError: If the project ID is empty or missing.
        """
        if not self.project_id:
            raise ValueError('Missing project ID.')
        return 'projects/%s' % (self.project_id,)

    @property
    def path(self):
        """URL for the project (ie, ``'/projects/purple-spaceship-123'``)."""
        return '/%s' % (self.full_name,)

    def _require_client(self, client):
        """Check client or verify over-ride.

        :type client: :class:`google.cloud.resource_manager.client.Client` or
                      ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current project.

        :rtype: :class:`google.cloud.resource_manager.client.Client`
        :returns: The client passed in or the currently bound client.
        """
        if client is None:
            client = self._client
        return client

    def create(self, client=None):
        """API call: create the project via a ``POST`` request.

        See
        https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/create

        :type client: :class:`google.cloud.resource_manager.client.Client` or
                      :data:`NoneType <types.NoneType>`
        :param client: the client to use.  If not passed, falls back to
                      the client stored on the current project.
        """
        client = self._require_client(client)
        data = {
            'projectId': self.project_id,
            'name': self.name,
            'labels': self.labels,
        }
        resp = client._connection.api_request(method='POST', path='/projects',
                                              data=data)
        self.set_properties_from_api_repr(resource=resp)

    def reload(self, client=None):
        """API call: reload the project via a ``GET`` request.

        This method will reload the newest metadata for the project. If you've
        created a new :class:`Project` instance via
        :meth:`Client.new_project() \
        <google.cloud.resource_manager.client.Client.new_project>`,
        this method will retrieve project metadata.

        .. warning::

            This will overwrite any local changes you've made and not saved
            via :meth:`update`.

        See
        https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/get

        :type client: :class:`google.cloud.resource_manager.client.Client` or
                      :data:`NoneType <types.NoneType>`
        :param client: the client to use.  If not passed, falls back to
                      the client stored on the current project.
        """
        client = self._require_client(client)
        # We assume the project exists. If it doesn't it will raise a NotFound
        # exception.
        resp = client._connection.api_request(method='GET', path=self.path)
        self.set_properties_from_api_repr(resource=resp)

    def exists(self, client=None):
        """API call: test the existence of a project via a ``GET`` request.

        See
        https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/get

        :type client: :class:`google.cloud.resource_manager.client.Client` or
                      :data:`NoneType <types.NoneType>`
        :param client: the client to use.  If not passed, falls back to
                      the client stored on the current project.

        :rtype: bool
        :returns: Boolean indicating existence of the project.
        """
        client = self._require_client(client)

        try:
            # Note that we have to request the entire resource as the API
            # doesn't provide a way to check for existence only.
            client._connection.api_request(method='GET', path=self.path)
        except NotFound:
            return False
        else:
            return True

    def update(self, client=None):
        """API call: update the project via a ``PUT`` request.

        See
        https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/update

        :type client: :class:`google.cloud.resource_manager.client.Client` or
                      :data:`NoneType <types.NoneType>`
        :param client: the client to use.  If not passed, falls back to
                      the client stored on the current project.
        """
        client = self._require_client(client)

        data = {
            'name': self.name,
            'labels': self.labels,
            'parent': self.parent,
        }

        resp = client._connection.api_request(
            method='PUT', path=self.path, data=data)
        self.set_properties_from_api_repr(resp)

    def delete(self, client=None, reload_data=False):
        """API call: delete the project via a ``DELETE`` request.

        See
        https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/delete

        This actually changes the status (``lifecycleState``) from ``ACTIVE``
        to ``DELETE_REQUESTED``.
        Later (it's not specified when), the project will move into the
        ``DELETE_IN_PROGRESS`` state, which means the deleting has actually
        begun.

        :type client: :class:`google.cloud.resource_manager.client.Client` or
                      :data:`NoneType <types.NoneType>`
        :param client: the client to use.  If not passed, falls back to
                      the client stored on the current project.

        :type reload_data: bool
        :param reload_data: Whether to reload the project with the latest
                            state. If you want to get the updated status,
                            you'll want this set to :data:`True` as the DELETE
                            method doesn't send back the updated project.
                            Default: :data:`False`.
        """
        client = self._require_client(client)
        client._connection.api_request(method='DELETE', path=self.path)

        # If the reload flag is set, reload the project.
        if reload_data:
            self.reload()

    def undelete(self, client=None, reload_data=False):
        """API call: undelete the project via a ``POST`` request.

        See
        https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/undelete

        This actually changes the project status (``lifecycleState``) from
        ``DELETE_REQUESTED`` to ``ACTIVE``.
        If the project has already reached a status of ``DELETE_IN_PROGRESS``,
        this request will fail and the project cannot be restored.

        :type client: :class:`google.cloud.resource_manager.client.Client` or
                      :data:`NoneType <types.NoneType>`
        :param client: the client to use.  If not passed, falls back to
                      the client stored on the current project.

        :type reload_data: bool
        :param reload_data: Whether to reload the project with the latest
                            state. If you want to get the updated status,
                            you'll want this set to :data:`True` as the DELETE
                            method doesn't send back the updated project.
                            Default: :data:`False`.
        """
        client = self._require_client(client)
        client._connection.api_request(
            method='POST', path=self.path + ':undelete')

        # If the reload flag is set, reload the project.
        if reload_data:
            self.reload()
| |
from __future__ import unicode_literals
from textx import metamodel_from_str
from pytest import raises
import textx.exceptions
import attr
@attr.s(frozen=True)
class Instance(object):
    # Custom class for the "Instance" grammar rule: a named, typed
    # instance declaration ("instance d: D") in the model.
    parent = attr.ib()  # containing model object (set by textX)
    name = attr.ib()
    type = attr.ib()  # referenced Struct (may be None if not declared)
@attr.s(frozen=True)
class Reference(object):
    # Custom class for the "Reference" grammar rule: an instance followed
    # by a chain of attribute lookups ("reference d.c.b.a.x").
    parent = attr.ib()  # containing model object (set by textX)
    instance = attr.ib()  # the referenced Instance
    refs = attr.ib()  # list of RefItem, one per '.'-segment
@attr.s(frozen=True)
class RefItem(object):
    # Helper holding one resolved step of an attribute chain; the
    # indirection lets each step be resolved separately (see test below).
    parent = attr.ib()  # owning Reference (set by textX)
    valref = attr.ib()  # the referenced Val
# Shared textual model for the positive tests below: nested struct types,
# two instances and three attribute-path references of varying depth.
model_text = '''
struct A {
val x
}
struct B {
val a: A
}
struct C {
val b: B
val a: A
}
struct D {
val c: C
val b1: B
val a: A
}
instance d: D
instance a: A
reference d.c.b.a.x
reference d.b1.a.x
reference a.x
'''
def test_referencing_attributes():
    """
    The key idea is that the list of references to "Val"s in the
    "Reference"s can have any size>0 and contains not directly references
    to objects, but helper objects ("RefItem"s) that contain the desired
    references.
    With this, the list "refs" to "RefItem"s in the "Reference" object is
    built completely during initial parsing. The references inside the
    "RefItem"s can then be resolved one after the other...
    We also show how to handle custom classes here.
    """
    grammar = '''
Model:
structs+=Struct
instances+=Instance
references+=Reference;
Struct:
'struct' name=ID '{' vals+=Val '}';
Val:
'val' name=ID (':' type=[Struct])?;
Instance:
'instance' name=ID (':' type=[Struct])?;
Reference:
'reference' instance=[Instance] refs+=RefItem;
RefItem:
'.' valref=[Val];
'''
    # Run once with plain textX-generated classes and once with our own
    # custom classes registered for Instance/Reference/RefItem.
    for classes in [[], [Instance, Reference, RefItem]]:
        def ref_scope(refItem, myattr, attr_ref):
            # Custom scope provider for RefItem.valref: each chain segment
            # is looked up in the vals of the type resolved for the
            # previous segment (or of the referenced instance for the
            # first segment).
            from textx.scoping.tools import get_named_obj_in_list
            from textx.scoping import Postponed
            from textx import textx_isinstance
            reference = refItem.parent
            if reference is None:
                return Postponed()  # parent not attached yet; retry later
            # Position of this RefItem within its Reference's chain.
            index = list(map(
                lambda x: id(x), reference.refs)).index(id(refItem))
            assert index >= 0
            base = reference.instance if index == 0 \
                else reference.refs[index - 1].valref
            if base is None or base.type is None:
                return Postponed()  # earlier segment unresolved; retry later
            x = get_named_obj_in_list(base.type.vals, attr_ref.obj_name)
            if index == len(reference.refs) - 1:
                # Last segment: enforce the expected target class.
                if not textx_isinstance(x, myattr.cls):
                    print(x)
                    return None
            return x
        mm = metamodel_from_str(grammar, classes=classes)
        mm.register_scope_providers({
            "RefItem.valref": ref_scope
        })
        m = mm.model_from_str(model_text)
        # d.c.b.a.x resolved segment by segment, deepest last.
        assert m.references[0].refs[-1].valref.name == 'x'
        assert m.references[0].refs[-1].valref == m.structs[0].vals[0]
        assert m.references[0].refs[-2].valref.name == 'a'
        assert m.references[0].refs[-2].valref == m.structs[1].vals[0]
        assert m.references[0].refs[-3].valref.name == 'b'
        assert m.references[0].refs[-3].valref == m.structs[2].vals[0]
        assert m.references[1].refs[-1].valref == m.structs[0].vals[0]
        assert m.references[2].refs[0].valref.name == 'x'
        assert m.references[2].refs[0].valref == m.structs[0].vals[0]
        # negative tests
        # error: "not_there" not part of A
        with raises(textx.exceptions.TextXSemanticError,
                    match=r'.*Unknown object.*not_there.*'):
            mm.model_from_str('''
struct A { val x }
struct B { val a: A}
struct C {
val b: B
val a: A
}
instance c: C
reference c.b.a.not_there
''')
        # error: B.a is not of type A
        with raises(textx.exceptions.TextXSemanticError,
                    match=r'.*Unresolvable cross references.*x.*'):
            mm.model_from_str('''
struct A { val x }
struct B { val a }
struct C {
val b: B
val a: A
}
instance c: C
reference c.b.a.x
''')
def test_referencing_attributes_with_rrel_all_in_one():
    """
    RREL solution: all scope provider information encoded in the grammar.
    """
    mm = metamodel_from_str('''
Model:
structs+=Struct
instances+=Instance
references+=Reference;
Struct:
'struct' name=ID '{' vals+=Val '}';
Val:
'val' name=ID (':' type=[Struct])?;
Instance:
'instance' name=ID (':' type=[Struct])?;
Reference:
'reference' ref=[Val|FQN|instances.~type.vals.(~type.vals)*];
FQN: ID ('.' ID)*;
''')
    m = mm.model_from_str(model_text)
    # BUG FIX: this comparison previously had no `assert`, so its result
    # was silently discarded and the check never ran.
    assert m.references[-1].ref == m.structs[0].vals[0]  # a.x
    assert m.references[0].ref.name == 'x'
    assert m.references[0].ref == m.structs[0].vals[0]
    assert m.references[1].ref == m.structs[0].vals[0]
    assert m.references[2].ref.name == 'x'
    assert m.references[2].ref == m.structs[0].vals[0]
    # negative tests
    # error: "not_there" not part of A
    with raises(textx.exceptions.TextXSemanticError,
                match=r'.*Unknown object "c.b.a.not_there".*'):
        mm.model_from_str('''
struct A { val x }
struct B { val a: A}
struct C {
val b: B
val a: A
}
instance c: C
reference c.b.a.not_there
''')
    # error: B.a is not of type A
    with raises(textx.exceptions.TextXSemanticError,
                match=r'.*Unknown object "c.b.a.x".*'):
        mm.model_from_str('''
struct A { val x }
struct B { val a }
struct C {
val b: B
val a: A
}
instance c: C
reference c.b.a.x
''')
def test_referencing_attributes_with_rrel_all_in_one_splitstring():
    """
    RREL solution: variation with different split string specified in match rule.
    """
    # Same idea as above, but the value path uses '->' as the FQN
    # separator (FQN[split='->']) while the instance part keeps '.'.
    mm = metamodel_from_str('''
Model:
structs+=Struct
instances+=Instance
references+=Reference;
Struct:
'struct' name=ID '{' vals+=Val '}';
Val:
'val' name=ID (':' type=[Struct])?;
Instance:
'instance' name=ID (':' type=[Struct])?;
Reference:
'reference' instance=[Instance]
'.' ref=[Val|FQN|.~instance.~type.vals.(~type.vals)*];
FQN[split='->']: ID ('->' ID)*;
''')
    m = mm.model_from_str('''
struct A {
val x
}
struct B {
val a: A
}
struct C {
val b: B
val a: A
}
struct D {
val c: C
val b1: B
val a: A
}
instance d: D
instance a: A
reference d.c->b->a->x
reference d.b1->a->x
reference a.x
''')
    assert m.references[0].ref.name == 'x'
    assert m.references[0].ref == m.structs[0].vals[0]
    assert m.references[1].ref == m.structs[0].vals[0]
    assert m.references[2].ref.name == 'x'
    assert m.references[2].ref == m.structs[0].vals[0]
| |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Firebase credentials module."""
import collections
import json
import pathlib
import google.auth
from google.auth.transport import requests
from google.oauth2 import credentials
from google.oauth2 import service_account
_request = requests.Request()
_scopes = [
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/datastore',
'https://www.googleapis.com/auth/devstorage.read_write',
'https://www.googleapis.com/auth/firebase',
'https://www.googleapis.com/auth/identitytoolkit',
'https://www.googleapis.com/auth/userinfo.email'
]
AccessTokenInfo = collections.namedtuple('AccessTokenInfo', ['access_token', 'expiry'])
"""Data included in an OAuth2 access token.
Contains the access token string and the expiry time. The expirty time is exposed as a
``datetime`` value.
"""
class Base:
    """Provides OAuth2 access tokens for accessing Firebase services."""

    def get_access_token(self):
        """Fetches a Google OAuth2 access token using this credential instance.

        Returns:
            AccessTokenInfo: An access token obtained using the credential.
        """
        credential = self.get_credential()
        # Refresh through the module-level transport to obtain a fresh token.
        credential.refresh(_request)
        return AccessTokenInfo(credential.token, credential.expiry)

    def get_credential(self):
        """Returns the Google credential instance used for authentication."""
        # Subclasses must supply the concrete google.auth credential.
        raise NotImplementedError
class Certificate(Base):
    """A credential initialized from a JSON certificate keyfile."""

    _CREDENTIAL_TYPE = 'service_account'

    def __init__(self, cert):
        """Initializes a credential from a Google service account certificate.

        Service account certificates can be downloaded as JSON files from the Firebase console.
        To instantiate a credential from a certificate file, either specify the file path or a
        dict representing the parsed contents of the file.

        Args:
            cert: Path to a certificate file or a dict representing the contents of a certificate.

        Raises:
            IOError: If the specified certificate file doesn't exist or cannot be read.
            ValueError: If the specified certificate is invalid.
        """
        super().__init__()
        if _is_file_path(cert):
            with open(cert) as json_file:
                json_data = json.load(json_file)
        elif isinstance(cert, dict):
            json_data = cert
        else:
            raise ValueError(
                'Invalid certificate argument: "{0}". Certificate argument must be a file path, '
                'or a dict containing the parsed file contents.'.format(cert))

        if json_data.get('type') != self._CREDENTIAL_TYPE:
            raise ValueError('Invalid service account certificate. Certificate must contain a '
                             '"type" field set to "{0}".'.format(self._CREDENTIAL_TYPE))
        try:
            self._g_credential = service_account.Credentials.from_service_account_info(
                json_data, scopes=_scopes)
        except ValueError as error:
            # Chain the underlying error so the root cause is preserved in
            # the traceback (the original code dropped the cause).
            raise ValueError('Failed to initialize a certificate credential. '
                             'Caused by: "{0}"'.format(error)) from error

    @property
    def project_id(self):
        """str: Project ID read from the certificate."""
        return self._g_credential.project_id

    @property
    def signer(self):
        """Signer used to sign payloads with the certificate's private key."""
        return self._g_credential.signer

    @property
    def service_account_email(self):
        """str: Client email address read from the certificate."""
        return self._g_credential.service_account_email

    def get_credential(self):
        """Returns the underlying Google credential.

        Returns:
          google.auth.credentials.Credentials: A Google Auth credential instance."""
        return self._g_credential
class ApplicationDefault(Base):
    """A Google Application Default credential."""

    def __init__(self):
        """Creates an instance that will use Application Default credentials.

        The credentials will be lazily initialized when get_credential() or
        project_id() is called. See those methods for possible errors raised.
        """
        super(ApplicationDefault, self).__init__()
        self._g_credential = None  # Populated on first use by _load_credential().

    def get_credential(self):
        """Returns the underlying Google credential.

        Raises:
          google.auth.exceptions.DefaultCredentialsError: If Application Default
          credentials cannot be initialized in the current environment.
        Returns:
          google.auth.credentials.Credentials: A Google Auth credential instance."""
        self._load_credential()
        return self._g_credential

    @property
    def project_id(self):
        """Returns the project_id from the underlying Google credential.

        Raises:
          google.auth.exceptions.DefaultCredentialsError: If Application Default
          credentials cannot be initialized in the current environment.
        Returns:
          str: The project id."""
        self._load_credential()
        return self._project_id

    def _load_credential(self):
        # Resolve ADC exactly once; subsequent calls reuse the cached pair.
        if self._g_credential is None:
            self._g_credential, self._project_id = google.auth.default(scopes=_scopes)
class RefreshToken(Base):
    """A credential initialized from an existing refresh token."""

    _CREDENTIAL_TYPE = 'authorized_user'

    def __init__(self, refresh_token):
        """Initializes a credential from a refresh token JSON file.

        The JSON must consist of client_id, client_secret and refresh_token fields. Refresh
        token files are typically created and managed by the gcloud SDK. To instantiate
        a credential from a refresh token file, either specify the file path or a dict
        representing the parsed contents of the file.

        Args:
            refresh_token: Path to a refresh token file or a dict representing the contents of a
                refresh token file.

        Raises:
            IOError: If the specified file doesn't exist or cannot be read.
            ValueError: If the refresh token configuration is invalid.
        """
        # Modernized to zero-argument super() for consistency with the
        # Python 3-only style used elsewhere in this module.
        super().__init__()
        if _is_file_path(refresh_token):
            with open(refresh_token) as json_file:
                json_data = json.load(json_file)
        elif isinstance(refresh_token, dict):
            json_data = refresh_token
        else:
            raise ValueError(
                'Invalid refresh token argument: "{0}". Refresh token argument must be a file '
                'path, or a dict containing the parsed file contents.'.format(refresh_token))

        if json_data.get('type') != self._CREDENTIAL_TYPE:
            raise ValueError('Invalid refresh token configuration. JSON must contain a '
                             '"type" field set to "{0}".'.format(self._CREDENTIAL_TYPE))
        self._g_credential = credentials.Credentials.from_authorized_user_info(json_data, _scopes)

    @property
    def client_id(self):
        """str: OAuth2 client ID from the refresh token configuration."""
        return self._g_credential.client_id

    @property
    def client_secret(self):
        """str: OAuth2 client secret from the refresh token configuration."""
        return self._g_credential.client_secret

    @property
    def refresh_token(self):
        """str: The refresh token string itself."""
        return self._g_credential.refresh_token

    def get_credential(self):
        """Returns the underlying Google credential.

        Returns:
          google.auth.credentials.Credentials: A Google Auth credential instance."""
        return self._g_credential
def _is_file_path(path):
try:
pathlib.Path(path)
return True
except TypeError:
return False
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class SnapshotsOperations(object):
"""SnapshotsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2016-04-30-preview".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
    # Keep references to the service client, its configuration and the
    # (de)serialization helpers used by every operation in this group.
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self.config = config
    # The API version is fixed for this generated operations class.
    self.api_version = "2016-04-30-preview"
def _create_or_update_initial(
        self, resource_group_name, snapshot_name, snapshot, custom_headers=None, raw=False, **operation_config):
    """Send the initial PUT for a snapshot create/update.

    Returns the deserialized ``Snapshot`` (or ``ClientRawResponse`` when
    ``raw``); the long-running polling is handled by the public
    ``create_or_update`` wrapper.
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Unique request id for server-side correlation/tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(snapshot, 'Snapshot')

    # Construct and send request
    request = self._client.put(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)

    # 200 = completed synchronously, 202 = accepted (long-running);
    # anything else is surfaced as a CloudError.
    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('Snapshot', response)
    if response.status_code == 202:
        deserialized = self._deserialize('Snapshot', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def create_or_update(
        self, resource_group_name, snapshot_name, snapshot, custom_headers=None, raw=False, **operation_config):
    """Creates or updates a snapshot.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param snapshot_name: The name of the snapshot within the given
     subscription and resource group.
    :type snapshot_name: str
    :param snapshot: Snapshot object supplied in the body of the Put disk
     operation.
    :type snapshot:
     ~azure.mgmt.compute.v2016_04_30_preview.models.Snapshot
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :return: An instance of AzureOperationPoller that returns Snapshot or
     ClientRawResponse if raw=true
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2016_04_30_preview.models.Snapshot]
     or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Issue the initial PUT with raw=True so the transport response is
    # retained for the long-running-operation poller below.
    raw_result = self._create_or_update_initial(
        resource_group_name=resource_group_name,
        snapshot_name=snapshot_name,
        snapshot=snapshot,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )
    if raw:
        return raw_result

    # Construct and send request
    def long_running_send():
        # The initial request has already been sent; replay its response.
        return raw_result.response

    def get_long_running_status(status_link, headers=None):
        # Poll the Azure-provided status link, propagating the original
        # client request id for correlation.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        header_parameters = {}
        header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
        return self._client.send(
            request, header_parameters, stream=False, **operation_config)

    def get_long_running_output(response):
        # Translate the final polled response into a Snapshot (or raise).
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = self._deserialize('Snapshot', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
def _update_initial(
self, resource_group_name, snapshot_name, snapshot, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(snapshot, 'SnapshotUpdate')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Snapshot', response)
if response.status_code == 202:
deserialized = self._deserialize('Snapshot', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
    def update(
            self, resource_group_name, snapshot_name, snapshot, custom_headers=None, raw=False, **operation_config):
        """Updates (patches) a snapshot.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot within the given
         subscription and resource group.
        :type snapshot_name: str
        :param snapshot: Snapshot object supplied in the body of the Patch
         snapshot operation.
        :type snapshot:
         ~azure.mgmt.compute.v2016_04_30_preview.models.SnapshotUpdate
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An instance of AzureOperationPoller that returns Snapshot or
         ClientRawResponse if raw=true
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2016_04_30_preview.models.Snapshot]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Issue the initial PATCH with raw=True so the transport response
        # is available for the long-running-operation polling below.
        raw_result = self._update_initial(
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            snapshot=snapshot,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        if raw:
            return raw_result

        # Construct and send request
        def long_running_send():
            # Polling starts from the already-sent initial response.
            return raw_result.response

        def get_long_running_status(status_link, headers=None):
            # Follow the status link, forwarding the original client
            # request id so the poll can be correlated server-side.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            header_parameters = {}
            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
            return self._client.send(
                request, header_parameters, stream=False, **operation_config)

        def get_long_running_output(response):
            # Terminal response must be 200 or 202; anything else is an error.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = self._deserialize('Snapshot', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, snapshot_name, custom_headers=None, raw=False, **operation_config):
"""Gets information about a snapshot.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param snapshot_name: The name of the snapshot within the given
subscription and resource group.
:type snapshot_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Snapshot or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.compute.v2016_04_30_preview.models.Snapshot or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Snapshot', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def _delete_initial(
self, resource_group_name, snapshot_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
    def delete(
            self, resource_group_name, snapshot_name, custom_headers=None, raw=False, **operation_config):
        """Deletes a snapshot.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot within the given
         subscription and resource group.
        :type snapshot_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An instance of AzureOperationPoller that returns
         OperationStatusResponse or ClientRawResponse if raw=true
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2016_04_30_preview.models.OperationStatusResponse]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Issue the initial DELETE with raw=True so the transport response
        # is available for the long-running-operation polling below.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        if raw:
            return raw_result

        # Construct and send request
        def long_running_send():
            # Polling starts from the already-sent initial response.
            return raw_result.response

        def get_long_running_status(status_link, headers=None):
            # Follow the status link, forwarding the original client
            # request id for server-side correlation.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            header_parameters = {}
            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
            return self._client.send(
                request, header_parameters, stream=False, **operation_config)

        def get_long_running_output(response):
            # Terminal response must be 200/202/204; anything else errors.
            if response.status_code not in [200, 202, 204]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = self._deserialize('OperationStatusResponse', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def list_by_resource_group(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Lists snapshots under a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Snapshot
        :rtype:
         ~azure.mgmt.compute.v2016_04_30_preview.models.SnapshotPaged[~azure.mgmt.compute.v2016_04_30_preview.models.Snapshot]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # First page: build the collection URL; later pages reuse the
            # service-provided next_link verbatim (it embeds api-version).
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response: SnapshotPaged pulls pages lazily through
        # internal_paging as the caller iterates.
        deserialized = models.SnapshotPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.SnapshotPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """Lists snapshots under a subscription.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Snapshot
        :rtype:
         ~azure.mgmt.compute.v2016_04_30_preview.models.SnapshotPaged[~azure.mgmt.compute.v2016_04_30_preview.models.Snapshot]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # First page: build the subscription-wide collection URL; later
            # pages reuse the service-provided next_link verbatim.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response: SnapshotPaged pulls pages lazily through
        # internal_paging as the caller iterates.
        deserialized = models.SnapshotPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.SnapshotPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
def _grant_access_initial(
self, resource_group_name, snapshot_name, access, duration_in_seconds, custom_headers=None, raw=False, **operation_config):
grant_access_data = models.GrantAccessData(access=access, duration_in_seconds=duration_in_seconds)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(grant_access_data, 'GrantAccessData')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AccessUri', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
    def grant_access(
            self, resource_group_name, snapshot_name, access, duration_in_seconds, custom_headers=None, raw=False, **operation_config):
        """Grants access to a snapshot.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot within the given
         subscription and resource group.
        :type snapshot_name: str
        :param access: Possible values include: 'None', 'Read'
        :type access: str or
         ~azure.mgmt.compute.v2016_04_30_preview.models.AccessLevel
        :param duration_in_seconds: Time duration in seconds until the SAS
         access expires.
        :type duration_in_seconds: int
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An instance of AzureOperationPoller that returns AccessUri or
         ClientRawResponse if raw=true
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2016_04_30_preview.models.AccessUri]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Issue the initial POST with raw=True so the transport response
        # is available for the long-running-operation polling below.
        raw_result = self._grant_access_initial(
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            access=access,
            duration_in_seconds=duration_in_seconds,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        if raw:
            return raw_result

        # Construct and send request
        def long_running_send():
            # Polling starts from the already-sent initial response.
            return raw_result.response

        def get_long_running_status(status_link, headers=None):
            # Follow the status link, forwarding the original client
            # request id for server-side correlation.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            header_parameters = {}
            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
            return self._client.send(
                request, header_parameters, stream=False, **operation_config)

        def get_long_running_output(response):
            # Terminal response must be 200 or 202; anything else errors.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = self._deserialize('AccessUri', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
def _revoke_access_initial(
self, resource_group_name, snapshot_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
    def revoke_access(
            self, resource_group_name, snapshot_name, custom_headers=None, raw=False, **operation_config):
        """Revokes access to a snapshot.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot within the given
         subscription and resource group.
        :type snapshot_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An instance of AzureOperationPoller that returns
         OperationStatusResponse or ClientRawResponse if raw=true
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2016_04_30_preview.models.OperationStatusResponse]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Issue the initial POST with raw=True so the transport response
        # is available for the long-running-operation polling below.
        raw_result = self._revoke_access_initial(
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        if raw:
            return raw_result

        # Construct and send request
        def long_running_send():
            # Polling starts from the already-sent initial response.
            return raw_result.response

        def get_long_running_status(status_link, headers=None):
            # Follow the status link, forwarding the original client
            # request id for server-side correlation.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            header_parameters = {}
            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
            return self._client.send(
                request, header_parameters, stream=False, **operation_config)

        def get_long_running_output(response):
            # Terminal response must be 200 or 202; anything else errors.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = self._deserialize('OperationStatusResponse', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
| |
# -*- coding: utf-8 -*-
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_deferred_polymorph.models import SubDeferredPolymorphBaseModel
from .mixins import Renderable
from .mixins import TemplateHintProvider
from .settings import MC_COMPONENT_BASE_MODEL
class RegionManager(models.Manager):
    """Manager that lazily caches all regions by slug and primary key.

    The caches are plain instance attributes filled on first access;
    ``clear_cache`` drops them so the next lookup re-reads the database.
    """

    def get_by_natural_key(self, slug):
        """Natural-key lookup used by Django's serialization framework."""
        return self.get(slug=slug)

    def regions_by_slug(self):
        """Return a ``{slug: Region}`` mapping, filling the cache if needed."""
        if not hasattr(self, '_regions_by_slug'):
            self.fill_cache()
        return self._regions_by_slug

    def regions_by_pk(self):
        """Return a ``{pk: Region}`` mapping, filling the cache if needed."""
        if not hasattr(self, '_regions_by_pk'):
            self.fill_cache()
        return self._regions_by_pk

    def region_pk_to_slug(self):
        """Return a ``{pk: slug}`` mapping, filling the cache if needed."""
        if not hasattr(self, '_region_pk_to_slug'):
            self.fill_cache()
        return self._region_pk_to_slug

    def fill_cache(self):
        """Load every region once and build all three lookup dicts."""
        regions = list(self.all())
        by_slug = {}
        by_pk = {}
        pk_to_slug = {}
        for region in regions:
            by_slug[region.slug] = region
            by_pk[region.pk] = region
            pk_to_slug[region.pk] = region.slug
        self._regions_by_slug = by_slug
        self._regions_by_pk = by_pk
        self._region_pk_to_slug = pk_to_slug

    def clear_cache(self):
        """Drop the cached mappings (no-op when nothing is cached)."""
        try:
            del self._regions_by_slug
            del self._regions_by_pk
            del self._region_pk_to_slug
        except AttributeError:
            pass
class Region(TemplateHintProvider, models.Model):
    """A named template region that components can be attached to.

    Each region defines how components attached on page level interact
    with components attached on layout level (``component_extend_rule``)
    and which component content types may be placed in it.
    """

    COMBINE = 'combine'
    OVERWRITE = 'overwrite'
    EXTEND_CHOICES = (
        (COMBINE, _('Add to existing layout components')),
        (OVERWRITE, _('Replace existing layout components')),
    )

    name = models.CharField(max_length=50)
    slug = models.SlugField(unique=True)
    component_extend_rule = models.CharField(
        max_length=16, choices=EXTEND_CHOICES,
        help_text=_(
            'Define how page components that is added to this region change '
            'the layout components.'))
    position = models.IntegerField(default=0)
    available_components = models.ManyToManyField('contenttypes.ContentType')

    objects = RegionManager()

    class Meta:
        verbose_name = _('Region')
        verbose_name_plural = _('Regions')
        ordering = ('position',)

    def natural_key(self):
        return [self.slug]

    def __unicode__(self):
        return self.name

    def get_template_hints(self, name_provider, hint_providers):
        # Hint used by the template resolution machinery, e.g. 'region-main'.
        return ['region-{0}'.format(self.slug)]

    def extend_components(self, first, second):
        '''
        Apply the ``component_extend_rule`` to two given lists. The first
        argument shall be the one that is extended, the second argument the one
        that shall overwrite or be combined with the original first one.

        Returns ``None`` for an unknown rule (unchanged legacy behaviour).
        '''
        if self.component_extend_rule == self.OVERWRITE:
            # Second list wins entirely when present; otherwise keep first.
            if second:
                return list(second)
            else:
                return list(first)
        elif self.component_extend_rule == self.COMBINE:
            combined = []
            if first:
                combined.extend(first)
            if second:
                combined.extend(second)
            return combined

    def get_valid_component_models(self):
        """Return the model classes allowed as components in this region.

        Bug fix: ``ContentType.model_class`` is a method and was previously
        referenced without calling it, so the list contained bound methods
        instead of model classes.
        """
        return [
            content_type.model_class()
            for content_type in self.available_components.all()
        ]
class RegionComponentBaseManager(models.Manager):
    """Manager for the region/component through-model."""

    def visible(self):
        """Return the components that should be rendered.

        django_mc itself applies no visibility filtering, so this is just
        ``all()``; it exists as a single hook where projects can restrict
        the component<->page/layout relation by overriding this method.
        """
        queryset = self.all()
        return queryset
class RegionComponentProvider(models.Model):
    '''
    Drop this mixin into a model that can link component objects to regions. The
    mixin will take care of creating the intermediary model.
    '''

    class Meta:
        abstract = True

    def get_components_by_region(self):
        '''
        Return a dictionary in the form of::

            {
                region.pk: [region_component_obj, region_component_obj, ...]
            }
        '''
        # see RegionComponentBaseManager for details on visible()
        queryset = self.region_components.visible()
        regions = {}
        for region_component in queryset:
            # Group the through-model rows by the region they belong to.
            regions.setdefault(region_component.region_id, []).append(region_component)
        return regions

    class RegionComponentBase(models.Model):
        # Abstract template for the auto-generated intermediary model (see
        # ``_create_region_component_model`` below). Override this attribute
        # in a RegionComponentProvider subclass to customize the generated
        # model's fields or behaviour.
        region = models.ForeignKey('django_mc.Region', related_name='+')
        component = models.ForeignKey(MC_COMPONENT_BASE_MODEL, related_name='+')
        position = models.IntegerField(default=0)

        # The field ``provider`` will be dynamically defined in the created
        # intermediary table.

        objects = RegionComponentBaseManager()

        class Meta:
            abstract = True

        def __unicode__(self):
            return unicode(
                '%s @ %s in %s' % (
                    self.component.get_real_instance(),
                    self.provider,
                    self.region))

        def resolve_component(self):
            # Delegate to the component so custom resolve strategies
            # (see ComponentBaseMixin.resolve_component) take effect.
            return self.component.resolve_component()

    @classmethod
    def _create_region_component_model(cls, sender, **kwargs):
        '''
        Generate intermediary model automatically that looks like this:

            class <ModelName>RegionComponent(ComponentProvider.RegionComponentBase):
                region = models.ForeignKey(Region, related_name='+')
                provider = models.ForeignKey(<Model>,
                    related_name='region_components')
                component = models.ForeignKey(ComponentBase, related_name='+')
                position = models.IntegerField(default=0)

        Note that the intermediary model inherits from the
        ``RegionComponentBase`` attribute on the ``ComponentProvider``. You can use
        this behaviour to subclass ``ComponentProvider`` and override
        ``RegionComponentBase`` to alter the behaviour and fields of the
        intermediary model.

        Connected to the ``class_prepared`` signal, so it runs for every
        model Django finishes defining.
        '''
        # Ignore calls from models that are not inherited from
        # RegionComponentProvider.
        if not issubclass(sender, cls):
            return
        # Ignore calls from models that are not concrete models
        # RegionComponentProvider.
        if sender._meta.abstract:
            return
        # Ignore calls from models that are swapped
        if sender._meta.swapped:
            return
        # The sender of the signal is the model that we want to attach the
        # branch model to.
        if hasattr(sender, 'RegionComponent'):
            return  # seems like some base class already has a region component relation
        # assert not hasattr(sender, 'RegionComponent'), \
        #     'There is already a RegionComponent specified for %s' % sender
        db_table = '%s_%s' % (
            sender._meta.db_table,
            'regioncomponent')
        meta = type('Meta', (object,), {
            'db_table': db_table,
            'app_label': sender._meta.app_label,
            'db_tablespace': sender._meta.db_tablespace,
            # We sadly cannot use translations here, as using translations ("%" does resolve the
            # translation) triggers an import off all installed apps
            # (see django/utils/translation/trans_real.py:158). This may lead
            # to circular imports and thus break your project badly! I'm sorry.
            # 'verbose_name': _('%s region component') % sender._meta.verbose_name,
            # 'verbose_name_plural': _('%s region component') % sender._meta.verbose_name_plural,
        })
        model_name = '%sRegionComponent' % sender._meta.object_name
        bases = (sender.RegionComponentBase,)
        # Construct and return the new class.
        model = type(str(model_name), bases, {
            'Meta': meta,
            '__module__': sender.__module__,
            'provider': models.ForeignKey(
                sender,
                related_name='region_components'),
        })
        # Expose the generated through-model on the provider model itself.
        sender.RegionComponent = model
# Auto-create the <Model>RegionComponent intermediary model for every
# concrete model that mixes in RegionComponentProvider.
models.signals.class_prepared.connect(
    RegionComponentProvider._create_region_component_model)
class LayoutManager(models.Manager):
    """Manager providing natural-key (slug) lookup for layouts."""

    def get_by_natural_key(self, slug):
        """Return the layout whose unique slug equals ``slug``."""
        lookup = {'slug': slug}
        return self.get(**lookup)
class LayoutMixin(TemplateHintProvider, RegionComponentProvider, models.Model):
    """Abstract base for layout models: a named, slugged layout that can
    extend a parent layout and that provides components per region."""

    # Slug of the layout used when no explicit layout is assigned.
    DEFAULT_LAYOUT_SLUG = 'default'

    name = models.CharField(max_length=50)
    slug = models.SlugField(unique=True)
    parent = models.ForeignKey('self', null=True, blank=True,
        help_text=_(
            'Select a layout which shall be extended by this layout according to region '
            'extend rules.'))

    objects = LayoutManager()

    class Meta:
        abstract = True
        verbose_name = _('Layout')
        verbose_name_plural = _('Layouts')

    def __unicode__(self):
        return self.name

    def natural_key(self):
        # Natural key for serialization / fixtures: the unique slug.
        return (self.slug,)

    def get_template_hints(self, name_provider, hint_providers):
        # Own hint first, followed by the parent chain's hints (recursive).
        if self.parent:
            parent_hints = self.parent.get_template_hints(name_provider, hint_providers)
        else:
            parent_hints = []
        return ['layout-{0}'.format(self.slug)] + parent_hints

    def get_component_providers(self):
        # Providers ordered root-first so descendants can extend/override
        # their ancestors according to the region extend rules.
        if self.parent_id:
            return self.parent.get_component_providers() + [self]
        else:
            return [self]
# @classmethod
# def _create_default_layout(cls, sender, **kwargs):
# # Only create default layout when the synced app is the django_mc.
# if sender.__name__ == __name__:
# default_layout = cls.objects.filter(slug=Layout.DEFAULT_LAYOUT_SLUG)
# if not default_layout.exists():
# cls.objects.create(
# slug=Layout.DEFAULT_LAYOUT_SLUG,
# name='Default')
#
#
# models.signals.post_syncdb.connect(Layout._create_default_layout)
class Layout(LayoutMixin):
    """Concrete layout model; swappable via the MC_LAYOUT_MODEL setting."""

    class Meta(LayoutMixin.Meta):
        swappable = 'MC_LAYOUT_MODEL'
class ComponentBaseMixin(Renderable, SubDeferredPolymorphBaseModel):
    """Abstract polymorphic base for all renderable components."""

    class Meta:
        abstract = True
        verbose_name = _('Component Base')
        verbose_name_plural = _('Component Bases')

    def resolve_component(self):
        """Return the concrete (real) component instance.

        Hook for more complex resolve strategies (e.g. versionable
        components that need to switch to the current version).
        """
        real_instance = self.get_real_instance()
        return real_instance

    def get_template_basename(self):
        """Template filename derived from the concrete model's name."""
        model_name = self.get_real_instance()._meta.object_name.lower()
        return '{0}.html'.format(model_name)
class ComponentBase(ComponentBaseMixin):
    """Concrete component base; swappable via ``MC_COMPONENT_BASE_MODEL``."""
    class Meta(ComponentBaseMixin.Meta):
        swappable = 'MC_COMPONENT_BASE_MODEL'
| |
"""Helpers for config validation using voluptuous."""
from datetime import timedelta
import jinja2
import voluptuous as vol
from homeassistant.loader import get_platform
from homeassistant.const import (
CONF_PLATFORM, CONF_SCAN_INTERVAL, TEMP_CELSIUS, TEMP_FAHRENHEIT,
CONF_ALIAS, CONF_ENTITY_ID, CONF_VALUE_TEMPLATE, WEEKDAYS,
CONF_CONDITION, CONF_BELOW, CONF_ABOVE, SUN_EVENT_SUNSET,
SUN_EVENT_SUNRISE)
from homeassistant.helpers.entity import valid_entity_id
import homeassistant.util.dt as dt_util
from homeassistant.util import slugify
# pylint: disable=invalid-name
TIME_PERIOD_ERROR = "offset {} should be format 'HH:MM' or 'HH:MM:SS'"
# Home Assistant types: small reusable voluptuous validators used throughout
# the schemas below.
byte = vol.All(vol.Coerce(int), vol.Range(min=0, max=255))
small_float = vol.All(vol.Coerce(float), vol.Range(min=0, max=1))
positive_int = vol.All(vol.Coerce(int), vol.Range(min=0))
latitude = vol.All(vol.Coerce(float), vol.Range(min=-90, max=90),
                   msg='invalid latitude')
longitude = vol.All(vol.Coerce(float), vol.Range(min=-180, max=180),
                    msg='invalid longitude')
# Case-insensitive 'sunset' / 'sunrise'.
sun_event = vol.All(vol.Lower, vol.Any(SUN_EVENT_SUNSET, SUN_EVENT_SUNRISE))
# Adapted from:
# https://github.com/alecthomas/voluptuous/issues/115#issuecomment-144464666
def has_at_least_one_key(*keys):
    """Build a validator requiring at least one of *keys* in a dict."""
    def validate(obj):
        """Return *obj* if it is a dict containing one of the wanted keys."""
        if not isinstance(obj, dict):
            raise vol.Invalid('expected dictionary')
        if any(key in obj for key in keys):
            return obj
        raise vol.Invalid('must contain one of {}.'.format(', '.join(keys)))
    return validate
def boolean(value):
    """Validate and coerce a boolean value."""
    # Non-strings fall back to plain truthiness.
    if not isinstance(value, str):
        return bool(value)
    lowered = value.lower()
    if lowered in ('1', 'true', 'yes', 'on', 'enable'):
        return True
    if lowered in ('0', 'false', 'no', 'off', 'disable'):
        return False
    raise vol.Invalid('invalid boolean value {}'.format(value))
def isfile(value):
    """Validate that the value is an existing file (delegates to voluptuous)."""
    return vol.IsFile('not a file')(value)
def ensure_list(value):
    """Wrap value in a list unless it already is one."""
    if isinstance(value, list):
        return value
    return [value]
def entity_id(value):
    """Validate an Entity ID, returning its lowercased form."""
    lowered = string(value).lower()
    if not valid_entity_id(lowered):
        raise vol.Invalid('Entity ID {} is an invalid entity id'.format(lowered))
    return lowered
def entity_ids(value):
    """Validate a list (or comma-separated string) of Entity IDs."""
    if value is None:
        raise vol.Invalid('Entity IDs can not be None')
    if isinstance(value, str):
        # Accept 'light.a, light.b' style strings.
        value = [part.strip() for part in value.split(',')]
    return [entity_id(part) for part in value]
def icon(value):
    """Validate a Material Design icon identifier."""
    text = str(value)
    if not text.startswith('mdi:'):
        raise vol.Invalid('Icons should start with prefix "mdi:"')
    return text
# Accepts a dict with any subset of timedelta fields (at least one required)
# and converts it into a datetime.timedelta.
time_period_dict = vol.All(
    dict, vol.Schema({
        'days': vol.Coerce(int),
        'hours': vol.Coerce(int),
        'minutes': vol.Coerce(int),
        'seconds': vol.Coerce(int),
        'milliseconds': vol.Coerce(int),
    }),
    has_at_least_one_key('days', 'hours', 'minutes',
                         'seconds', 'milliseconds'),
    lambda value: timedelta(**value))
def time_period_str(value):
    """Validate and transform a '[+-]HH:MM[:SS]' offset into a timedelta."""
    # Unquoted YAML times like 0:30 parse as ints; force the user to quote.
    if isinstance(value, int):
        raise vol.Invalid('Make sure you wrap time values in quotes')
    elif not isinstance(value, str):
        raise vol.Invalid(TIME_PERIOD_ERROR.format(value))
    sign = 1
    if value.startswith('-'):
        sign = -1
        value = value[1:]
    elif value.startswith('+'):
        value = value[1:]
    try:
        fields = [int(chunk) for chunk in value.split(':')]
    except ValueError:
        raise vol.Invalid(TIME_PERIOD_ERROR.format(value))
    if len(fields) == 2:
        hour, minute = fields
        second = 0
    elif len(fields) == 3:
        hour, minute, second = fields
    else:
        raise vol.Invalid(TIME_PERIOD_ERROR.format(value))
    return sign * timedelta(hours=hour, minutes=minute, seconds=second)
# Accept a '[+-]HH:MM[:SS]' string, a raw timedelta, or a dict of fields.
time_period = vol.Any(time_period_str, timedelta, time_period_dict)
def log_exception(logger, ex, domain, config):
    """Log a human-readable message for a config validation error."""
    message = 'Invalid config for [{}]: '.format(domain)
    if 'extra keys not allowed' in ex.error_message:
        path = '->'.join('%s' % part for part in ex.path)
        message += '[{}] is an invalid option for [{}]. Check: {}->{}.'\
            .format(ex.path[-1], domain, domain, path)
    else:
        message += str(ex)
    # YAML-loaded configs carry source-location annotations; append them.
    if hasattr(config, '__line__'):
        message += " (See {}:{})".format(config.__config_file__,
                                         config.__line__ or '?')
    logger.error(message)
def match_all(value):
    """Validator that accepts any value unchanged."""
    return value
def platform_validator(domain):
    """Build a validator checking that a platform exists for *domain*."""
    def validator(value):
        """Return *value* if it names an existing platform, else raise."""
        if value is None:
            raise vol.Invalid('platform cannot be None')
        if not get_platform(domain, str(value)):
            raise vol.Invalid(
                'platform {} does not exist for {}'.format(value, domain))
        return value
    return validator
def positive_timedelta(value):
    """Validate that the timedelta is not negative."""
    if timedelta(0) > value:
        raise vol.Invalid('Time period should be positive')
    return value
def service(value):
    """Validate a service name of the form <domain>.<name>."""
    # Services use same format as entities so we can use same helper.
    if not valid_entity_id(value):
        raise vol.Invalid('Service {} does not match format <domain>.<name>'
                          .format(value))
    return value
def slug(value):
    """Validate that the value already is a valid slug."""
    if value is None:
        raise vol.Invalid('Slug should not be None')
    text = str(value)
    slugified = slugify(text)
    if text != slugified:
        raise vol.Invalid('invalid slug {} (try {})'.format(text, slugified))
    return text
def string(value):
    """Coerce value to string; None is rejected."""
    if value is None:
        raise vol.Invalid('string value is None')
    return str(value)
def temperature_unit(value):
    """Validate a temperature unit letter and map it to the HA constant."""
    unit = str(value).upper()
    if unit == 'C':
        return TEMP_CELSIUS
    if unit == 'F':
        return TEMP_FAHRENHEIT
    raise vol.Invalid('invalid temperature unit (expected C or F)')
def template(value):
    """Validate that the value parses as a jinja2 template."""
    if value is None:
        raise vol.Invalid('template value is None')
    text = str(value)
    try:
        jinja2.Environment().parse(text)
    except jinja2.exceptions.TemplateSyntaxError as ex:
        raise vol.Invalid('invalid template ({})'.format(ex))
    return text
def time(value):
    """Validate a time-of-day value."""
    parsed = dt_util.parse_time(value)
    if parsed is None:
        raise vol.Invalid('Invalid time specified: {}'.format(value))
    return parsed
def time_zone(value):
    """Validate that the value names a known tz-database timezone."""
    if dt_util.get_time_zone(value) is None:
        raise vol.Invalid(
            'Invalid time zone passed in. Valid options can be found here: '
            'http://en.wikipedia.org/wiki/List_of_tz_database_time_zones')
    return value
# One or more weekday abbreviations, each checked against WEEKDAYS.
weekdays = vol.All(ensure_list, [vol.In(WEEKDAYS)])
# Validator helpers
def key_dependency(key, dependency):
    """Build a validator requiring *dependency* whenever *key* is present."""
    def validator(value):
        """Return the dict if the key dependency holds."""
        if not isinstance(value, dict):
            raise vol.Invalid('key dependencies require a dict')
        if key in value and dependency not in value:
            raise vol.Invalid('dependency violation - key "{}" requires '
                              'key "{}" to exist'.format(key, dependency))
        return value
    return validator
# Schemas
PLATFORM_SCHEMA = vol.Schema({
    vol.Required(CONF_PLATFORM): string,
    CONF_SCAN_INTERVAL: vol.All(vol.Coerce(int), vol.Range(min=1)),
}, extra=vol.ALLOW_EXTRA)
EVENT_SCHEMA = vol.Schema({
    vol.Optional(CONF_ALIAS): string,
    vol.Required('event'): string,
    vol.Optional('event_data'): dict,
})
# A service call needs either a literal service name or a template for one,
# but never both ('service name' is the Exclusive group).
SERVICE_SCHEMA = vol.All(vol.Schema({
    vol.Optional(CONF_ALIAS): string,
    vol.Exclusive('service', 'service name'): service,
    vol.Exclusive('service_template', 'service name'): template,
    vol.Optional('data'): dict,
    vol.Optional('data_template'): {match_all: template},
    vol.Optional(CONF_ENTITY_ID): entity_ids,
}), has_at_least_one_key('service', 'service_template'))
NUMERIC_STATE_CONDITION_SCHEMA = vol.All(vol.Schema({
    vol.Required(CONF_CONDITION): 'numeric_state',
    vol.Required(CONF_ENTITY_ID): entity_id,
    CONF_BELOW: vol.Coerce(float),
    CONF_ABOVE: vol.Coerce(float),
    vol.Optional(CONF_VALUE_TEMPLATE): template,
}), has_at_least_one_key(CONF_BELOW, CONF_ABOVE))
STATE_CONDITION_SCHEMA = vol.All(vol.Schema({
    vol.Required(CONF_CONDITION): 'state',
    vol.Required(CONF_ENTITY_ID): entity_id,
    vol.Required('state'): str,
    vol.Optional('for'): vol.All(time_period, positive_timedelta),
    # To support use_trigger_value in automation
    # Deprecated 2016/04/25
    vol.Optional('from'): str,
}), key_dependency('for', 'state'))
SUN_CONDITION_SCHEMA = vol.All(vol.Schema({
    vol.Required(CONF_CONDITION): 'sun',
    vol.Optional('before'): sun_event,
    vol.Optional('before_offset'): time_period,
    vol.Optional('after'): vol.All(vol.Lower, vol.Any('sunset', 'sunrise')),
    vol.Optional('after_offset'): time_period,
}), has_at_least_one_key('before', 'after'))
TEMPLATE_CONDITION_SCHEMA = vol.Schema({
    vol.Required(CONF_CONDITION): 'template',
    vol.Required(CONF_VALUE_TEMPLATE): template,
})
TIME_CONDITION_SCHEMA = vol.All(vol.Schema({
    vol.Required(CONF_CONDITION): 'time',
    'before': time,
    'after': time,
    'weekday': weekdays,
}), has_at_least_one_key('before', 'after', 'weekday'))
ZONE_CONDITION_SCHEMA = vol.Schema({
    vol.Required(CONF_CONDITION): 'zone',
    vol.Required(CONF_ENTITY_ID): entity_id,
    'zone': entity_id,
    # To support use_trigger_value in automation
    # Deprecated 2016/04/25
    vol.Optional('event'): vol.Any('enter', 'leave'),
})
# AND/OR nest arbitrary conditions; the lambdas defer the CONDITION_SCHEMA
# lookup until validation time since it is defined below.
AND_CONDITION_SCHEMA = vol.Schema({
    vol.Required(CONF_CONDITION): 'and',
    vol.Required('conditions'): vol.All(
        ensure_list,
        # pylint: disable=unnecessary-lambda
        [lambda value: CONDITION_SCHEMA(value)],
    )
})
OR_CONDITION_SCHEMA = vol.Schema({
    vol.Required(CONF_CONDITION): 'or',
    vol.Required('conditions'): vol.All(
        ensure_list,
        # pylint: disable=unnecessary-lambda
        [lambda value: CONDITION_SCHEMA(value)],
    )
})
CONDITION_SCHEMA = vol.Any(
    NUMERIC_STATE_CONDITION_SCHEMA,
    STATE_CONDITION_SCHEMA,
    SUN_CONDITION_SCHEMA,
    TEMPLATE_CONDITION_SCHEMA,
    TIME_CONDITION_SCHEMA,
    ZONE_CONDITION_SCHEMA,
    AND_CONDITION_SCHEMA,
    OR_CONDITION_SCHEMA,
)
_SCRIPT_DELAY_SCHEMA = vol.Schema({
    vol.Optional(CONF_ALIAS): string,
    vol.Required("delay"): vol.All(time_period, positive_timedelta)
})
# A script is a list of service calls, delays, events and conditions.
SCRIPT_SCHEMA = vol.All(
    ensure_list,
    [vol.Any(SERVICE_SCHEMA, _SCRIPT_DELAY_SCHEMA, EVENT_SCHEMA,
             CONDITION_SCHEMA)],
)
| |
import sys
import os
import time
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from PyQt4.phonon import Phonon
import mutagen
import mutagen.mp3
import mutagen.easyid3
import configobj
import validate
import configobj_gui
import configobj_qt
import mythread
import detect
# configobj validation spec for the app's settings (defaults and bounds).
CONFSPEC = """
silence_threshold = float(default=0.02, min=0, max=0.1) # Threshold volume for silence
min_silence_length = float(default=0.7, min=0) # Minimum length of silence between segments [s]
min_ad_length = float(default=20.0, min=0) # Minimum length of an ad [s]
max_ad_length = float(default=60.0, min=0) # Maximum length of an ad [s]
min_correlation = float(default=0.50, min=0.0, max=1.0) # Minimum correlation between segments to be considered the same
noise_volume = float(default=0.5, min=0.0, max=1.0) # Volume of generated noise
noise_length = float(default=1.0, min=0) # Length of generated noise [s]
#save_ads = boolean(default=False) # Save audio of segments considered ads. Useful for debugging.
"""
# Sample rate used when decoding audio for analysis [Hz].
RATE = 44100
class FileChooser(QWidget):
    """Row widget with a label, path line-edit, browse button and OK button."""
    def __init__(self,label='',initial_text='',exists=True,parent=None):
        QWidget.__init__(self, parent)
        layout = QHBoxLayout(self)
        label = QLabel(label)
        layout.addWidget(label)
        edit = QLineEdit(initial_text)
        layout.addWidget(edit)
        browse = QPushButton('...')
        browse.clicked.connect(self.browse)
        layout.addWidget(browse)
        # NOTE(review): relies on the module-level ``app`` QApplication
        # existing before any FileChooser is constructed.
        ok = QPushButton(app.style().standardIcon(QStyle.SP_DialogOkButton),'OK')
        ok.setEnabled(False)
        layout.addWidget(ok)
        # Enable OK as soon as the path text changes.
        edit.textChanged.connect(lambda x: ok.setEnabled(True))
        self.ok = ok
        self.edit = edit
        self.file_should_exist = exists
    def browse(self):
        # Open-file vs save-file dialog depending on whether the path must exist.
        if self.file_should_exist:
            path = QFileDialog.getOpenFileName(self)
        else:
            path = QFileDialog.getSaveFileName(self)
        if path != '':
            self.edit.setText(path)
class SegmentList(QWidget):
    """Simple container widget exposing a QListWidget of detected segments."""
    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        vbox = QVBoxLayout(self)
        segment_view = QListWidget(self)
        vbox.addWidget(segment_view)
        self.listWidget = segment_view
class ItemWithPlayControl(QWidget):
    """Checkbox row with a play/stop button previewing a segment's audio."""
    def __init__(self, text, soundfile,parent = None):
        QWidget.__init__(self, parent)
        # Fall back to Qt's bundled style icons when the theme lacks media icons.
        self.playIcon = QIcon.fromTheme('media-playback-start',QIcon(':/trolltech/styles/commonstyle/images/media-play-32.png'))
        self.stopIcon = QIcon.fromTheme('media-playback-stop',QIcon(':/trolltech/styles/commonstyle/images/media-stop-32.png'))
        layout = QHBoxLayout(self)
        self.checkbox = QCheckBox(text)
        layout.addWidget(self.checkbox)
        playbtn = QPushButton(self.playIcon, 'Play')
        playbtn.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Preferred)
        playbtn.clicked.connect(self.play)
        layout.addWidget(playbtn)
        self.playbtn = playbtn
        self.soundfile = soundfile
        # Reset this row's button whenever the shared module-level player
        # switches source (i.e. another row starts playing).
        player.currentSourceChanged.connect(self.stop)
    def sizeHint(self):
        return self.layout().sizeHint()
    def play(self):
        # Already playing
        if player.state() == Phonon.PlayingState and player.currentSource().fileName() == self.soundfile:
            player.stop()
            self.stop()
        else:
            player.setCurrentSource(Phonon.MediaSource(self.soundfile))
            player.play()
            self.playbtn.setText('Stop')
            self.playbtn.setIcon(self.stopIcon)
            # NOTE(review): overwrites self.soundfile with the player's view of
            # the source path — presumably to normalize it for the equality
            # check above; confirm this is intentional.
            self.soundfile = player.currentSource().fileName()
    def stop(self):
        self.playbtn.setText('Play')
        self.playbtn.setIcon(self.playIcon)
class MainWindow(QMainWindow):
    """Main window: pick an audio file, review detected ad segments, save."""
    def __init__(self, initialPath=None, parent=None):
        QMainWindow.__init__(self, parent)
        mainWidget = QWidget(self)
        self.setCentralWidget(mainWidget)
        layout = QVBoxLayout(mainWidget)
        fileChooser = FileChooser('Input','', True)
        fileChooser.ok.clicked.connect(self.addSegments)
        # Show chooser if no initial path was given, otherwise calculate segments right away
        # NOTE(review): '== None' / '!= None' should be 'is None' / 'is not None'.
        if initialPath == None:
            layout.addWidget(fileChooser)
        else:
            fileChooser.edit.setText(initialPath)
        segmentList = SegmentList()
        layout.addWidget(segmentList)
        buttons = QDialogButtonBox(QDialogButtonBox.Save | QDialogButtonBox.Close,Qt.Horizontal)
        # Saving runs through mythread so the UI stays responsive.
        buttons.button(QDialogButtonBox.Save).clicked.connect(lambda: mythread.run_task('Saving. This can take a few minutes.',self.save, parent=self))
        buttons.button(QDialogButtonBox.Save).setEnabled(False)
        buttons.button(QDialogButtonBox.Close).clicked.connect(self.close)
        layout.addWidget(buttons)
        self.fileChooser = fileChooser
        self.segmentList = segmentList
        self.segmentData = []
        self.buttons = buttons
        self.buildMenu()
        # Load config
        self.spec = configobj.ConfigObj(CONFSPEC.split('\n'), list_values=False)
        self.config = configobj_qt.from_QSettings(QSettings())
        self.config.configspec = self.spec
        self.config.validate(validate.Validator())
        if initialPath != None:
            self.addSegments()
    def buildMenu(self):
        # Application / Settings / Help menus.
        menuBar = self.menuBar()
        menu = menuBar.addMenu('&Application')
        action = menu.addAction('&Quit')
        action.triggered.connect(QApplication.instance().closeAllWindows)
        action.setShortcut('Ctrl+Q')
        menu = menuBar.addMenu('&Settings')
        action = menu.addAction('&Configure...')
        action.triggered.connect(self.showConfig)
        menu = menuBar.addMenu('&Help')
        action = menu.addAction('&About...')
        action.triggered.connect(lambda: QMessageBox.about(self, 'About','GuiDetect 0.0'))
    def showConfig(self):
        # Settings dialog; every option change is written back to QSettings.
        wnd = configobj_gui.ConfigWindow(self.config, self.spec,parent=self)
        wnd.optionChanged.connect(lambda : configobj_qt.to_QSettings(self.config))
        wnd.show()
    def save(self, thread):
        """Worker-thread task: replace checked segments with noise and save."""
        thread.emit(SIGNAL('valueChanged(int)'),1)
        thread.emit(SIGNAL('valueChanged(int)'),2)
        for i in range(self.segmentList.listWidget.count()):
            if thread.wantsToQuit():
                return
            (segment, is_ad, correlation) = self.segmentData[i]
            if self.segmentList.listWidget.itemWidget(self.segmentList.listWidget.item(i)).checkbox.isChecked():
                self.audio.replace_with_static(segment, self.config['noise_volume'],self.config['noise_length'])
                if not is_ad: # Not a known ad but was identified by user
                    self.db.add_confirmed_signature(segment.signature)
        self.db.save()
        # Copy metadata
        info = mutagen.File(self.original_path)
        if type(info) == mutagen.mp3.MP3:
            info = mutagen.easyid3.EasyID3(self.original_path)
        self.audio.save() # BUG: Will not save in original sample rate, channel count etc.
        info2 = mutagen.File(self.original_path)
        if type(info2) == mutagen.mp3.MP3:
            info2 = mutagen.easyid3.EasyID3(self.original_path)
        for (key, value) in info.items():
            info2[key] = value
        info2.save()
    def addSegments(self):
        """Analyze the chosen file and populate the segment list widget."""
        self.fileChooser.ok.setEnabled(False)
        self.segmentList.listWidget.clear()
        try:
            self.segmentData = mythread.run_task('Analyzing. This can take a few minutes.', self.dostuff, parent=self)
        # NOTE(review): Python 2 'except Exception, e' syntax; 'occured' in the
        # message below is a typo for 'occurred' (user-visible).
        except Exception, e:
            QMessageBox.critical(self,'An error occured',str(e))
            return
        for (segment, is_ad, correlation) in self.segmentData:
            start = segment.start
            end = segment.end
            soundfile = segment.soundfile
            item = QListWidgetItem()
            self.segmentList.listWidget.addItem(item)
            # Label: start..end in seconds, plus the match correlation if known.
            text = '%.0fs..%.0fs'%(float(start)/float(self.audio.rate),float(end)/float(self.audio.rate))
            if correlation > 0.0:
                text += ' (%.2f)'%correlation
            widget = ItemWithPlayControl(text, segment.soundfile)
            widget.checkbox.setChecked(is_ad)
            item.setSizeHint(widget.sizeHint())
            self.segmentList.listWidget.setItemWidget(item,widget)
        self.buttons.button(QDialogButtonBox.Save).setEnabled(True)
    def dostuff(self, thread):
        """Worker task: find candidate segments and check them against the DB."""
        segments = []
        thread.emit(SIGNAL('valueChanged(int)'),1)
        thread.emit(SIGNAL('valueChanged(int)'),2)
        filepath = str(self.fileChooser.edit.text())
        self.original_path = filepath
        info = mutagen.File(filepath)
        if type(info) == mutagen.mp3.MP3:
            info = mutagen.easyid3.EasyID3(filepath)
        try:
            album = info['album'][0]
        except (KeyError, TypeError):
            QMessageBox.warning(self, 'Warning!','Could not determine album name.')
            # NOTE(review): 'uknnown' looks like a typo for 'unknown'; it is
            # used as the signature-DB file name below.
            album = 'uknnown'
        print album
        # Per-album signature database of known ads.
        db = detect.SignatureDB(album+'.db')
        self.db = db
        audio = detect.AudioFile(filepath, RATE, 1)
        self.audio = audio
        try: # For debugging purposes it's sometimes sensible to not actually have a thread
            if thread.wantsToQuit():
                return []
        except AttributeError:
            pass
        c = self.config
        for segment in audio.find_segments(True, c['silence_threshold'],c['min_silence_length'],c['min_ad_length'],c['max_ad_length']):
            try:
                if thread.wantsToQuit():
                    return []
            except AttributeError:
                pass
            (is_ad, correlation) = db.check_signature(segment.signature)
            segments.append((segment, is_ad, correlation))
        db.file_checked()
        db.save()
        return segments
    def close(self):
        # Remove temporary files
        for (segment, _, _) in self.segmentData:
            segment.remove_soundfile()
        return QMainWindow.close(self)
# Application bootstrap: the Qt app and the shared Phonon player used by all
# ItemWithPlayControl rows.
app = QApplication(sys.argv)
app.setApplicationName('guidetect')
app.setOrganizationName('pafcu')
player = Phonon.createPlayer(Phonon.NoCategory)
# Optional initial input file from the command line.
try:
    initial_path = sys.argv[1]
except IndexError:
    initial_path = None
wnd = MainWindow(initial_path)
wnd.show()
app.exec_()
| |
from datetime import datetime
from decimal import Decimal
from gnucash_reports.configuration.current_date import get_today
from gnucash_reports.configuration.expense_categories import get_accounts_for_category
from gnucash_reports.utilities import clean_account_name
import enum
import piecash
import re
@enum.unique
class AccountTypes(enum.Enum):
    """
    Enumeration that will contain the account types.
    """
    # Values mirror GnuCash's account-type identifiers.
    none = 'NONE'
    bank = 'BANK'
    cash = 'CASH'
    credit = 'CREDIT'
    asset = 'ASSET'
    liability = 'LIABILITY'
    stock = 'STOCK'
    mutual_fund = 'MUTUAL'
    currency = 'CURRENCY'
    income = 'INCOME'
    expense = 'EXPENSE'
    equity = 'EQUITY'
    accounts_receivable = 'ACCOUNTS_RECEIVABLE'
    accounts_payable = 'ACCOUNTS_PAYABLE'
    root = 'ROOT'
    trading = 'TRADING'
# Module-level state: the open piecash book (set by ``initialize``) and a
# cache mapping '.'-joined full account names to Account objects.
_book = None
_account_cache = dict()
def initialize(file_uri, read_only=True, do_backup=False):
    """
    Open the gnucash book at the given uri and keep it as the module book.

    :param file_uri: uri containing gnucash data
    :param read_only: open as read_only
    :param do_backup: do a backup first
    :return: gnucash book object
    """
    global _book
    _book = piecash.open_book(uri_conn=file_uri,
                              open_if_lock=True,
                              readonly=read_only,
                              do_backup=do_backup)
    return _book
def get_account(account_name):
    """
    Retrieve the account object based on the account name provided. Raises an exception if the account or any of it's
    parents cannot be found.
    :param account_name: name of the account to retrieve
    :return: the account object.
    """
    global _account_cache
    current_account = _book.root_account
    current_account_name = ''
    # Walk the tree one path component at a time; input may be separated by
    # ':' or '.', cache keys are always '.'-joined.
    for child_name in re.split('[:.]', account_name):
        if current_account_name:
            current_account_name = current_account_name + '.' + child_name
        else:
            current_account_name = child_name
        account = _account_cache.get(current_account_name, None)
        if account is None:
            # Cache miss: look the child up under the current parent.
            account = _book.session.query(piecash.Account).filter(piecash.Account.parent == current_account,
                                                                  piecash.Account.name == child_name).one_or_none()
            if account is None:
                raise RuntimeError('Account %s is not found in %s' % (account_name, current_account))
        # Refresh the cache entry and descend.
        _account_cache[current_account_name] = account
        current_account = account
    return current_account
def get_splits(account, start_date=None, end_date=None, credit=True, debit=True):
    """
    Return all of the splits associated with the account.

    :param account: account object
    :param start_date: start date to fetch splits for; None disables the
        lower-bound filter
    :param end_date: end date to fetch splits for; defaults to today
    :param credit: should credit splits be returned
    :param debit: should debit splits be returned
    :return: list of the splits
    """
    # Sanitize values: accept datetimes and coerce them down to dates.
    if start_date is not None and hasattr(start_date, 'date'):
        start_date = start_date.date()
    # Default/sanitize end_date unconditionally. Previously this was nested
    # under "start_date is not None", so calling with start_date=None left
    # end_date as None and the post_date <= None filter was nonsensical.
    if not end_date:
        end_date = get_today()
    elif hasattr(end_date, 'date'):
        end_date = end_date.date()
    filters = [piecash.Split.account == account,
               piecash.Transaction.post_date <= end_date]
    # If start_date is not defined, then don't use that value to filter dates
    if start_date:
        filters.append(piecash.Transaction.post_date >= start_date)
    # Filter out the credit and debits at the database level instead of in this script
    if credit and not debit:
        filters.append(
            piecash.Split.value > Decimal('0.0')
        )
    elif debit and not credit:
        filters.append(
            piecash.Split.value < Decimal('0.0')
        )
    split_query = _book.session.query(piecash.Split).join(piecash.Transaction).filter(*filters)
    split_query = split_query.order_by(piecash.Transaction._post_date)
    return split_query.all()
def account_walker(accounts, ignores=None, place_holders=False, recursive=True, **kwargs):
    """
    Generator method that will recursively walk the list of accounts provided, ignoring the accounts that are in the
    ignore list.

    :param accounts: list of accounts to start processing
    :param ignores: any accounts that should be ignored
    :param recursive: walk through the children of the accounts in the list
    :param place_holders: return place holder accounts
    """
    if not ignores:
        ignores = []
    # Allow for a none account list to be provided
    if accounts is None:
        accounts = []
    # Normalize BOTH lists through clean_account_name. Previously only the
    # ignores (and child names) were cleaned, so an account passed as 'A:B'
    # could never match an ignore entry cleaned to 'A.B'. get_account accepts
    # either separator, so cleaning the work list is safe.
    _account_list = [clean_account_name(a) for a in accounts]
    ignores = [clean_account_name(account_name) for account_name in ignores]
    while _account_list:
        account_name = _account_list.pop()
        if account_name in ignores:
            continue
        account = get_account(account_name)
        # Yield placeholders only when explicitly requested.
        if not account.placeholder or place_holders:
            yield account
        if recursive:
            _account_list += [clean_account_name(a.fullname) for a in account.children]
def parse_walker_parameters(definition):
    """
    Convert the incoming definition into kwargs usable by account_walker.

    :param definition: dictionary, list, or string containing account definitions.
    :return: dictionary containing:
       accounts - list of accounts to walk through
       ignores - list of accounts to ignore while walking through the accounts
       place_holders - should place holder accounts be processed
       recursive - should the children accounts be processed
       name - display name for the walker parameters
    """
    return_value = {
        'ignores': None,
        'place_holders': False,
        'recursive': True,
        'name': None
    }
    # Allow for a none definition to be provided and overwrite to an empty list
    if definition is None:
        definition = []
    if isinstance(definition, dict):
        # Work on a copy so the caller's dictionary is never mutated (the
        # 'category' branch below rewrites several keys).
        definition = dict(definition)
        # If the definition has a defined category, then this will override all of the other parameters. Category
        # definitions have already been processed account walkers, and therefore should not contain place_holders
        # or recursive values.
        if 'category' in definition:
            definition.update({
                'ignores': None,
                'place_holders': False,
                'recursive': False,
                'accounts': get_accounts_for_category(definition['category']),
            })
        return_value.update(definition)
    elif isinstance(definition, (list, set)):
        return_value.update(accounts=list(definition))
    else:
        # A bare string (or other scalar) names a single account.
        return_value.update(accounts=[definition])
    # Set a name value for the account walker parameters to a default which is the leaf name of the first account
    if return_value['name'] is None:
        if return_value['accounts']:
            return_value['name'] = clean_account_name(return_value['accounts'][0]).split('.')[-1]
        else:
            return_value['name'] = 'None'
    return return_value
def get_balance_on_date(account, date_value=get_today(), currency=None):
"""
Step through the splits in the account and return the value in the currency requested.
:param account: account to fetch the splits for
:param date_value: the date value to fetch the balance as of
:param currency: the currency to calculate the value in. If none, uses the accounts currency.
:return: amount that the account is worth as of date.
"""
# Sanitize values
if hasattr(date_value, 'date'):
date_value = date_value.date()
splits = get_splits(account, end_date=date_value)
if splits:
balance_decimal = sum([s.quantity for s in splits])
if currency:
# If the account_commodity and the currency are the same value, then just ignore fetching the value from the
# database.
if currency.mnemonic != account.commodity.mnemonic:
price_value = _book.session.query(piecash.Price).filter(
piecash.Price.commodity == account.commodity,
piecash.Price.currency == currency,
piecash.Price.date <= date_value,
).order_by(piecash.Price.date.desc()).limit(1).one_or_none()
if price_value:
# print date_value, account.fullname, balance_decimal, price_value.value
balance_decimal = balance_decimal * price_value.value
else:
print currency, account.commodity, date_value
raise NotImplementedError('Couldn\'t find a valid value')
else:
balance_decimal = Decimal(0.0)
return balance_decimal
def get_corr_account_full_name(split):
    """
    Iterate through the parent splits and return all of the accounts that have a value in the opposite sign of the value
    in split.
    :param split:
    :return:
    """
    return_value = []
    signed = split.value.is_signed()
    # Collect every sibling split in the transaction whose value carries the
    # opposite sign of this split's value.
    for child_split in split.transaction.splits:
        split_value = child_split.value
        if signed != split_value.is_signed():
            return_value.append(child_split.account)
    if not return_value:
        raise RuntimeError('Couldn\'t find opposite accounts.')
    if len(return_value) > 1:
        # Debug dump before failing (Python 2 print statement).
        print 'return_value: ', return_value
        raise RuntimeError('Split returned more than one correlating account')
    return return_value[0].fullname
def get_prices(commodity, currency):
    """
    Return all prices for *commodity* quoted in *currency*, newest first.

    :param commodity:
    :param currency:
    :return:
    """
    query = _book.session.query(piecash.Price).filter(
        piecash.Price.commodity == commodity,
        piecash.Price.currency == currency
    ).order_by(piecash.Price.date.desc())
    return query.all()
| |
import unittest
import numpy as np
import scipy.sparse
from sklearn.utils.testing import assert_array_almost_equal
from autosklearn.pipeline.implementations.OneHotEncoder import OneHotEncoder
# Fixture data: each ``*_1h`` matrix is the expected one-hot encoding of the
# matching input matrix.
dense1 = np.array([[0, 1, 0],
                   [0, 0, 0],
                   [1, 1, 0]])
dense1_1h = np.array([[1, 0, 0, 1, 1],
                      [1, 0, 1, 0, 1],
                      [0, 1, 0, 1, 1]])
dense1_1h_minimum_fraction = np.array([[0, 1, 0, 1, 1],
                                       [0, 1, 1, 0, 1],
                                       [1, 0, 0, 1, 1]])
# Including NaNs
dense2 = np.array([[0, np.NaN, 0],
                   [np.NaN, 0, 2],
                   [1, 1, 1],
                   [np.NaN, 0, 1]])
dense2_1h = np.array([[0, 1, 0, 1, 0, 0, 1, 0, 0],
                      [1, 0, 0, 0, 1, 0, 0, 0, 1],
                      [0, 0, 1, 0, 0, 1, 0, 1, 0],
                      [1, 0, 0, 0, 1, 0, 0, 1, 0]])
dense2_1h_minimum_fraction = np.array([[1, 0, 1, 0, 1, 0],
                                       [0, 1, 0, 1, 1, 0],
                                       [1, 0, 1, 0, 0, 1],
                                       [0, 1, 0, 1, 0, 1]])
# Only the first two columns treated as categorical; the third passes through.
dense2_partial_1h = np.array([[0., 1., 0., 1., 0., 0., 0.],
                              [1., 0., 0., 0., 1., 0., 2.],
                              [0., 0., 1., 0., 0., 1., 1.],
                              [1., 0., 0., 0., 1., 0., 1.]])
dense2_1h_minimum_fraction_as_sparse = np.array([[0, 0, 1, 0, 0, 0],
                                                 [0, 1, 0, 0, 1, 0],
                                                 [1, 0, 0, 1, 0, 1],
                                                 [0, 1, 0, 0, 0, 1]])
# All NaN slice
dense3 = np.array([[0, 1, np.NaN],
                   [1, 0, np.NaN]])
dense3_1h = np.array([[1, 0, 0, 1, 1],
                      [0, 1, 1, 0, 1]])
sparse1 = scipy.sparse.csc_matrix(([3, 2, 1, 1, 2, 3],
                                   ((1, 4, 5, 2, 3, 5),
                                    (0, 0, 0, 1, 1, 1))), shape=(6, 2))
sparse1_1h = scipy.sparse.csc_matrix(([1, 1, 1, 1, 1, 1],
                                      ((5, 4, 1, 2, 3, 5),
                                       (0, 1, 2, 3, 4, 5))), shape=(6, 6))
# NOTE(review): 'paratial' looks like a typo for 'partial'; the name is used
# by the test methods below, so renaming would need to touch those too.
sparse1_paratial_1h = scipy.sparse.csc_matrix(([1, 1, 1, 1, 2, 3],
                                               ((5, 4, 1, 2, 3, 5),
                                                (0, 1, 2, 3, 3, 3))),
                                              shape=(6, 4))
# All zeros slice
sparse2 = scipy.sparse.csc_matrix(([2, 1, 0, 0, 0, 0],
                                   ((1, 4, 5, 2, 3, 5),
                                    (0, 0, 0, 1, 1, 1))), shape=(6, 2))
sparse2_1h = scipy.sparse.csc_matrix(([1, 1, 1, 1, 1, 1],
                                      ((5, 4, 1, 2, 3, 5),
                                       (0, 1, 2, 3, 3, 3))), shape=(6, 4))
sparse2_csr = scipy.sparse.csr_matrix(([2, 1, 0, 0, 0, 0],
                                       ((1, 4, 5, 2, 3, 5),
                                        (0, 0, 0, 1, 1, 1))), shape=(6, 2))
sparse2_csr_1h = scipy.sparse.csr_matrix(([1, 1, 1, 1, 1, 1],
                                          ((5, 4, 1, 2, 3, 5),
                                           (0, 1, 2, 3, 3, 3))), shape=(6, 4))
class OneHotEncoderTest(unittest.TestCase):
    def test_dense1(self):
        # Plain dense input without NaNs; sparse and dense output paths.
        self.fit_then_transform(dense1_1h, dense1)
        self.fit_then_transform_dense(dense1_1h, dense1)
    def test_dense1_minimum_fraction(self):
        # Categories occurring below the minimum fraction get merged.
        self.fit_then_transform(dense1_1h_minimum_fraction, dense1, minimum_fraction=0.5)
        self.fit_then_transform_dense(dense1_1h_minimum_fraction, dense1, minimum_fraction=0.5)
    def test_dense2(self):
        # Dense input containing NaNs.
        self.fit_then_transform(dense2_1h, dense2)
        self.fit_then_transform_dense(dense2_1h, dense2)
    def test_dense2_minimum_fraction(self):
        self.fit_then_transform(dense2_1h_minimum_fraction, dense2,
                                minimum_fraction=0.3)
        self.fit_then_transform_dense(dense2_1h_minimum_fraction, dense2,
                                      minimum_fraction=0.3)
    def test_dense2_with_non_sparse_components(self):
        # Mixed categorical / continuous columns.
        self.fit_then_transform(dense2_partial_1h, dense2,
                                categorical_features=[True, True, False])
        self.fit_then_transform_dense(dense2_partial_1h, dense2,
                                      categorical_features=[True, True, False])
    # Minimum fraction is not too interesting here...
    def test_dense3(self):
        # Column consisting entirely of NaNs.
        self.fit_then_transform(dense3_1h, dense3)
        self.fit_then_transform_dense(dense3_1h, dense3)
def test_sparse1(self):
self.fit_then_transform(sparse1_1h.todense(), sparse1)
self.fit_then_transform_dense(sparse1_1h.todense(), sparse1)
def test_sparse1_minimum_fraction(self):
expected = np.array([[0, 1, 0, 0, 1, 1],
[0, 0, 1, 1, 0, 1]], dtype=float).transpose()
self.fit_then_transform(expected, sparse1,
minimum_fraction=0.5)
self.fit_then_transform_dense(expected, sparse1,
minimum_fraction=0.5)
def test_sparse1_with_non_sparse_components(self):
self.fit_then_transform(sparse1_paratial_1h.todense(), sparse1,
categorical_features=[True, False])
# This test does not apply here. The sparse matrix will be cut into a
# continouos and a categorical part, after one hot encoding only the
# categorical part is an array, the continuous part will still be a
# sparse matrix. Therefore, the OHE will only return a sparse matrix
#self.fit_then_transform_dense(sparse1_paratial_1h.todense(), sparse1,
# categorical_features=[True, False])
def test_sparse2(self):
self.fit_then_transform(sparse2_1h.todense(), sparse2)
self.fit_then_transform_dense(sparse2_1h.todense(), sparse2)
def test_sparse2_minimum_fraction(self):
expected = np.array([[0, 1, 0, 0, 1, 1],
[0, 0, 1, 1, 0, 1]], dtype=float).transpose()
self.fit_then_transform(expected, sparse2,
minimum_fraction=0.5)
self.fit_then_transform_dense(expected, sparse2,
minimum_fraction=0.5)
def test_sparse2_csr(self):
self.fit_then_transform(sparse2_csr_1h.todense(), sparse2_csr)
self.fit_then_transform_dense(sparse2_csr_1h.todense(), sparse2_csr)
def test_sparse_on_dense2_minimum_fraction(self):
    """dense2 converted to CSR honours minimum_fraction like true sparse data."""
    as_sparse = scipy.sparse.csr_matrix(dense2)
    for check in (self.fit_then_transform, self.fit_then_transform_dense):
        check(dense2_1h_minimum_fraction_as_sparse, as_sparse,
              minimum_fraction=0.5)
def fit_then_transform(self, expected, input, categorical_features='all',
                       minimum_fraction=None):
    """Assert sparse one-hot output matches `expected` on both API paths.

    Checks fit_transform() in one call, then fit() followed by transform(),
    each time verifying a scipy CSR matrix with the expected contents.
    """
    # Path 1: fit_transform in a single call.
    encoder = OneHotEncoder(categorical_features=categorical_features,
                            minimum_fraction=minimum_fraction)
    result = encoder.fit_transform(input.copy())
    self.assertIsInstance(result, scipy.sparse.csr_matrix)
    assert_array_almost_equal(expected.astype(float),
                              result.todense())

    # Path 2: fit first, transform afterwards on a fresh encoder.
    encoder = OneHotEncoder(categorical_features=categorical_features,
                            minimum_fraction=minimum_fraction)
    encoder.fit(input.copy())
    result = encoder.transform(input.copy())
    self.assertIsInstance(result, scipy.sparse.csr_matrix)
    assert_array_almost_equal(expected, result.todense())
def fit_then_transform_dense(self, expected, input,
                             categorical_features='all',
                             minimum_fraction=None):
    """Like fit_then_transform, but with sparse=False -> ndarray output."""
    # Path 1: fit_transform in a single call.
    encoder = OneHotEncoder(categorical_features=categorical_features,
                            sparse=False, minimum_fraction=minimum_fraction)
    result = encoder.fit_transform(input.copy())
    self.assertIsInstance(result, np.ndarray)
    assert_array_almost_equal(expected, result)

    # Path 2: fit first, transform afterwards on a fresh encoder.
    encoder = OneHotEncoder(categorical_features=categorical_features,
                            sparse=False, minimum_fraction=minimum_fraction)
    encoder.fit(input.copy())
    result = encoder.transform(input.copy())
    self.assertIsInstance(result, np.ndarray)
    assert_array_almost_equal(expected, result)
def test_transform_with_unknown_value(self):
    """Values unseen during fit must be dropped from the transform output."""
    train = np.array(((0, 1, 2, 3, 4, 5), (0, 1, 2, 3, 4, 5))).transpose()
    unseen = np.array(((0, 1, 2, 6), (0, 1, 6, 7))).transpose()

    # Dense input: 5 of the 8 test entries were seen during fit.
    ohe = OneHotEncoder()
    ohe.fit(train)
    output = ohe.transform(unseen).todense()
    self.assertEqual(5, np.sum(output))

    # Sparse input: only 3 ones survive for the same data.
    ohe = OneHotEncoder()
    ohe.fit(scipy.sparse.csr_matrix(train))
    output = ohe.transform(scipy.sparse.csr_matrix(unseen)).todense()
    self.assertEqual(3, np.sum(output))
| |
"""Storm topologies specifications logic.
Note: this module is coupled with the pyleus_topology.yaml file format
and with the Spec classes in the Java code. Any change to this class should
be propagated to these two resources, too.
"""
from __future__ import absolute_import
import copy
from pyleus.exception import InvalidTopologyError
from pyleus.storm import DEFAULT_STREAM
from pyleus.storm.component import SERIALIZERS
def _as_set(obj):
return set() if obj is None else set(obj)
def _as_list(obj):
return list() if obj is None else list(obj)
class TopologySpec(object):
    """Topology level specification class.

    Mirrors the top-level structure of a pyleus_topology.yaml file and
    validates it on construction.
    """

    def __init__(self, specs):
        """Convert the specs dictionary coming from the yaml file into
        attributes and perform validation at topology level.

        Raises InvalidTopologyError on missing mandatory tags, unknown
        serializers or unknown component tags.
        """
        if not _as_set(specs).issuperset(set(["name", "topology"])):
            raise InvalidTopologyError(
                "Each topology must specify tags 'name' and 'topology'"
                " Found: {0}".format(_as_list(specs)))

        self.name = specs["name"]

        # Optional topology-level knobs: only set when present in the yaml,
        # so missing tags leave the attribute unset.
        if "workers" in specs:
            self.workers = specs["workers"]

        if "ackers" in specs:
            self.ackers = specs["ackers"]

        if "max_spout_pending" in specs:
            self.max_spout_pending = specs["max_spout_pending"]

        if "max_shellbolt_pending" in specs:
            self.max_shellbolt_pending = specs["max_shellbolt_pending"]

        if "message_timeout_secs" in specs:
            self.message_timeout_secs = specs["message_timeout_secs"]

        if "logging_config" in specs:
            self.logging_config = specs["logging_config"]

        if "serializer" in specs:
            if specs["serializer"] in SERIALIZERS:
                self.serializer = specs["serializer"]
            else:
                raise InvalidTopologyError(
                    "Unknown serializer. Allowed: {0}. Found: {1}"
                    .format(SERIALIZERS, specs["serializer"]))

        if "requirements_filename" in specs:
            self.requirements_filename = specs["requirements_filename"]
        else:
            self.requirements_filename = None

        self.topology = []
        for component in specs["topology"]:
            if "spout" in component:
                self.topology.append(SpoutSpec(component["spout"]))
            elif "bolt" in component:
                self.topology.append(BoltSpec(component["bolt"]))
            else:
                raise InvalidTopologyError(
                    "Unknown tag. Allowed:'bolt' and 'spout'. Found: {0}"
                    .format(_as_list(specs["topology"])))

    def verify_groupings(self):
        """Verify that the groupings specified in the yaml file match
        with all the other specs.
        """
        topology_out_fields = {}
        for component in self.topology:
            topology_out_fields[component.name] = component.output_fields

        for component in self.topology:
            # BoltSpec only sets .groupings when the yaml contains a
            # "groupings" section; use getattr so bolts without one do not
            # raise AttributeError here.
            if (isinstance(component, BoltSpec) and
                    getattr(component, "groupings", None) is not None):
                component.verify_groupings(topology_out_fields)

    def asdict(self):
        """Return a copy of the object as a dictionary."""
        dict_object = copy.deepcopy(self.__dict__)
        dict_object["topology"] = [
            component.asdict() for component in self.topology]
        return dict_object
class ComponentSpec(object):
    """Base class for Storm component specifications."""

    COMPONENT = "component"

    # Tags a component section of the yaml file is allowed to contain.
    KEYS_LIST = [
        "name", "type", "module", "tick_freq_secs", "parallelism_hint",
        "options", "output_fields", "groupings", "tasks"]

    def __init__(self, specs):
        """Convert a component specs dictionary coming from the yaml file into
        attributes and perform validation at the component level."""
        if specs is None:
            raise InvalidTopologyError(
                "[{0}] Empty components are not allowed. At least 'name'"
                " and 'module' must be specified".format(self.COMPONENT))

        if "name" not in specs:
            raise InvalidTopologyError(
                "[{0}] Tag not found in yaml file: {1}"
                .format(self.COMPONENT, "name"))
        self.name = specs["name"]

        # Components are python unless the yaml says otherwise.
        self.type = specs.get("type", "python")

        if not set(self.KEYS_LIST).issuperset(_as_set(specs)):
            raise InvalidTopologyError(
                "[{0}] These tags are not allowed: {1}"
                .format(self.name,
                        _as_list(_as_set(specs) - set(self.KEYS_LIST))))

        # Optional parameters: copied over only when present, so a missing
        # tag leaves the attribute unset.
        for optional_key in ("tick_freq_secs", "parallelism_hint", "tasks"):
            if optional_key in specs:
                setattr(self, optional_key, specs[optional_key])

        # These two are not currently specified in the yaml file
        self.options = specs.get("options", None)
        self.output_fields = specs.get("output_fields", None)

    def update_from_module(self, specs):
        """Update the component specs with the ones coming from the python
        module and perform some additional validation.
        """
        if _as_set(specs) != set(["component_type", "output_fields",
                                  "options"]):
            raise InvalidTopologyError(
                "[{0}] Python class should specify attributes 'output_fields'"
                " and 'options'. Found: {1}. Are you inheriting from Bolt or"
                " Spout?".format(self.name, specs))

        if specs["component_type"] != self.COMPONENT:
            raise InvalidTopologyError(
                "[{0}] Component type mismatch. Python class: {1}. Yaml"
                " file: {2}".format(self.name, specs["component_type"],
                                    self.COMPONENT))

        self.output_fields = specs["output_fields"]

        # Every option named in the yaml must also exist in the module.
        if not _as_set(specs["options"]).issuperset(_as_set(self.options)):
            raise InvalidTopologyError(
                "[{0}] Options mismatch. Python class: {1}. Yaml file: {2}"
                .format(self.name,
                        _as_list(specs["options"]),
                        _as_list(self.options)))

    def asdict(self):
        """Return a copy of the object as a dictionary"""
        return {self.COMPONENT: copy.deepcopy(self.__dict__)}
class BoltSpec(ComponentSpec):
    """Bolt specifications class."""

    COMPONENT = "bolt"

    # Grouping types supported by the yaml file.
    GROUPINGS_LIST = [
        "global_grouping", "shuffle_grouping", "fields_grouping",
        "local_or_shuffle_grouping", "none_grouping", "all_grouping"]

    def __init__(self, specs):
        """Bolt specific initialization. Bolts may have a grouping section."""
        super(BoltSpec, self).__init__(specs)

        if "module" not in specs:
            raise InvalidTopologyError(
                "[{0}] Tag not found in yaml file: {1}"
                .format(self.name, "module"))
        self.module = specs["module"]

        if "groupings" in specs:
            self.groupings = []
            for grouping in specs["groupings"]:
                self.groupings.append(self._expand_grouping(grouping))

    def _expand_grouping(self, group):
        """Normalize the groupings specified in the yaml file for that
        component.

        Expands the short "grouping_type: component_name" form into the full
        {grouping_type: {component, stream}} form, defaulting the stream.
        """
        if len(group) != 1:
            raise InvalidTopologyError(
                "[{0}] Each grouping element must specify one and only"
                " one type. Found: {1}"
                .format(self.name, list(group.keys())))

        # dict.keys() is not indexable on Python 3; next(iter(...)) gives
        # the single key on both Python 2 and 3.
        group_type = next(iter(group))

        if group_type not in self.GROUPINGS_LIST:
            raise InvalidTopologyError(
                "[{0}] Unknown grouping type. Allowed: {1}. Found: {2}"
                .format(self.name, self.GROUPINGS_LIST, group_type))

        group_spec = group[group_type]

        # only the name of the component has been specified
        if isinstance(group[group_type], str):
            group_spec = {
                "component": group_spec,
                "stream": DEFAULT_STREAM,
            }
        # specified component tag, but not stream
        elif "stream" not in group_spec:
            group_spec["stream"] = DEFAULT_STREAM

        return {group_type: group_spec}

    def _verify_grouping_format(self, group_type, group_spec):
        """Verify grouping format based on the kind of grouping."""
        if group_type in (
                "global_grouping",
                "shuffle_grouping",
                "local_or_shuffle_grouping",
                "none_grouping",
                "all_grouping"):
            if _as_set(group_spec) != set(["component", "stream"]):
                raise InvalidTopologyError(
                    "[{0}] [{1}] Unrecognized format: {2}".format(
                        self.name, group_type,
                        _as_list(group_spec)))

        elif group_type == "fields_grouping":
            if (_as_set(group_spec) !=
                    set(["component", "stream", "fields"])):
                raise InvalidTopologyError(
                    "[{0}] [{1}] Unrecognized format: {2}".format(
                        self.name, group_type,
                        _as_list(group_spec)))
            fields = group_spec["fields"]
            if fields is None:
                raise InvalidTopologyError(
                    "[{0}] [{1}] Must specify at least one field."
                    .format(self.name, group_type))

    def _stream_exists(self, component, stream, group_type, topo_out_fields):
        """If stream does not exist in the topology specs, raise an error."""
        if (component not in topo_out_fields or
                stream not in topo_out_fields[component]):
            raise InvalidTopologyError(
                "[{0}] [{1}] Unknown stream: [{2}] [{3}]"
                .format(self.name, group_type, component, stream))

    def _verify_grouping_input(self, group_type, group_spec, topo_out_fields):
        """Verify that grouping input streams and fields exist within the
        topology.
        """
        component = group_spec["component"]
        stream = group_spec["stream"]
        self._stream_exists(
            component,
            stream,
            group_type,
            topo_out_fields)

        if "fields" in group_spec:
            fields = group_spec["fields"]
            for field in fields:
                if field not in topo_out_fields[component][stream]:
                    raise InvalidTopologyError(
                        "[{0}] [{1}] Stream {2} does not have field:"
                        " {3}.".format(
                            self.name, group_type, stream, field))

    def verify_groupings(self, topo_out_fields):
        """Verify that the groupings specified in the yaml file for that
        component match with all the other specs.
        """
        for group in self.groupings:
            # Python 2/3 safe single-key extraction (was group.keys()[0]).
            group_type = next(iter(group))
            group_spec = group[group_type]

            self._verify_grouping_format(group_type, group_spec)
            self._verify_grouping_input(group_type, group_spec,
                                        topo_out_fields)
class SpoutSpec(ComponentSpec):
    """Spout specifications class."""

    COMPONENT = "spout"

    def __init__(self, specs):
        """Spout specific initialization."""
        super(SpoutSpec, self).__init__(specs)

        if self.type == "python":
            # Python spouts must name the module implementing them.
            if "module" not in specs:
                raise InvalidTopologyError(
                    "[{0}] Tag not found in yaml file: {1}"
                    .format(self.name, "module"))
            self.module = specs["module"]
        elif self.type == "kafka":
            # Kafka spouts emit a single "message" field on the default stream.
            self.output_fields = {DEFAULT_STREAM: ["message"]}

    def update_from_module(self, specs):
        """Specific spout validation. Spouts must have output fields."""
        super(SpoutSpec, self).update_from_module(specs)

        if self.output_fields is None:
            raise InvalidTopologyError(
                "[{0}] Spout must have 'output_fields' specified in its Python"
                " module".format(self.name))
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Multipurpose TensorFlow Docker Helper.
- Assembles Dockerfiles
- Builds images (and optionally runs image tests)
- Pushes images to Docker Hub (provided with credentials)
Logs are written to stderr; the list of successfully built images is
written to stdout.
Read README.md (in this directory) for instructions!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import errno
import itertools
import json
import multiprocessing
import os
import platform
import re
import shutil
import sys
from absl import app
from absl import flags
import cerberus
import docker
import yaml
FLAGS = flags.FLAGS

# --- Docker Hub credentials / upload configuration ---

flags.DEFINE_string('hub_username', None,
                    'Dockerhub username, only used with --upload_to_hub')

flags.DEFINE_string(
    'hub_password', None,
    ('Dockerhub password, only used with --upload_to_hub. Use from an env param'
     ' so your password isn\'t in your history.'))

flags.DEFINE_integer('hub_timeout', 3600,
                     'Abort Hub upload if it takes longer than this.')

flags.DEFINE_string(
    'repository', 'tensorflow',
    'Tag local images as {repository}:tag (in addition to the '
    'hub_repository, if uploading to hub)')

flags.DEFINE_string(
    'hub_repository', None,
    'Push tags to this Docker Hub repository, e.g. tensorflow/tensorflow')

flags.DEFINE_boolean(
    'upload_to_hub',
    False,
    ('Push built images to Docker Hub (you must also provide --hub_username, '
     '--hub_password, and --hub_repository)'),
    short_name='u',
)

# --- Build pipeline stages ---
# Fixed: the help strings for the next two flags both read
# 'Do not build images', which described neither flag.

flags.DEFINE_boolean(
    'construct_dockerfiles', False,
    'Generate Dockerfiles into --dockerfile_dir.', short_name='d')

flags.DEFINE_boolean(
    'keep_temp_dockerfiles',
    False,
    'Retain .temp.Dockerfiles created while building images.',
    short_name='k')

flags.DEFINE_boolean(
    'build_images', False,
    'Build Docker images from the assembled Dockerfiles.', short_name='b')

flags.DEFINE_string(
    'run_tests_path', None,
    ('Execute test scripts on generated Dockerfiles before pushing them. '
     'Flag value must be a full path to the "tests" directory, which is usually'
     ' $(realpath ./tests). A failed tests counts the same as a failed build.'))

flags.DEFINE_boolean(
    'stop_on_failure', False,
    ('Stop processing tags if any one build fails. If False or not specified, '
     'failures are reported but do not affect the other images.'))

flags.DEFINE_boolean(
    'dry_run',
    False,
    'Do not build or deploy anything at all.',
    short_name='n',
)

# --- Tag selection ---

flags.DEFINE_string(
    'exclude_tags_matching',
    None,
    ('Regular expression that skips processing on any tag it matches. Must '
     'match entire string, e.g. ".*gpu.*" ignores all GPU tags.'),
    short_name='x')

flags.DEFINE_string(
    'only_tags_matching',
    None,
    ('Regular expression that skips processing on any tag it does not match. '
     'Must match entire string, e.g. ".*gpu.*" includes only GPU tags.'),
    short_name='i')

# --- Input/output locations and build arguments ---

flags.DEFINE_string(
    'dockerfile_dir',
    './dockerfiles', 'Path to an output directory for Dockerfiles.'
    ' Will be created if it doesn\'t exist.'
    ' Existing files in this directory will be deleted when new Dockerfiles'
    ' are made.',
    short_name='o')

flags.DEFINE_string(
    'partial_dir',
    './partials',
    'Path to a directory containing foo.partial.Dockerfile partial files.'
    ' can have subdirectories, e.g. "bar/baz.partial.Dockerfile".',
    short_name='p')

flags.DEFINE_multi_string(
    'release', [],
    'Set of releases to build and tag. Defaults to every release type.',
    short_name='r')

flags.DEFINE_multi_string(
    'arg', [],
    ('Extra build arguments. These are used for expanding tag names if needed '
     '(e.g. --arg _TAG_PREFIX=foo) and for using as build arguments (unused '
     'args will print a warning).'),
    short_name='a')

flags.DEFINE_boolean(
    'nocache', False,
    'Disable the Docker build cache; identical to "docker build --no-cache"')

flags.DEFINE_string(
    'spec_file',
    './spec.yml',
    'Path to the YAML specification file',
    short_name='s')
# Schema to verify the contents of tag-spec.yml with Cerberus.
# Must be converted to a dict from yaml to work.
# Note: can add python references with e.g.
# !!python/name:builtins.str
# !!python/name:__main__.funcname
# (but this may not be considered safe?)
SCHEMA_TEXT = """
header:
type: string
slice_sets:
type: dict
keyschema:
type: string
valueschema:
type: list
schema:
type: dict
schema:
add_to_name:
type: string
dockerfile_exclusive_name:
type: string
dockerfile_subdirectory:
type: string
partials:
type: list
schema:
type: string
ispartial: true
test_runtime:
type: string
required: false
tests:
type: list
default: []
schema:
type: string
args:
type: list
default: []
schema:
type: string
isfullarg: true
releases:
type: dict
keyschema:
type: string
valueschema:
type: dict
schema:
is_dockerfiles:
type: boolean
required: false
default: false
upload_images:
type: boolean
required: false
default: true
tag_specs:
type: list
required: true
schema:
type: string
"""
class TfDockerTagValidator(cerberus.Validator):
    """Custom Cerberus validator for TF tag spec.

    Note: Each _validate_foo function's docstring must end with a segment
    describing its own validation schema, e.g. "The rule's arguments are...". If
    you add a new validator, you can copy/paste that section.
    """

    def __init__(self, *args, **kwargs):
        # See http://docs.python-cerberus.org/en/stable/customize.html
        # 'partials' is our own kwarg (the dict of known partial files);
        # stash it before delegating construction.
        if 'partials' in kwargs:
            self.partials = kwargs['partials']
        # NOTE(review): super(cerberus.Validator, self) skips
        # cerberus.Validator.__init__ and dispatches to its base class --
        # unusual, but kept as-is; confirm against the cerberus
        # customization docs before changing.
        super(cerberus.Validator, self).__init__(*args, **kwargs)

    def _validate_ispartial(self, ispartial, field, value):
        """Validate that a partial references an existing partial spec.

        Args:
            ispartial: Value of the rule, a bool
            field: The field being validated
            value: The field's value

        The rule's arguments are validated against this schema:
        {'type': 'boolean'}
        """
        if ispartial and value not in self.partials:
            self._error(field,
                        '{} is not present in the partials directory.'.format(value))

    def _validate_isfullarg(self, isfullarg, field, value):
        """Validate that a string is either a FULL=arg or NOT.

        Args:
            isfullarg: Value of the rule, a bool
            field: The field being validated
            value: The field's value

        The rule's arguments are validated against this schema:
        {'type': 'boolean'}
        """
        if isfullarg and '=' not in value:
            self._error(field, '{} should be of the form ARG=VALUE.'.format(value))
        if not isfullarg and '=' in value:
            self._error(field, '{} should be of the form ARG (no =).'.format(value))
def eprint(*args, **kwargs):
    """print() to stderr, flushed immediately so progress logs stay ordered."""
    print(*args, file=sys.stderr, flush=True, **kwargs)
def aggregate_all_slice_combinations(spec, slice_set_names):
    """Figure out all of the possible slice groupings for a tag spec.

    Each slice entry is annotated with its owning set's name ('set_name'),
    then the cartesian product across the named sets is returned.
    """
    # Deep-copy so annotating entries never mutates the caller's spec.
    slice_sets = copy.deepcopy(spec['slice_sets'])

    for set_name in slice_set_names:
        for entry in slice_sets[set_name]:
            entry['set_name'] = set_name

    ordered_sets = (slice_sets[set_name] for set_name in slice_set_names)
    return list(itertools.product(*ordered_sets))
def build_name_from_slices(format_string, slices, args, is_dockerfile=False):
    """Build the tag name (cpu-devel...) from a list of slices.

    Starts from the CLI args, fills in each slice's 'add_to_name', and --
    when building a Dockerfile name -- lets 'dockerfile_exclusive_name'
    override it where present.
    """
    fields = copy.deepcopy(args)
    for entry in slices:
        fields[entry['set_name']] = entry['add_to_name']
    if is_dockerfile:
        for entry in slices:
            if 'dockerfile_exclusive_name' in entry:
                fields[entry['set_name']] = entry['dockerfile_exclusive_name']
    return format_string.format(**fields)
def update_args_dict(args_dict, updater):
    """Update a dict of arg values with more values from a list or dict.

    A list updater holds 'KEY=VALUE' strings (malformed entries without '='
    are silently skipped); a dict updater is merged in directly. Any other
    updater (e.g. None) leaves args_dict untouched.
    """
    if isinstance(updater, dict):
        args_dict.update(updater)
    elif isinstance(updater, list):
        for item in updater:
            key, sep, value = item.partition('=')
            if sep:
                args_dict[key] = value
    return args_dict
def get_slice_sets_and_required_args(slice_sets, tag_spec):
    """Extract used-slice-sets and required CLI arguments from a spec string.

    For example, {FOO}{bar}{bat} finds FOO, bar, and bat. Assuming bar and bat
    are both named slice sets, FOO must be specified on the command line.

    Args:
        slice_sets: Dict of named slice sets
        tag_spec: The tag spec string, e.g. {_FOO}{blep}

    Returns:
        (used_slice_sets, required_args), a tuple of lists
    """
    bracketed_names = re.findall(r'\{([^}]+)\}', tag_spec)
    used_slice_sets = [name for name in bracketed_names if name in slice_sets]
    required_args = [name for name in bracketed_names
                     if name not in slice_sets]
    return (used_slice_sets, required_args)
def gather_tag_args(slices, cli_input_args, required_args):
    """Build a dictionary of all the CLI and slice-specified args for a tag."""
    args = {}
    for slice_def in slices:
        args = update_args_dict(args, slice_def['args'])
    # CLI-provided args take precedence over slice-provided ones.
    args = update_args_dict(args, cli_input_args)

    for required in required_args:
        if required in args:
            continue
        eprint(('> Error: {} is not a valid slice_set, and also isn\'t an arg '
                'provided on the command line. If it is an arg, please specify '
                'it with --arg. If not, check the slice_sets list.'.format(required)))
        exit(1)

    return args
def gather_slice_list_items(slices, key):
    """For a list of slices, get the flattened list of all of a certain key."""
    gathered = []
    for slice_def in slices:
        if key in slice_def:
            gathered.extend(slice_def[key])
    return gathered
def find_first_slice_value(slices, key):
    """For a list of slices, get the first non-None value for a certain key."""
    return next(
        (slice_def[key] for slice_def in slices
         if key in slice_def and slice_def[key] is not None),
        None)
def assemble_tags(spec, cli_args, enabled_releases, all_partials):
    """Gather all the tags based on our spec.

    Args:
        spec: Nested dict containing full Tag spec
        cli_args: List of ARG=foo arguments to pass along to Docker build
        enabled_releases: List of releases to parse. Empty list = all
        all_partials: Dict of every partial, for reference

    Returns:
        Dict of tags and how to build them
    """
    tag_data = collections.defaultdict(list)

    for release_name, release in spec['releases'].items():
        for tag_spec in release['tag_specs']:
            if enabled_releases and release_name not in enabled_releases:
                eprint('> Skipping release {}'.format(release_name))
                continue

            used_slice_sets, required_cli_args = get_slice_sets_and_required_args(
                spec['slice_sets'], tag_spec)

            for slice_combo in aggregate_all_slice_combinations(
                    spec, used_slice_sets):
                tag_args = gather_tag_args(slice_combo, cli_args,
                                           required_cli_args)
                tag_name = build_name_from_slices(tag_spec, slice_combo,
                                                  tag_args,
                                                  release['is_dockerfiles'])
                used_partials = gather_slice_list_items(slice_combo, 'partials')
                used_tests = gather_slice_list_items(slice_combo, 'tests')
                test_runtime = find_first_slice_value(slice_combo,
                                                      'test_runtime')
                dockerfile_subdirectory = find_first_slice_value(
                    slice_combo, 'dockerfile_subdirectory')
                dockerfile_contents = merge_partials(
                    spec['header'], used_partials, all_partials)

                tag_data[tag_name].append({
                    'release': release_name,
                    'tag_spec': tag_spec,
                    'is_dockerfiles': release['is_dockerfiles'],
                    'upload_images': release['upload_images'],
                    'cli_args': tag_args,
                    'dockerfile_subdirectory': dockerfile_subdirectory or '',
                    'partials': used_partials,
                    'tests': used_tests,
                    'test_runtime': test_runtime,
                    'dockerfile_contents': dockerfile_contents,
                })

    return tag_data
def merge_partials(header, used_partials, all_partials):
    """Merge all partial contents with their header."""
    sections = [header]
    sections.extend(all_partials[name] for name in used_partials)
    return '\n'.join(sections)
def upload_in_background(hub_repository, dock, image, tag):
    """Upload a docker image (to be used by multiprocessing).

    Tags `image` into `hub_repository` under `tag`, then pushes it; the
    push output is printed to stdout.
    """
    image.tag(hub_repository, tag=tag)
    print(dock.images.push(hub_repository, tag=tag))
def mkdir_p(path):
    """Create a directory and its parents, even if it already exists."""
    try:
        os.makedirs(path)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return  # Already exists; nothing to do.
        raise
def gather_existing_partials(partial_path):
    """Find and read all available partials.

    Args:
        partial_path (string): read partials from this directory.

    Returns:
        Dict[string, string] of partial short names (like "ubuntu/python" or
        "bazel") to the full contents of that partial.
    """
    # Single suffix constant: the original stripped with a lowercase
    # '.partial.dockerfile' literal that only worked because it happened to
    # have the same length as the '.partial.Dockerfile' marker.
    suffix = '.partial.Dockerfile'
    partials = {}
    for path, _, files in os.walk(partial_path):
        for name in files:
            fullpath = os.path.join(path, name)
            if suffix not in fullpath:
                eprint(('> Probably not a problem: skipping {}, which is not a '
                        'partial.').format(fullpath))
                continue
            # partial_dir/foo/bar.partial.Dockerfile -> foo/bar
            simple_name = fullpath[len(partial_path) + 1:-len(suffix)]
            with open(fullpath, 'r') as f:
                partials[simple_name] = f.read()
    return partials
def main(argv):
    """Assemble Dockerfiles and optionally build, test, and upload images.

    Workflow (driven entirely by FLAGS):
      1. Load the YAML spec file and validate it against SCHEMA_TEXT.
      2. Expand every release's tag specs into concrete tags.
      3. Optionally write Dockerfiles, build images (streaming build logs),
         run test scripts in containers, and upload images to Docker Hub.

    Logs go to stderr; successfully built tag names are printed to stdout.
    Exits with status 1 on validation failure, or on build/test failure
    (immediately if --stop_on_failure, otherwise after processing all tags).
    """
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    # Read the full spec file, used for everything
    with open(FLAGS.spec_file, 'r') as spec_file:
        tag_spec = yaml.safe_load(spec_file)

    # Get existing partial contents
    partials = gather_existing_partials(FLAGS.partial_dir)

    # Abort if spec.yaml is invalid
    schema = yaml.safe_load(SCHEMA_TEXT)
    v = TfDockerTagValidator(schema, partials=partials)
    if not v.validate(tag_spec):
        eprint('> Error: {} is an invalid spec! The errors are:'.format(
            FLAGS.spec_file))
        eprint(yaml.dump(v.errors, indent=2))
        exit(1)
    tag_spec = v.normalized(tag_spec)

    # Assemble tags and images used to build them
    all_tags = assemble_tags(tag_spec, FLAGS.arg, FLAGS.release, partials)

    # Empty Dockerfile directory if building new Dockerfiles
    if FLAGS.construct_dockerfiles:
        eprint('> Emptying Dockerfile dir "{}"'.format(FLAGS.dockerfile_dir))
        shutil.rmtree(FLAGS.dockerfile_dir, ignore_errors=True)
        mkdir_p(FLAGS.dockerfile_dir)

    # Set up Docker helper
    dock = docker.from_env()

    # Login to Docker if uploading images
    if FLAGS.upload_to_hub:
        if not FLAGS.hub_username:
            eprint('> Error: please set --hub_username when uploading to Dockerhub.')
            exit(1)
        if not FLAGS.hub_repository:
            eprint(
                '> Error: please set --hub_repository when uploading to Dockerhub.')
            exit(1)
        if not FLAGS.hub_password:
            eprint('> Error: please set --hub_password when uploading to Dockerhub.')
            exit(1)
        dock.login(
            username=FLAGS.hub_username,
            password=FLAGS.hub_password,
        )

    # Each tag has a name ('tag') and a definition consisting of the contents
    # of its Dockerfile, its build arg list, etc.
    failed_tags = []
    succeeded_tags = []
    for tag, tag_defs in all_tags.items():
        for tag_def in tag_defs:
            eprint('> Working on {}'.format(tag))

            if FLAGS.exclude_tags_matching and re.match(
                    FLAGS.exclude_tags_matching, tag):
                eprint('>> Excluded due to match against "{}".'.format(
                    FLAGS.exclude_tags_matching))
                continue

            if FLAGS.only_tags_matching and not re.match(
                    FLAGS.only_tags_matching, tag):
                eprint('>> Excluded due to failure to match against "{}".'.format(
                    FLAGS.only_tags_matching))
                continue

            # Write releases marked "is_dockerfiles" into the Dockerfile dir
            if FLAGS.construct_dockerfiles and tag_def['is_dockerfiles']:
                path = os.path.join(FLAGS.dockerfile_dir,
                                    tag_def['dockerfile_subdirectory'],
                                    tag + '.Dockerfile')
                eprint('>> Writing {}...'.format(path))
                if not FLAGS.dry_run:
                    mkdir_p(os.path.dirname(path))
                    with open(path, 'w') as f:
                        f.write(tag_def['dockerfile_contents'])

            # Don't build any images for dockerfile-only releases
            if not FLAGS.build_images:
                continue

            # Only build images for host architecture
            proc_arch = platform.processor()
            is_x86 = proc_arch.startswith('x86')
            if (is_x86 and any([arch in tag for arch in ['ppc64le']]) or
                    not is_x86 and proc_arch not in tag):
                continue

            # Generate a temporary Dockerfile to use to build, since docker-py
            # needs a filepath relative to the build context (i.e. the current
            # directory)
            dockerfile = os.path.join(FLAGS.dockerfile_dir,
                                      tag + '.temp.Dockerfile')
            if not FLAGS.dry_run:
                with open(dockerfile, 'w') as f:
                    f.write(tag_def['dockerfile_contents'])
            eprint('>> (Temporary) writing {}...'.format(dockerfile))

            repo_tag = '{}:{}'.format(FLAGS.repository, tag)
            eprint('>> Building {} using build args:'.format(repo_tag))
            for arg, value in tag_def['cli_args'].items():
                eprint('>>> {}={}'.format(arg, value))

            # Note that we are NOT using cache_from, which appears to limit
            # available cache layers to those from explicitly specified layers.
            # Many of our layers are similar between local builds, so we want
            # to use the implied local build cache.
            tag_failed = False
            image, logs = None, []
            if not FLAGS.dry_run:
                try:
                    # Use low level APIClient in order to stream log output
                    resp = dock.api.build(
                        timeout=FLAGS.hub_timeout,
                        path='.',
                        nocache=FLAGS.nocache,
                        dockerfile=dockerfile,
                        buildargs=tag_def['cli_args'],
                        tag=repo_tag)
                    last_event = None
                    image_id = None
                    # Manually process log output extracting build success and
                    # image id in order to get built image
                    while True:
                        try:
                            output = next(resp).decode('utf-8')
                            json_output = json.loads(output.strip('\r\n'))
                            if 'stream' in json_output:
                                eprint(json_output['stream'], end='')
                                match = re.search(
                                    r'(^Successfully built |sha256:)([0-9a-f]+)$',
                                    json_output['stream'])
                                if match:
                                    image_id = match.group(2)
                                last_event = json_output['stream']
                                # collect all log lines into the logs object
                                logs.append(json_output)
                        except StopIteration:
                            eprint('Docker image build complete.')
                            break
                        except ValueError:
                            eprint('Error parsing from docker image build: {}'.format(
                                output))

                    # If the image id was never seen, the image failed to build
                    # properly. Raise an error with the last log line and logs.
                    if image_id:
                        image = dock.images.get(image_id)
                    else:
                        raise docker.errors.BuildError(last_event or 'Unknown', logs)

                    # Run tests if requested, and dump output
                    # Could be improved by backgrounding, but would need better
                    # multiprocessing support to track failures properly.
                    if FLAGS.run_tests_path:
                        if not tag_def['tests']:
                            eprint('>>> No tests to run.')
                        for test in tag_def['tests']:
                            eprint('>> Testing {}...'.format(test))
                            # detach=True makes run() return the Container.
                            container = dock.containers.run(
                                image,
                                '/tests/' + test,
                                working_dir='/',
                                log_config={'type': 'journald'},
                                detach=True,
                                stderr=True,
                                stdout=True,
                                volumes={
                                    FLAGS.run_tests_path: {
                                        'bind': '/tests',
                                        'mode': 'ro'
                                    }
                                },
                                runtime=tag_def['test_runtime'])
                            ret = container.wait()
                            code = ret['StatusCode']
                            out = container.logs(stdout=True, stderr=False)
                            err = container.logs(stdout=False, stderr=True)
                            container.remove()
                            if out:
                                eprint('>>> Output stdout:')
                                eprint(out.decode('utf-8'))
                            else:
                                eprint('>>> No test standard out.')
                            if err:
                                eprint('>>> Output stderr:')
                                # Fixed: previously echoed stdout here instead
                                # of the captured stderr.
                                eprint(err.decode('utf-8'))
                            else:
                                eprint('>>> No test standard err.')
                            if code != 0:
                                eprint('>> {} failed tests with status: "{}"'.format(
                                    repo_tag, code))
                                failed_tags.append(tag)
                                tag_failed = True
                                if FLAGS.stop_on_failure:
                                    eprint('>> ABORTING due to --stop_on_failure!')
                                    exit(1)
                            else:
                                eprint('>> Tests look good!')

                except docker.errors.BuildError as e:
                    eprint('>> {} failed to build with message: "{}"'.format(
                        repo_tag, e.msg))
                    eprint('>> Build logs follow:')
                    log_lines = [l.get('stream', '') for l in e.build_log]
                    eprint(''.join(log_lines))
                    failed_tags.append(tag)
                    tag_failed = True
                    if FLAGS.stop_on_failure:
                        eprint('>> ABORTING due to --stop_on_failure!')
                        exit(1)

                # Clean temporary dockerfiles if they were created earlier
                if not FLAGS.keep_temp_dockerfiles:
                    os.remove(dockerfile)

            # Upload new images to DockerHub as long as they built+passed tests
            if FLAGS.upload_to_hub:
                if not tag_def['upload_images']:
                    continue
                if tag_failed:
                    continue

                eprint('>> Uploading to {}:{}'.format(FLAGS.hub_repository, tag))
                if not FLAGS.dry_run:
                    p = multiprocessing.Process(
                        target=upload_in_background,
                        args=(FLAGS.hub_repository, dock, image, tag))
                    p.start()

            if not tag_failed:
                succeeded_tags.append(tag)

    if failed_tags:
        eprint(
            '> Some tags failed to build or failed testing, check scrollback for '
            'errors: {}'.format(','.join(failed_tags)))
        exit(1)

    eprint('> Writing built{} tags to standard out.'.format(
        ' and tested' if FLAGS.run_tests_path else ''))
    for tag in succeeded_tags:
        print('{}:{}'.format(FLAGS.repository, tag))
# Parse absl flags, then dispatch to main().
if __name__ == '__main__':
    app.run(main)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple HTTP Server
"""
import os
import sys
import re
import cgi
import argparse
import functools
import warnings
import urllib
import httplib
import socket
import SocketServer
import BaseHTTPServer
from urllib import urlencode
from SimpleHTTPServer import SimpleHTTPRequestHandler
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# *********************************************************************
# Common
# *********************************************************************
# Map of status code -> reason phrase, seeded from the stdlib table and
# extended with newer RFC codes that older Python versions do not know about.
HTTP_CODES = httplib.responses
HTTP_CODES.update({
    418: "I'm a teapot",  # RFC 2324
    422: "Unprocessable Entity",  # RFC 4918
    428: "Precondition Required",
    429: "Too Many Requests",
    431: "Request Header Fields Too Large",
    511: "Network Authentication Required",
})
# Pre-rendered "<code> <reason>" status lines for response headers.
_HTTP_STATUS_LINES = {k: '%d %s' % (k, v) for (k, v) in HTTP_CODES.items()}
# *********************************************************************
# utils
# *********************************************************************
class HTTPError(Exception):
    """Exception carrying an HTTP status code, an optional body and extra
    response headers (passed as keyword arguments, e.g. ``Allow='GET'``).

    Previously ``status``, ``body`` and ``**more_headers`` were accepted but
    silently discarded, so code catching the error (e.g. after
    ``raise HTTPError(405, ..., Allow=...)``) could not recover them.
    """
    default_status = 500

    def __init__(self, status=None, body=None, exception=None,
                 traceback=None, **more_headers):
        # Fall back to the class default when no status is given.
        self.status = status or self.default_status
        self.body = body
        self.exception = exception
        self.traceback = traceback
        # Any extra keyword arguments become response headers.
        self.headers = more_headers
def depr(message, hard=False):
    # Emit a DeprecationWarning attributed to the caller's caller (stacklevel=3).
    # NOTE(review): 'hard' is accepted but currently ignored — presumably it was
    # meant to escalate to an error; confirm before relying on it.
    warnings.warn(message, DeprecationWarning, stacklevel=3)
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
    """Best-effort functools.update_wrapper that ignores missing attributes."""
    try:
        functools.update_wrapper(wrapper, wrapped, *a, **ka)
    except AttributeError:
        # Some callables (builtins, partials, bound methods) lack the
        # attributes functools tries to copy; skipping them is harmless.
        pass
# *********************************************************************
# router
# *********************************************************************
class RouteError(Exception):
    """Base class for all routing related exceptions."""


class RouteReset(Exception):
    """If raised by a plugin or request handler, the route is reset and all
    plugins are re-applied."""


class RouterUnknownModeError(RouteError):
    """Raised when the router is asked to operate in an unknown mode."""


class RouteSyntaxError(RouteError):
    """The route parser found something not supported by this router."""


class RouteBuildError(RouteError):
    """The route could not be built."""
def _re_flatten(p):
"""
Turn all capturing groups in a regular expression pattern into
non-capturing groups.
"""
if '(' not in p: return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))',
lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p)
class Router(object):
    """An ordered collection of route->target pairs used to match WSGI
    requests. A route is a path rule (static, or dynamic with
    ``<name:filter:conf>`` wildcards) plus an HTTP method.

    Static routes live in a dict for O(1) lookup; dynamic routes are
    combined into large alternation regexes (at most
    ``_MAX_GROUPS_PER_PATTERN`` per compiled pattern).
    """
    default_pattern = '[^/]+'
    default_filter = 're'
    #: The current CPython regexp implementation does not allow more
    #: than 99 matching groups per regular expression.
    _MAX_GROUPS_PER_PATTERN = 99

    def __init__(self, strict=False):
        self.rules = []  # All rules in order
        self._groups = {}  # index of regexes to find them in dyna_routes
        self.builder = {}  # Data structure for the url builder
        self.static = {}  # Search structure for static routes
        self.dyna_routes = {}
        self.dyna_regexes = {}  # Search structure for dynamic routes
        #: If true, static routes are no longer checked first.
        self.strict_order = strict
        # Wildcard filters: name -> conf -> (regex mask, in-filter, out-filter)
        self.filters = {
            're': lambda conf: (_re_flatten(conf or self.default_pattern), None, None),
            'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
            'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
            'path': lambda conf: (r'.+?', None, None)
        }

    # Groups: 1 = escaping backslashes, 2 = name, 3 = filter, 4 = conf
    rule_syntax = re.compile('(\\\\*)(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>)')

    def _itertokens(self, rule):
        """Yield (token, filter, conf) triples for *rule*: static text yields
        (prefix, None, None); wildcards yield their name, filter name
        ('default' when omitted) and configuration string."""
        offset, prefix = 0, ''
        for match in self.rule_syntax.finditer(rule):
            prefix += rule[offset:match.start()]
            g = match.groups()
            if len(g[0]) % 2:  # Escaped wildcard
                prefix += match.group(0)[len(g[0]):]
                offset = match.end()
                continue
            if prefix:
                yield prefix, None, None
            # BUG FIX: rule_syntax only has 4 groups, so the previous
            # `g[4:7] if g[2] is None else g[1:4]` raised a ValueError
            # (unpacking an empty slice) for filterless wildcards like <name>.
            name, filtr, conf = g[1:4]
            yield name, filtr or 'default', conf or None
            offset, prefix = match.end(), ''
        if offset <= len(rule) or prefix:
            yield prefix + rule[offset:], None, None

    def add(self, rule, method, target, name=None):
        """ Add a new rule or replace the target for an existing rule. """
        anons = 0  # Number of anonymous wildcards found
        keys = []  # Names of keys
        pattern = ''  # Regular expression pattern with named groups
        filters = []  # Lists of wildcard input filters
        builder = []  # Data structure for the URL builder
        is_static = True
        for key, mode, conf in self._itertokens(rule):
            if mode:
                is_static = False
                if mode == 'default': mode = self.default_filter
                mask, in_filter, out_filter = self.filters[mode](conf)
                if not key:
                    # Anonymous wildcard: match but do not capture by name.
                    pattern += '(?:%s)' % mask
                    key = 'anon%d' % anons
                    anons += 1
                else:
                    pattern += '(?P<%s>%s)' % (key, mask)
                    keys.append(key)
                if in_filter: filters.append((key, in_filter))
                builder.append((key, out_filter or str))
            elif key:
                pattern += re.escape(key)
                builder.append((None, key))
        self.builder[rule] = builder
        if name: self.builder[name] = builder
        if is_static and not self.strict_order:
            self.static.setdefault(method, {})
            self.static[method][self.build(rule)] = (target, None)
            return
        try:
            re_pattern = re.compile('^(%s)$' % pattern)
            re_match = re_pattern.match
        except re.error as e:
            raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, e))
        if filters:
            def getargs(path):
                url_args = re_match(path).groupdict()
                for name, wildcard_filter in filters:
                    try:
                        url_args[name] = wildcard_filter(url_args[name])
                    except ValueError:
                        raise HTTPError(400, 'Path has wrong format.')
                return url_args
        elif re_pattern.groupindex:
            def getargs(path):
                return re_match(path).groupdict()
        else:
            getargs = None
        flatpat = _re_flatten(pattern)
        whole_rule = (rule, flatpat, target, getargs)
        if (flatpat, method) in self._groups:
            # Same pattern already registered for this method: replace it.
            self.dyna_routes[method][
                self._groups[flatpat, method]] = whole_rule
        else:
            self.dyna_routes.setdefault(method, []).append(whole_rule)
            self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
        self._compile(method)

    def _compile(self, method):
        """Rebuild the combined alternation regexes for *method*."""
        all_rules = self.dyna_routes[method]
        comborules = self.dyna_regexes[method] = []
        maxgroups = self._MAX_GROUPS_PER_PATTERN
        for x in range(0, len(all_rules), maxgroups):
            some = all_rules[x:x + maxgroups]
            combined = (flatpat for (_, flatpat, _, _) in some)
            combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
            combined = re.compile(combined).match
            rules = [(target, getargs) for (_, _, target, getargs) in some]
            comborules.append((combined, rules))

    def build(self, _name, *anons, **query):
        """ Build an URL by filling the wildcards in a rule. """
        builder = self.builder.get(_name)
        if not builder:
            raise RouteBuildError("No route with that name.", _name)
        try:
            for i, value in enumerate(anons):
                query['anon%d' % i] = value
            url = ''.join([f(query.pop(n)) if n else f for (n, f) in builder])
            # Leftover keyword arguments become the query string.
            return url if not query else url + '?' + urlencode(query)
        except KeyError as E:
            raise RouteBuildError('Missing URL argument: %r' % E.args[0])

    def match(self, environ):
        """ Return a (target, url_args) tuple or raise HTTPError(400/404/405). """
        verb = environ['REQUEST_METHOD'].upper()
        path = environ['PATH_INFO'] or '/'
        if verb == 'HEAD':
            methods = ['PROXY', verb, 'GET', 'ANY']
        else:
            methods = ['PROXY', verb, 'ANY']
        for method in methods:
            if method in self.static and path in self.static[method]:
                target, getargs = self.static[method][path]
                return target, getargs(path) if getargs else {}
            elif method in self.dyna_regexes:
                for combined, rules in self.dyna_regexes[method]:
                    match = combined(path)
                    if match:
                        # lastindex identifies which alternative matched.
                        target, getargs = rules[match.lastindex - 1]
                        return target, getargs(path) if getargs else {}
        # No matching route found. Collect alternative methods for 405 response
        allowed = set([])
        nocheck = set(methods)
        for method in set(self.static) - nocheck:
            if path in self.static[method]:
                # BUG FIX: was allowed.add(verb), which advertised the
                # *requested* verb in the Allow header instead of the
                # method that actually supports this path.
                allowed.add(method)
        for method in set(self.dyna_regexes) - allowed - nocheck:
            for combined, rules in self.dyna_regexes[method]:
                match = combined(path)
                if match:
                    allowed.add(method)
        if allowed:
            allow_header = ",".join(sorted(allowed))
            raise HTTPError(405, "Method not allowed.", Allow=allow_header)
        # No matching route and no alternative method found. We give up
        raise HTTPError(404, "Not found: " + repr(path))
# *********************************************************************
# http
# *********************************************************************
class HTTPServer(SocketServer.ThreadingTCPServer):
    """Threaded TCP server that records its resolved host name and port."""
    allow_reuse_address = 1  # Seems to make sense in testing environment
    request_queue_size = 4

    def server_bind(self):
        """ Override server_bind to store the server name. """
        SocketServer.TCPServer.server_bind(self)
        sockname = self.socket.getsockname()
        self.server_name = socket.getfqdn(sockname[0])
        self.server_port = sockname[1]
class HTTPRequestHandler(SimpleHTTPRequestHandler):
    """Request handler extending SimpleHTTPRequestHandler with multipart
    file upload (POST) and an upload form in directory listings.

    Python 2 only: relies on mimetools headers (plisttext), byte strings
    and the Python 2 StringIO/urllib APIs.
    """
    server_version = "SimpleHTTPServerWithUpload"
    default_request_version = "HTTP/0.9"
    protocol_version = "HTTP/1.0"
    def do_POST(self):
        """Serve a POST request."""
        # Consume the multipart body first, then report the outcome as HTML.
        r, info = self.deal_post_data()
        self.log_message('%s %s by: %s', r, info, self.client_address)
        f = StringIO()
        f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
        f.write("<html>\n<title>Upload Result Page</title>\n")
        f.write("<body>\n<h2>Upload Result Page</h2>\n")
        f.write("<hr>\n")
        if r:
            f.write("<strong>Success:</strong>")
        else:
            f.write("<strong>Failed:</strong>")
        f.write(info)
        # NOTE(review): assumes a 'referer' header is present; a request
        # without one would render "back" linking to None — confirm.
        f.write("<br><a href=\"%s\">back</a>" % self.headers['referer'])
        f.write("<hr><small>Powerd By: bones7456, check new version at ")
        f.write("<a href=\"http://li2z.cn/?s=SimpleHTTPServerWithUpload\">")
        f.write("here</a>.</small></body>\n</html>\n")
        length = f.tell()
        f.seek(0)
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-Length", str(length))
        self.end_headers()
        if f:
            self.copyfile(f, self.wfile)
            f.close()
    def deal_post_data(self):
        """Parse a single-file multipart/form-data body and write the file
        into the directory mapped from the request path.

        Returns (success_flag, message). Only handles forms with exactly one
        part named "file" (as produced by list_directory's upload form).
        """
        # Boundary comes from the Content-Type parameter list (py2 mimetools).
        boundary = self.headers.plisttext.split("=")[1]
        remainbytes = int(self.headers['content-length'])
        line = self.rfile.readline()
        remainbytes -= len(line)
        if boundary not in line:
            return False, "Content NOT begin with boundary"
        line = self.rfile.readline()
        remainbytes -= len(line)
        # Extract the client-supplied filename from Content-Disposition.
        fn = re.findall(r'Content-Disposition.*name="file"; filename="(.*)"', line)
        if not fn:
            return False, "Can't find out file name..."
        path = self.translate_path(self.path)
        fn = os.path.join(path, fn[0])
        # Skip the Content-Type header line and the blank separator line.
        line = self.rfile.readline()
        remainbytes -= len(line)
        line = self.rfile.readline()
        remainbytes -= len(line)
        try:
            out = open(fn, 'wb')
        except IOError:
            return False, "Can't create file to write, do you have permission to write?"
        # Write each line one step behind so the trailing CRLF before the
        # closing boundary can be stripped from the final chunk.
        preline = self.rfile.readline()
        remainbytes -= len(preline)
        while remainbytes > 0:
            line = self.rfile.readline()
            remainbytes -= len(line)
            if boundary in line:
                preline = preline[0:-1]
                if preline.endswith('\r'):
                    preline = preline[0:-1]
                out.write(preline)
                out.close()
                return True, "File '%s' upload success!" % fn
            else:
                out.write(preline)
                preline = line
        return False, "Unexpect Ends of data."
    def list_directory(self, path):
        """Helper to produce a directory listing (absent index.html).
        Return value is either a file object, or None (indicating an
        error). In either case, the headers are sent, making the
        interface the same as for send_head().
        """
        try:
            names = os.listdir(path)
        except os.error:
            self.send_error(404, "No permission to list directory")
            return None
        f = StringIO()
        displaypath = cgi.escape(urllib.unquote(self.path))
        f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
        f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
        f.write('<head><meta charset="utf-8"/></head>\n')
        f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
        f.write("<hr>\n")
        # Upload form posting back to this same path (handled by do_POST).
        f.write("<form ENCTYPE=\"multipart/form-data\" method=\"post\">")
        f.write("<input name=\"file\" type=\"file\"/>")
        f.write("<input type=\"submit\" value=\"upload\"/></form>\n")
        f.write("<hr>\n<ul>\n")
        names.sort(key=lambda x: x.lower())
        for name in names:
            fullname = os.path.join(path, name)
            text = href = name
            # Append / for directories or @ for symbolic links
            if os.path.isdir(fullname):
                text = name + "/"
                href = name + "/"
            if os.path.islink(fullname):
                text = name + "@"
                # Note: a link to a directory displays with @ and links with /
            f.write('<li><a href="%s">%s</a>\n' % (urllib.quote(href), cgi.escape(text)))
        f.write("</ul>\n<hr>\n</body>\n</html>\n")
        length = f.tell()
        f.seek(0)
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-Length", str(length))
        self.end_headers()
        return f
def _handle_commandline():
parser = argparse.ArgumentParser(description='Simple HttpServer')
parser.add_argument('-b', '--bind',
action='store', dest='host', default=None, metavar='host',
help='host to bind default to 0.0.0.0')
parser.add_argument('-p', '--port',
action='store', type=int, dest='port', default=None, metavar='port',
help='port to listen default to 8000')
parser.add_argument('-d', '--debug',
action='store_true', dest='debug', default=False,
help='start server debug mode')
return vars(parser.parse_args())
if __name__ == "__main__":
    # Parse CLI options, bind the threaded server and serve until Ctrl-C.
    args = _handle_commandline()
    server_address = (args['host'], args['port'])
    debug = args['debug']  # NOTE(review): parsed but never used below — confirm intent.
    httpd = HTTPServer(server_address, HTTPRequestHandler)
    sa = httpd.socket.getsockname()
    print "Serving HTTP on", sa[0], "port", sa[1], "..."
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        print "Bye Bye"
| |
#!/usr/bin/env python3
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
try: # Python 3
import http.client as httplib
except ImportError: # Python 2
import httplib
import json
import struct
import re
import os
import os.path
import base64
import sys
import hashlib
import datetime
import time
from collections import namedtuple
from binascii import hexlify, unhexlify
# Global key=value settings parsed from the config file in the __main__ block.
settings = {}
##### Switch endian-ness #####
def hex_switchEndian(s):
    """ Switches the endianness of a hex string (in pairs of hex chars) """
    pairs = [s[i:i + 2].encode() for i in range(0, len(s), 2)]
    pairs.reverse()
    return b''.join(pairs).decode()
def uint32(x):
    """Truncate *x* to an unsigned 32-bit value."""
    mask32 = 0xFFFFFFFF
    return x & mask32
def bytereverse(x):
    """Swap the byte order of a 32-bit word; result is masked to 32 bits."""
    swapped = (((x) << 24) | (((x) << 8) & 0x00ff0000) |
               (((x) >> 8) & 0x0000ff00) | ((x) >> 24))
    return swapped & 0xffffffff
def bufreverse(in_buf):
    """Byte-swap every 32-bit word of *in_buf*; returns a new bytes object."""
    swapped_words = []
    for off in range(0, len(in_buf), 4):
        (word,) = struct.unpack('@I', in_buf[off:off + 4])
        swapped_words.append(struct.pack('@I', bytereverse(word)))
    return b''.join(swapped_words)
def wordreverse(in_buf):
    """Reverse the order of the 4-byte words in *in_buf*; bytes inside each
    word keep their order."""
    words = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
    return b''.join(reversed(words))
def calc_hdr_hash(blk_hdr):
    """Return the double-SHA256 digest of a raw block header."""
    first_round = hashlib.sha256(blk_hdr).digest()
    return hashlib.sha256(first_round).digest()
def calc_hash_str(blk_hdr):
    """Return the block hash as the conventional big-endian hex string."""
    raw = calc_hdr_hash(blk_hdr)
    raw = wordreverse(bufreverse(raw))
    return hexlify(raw).decode('utf-8')
def get_blk_dt(blk_hdr):
    """Extract the block timestamp (header offset 68, little-endian uint32).

    Returns (first-of-month datetime, raw unix timestamp)."""
    (nTime,) = struct.unpack("<I", blk_hdr[68:68 + 4])
    stamp = datetime.datetime.fromtimestamp(nTime)
    month_start = datetime.datetime(stamp.year, stamp.month, 1)
    return (month_start, nTime)
# When getting the list of block hashes, undo any byte reversals.
def get_block_hashes(settings):
    """Read the ordered list of block hashes from settings['hashlist'].

    Hashes are returned in file order. If settings['rev_hash_bytes'] is the
    string 'true', each hash is byte-reversed on the way in so the rest of
    the program works with a single byte order.

    BUG FIX: the file handle was opened without ever being closed; use a
    'with' block so it is released even on error.
    """
    blkindex = []
    with open(settings['hashlist'], "r") as f:
        for line in f:
            line = line.rstrip()
            if settings['rev_hash_bytes'] == 'true':
                line = hex_switchEndian(line)
            blkindex.append(line)
    print("Read " + str(len(blkindex)) + " hashes")
    return blkindex
# The block map shouldn't give or receive byte-reversed hashes.
def mkblockmap(blkindex):
    """Map block hash -> height for every hash in *blkindex*."""
    return {blkhash: height for height, blkhash in enumerate(blkindex)}
# Block header and extent on disk
# fn: input file number; offset: byte position of the payload in that file;
# inhdr: 8-byte magic+length prefix; blkhdr: 80-byte block header;
# size: payload length in bytes (without the 80-byte header).
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
    """Copies blocks from the input blk*.dat files into linear output files,
    ordered by the heights in *blkmap*. Blocks that appear in the input out
    of order are cached in memory (up to out_of_order_cache_sz bytes) or
    re-read from disk later via their recorded extents."""
    def __init__(self, settings, blkindex, blkmap):
        self.settings = settings
        self.blkindex = blkindex
        self.blkmap = blkmap
        self.inFn = 0
        self.inF = None
        self.outFn = 0
        self.outsz = 0
        self.outF = None
        self.outFname = None
        self.blkCountIn = 0
        self.blkCountOut = 0
        self.lastDate = datetime.datetime(2000, 1, 1)
        self.highTS = 1408893517 - 315360000
        self.timestampSplit = False
        self.fileOutput = True
        self.setFileTime = False
        self.maxOutSz = settings['max_out_sz']
        if 'output' in settings:
            self.fileOutput = False
        if settings['file_timestamp'] != 0:
            self.setFileTime = True
        if settings['split_timestamp'] != 0:
            self.timestampSplit = True
        # Extents and cache for out-of-order blocks
        self.blockExtents = {}
        self.outOfOrderData = {}
        self.outOfOrderSize = 0  # running total size for items in outOfOrderData

    def writeBlock(self, inhdr, blk_hdr, rawblock):
        """Append one block to the output, rotating output files when the
        size limit is reached or (optionally) when the month changes."""
        blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
        if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
            self.outF.close()
            if self.setFileTime:
                # BUG FIX: these were bare names (outFname/highTS), which
                # raised NameError; they are instance attributes.
                os.utime(self.outFname, (int(time.time()), self.highTS))
            self.outF = None
            self.outFname = None
            self.outFn = self.outFn + 1
            self.outsz = 0
        (blkDate, blkTS) = get_blk_dt(blk_hdr)
        if self.timestampSplit and (blkDate > self.lastDate):
            # BUG FIX: hash_str was an undefined bare name here; compute the
            # block hash locally for the log line.
            print("New month " + blkDate.strftime("%Y-%m") + " @ " + calc_hash_str(blk_hdr))
            # BUG FIX: lastDate/outF/setFileTime were bare names; without
            # 'self.' the month rollover never closed or rotated the output.
            self.lastDate = blkDate
            if self.outF:
                self.outF.close()
                if self.setFileTime:
                    os.utime(self.outFname, (int(time.time()), self.highTS))
                self.outF = None
                self.outFname = None
                self.outFn = self.outFn + 1
                self.outsz = 0
        if not self.outF:
            if self.fileOutput:
                # BUG FIX: self.outFname was never assigned (only a local
                # outFname), so every later os.utime(self.outFname) crashed.
                self.outFname = self.settings['output_file']
            else:
                self.outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
            print("Output file " + self.outFname)
            self.outF = open(self.outFname, "wb")
        self.outF.write(inhdr)
        self.outF.write(blk_hdr)
        self.outF.write(rawblock)
        self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)
        self.blkCountOut = self.blkCountOut + 1
        if blkTS > self.highTS:
            self.highTS = blkTS
        if (self.blkCountOut % 1000) == 0:
            print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
                  (self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))

    def inFileName(self, fn):
        """Path of input file number *fn* inside the configured input dir."""
        return os.path.join(self.settings['input'], "blk%05d.dat" % fn)

    def fetchBlock(self, extent):
        '''Fetch block contents from disk given extents'''
        with open(self.inFileName(extent.fn), "rb") as f:
            f.seek(extent.offset)
            return f.read(extent.size)

    def copyOneBlock(self):
        '''Find the next block to be written in the input, and copy it to the output.'''
        extent = self.blockExtents.pop(self.blkCountOut)
        if self.blkCountOut in self.outOfOrderData:
            # If the data is cached, use it from memory and remove from the cache
            rawblock = self.outOfOrderData.pop(self.blkCountOut)
            self.outOfOrderSize -= len(rawblock)
        else:  # Otherwise look up data on disk
            rawblock = self.fetchBlock(extent)
        self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)

    def run(self):
        """Scan all input files and emit every known block in height order."""
        while self.blkCountOut < len(self.blkindex):
            if not self.inF:
                fname = self.inFileName(self.inFn)
                print("Input file " + fname)
                try:
                    self.inF = open(fname, "rb")
                except IOError:
                    print("Premature end of block data")
                    return
            inhdr = self.inF.read(8)
            if (not inhdr or (inhdr[0] == "\0")):
                self.inF.close()
                self.inF = None
                self.inFn = self.inFn + 1
                continue
            inMagic = inhdr[:4]
            if (inMagic != self.settings['netmagic']):
                print("Invalid magic: " + hexlify(inMagic).decode('utf-8'))
                return
            inLenLE = inhdr[4:]
            su = struct.unpack("<I", inLenLE)
            inLen = su[0] - 80  # length without header
            blk_hdr = self.inF.read(80)
            inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)
            hash_str = calc_hash_str(blk_hdr)
            # BUG FIX: this checked the *global* blkmap instead of the map
            # passed to the constructor.
            if hash_str not in self.blkmap:
                print("Skipping unknown block " + hash_str)
                self.inF.seek(inLen, os.SEEK_CUR)
                continue
            blkHeight = self.blkmap[hash_str]
            self.blkCountIn += 1
            if self.blkCountOut == blkHeight:
                # If in-order block, just copy
                rawblock = self.inF.read(inLen)
                self.writeBlock(inhdr, blk_hdr, rawblock)
                # See if we can catch up to prior out-of-order blocks
                while self.blkCountOut in self.blockExtents:
                    self.copyOneBlock()
            else:  # If out-of-order, skip over block data for now
                self.blockExtents[blkHeight] = inExtent
                if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
                    # If there is space in the cache, read the data
                    # Reading the data in file sequence instead of seeking and fetching it later is preferred,
                    # but we don't want to fill up memory
                    self.outOfOrderData[blkHeight] = self.inF.read(inLen)
                    self.outOfOrderSize += inLen
                else:  # If no space in cache, seek forward
                    self.inF.seek(inLen, os.SEEK_CUR)
        print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-data.py CONFIG-FILE")
        sys.exit(1)
    # Parse simple "key = value" config lines, skipping comments.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()
    # Force hash byte format setting to be lowercase to make comparisons easier.
    # Also place upfront in case any settings need to know about it.
    if 'rev_hash_bytes' not in settings:
        settings['rev_hash_bytes'] = 'false'
    settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
    # Fill in defaults for every optional setting.
    if 'netmagic' not in settings:
        settings['netmagic'] = 'f9beb4d9'
    if 'genesis' not in settings:
        settings['genesis'] = '000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f'
    if 'input' not in settings:
        settings['input'] = 'input'
    if 'hashlist' not in settings:
        settings['hashlist'] = 'hashlist.txt'
    if 'file_timestamp' not in settings:
        settings['file_timestamp'] = 0
    if 'split_timestamp' not in settings:
        settings['split_timestamp'] = 0
    if 'max_out_sz' not in settings:
        settings['max_out_sz'] = 1000 * 1000 * 1000
    if 'out_of_order_cache_sz' not in settings:
        settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
    # Normalize types: config values arrive as strings.
    settings['max_out_sz'] = int(settings['max_out_sz'])
    settings['split_timestamp'] = int(settings['split_timestamp'])
    settings['file_timestamp'] = int(settings['file_timestamp'])
    settings['netmagic'] = unhexlify(settings['netmagic'].encode('utf-8'))
    settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
    if 'output_file' not in settings and 'output' not in settings:
        print("Missing output file / directory")
        sys.exit(1)
    blkindex = get_block_hashes(settings)
    blkmap = mkblockmap(blkindex)
    # Block hash map won't be byte-reversed. Neither should the genesis hash.
    if not settings['genesis'] in blkmap:
        print("Genesis block not found in hashlist")
    else:
        BlockDataCopier(settings, blkindex, blkmap).run()
| |
"""This contains a set of tests for paratemp.sim_setup.para_temp_setup"""
########################################################################
# #
# This script was written by Thomas Heavey in 2018. #
# theavey@bu.edu thomasjheavey@gmail.com #
# #
# Copyright 2018 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
import distutils.spawn
import errno
import os
import pathlib
import pytest
import shutil
from paratemp.tools import cd
# File names the test fixtures are expected to provide.
n_gro = 'spc-and-methanol.gro'
n_top = 'spc-and-methanol.top'
n_template = 'templatemdp.txt'
n_ndx = 'index.ndx'
n_gro_o1 = 'PT-out0.gro'
n_gro_o2 = 'PT-out1.gro'
@pytest.fixture
def grompp():
    """Return the grompp command string for whichever GROMACS binary is
    installed, preferring the modern 'gmx' driver; raise if none is found."""
    candidates = (('gmx', 'gmx grompp'),
                  ('gmx_mpi', 'gmx_mpi grompp'),
                  ('grompp', 'grompp'))
    for executable, command in candidates:
        if distutils.spawn.find_executable(executable):
            return command
    raise OSError(errno.ENOENT, 'No GROMACS executable found')
class TestCompileTPRs(object):
    """Tests for paratemp.sim_setup.compile_tprs; all require the
    pt_blank_dir fixture and (except the dir check) a GROMACS install."""
    def test_pt_dir_blank(self, pt_blank_dir):
        # Sanity check: the fixture directory contains all required inputs.
        files_present = {f.name for f in pt_blank_dir.glob('*')}
        must_contain = {n_top, n_gro, n_template, n_ndx, n_gro_o1, n_gro_o2}
        assert must_contain - files_present == set()
    def test_basic(self, pt_blank_dir, grompp):
        """
        :param pathlib.PosixPath pt_blank_dir:
        :return:
        """
        from paratemp.sim_setup import compile_tprs
        from paratemp.tools import get_temperatures
        dir_topo = pt_blank_dir.joinpath('TOPO')
        dir_topo.mkdir()
        number = 2
        # Run inside TOPO so outputs land there; inputs referenced via '../'.
        with cd(dir_topo):
            compile_tprs(start_temp=298, scaling_exponent=0.025,
                         number=number,
                         template='../'+n_template,
                         structure='../'+n_gro,
                         base_name='nvt',
                         grompp_exe=grompp)
        assert dir_topo.exists()
        for i in range(number):
            assert dir_topo.joinpath('nvt{}.tpr'.format(i)).exists()
        assert get_temperatures(
            str(dir_topo.joinpath('temperatures.dat'))).shape == (2,)
    def test_multi_structure(self, pt_blank_dir, grompp):
        # One structure file per replica, selected by the 'PT-out' prefix.
        from paratemp.sim_setup import compile_tprs
        from paratemp.tools import get_temperatures
        dir_topo = pt_blank_dir.joinpath('TOPO')
        dir_topo.mkdir()
        number = 2
        with cd(dir_topo):
            compile_tprs(start_temp=298, scaling_exponent=0.025,
                         number=number,
                         template='../'+n_template,
                         multi_structure=True,
                         structure='../PT-out',
                         base_name='nvt',
                         grompp_exe=grompp)
        assert dir_topo.exists()
        for i in range(number):
            assert dir_topo.joinpath('nvt{}.tpr'.format(i)).exists()
        assert get_temperatures(
            str(dir_topo.joinpath('temperatures.dat'))).shape == (2,)
    def test_raises_os_error(self, pt_blank_dir, grompp):
        # Bad structure/topology paths must raise OSError with clear messages.
        from paratemp.sim_setup import compile_tprs
        dir_topo = pt_blank_dir.joinpath('TOPO')
        dir_topo.mkdir()
        number = 2
        with cd(dir_topo):
            with pytest.raises(OSError, match='Incorrect number of '
                                              'structure files found'):
                compile_tprs(start_temp=298, scaling_exponent=0.025,
                             number=number,
                             template='../'+n_template,
                             multi_structure=True,
                             structure='../',
                             base_name='nvt',
                             grompp_exe=grompp)
            with pytest.raises(
                    OSError, match='No structure file found'):
                compile_tprs(start_temp=298, scaling_exponent=0.025,
                             number=number,
                             template='../'+n_template,
                             structure='../not-here.gro',
                             base_name='nvt',
                             grompp_exe=grompp)
            with pytest.raises(
                    OSError, match='No topology file found'):
                compile_tprs(start_temp=298, scaling_exponent=0.025,
                             number=number,
                             template='../'+n_template,
                             structure='../'+n_gro,
                             topology='../not-here.top',
                             base_name='nvt',
                             grompp_exe=grompp)
    def test_raises_runtime_error(self, pt_blank_dir, grompp):
        # Passing a topology glob as the structure should make grompp fail.
        from paratemp.sim_setup import compile_tprs
        dir_topo = pt_blank_dir.joinpath('TOPO')
        dir_topo.mkdir()
        number = 2
        with cd(dir_topo):
            with pytest.raises(RuntimeError):
                compile_tprs(start_temp=298, scaling_exponent=0.025,
                             number=number,
                             template='../'+n_template,
                             structure='../*top',
                             base_name='nvt',
                             grompp_exe=grompp)
    def test_warns(self, pt_blank_dir, grompp):
        # An ambiguous glob matching several .gro files warns but still works.
        from paratemp.sim_setup import compile_tprs
        from paratemp.tools import get_temperatures
        dir_topo = pt_blank_dir.joinpath('TOPO')
        dir_topo.mkdir()
        number = 2
        with cd(dir_topo):
            with pytest.warns(
                    UserWarning, match=r'Found \d+ structure files'):
                compile_tprs(start_temp=298, scaling_exponent=0.025,
                             number=number,
                             template='../'+n_template,
                             structure='../*.gro',
                             base_name='nvt',
                             grompp_exe=grompp)
        assert dir_topo.exists()
        for i in range(number):
            assert dir_topo.joinpath('nvt{}.tpr'.format(i)).exists()
        assert get_temperatures(
            str(dir_topo.joinpath('temperatures.dat'))).shape == (2,)
class TestAddCptToSubScript(object):
    """Tests for _add_cpt_to_sub_script, which injects a checkpoint argument
    into the mdrun invocation of a submission script."""
    @pytest.fixture
    def sub_script_path(self, path_test_data):
        # Yields the path to a script WITHOUT a checkpoint argument, restoring
        # the original file afterwards from whichever backup exists.
        path = str(path_test_data / 'gromacs-start-job.sub')
        b_path = os.path.join(os.path.dirname(path),
                              'temp-submission-script.bak')
        b2_path = os.path.join(os.path.dirname(path),
                               'backup.bak')
        shutil.copy(path, b2_path)
        yield os.path.abspath(path)
        # If a backup of the original was made, copy the backup over the updated
        # version:
        if os.path.isfile(b_path):
            os.rename(b_path, path)
        else:
            os.rename(b2_path, path)
    @pytest.fixture
    def sub_script_path_cpt(self, path_test_data):
        # Same as sub_script_path but for a script that ALREADY has a
        # checkpoint argument (function under test should be a no-op).
        path = str(path_test_data / 'gromacs-start-job-cpt.sub')
        b_path = os.path.join(os.path.dirname(path),
                              'temp-submission-script.bak')
        b2_path = os.path.join(os.path.dirname(path),
                               'backup.bak')
        shutil.copy(path, b2_path)
        yield os.path.abspath(path)
        # If a backup of the original was made, copy the backup over the updated
        # version:
        if os.path.isfile(b_path):
            os.rename(b_path, path)
        else:
            os.rename(b2_path, path)
    def test_adding_to_script(self, sub_script_path):
        # Exactly one line (the mpirun/mdrun line) should change.
        orig_lines = open(sub_script_path, 'r').readlines()
        from paratemp.sim_setup.para_temp_setup import \
            _add_cpt_to_sub_script as acpt
        acpt(sub_script_path, 'checkpoint_test')
        new_lines = open(sub_script_path, 'r').readlines()
        for line in new_lines:
            if 'mpirun' in line:
                md_line = line
                break
        else:
            raise ValueError('Could not find "mpirun" line')
        assert 'checkpoint_test' in md_line
        assert len(set(new_lines) - set(orig_lines)) == 1
    def test_no_change(self, sub_script_path_cpt):
        # Script already names a checkpoint: file must be left untouched.
        orig_lines = open(sub_script_path_cpt, 'r').readlines()
        from paratemp.sim_setup.para_temp_setup import \
            _add_cpt_to_sub_script as acpt
        acpt(sub_script_path_cpt, 'checkpoint_test')
        new_lines = open(sub_script_path_cpt, 'r').readlines()
        for line in new_lines:
            if 'mpirun' in line:
                md_line = line
                break
        else:
            raise ValueError('Could not find "mpirun" line')
        assert 'checkpoint_test' not in md_line
        assert not len(set(new_lines) - set(orig_lines))
    def test_comment_line(self, sub_script_path):
        # Commented-out mpirun-like lines must not be edited.
        orig_lines = open(sub_script_path, 'r').readlines()
        from paratemp.sim_setup.para_temp_setup import \
            _add_cpt_to_sub_script as acpt
        acpt(sub_script_path, 'checkpoint_test')
        new_lines = open(sub_script_path, 'r').readlines()
        for line in new_lines:
            if 'comment' in line:
                new_comm_line = line
                break
        else:
            raise ValueError('Could not find "comment" line')
        for line in orig_lines:
            if 'comment' in line:
                orig_comm_line = line
                break
        else:
            raise ValueError('Could not find "comment" line')
        assert orig_comm_line == new_comm_line
    def test_raises_value_error(self, tmpdir):
        # An empty script has no mdrun line to patch.
        from paratemp.sim_setup.para_temp_setup import \
            _add_cpt_to_sub_script as acpt
        test_sub = tmpdir.join('test.sub').ensure()
        with pytest.raises(ValueError, match='Could not find GROMACS mdrun'):
            acpt(str(test_sub), 'checkpoint_test')
class TestFindCPTBase(object):
    """Tests for _find_cpt_base, which locates the checkpoint file prefix."""
    # NOTE(review): test_dir returns a value instead of asserting — it looks
    # like it was meant to be a fixture (cf. pt_run_dir below); confirm.
    def test_dir(self, path_test_data):
        pp = path_test_data / 'spc-and-methanol-run'
        if pp.exists():
            return pp
        else:
            # This would be bad if this test data somehow went missing...
            raise OSError(errno.ENOENT, 'run spc-and-methanol dir not found')
    def test_works(self, pt_run_dir):
        from paratemp.sim_setup.para_temp_setup import _find_cpt_base
        # _find_cpt_base expects a trailing separator on the directory path.
        cpt_base = _find_cpt_base(str(pt_run_dir)+'/')
        assert cpt_base == str(pt_run_dir.joinpath('PT-out'))
    def test_raises_value_error(self, tmpdir):
        # No checkpoint files in an empty dir -> ValueError.
        from paratemp.sim_setup.para_temp_setup import _find_cpt_base
        with pytest.raises(ValueError):
            _find_cpt_base(str(tmpdir))
| |
# Testing the line trace facility.
from test import support
import unittest
import sys
import difflib
import gc
# A very basic example. If this fails, we're in deep trouble.
# Each .events list holds (line, event) pairs expected from sys.settrace,
# with line numbers relative to the traced function's 'def' line. Do NOT
# insert or remove lines inside these function bodies.
def basic():
    return 1
basic.events = [(0, 'call'),
                (1, 'line'),
                (1, 'return')]
# Many of the tests below are tricky because they involve pass statements.
# If there is implicit control flow around a pass statement (in an except
# clause or else caluse) under what conditions do you set a line number
# following that clause?
# The entire "while 0:" statement is optimized away.  No code
# exists for it, so the line numbers skip directly from "del x"
# to "x = 1".
def arigo_example():
    x = 1
    del x
    while 0:
        pass
    x = 1
arigo_example.events = [(0, 'call'),
                        (1, 'line'),
                        (2, 'line'),
                        (5, 'line'),
                        (5, 'return')]
# check that lines consisting of just one instruction get traced:
def one_instr_line():
    x = 1
    del x
    x = 1
one_instr_line.events = [(0, 'call'),
                         (1, 'line'),
                         (2, 'line'),
                         (3, 'line'),
                         (3, 'return')]
# Both branches of the if/else inside the loop must be traced; the trailing
# '# N' comments show each body line's offset from the def line.
def no_pop_tops():      # 0
    x = 1               # 1
    for a in range(2):  # 2
        if a:           # 3
            x = 1       # 4
        else:           # 5
            x = 1       # 6
no_pop_tops.events = [(0, 'call'),
                      (1, 'line'),
                      (2, 'line'),
                      (3, 'line'),
                      (6, 'line'),
                      (2, 'line'),
                      (3, 'line'),
                      (4, 'line'),
                      (2, 'line'),
                      (2, 'return')]
# A while loop whose body never runs: only the test line is traced.
def no_pop_blocks():
    y = 1
    while not y:
        bla
    x = 1
no_pop_blocks.events = [(0, 'line'),
                        (1, 'line'),
                        (2, 'line'),
                        (4, 'line'),
                        (4, 'return')]
def called(): # line -3
x = 1
def call(): # line 0
called()
call.events = [(0, 'call'),
(1, 'line'),
(-3, 'call'),
(-2, 'line'),
(-2, 'return'),
(1, 'return')]
def raises():
raise Exception
def test_raise():
try:
raises()
except Exception as exc:
x = 1
test_raise.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(-3, 'call'),
(-2, 'line'),
(-2, 'exception'),
(-2, 'return'),
(2, 'exception'),
(3, 'line'),
(4, 'line'),
(4, 'return')]
def _settrace_and_return(tracefunc):
sys.settrace(tracefunc)
sys._getframe().f_back.f_trace = tracefunc
def settrace_and_return(tracefunc):
_settrace_and_return(tracefunc)
settrace_and_return.events = [(1, 'return')]
def _settrace_and_raise(tracefunc):
sys.settrace(tracefunc)
sys._getframe().f_back.f_trace = tracefunc
raise RuntimeError
def settrace_and_raise(tracefunc):
try:
_settrace_and_raise(tracefunc)
except RuntimeError as exc:
pass
settrace_and_raise.events = [(2, 'exception'),
(3, 'line'),
(4, 'line'),
(4, 'return')]
# implicit return example
# This test is interesting because of the else: pass
# part of the code. The code generate for the true
# part of the if contains a jump past the else branch.
# The compiler then generates an implicit "return None"
# Internally, the compiler visits the pass statement
# and stores its line number for use on the next instruction.
# The next instruction is the implicit return None.
# Per the note above: the implicit "return None" is attributed to the last
# visited line number -- the "pass" in the else branch (offset 6).
def ireturn_example():
    a = 5
    b = 5
    if a == b:
        b = a+1
    else:
        pass
ireturn_example.events = [(0, 'call'),
                          (1, 'line'),
                          (2, 'line'),
                          (3, 'line'),
                          (4, 'line'),
                          (6, 'line'),
                          (6, 'return')]
# Tight loop with while(1) example (SF #765624)
def tightloop_example():
    items = range(0, 3)
    try:
        i = 0
        while 1:
            b = items[i]; i+=1  # three hits, then IndexError on the fourth
    except IndexError:
        pass
tightloop_example.events = [(0, 'call'),
                            (1, 'line'),
                            (2, 'line'),
                            (3, 'line'),
                            (4, 'line'),
                            (5, 'line'),
                            (5, 'line'),
                            (5, 'line'),
                            (5, 'line'),
                            (5, 'exception'),
                            (6, 'line'),
                            (7, 'line'),
                            (7, 'return')]
# Single-line while body: every iteration re-traces offset 4 until the
# index chases off the end of the range and raises IndexError.
def tighterloop_example():
    items = range(1, 4)
    try:
        i = 0
        while 1: i = items[i]
    except IndexError:
        pass
tighterloop_example.events = [(0, 'call'),
                              (1, 'line'),
                              (2, 'line'),
                              (3, 'line'),
                              (4, 'line'),
                              (4, 'line'),
                              (4, 'line'),
                              (4, 'line'),
                              (4, 'exception'),
                              (5, 'line'),
                              (6, 'line'),
                              (6, 'return')]
# Pair with negative offsets: generator_function sits six lines above
# generator_example, so its events show up at -6..-1.  Do not insert lines
# between the two definitions.
def generator_function():
    try:
        yield True
        "continued"
    finally:
        "finally"
def generator_example():
    # any() will leave the generator before its end
    x = any(generator_function())
    # the following lines were not traced
    for x in range(10):
        y = x
generator_example.events = ([(0, 'call'),
                             (2, 'line'),
                             (-6, 'call'),
                             (-5, 'line'),
                             (-4, 'line'),
                             (-4, 'return'),
                             (-4, 'call'),
                             (-4, 'exception'),
                             (-1, 'line'),
                             (-1, 'return')] +
                            [(5, 'line'), (6, 'line')] * 10 +
                            [(5, 'line'), (5, 'return')])
class Tracer:
    """Trace-function host that records every (lineno, event) pair it sees."""

    def __init__(self):
        # Chronological log of (frame.f_lineno, event-name) tuples.
        self.events = []

    def trace(self, frame, event, arg):
        # Record the event, then keep local tracing alive by returning
        # this same method.
        record = (frame.f_lineno, event)
        self.events.append(record)
        return self.trace

    def traceWithGenexp(self, frame, event, arg):
        # Instantiating a generator expression inside the trace function
        # exercises tracing when the tracer itself contains a generator
        # (see the issue1265 test below); otherwise identical to trace().
        (o for o in [1])
        record = (frame.f_lineno, event)
        self.events.append(record)
        return self.trace
class TraceTestCase(unittest.TestCase):
    """Runs each module-level fixture function under a recording Tracer and
    compares the (line-offset, event) sequence with the fixture's ``events``
    attribute.  Line numbers are normalized relative to co_firstlineno, so
    the fixtures may move around the file but must not change internally."""

    # Disable gc collection when tracing, otherwise the
    # deallocators may be traced as well.
    def setUp(self):
        self.using_gc = gc.isenabled()
        gc.disable()
        # Restore whatever trace function was installed before the test.
        self.addCleanup(sys.settrace, sys.gettrace())

    def tearDown(self):
        if self.using_gc:
            gc.enable()

    def compare_events(self, line_offset, events, expected_events):
        # Convert absolute line numbers into offsets from the traced
        # function's first line before comparing.
        events = [(l - line_offset, e) for (l, e) in events]
        if events != expected_events:
            self.fail(
                "events did not match expectation:\n" +
                "\n".join(difflib.ndiff([str(x) for x in expected_events],
                                        [str(x) for x in events])))

    def run_and_compare(self, func, events):
        # Trace a zero-argument fixture and compare against an explicit
        # expected-events list.
        tracer = Tracer()
        sys.settrace(tracer.trace)
        func()
        sys.settrace(None)
        self.compare_events(func.__code__.co_firstlineno,
                            tracer.events, events)

    def run_test(self, func):
        # Fixtures carry their own expectations in func.events.
        self.run_and_compare(func, func.events)

    def run_test2(self, func):
        # Variant for fixtures that install the trace function themselves
        # (settrace_and_return / settrace_and_raise).
        tracer = Tracer()
        func(tracer.trace)
        sys.settrace(None)
        self.compare_events(func.__code__.co_firstlineno,
                            tracer.events, func.events)

    def test_set_and_retrieve_none(self):
        sys.settrace(None)
        assert sys.gettrace() is None

    def test_set_and_retrieve_func(self):
        def fn(*args):
            pass
        sys.settrace(fn)
        try:
            assert sys.gettrace() is fn
        finally:
            sys.settrace(None)

    def test_01_basic(self):
        self.run_test(basic)
    def test_02_arigo(self):
        self.run_test(arigo_example)
    def test_03_one_instr(self):
        self.run_test(one_instr_line)
    def test_04_no_pop_blocks(self):
        self.run_test(no_pop_blocks)
    def test_05_no_pop_tops(self):
        self.run_test(no_pop_tops)
    def test_06_call(self):
        self.run_test(call)
    def test_07_raise(self):
        self.run_test(test_raise)
    def test_08_settrace_and_return(self):
        self.run_test2(settrace_and_return)
    def test_09_settrace_and_raise(self):
        self.run_test2(settrace_and_raise)
    def test_10_ireturn(self):
        self.run_test(ireturn_example)
    def test_11_tightloop(self):
        self.run_test(tightloop_example)
    def test_12_tighterloop(self):
        self.run_test(tighterloop_example)

    def test_13_genexp(self):
        self.run_test(generator_example)
        # issue1265: if the trace function contains a generator,
        # and if the traced function contains another generator
        # that is not completely exhausted, the trace stopped.
        # Worse: the 'finally' clause was not invoked.
        tracer = Tracer()
        sys.settrace(tracer.traceWithGenexp)
        generator_example()
        sys.settrace(None)
        self.compare_events(generator_example.__code__.co_firstlineno,
                            tracer.events, generator_example.events)

    # NOTE: the nested fixtures below are line-sensitive -- the expected
    # events are offsets from each nested function's own first line.
    def test_14_onliner_if(self):
        def onliners():
            if True: False
            else: True
            return 0
        self.run_and_compare(
            onliners,
            [(0, 'call'),
             (1, 'line'),
             (3, 'line'),
             (3, 'return')])

    def test_15_loops(self):
        # issue1750076: "while" expression is skipped by debugger
        def for_example():
            for x in range(2):
                pass
        self.run_and_compare(
            for_example,
            [(0, 'call'),
             (1, 'line'),
             (2, 'line'),
             (1, 'line'),
             (2, 'line'),
             (1, 'line'),
             (1, 'return')])
        def while_example():
            # While expression should be traced on every loop
            x = 2
            while x > 0:
                x -= 1
        self.run_and_compare(
            while_example,
            [(0, 'call'),
             (2, 'line'),
             (3, 'line'),
             (4, 'line'),
             (3, 'line'),
             (4, 'line'),
             (3, 'line'),
             (3, 'return')])

    def test_16_blank_lines(self):
        # A long run of blank lines must not confuse line-event numbering.
        namespace = {}
        exec("def f():\n" + "\n" * 256 + "    pass", namespace)
        self.run_and_compare(
            namespace["f"],
            [(0, 'call'),
             (257, 'line'),
             (257, 'return')])

    def test_17_none_f_trace(self):
        # Issue 20041: fix TypeError when f_trace is set to None.
        def func():
            sys._getframe().f_trace = None
            lineno = 2
        self.run_and_compare(func,
            [(0, 'call'),
             (1, 'line')])
class RaisingTraceFuncTestCase(unittest.TestCase):
    """Checks that an exception raised *inside* a trace function is
    propagated correctly, disables tracing cleanly, and does not corrupt
    the interpreter's recursion counter or trash the stack."""

    def setUp(self):
        # Restore whatever trace function was installed before the test.
        self.addCleanup(sys.settrace, sys.gettrace())

    def trace(self, frame, event, arg):
        """A trace function that raises an exception in response to a
        specific trace event."""
        if event == self.raiseOnEvent:
            raise ValueError # just something that isn't RuntimeError
        else:
            return self.trace

    def f(self):
        """The function to trace; raises an exception if that's the case
        we're testing, so that the 'exception' trace event fires."""
        if self.raiseOnEvent == 'exception':
            x = 0
            y = 1/x
        else:
            return 1

    def run_test_for_event(self, event):
        """Tests that an exception raised in response to the given event is
        handled OK."""
        self.raiseOnEvent = event
        try:
            # Re-install the tracer every iteration; if the ValueError from
            # the trace function leaked recursion depth, this loop would
            # eventually blow up with RuntimeError.
            for i in range(sys.getrecursionlimit() + 1):
                sys.settrace(self.trace)
                try:
                    self.f()
                except ValueError:
                    pass
                else:
                    self.fail("exception not raised!")
        except RuntimeError:
            self.fail("recursion counter not reset")

    # Test the handling of exceptions raised by each kind of trace event.
    def test_call(self):
        self.run_test_for_event('call')
    def test_line(self):
        self.run_test_for_event('line')
    def test_return(self):
        self.run_test_for_event('return')
    def test_exception(self):
        self.run_test_for_event('exception')

    def test_trash_stack(self):
        # The nested f below is line-sensitive: g fires on firstlineno + 2.
        def f():
            for i in range(5):
                print(i)  # line tracing will raise an exception at this line
        def g(frame, why, extra):
            if (why == 'line' and
                frame.f_lineno == f.__code__.co_firstlineno + 2):
                raise RuntimeError("i am crashing")
            return g
        sys.settrace(g)
        try:
            f()
        except RuntimeError:
            # the test is really that this doesn't segfault:
            import gc
            gc.collect()
        else:
            self.fail("exception not propagated")

    def test_exception_arguments(self):
        def f():
            x = 0
            # this should raise an error
            x.no_such_attr
        def g(frame, event, arg):
            # The 'exception' event argument is a (type, value, traceback)
            # triple; the value must be a real exception instance.
            if (event == 'exception'):
                type, exception, trace = arg
                self.assertIsInstance(exception, Exception)
            return g
        existing = sys.gettrace()
        try:
            sys.settrace(g)
            try:
                f()
            except AttributeError:
                # this is expected
                pass
        finally:
            sys.settrace(existing)
# 'Jump' tests: assigning to frame.f_lineno within a trace function
# moves the execution position - it's how debuggers implement a Jump
# command (aka. "Set next statement").
class JumpTracer:
    """Trace function that performs exactly one jump while tracing.

    The function under test supplies the jump through its ``jump``
    attribute: ``jump[0]`` is the source line and ``jump[1]`` the
    destination, both relative to the function's first line.
    """

    def __init__(self, function):
        self.function = function
        self.jumpFrom = function.jump[0]
        self.jumpTo = function.jump[1]
        self.done = False

    def trace(self, frame, event, arg):
        # Guard clauses: only act once, and only on the target code object.
        if self.done or frame.f_code != self.function.__code__:
            return self.trace
        firstLine = frame.f_code.co_firstlineno
        if event == 'line' and frame.f_lineno == firstLine + self.jumpFrom:
            # Cope with non-integer self.jumpTo (because of
            # no_jump_to_non_integers below): adding it to firstLine
            # raises TypeError, in which case it is assigned verbatim.
            try:
                frame.f_lineno = firstLine + self.jumpTo
            except TypeError:
                frame.f_lineno = self.jumpTo
            self.done = True
        return self.trace
# The first set of 'jump' tests are for things that are allowed:
# Each fixture's .jump = (from, to) gives line offsets relative to the def
# line; .output is what the function appends after the jump is applied.
# Bodies are line-sensitive: do not add or remove lines inside them.
def jump_simple_forwards(output):
    output.append(1)
    output.append(2)
    output.append(3)
jump_simple_forwards.jump = (1, 3)
jump_simple_forwards.output = [3]
# Jumping backwards re-executes line 1, so it is appended twice.
def jump_simple_backwards(output):
    output.append(1)
    output.append(2)
jump_simple_backwards.jump = (2, 1)
jump_simple_backwards.output = [1, 1, 2]
# Leaving a for-loop body forwards, skipping over a nested block.
def jump_out_of_block_forwards(output):
    for i in 1, 2:
        output.append(2)
        for j in [3]: # Also tests jumping over a block
            output.append(4)
    output.append(5)
jump_out_of_block_forwards.jump = (3, 5)
jump_out_of_block_forwards.output = [2, 5]
# Leaving nested loop bodies backwards to before the outer loop.
def jump_out_of_block_backwards(output):
    output.append(1)
    for i in [1]:
        output.append(3)
        for j in [2]: # Also tests jumping over a block
            output.append(5)
            output.append(6)
    output.append(7)
jump_out_of_block_backwards.jump = (6, 1)
jump_out_of_block_backwards.output = [1, 3, 5, 1, 3, 5, 6, 7]
def jump_to_codeless_line(output):
    output.append(1)
    # Jumping to this line should skip to the next one.
    output.append(3)
jump_to_codeless_line.jump = (1, 2)
jump_to_codeless_line.output = [3]
# A jump whose source and destination coincide is a no-op.
def jump_to_same_line(output):
    output.append(1)
    output.append(2)
    output.append(3)
jump_to_same_line.jump = (2, 2)
jump_to_same_line.output = [1, 2, 3]
# Tests jumping within a finally block, and over one.
def jump_in_nested_finally(output):
    try:
        output.append(2)
    finally:
        output.append(4)
        try:
            output.append(6)
        finally:
            output.append(8)
        output.append(9)
jump_in_nested_finally.jump = (4, 9)
jump_in_nested_finally.output = [2, 9]
# Jumping past the loop body must exit the otherwise-infinite loop.
def jump_infinite_while_loop(output):
    output.append(1)
    while 1:
        output.append(2)
    output.append(3)
jump_infinite_while_loop.jump = (3, 4)
jump_infinite_while_loop.output = [1, 3]
# The second set of 'jump' tests are for things that are not allowed:
# The "no_jump_*" fixtures expect the f_lineno assignment to raise
# ValueError; they record whether the error message mentions the expected
# keyword.  Bodies are line-sensitive: do not add or remove lines inside.
def no_jump_too_far_forwards(output):
    try:
        output.append(2)
        output.append(3)
    except ValueError as e:
        output.append('after' in str(e))
no_jump_too_far_forwards.jump = (3, 6)
no_jump_too_far_forwards.output = [2, True]
def no_jump_too_far_backwards(output):
    try:
        output.append(2)
        output.append(3)
    except ValueError as e:
        output.append('before' in str(e))
no_jump_too_far_backwards.jump = (3, -1)
no_jump_too_far_backwards.output = [2, True]
# Test each kind of 'except' line.
def no_jump_to_except_1(output):
    try:
        output.append(2)
    except:
        e = sys.exc_info()[1]
        output.append('except' in str(e))
no_jump_to_except_1.jump = (2, 3)
no_jump_to_except_1.output = [True]
def no_jump_to_except_2(output):
    try:
        output.append(2)
    except ValueError:
        e = sys.exc_info()[1]
        output.append('except' in str(e))
no_jump_to_except_2.jump = (2, 3)
no_jump_to_except_2.output = [True]
def no_jump_to_except_3(output):
    try:
        output.append(2)
    except ValueError as e:
        output.append('except' in str(e))
no_jump_to_except_3.jump = (2, 3)
no_jump_to_except_3.output = [True]
def no_jump_to_except_4(output):
    try:
        output.append(2)
    except (ValueError, RuntimeError) as e:
        output.append('except' in str(e))
no_jump_to_except_4.jump = (2, 3)
no_jump_to_except_4.output = [True]
# Jumping into the middle of a loop body is forbidden in both directions.
def no_jump_forwards_into_block(output):
    try:
        output.append(2)
        for i in 1, 2:
            output.append(4)
    except ValueError as e:
        output.append('into' in str(e))
no_jump_forwards_into_block.jump = (2, 4)
no_jump_forwards_into_block.output = [True]
def no_jump_backwards_into_block(output):
    try:
        for i in 1, 2:
            output.append(3)
        output.append(4)
    except ValueError as e:
        output.append('into' in str(e))
no_jump_backwards_into_block.jump = (4, 3)
no_jump_backwards_into_block.output = [3, 3, True]
# Jumping into or out of a finally block is forbidden as well.
def no_jump_into_finally_block(output):
    try:
        try:
            output.append(3)
            x = 1
        finally:
            output.append(6)
    except ValueError as e:
        output.append('finally' in str(e))
no_jump_into_finally_block.jump = (4, 6)
no_jump_into_finally_block.output = [3, 6, True] # The 'finally' still runs
def no_jump_out_of_finally_block(output):
    try:
        try:
            output.append(3)
        finally:
            output.append(5)
            output.append(6)
    except ValueError as e:
        output.append('finally' in str(e))
no_jump_out_of_finally_block.jump = (5, 1)
no_jump_out_of_finally_block.output = [3, True]
# This verifies the line-numbers-must-be-integers rule.
def no_jump_to_non_integers(output):
    try:
        output.append(2)
    except ValueError as e:
        output.append('integer' in str(e))
no_jump_to_non_integers.jump = (2, "Spam")
no_jump_to_non_integers.output = [True]
# Jumping from inside one with-statement into another.
def jump_across_with(output):
    with open(support.TESTFN, "wb") as fp:
        pass
    with open(support.TESTFN, "wb") as fp:
        pass
jump_across_with.jump = (1, 3)
jump_across_with.output = []
# This verifies that you can't set f_lineno via _getframe or similar
# trickery.
def no_jump_without_trace_function():
    """Verify that assigning f_lineno on a frame obtained via _getframe
    (i.e. without an active trace function) raises ValueError mentioning
    trace functions; any other outcome is an error."""
    try:
        previous_frame = sys._getframe().f_back
        previous_frame.f_lineno = previous_frame.f_lineno
    except ValueError as e:
        # This is the exception we wanted; make sure the error message
        # talks about trace functions.
        if 'trace' not in str(e):
            raise
    else:
        # Something's wrong - the expected exception wasn't raised.
        raise RuntimeError("Trace-function-less jump failed to fail")
class JumpTestCase(unittest.TestCase):
    """Runs each jump fixture under a JumpTracer and compares the list the
    fixture appended to against its declared ``output`` attribute."""

    def setUp(self):
        self.addCleanup(sys.settrace, sys.gettrace())
        # Start from a clean slate; test_19 relies on no tracer being set.
        sys.settrace(None)

    def compare_jump_output(self, expected, received):
        if received != expected:
            self.fail( "Outputs don't match:\n" +
                       "Expected: " + repr(expected) + "\n" +
                       "Received: " + repr(received))

    def run_test(self, func):
        tracer = JumpTracer(func)
        sys.settrace(tracer.trace)
        output = []
        func(output)
        sys.settrace(None)
        self.compare_jump_output(func.output, output)

    def test_01_jump_simple_forwards(self):
        self.run_test(jump_simple_forwards)
    def test_02_jump_simple_backwards(self):
        self.run_test(jump_simple_backwards)
    def test_03_jump_out_of_block_forwards(self):
        self.run_test(jump_out_of_block_forwards)
    def test_04_jump_out_of_block_backwards(self):
        self.run_test(jump_out_of_block_backwards)
    def test_05_jump_to_codeless_line(self):
        self.run_test(jump_to_codeless_line)
    def test_06_jump_to_same_line(self):
        self.run_test(jump_to_same_line)
    def test_07_jump_in_nested_finally(self):
        self.run_test(jump_in_nested_finally)
    def test_jump_infinite_while_loop(self):
        self.run_test(jump_infinite_while_loop)
    def test_08_no_jump_too_far_forwards(self):
        self.run_test(no_jump_too_far_forwards)
    def test_09_no_jump_too_far_backwards(self):
        self.run_test(no_jump_too_far_backwards)
    def test_10_no_jump_to_except_1(self):
        self.run_test(no_jump_to_except_1)
    def test_11_no_jump_to_except_2(self):
        self.run_test(no_jump_to_except_2)
    def test_12_no_jump_to_except_3(self):
        self.run_test(no_jump_to_except_3)
    def test_13_no_jump_to_except_4(self):
        self.run_test(no_jump_to_except_4)
    def test_14_no_jump_forwards_into_block(self):
        self.run_test(no_jump_forwards_into_block)
    def test_15_no_jump_backwards_into_block(self):
        self.run_test(no_jump_backwards_into_block)
    def test_16_no_jump_into_finally_block(self):
        self.run_test(no_jump_into_finally_block)
    def test_17_no_jump_out_of_finally_block(self):
        self.run_test(no_jump_out_of_finally_block)
    def test_18_no_jump_to_non_integers(self):
        self.run_test(no_jump_to_non_integers)

    def test_19_no_jump_without_trace_function(self):
        # Must set sys.settrace(None) in setUp(), else condition is not
        # triggered.
        no_jump_without_trace_function()

    def test_jump_across_with(self):
        self.addCleanup(support.unlink, support.TESTFN)
        self.run_test(jump_across_with)

    def test_20_large_function(self):
        # Jump across >1000 lines inside an exec'd function.
        d = {}
        exec("""def f(output): # line 0
    x = 0                     # line 1
    y = 1                     # line 2
    '''                       # line 3
    %s                        # lines 4-1004
    '''                       # line 1005
    x += 1                    # line 1006
    output.append(x)          # line 1007
    return""" % ('\n' * 1000,), d)
        f = d['f']
        f.jump = (2, 1007)
        f.output = [0]
        self.run_test(f)

    def test_jump_to_firstlineno(self):
        # This tests that PDB can jump back to the first line in a
        # file. See issue #1689458. It can only be triggered in a
        # function call if the function is defined on a single line.
        code = compile("""
# Comments don't count.
output.append(2) # firstlineno is here.
output.append(3)
output.append(4)
""", "<fake module>", "exec")
        class fake_function:
            __code__ = code
            jump = (2, 0)
        tracer = JumpTracer(fake_function)
        sys.settrace(tracer.trace)
        namespace = {"output": []}
        exec(code, namespace)
        sys.settrace(None)
        self.compare_jump_output([2, 3, 2, 3, 4], namespace["output"])
def test_main():
    """Entry point for regrtest: run all three test case classes."""
    support.run_unittest(
        TraceTestCase,
        RaisingTraceFuncTestCase,
        JumpTestCase
    )

if __name__ == "__main__":
    test_main()
| |
from functools import wraps
import inspect
from textwrap import dedent
from typing import Any, Callable, List, Mapping, Optional, Tuple, Type, Union, cast
import warnings
from pandas._libs.properties import cache_readonly # noqa
from pandas._typing import F
def deprecate(
    name: str,
    alternative: Callable[..., Any],
    version: str,
    alt_name: Optional[str] = None,
    klass: Optional[Type[Warning]] = None,
    stacklevel: int = 2,
    msg: Optional[str] = None,
) -> Callable[[F], F]:
    """
    Return a new function that emits a deprecation warning on use.
    To use this method for a deprecated function, another function
    `alternative` with the same signature must exist. The deprecated
    function will emit a deprecation warning, and in the docstring
    it will contain the deprecation directive with the provided version
    so it can be detected for future removal.
    Parameters
    ----------
    name : str
        Name of function to deprecate.
    alternative : func
        Function to use instead.
    version : str
        Version of pandas in which the method has been deprecated.
    alt_name : str, optional
        Name to use in preference of alternative.__name__.
    klass : Warning, default FutureWarning
    stacklevel : int, default 2
    msg : str
        The message to display in the warning.
        Default is '{name} is deprecated. Use {alt_name} instead.'
    """
    alt_name = alt_name or alternative.__name__
    klass = klass or FutureWarning
    warning_msg = msg or f"{name} is deprecated, use {alt_name} instead"

    # The returned callable simply warns, then delegates to `alternative`.
    @wraps(alternative)
    def wrapper(*args, **kwargs) -> Callable[..., Any]:
        warnings.warn(warning_msg, klass, stacklevel=stacklevel)
        return alternative(*args, **kwargs)

    # adding deprecated directive to the docstring
    msg = msg or f"Use `{alt_name}` instead."
    doc_error_msg = (
        "deprecate needs a correctly formatted docstring in "
        "the target function (should have a one liner short "
        "summary, and opening quotes should be in their own "
        f"line). Found:\n{alternative.__doc__}"
    )
    # when python is running in optimized mode (i.e. `-OO`), docstrings are
    # removed, so we check that a docstring with correct formatting is used
    # but we allow empty docstrings
    if alternative.__doc__:
        if alternative.__doc__.count("\n") < 3:
            raise AssertionError(doc_error_msg)
        # Expected layout: blank line, one-line summary, blank line, body.
        empty1, summary, empty2, doc = alternative.__doc__.split("\n", 3)
        # NOTE(review): by precedence this parses as
        # ``empty1 or (empty2 and not summary)``; a docstring with text on
        # the separator line but a non-empty summary slips through.
        # Possibly ``empty1 or empty2 or not summary`` was intended --
        # TODO confirm before changing accepted-docstring behavior.
        if empty1 or empty2 and not summary:
            raise AssertionError(doc_error_msg)
        # Rebuild the docstring with a Sphinx ``deprecated`` directive.
        wrapper.__doc__ = dedent(
            f"""
        {summary.strip()}
        .. deprecated:: {version}
            {msg}
        {dedent(doc)}"""
        )
    return wrapper
def deprecate_kwarg(
    old_arg_name: str,
    new_arg_name: Optional[str],
    mapping: Optional[Union[Mapping[Any, Any], Callable[[Any], Any]]] = None,
    stacklevel: int = 2,
) -> Callable[[F], F]:
    """
    Decorator to deprecate a keyword argument of a function.

    Parameters
    ----------
    old_arg_name : str
        Name of argument in function to deprecate
    new_arg_name : str or None
        Name of preferred argument in function. Use None to raise warning that
        ``old_arg_name`` keyword is deprecated.
    mapping : dict or callable
        If mapping is present, use it to translate old arguments to
        new arguments. A callable must do its own value checking;
        values not found in a dict will be forwarded unchanged.
    stacklevel : int, default 2
        Stack level passed to ``warnings.warn``.

    Raises
    ------
    TypeError
        If ``mapping`` is neither dict-like nor callable, or if both the
        old and new keyword are passed at call time.

    Examples
    --------
    The following deprecates 'cols', using 'columns' instead

    >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
    ... def f(columns=''):
    ...     print(columns)
    ...
    >>> f(columns='should work ok')
    should work ok
    >>> f(cols='should raise warning')
    FutureWarning: cols is deprecated, use columns instead
      warnings.warn(msg, FutureWarning)
    should raise warning
    >>> f(cols='should error', columns="can\'t pass do both")
    TypeError: Can only specify 'cols' or 'columns', not both
    >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False})
    ... def f(new=False):
    ...     print('yes!' if new else 'no!')
    ...
    >>> f(old='yes')
    FutureWarning: old='yes' is deprecated, use new=True instead
      warnings.warn(msg, FutureWarning)
    yes!

    To raise a warning that a keyword will be removed entirely in the future

    >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None)
    ... def f(cols='', another_param=''):
    ...     print(cols)
    ...
    >>> f(cols='should raise warning')
    FutureWarning: the 'cols' keyword is deprecated and will be removed in a
    future version please takes steps to stop use of 'cols'
    should raise warning
    >>> f(another_param='should not raise warning')
    should not raise warning
    >>> f(cols='should raise warning', another_param='')
    FutureWarning: the 'cols' keyword is deprecated and will be removed in a
    future version please takes steps to stop use of 'cols'
    should raise warning
    """
    if mapping is not None and not hasattr(mapping, "get") and not callable(mapping):
        raise TypeError(
            "mapping from old to new argument values must be dict or callable!"
        )

    def _deprecate_kwarg(func: F) -> F:
        @wraps(func)
        def wrapper(*args, **kwargs) -> Callable[..., Any]:
            # Pop so the old name is never forwarded to func accidentally.
            old_arg_value = kwargs.pop(old_arg_name, None)

            if old_arg_value is not None:
                if new_arg_name is None:
                    # The keyword is being retired outright: warn, then
                    # forward the value under its (deprecated) old name.
                    msg = (
                        f"the {repr(old_arg_name)} keyword is deprecated and "
                        "will be removed in a future version. Please take "
                        f"steps to stop the use of {repr(old_arg_name)}"
                    )
                    warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
                    kwargs[old_arg_name] = old_arg_value
                    return func(*args, **kwargs)
                elif mapping is not None:
                    # Translate the old value; a dict forwards unknown
                    # values unchanged, a callable does its own checking.
                    if callable(mapping):
                        new_arg_value = mapping(old_arg_value)
                    else:
                        new_arg_value = mapping.get(old_arg_value, old_arg_value)
                    msg = (
                        f"the {old_arg_name}={repr(old_arg_value)} keyword is "
                        "deprecated, use "
                        f"{new_arg_name}={repr(new_arg_value)} instead"
                    )
                else:
                    new_arg_value = old_arg_value
                    # BUG FIX: the message previously contained a stray
                    # apostrophe after the repr ("'cols'' keyword ...").
                    msg = (
                        f"the {repr(old_arg_name)} keyword is deprecated, "
                        f"use {repr(new_arg_name)} instead"
                    )

                warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
                if kwargs.get(new_arg_name) is not None:
                    msg = (
                        f"Can only specify {repr(old_arg_name)} "
                        f"or {repr(new_arg_name)}, not both"
                    )
                    raise TypeError(msg)
                else:
                    kwargs[new_arg_name] = new_arg_value
            return func(*args, **kwargs)

        return cast(F, wrapper)

    return _deprecate_kwarg
def _format_argument_list(allow_args: Union[List[str], int]):
"""
Convert the allow_args argument (either string or integer) of
`deprecate_nonkeyword_arguments` function to a string describing
it to be inserted into warning message.
Parameters
----------
allowed_args : list, tuple or int
The `allowed_args` argument for `deprecate_nonkeyword_arguments`,
but None value is not allowed.
Returns
-------
s : str
The substring describing the argument list in best way to be
inserted to the warning message.
Examples
--------
`format_argument_list(0)` -> ''
`format_argument_list(1)` -> 'except for the first argument'
`format_argument_list(2)` -> 'except for the first 2 arguments'
`format_argument_list([])` -> ''
`format_argument_list(['a'])` -> "except for the arguments 'a'"
`format_argument_list(['a', 'b'])` -> "except for the arguments 'a' and 'b'"
`format_argument_list(['a', 'b', 'c'])` ->
"except for the arguments 'a', 'b' and 'c'"
"""
if not allow_args:
return ""
elif allow_args == 1:
return " except for the first argument"
elif isinstance(allow_args, int):
return f" except for the first {allow_args} arguments"
elif len(allow_args) == 1:
return f" except for the argument '{allow_args[0]}'"
else:
last = allow_args[-1]
args = ", ".join(["'" + x + "'" for x in allow_args[:-1]])
return f" except for the arguments {args} and '{last}'"
def deprecate_nonkeyword_arguments(
version: str,
allowed_args: Optional[Union[List[str], int]] = None,
stacklevel: int = 2,
) -> Callable:
"""
Decorator to deprecate a use of non-keyword arguments of a function.
Parameters
----------
version : str
The version in which positional arguments will become
keyword-only.
allowed_args : list or int, optional
In case of list, it must be the list of names of some
first arguments of the decorated functions that are
OK to be given as positional arguments. In case of an
integer, this is the number of positional arguments
that will stay positional. In case of None value,
defaults to list of all arguments not having the
default value.
stacklevel : int, default=2
The stack level for warnings.warn
"""
def decorate(func):
if allowed_args is not None:
allow_args = allowed_args
else:
spec = inspect.getfullargspec(func)
allow_args = spec.args[: -len(spec.defaults)]
@wraps(func)
def wrapper(*args, **kwargs):
arguments = _format_argument_list(allow_args)
if isinstance(allow_args, (list, tuple)):
num_allow_args = len(allow_args)
else:
num_allow_args = allow_args
if len(args) > num_allow_args:
msg = (
f"Starting with Pandas version {version} all arguments of "
f"{func.__name__}{arguments} will be keyword-only"
)
warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
return func(*args, **kwargs)
return wrapper
return decorate
def rewrite_axis_style_signature(
    name: str, extra_params: List[Tuple[str, Any]]
) -> Callable[..., Any]:
    """Attach a rewritten ``(self, name, index, columns, axis, ...)``
    signature to the decorated function so introspection (help, Sphinx)
    shows the axis-style parameters; runtime behavior is unchanged."""

    def decorate(func: F) -> F:
        @wraps(func)
        def wrapper(*args, **kwargs) -> Callable[..., Any]:
            # Pure pass-through; only the advertised signature changes.
            return func(*args, **kwargs)

        kind = inspect.Parameter.POSITIONAL_OR_KEYWORD
        params = [inspect.Parameter("self", kind)]
        params.extend(
            inspect.Parameter(pname, kind, default=None)
            for pname in (name, "index", "columns", "axis")
        )
        params.extend(
            inspect.Parameter(pname, kind, default=default)
            for pname, default in extra_params
        )
        sig = inspect.Signature(params)
        # https://github.com/python/typing/issues/598
        func.__signature__ = sig  # type: ignore
        return cast(F, wrapper)

    return decorate
def doc(*docstrings: Union[str, Callable], **params) -> Callable[[F], F]:
    """
    A decorator take docstring templates, concatenate them and perform string
    substitution on it.
    This decorator will add a variable "_docstring_components" to the wrapped
    callable to keep track the original docstring template for potential usage.
    If it should be consider as a template, it will be saved as a string.
    Otherwise, it will be saved as callable, and later user __doc__ and dedent
    to get docstring.
    Parameters
    ----------
    *docstrings : str or callable
        The string / docstring / docstring template to be appended in order
        after default docstring under callable.
    **params
        The string which would be used to format docstring template.
    """

    def decorator(decorated: F) -> F:
        # collecting docstring and docstring templates
        docstring_components: List[Union[str, Callable]] = []
        if decorated.__doc__:
            docstring_components.append(dedent(decorated.__doc__))
        for docstring in docstrings:
            if hasattr(docstring, "_docstring_components"):
                # Another @doc-decorated callable: splice in its own
                # components so templates chain transitively.
                docstring_components.extend(
                    docstring._docstring_components  # type: ignore
                )
            elif isinstance(docstring, str) or docstring.__doc__:
                # Plain template string, or a callable with a docstring.
                docstring_components.append(docstring)
        # formatting templates and concatenating docstring
        # (str components are .format()-ed; callables contribute their
        # dedented __doc__ unformatted)
        decorated.__doc__ = "".join(
            [
                component.format(**params)
                if isinstance(component, str)
                else dedent(component.__doc__ or "")
                for component in docstring_components
            ]
        )
        # Preserve the raw components for downstream @doc users.
        decorated._docstring_components = docstring_components  # type: ignore
        return decorated

    return decorator
# Substitution and Appender are derived from matplotlib.docstring (1.1.0)
# module https://matplotlib.org/users/license.html
class Substitution:
    """
    Decorator that performs %-style substitution on a function's docstring.

    Construct it with either positional or keyword arguments (never both)
    suitable for ``%``-formatting, then decorate a function whose docstring
    contains the matching placeholders. Robust when ``func.__doc__`` is
    None (e.g. under ``-OO``).

    sub_author_name = Substitution(author='Jason')

    @sub_author_name
    def some_function(x):
        "%(author)s wrote this function"

    # some_function.__doc__ is now "Jason wrote this function"

    Positional form:

    sub_first_last_names = Substitution('Edgar Allen', 'Poe')

    @sub_first_last_names
    def some_function(x):
        "%s %s wrote the Raven"
    """

    def __init__(self, *args, **kwargs):
        if args and kwargs:
            raise AssertionError("Only positional or keyword args are allowed")
        # Either a tuple (positional) or a dict (keyword) of parameters.
        self.params = args or kwargs

    def __call__(self, func: F) -> F:
        # Substitute in place; a None/empty docstring is left untouched.
        if func.__doc__:
            func.__doc__ = func.__doc__ % self.params
        return func

    def update(self, *args, **kwargs) -> None:
        """
        Update self.params with supplied args.
        """
        if isinstance(self.params, dict):
            self.params.update(*args, **kwargs)
class Appender:
    """
    Decorator that appends an addendum to the target function's docstring.

    Construct with the string to append, an optional ``join`` separator,
    and an optional ``indents`` count (levels of indentation applied to
    the addendum via ``indent``). Robust when ``func.__doc__`` is None
    (e.g. under ``-OO``).

    add_copyright = Appender("Copyright (c) 2009", join='\n')

    @add_copyright
    def my_dog(has='fleas'):
        "This docstring will have a copyright below"
        pass
    """

    # Text appended after the original docstring (may be None).
    addendum: Optional[str]

    def __init__(self, addendum: Optional[str], join: str = "", indents: int = 0):
        self.addendum = indent(addendum, indents=indents) if indents > 0 else addendum
        self.join = join

    def __call__(self, func: F) -> F:
        base = func.__doc__ if func.__doc__ else ""
        self.addendum = self.addendum if self.addendum else ""
        func.__doc__ = dedent(self.join.join([base, self.addendum]))
        return func
def indent(text: Optional[str], indents: int = 1) -> str:
    """Indent every continuation line of *text* by ``indents`` levels.

    The first line is left untouched; None, empty, or non-string input
    yields "".
    """
    if not text or not isinstance(text, str):
        return ""
    separator = "\n" + "    " * indents
    return separator.join(text.split("\n"))
| |
"""
@file
@brief Some automation helpers about notebooks
"""
import os
import sys
import json
import warnings
from io import StringIO
from nbformat import versions
from nbformat.reader import reads, NotJSONError
from nbformat.v4 import upgrade
from ..filehelper import read_content_ufs
from ..loghelper import noLOG
from ..filehelper import explore_folder_iterfile, remove_folder
from .notebook_runner import NotebookRunner
from .notebook_exception import NotebookException
# Probe for ipykernel at import time without letting its ImportWarnings
# leak; if it is missing, emit a single explicit warning instead of
# failing, since notebook execution is then unavailable.
with warnings.catch_warnings():
    warnings.simplefilter("ignore", category=ImportWarning)
    try:
        from ipykernel.kernelspec import install as install_k
        raisewarn = False
    except ImportError:  # pragma: no cover
        raisewarn = True
# Warn outside the catch_warnings block so the message is actually shown.
if raisewarn:  # pragma: no cover
    warnings.warn("ipykernel is not installed. pyquickhelper cannot execute a notebook.",
                  category=ImportWarning)
def writes(nb, **kwargs):
    """Serialize a notebook to a string in the current nbformat version.

    Parameters
    ++++++++++
    nb : NotebookNode
        The notebook to write.
    kwargs :
        Among these parameters, *version* (int), the nbformat version
        to write; used for downgrading notebooks.

    Returns
    +++++++
    s : unicode
        The notebook string.
    """
    try:
        return versions[nb.nbformat].writes_json(nb, **kwargs)
    except AttributeError as err:  # pragma: no cover
        # The registered version module lacks writes_json (or nb has no
        # nbformat attribute): surface it as a notebook error.
        msg = "probably wrong error: {0}".format(nb.nbformat)
        raise NotebookException(msg) from err
def upgrade_notebook(filename, encoding="utf-8"):
    """Convert a notebook from an older nbformat version to the latest.

    @param filename notebook filename
    @param encoding file encoding
    @return         True if the file was modified, False otherwise
    """
    with open(filename, "r", encoding=encoding) as payload:
        content = payload.read()
    try:
        nb = reads(content)
    except NotJSONError as e:  # pragma: no cover
        # Show at most the first ten characters to hint at what was read.
        lc = list(content[:10])
        raise ValueError(
            "Unable to read content type '{0}' in '{2}' ---- {1}".format(type(content), lc, filename)) from e
    # Nothing to do for notebooks already at version 4 or unversioned ones.
    if not hasattr(nb, "nbformat") or nb.nbformat >= 4:
        return False
    try:
        upgrade(nb, from_version=nb.nbformat)
    except ValueError as e:  # pragma: no cover
        raise ValueError("Unable to convert '{0}'.".format(filename)) from e
    new_content = writes(nb)
    if isinstance(new_content, bytes):
        new_content = new_content.decode('utf8')
    # Avoid rewriting the file when the serialization is unchanged.
    if new_content == content:
        return False
    with open(filename, "w", encoding=encoding) as f:
        f.write(new_content)
    return True
def read_nb(filename, profile_dir=None, encoding="utf8", working_dir=None,
            comment="", fLOG=noLOG, code_init=None,
            kernel_name="python", log_level="30", extended_args=None,
            kernel=False, replacements=None):
    """
    Reads a notebook and return a @see cl NotebookRunner object.

    @param filename         notebook filename (or stream)
    @param profile_dir      profile directory
    @param encoding         encoding for the notebooks
    @param working_dir      working directory
    @param comment          additional information added to error message
    @param code_init        to initialize the notebook with a python code as if it was a cell
    @param fLOG             logging function
    @param log_level        Choices: (0, 10, 20, 30=default, 40, 50, 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL')
    @param kernel_name      kernel name, it can be None
    @param extended_args    others arguments to pass to the command line
                            (`--KernelManager.autorestar=True` for example),
                            see :ref:`l-ipython_notebook_args` for a full list
    @param kernel           *kernel* is True by default, the notebook can be run, if False,
                            the notebook can be read but not run
    @param replacements     replacements to make in every cell before running it,
                            dictionary ``{ string: string }``
    @return                 @see cl NotebookRunner

    Fix: *kernel_name*, *log_level* and *extended_args* are now forwarded to
    @see cl NotebookRunner; they used to be silently replaced by their
    default values.
    """
    if isinstance(filename, str):
        # A path: read from disk and remember the notebook's location.
        with open(filename, "r", encoding=encoding) as payload:
            nb = reads(payload.read())
        return NotebookRunner(
            nb, profile_dir=profile_dir, theNotebook=os.path.abspath(filename),
            kernel=kernel, working_dir=working_dir,
            comment=comment, fLOG=fLOG, code_init=code_init,
            kernel_name=kernel_name, log_level=log_level,
            extended_args=extended_args,
            filename=filename, replacements=replacements)
    # A stream: no filesystem location is available.
    nb = reads(filename.read())
    return NotebookRunner(
        nb, kernel=kernel,
        profile_dir=profile_dir, working_dir=working_dir,
        comment=comment, fLOG=fLOG, code_init=code_init,
        kernel_name=kernel_name, log_level=log_level,
        extended_args=extended_args,
        filename=filename, replacements=replacements)
def read_nb_json(js, profile_dir=None, encoding="utf8",
                 working_dir=None, comment="", fLOG=noLOG, code_init=None,
                 kernel_name="python", log_level="30", extended_args=None,
                 kernel=False, replacements=None):
    """
    Reads a notebook from a :epkg:`JSON` stream or string.

    @param js               string or stream
    @param profile_dir      profile directory
    @param encoding         encoding for the notebooks
    @param working_dir      working directory
    @param comment          additional information added to error message
    @param code_init        to initialize the notebook with a python code as if it was a cell
    @param fLOG             logging function
    @param log_level        Choices: (0, 10, 20, 30=default, 40, 50, 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL')
    @param kernel_name      kernel name, it can be None
    @param extended_args    others arguments to pass to the command line ('--KernelManager.autorestar=True' for example),
                            see :ref:`l-ipython_notebook_args` for a full list
    @param kernel           *kernel* is True by default, the notebook can be run, if False,
                            the notebook can be read but not run
    @param replacements     replacements to make in every cell before running it,
                            dictionary ``{ string: string }``
    @return                 instance of @see cl NotebookRunner

    Fix: *kernel_name*, *log_level* and *extended_args* are now forwarded to
    @see fn read_nb; they used to be silently replaced by their defaults.
    """
    # Wrap a plain string so read_nb always receives a stream here.
    st = StringIO(js) if isinstance(js, str) else js
    return read_nb(st, encoding=encoding, kernel=kernel,
                   profile_dir=profile_dir, working_dir=working_dir,
                   comment=comment, fLOG=fLOG, code_init=code_init,
                   kernel_name=kernel_name, log_level=log_level,
                   extended_args=extended_args,
                   replacements=replacements)
def find_notebook_kernel(kernel_spec_manager=None):
    """
    Returns a dict mapping kernel names to resource directories.

    @param kernel_spec_manager  a *KernelSpecManager* used for the lookup;
                                a default instance is created when None
    @return                     dict

    The function only works with *Jupyter>=4.0*.
    """
    manager = kernel_spec_manager
    if manager is None:
        # Import lazily so jupyter_client is only required when needed.
        from jupyter_client.kernelspec import KernelSpecManager
        manager = KernelSpecManager()
    return manager.find_kernel_specs()
def get_notebook_kernel(kernel_name, kernel_spec_manager=None):
    """
    Returns the *KernelSpec* registered under *kernel_name*.

    @param kernel_spec_manager  a *KernelSpecManager* used for the lookup;
                                a default instance is created when None
    @param kernel_name          kernel name
    @return                     KernelSpec

    The function only works with *Jupyter>=4.0*.
    """
    manager = kernel_spec_manager
    if manager is None:
        # Import lazily so jupyter_client is only required when needed.
        from jupyter_client.kernelspec import KernelSpecManager
        manager = KernelSpecManager()
    return manager.get_kernel_spec(kernel_name)
def install_notebook_extension(path=None, overwrite=False, symlink=False,
                               user=False, prefix=None, nbextensions_dir=None,
                               destination=None):
    """
    Installs notebook extensions, see `install_nbextension
    <https://ipython.org/ipython-doc/dev/api/generated/IPython.html.nbextensions.html
    #IPython.html.nbextensions.install_nbextension>`_ for documentation.

    @param path                 if None, use default value
    @param overwrite            overwrite the extension
    @param symlink              see the original function
    @param user                 user
    @param prefix               see the original function
    @param nbextensions_dir     see the original function
    @param destination          see the original function
    @return                     standard output

    Default value is
    `https://github.com/ipython-contrib/IPython-notebook-extensions/archive/master.zip
    <https://github.com/ipython-contrib/IPython-notebook-extensions/archive/master.zip>`_.

    Fix: ``sys.stdout`` / ``sys.stderr`` are now restored in a ``finally``
    block; previously an exception raised by *install_nbextension* left the
    interpreter's streams redirected to StringIO buffers.
    """
    if path is None:
        path = "https://github.com/ipython-contrib/IPython-notebook-extensions/archive/master.zip"
    cout = sys.stdout
    cerr = sys.stderr
    # Capture everything the installer prints so it can be returned
    # (stdout) or turned into an exception message (stderr).
    sys.stdout = StringIO()
    sys.stderr = StringIO()
    try:
        from notebook.nbextensions import install_nbextension
        install_nbextension(path=path, overwrite=overwrite, symlink=symlink,
                            user=user, prefix=prefix,
                            nbextensions_dir=nbextensions_dir,
                            destination=destination)
        out = sys.stdout.getvalue()
        err = sys.stderr.getvalue()
    finally:
        sys.stdout = cout
        sys.stderr = cerr
    if len(err) != 0:
        raise NotebookException(
            "unable to install exception from: {0}\nOUT:\n{1}\n[nberror]\n{2}".format(path, out, err))
    return out
def get_jupyter_datadir():
    """
    Returns the data directory used by Jupyter for kernel specs.

    @return path
    """
    # Import lazily so jupyter_client is only required when needed.
    from jupyter_client.kernelspec import KernelSpecManager
    manager = KernelSpecManager()
    return manager.data_dir
def get_jupyter_extension_dir(user=False, prefix=None,
                              nbextensions_dir=None):
    """
    Returns the directory where notebook extensions are installed.

    Parameters
    ++++++++++
    user : bool [default: False]
        Whether to check the user's .ipython/nbextensions directory.
        Otherwise check a system-wide install (e.g. /usr/local/share/jupyter/nbextensions).
    prefix : str [optional]
        Specify install prefix, if it should differ from default (e.g. /usr/local).
        Will check prefix/share/jupyter/nbextensions
    nbextensions_dir : str [optional]
        Specify absolute path of nbextensions directory explicitly.

    Return
    ++++++
    path: path to installed extensions (by the user)
    """
    # NOTE(review): relies on a private helper of the notebook package;
    # it may move between notebook versions.
    from notebook.nbextensions import _get_nbextension_dir
    return _get_nbextension_dir(
        nbextensions_dir=nbextensions_dir, user=user, prefix=prefix)
def get_installed_notebook_extension(user=False, prefix=None,
                                     nbextensions_dir=None):
    """
    Returns installed notebook extensions.

    :param user: bool [default: False]
        Whether to check the user's .ipython/nbextensions directory.
        Otherwise check a system-wide install (e.g. /usr/local/share/jupyter/nbextensions).
    :param prefix: str [optional]
        Specify install prefix, if it should differ from default (e.g. /usr/local).
        Will check prefix/share/jupyter/nbextensions
    :param nbextensions_dir: str [optional]
        Specify absolute path of nbextensions directory explicitly.
    :return: list of installed notebook extensions (by the user)

    You can install extensions with function @see fn install_notebook_extension.
    """
    ext_dir = get_jupyter_extension_dir(
        user=user, prefix=prefix, nbextensions_dir=nbextensions_dir)
    if not os.path.exists(ext_dir):
        raise FileNotFoundError(ext_dir)
    found = []
    for filename in explore_folder_iterfile(ext_dir):
        relative = os.path.relpath(filename, ext_dir)
        head, tail = os.path.split(relative)
        # An extension is identified by its main.js entry point; report
        # it as '<folder>/main' with forward slashes.
        if tail == "main.js":
            found.append(head.replace("\\", "/") + "/main")
    return found
def install_jupyter_kernel(exe=sys.executable, kernel_spec_manager=None, user=False, kernel_name=None, prefix=None):
    """
    Installs a kernel based on executable (this python by default).

    @param exe                  Python executable, current one by default
    @param kernel_spec_manager  (KernelSpecManager [optional]).
                                A KernelSpecManager to use for installation.
                                If none provided, a default instance will be created.
    @param user                 (bool). Whether to do a user-only install, or system-wide.
    @param kernel_name          (str), optional. Specify a name for the kernelspec.
                                This is needed for having multiple IPython
                                kernels for different environments.
    @param prefix               (str), optional. Specify an install prefix for the
                                kernelspec. This is needed to install into a
                                non-default location, such as a conda/virtual-env.
    @return                     The path where the kernelspec was installed.

    A kernel is defined by the following fields:

    ::

        {
            "display_name": "Python 3 (ENSAE)",
            "language": "python",
            "argv": [ "c:\\\\PythonENSAE\\\\python\\\\python.exe",
                      "-m",
                      "ipykernel",
                      "-f",
                      "{connection_file}"
                    ]
        }

    For R, it looks like:

    ::

        {
            "display_name": "R (ENSAE)",
            "language": "R",
            "argv": [ "c:\\\\PythonENSAE\\\\tools\\\\R\\\\bin\\\\x64\\\\R.exe",
                      "--quiet",
                      "-e",
                      "IRkernel::main()",
                      "--args",
                      "{connection_file}"
                    ]
        }
    """
    # A windowed interpreter cannot host a kernel: use the console one.
    exe = exe.replace("pythonw.exe", "python.exe")
    dest = install_k(kernel_spec_manager=kernel_spec_manager,
                     user=user, kernel_name=kernel_name, prefix=prefix)
    # Overwrite the generated kernel.json with one pointing at *exe*.
    spec = dict(display_name=kernel_name,
                language="python",
                argv=[exe, "-m", "ipykernel", "-f", "{connection_file}"])
    kernel_file = os.path.join(dest, "kernel.json")
    with open(kernel_file, "w") as f:
        f.write(json.dumps(spec))
    return dest
def install_python_kernel_for_unittest(suffix=None):
    """
    Installs a kernel based on this python (sys.executable) for unit test purposes.

    @param suffix   suffix to add to the kernel name
    @return         kernel name
    """
    # Build a kernel name from the interpreter's folder, keeping only
    # characters safe for a kernel identifier.
    base = os.path.split(sys.executable)[0].replace("pythonw", "python")
    base = base.replace("\\", "/").replace(
        "/", "_").replace(".", "_").replace(":", "")
    parts = ["ut_" + base + "_" + str(sys.version_info[0])]
    if suffix is not None:
        parts.append(suffix)
    kern = "_".join(parts).lower()
    install_jupyter_kernel(kernel_name=kern)
    return kern
def remove_kernel(kernel_name, kernel_spec_manager=None):
    """
    Removes a kernel.

    @param kernel_spec_manager  a *KernelSpecManager* used for the lookup;
                                a default instance is created when None
    @param kernel_name          kernel name

    The function only works with *Jupyter>=4.0*.
    """
    kernels = find_notebook_kernel(kernel_spec_manager=kernel_spec_manager)
    if kernel_name not in kernels:
        raise NotebookException(  # pragma: no cover
            "Unable to find kernel '{0}' in {1}".format(
                kernel_name, ", ".join(kernels.keys())))
    fold = kernels[kernel_name]
    if not os.path.exists(fold):
        raise FileNotFoundError("unable to remove folder " + fold)
    remove_folder(fold)
def remove_execution_number(infile, outfile=None, encoding="utf-8", indent=2, rule=int):
    """
    Removes execution numbers from a notebook.

    @param infile       filename of the notebook
    @param outfile      None or a filename to save the result to
    @param encoding     encoding
    @param indent       indentation of the produced JSON
    @param rule         determines the rule which rewrites execution numbers,
                        ``None`` to blank them out, ``int`` for consecutive
                        integer numbers
    @return             modified string, or the previous content if *outfile*
                        was written, or None if *outfile* is given and the
                        file was not modified

    .. todoext::
        :title: remove execution number from notebook to facilitate git versioning
        :tag: enhancement
        :issue: 18
        :cost: 1
        :hidden:
        :date: 2016-08-23
        :release: 1.4

        Remove execution numbers from the notebook
        to avoid committing changes only about those numbers.

    `notebook 5.1.0 <https://jupyter-notebook.readthedocs.io/en/stable/changelog.html>`_
    introduced changes which are incompatible with
    leaving the cell executing number empty.
    """
    def fixup(adict, k, v, cellno=0, outputs="outputs"):
        # Recursively walk the notebook JSON, rewriting every key *k*
        # (execution_count) either to *v* (rule=None) or to a running
        # cell counter (rule=int). Returns the updated counter.
        for key in adict.keys():
            if key == k:
                if rule is None:
                    adict[key] = v
                elif rule is int:
                    cellno += 1
                    adict[key] = cellno
                else:
                    raise ValueError(  # pragma: no cover
                        "Rule '{0}' does not apply on {1}={2}".format(rule, key, adict[key]))
            elif key == "outputs":
                # NOTE: the counter is intentionally NOT propagated back
                # from the outputs subtree — an output's execution_count
                # mirrors its cell and must not advance the numbering.
                if isinstance(adict[key], dict):
                    fixup(adict[key], k, v, cellno=cellno, outputs=outputs)
                elif isinstance(adict[key], list):
                    for el in adict[key]:
                        if isinstance(el, dict):
                            fixup(el, k, v, cellno=cellno, outputs=outputs)
            elif isinstance(adict[key], dict):
                # Regular subtree: the counter keeps advancing.
                cellno = fixup(adict[key], k, v,
                               cellno=cellno, outputs=outputs)
            elif isinstance(adict[key], list):
                for el in adict[key]:
                    if isinstance(el, dict):
                        cellno = fixup(el, k, v, cellno=cellno,
                                       outputs=outputs)
        return cellno

    content = read_content_ufs(infile)
    js = json.loads(content)
    fixup(js, "execution_count", None)
    # Re-serialize with stable ordering so diffs stay minimal.
    st = StringIO()
    json.dump(js, st, indent=indent, sort_keys=True)
    res = st.getvalue()
    if outfile is not None:
        if content != res:
            with open(outfile, "w", encoding=encoding) as f:
                f.write(res)
            return content
        return None
    return res
| |
import json
from datetime import datetime
from itertools import groupby
import markdown2
from django.urls import reverse
from django.db import models, transaction
from django.utils import timezone
from django.utils.six.moves.urllib.parse import quote
from django.utils.six.moves.urllib.request import urlopen
from pynaco.naco import normalizeSimplified
from .validators import validate_merged_with
class Identifier_Type(models.Model):
    """Custom Identifier Type.

    Used in conjunction with the Identifier model to categorize an
    Identifier (e.g. personal website, Twitter).
    """
    # Human-readable name of the identifier kind.
    label = models.CharField(
        max_length=255,
        help_text='What kind of data is this? Personal website? Twitter?')
    # Optional path to an icon representing this type.
    icon_path = models.CharField(
        max_length=255,
        blank=True,
        help_text='Path to icon image?')
    # Optional homepage of the service behind the label.
    homepage = models.URLField(
        blank=True,
        help_text='Homepage of label. Twitter.com, Facebook.com, etc')

    class Meta:
        ordering = ['label']
        verbose_name = 'Identifier Type'

    def __str__(self):
        return self.label
class Identifier(models.Model):
    """An Identifier for a Name models instance. Most commonly
    represented as a permalink.

    This is used in conjunction with the Identifier Type model to
    specify the type of Identifier the instance represents. An example
    instance would have an Identifier Type of `Twitter` and the value
    field would have the permalink to the Name's Twitter profile page.
    """
    # Categorization of the identifier (see Identifier_Type).
    type = models.ForeignKey(
        'Identifier_Type',
        help_text="Catagorize this record's identifiers here",
        on_delete=models.CASCADE)
    # Name record this identifier belongs to.
    belong_to_name = models.ForeignKey('Name', on_delete=models.CASCADE)
    # The identifier itself, typically a permalink.
    value = models.CharField(max_length=500)
    # Whether the identifier should be displayed publicly.
    visible = models.BooleanField(default=True)
    # Manual sort order within a Name's identifiers.
    order = models.IntegerField(default=0)

    class Meta:
        ordering = ['order', 'type']

    def __str__(self):
        return self.value
class NoteManager(models.Manager):
    """Custom Model Manager for the Note model."""

    def public_notes(self):
        """Return all notes except those typed NONPUBLIC."""
        return self.get_queryset().exclude(note_type=self.model.NONPUBLIC)
class Note(models.Model):
    """A note regarding the related Name model instance."""
    # Note type values. NOTE: each value doubles as the index of its
    # entry in NOTE_TYPE_CHOICES (see get_note_type_label).
    BIOGRAPHICAL_HISTORICAL = 0
    DELETION_INFORMATION = 1
    NONPUBLIC = 2
    SOURCE = 3
    OTHER = 4

    NOTE_TYPE_CHOICES = (
        (BIOGRAPHICAL_HISTORICAL, 'Biographical/Historical'),
        (DELETION_INFORMATION, 'Deletion Information'),
        (NONPUBLIC, 'Nonpublic'),
        (SOURCE, 'Source'),
        (OTHER, 'Other')
    )

    note = models.TextField(help_text='Enter notes about this record here')
    note_type = models.IntegerField(choices=NOTE_TYPE_CHOICES)
    belong_to_name = models.ForeignKey('Name', on_delete=models.CASCADE)

    objects = NoteManager()

    class Meta:
        base_manager_name = 'objects'

    def get_note_type_label(self):
        """Returns the label associated with an instance's
        note_type.
        """
        # Relies on the choice value matching its tuple index.
        id, note_type = self.NOTE_TYPE_CHOICES[self.note_type]
        return note_type

    def __str__(self):
        return self.note
class Variant(models.Model):
    """Defines an alternative form that a Name may be displayed."""
    # Variant type values. NOTE: each value doubles as the index of its
    # entry in VARIANT_TYPE_CHOICES (see get_variant_type_label).
    ACRONYM = 0
    ABBREVIATION = 1
    TRANSLATION = 2
    EXPANSION = 3
    OTHER = 4

    VARIANT_TYPE_CHOICES = (
        (ACRONYM, 'Acronym'),
        (ABBREVIATION, 'Abbreviation'),
        (TRANSLATION, 'Translation'),
        (EXPANSION, 'Expansion'),
        (OTHER, 'Other')
    )

    belong_to_name = models.ForeignKey('Name', on_delete=models.CASCADE)
    variant_type = models.IntegerField(
        choices=VARIANT_TYPE_CHOICES,
        help_text='Choose variant type.')
    variant = models.CharField(
        max_length=255,
        help_text='Fill in the other name variants, if any.')
    # Kept in sync with `variant` automatically on save.
    normalized_variant = models.CharField(
        max_length=255,
        editable=False,
        help_text='NACO normalized variant text')

    def get_variant_type_label(self):
        """Returns the label associated with an instance's
        variant_type.
        """
        id, variant_type = self.VARIANT_TYPE_CHOICES[self.variant_type]
        return variant_type

    def save(self, *args, **kwargs):
        """Persist the variant, refreshing the NACO-normalized form first.

        Fix: accept and forward the standard ``Model.save`` arguments
        (``force_insert``, ``using``, ``update_fields``, ...); the previous
        signature ``save(self)`` raised ``TypeError`` for any of them and
        never forwarded anything to ``super().save()``.
        """
        self.normalized_variant = normalizeSimplified(self.variant)
        super(Variant, self).save(*args, **kwargs)

    def __str__(self):
        return self.variant
class TicketingManager(models.Manager):
    """Custom manager for the BaseTicketing model."""

    def create(self, *args, **kwargs):
        """Override the create method.

        Create or get the single object. If one already exists,
        delete it and create a new one so we auto-increment the id.
        """
        # get_or_create delegates to the QuerySet, not to this method,
        # so the self.create() call below re-enters this method at most
        # once: after the delete, get_or_create succeeds in creating.
        obj, created = self.get_or_create(stub=self.model.STUB_DEFAULT)
        if not created:
            with transaction.atomic():
                obj.delete()
                obj = self.create(stub=self.model.STUB_DEFAULT)
        return obj
class BaseTicketing(models.Model):
    """Creates a custom app-level identifier.

    This leverages the autoincrement primary key field to
    create custom unique identifier. An example identifier
    would be `nm0000001`.
    """
    # Explicitly set the id of the model, even though it is the same
    # as the one Django gives it.
    id = models.AutoField(null=False, primary_key=True)

    # This is just the smallest placeholder we can create that we can
    # replace into to generate a new id.
    STUB_DEFAULT = True
    stub = models.BooleanField(null=False, default=STUB_DEFAULT, unique=True)

    # Override the default manager.
    objects = TicketingManager()

    @property
    def ticket(self):
        """Alias for id."""
        return self.id

    def __str__(self):
        # Zero-padded ticket, e.g. id 1 -> 'nm0000001'.
        return 'nm{ticket:07d}'.format(ticket=self.ticket)
class NameManager(models.Manager):
    """Custom Manager for the Name model.

    Provides additional methods that are useful in calculating
    statistics on Name model instances.
    """

    def visible(self):
        """Retrieves all Name objects that have an Active record status
        and are not merged with any other Name objects.
        """
        return self.get_queryset().filter(
            record_status=self.model.ACTIVE, merged_with=None)

    def active_type_counts(self):
        """Calculates counts of Name objects by Name Type.

        Statistics are based off of the queryset returned by visible.
        All figures are calculated in Python from a single evaluation
        of the queryset to reduce the number of database queries.
        """
        # Materialize once: previously `names.count()` issued a separate
        # COUNT(*) query before the queryset was iterated for the tallies.
        names = list(self.visible())
        return {
            'total': len(names),
            'personal': len([n for n in names if n.is_personal()]),
            'organization': len([n for n in names if n.is_organization()]),
            'event': len([n for n in names if n.is_event()]),
            'software': len([n for n in names if n.is_software()]),
            'building': len([n for n in names if n.is_building()])
        }

    def _counts_per_month(self, date_column):
        """Calculates the number of Names by month according to the
        date_column passed in.

        This will return a list where each element is in the form of

            {
                count: <Number of Names for the month>,
                month: <Datetime object for first day of the given month>
            }
        """
        def grouper(name):
            # Group key: (year, month) of the requested date column.
            return (getattr(name, date_column).year,
                    getattr(name, date_column).month)

        def convert_key(year, month):
            # First day of the month, made timezone-aware.
            datetime_obj = datetime(year=year, month=month, day=1)
            tzinfo = timezone.get_current_timezone()
            return timezone.make_aware(datetime_obj, tzinfo)

        # groupby requires its input sorted by the grouping key.
        results = self.all().order_by(date_column)
        return [
            dict(month=convert_key(*key), count=len(list(value)))
            for key, value in groupby(results, grouper)
        ]

    def created_stats(self):
        """Returns a list of the number of Names created per
        month.
        """
        return self._counts_per_month('date_created')

    def modified_stats(self):
        """Returns a list of the number of Names modified per
        month.
        """
        return self._counts_per_month('last_modified')
class Name(models.Model):
    """The authorized version of a name that is used to unambiguously
    refer to a person, organization, event, building or piece of
    software.
    """
    # Record status values.
    ACTIVE = 0
    DELETED = 1
    SUPPRESSED = 2

    RECORD_STATUS_CHOICES = (
        (ACTIVE, 'Active'),
        (DELETED, 'Deleted'),
        (SUPPRESSED, 'Suppressed')
    )

    # Name type values.
    PERSONAL = 0
    ORGANIZATION = 1
    EVENT = 2
    SOFTWARE = 3
    BUILDING = 4

    NAME_TYPE_CHOICES = (
        (PERSONAL, 'Personal'),
        (ORGANIZATION, 'Organization'),
        (EVENT, 'Event'),
        (SOFTWARE, 'Software'),
        (BUILDING, 'Building')
    )

    # Display labels for the begin/end dates, per name type. The `None`
    # entry is the fallback for an unset name type.
    DATE_DISPLAY_LABELS = {
        PERSONAL: {
            'type': 'Personal',
            'begin': 'Date of Birth',
            'end': 'Date of Death'
        },
        ORGANIZATION: {
            'type': 'Organization',
            'begin': 'Founded Date',
            'end': 'Defunct'
        },
        EVENT: {
            'type': 'Event',
            'begin': 'Begin Date',
            'end': 'End Date'
        },
        SOFTWARE: {
            'type': 'Software',
            'begin': 'Begin Date',
            'end': 'End Date'
        },
        BUILDING: {
            'type': 'Building',
            'begin': 'Erected Date',
            'end': 'Demolished Date',
        },
        None: {
            'type': None,
            'begin': 'Born/Founded Date',
            'end': 'Died/Defunct Date'
        }
    }

    # schema.org type URLs for the name types that have one.
    NAME_TYPE_SCHEMAS = {
        PERSONAL: 'http://schema.org/Person',
        ORGANIZATION: 'http://schema.org/Organization',
        BUILDING: 'http://schema.org/Place'
    }

    name = models.CharField(
        max_length=255,
        help_text='Please use the general reverse order: LAST, FIRST')
    # Kept in sync with `name` automatically on save.
    normalized_name = models.CharField(
        max_length=255,
        editable=False,
        help_text='NACO normalized form of the name')
    name_type = models.IntegerField(choices=NAME_TYPE_CHOICES)

    # Date, month or year of birth or incorporation of the name
    begin = models.CharField(
        max_length=25,
        blank=True,
        help_text='Conforms to EDTF format YYYY-MM-DD')

    # Date, month of year of death or un-incorporation of the name
    end = models.CharField(
        max_length=25,
        blank=True,
        help_text='Conforms to EDTF format YYYY-MM-DD')

    disambiguation = models.CharField(
        max_length=255,
        blank=True,
        help_text='Clarify to whom or what this record pertains.')
    biography = models.TextField(
        blank=True,
        help_text='Compatible with MARKDOWN')
    record_status = models.IntegerField(
        default=ACTIVE,
        choices=RECORD_STATUS_CHOICES)
    # Points at the Name this record was merged into, if any.
    merged_with = models.ForeignKey(
        'self',
        blank=True,
        null=True,
        related_name='merged_with_name',
        on_delete=models.CASCADE)

    date_created = models.DateTimeField(auto_now_add=True, editable=False)
    last_modified = models.DateTimeField(auto_now=True, editable=False)
    # App-level identifier (e.g. 'nm0000001'), assigned on first save.
    name_id = models.CharField(max_length=10, unique=True, editable=False)

    objects = NameManager()

    def get_absolute_url(self):
        """Get the absolute url to the Name detail page."""
        return reverse('name:detail', args=[self.name_id])

    def get_schema_url(self):
        """Get the appropriate schema url based on the name type."""
        return self.NAME_TYPE_SCHEMAS.get(self.name_type, None)

    def get_name_type_label(self):
        """Get the string form of the Name's name type."""
        # Relies on the choice value matching its tuple index.
        id, name_type = self.NAME_TYPE_CHOICES[self.name_type]
        return name_type

    def get_date_display(self):
        """Get the date display labels according to the Name's
        name type.

        See Name.DATE_DISPLAY_LABELS
        """
        return self.DATE_DISPLAY_LABELS.get(self.name_type)

    def has_current_location(self):
        """True if the Name has a current location in the location_set."""
        return self.location_set.current_location is not None

    def has_geocode(self):
        """True if the instance has one or more related Locations."""
        if self.location_set.count():
            return True
        else:
            return False
    has_geocode.boolean = True  # Enables icon display in the Django admin.

    def has_schema_url(self):
        """True if the instance has a schema url."""
        return self.get_schema_url() is not None

    def _is_name_type(self, type_id):
        """Test if the instance of Name is a certain
        Name Type.

        Accepts the id of the Name Type, and returns a boolean.
        """
        return type_id == self.name_type

    def is_personal(self):
        """True if the Name has the Name Type Personal."""
        return self._is_name_type(self.PERSONAL)

    def is_organization(self):
        """True if the Name has the Name Type Organization."""
        return self._is_name_type(self.ORGANIZATION)

    def is_event(self):
        """True if the Name has the Name Type Event."""
        return self._is_name_type(self.EVENT)

    def is_software(self):
        """True if the Name has the Name Type Software."""
        return self._is_name_type(self.SOFTWARE)

    def is_building(self):
        """True if the Name has the Name Type Building."""
        return self._is_name_type(self.BUILDING)

    def _is_record_status(self, status_id):
        """Test if the instance of Name has a particular
        record_status.

        Accepts the id of the record status, and returns a boolean.
        """
        return status_id == self.record_status

    def is_active(self):
        """True if the Name has the Active status."""
        return self._is_record_status(self.ACTIVE)

    def is_deleted(self):
        """True if the Name has the Deleted status."""
        return self._is_record_status(self.DELETED)

    def is_suppressed(self):
        """True if the Name has the Suppressed status."""
        return self._is_record_status(self.SUPPRESSED)

    def render_biography(self):
        """Render the Markdown biography to HTML."""
        return markdown2.markdown(self.biography)

    def __normalize_name(self):
        """Normalize the name attribute and assign it the normalized_name
        attribute.
        """
        self.normalized_name = normalizeSimplified(self.name)

    def __find_location(self):
        """Use the normalized_name attribute and the geocoding API to
        attempt to find the instance's location.

        A location is only attached if the server responds with a single
        result.
        """
        URL_RESOURCE = 'http://maps.googleapis.com/maps/api/geocode/json'
        URL_QUERY_TEMPLATE = '?address={address}&sensor=true'
        URL = URL_RESOURCE + URL_QUERY_TEMPLATE

        url = URL.format(address=quote(self.normalized_name))
        payload = json.load(urlopen(url))

        # Only add the location if the Name matched one and only one
        # location from the API.
        if payload.get('status') == "OK" and len(payload.get('results')) == 1:
            coordinate = payload['results'][0]['geometry']['location']
            self.location_set.create(latitude=coordinate['lat'],
                                     longitude=coordinate['lng'])

    def __assign_name_id(self):
        """Use the BaseTicketing object to assign a name_id."""
        if not self.name_id:
            self.name_id = str(BaseTicketing.objects.create())

    def save(self, *args, **kwargs):
        """Normalize the name, assign a name_id, then persist.

        Fix: ``Model.save`` arguments (``force_insert``, ``using``,
        ``update_fields``, ...) are now forwarded to ``super().save()``;
        they used to be accepted but silently dropped.
        """
        self.__normalize_name()
        self.__assign_name_id()
        super(Name, self).save(*args, **kwargs)
        # Geocode buildings that do not have a location yet.
        if self.is_building() and not self.location_set.count():
            self.__find_location()

    def clean(self, *args, **kwargs):
        # Call merged_with_validator here so that we can pass in
        # the model instance.
        validate_merged_with(self)
        super(Name, self).clean(*args, **kwargs)

    def __str__(self):
        return self.name_id

    class Meta:
        ordering = ['name']
        unique_together = (('name', 'name_id'),)
class LocationManager(models.Manager):
    """Custom Manager for the Location model."""

    def _get_current_location(self):
        """Filters through a Name object's related locations and
        returns the one marked as current.
        """
        # first() yields None when no location has the CURRENT status.
        return self.get_queryset().filter(status=self.model.CURRENT).first()

    # Makes the current location available as a property on
    # the RelatedManager.
    current_location = property(_get_current_location)
class Location(models.Model):
    """Defines the location of a related Name model instance."""
    # Location status values.
    CURRENT = 0
    FORMER = 1

    LOCATION_STATUS_CHOICES = (
        (CURRENT, 'current'),
        (FORMER, 'former')
    )

    HELP_TEXT = """
    <strong>
    <a target="_blank" href="https://getlatlong.net">
    getLatLong.net
    </a>
    : this service might be useful for filling in the lat/long data
    </strong>
    """

    belong_to_name = models.ForeignKey('Name', on_delete=models.CASCADE)
    latitude = models.DecimalField(
        max_digits=13,
        decimal_places=10,
        help_text=HELP_TEXT)
    longitude = models.DecimalField(
        max_digits=13,
        decimal_places=10,
        help_text=HELP_TEXT)
    status = models.IntegerField(
        choices=LOCATION_STATUS_CHOICES,
        default=CURRENT)

    objects = LocationManager()

    class Meta:
        ordering = ['status']
        base_manager_name = 'objects'

    def geo_point(self):
        """Return the coordinates as a 'lat lng' string."""
        return '{lat} {lng}'.format(lat=self.latitude, lng=self.longitude)

    def is_current(self):
        """True if the Location has a status of Current."""
        return self.CURRENT == self.status

    def save(self, *args, **kwargs):
        """Persist the location, demoting siblings when this one is CURRENT.

        Fix: ``Model.save`` arguments (``force_insert``, ``using``,
        ``update_fields``, ...) are now forwarded to ``super().save()``;
        they used to be accepted but silently dropped.
        """
        super(Location, self).save(*args, **kwargs)
        # When this instance's status is CURRENT, get all other locations
        # related to the belong_to_name, and set their status to FORMER.
        if self.is_current():
            siblings = self.belong_to_name.location_set.exclude(id=self.id)
            for former_loc in siblings:
                former_loc.status = self.FORMER
                former_loc.save()

    def __str__(self):
        return self.geo_point()
| |
# -*- coding: utf-8 -*-
"""
sphinx.ext.autosummary.generate
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Usable as a library or script to generate automatic RST source files for
items referred to in autosummary:: directives.
Each generated RST file contains a single auto*:: directive which
extracts the docstring of the referred item.
Example Makefile rule::
generate:
sphinx-autogen -o source/generated source/*.rst
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import sys
import pydoc
import optparse
import inspect
from jinja2 import FileSystemLoader, TemplateNotFound
from jinja2.sandbox import SandboxedEnvironment
from sphinx import package_dir
from ..autosummary import import_by_name, get_documenter
from sphinx.jinja2glue import BuiltinTemplateLoader
from sphinx.util.osutil import ensuredir
from sphinx.util.inspect import safe_getattr
from sphinx.pycode import ModuleAnalyzer
def main(argv=sys.argv):
    """Command-line entry point: parse the options and generate the
    autosummary rst files for the given sources."""
    usage = """%prog [OPTIONS] SOURCEFILE ..."""
    parser = optparse.OptionParser(usage.strip())
    parser.add_option("-o", "--output-dir", action="store", type="string",
                      dest="output_dir", default=None,
                      help="Directory to place all output in")
    parser.add_option("-s", "--suffix", action="store", type="string",
                      dest="suffix", default="rst",
                      help="Default suffix for files (default: %default)")
    parser.add_option("-t", "--templates", action="store", type="string",
                      dest="templates", default=None,
                      help="Custom template directory (default: %default)")
    options, args = parser.parse_args(argv[1:])

    if not args:
        parser.error('no input files given')

    generate_autosummary_docs(args, options.output_dir,
                              "." + options.suffix,
                              template_dir=options.templates)
def _simple_info(msg):
    """Default info callback: print *msg* to stdout.

    Fix: use the function form ``print(msg)``, which behaves identically
    under Python 2 (parenthesized single argument) and is required under
    Python 3; the previous ``print msg`` statement is a Python-3 syntax
    error.
    """
    print(msg)
def _simple_warn(msg):
    """Default warning callback: write ``WARNING: msg`` to stderr.

    Fix: replace the Python-2-only ``print >> sys.stderr, ...`` chevron
    syntax with an equivalent ``sys.stderr.write`` call (same text, same
    trailing newline) that is valid under both Python 2 and 3.
    """
    sys.stderr.write('WARNING: ' + msg + '\n')
# -- Generating output ---------------------------------------------------------
def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
                              warn=_simple_warn, info=_simple_info,
                              base_path=None, builder=None, template_dir=None):
    """Generate one stub RST file per item referenced by an
    ``autosummary::`` directive (with a ``:toctree:`` option) in *sources*,
    then recurse into the newly written files.

    :param sources: RST file names to scan.
    :param output_dir: directory for generated files; defaults to each
        item's own toctree path.
    :param suffix: file suffix for generated stubs (including the dot).
    :param warn, info: logging callbacks.
    :param base_path: optional prefix joined onto every source path.
    :param builder: Sphinx builder; when given, its template machinery is
        used so user template overrides apply.
    :param template_dir: extra template directory (highest priority).
    """
    showed_sources = list(sorted(sources))
    # Abbreviate very long source lists in the log output.
    if len(showed_sources) > 20:
        showed_sources = showed_sources[:10] + ['...'] + showed_sources[-10:]
    info('[autosummary] generating autosummary for: %s' %
         ', '.join(showed_sources))
    if output_dir:
        info('[autosummary] writing to %s' % output_dir)
    if base_path is not None:
        sources = [os.path.join(base_path, filename) for filename in sources]
    # create our own templating environment
    template_dirs = [os.path.join(package_dir, 'ext',
                                  'autosummary', 'templates')]
    if builder is not None:
        # allow the user to override the templates
        template_loader = BuiltinTemplateLoader()
        template_loader.init(builder, dirs=template_dirs)
    else:
        if template_dir:
            template_dirs.insert(0, template_dir)
        template_loader = FileSystemLoader(template_dirs)
    template_env = SandboxedEnvironment(loader=template_loader)
    # read
    items = find_autosummary_in_files(sources)
    # remove possible duplicates
    items = dict([(item, True) for item in items]).keys()
    # keep track of new files
    new_files = []
    # write
    for name, path, template_name in sorted(items):
        if path is None:
            # The corresponding autosummary:: directive did not have
            # a :toctree: option
            continue
        path = output_dir or os.path.abspath(path)
        ensuredir(path)
        try:
            name, obj, parent = import_by_name(name)
        except ImportError, e:
            warn('[autosummary] failed to import %r: %s' % (name, e))
            continue
        # skip base modules
        if name.endswith(".base"):
            continue
        fn = os.path.join(path, name + suffix)
        # skip it if it exists
        if os.path.isfile(fn):
            continue
        new_files.append(fn)
        f = open(fn, 'w')
        try:
            doc = get_documenter(obj, parent)
            # Explicit template wins; otherwise pick by objtype with a
            # generic fallback.
            if template_name is not None:
                template = template_env.get_template(template_name)
            else:
                try:
                    template = template_env.get_template('autosummary/%s.rst'
                                                         % doc.objtype)
                except TemplateNotFound:
                    template = template_env.get_template('autosummary/base.rst')
            def exclude_member(obj, name):
                # True if *name* on *obj* should NOT be documented.
                # NOTE(review): ``sys.skip_member`` (and ``sys.all_submodules``
                # below) are not standard — they look monkey-patched onto the
                # ``sys`` module by the build driver; confirm before reusing
                # this module standalone.
                if sys.skip_member(name, obj):
                    return True
                live = getattr(obj, name)
                if inspect.isbuiltin(live):
                    return True
                # Skip members re-exported from foreign modules.
                real_module = inspect.getmodule(live)
                if real_module is not None:
                    if real_module.__name__ in ["ctypes",
                                                "unittest"]:
                        return True
                c = getattr(obj, name)
                if inspect.isclass(c) or inspect.isfunction(c):
                    if (c.__module__!=obj.__name__+".base" and
                        c.__module__!=obj.__name__):
                        return True
                return False
            def get_members(obj, typ, include_public=[]):
                # Members of *obj* whose documenter objtype equals *typ*;
                # returns (public, all). Bound methods count as functions.
                items = []
                for name in dir(obj):
                    # skip_member
                    if exclude_member(obj, name):
                        continue
                    try:
                        documenter = get_documenter(safe_getattr(obj, name), obj)
                    except AttributeError:
                        continue
                    if documenter.objtype == typ:
                        items.append(name)
                    elif typ=='function' and documenter.objtype=='boundmethod':
                        items.append(name)
                public = [x for x in items
                          if x in include_public or not x.startswith('_')]
                return public, items
            def def_members(obj, typ, include_public=[]):
                # Like get_members() but restricted to names defined directly
                # in obj.__dict__ (i.e. not inherited); returns public only.
                items = []
                try:
                    obj_dict = safe_getattr(obj, '__dict__')
                except AttributeError:
                    return []
                defined = obj_dict.keys()
                defined.sort()
                for name in defined:
                    if exclude_member(obj, name):
                        continue
                    try:
                        documenter = get_documenter(safe_getattr(obj, name), obj)
                    except AttributeError:
                        continue
                    if documenter.objtype == typ:
                        items.append(name)
                public = [x for x in items
                          if x in include_public or not x.startswith('_')]
                return public
            def get_iattributes(obj):
                # Instance attributes documented by source comments (found
                # via ModuleAnalyzer) that are not also class attributes.
                items = []
                name = obj.__name__
                obj_attr = dir(obj)
                analyzer = ModuleAnalyzer.for_module(obj.__module__)
                attr_docs = analyzer.find_attr_docs()
                for pair, doc in attr_docs.iteritems():
                    if name!=pair[0]:
                        continue
                    if not pair[1] in obj_attr:
                        items.append({"name":pair[1],
                                      "doc":'\n '.join(doc)})
                items.sort(key=lambda d: d["name"])
                return items
            # Build the template namespace depending on what is documented.
            ns = {}
            if doc.objtype == 'module':
                ns['all_members'] = dir(obj)
                ns['classes'], ns['all_classes'] = \
                    get_members(obj, 'class')
                ns['functions'], ns['all_functions'] = \
                    get_members(obj, 'function')
                ns['exceptions'], ns['all_exceptions'] = \
                    get_members(obj, 'exception')
                ns['data'], ns['all_data'] = \
                    get_members(obj, 'data')
                documented = ns['classes']+ns['functions'] +ns['exceptions']+ns['data']
                if sys.all_submodules.has_key(obj.__name__):
                    ns['submodules'] = sys.all_submodules[obj.__name__]
                    # Hide base submodule
                    if "base" in ns['submodules']:
                        ns['submodules'].remove("base")
                    documented += ns['submodules']
                ns['members'] = ns['all_members']
                try:
                    obj_dict = safe_getattr(obj, '__dict__')
                except AttributeError:
                    obj_dict = []
                # Public names not documented elsewhere become 'members'.
                public = [x for x in obj_dict if not x.startswith('_')]
                for item in documented:
                    if item in public:
                        public.remove(item)
                public.sort()
                ns['members'] = public
                ns['constants'] = [x for x in public
                                   #if not sys.skip_member(x, obj)]
                                   if not exclude_member(obj, x)]
            elif doc.objtype == 'class':
                ns['members'] = dir(obj)
                ns['events'], ns['all_events'] = \
                    get_members(obj, 'event')
                ns['methods'], ns['all_methods'] = \
                    get_members(obj, 'method', ['__init__'])
                ns['attributes'], ns['all_attributes'] = \
                    get_members(obj, 'attribute')
                # Add instance attributes
                ns['iattributes'] = get_iattributes(obj)
                ns['def_events'] = def_members(obj, 'event')
                ns['def_methods'] = def_members(obj, 'method')
                ns['def_attributes'] = def_members(obj, 'attribute')
                # Constructor method special case
                if '__init__' in ns['methods']:
                    ns['methods'].remove('__init__')
                    if '__init__' in ns['def_methods']:
                        ns['def_methods'].remove('__init__')
                    ns['constructor']=['__init__']
                else:
                    ns['constructor']=[]
                # Anything listed but not defined locally is inherited.
                ns['inherited'] = []
                for t in ['events', 'methods', 'attributes']:
                    key = 'inh_' + t
                    ns[key]=[]
                    for item in ns[t]:
                        if not item in ns['def_' + t]:
                            ns['inherited'].append(item)
                            ns[key].append(item)
            # Split the dotted name into module / class / object parts.
            parts = name.split('.')
            if doc.objtype in ('method', 'attribute'):
                mod_name = '.'.join(parts[:-2])
                cls_name = parts[-2]
                obj_name = '.'.join(parts[-2:])
                ns['class'] = cls_name
            else:
                mod_name, obj_name = '.'.join(parts[:-1]), parts[-1]
            ns['fullname'] = name
            ns['module'] = mod_name
            ns['objname'] = obj_name
            ns['name'] = parts[-1]
            ns['objtype'] = doc.objtype
            ns['underline'] = len(name) * '='
            rendered = template.render(**ns)
            f.write(rendered)
        finally:
            f.close()
    # descend recursively to new files
    if new_files:
        generate_autosummary_docs(new_files, output_dir=output_dir,
                                  suffix=suffix, warn=warn, info=info,
                                  base_path=base_path, builder=builder,
                                  template_dir=template_dir)
# -- Finding documented entries in files ---------------------------------------
def find_autosummary_in_files(filenames):
    """Find out what items are documented in source/*.rst.

    :param filenames: iterable of RST file names to scan.
    :return: list of (name, toctree, template) tuples; see
        `find_autosummary_in_lines`.
    """
    documented = []
    for filename in filenames:
        f = open(filename, 'r')
        # try/finally guarantees the handle is closed even if reading or
        # parsing raises (the original leaked it on error).
        try:
            lines = f.read().splitlines()
            documented.extend(find_autosummary_in_lines(lines,
                                                        filename=filename))
        finally:
            f.close()
    return documented
def find_autosummary_in_docstring(name, module=None, filename=None):
    """Find out what items are documented in the given object's docstring.

    See `find_autosummary_in_lines`.
    """
    try:
        real_name, obj, parent = import_by_name(name)
        lines = pydoc.getdoc(obj).splitlines()
        return find_autosummary_in_lines(lines, module=name, filename=filename)
    except AttributeError:
        # Object has no usable docstring/attributes: nothing documented.
        pass
    except ImportError, e:
        # Best effort: report the failure and fall through to [].
        print "Failed to import '%s': %s" % (name, e)
    return []
def find_autosummary_in_lines(lines, module=None, filename=None):
    """Find out what items appear in autosummary:: directives in the
    given lines.

    Returns a list of (name, toctree, template) where *name* is a name
    of an object, *toctree* the :toctree: path of the corresponding
    autosummary directive (relative to the root of the file name), and
    *template* the value of the :template: option. *toctree* and
    *template* are ``None`` if the directive does not have the
    corresponding options set.
    """
    directive_re = re.compile(r'^(\s*)\.\.\s+autosummary::\s*')
    automodule_re = re.compile(
        r'^\s*\.\.\s+automodule::\s*([A-Za-z0-9_.]+)\s*$')
    module_re = re.compile(
        r'^\s*\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$')
    item_re = re.compile(r'^\s+(~?[_a-zA-Z][a-zA-Z0-9_.]*)\s*.*?')
    toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$')
    template_arg_re = re.compile(r'^\s+:template:\s*(.*?)\s*$')

    found = []
    toctree = None
    template = None
    current_module = module
    inside_directive = False
    base_indent = ""

    for line in lines:
        if inside_directive:
            # Directive options come first: :toctree: and :template:.
            match = toctree_arg_re.match(line)
            if match:
                toctree = match.group(1)
                if filename:
                    toctree = os.path.join(os.path.dirname(filename),
                                           toctree)
                continue
            match = template_arg_re.match(line)
            if match:
                template = match.group(1).strip()
                continue
            if line.strip().startswith(':'):
                continue  # skip other options
            # Indented entries name the documented items.
            match = item_re.match(line)
            if match:
                name = match.group(1).strip()
                if name.startswith('~'):
                    name = name[1:]
                if current_module and \
                   not name.startswith(current_module + '.'):
                    name = "%s.%s" % (current_module, name)
                found.append((name, toctree, template))
                continue
            if not line.strip() or line.startswith(base_indent + " "):
                continue
            # Anything else ends the directive body.
            inside_directive = False

        match = directive_re.match(line)
        if match:
            inside_directive = True
            base_indent = match.group(1)
            toctree = None
            template = None
            continue

        match = automodule_re.search(line)
        if match:
            current_module = match.group(1).strip()
            # recurse into the automodule docstring
            found.extend(find_autosummary_in_docstring(
                current_module, filename=filename))
            continue

        match = module_re.match(line)
        if match:
            current_module = match.group(2)
            continue

    return found
# Allow running this module directly (sphinx-autogen entry point).
if __name__ == '__main__':
    main()
| |
from pyvisdk.base.managed_object_types import ManagedObjectTypes
from pyvisdk.base.base_entity import BaseEntity
import logging
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
class LicenseManager(BaseEntity):
    '''This managed object type controls entitlements for a given VMware platform.
    VMware platforms include VirtualCenter, ESX Server, VMware Server, Workstation
    and Player. Entitlements define what software capabilities this host may
    use.

    Entitlements are identified by a short string 'key'. Keys can represent
    either a particular edition (Full, Starter) or a particular feature/function
    (featureKey) (backup, nas). An edition implies zero, one or more functions
    which are expressed, denied or optional. For example a 'Full' edition
    includes the 'iscsi' function but a Starter edition might disallow it.

    Which edition a given VMware platform uses can be defined at any time.
    Generally this is done right after first install and boot as installation
    software may not set it. For editions that are similar in nature, any future
    changes to edition type will only impact future requests for functionality.
    Current functionality is left unaffected. The same is true for optional
    functions enabled/disabled after some period of time. For dissimilar
    editions, such transitions may require entering maintenance mode first,
    else an exception of InvalidState will be thrown.

    To specify the edition type and any optional functions, use updateLicense
    for ESX Server and addLicense followed by
    LicenseAssignmentManager.updateAssignedLicense for VirtualCenter.

    When an edition is specified for a given host, the cost of that edition
    (how many licenses are needed) is determined. The cost is computed using
    the license's CostUnit value multiplied by the number of units activated.
    For example, when a VMware platform is set to an edition which uses a
    'cpuPackage' on a two socket server, two licenses would be needed to
    successfully install that edition.'''
    # NOTE: automatically generated delegation wrapper — every method simply
    # forwards to the server-side operation of the same name.
    def __init__(self, core, name=None, ref=None, type=ManagedObjectTypes.LicenseManager):
        super(LicenseManager, self).__init__(core, name=name, ref=ref, type=type)
    @property
    def diagnostics(self):
        '''Return current diagnostic information.'''
        return self.update('diagnostics')
    @property
    def evaluation(self):
        '''Evaluation license state (vSphere API 4.0).'''
        return self.update('evaluation')
    @property
    def featureInfo(self):
        '''The list of features that can be licensed.'''
        return self.update('featureInfo')
    @property
    def licenseAssignmentManager(self):
        '''License Assignment Manager'''
        return self.update('licenseAssignmentManager')
    @property
    def licensedEdition(self):
        '''The product's license edition. The edition defines which product license the
        server requires. This, in turn, determines the core set of functionalities
        provided by the product and the additional features that can be licensed. If no
        edition is set the property is set to the empty string (""). To set the edition
        use SetLicenseEdition.'''
        return self.update('licensedEdition')
    @property
    def licenses(self):
        '''Get information about all the licenses available.'''
        return self.update('licenses')
    @property
    def source(self):
        '''Set or return a data object type of LocalLicense or LicenseServer.'''
        return self.update('source')
    @property
    def sourceAvailable(self):
        '''Current state of the license source. License sources that are LocalSource are
        always available.'''
        return self.update('sourceAvailable')
    def AddLicense(self, licenseKey, labels=None):
        '''Adds a license to the inventory of available licenses.

        :param licenseKey: A license. E.g. a serial license.
        :param labels: array of key-value labels. Ignored by ESX Server.
        '''
        return self.delegate("AddLicense")(licenseKey, labels)
    def CheckLicenseFeature(self, featureKey, host=None):
        '''<b>Deprecated.</b> <i>As of vSphere API 4.0, use QueryAssignedLicenses
        instead.</i> Returns whether or not a given feature is enabled.

        :param host: Host to act on if LicenseManager is not on a host.
        :param featureKey: Name of the feature to check.
        '''
        return self.delegate("CheckLicenseFeature")(host, featureKey)
    def ConfigureLicenseSource(self, licenseSource, host=None):
        '''<b>Deprecated.</b> <i>As of vSphere API 4.0, use UpdateLicense instead.</i>
        Allows for reconfiguration of the License Manager license source.

        :param host: Host for which the license manager should be reconfigured.
        :param licenseSource: ServedSource or LocalSource.
        '''
        return self.delegate("ConfigureLicenseSource")(host, licenseSource)
    def DecodeLicense(self, licenseKey):
        '''Decodes licensing information on the license specified.

        :param licenseKey: A license. E.g. a serial license.
        '''
        return self.delegate("DecodeLicense")(licenseKey)
    def DisableFeature(self, featureKey, host=None):
        '''<b>Deprecated.</b> <i>As of vSphere API 4.0, use RemoveAssignedLicense
        instead.</i> Release licenses for an optional feature.

        :param host: Host to act on if LicenseManager is not on a host.
        :param featureKey: key of the feature to disable.
        '''
        return self.delegate("DisableFeature")(host, featureKey)
    def EnableFeature(self, featureKey, host=None):
        '''<b>Deprecated.</b> <i>As of vSphere API 4.0, use UpdateAssignedLicense
        instead.</i> Enable a feature that has an optional state.

        :param host: Host to act on if LicenseManager is not on a host.
        :param featureKey: Name of the feature to enable.
        '''
        return self.delegate("EnableFeature")(host, featureKey)
    def QueryLicenseSourceAvailability(self, host=None):
        '''<b>Deprecated.</b> <i>As of vSphere API 4.0, use QueryAssignedLicenses
        instead.</i> Queries the current license source for total and available
        licenses available for each feature known to this system.

        :param host: Use the license source of the specified host.
        '''
        return self.delegate("QueryLicenseSourceAvailability")(host)
    def QueryLicenseUsage(self, host=None):
        '''<b>Deprecated.</b> <i>As of vSphere API 4.0, use QueryAssignedLicenses
        instead.</i> Returns the license usage. The license usage is a list of
        supported features and the number of licenses that have been reserved.

        :param host: Host to query for usage. If missing, query the server the LicenseManager is on.
        '''
        return self.delegate("QueryLicenseUsage")(host)
    def QuerySupportedFeatures(self, host=None):
        '''<b>Deprecated.</b> <i>As of vSphere API 4.0, use QueryAssignedLicenses
        instead.</i> Queries the current license source for a list of available
        licenses that can be licensed from this system.

        :param host: Use the license source of the specified host.
        '''
        return self.delegate("QuerySupportedFeatures")(host)
    def RemoveLicense(self, licenseKey):
        '''Remove license from the available set.

        :param licenseKey: A license. E.g. a serial license.
        '''
        return self.delegate("RemoveLicense")(licenseKey)
    def RemoveLicenseLabel(self, licenseKey, labelKey):
        '''Remove a license's label.

        :param licenseKey: A license.
        :param labelKey: A label key.
        '''
        return self.delegate("RemoveLicenseLabel")(licenseKey, labelKey)
    def SetLicenseEdition(self, host=None, featureKey=None):
        '''<b>Deprecated.</b> <i>As of vSphere API 4.0, use QueryAssignedLicenses
        instead.</i> Defines the product's license edition. The edition defines which
        product license the server requires. This, in turn, determines the core set of
        functionality provided by the product and the additional features that can be
        licensed.

        :param host: Host to act on if LicenseManager is not on a host.
        :param featureKey: Name of edition feature to select. If featureKey is not set or set to empty string, the product becomes unlicensed.
        '''
        return self.delegate("SetLicenseEdition")(host, featureKey)
    def UpdateLicense(self, licenseKey, labels=None):
        '''Updates the available licenses to the one provided in licenseKey. This is the
        same as removing all the licenses using RemoveLicense and adding licenseKey
        using AddLicense. If the optional parameter labels is specified this is the
        same as calling updateLicense without the optional parameter and calling
        updateLabel for each pair in the labels array.

        :param licenseKey: A license. E.g. a serial license.
        :param labels: array of key-value labels.
        '''
        return self.delegate("UpdateLicense")(licenseKey, labels)
    def UpdateLicenseLabel(self, licenseKey, labelKey, labelValue):
        '''Update a license's label. It creates a label entry if the labelKey doesn't
        already exist.

        :param licenseKey: A license.
        :param labelKey: A label key.
        :param labelValue: Value for the label.
        '''
        return self.delegate("UpdateLicenseLabel")(licenseKey, labelKey, labelValue)
| |
# -*- coding: utf-8 -*-
"""Prompt manager."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import re
#------------------------------------------------------------------------------
# Base Prompt manager
#------------------------------------------------------------------------------
def _to_lines(code):
return [line.rstrip() for line in code.rstrip().splitlines()]
def _to_code(lines):
return '\n'.join(line.rstrip() for line in lines)
def _add_line_prefix(lines, prefix):
return [prefix + line for line in lines]
def _template_to_regex(template):
regex = template
# Escape special characters.
for char in r'{}[]()+*?:-':
regex = regex.replace(char, '\\' + char)
regex = regex.replace(r'\{n\}', r'\d+')
return regex
def _starts_with_regex(line, regex):
"""Return whether a line starts with a regex or not."""
if not regex.startswith('^'):
regex = '^' + regex
reg = re.compile(regex)
return reg.match(line)
class BasePromptManager(object):
    """Add and remove prompt from code cells."""
    input_prompt_template = ''  # may contain {n} for the input number
    output_prompt_template = ''
    input_prompt_regex = ''   # derived from the template unless overridden
    output_prompt_regex = ''
    def __init__(self):
        """Reset the prompt counter and derive regexes from the templates
        when the subclass did not set them explicitly."""
        self.reset()
        if not self.input_prompt_regex:
            self.input_prompt_regex = _template_to_regex(
                self.input_prompt_template)
        if not self.output_prompt_regex:
            self.output_prompt_regex = _template_to_regex(
                self.output_prompt_template)
    def reset(self):
        """Reset the prompt number to 1."""
        self._number = 1
    def _replace_template(self, pattern, **by):
        """Fill the ``{n}`` placeholder in *pattern* (with the current
        prompt number by default); patterns without ``{n}`` pass through."""
        if not by:
            by = dict(n=self._number)
        if '{n}' in pattern:
            return pattern.format(**by)
        else:
            return pattern
    @property
    def input_prompt(self):
        """Input prompt string for the current prompt number."""
        return self._replace_template(self.input_prompt_template)
    @property
    def output_prompt(self):
        """Output prompt string for the current prompt number."""
        return self._replace_template(self.output_prompt_template)
    def is_input(self, line):
        """Return whether a code line is an input, based on the input
        prompt."""
        return _starts_with_regex(line, self.input_prompt_regex)
    def split_input_output(self, text):
        """Split code into input lines and output lines, according to the
        input and output prompt templates."""
        lines = _to_lines(text)
        # Input is the leading run of lines matching the input prompt;
        # everything after the first non-matching line is output.
        i = 0
        for line in lines:
            if _starts_with_regex(line, self.input_prompt_regex):
                i += 1
            else:
                break
        return lines[:i], lines[i:]
    def from_cell(self, input, output):
        """Convert input and output to code text with prompts."""
        raise NotImplementedError()
    def to_cell(self, code):
        """Convert code text with prompts to input and output."""
        raise NotImplementedError()
#------------------------------------------------------------------------------
# Simple prompt manager
#------------------------------------------------------------------------------
class SimplePromptManager(BasePromptManager):
    """No prompt number, same input prompt at every line, idem for output."""
    input_prompt_template = ''
    output_prompt_template = ''
    def from_cell(self, input, output):
        """Prefix every input/output line with its prompt and join them."""
        in_lines = _add_line_prefix(_to_lines(input), self.input_prompt)
        out_lines = _add_line_prefix(_to_lines(output), self.output_prompt)
        return _to_code(in_lines) + '\n' + _to_code(out_lines)
    def to_cell(self, code):
        """Strip the fixed-width prompts from *code*; return (input, output)."""
        in_lines, out_lines = self.split_input_output(code)
        in_width = len(self.input_prompt_template)
        out_width = len(self.output_prompt_template)
        input = _to_code([line[in_width:] for line in in_lines])
        output = _to_code([line[out_width:] for line in out_lines])
        return input.rstrip(), output.rstrip()
#------------------------------------------------------------------------------
# IPython prompt manager
#------------------------------------------------------------------------------
class IPythonPromptManager(BasePromptManager):
    # 'In [n]: ' / 'Out[n]: ' prompts sharing an auto-incremented number;
    # continuation lines are recognised by a run of 6+ spaces.
    input_prompt_template = 'In [{n}]: '
    input_prompt_regex = '(In \[\d+\]\: | {6,})'
    output_prompt_template = 'Out[{n}]: '
    def _add_prompt(self, lines, prompt):
        """Prefix the first line with *prompt* and align the remaining
        lines with spaces of the same width. Mutates and returns *lines*."""
        lines[:1] = _add_line_prefix(lines[:1], prompt)
        lines[1:] = _add_line_prefix(lines[1:], ' ' * len(prompt))
        return lines
    def from_cell(self, input, output=None):
        """Render *input*/*output* with In/Out prompts and advance the
        prompt number.

        NOTE(review): despite the default, ``output=None`` would crash in
        ``_to_lines`` — callers apparently always pass a string; confirm.
        """
        input_l = _to_lines(input)
        output_l = _to_lines(output)
        input_l = self._add_prompt(input_l, self.input_prompt)
        output_l = self._add_prompt(output_l, self.output_prompt)
        input_p = _to_code(input_l)
        output_p = _to_code(output_l)
        self._number += 1
        return input_p + '\n' + output_p
    def to_cell(self, text):
        """Strip In/Out prompts from *text*; return (input, output).

        Asserts that both sections start with their respective prompt;
        assumes a non-empty output section (``output_l[0]``).
        """
        input_l, output_l = self.split_input_output(text)
        m = _starts_with_regex(input_l[0], self.input_prompt_regex)
        assert m
        input_prompt = m.group(0)
        # Strip the prompt width from every line of the section.
        n_in = len(input_prompt)
        input_l = [line[n_in:] for line in input_l]
        input = _to_code(input_l)
        m = _starts_with_regex(output_l[0], self.output_prompt_regex)
        assert m
        output_prompt = m.group(0)
        n_out = len(output_prompt)
        output_l = [line[n_out:] for line in output_l]
        output = _to_code(output_l)
        return input, output
#------------------------------------------------------------------------------
# Python prompt manager
#------------------------------------------------------------------------------
class PythonPromptManager(SimplePromptManager):
    # Standard interactive-interpreter prompts: '>>> ' for statements,
    # '... ' for continuation lines, no output prompt.
    input_prompt_template = '>>> '
    second_input_prompt_template = '... '
    input_prompt_regex = r'>>>|\.\.\.'
    output_prompt_template = ''
    def from_cell(self, input, output):
        """Prefix input lines with >>> / ... prompts and append the raw
        output.

        A '%%' cell magic locks the continuation prompt on for the rest of
        the cell; comments, decorators, blank lines and indented lines also
        force the next line onto the continuation prompt.
        """
        lines = _to_lines(input)
        first = self.input_prompt_template
        second = self.second_input_prompt_template
        lines_prompt = []
        prompt = first
        lock = False
        for line in lines:
            if line.startswith('%%'):
                # Cell magic: everything below belongs to it.
                lines_prompt.append(prompt + line)
                prompt = second
                lock = True
            elif line.startswith('#') or line.startswith('@'):
                # Comment or decorator: next line continues the construct.
                lines_prompt.append(prompt + line)
                prompt = second
            # Empty line = second prompt.
            elif line.rstrip() == '':
                lines_prompt.append((second + line).rstrip())
            elif line.startswith(' '):
                # Indented line: body of a compound statement.
                prompt = second
                lines_prompt.append(prompt + line)
                if not lock:
                    prompt = first
            else:
                lines_prompt.append(prompt + line)
                if not lock:
                    prompt = first
        return _to_code(lines_prompt) + '\n' + output.rstrip()
def create_prompt(prompt):
    """Create a prompt manager.

    Parameters
    ----------
    prompt : str or class deriving from BasePromptManager
        The prompt name ('python' or 'ipython') or a custom PromptManager
        class.
    """
    selected = 'python' if prompt is None else prompt
    if selected == 'python':
        selected = PythonPromptManager
    elif selected == 'ipython':
        selected = IPythonPromptManager
    # Instantiate the class unless an instance was passed in.
    if isinstance(selected, BasePromptManager):
        return selected
    return selected()
| |
"""All constants related to the ZHA component."""
import enum
import logging
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR
from homeassistant.components.cover import DOMAIN as COVER
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER
from homeassistant.components.fan import DOMAIN as FAN
from homeassistant.components.light import DOMAIN as LIGHT
from homeassistant.components.lock import DOMAIN as LOCK
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.switch import DOMAIN as SWITCH
# --- Keys used in attribute/service payloads and device info dicts ---
ATTR_ARGS = "args"
ATTR_ATTRIBUTE = "attribute"
ATTR_AVAILABLE = "available"
ATTR_CLUSTER_ID = "cluster_id"
ATTR_CLUSTER_TYPE = "cluster_type"
ATTR_COMMAND = "command"
ATTR_COMMAND_TYPE = "command_type"
ATTR_DEVICE_TYPE = "device_type"
ATTR_ENDPOINT_ID = "endpoint_id"
ATTR_IEEE = "ieee"
ATTR_LAST_SEEN = "last_seen"
ATTR_LEVEL = "level"
ATTR_LQI = "lqi"
ATTR_MANUFACTURER = "manufacturer"
ATTR_MANUFACTURER_CODE = "manufacturer_code"
ATTR_MEMBERS = "members"
ATTR_MODEL = "model"
ATTR_NAME = "name"
ATTR_NWK = "nwk"
ATTR_POWER_SOURCE = "power_source"
ATTR_QUIRK_APPLIED = "quirk_applied"
ATTR_QUIRK_CLASS = "quirk_class"
ATTR_RSSI = "rssi"
ATTR_SIGNATURE = "signature"
ATTR_TYPE = "type"
ATTR_VALUE = "value"
ATTR_WARNING_DEVICE_DURATION = "duration"
ATTR_WARNING_DEVICE_MODE = "mode"
ATTR_WARNING_DEVICE_STROBE = "strobe"
ATTR_WARNING_DEVICE_STROBE_DUTY_CYCLE = "duty_cycle"
ATTR_WARNING_DEVICE_STROBE_INTENSITY = "intensity"
# Serial baud rates accepted for the radio connection.
BAUD_RATES = [2400, 4800, 9600, 14400, 19200, 38400, 57600, 115200, 128000, 256000]
BINDINGS = "bindings"
# --- Channel identifiers (one per handled cluster type) ---
CHANNEL_ACCELEROMETER = "accelerometer"
CHANNEL_ATTRIBUTE = "attribute"
CHANNEL_BASIC = "basic"
CHANNEL_COLOR = "light_color"
CHANNEL_COVER = "window_covering"
CHANNEL_DOORLOCK = "door_lock"
CHANNEL_ELECTRICAL_MEASUREMENT = "electrical_measurement"
CHANNEL_EVENT_RELAY = "event_relay"
CHANNEL_FAN = "fan"
CHANNEL_HUMIDITY = "humidity"
CHANNEL_IAS_WD = "ias_wd"
CHANNEL_ILLUMINANCE = "illuminance"
CHANNEL_LEVEL = ATTR_LEVEL
CHANNEL_MULTISTATE_INPUT = "multistate_input"
CHANNEL_OCCUPANCY = "occupancy"
CHANNEL_ON_OFF = "on_off"
CHANNEL_POWER_CONFIGURATION = "power"
CHANNEL_PRESSURE = "pressure"
CHANNEL_SMARTENERGY_METERING = "smartenergy_metering"
CHANNEL_TEMPERATURE = "temperature"
CHANNEL_ZDO = "zdo"
CHANNEL_ZONE = ZONE = "ias_zone"
# --- Cluster metadata keys ---
CLUSTER_COMMAND_SERVER = "server"
CLUSTER_COMMANDS_CLIENT = "client_commands"
CLUSTER_COMMANDS_SERVER = "server_commands"
CLUSTER_TYPE_IN = "in"
CLUSTER_TYPE_OUT = "out"
# Home Assistant platforms ZHA creates entities for.
COMPONENTS = (BINARY_SENSOR, COVER, DEVICE_TRACKER, FAN, LIGHT, LOCK, SENSOR, SWITCH)
# --- Configuration option keys ---
CONF_BAUDRATE = "baudrate"
CONF_DATABASE = "database_path"
CONF_DEVICE_CONFIG = "device_config"
CONF_ENABLE_QUIRKS = "enable_quirks"
CONF_RADIO_TYPE = "radio_type"
CONF_USB_PATH = "usb_path"
CONTROLLER = "controller"
# --- Keys into hass.data / runtime storage ---
DATA_DEVICE_CONFIG = "zha_device_config"
DATA_ZHA = "zha"
DATA_ZHA_CONFIG = "config"
DATA_ZHA_BRIDGE_ID = "zha_bridge_id"
DATA_ZHA_CORE_EVENTS = "zha_core_events"
DATA_ZHA_DISPATCHERS = "zha_dispatchers"
DATA_ZHA_GATEWAY = "zha_gateway"
# --- Logger names used by the debug-log relay ---
DEBUG_COMP_BELLOWS = "bellows"
DEBUG_COMP_ZHA = "homeassistant.components.zha"
DEBUG_COMP_ZIGPY = "zigpy"
DEBUG_COMP_ZIGPY_DECONZ = "zigpy_deconz"
DEBUG_COMP_ZIGPY_XBEE = "zigpy_xbee"
DEBUG_COMP_ZIGPY_ZIGATE = "zigpy_zigate"
DEBUG_LEVEL_CURRENT = "current"
DEBUG_LEVEL_ORIGINAL = "original"
# Log level applied to each component when debug mode is enabled.
DEBUG_LEVELS = {
    DEBUG_COMP_BELLOWS: logging.DEBUG,
    DEBUG_COMP_ZHA: logging.DEBUG,
    DEBUG_COMP_ZIGPY: logging.DEBUG,
    DEBUG_COMP_ZIGPY_XBEE: logging.DEBUG,
    DEBUG_COMP_ZIGPY_DECONZ: logging.DEBUG,
    DEBUG_COMP_ZIGPY_ZIGATE: logging.DEBUG,
}
DEBUG_RELAY_LOGGERS = [DEBUG_COMP_ZHA, DEBUG_COMP_ZIGPY]
# --- Defaults ---
DEFAULT_RADIO_TYPE = "ezsp"
DEFAULT_BAUDRATE = 57600
DEFAULT_DATABASE_NAME = "zigbee.db"
DISCOVERY_KEY = "zha_discovery_info"
DOMAIN = "zha"
GROUP_ID = "group_id"
GROUP_IDS = "group_ids"
GROUP_NAME = "group_name"
# First manufacturer-specific cluster id.
MFG_CLUSTER_ID_START = 0xFC00
POWER_MAINS_POWERED = "Mains"
POWER_BATTERY_OR_UNKNOWN = "Battery or Unknown"
class RadioType(enum.Enum):
    """Possible options for radio type."""

    ezsp = "ezsp"
    xbee = "xbee"
    deconz = "deconz"
    zigate = "zigate"

    @classmethod
    def list(cls):
        """Return list of enum's values.

        Iterates ``cls`` (not the hard-coded class name) so the classmethod
        also works correctly if the enum is ever renamed or mixed in.
        """
        return [entry.value for entry in cls]
# --- Attribute reporting configuration ---
# Tuples are (min interval, max interval, reportable change) — see the
# REPORT_CONFIG_* combinations below.
REPORT_CONFIG_MAX_INT = 900
REPORT_CONFIG_MAX_INT_BATTERY_SAVE = 10800
REPORT_CONFIG_MIN_INT = 30
REPORT_CONFIG_MIN_INT_ASAP = 1
REPORT_CONFIG_MIN_INT_IMMEDIATE = 0
REPORT_CONFIG_MIN_INT_OP = 5
REPORT_CONFIG_MIN_INT_BATTERY_SAVE = 3600
REPORT_CONFIG_RPT_CHANGE = 1
REPORT_CONFIG_DEFAULT = (
    REPORT_CONFIG_MIN_INT,
    REPORT_CONFIG_MAX_INT,
    REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_ASAP = (
    REPORT_CONFIG_MIN_INT_ASAP,
    REPORT_CONFIG_MAX_INT,
    REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_BATTERY_SAVE = (
    REPORT_CONFIG_MIN_INT_BATTERY_SAVE,
    REPORT_CONFIG_MAX_INT_BATTERY_SAVE,
    REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_IMMEDIATE = (
    REPORT_CONFIG_MIN_INT_IMMEDIATE,
    REPORT_CONFIG_MAX_INT,
    REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_OP = (
    REPORT_CONFIG_MIN_INT_OP,
    REPORT_CONFIG_MAX_INT,
    REPORT_CONFIG_RPT_CHANGE,
)
# --- Sensor type identifiers ---
SENSOR_ACCELERATION = "acceleration"
SENSOR_BATTERY = "battery"
SENSOR_ELECTRICAL_MEASUREMENT = CHANNEL_ELECTRICAL_MEASUREMENT
SENSOR_GENERIC = "generic"
SENSOR_HUMIDITY = CHANNEL_HUMIDITY
SENSOR_ILLUMINANCE = CHANNEL_ILLUMINANCE
SENSOR_METERING = "metering"
SENSOR_OCCUPANCY = CHANNEL_OCCUPANCY
SENSOR_OPENING = "opening"
SENSOR_PRESSURE = CHANNEL_PRESSURE
SENSOR_TEMPERATURE = CHANNEL_TEMPERATURE
SENSOR_TYPE = "sensor_type"
# --- Dispatcher signal names ---
SIGNAL_ATTR_UPDATED = "attribute_updated"
SIGNAL_AVAILABLE = "available"
SIGNAL_MOVE_LEVEL = "move_level"
SIGNAL_REMOVE = "remove"
SIGNAL_SET_LEVEL = "set_level"
SIGNAL_STATE_ATTR = "update_state_attribute"
UNKNOWN = "unknown"
UNKNOWN_MANUFACTURER = "unk_manufacturer"
UNKNOWN_MODEL = "unk_model"
# --- IAS warning-device (siren/strobe) command values ---
WARNING_DEVICE_MODE_STOP = 0
WARNING_DEVICE_MODE_BURGLAR = 1
WARNING_DEVICE_MODE_FIRE = 2
WARNING_DEVICE_MODE_EMERGENCY = 3
WARNING_DEVICE_MODE_POLICE_PANIC = 4
WARNING_DEVICE_MODE_FIRE_PANIC = 5
WARNING_DEVICE_MODE_EMERGENCY_PANIC = 6
WARNING_DEVICE_STROBE_NO = 0
WARNING_DEVICE_STROBE_YES = 1
WARNING_DEVICE_SOUND_LOW = 0
WARNING_DEVICE_SOUND_MEDIUM = 1
WARNING_DEVICE_SOUND_HIGH = 2
WARNING_DEVICE_SOUND_VERY_HIGH = 3
WARNING_DEVICE_STROBE_LOW = 0x00
WARNING_DEVICE_STROBE_MEDIUM = 0x01
WARNING_DEVICE_STROBE_HIGH = 0x02
WARNING_DEVICE_STROBE_VERY_HIGH = 0x03
WARNING_DEVICE_SQUAWK_MODE_ARMED = 0
WARNING_DEVICE_SQUAWK_MODE_DISARMED = 1
# --- Gateway message/event type identifiers ---
ZHA_DISCOVERY_NEW = "zha_discovery_new_{}"
ZHA_GW_MSG = "zha_gateway_message"
ZHA_GW_MSG_DEVICE_FULL_INIT = "device_fully_initialized"
ZHA_GW_MSG_DEVICE_INFO = "device_info"
ZHA_GW_MSG_DEVICE_JOINED = "device_joined"
ZHA_GW_MSG_DEVICE_REMOVED = "device_removed"
ZHA_GW_MSG_GROUP_ADDED = "group_added"
ZHA_GW_MSG_GROUP_INFO = "group_info"
ZHA_GW_MSG_GROUP_MEMBER_ADDED = "group_member_added"
ZHA_GW_MSG_GROUP_MEMBER_REMOVED = "group_member_removed"
ZHA_GW_MSG_GROUP_REMOVED = "group_removed"
ZHA_GW_MSG_LOG_ENTRY = "log_entry"
ZHA_GW_MSG_LOG_OUTPUT = "log_output"
ZHA_GW_MSG_RAW_INIT = "raw_device_initialized"
ZHA_GW_RADIO = "radio"
ZHA_GW_RADIO_DESCRIPTION = "radio_description"
| |
#!/usr/bin/env python
# Phusion Passenger - https://www.phusionpassenger.com/
# Copyright (c) 2010-2014 Phusion Holding B.V.
#
# "Passenger", "Phusion Passenger" and "Union Station" are registered
# trademarks of Phusion Holding B.V.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys, os, re, imp, threading, signal, traceback, socket, select, struct, logging, errno
import tempfile
options = {}
def abort(message):
    """Write *message* to stderr and terminate with exit status 1."""
    sys.stderr.write("%s\n" % message)
    sys.exit(1)
def readline():
    """Read one line from stdin; raise EOFError at end of stream."""
    line = sys.stdin.readline()
    if not line:
        raise EOFError
    return line
def handshake_and_read_startup_request():
    """Perform the Passenger control handshake and read startup options.

    Reads "Name: value" header lines from stdin into the global ``options``
    dict until a blank line is seen.
    """
    global options
    print("!> I have control 1.0")
    if readline() != "You have control 1.0\n":
        abort("Invalid initialization header")
    line = readline()
    while line != "\n":
        # Split on the first ":" only; the original maxsplit of 2 silently
        # truncated header values that themselves contained ": ".
        result = re.split(': *', line.strip(), maxsplit=1)
        name = result[0]
        value = result[1]
        options[name] = value
        line = readline()
def load_app():
    """Import the application's startup file as module ``passenger_wsgi``."""
    global options
    sys.path.insert(0, os.getcwd())
    return imp.load_source('passenger_wsgi',
                           options.get('startup_file', 'passenger_wsgi.py'))
def create_server_socket():
    """Create, bind and listen on a Unix server socket with a random name.

    Returns (filename, socket). Retries up to 128 times when the randomly
    chosen path is already in use; any other bind error is fatal.
    """
    global options
    # Unix socket paths have a hard kernel length limit; truncate to fit.
    UNIX_PATH_MAX = int(options.get('UNIX_PATH_MAX', 100))
    if 'socket_dir' in options:
        socket_dir = options['socket_dir']
        socket_prefix = 'wsgi'
    else:
        socket_dir = tempfile.gettempdir()
        socket_prefix = 'PsgWsgiApp'
    i = 0
    while i < 128:
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        socket_suffix = format(struct.unpack('Q', os.urandom(8))[0], 'x')
        filename = socket_dir + '/' + socket_prefix + '.' + socket_suffix
        filename = filename[0:UNIX_PATH_MAX]
        try:
            s.bind(filename)
            break
        except socket.error as e:
            # Close the unbound socket so failed attempts don't leak file
            # descriptors (the original recreated `s` without closing it).
            s.close()
            if e.errno == errno.EADDRINUSE:
                i += 1
                if i == 128:
                    raise e
            else:
                raise e
    s.listen(1000)
    return (filename, s)
def install_signal_handlers():
    """Install a SIGABRT handler that dumps all thread stacks and exits."""

    def dump_stacks(sig, frame):
        names = dict((th.ident, th.name) for th in threading.enumerate())
        lines = []
        for thread_id, stack in sys._current_frames().items():
            lines.append("\n# Thread: %s(%d)" % (names.get(thread_id, ""), thread_id))
            for filename, lineno, func, text in traceback.extract_stack(stack):
                lines.append(' File: "%s", line %d, in %s' % (filename, lineno, func))
                if text:
                    lines.append(" %s" % (text.strip()))
        print("\n".join(lines))

    def dump_and_exit(sig, frame):
        dump_stacks(sig, frame)
        sys.exit(1)

    # Unfortunately, there's no way to install a signal handler that prints
    # the backtrace without interrupting the current system call. os.siginterrupt()
    # doesn't seem to work properly either. That is why we only have a SIGABRT
    # handler and no SIGQUIT handler.
    signal.signal(signal.SIGABRT, dump_and_exit)
def advertise_sockets(socket_filename):
    """Tell Passenger where the main request socket lives, then terminate
    the advertisement block with a bare '!> ' line."""
    sys.stdout.write("!> socket: main;unix:%s;session;1\n" % socket_filename)
    sys.stdout.write("!> \n")
# Version-neutral helpers: bytes/str conversion and exception re-raising
# differ between Python 2 and 3, so the right implementations are picked
# once at import time.
if sys.version_info[0] >= 3:
    def reraise_exception(exc_info):
        raise exc_info[0].with_traceback(exc_info[1], exc_info[2])

    def bytes_to_str(b):
        return b.decode()

    def str_to_bytes(s):
        return s.encode('latin-1')
else:
    # Python 2: the three-argument raise form is a syntax error on Python 3,
    # so it must be hidden inside exec().
    def reraise_exception(exc_info):
        exec("raise exc_info[0], exc_info[1], exc_info[2]")

    # On Python 2 bytes and str are the same type; no conversion needed.
    def bytes_to_str(b):
        return b

    def str_to_bytes(s):
        return s
class RequestHandler:
    """Accepts connections from Passenger on a Unix socket and dispatches
    each one as a WSGI request to the loaded application."""

    def __init__(self, server_socket, owner_pipe, app):
        # owner_pipe doubles as a shutdown signal: when it becomes readable
        # (the parent closed its end), the main loop exits.
        self.server = server_socket
        self.owner_pipe = owner_pipe
        self.app = app

    def main_loop(self):
        """Serve requests until the owner pipe signals shutdown or we are
        interrupted by KeyboardInterrupt."""
        done = False
        try:
            while not done:
                client, address = self.accept_connection()
                if not client:
                    done = True
                    break
                socket_hijacked = False
                try:
                    try:
                        env, input_stream = self.parse_request(client)
                        if env:
                            # 'ping' is a Passenger health-check pseudo-method.
                            if env['REQUEST_METHOD'] == 'ping':
                                self.process_ping(env, input_stream, client)
                            else:
                                socket_hijacked = self.process_request(env, input_stream, client)
                    except KeyboardInterrupt:
                        done = True
                    except IOError:
                        e = sys.exc_info()[1]
                        # EPIPE on our own client socket just means the client
                        # went away; anything else is worth logging.
                        if not getattr(e, 'passenger', False) or e.errno != errno.EPIPE:
                            logging.exception("WSGI application raised an I/O exception!")
                    except Exception:
                        logging.exception("WSGI application raised an exception!")
                finally:
                    if not socket_hijacked:
                        try:
                            # Shutdown the socket like this just in case the app
                            # spawned a child process that keeps it open.
                            client.shutdown(socket.SHUT_WR)
                        except:
                            pass
                        try:
                            client.close()
                        except:
                            pass
        except KeyboardInterrupt:
            pass

    def accept_connection(self):
        """Block until a client connects or the owner pipe becomes readable;
        returns (None, None) in the shutdown case."""
        result = select.select([self.owner_pipe, self.server.fileno()], [], [])[0]
        if self.server.fileno() in result:
            return self.server.accept()
        else:
            return (None, None)

    def parse_request(self, client):
        """Read one request preamble from *client*.

        Wire format: a 4-byte big-endian header-block size, then that many
        bytes of NUL-separated alternating key/value strings. Returns
        (env_dict, client) or (None, None) on premature EOF.
        """
        buf = b''
        while len(buf) < 4:
            tmp = client.recv(4 - len(buf))
            if len(tmp) == 0:
                return (None, None)
            buf += tmp
        header_size = struct.unpack('>I', buf)[0]
        buf = b''
        while len(buf) < header_size:
            tmp = client.recv(header_size - len(buf))
            if len(tmp) == 0:
                return (None, None)
            buf += tmp
        headers = buf.split(b"\0")
        headers.pop() # Remove trailing "\0"
        env = {}
        i = 0
        while i < len(headers):
            # Keys and values alternate in the flattened header list.
            env[bytes_to_str(headers[i])] = bytes_to_str(headers[i + 1])
            i += 2
        return (env, client)

    # Pick the right buffered-file wrapper at class-definition time:
    # socket._fileobject exists on Python 2 only.
    if hasattr(socket, '_fileobject'):
        def wrap_input_socket(self, sock):
            return socket._fileobject(sock, 'rb', 512)
    else:
        def wrap_input_socket(self, sock):
            return socket.socket.makefile(sock, 'rb', 512)

    def process_request(self, env, input_stream, output_stream):
        """Run one WSGI request; returns True when the app hijacked the socket
        (in which case the caller must not close it)."""
        # The WSGI specification says that the input parameter object passed needs to
        # implement a few file-like methods. This is the reason why we "wrap" the socket._socket
        # into the _fileobject to solve this.
        #
        # Otherwise, the POST data won't be correctly retrieved by Django.
        #
        # See: http://www.python.org/dev/peps/pep-0333/#input-and-error-streams
        env['wsgi.input'] = self.wrap_input_socket(input_stream)
        env['wsgi.errors'] = sys.stderr
        env['wsgi.version'] = (1, 0)
        env['wsgi.multithread'] = False
        env['wsgi.multiprocess'] = True
        env['wsgi.run_once'] = False
        if env.get('HTTPS','off') in ('on', '1', 'true', 'yes'):
            env['wsgi.url_scheme'] = 'https'
        else:
            env['wsgi.url_scheme'] = 'http'
        # headers_set/headers_sent are lists so the nested closures can
        # mutate them in place (pre-nonlocal idiom).
        headers_set = []
        headers_sent = []
        is_head = env['REQUEST_METHOD'] == 'HEAD'

        def write(data):
            try:
                if not headers_set:
                    raise AssertionError("write() before start_response()")
                elif not headers_sent:
                    # Before the first output, send the stored headers.
                    status, response_headers = headers_sent[:] = headers_set
                    output_stream.sendall(str_to_bytes(
                        'HTTP/1.1 %s\r\nStatus: %s\r\nConnection: close\r\n' %
                        (status, status)))
                    for header in response_headers:
                        output_stream.sendall(str_to_bytes('%s: %s\r\n' % header))
                    output_stream.sendall(b'\r\n')
                # HEAD responses carry headers only, never a body.
                if not is_head:
                    output_stream.sendall(data)
            except IOError:
                # Mark this exception as coming from the Phusion Passenger
                # socket and not some other socket.
                e = sys.exc_info()[1]
                setattr(e, 'passenger', True)
                raise e

        def start_response(status, response_headers, exc_info = None):
            if exc_info:
                try:
                    if headers_sent:
                        # Re-raise original exception if headers sent.
                        reraise_exception(exc_info)
                finally:
                    # Avoid dangling circular ref.
                    exc_info = None
            elif headers_set:
                raise AssertionError("Headers already set!")
            headers_set[:] = [status, response_headers]
            return write

        # Django's django.template.base module goes through all WSGI
        # environment values, and calls each value that is a callable.
        # No idea why, but we work around that with the `do_it` parameter.
        def hijack(do_it = False):
            if do_it:
                env['passenger.hijacked_socket'] = output_stream
                return output_stream

        env['passenger.hijack'] = hijack
        result = self.app(env, start_response)
        if 'passenger.hijacked_socket' in env:
            # Socket connection hijacked. Don't do anything.
            return True

        try:
            for data in result:
                # Don't send headers until body appears.
                if data:
                    write(data)
            if not headers_sent:
                # Send headers now if body was empty.
                write(b'')
        finally:
            # Per PEP 333, close() must be called on the iterable if present.
            if hasattr(result, 'close'):
                result.close()
        return False

    def process_ping(self, env, input_stream, output_stream):
        # Health-check reply expected by Passenger.
        output_stream.sendall(b"pong")
if __name__ == "__main__":
    # Startup sequence: handshake with Passenger over stdin/stdout, load the
    # app, create and advertise the request socket, then serve until told
    # to stop.
    logging.basicConfig(
        level = logging.WARNING,
        format = "[ pid=%(process)d, time=%(asctime)s ]: %(message)s")
    # captureWarnings is not available on very old Python versions.
    if hasattr(logging, 'captureWarnings'):
        logging.captureWarnings(True)
    handshake_and_read_startup_request()
    app_module = load_app()
    socket_filename, server_socket = create_server_socket()
    install_signal_handlers()
    # stdin doubles as the owner pipe: when the parent closes it, shut down.
    handler = RequestHandler(server_socket, sys.stdin, app_module.application)
    print("!> Ready")
    advertise_sockets(socket_filename)
    handler.main_loop()
    # Best-effort removal of the socket file on clean shutdown.
    try:
        os.remove(socket_filename)
    except OSError:
        pass
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
from heat.common import exception
from heat.common import template_format
from heat.engine import environment
from heat.engine import stack as parser
from heat.engine import template
from heat.tests import common
from heat.tests import utils
class TestValue(common.HeatTestCase):
    """Shared helpers for OS::Heat::Value resource tests.

    Provides a two-resource template chain (my_value -> my_value2) plus
    stack construction helpers used by the scenario subclasses.
    """

    simple_template = '''
heat_template_version: '2016-10-14'
parameters:
  param1:
    type: <the type>
resources:
  my_value:
    type: OS::Heat::Value
    properties:
      value: {get_param: param1}
  my_value2:
    type: OS::Heat::Value
    properties:
      value: {get_attr: [my_value, value]}
outputs:
  myout:
    value: {get_attr: [my_value2, value]}
'''

    def get_strict_and_loose_templates(self, param_type):
        """Return (strict, loose) template dicts for *param_type*.

        The loose variant only sets the parameter type; the strict variant
        additionally declares the same type on both value resources.
        """
        loose = template_format.parse(self.simple_template)
        loose['parameters']['param1']['type'] = param_type
        strict = copy.deepcopy(loose)
        for res_name in ('my_value', 'my_value2'):
            strict['resources'][res_name]['properties']['type'] = param_type
        return (strict, loose)

    def parse_stack(self, templ_obj):
        """Build, validate and store a stack from a Template object."""
        ctx = utils.dummy_context()
        stack = parser.Stack(ctx, 'test_value_stack', templ_obj)
        stack.validate()
        stack.store()
        return stack

    def create_stack(self, templ, env=None):
        """Create a stack from a YAML string, a template dict, or a Template."""
        if isinstance(templ, str):
            templ = template_format.parse(templ)
        if isinstance(templ, dict):
            templ = template.Template(templ, env=env)
        assert isinstance(templ, template.Template)
        stack = self.parse_stack(templ)
        self.assertIsNone(stack.create())
        return stack
class TestValueSimple(TestValue):
    """One scenario per parameter type; the value must pass through unchanged."""

    scenarios = [
        ('boolean', dict(
            param1=True, param_type="boolean")),
        ('list', dict(
            param1=['a', 'b', 'Z'], param_type="comma_delimited_list")),
        ('map', dict(
            param1={'a': 'Z', 'B': 'y'}, param_type="json")),
        ('number-int', dict(
            param1=-11, param_type="number")),
        ('number-float', dict(
            param1=100.999, param_type="number")),
        ('string', dict(
            param1='Perchance to dream', param_type="string")),
    ]

    def test_value(self):
        strict, loose = self.get_strict_and_loose_templates(self.param_type)
        env = environment.Environment(
            {'parameters': {'param1': self.param1}})
        # Typed (strict) and untyped (loose) templates must behave the same:
        # the parameter flows unchanged through both value resources and out.
        for tmpl in (strict, loose):
            stack = self.create_stack(tmpl, env)
            self.assertEqual(self.param1, stack['my_value'].FnGetAtt('value'))
            self.assertEqual(self.param1, stack['my_value2'].FnGetAtt('value'))
            self.assertEqual(self.param1, stack.output('myout'))
class TestValueLessSimple(TestValue):
    """Value-resource tests covering type validation, map_merge and yaql."""

    # A json parameter fed into a value resource that declares type: number;
    # stack validation must reject the mismatch.
    template_bad = '''
heat_template_version: '2016-10-14'
parameters:
  param1:
    type: json
resources:
  my_value:
    type: OS::Heat::Value
    properties:
      value: {get_param: param1}
      type: number
'''

    # my_value2 merges my_value's map with param2 via map_merge.
    template_map = '''
heat_template_version: '2016-10-14'
parameters:
  param1:
    type: json
  param2:
    type: json
resources:
  my_value:
    type: OS::Heat::Value
    properties:
      value: {get_param: param1}
      type: json
  my_value2:
    type: OS::Heat::Value
    properties:
      value: {map_merge: [{get_attr: [my_value, value]}, {get_param: param2}]}
      type: json
'''

    # Chains yaql expressions through value resources: my_value2 takes the
    # minimum of a list parameter, my_value3 the minimum of the two values.
    template_yaql = '''
heat_template_version: '2016-10-14'
parameters:
  param1:
    type: number
  param2:
    type: comma_delimited_list
resources:
  my_value:
    type: OS::Heat::Value
    properties:
      value: {get_param: param1}
      type: number
  my_value2:
    type: OS::Heat::Value
    properties:
      value:
        yaql:
          expression: $.data.param2.select(int($)).min()
          data:
            param2: {get_param: param2}
      type: number
  my_value3:
    type: OS::Heat::Value
    properties:
      value:
        yaql:
          expression: min($.data.v1,$.data.v2)
          data:
            v1: {get_attr: [my_value, value]}
            v2: {get_attr: [my_value2, value]}
'''

    def test_validation_fail(self):
        # json value into a resource declared as number -> validation error.
        param1 = {"one": "croissant"}
        env = environment.Environment({
            'parameters': {'param1': json.dumps(param1)}})
        self.assertRaises(exception.StackValidationFailed,
                          self.create_stack, self.template_bad, env)

    def test_map(self):
        param1 = {"one": "skipper", "two": "antennae"}
        param2 = {"one": "monarch", "three": "sky"}
        env = environment.Environment({
            'parameters': {'param1': json.dumps(param1),
                           'param2': json.dumps(param2)}})
        stack = self.create_stack(self.template_map, env)
        my_value = stack['my_value']
        self.assertEqual(param1, my_value.FnGetAtt('value'))
        # For duplicate keys, map_merge's right-hand operand (param2) wins.
        my_value2 = stack['my_value2']
        self.assertEqual({"one": "monarch",
                          "two": "antennae",
                          "three": "sky"}, my_value2.FnGetAtt('value'))

    def test_yaql(self):
        param1 = -800
        param2 = [-8, 0, 4, -11, 2]
        env = environment.Environment({
            'parameters': {'param1': param1, 'param2': param2}})
        stack = self.create_stack(self.template_yaql, env)
        my_value = stack['my_value']
        self.assertEqual(param1, my_value.FnGetAtt('value'))
        # my_value2 is min(param2); my_value3 is min(param1, min(param2)).
        my_value2 = stack['my_value2']
        self.assertEqual(min(param2), my_value2.FnGetAtt('value'))
        my_value3 = stack['my_value3']
        self.assertEqual(param1, my_value3.FnGetAtt('value'))
class TestValueUpdate(TestValue):
    """Verify value resources are updated in place (not replaced) when the
    parameter's value and type change across a stack update."""

    # Each scenario updates param1/param_type1 -> param2/param_type2, and
    # test_value also exercises the reverse direction.
    scenarios = [
        ('boolean-to-number', dict(
            param1=True, param_type1="boolean",
            param2=-100.999, param_type2="number")),
        ('number-to-string', dict(
            param1=-77, param_type1="number",
            param2='mellors', param_type2="string")),
        ('string-to-map', dict(
            param1='mellors', param_type1="string",
            param2={'3': 'turbo'}, param_type2="json")),
        ('map-to-boolean', dict(
            param1={'hey': 'there'}, param_type1="json",
            param2=False, param_type2="boolean")),
        ('list-to-boolean', dict(
            param1=['hey', '!'], param_type1="comma_delimited_list",
            param2=True, param_type2="boolean")),
    ]

    def test_value(self):
        ts1, tl1 = self.get_strict_and_loose_templates(self.param_type1)
        ts2, tl2 = self.get_strict_and_loose_templates(self.param_type2)
        env1 = environment.Environment({
            'parameters': {'param1': self.param1}})
        env2 = environment.Environment({
            'parameters': {'param1': self.param2}})
        # Every strict/loose pairing, in both update directions.
        updates = [(ts1, ts2), (ts1, tl2), (tl1, ts2), (tl1, tl2)]
        updates_other_way = [(b, a) for a, b in updates]
        updates.extend(updates_other_way)
        for t_initial, t_updated in updates:
            # Figure out which param/env belongs to the initial template.
            if t_initial == ts1 or t_initial == tl1:
                p1, p2, e1, e2 = self.param1, self.param2, env1, env2
            else:
                # starting with param2, updating to param1
                p2, p1, e2, e1 = self.param1, self.param2, env1, env2
            stack = self.create_stack(t_initial, env=e1)
            self.assertEqual(p1, stack['my_value2'].FnGetAtt('value'))
            # Record resource identities to prove in-place update below.
            res1_id = stack['my_value'].id
            res2_id = stack['my_value2'].id
            res2_uuid = stack['my_value2'].uuid
            updated_stack = parser.Stack(
                stack.context, 'updated_stack',
                template.Template(t_updated, env=e2))
            updated_stack.validate()
            stack.update(updated_stack)
            self.assertEqual(p2, stack['my_value2'].FnGetAtt('value'))
            # Make sure resources not replaced after update
            self.assertEqual(res1_id, stack['my_value'].id)
            self.assertEqual(res2_id, stack['my_value2'].id)
            self.assertEqual(res2_uuid, stack['my_value2'].uuid)
| |
#!/usr/bin/env python
import sys
import os
import re
import time
from xml.dom.minidom import Document
try:
import xml.etree.ElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree # NOQA
except ImportError:
try:
import elementtree.ElementTree as ElementTree # NOQA
except ImportError:
import lxml.etree as ElementTree # NOQA
# missing_deps stays False when json and BeautifulSoup import cleanly;
# otherwise it holds the ImportError so __main__ can report it and bail.
missing_deps = False
try:
    import json
except ImportError:
    try:
        import simplejson as json # NOQA
    except ImportError, E:
        missing_deps = E
try:
    from BeautifulSoup import BeautifulSoup
except ImportError, E:
    missing_deps = E

# Output feed filename and the public URL prefix of the example pages.
feedName = "example-list.xml"
feedPath = "http://openlayers.github.io/ol3/master/examples/"
def getListOfExamples(relPath):
    """
    returns list of .html filenames within a given path - excludes
    index.html
    """
    return [name for name in os.listdir(relPath)
            if name.endswith('.html') and name != 'index.html']
def getExampleHtml(path):
    """
    returns html of a specific example
    """
    # Progress marker: one dot per example, all on the same line.
    print '.',
    f = open(path)
    html = f.read()
    f.close()
    return html
def extractById(soup, tagId, value=None):
    """Return the rendered contents of the element with id *tagId*,
    stripped of tabs and newlines; *value* (default None) is returned
    when the tag is missing or empty."""
    tag = soup.find(id=tagId)
    if tag and tag.contents:
        value = str(tag.renderContents()).strip()
        value = value.replace('\t', '').replace('\n', '')
    return value
def getRelatedClasses(html):
    """
    parses the html, and returns a list of all OpenLayers Classes
    used within (ie what parts of OL the javascript uses).
    """
    # Any "ol.<dotted name>(" call marks a class usage.
    pattern = re.compile(r'''(?P<class>ol\..*?)\(''')
    return pattern.findall(html)
def parseHtml(html, ids):
    """
    returns dictionary of items of interest
    """
    soup = BeautifulSoup(html)
    d = dict((tagId, extractById(soup, tagId)) for tagId in ids)
    #classes should eventually be parsed from docs - not automatically created.
    d['classes'] = getRelatedClasses(html)
    return d
def getGitInfo(exampleDir, exampleName):
    """Return {'author', 'date'} of the last git commit touching
    *exampleName*; both empty strings when there is no history."""
    orig = os.getcwd()
    os.chdir(exampleDir)
    # NOTE(review): exampleName is interpolated into a shell command without
    # quoting; names with spaces or shell metacharacters would break this.
    h = os.popen("git log -n 1 --pretty=format:'%an|%ai' " + exampleName)
    os.chdir(orig)
    log = h.read()
    h.close()
    d = {}
    if log:
        parts = log.split("|")
        d["author"] = parts[0]
        # compensate for spaces in git log time
        td = parts[1].split(" ")
        td.insert(1, "T")
        d["date"] = "".join(td)
    else:
        d["author"] = ""
        d["date"] = ""
    return d
def createFeed(examples):
    """Build an Atom feed (xml.dom Document) from the example records,
    newest-modified first."""
    doc = Document()
    atomuri = "http://www.w3.org/2005/Atom"
    feed = doc.createElementNS(atomuri, "feed")
    feed.setAttribute("xmlns", atomuri)
    title = doc.createElementNS(atomuri, "title")
    title.appendChild(doc.createTextNode("OpenLayers Examples"))
    feed.appendChild(title)
    # NOTE(review): this self-link element is built but never appended to
    # the feed, so it does not appear in the output.
    link = doc.createElementNS(atomuri, "link")
    link.setAttribute("rel", "self")
    link.setAttribute("href", feedPath + feedName)
    # The feed id embeds the generation time, so it changes on every rebuild.
    modtime = time.strftime("%Y-%m-%dT%I:%M:%SZ", time.gmtime())
    id = doc.createElementNS(atomuri, "id")
    id.appendChild(doc.createTextNode(
        "%s%s#%s" % (feedPath, feedName, modtime)))
    feed.appendChild(id)
    updated = doc.createElementNS(atomuri, "updated")
    updated.appendChild(doc.createTextNode(modtime))
    feed.appendChild(updated)
    # NOTE(review): this in-place sort is redundant -- the loop below
    # iterates its own sorted() copy -- but it does reorder the caller's list.
    examples.sort(key=lambda x: x["modified"])
    for example in sorted(examples, key=lambda x: x["modified"], reverse=True):
        entry = doc.createElementNS(atomuri, "entry")
        title = doc.createElementNS(atomuri, "title")
        title.appendChild(doc.createTextNode(example["title"] or
                                             example["example"]))
        entry.appendChild(title)
        tags = doc.createElementNS(atomuri, "tags")
        tags.appendChild(doc.createTextNode(example["tags"] or
                                            example["example"]))
        entry.appendChild(tags)
        link = doc.createElementNS(atomuri, "link")
        link.setAttribute("href", "%s%s" % (feedPath, example["example"]))
        entry.appendChild(link)
        summary = doc.createElementNS(atomuri, "summary")
        summary.appendChild(doc.createTextNode(example["shortdesc"] or
                                               example["example"]))
        entry.appendChild(summary)
        updated = doc.createElementNS(atomuri, "updated")
        updated.appendChild(doc.createTextNode(example["modified"]))
        entry.appendChild(updated)
        author = doc.createElementNS(atomuri, "author")
        name = doc.createElementNS(atomuri, "name")
        name.appendChild(doc.createTextNode(example["author"]))
        author.appendChild(name)
        entry.appendChild(author)
        id = doc.createElementNS(atomuri, "id")
        id.appendChild(doc.createTextNode("%s%s#%s" % (feedPath,
                                                       example["example"],
                                                       example["modified"])))
        entry.appendChild(id)
        feed.appendChild(entry)
    doc.appendChild(feed)
    return doc
def wordIndex(examples):
    """
    Create an inverted index based on words in title and shortdesc. Keys are
    lower cased words. Values are dictionaries with example index keys and
    count values.
    """
    index = {}
    unword = re.compile("\\W+")
    for pos, example in enumerate(examples):
        for field in ("shortdesc", "title", "tags"):
            text = example[field]
            if not text:
                continue
            for token in unword.split(text):
                if not token:
                    continue
                token = token.lower()
                counts = index.setdefault(token, {})
                counts[pos] = counts.get(pos, 0) + 1
    return index
if __name__ == "__main__":
    # Bail out early with a readable message when json/BeautifulSoup are absent.
    if missing_deps:
        print """This script requires json or simplejson and BeautifulSoup.
You don't have them. \n(%s)""" % E
        sys.exit()
    # Optional positional args: input and output example directories.
    if len(sys.argv) == 3:
        inExampleDir = sys.argv[1]
        outExampleDir = sys.argv[2]
    else:
        inExampleDir = "../examples"
        outExampleDir = "../examples"
    outFile = open(os.path.join(outExampleDir, "example-list.js"), "w")
    print 'Reading examples from %s and writing out to %s' % (inExampleDir,
                                                              outFile.name)
    exampleList = []
    docIds = ['title', 'shortdesc', 'tags']
    examples = getListOfExamples(inExampleDir)
    modtime = time.strftime("%Y-%m-%dT%I:%M:%SZ", time.gmtime())
    # Gather metadata for every example page.
    for example in examples:
        path = os.path.join(inExampleDir, example)
        html = getExampleHtml(path)
        tagvalues = parseHtml(html, docIds)
        tagvalues['example'] = example
        # add in author/date info
        d = getGitInfo(inExampleDir, example)
        tagvalues["author"] = d["author"] or "anonymous"
        tagvalues["modified"] = d["date"] or modtime
        tagvalues['link'] = example
        exampleList.append(tagvalues)
    print
    exampleList.sort(key=lambda x: x['example'].lower())
    index = wordIndex(exampleList)
    # NOTE(review): this rebinding shadows the imported json module for the
    # rest of the script.
    json = json.dumps({"examples": exampleList, "index": index})
    # Give the json a global variable we can use in our js.
    # This should be replaced or made optional.
    json = 'var info=' + json + ';'
    outFile.write(json)
    outFile.close()
    outFeedPath = os.path.join(outExampleDir, feedName)
    print "writing feed to %s " % outFeedPath
    atom = open(outFeedPath, 'w')
    doc = createFeed(exampleList)
    atom.write(doc.toxml())
    atom.close()
    print 'complete'
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# XCode Project Creator
#
import os, sys, re, shutil, codecs
from shutil import copyfile
from os.path import join, splitext, split, exists
from datetime import date
# Directory containing this script (used as the template root).
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
# File extensions that get symbol rewriting instead of a plain copy.
fileTargets = ['.c','.cpp','.h','.m','.mm','.pbxproj']
# Files and directories excluded from copying entirely.
ignoreFiles = ['.gitignore', '.cvsignore','bridge.txt','libTitanium.a']
ignoreDirs = ['.git','.svn', 'CVS']
# Source identifiers that get replaced by the project namespace.
symbolicMap = ['Titanium','Appcelerator']
# Paths containing these substrings are skipped by process_file.
exclusions = ['TiCore']
class Projector(object):
    """Generates a standalone XCode project from the Titanium template tree,
    rewriting Titanium/Appcelerator symbols into a project-specific
    namespace along the way."""

    def make_self(self,s):
        # Reduce a project name to a safe identifier: keep [0-9a-zA-Z_],
        # turn '-' into '_', drop everything else.
        r = re.compile('[0-9a-zA-Z_]')
        buf = ''
        for i in s:
            if i=='-':
                buf+='_'
                continue
            if r.match(i)!=None:
                buf+=i
        # if name starts with number, prefix with a k to keep it a valid identifier
        if re.match('^[0-9]+',buf):
            buf = 'k%s' % buf
        return buf

    def __init__(self,name,sdk_version,sdk_root,project_root,appid):
        # namespace is the identifier-safe form of the project name; it gets
        # substituted for 'Titanium'/'Appcelerator' throughout the sources.
        self.sdk_version = sdk_version
        self.sdk_root = os.path.abspath(sdk_root)
        self.project_root = os.path.abspath(project_root)
        self.project_id = appid
        self.name = name
        self.namespace = self.make_self(name)
        self.namespace_upper = self.namespace.upper()+'_'

    def form_target_filename(self,fn):
        # Hook for renaming copied files; currently the identity.
        return fn

    def process_file(self,source,target,cb=None):
        """Copy *source* to *target* while rewriting Titanium symbols;
        returns False (file skipped) for excluded paths, True otherwise."""
        for exclude in exclusions:
            if source.find(exclude)>0:
                return False
        # first deal with the filename
        target_filename = self.form_target_filename(target)
        print "[DEBUG] processing %s => %s" % (source,target_filename)
        content = codecs.open(source,'r','utf-8','replace').read()
        # fixup special case
        content = content.replace('TitaniumViewController','%s$ViewController'%self.namespace)
        content = content.replace('TitaniumModule','%s$Module'%self.namespace)
        for symbol in symbolicMap:
            content = content.replace(symbol,self.namespace)
        # fixup titanium vars
        content = content.replace('titanium','_%s'%self.namespace.lower())
        # fixup double module replacement
        content = content.replace('%s%sModule' %(self.namespace,self.namespace),'%sModule'%self.namespace)
        content = content.replace('%s%s$Module' %(self.namespace,self.namespace),'%s$Module'%self.namespace)
        # fixup namespaces
        content = content.replace('org.appcelerator','org.%s'%self.namespace.lower())
        content = content.replace('com.appcelerator','com.%s'%self.namespace.lower())
        # fixup Copyright
        content = content.replace('* %s %s Mobile'%(self.namespace,self.namespace),'* Appcelerator Titanium Mobile')
        content = content.replace('* Copyright (c) 2009-2010 by %s, Inc.'%(self.namespace),'* Copyright (c) 2009-%s by Appcelerator, Inc.' % date.today().strftime('%Y'))
        content = content.replace("""* Please see the LICENSE included with this distribution for details.
*/""", """* Please see the LICENSE included with this distribution for details.
*
* WARNING: This is generated code. Modify at your own risk and without support.
*/""")
        if cb!=None:
            content = cb(content)
        target_file = codecs.open(target_filename,'w','utf-8','replace')
        target_file.write(content)
        target_file.close()
        # then deal with the contents
        return True

    def copy_module_resources(self, source, target):
        """Walk the *source* tree, rewriting known source-file types and
        copying everything else verbatim; ignored files/dirs are skipped."""
        if not os.path.exists(os.path.expanduser(target)):
            os.mkdir(os.path.expanduser(target))
        for root, dirs, files in os.walk(source):
            for name in ignoreDirs:
                if name in dirs:
                    dirs.remove(name) # don't visit ignored directories
            for file_ in files:
                if file_ in ignoreFiles:
                    continue
                from_ = join(root, file_)
                to_ = os.path.expanduser(from_.replace(source, target, 1))
                to_directory = os.path.expanduser(split(to_)[0])
                if not exists(to_directory):
                    os.makedirs(to_directory)
                processed = False
                if splitext(file_)[-1] in fileTargets:
                    processed = self.process_file(from_,to_)
                # Fall back to a plain byte copy when no rewrite happened.
                if not processed:
                    if os.path.exists(to_): os.remove(to_)
                    print "[DEBUG] copying: %s => %s" % (from_,to_)
                    copyfile(from_, to_)

    def fix_xcode_script(self,content,script_name,script_contents):
        # fix up xcode compile scripts in build phase
        # Finds each build phase named *script_name* and replaces the body of
        # its shellScript assignment with *script_contents*.
        start = 0
        while start >= 0:
            start = content.find("name = \"%s\";" % script_name, start)
            if start > 0:
                begin = content.find("shellScript = ",start)
                if begin > 0:
                    end = content.find("};",begin+1)
                    if end > 0:
                        # begin+15 points just past 'shellScript = "'.
                        before = content[0:begin+15]
                        after = content[end:]
                        script = "%s\";\n " % script_contents
                        content = before + script + after
                        start = begin
        return content

    def process_xcode(self,content):
        """Rewrite the template project.pbxproj: adjust relative paths,
        rename products/prefix headers, and patch the build-phase scripts."""
        content = content.replace('../Classes','Classes')
        content = content.replace('../Resources','Resources')
        content = content.replace('../headers/%sCore'%self.namespace,'headers/TiCore')
        content = content.replace('../headers','headers')
        content = content.replace('../lib','lib')
        content = content.replace('Titanium.plist','Info.plist')
        content = content.replace('Titanium-KitchenSink',self.name)
        content = content.replace('Titanium',self.namespace)
        content = content.replace('path = %s.app;' % self.namespace, 'path = "%s.app";'%self.name)
        content = content.replace('PRODUCT_NAME = %s'%self.namespace,'PRODUCT_NAME = "%s"'%self.name)
        content = content.replace('PRODUCT_NAME = %s-iPad'%self.namespace,'PRODUCT_NAME = "%s"'%self.name)
        content = content.replace('PRODUCT_NAME = "%s-iPad"'%self.namespace,'PRODUCT_NAME = "%s"'%self.name)
        content = content.replace('PRODUCT_NAME = %s-universal'%self.namespace,'PRODUCT_NAME = "%s"'%self.name)
        content = content.replace('PRODUCT_NAME = "%s-universal"'%self.namespace,'PRODUCT_NAME = "%s"'%self.name)
        content = content.replace('Resources-iPad','Resources')
        content = content.replace('%s.app'%self.namespace,'%s.app'%self.name)
        content = content.replace('path = %s_Prefix.pch;'%self.namespace,'path = "%s_Prefix.pch";'%self.name)
        content = content.replace('%s_Prefix.pch'%self.namespace,'%s_Prefix.pch'%self.name)
        content = content.replace('GCC_PREFIX_HEADER = %s_Prefix.pch;'%self.name,'GCC_PREFIX_HEADER = "%s_Prefix.pch";'%self.name)
        builder_py = os.path.abspath(os.path.join(self.sdk_root,"builder.py"))
        pre_compile_script = "\\\"%s\\\" xcode\\nexit $?" % (builder_py)
        content = self.fix_xcode_script(content,"Pre-Compile",pre_compile_script)
        content = self.fix_xcode_script(content,"Post-Compile","echo 'post-compile'")
        return content

    def create(self,in_dir,out_dir):
        """Materialize the project: copy the source trees, rewrite the XCode
        project file, and emit the supporting xcconfig files."""
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        for dir_ in ['Classes','lib','Resources','headers']:
            from_ = os.path.join(in_dir,dir_)
            to_ = os.path.join(out_dir,dir_)
            if not os.path.exists(to_): os.makedirs(to_)
            self.copy_module_resources(from_,to_)
        copyfile(os.path.join(in_dir,'iphone','Titanium_Prefix.pch'),os.path.join(out_dir,'%s_Prefix.pch'%self.name))
        copyfile(os.path.join(in_dir,'iphone','Titanium.plist'),os.path.join(out_dir,'Info.plist'))
        xcode_dir = os.path.join(out_dir,'%s.xcodeproj'%self.name)
        if not os.path.exists(xcode_dir):
            os.makedirs(xcode_dir)
        xcode_proj = os.path.join(xcode_dir,'project.pbxproj')
        src_xcode_proj = os.path.join(in_dir,'iphone','Titanium.xcodeproj','project.pbxproj')
        # we do special processing here
        c = open(src_xcode_proj).read()
        c = self.process_xcode(c)
        f = codecs.open(os.path.join(out_dir,'%s.xcodeproj'%self.name,'project.pbxproj'),'w',encoding='utf-8')
        f.write(c)
        f.close()
        # project.xcconfig: SDK location/version plus linker flags.
        xcconfig = os.path.join(out_dir,"project.xcconfig")
        xcconfig = open(xcconfig,'w')
        xcconfig.write("TI_VERSION=%s\n" % self.sdk_version)
        xcconfig.write("TI_SDK_DIR=%s\n" % self.sdk_root.replace(self.sdk_version,'$(TI_VERSION)'))
        xcconfig.write("TI_APPID=%s\n" % self.project_id)
        xcconfig.write("OTHER_LDFLAGS[sdk=iphoneos*]=$(inherited) -weak_framework iAd\n")
        xcconfig.write("OTHER_LDFLAGS[sdk=iphonesimulator*]=$(inherited) -weak_framework iAd\n")
        xcconfig.write("#include \"module\"\n")
        xcconfig.close()
        # module.xcconfig: generated placeholder included by project.xcconfig.
        xcconfig = os.path.join(out_dir,"module.xcconfig")
        xcconfig = open(xcconfig,'w')
        xcconfig.write("// this is a generated file - DO NOT EDIT\n\n")
        xcconfig.close()
def usage(args):
    """Print command usage and exit with a failure status."""
    print "%s <name> <in> <out>" % (os.path.basename(args[0]))
    sys.exit(-1)
def dequote(s):
    """Strip one pair of surrounding double quotes from *s*, if present.

    The original checked only the leading quote and always sliced off the
    last character, so an unbalanced input like '"abc' lost its final
    character; now both ends must be quoted.
    """
    if len(s) >= 2 and s.startswith('"') and s.endswith('"'):
        return s[1:-1]
    return s
def main(args):
    """Create an XCode project from the Titanium templates.

    Expected argv layout: [script, name, sdk_version, sdk_root, project_root].
    """
    # Five entries are required because args[4] is read below; the original
    # check (len < 4) let an IndexError through when exactly 4 were given.
    if len(args) < 5:
        usage(args)
    name = dequote(args[1])
    version = dequote(args[2])
    sdk_root = os.path.expanduser(dequote(args[3]))
    project_root = os.path.expanduser(dequote(args[4]))
    p = Projector(name,version,sdk_root,project_root,"com.appcelerator.test")
    p.create(sdk_root,project_root)
    sys.exit(0)
if __name__ == "__main__":
    #main(sys.argv)
    # Hard-coded invocation left in place by the original author for testing.
    main([sys.argv[0],"KitchenSink-iPad","1.3.0","/Library/Application Support/Titanium/mobilesdk/osx/1.3.0/iphone","/Users/jhaynie/tmp/one_three"])
| |
import math
import random
import time
from pandac.PandaModules import TextNode, BitMask32, Point3, Vec3, Vec4, deg2Rad, Mat3, NodePath, VBase4, OdeTriMeshData, OdeTriMeshGeom, OdeRayGeom, CollisionTraverser, CollisionSegment, CollisionNode, CollisionHandlerQueue
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from otp.otpbase import OTPGlobals
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownTimer
from direct.gui.DirectGui import DirectWaitBar, DGG, DirectLabel
from direct.task import Task
from direct.fsm.FSM import FSM
from toontown.minigame import ArrowKeys
from direct.showbase import PythonUtil
from toontown.golf import BuildGeometry
from toontown.golf import DistributedPhysicsWorld
from toontown.golf import GolfGlobals
from direct.interval.IntervalGlobal import Sequence, Parallel, LerpScaleInterval, LerpFunctionInterval, Func, Wait, SoundInterval, ParallelEndTogether, LerpPosInterval, ActorInterval, LerpPosHprInterval, LerpColorScaleInterval, WaitInterval
from direct.actor import Actor
from toontown.golf import GolfHoleBase
from toontown.distributed import DelayDelete
class DistributedGolfHole(DistributedPhysicsWorld.DistributedPhysicsWorld, FSM, GolfHoleBase.GolfHoleBase):
    # Legal FSM state transitions for one hole of golf; "Watch*" states are
    # the spectator counterparts of the local player's states.
    defaultTransitions = {'Off': ['Cleanup', 'ChooseTee', 'WatchTee'],
     'ChooseTee': ['Aim', 'Cleanup'],
     'WatchTee': ['WatchAim',
                  'Cleanup',
                  'WatchTee',
                  'ChooseTee',
                  'Aim'],
     'Wait': ['Aim',
              'WatchAim',
              'Playback',
              'Cleanup',
              'ChooseTee',
              'WatchTee'],
     'Aim': ['Shoot',
             'Playback',
             'Cleanup',
             'Aim',
             'WatchAim'],
     'WatchAim': ['WatchAim',
                  'WatchShoot',
                  'Playback',
                  'Cleanup',
                  'Aim',
                  'ChooseTee',
                  'WatchTee'],
     'Playback': ['Wait',
                  'Aim',
                  'WatchAim',
                  'Cleanup',
                  'ChooseTee',
                  'WatchTee'],
     'Cleanup': ['Off']}
    id = 0
    notify = directNotify.newCategory('DistributedGolfHole')
    # Config-driven debug toggles and power-meter tuning values.
    unlimitedAimTime = base.config.GetBool('unlimited-aim-time', 0)
    unlimitedTeeTime = base.config.GetBool('unlimited-tee-time', 0)
    golfPowerSpeed = base.config.GetDouble('golf-power-speed', 3)
    golfPowerExponent = base.config.GetDouble('golf-power-exponent', 0.75)
    # Default and maximum camera pitch, in degrees.
    DefaultCamP = -16
    MaxCamP = -90
def __init__(self, cr):
    """Initialize per-hole client state.

    NOTE(review): the *cr* parameter is ignored; the superclass is passed
    base.cr instead — presumably equivalent, but confirm against callers.
    """
    self.notify.debug('Hole Init')
    DistributedPhysicsWorld.DistributedPhysicsWorld.__init__(self, base.cr)
    GolfHoleBase.GolfHoleBase.__init__(self, 1)
    FSM.__init__(self, 'Golf_%s_FSM' % self.id)
    self.currentGolfer = 0
    self.ballDict = {}          # avId -> ball/geom/actor records
    self.ballShadowDict = {}    # avId -> drop-shadow NodePath
    self.holeNodes = []
    self.golfCourse = None
    self.golfCourseRequest = None
    self.holePositions = []
    self.timer = None           # aim countdown timer UI
    self.teeTimer = None        # tee-choice countdown timer UI
    self.aimStart = None
    self.titleLabel = None
    self.teeInstructions = None
    self.aimInstructions = None
    self.powerReminder = None
    # Throttle bookkeeping for broadcasting aim heading to other clients.
    self.lastTimeHeadingSent = 0
    self.lastTempHeadingSent = 0
    self.holdCycleTime = 0.0
    self.inPlayBack = 0
    self.swingInterval = None
    self.sfxInterval = None
    self.isLookingAtPutt = False
    self.clubs = {}             # avId -> putter NodePath
    self.camInterval = None
    self.flyOverInterval = None
    self.needToDoFlyOver = True
    self.translucentLastFrame = []
    self.translucentCurFrame = []
    # Kick-out tracking for players who never swing.
    self.localMissedSwings = 0
    self.localToonHitControl = False
    self.warningInterval = None
    self.playBackDelayDelete = None
    self.aimMomentum = 0.0
    self.lastBumpSfxPos = Point3(0, 0, 0)
    self.__textGen = TextNode('golfHoleText')
    self.__textGen.setFont(ToontownGlobals.getSignFont())
    self.__textGen.setAlign(TextNode.ACenter)
    # Shrink glyphs for languages with longer strings.
    if TTLocalizer.getLanguage() in ['castillian',
     'japanese',
     'german',
     'portuguese',
     'french']:
        self.__textGen.setGlyphScale(0.7)
    self.avIdList = []
    self.enterAimStart = 0
    return
def generate(self):
    """Generate the distributed physics world and name the power task."""
    self.notify.debug('Hole Generate')
    DistributedPhysicsWorld.DistributedPhysicsWorld.generate(self)
    # Unique per-object task name so delete() can remove exactly this task.
    self.golfPowerTaskName = self.uniqueName('updateGolfPower')
def announceGenerate(self):
    """Finish client-side setup once the object is fully generated."""
    DistributedPhysicsWorld.DistributedPhysicsWorld.announceGenerate(self)
    self.setup()
    self.sendReady()
    self.request('Off')
    # Ball indices are 1-based.
    for index, avId in enumerate(self.avIdList, 1):
        self.createBall(avId, index)
        self.createClub(avId)
    if self.avIdList:
        self.currentGolfer = self.avIdList[0]
        self.currentGolferActive = False
def delete(self):
    """Tear down all UI, intervals, models and tasks owned by this hole.

    Fixes a typo in the original: self.aimInstructions.destory() raised
    AttributeError and leaked the label; it is now destroy().
    """
    self.removePlayBackDelayDelete()
    self.request('Cleanup')
    taskMgr.remove(self.golfPowerTaskName)
    DistributedPhysicsWorld.DistributedPhysicsWorld.delete(self)
    GolfHoleBase.GolfHoleBase.delete(self)
    if hasattr(self, 'perfectIval'):
        self.perfectIval.pause()
        del self.perfectIval
    self.golfCourse = None
    if self.teeInstructions:
        self.teeInstructions.destroy()
        self.teeInstructions = None
    if self.aimInstructions:
        # BUGFIX: was destory() (AttributeError at cleanup time).
        self.aimInstructions.destroy()
        self.aimInstructions = None
    if self.powerReminder:
        self.powerReminder.destroy()
        self.powerReminder = None
    if self.swingInterval:
        self.swingInterval.pause()
        self.swingInterval = None
    if self.sfxInterval:
        self.sfxInterval.pause()
        self.sfxInterval = None
    if self.camInterval:
        self.camInterval.pause()
        self.camInterval = None
    for club in self.clubs:
        self.clubs[club].removeNode()
    del self.clubs
    if hasattr(self, 'scoreBoard'):
        if hasattr(self.scoreBoard, 'maximizeB'):
            if self.scoreBoard.maximizeB:
                self.scoreBoard.maximizeB.hide()
    if self.titleLabel is not None:
        self.titleLabel.destroy()
        self.notify.debug('Deleted title label')
    self.notify.debug('Delete function')
    if self.flyOverInterval:
        self.flyOverInterval.pause()
        self.flyOverInterval = None
    for key in self.ballShadowDict:
        self.ballShadowDict[key].removeNode()
    self.dropShadowModel.removeNode()
    return
def sendReady(self):
    """Tell the AI that this client has finished loading the hole."""
    self.sendUpdate('setAvatarReadyHole', [])
def createClub(self, avId):
    """Build a putter NodePath for *avId* and cache it in self.clubs."""
    root = NodePath('club-%s' % avId)
    putter = loader.loadModel('phase_6/models/golf/putter')
    putter.reparentTo(root)
    # Roll the model 45 degrees about its own axis so it hangs correctly.
    putter.setR(putter, 45)
    self.clubs[avId] = root
def attachClub(self, avId, pointToBall = False):
    """Parent avId's club to the toon's left hand, undoing the toon's scale.

    If pointToBall is True the club is aimed at the ball lookat spot.
    """
    club = self.clubs[avId]
    if club:
        av = base.cr.doId2do.get(avId)
        if av:
            av.useLOD(1000)
            lHand = av.getLeftHands()[0]
            club.setPos(0, 0, 0)
            club.reparentTo(lHand)
            # Hand scale would shrink/grow the club; measure the net scale
            # and parent the club under a counter-scaling node instead.
            netScale = club.getNetTransform().getScale()[1]
            counterActToonScale = lHand.find('**/counteractToonScale')
            if counterActToonScale.isEmpty():
                counterActToonScale = lHand.attachNewNode('counteractToonScale')
                counterActToonScale.setScale(1 / netScale)
                self.notify.debug('creating counterActToonScale for %s' % av.getName())
            club.reparentTo(counterActToonScale)
            club.setX(-0.25 * netScale)
            if pointToBall:
                club.lookAt(self.clubLookatSpot)
def createToonRay(self):
    """Create the downward ODE ray used to keep the toon's feet on terrain."""
    self.toonRay = OdeRayGeom(self.space, 10.0)
    # Collide with everything (mask 0xFFFFFF); belongs to no category itself.
    self.toonRay.setCollideBits(BitMask32(16777215))
    self.toonRay.setCategoryBits(BitMask32(0))
    # Point the ray straight down.
    self.toonRay.setRotation(Mat3(1, 0, 0, 0, -1, 0, 0, 0, -1))
    self.space.setCollideId(self.toonRay, GolfGlobals.TOON_RAY_COLLIDE_ID)
    self.rayList.append(self.toonRay)
def createSkyRay(self):
    """Create a long downward ODE ray (collide id 78) — sky/overhead probe."""
    self.skyRay = OdeRayGeom(self.space, 100.0)
    # Only collides with bits 0xF0; no category of its own.
    self.skyRay.setCollideBits(BitMask32(240))
    self.skyRay.setCategoryBits(BitMask32(0))
    # Point the ray straight down.
    self.skyRay.setRotation(Mat3(1, 0, 0, 0, -1, 0, 0, 0, -1))
    self.space.setCollideId(self.skyRay, 78)
    self.rayList.append(self.skyRay)
def createCameraRay(self):
    """Create the ODE ray used for camera obstruction tests."""
    self.cameraRay = OdeRayGeom(self.space, 30.0)
    # Collides only with bit 23 (0x800000); no category of its own.
    self.cameraRay.setCollideBits(BitMask32(8388608))
    self.cameraRay.setCategoryBits(BitMask32(0))
    self.space.setCollideId(self.cameraRay, GolfGlobals.CAMERA_RAY_COLLIDE_ID)
    self.cameraRayNodePath = self.terrainModel.attachNewNode('cameraRayNodePath')
    self.rayList.append(self.cameraRay)
def loadLevel(self):
    """Load hole geometry, collect tee and cup positions, and set up rays.

    Tees are named tee0..teeN in the terrain model; the cup bottom is
    holebottom0 (falling back to GolfGlobals data when absent). Also
    computes a top-down camera position that frames the whole hard surface.
    """
    GolfHoleBase.GolfHoleBase.loadLevel(self)
    self.teeNodePath = self.terrainModel.find('**/tee0')
    if self.teeNodePath.isEmpty():
        # No authored tee: fall back to a default spawn height.
        teePos = Vec3(0, 0, 10)
    else:
        teePos = self.teeNodePath.getPos()
        # Lift by ball radius so the ball rests on, not in, the surface.
        teePos.setZ(teePos.getZ() + GolfGlobals.GOLF_BALL_RADIUS)
        self.notify.debug('teeNodePath heading = %s' % self.teeNodePath.getH())
    self.teePositions = [teePos]
    teeIndex = 1
    teeNode = self.terrainModel.find('**/tee%d' % teeIndex)
    while not teeNode.isEmpty():
        teePos = teeNode.getPos()
        teePos.setZ(teePos.getZ() + GolfGlobals.GOLF_BALL_RADIUS)
        self.teePositions.append(teePos)
        self.notify.debug('teeNodeP heading = %s' % teeNode.getH())
        teeIndex += 1
        teeNode = self.terrainModel.find('**/tee%d' % teeIndex)
    self.holeBottomNodePath = self.terrainModel.find('**/holebottom0')
    if self.holeBottomNodePath.isEmpty():
        self.holeBottomPos = Vec3(*self.holeInfo['holePos'][0])
    else:
        self.holeBottomPos = self.holeBottomNodePath.getPos()
    self.holePositions.append(self.holeBottomPos)
    # Fit a top-down camera: pick a height whose FOV half-angles
    # (23/18 degrees) cover the hard surface's bounding box.
    minHard = Point3(0, 0, 0)
    maxHard = Point3(0, 0, 0)
    self.hardSurfaceNodePath.calcTightBounds(minHard, maxHard)
    centerX = (minHard[0] + maxHard[0]) / 2.0
    centerY = (minHard[1] + maxHard[1]) / 2.0
    heightX = (centerX - minHard[0]) / math.tan(deg2Rad(23))
    heightY = (centerY - minHard[1]) / math.tan(deg2Rad(18))
    height = max(heightX, heightY)
    self.camTopViewPos = Point3(centerX, centerY, height)
    self.camTopViewHpr = Point3(0, -90, 0)
    self.createRays()
    self.createToonRay()
    self.createCameraRay()
def createLocatorDict(self):
    """Map locator numbers to the locatorN nodes found in the hard surface."""
    self.locDict = {}
    index = 1
    while True:
        node = self.hardSurfaceNodePath.find('**/locator%d' % index)
        if node.isEmpty():
            break
        self.locDict[index] = node
        index += 1
def loadBlockers(self):
    """Keep only this hole's configured blocker obstacles; drop the rest.

    With the golf-all-blockers config set, every locator's blockers load.
    """
    loadAll = base.config.GetBool('golf-all-blockers', 0)
    self.createLocatorDict()
    self.blockerNums = self.holeInfo['blockers']
    for locatorNum in self.locDict:
        if locatorNum in self.blockerNums or loadAll:
            # Promote the blocker geometry out of its locator so it
            # survives the locator cleanup and the flatten below.
            locator = self.locDict[locatorNum]
            locatorParent = locator.getParent()
            locator.getChildren().wrtReparentTo(locatorParent)
        else:
            self.locDict[locatorNum].removeNode()
    self.hardSurfaceNodePath.flattenStrong()
def loadSounds(self):
    """Load every sound effect used while playing this hole."""
    sfx = loader.loadSfx
    self.hitBallSfx = sfx('phase_6/audio/sfx/Golf_Hit_Ball.ogg')
    self.holeInOneSfx = sfx('phase_6/audio/sfx/Golf_Hole_In_One.ogg')
    self.holeInTwoPlusSfx = sfx('phase_4/audio/sfx/MG_sfx_vine_game_fall.ogg')
    self.ballGoesInStartSfx = sfx('phase_6/audio/sfx/Golf_Ball_Goes_In_Start.ogg')
    self.ballGoesInLoopSfx = sfx('phase_6/audio/sfx/Golf_Ball_Goes_In_Loop.ogg')
    self.ballGoesToRestSfx = sfx('phase_6/audio/sfx/Golf_Ball_Rest_In_Cup.ogg')
    self.kickedOutSfx = sfx('phase_6/audio/sfx/Golf_Sad_Noise_Kicked_Off_Hole.ogg')
    # One private copy of each crowd reaction per possible golfer (4),
    # so simultaneous reactions don't cut each other off.
    self.crowdBuildupSfx = []
    self.crowdApplauseSfx = []
    self.crowdMissSfx = []
    for _ in xrange(4):
        self.crowdBuildupSfx.append(sfx('phase_6/audio/sfx/Golf_Crowd_Buildup.ogg'))
        self.crowdApplauseSfx.append(sfx('phase_6/audio/sfx/Golf_Crowd_Applause.ogg'))
        self.crowdMissSfx.append(sfx('phase_6/audio/sfx/Golf_Crowd_Miss.ogg'))
    self.bumpHardSfx = sfx('phase_6/audio/sfx/Golf_Hit_Barrier_3.ogg')
    self.bumpMoverSfx = sfx('phase_4/audio/sfx/Golf_Hit_Barrier_2.ogg')
    self.bumpWindmillSfx = sfx('phase_4/audio/sfx/Golf_Hit_Barrier_1.ogg')
def setup(self):
    """One-time scene setup: level, sounds, input, camera rig and HUD."""
    self.notify.debug('setup golf hole')
    self.loadLevel()
    self.loadSounds()
    self.camMove = 0
    # Arrow keys: left/right rotate the aim, jump charges the swing.
    self.arrowKeys = ArrowKeys.ArrowKeys()
    self.arrowKeys.setPressHandlers([None,
     None,
     self.__leftArrowPressed,
     self.__rightArrowPressed,
     self.__beginTossGolf])
    self.arrowKeys.setReleaseHandlers([None,
     None,
     None,
     None,
     self.__endTossGolf])
    self.targets = render.attachNewNode('targetGameTargets')
    # ballFollow is the camera/toon pivot that tracks the active ball.
    self.ballFollow = render.attachNewNode('nodeAtBall')
    self.startingTeeHeading = self.teeNodePath.getH()
    self.ballFollow.setH(self.startingTeeHeading)
    # Where the aiming toon stands, relative to the ball.
    self.ballFollowToonSpot = self.ballFollow.attachNewNode('toonAimSpot')
    self.ballFollowToonSpot.setX(-2.0)
    self.ballFollowToonSpot.setY(0)
    self.ballFollowToonSpot.setH(-90)
    # Spot just short of the ball the club points at.
    self.clubLookatSpot = self.ballFollow.attachNewNode('clubLookat')
    self.clubLookatSpot.setY(-(GolfGlobals.GOLF_BALL_RADIUS + 0.1))
    camera.reparentTo(self.ballFollow)
    self.camPosBallFollow = Point3(0.0, -23.0, 12.0)
    self.camHprBallFollow = Point3(0, -16.0, 0)
    camera.setPos(self.camPosBallFollow)
    camera.setHpr(self.camHprBallFollow)
    if self.holeBottomNodePath.isEmpty():
        # No authored cup geometry: build visible target discs with
        # collision spheres at each known hole position.
        holePositions = self.holePositions
        for index in xrange(len(holePositions)):
            holePos = holePositions[index]
            targetNodePathGeom, t1, t2 = BuildGeometry.addCircleGeom(self.targets, 16, 1)
            targetNodePathGeom.setPos(holePos)
            targetNodePathGeom.setBin('ground', 0)
            targetNodePathGeom.setDepthWrite(False)
            targetNodePathGeom.setDepthTest(False)
            targetNodePathGeom.setTransparency(TransparencyAttrib.MAlpha)
            targetNodePathGeom.setColorScale(0.0, 0.0, 0.0, 1.0)
            self.holeNodes.append(targetNodePathGeom)
            holeSphere = CollisionSphere(0, 0, 0, 1)
            holeSphere.setTangible(1)
            holeCNode = CollisionNode('Hole')
            holeCNode.addSolid(holeSphere)
            holeC = targetNodePathGeom.attachNewNode(holeCNode)
            holeC.show()
            holeC.setCollideMask(ToontownGlobals.PieBitmask)
    # Park the local toon out of sight until placed at the tee.
    toon = base.localAvatar
    toon.setPos(0.0, 0.0, -100.0)
    toon.b_setAnimState('neutral', 1.0)
    self.pollingCtrl = 0
    self.timeLastCtrl = 0.0
    # Swing power meter HUD.
    self.powerBar = DirectWaitBar(guiId='launch power bar', pos=(0.0, 0, -0.65), relief=DGG.SUNKEN, frameSize=(-2.0,
     2.0,
     -0.2,
     0.2), borderWidth=(0.02, 0.02), scale=0.25, range=100, sortOrder=50, frameColor=(0.5, 0.5, 0.5, 0.5), barColor=(1.0, 0.0, 0.0, 1.0), text='', text_scale=0.26, text_fg=(1, 1, 1, 1), text_align=TextNode.ACenter, text_pos=(0, -0.05))
    self.power = 0
    self.powerBar['value'] = self.power
    self.powerBar.hide()
    self.accept('tab', self.tabKeyPressed)
    self.putAwayAllToons()
    base.transitions.irisOut(t=0)
    self.dropShadowModel = loader.loadModel('phase_3/models/props/drop_shadow')
    self.dropShadowModel.setColor(0, 0, 0, 0.5)
    self.dropShadowModel.flattenMedium()
    self.dropShadowModel.hide()
    return
def switchToAnimState(self, animStateName, forced = False):
    """Put the local toon into *animStateName*, skipping no-op transitions
    unless *forced* is True."""
    current = base.localAvatar.animFSM.getCurrentState()
    currentName = current.getName() if current else ''
    if forced or currentName != animStateName:
        base.localAvatar.b_setAnimState(animStateName)
def __aimTask(self, task):
    """Per-frame aiming: rotate the shot heading with left/right keys and
    (in drive mode) nudge the ball with up/down, broadcasting the heading
    to other clients at most every 0.2s."""
    self.attachClub(self.currentGolfer, True)
    # Unit aim direction from the follow node's heading (0.01745 = pi/180).
    x = -math.sin(self.ballFollow.getH() * 0.0174532925)
    y = math.cos(self.ballFollow.getH() * 0.0174532925)
    dt = globalClock.getDt()
    b = self.curGolfBall()
    forceMove = 500
    forceMoveDt = forceMove * dt
    posUpdate = False
    momentumChange = dt * 60.0
    if (self.arrowKeys.upPressed() or self.arrowKeys.downPressed()) and not self.golfCourse.canDrive(self.currentGolfer):
        # Up/down without drive rights just snaps the aim toward the cup.
        posUpdate = True
        self.aimMomentum = 0.0
        self.ballFollow.headsUp(self.holeBottomNodePath)
    elif self.arrowKeys.rightPressed() and not self.arrowKeys.leftPressed():
        # Accelerating rotation, clamped to [-30, 0] deg/s.
        self.aimMomentum -= momentumChange
        if self.aimMomentum > 0:
            self.aimMomentum = 0.0
        elif self.aimMomentum < -30.0:
            self.aimMomentum = -30.0
        posUpdate = True
        self.switchToAnimState('GolfRotateLeft')
        self.scoreBoard.hide()
    elif self.arrowKeys.leftPressed() and not self.arrowKeys.rightPressed():
        # Accelerating rotation, clamped to [0, 30] deg/s.
        self.aimMomentum += momentumChange
        if self.aimMomentum < 0.0:
            self.aimMomentum = 0.0
        elif self.aimMomentum > 30.0:
            self.aimMomentum = 30.0
        posUpdate = True
        self.switchToAnimState('GolfRotateRight')
        self.scoreBoard.hide()
    else:
        self.aimMomentum = 0.0
        self.switchToAnimState('GolfPuttLoop')
    self.ballFollow.setH(self.ballFollow.getH() + self.aimMomentum * dt)
    # Drive mode (cheat/dev): push the ball around directly.
    if self.arrowKeys.upPressed() and self.golfCourse.canDrive(self.currentGolfer):
        b.enable()
        b.addForce(Vec3(x * forceMoveDt, y * forceMoveDt, 0))
    if self.arrowKeys.downPressed() and self.golfCourse.canDrive(self.currentGolfer):
        b.enable()
        b.addForce(Vec3(-x * forceMoveDt, -y * forceMoveDt, 0))
    if self.arrowKeys.leftPressed() and self.arrowKeys.rightPressed() and self.golfCourse.canDrive(self.currentGolfer):
        b.enable()
        b.addForce(Vec3(0, 0, 3000 * dt))
    if posUpdate:
        # Throttled heading broadcast while actively rotating...
        if globalClock.getFrameTime() - self.lastTimeHeadingSent > 0.2:
            self.sendUpdate('setTempAimHeading', [localAvatar.doId, self.ballFollow.getH()])
            self.lastTimeHeadingSent = globalClock.getFrameTime()
            self.lastTempHeadingSent = self.ballFollow.getH()
    elif self.lastTempHeadingSent != self.ballFollow.getH():
        # ...and a final exact heading once rotation stops.
        self.sendUpdate('setTempAimHeading', [localAvatar.doId, self.ballFollow.getH()])
        self.lastTimeHeadingSent = globalClock.getFrameTime()
        self.lastTempHeadingSent = self.ballFollow.getH()
    self.setCamera2Ball()
    self.fixCurrentGolferFeet()
    self.adjustClub()
    self.orientCameraRay()
    return task.cont
def fixCurrentGolferFeet(self):
    """Park the toon ray 5 units above the current golfer so the next
    downward cast lands on the terrain under their feet."""
    golfer = base.cr.doId2do.get(self.currentGolfer)
    if golfer is None:
        return
    pos = golfer.getPos(render)
    self.toonRay.setPosition(Vec3(pos[0], pos[1], pos[2] + 5))
def adjustClub(self):
    """Stretch the club along Y so its head reaches the ball lookat spot.

    2.058 is presumably the putter model's unscaled length — TODO confirm.
    """
    club = self.clubs[self.currentGolfer]
    if club:
        stretch = club.getDistance(self.clubLookatSpot) / 2.058
        club.setScale(1, stretch, 1)
def resetPowerBar(self):
    """Zero the stored swing power and clear the power-bar readout."""
    self.power = 0
    self.powerBar['value'] = self.power
    self.powerBar['text'] = ''
def sendSwingInfo(self):
    """Send the final swing (power, position, aim direction) to the AI.

    Bails out if the player is being kicked for inactivity. Shows a
    power-hint label for very weak first strokes on a hole.
    """
    kickHimOut = self.updateWarning()
    if kickHimOut:
        return
    # Clamp how long the player spent aiming into [0, AIM_DURATION].
    curAimTime = globalClock.getRealTime() - self.enterAimStart
    if curAimTime < 0:
        curAimTime = 0
    if curAimTime > GolfGlobals.AIM_DURATION:
        curAimTime = GolfGlobals.AIM_DURATION
    self.notify.debug('curAimTime = %f' % curAimTime)
    # Unit aim direction from the follow node heading (0.01745 = pi/180).
    x = -math.sin(self.ballFollow.getH() * 0.0174532925)
    y = math.cos(self.ballFollow.getH() * 0.0174532925)
    b = self.curGolfBall()
    # Dev override: base.golfPower forces a specific power value.
    if hasattr(base, 'golfPower') and base.golfPower != None:
        self.power = float(base.golfPower)
    if not self.swingInfoSent:
        self.sendUpdate('postSwingState', [self.getCycleTime(),
         self.power,
         b.getPosition()[0],
         b.getPosition()[1],
         b.getPosition()[2],
         x,
         y,
         curAimTime,
         self.getCommonObjectData()])
        self.swingInfoSent = True
        # First stroke of the hole with a feeble swing: remind the
        # player to hold the key for more power.
        if self.power < 15 and self.golfCourse.scores[localAvatar.doId][self.golfCourse.curHoleIndex] == 0:
            self.powerReminder = DirectLabel(text=TTLocalizer.GolfPowerReminder, text_shadow=(0, 0, 0, 1), text_fg=VBase4(1, 1, 0.0, 1), text_align=TextNode.ACenter, relief=None, pos=(0, 0, 0.8), scale=0.12)
    return
def updateWarning(self):
    """Track consecutive missed swings; warn, then kick the local player.

    Returns True when the player has hit the kick-out threshold.
    """
    retval = False
    if not self.localToonHitControl:
        self.localMissedSwings += 1
    else:
        self.localMissedSwings = 0
    if self.localMissedSwings == GolfGlobals.KICKOUT_SWINGS - 1:
        # One miss before kick-out: show a fading on-screen warning.
        self.warningLabel = DirectLabel(parent=aspect2d, relief=None, pos=(0, 0, 0), text_align=TextNode.ACenter, text=TTLocalizer.GolfWarningMustSwing, text_scale=0.12, text_font=ToontownGlobals.getSignFont(), text_fg=(1, 0.1, 0.1, 1), text_wordwrap=20)
        self.warningInterval = Sequence(LerpColorScaleInterval(self.warningLabel, 10, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='easeIn'), Func(self.warningLabel.destroy))
        self.warningInterval.start()
    elif self.localMissedSwings >= GolfGlobals.KICKOUT_SWINGS:
        self.golfCourse.handleFallingAsleepGolf(None)
        retval = True
    return retval
def assignRecordSwing(self, avId, cycleTime, power, x, y, z, dirX, dirY, commonObjectData):
    """Simulate avId's swing locally, then ship the recorded ball movie
    to the AI and play it back on this client."""
    ball = self.ballDict[avId]['golfBall']
    # The simulation moves the ball; remember and restore its position so
    # playback starts from the pre-swing spot.
    holdBallPos = ball.getPosition()
    self.useCommonObjectData(commonObjectData)
    self.trackRecordBodyFlight(ball, cycleTime, power, Vec3(x, y, z), dirX, dirY)
    ball.setPosition(holdBallPos)
    self.sendUpdate('ballMovie2AI', [cycleTime,
     avId,
     self.recording,
     self.aVRecording,
     self.ballInHoleFrame,
     self.ballTouchedHoleFrame,
     self.ballFirstTouchedHoleFrame,
     commonObjectData])
    self.ballMovie2Client(cycleTime, avId, self.recording, self.aVRecording, self.ballInHoleFrame, self.ballTouchedHoleFrame, self.ballFirstTouchedHoleFrame, commonObjectData)
def __watchAimTask(self, task):
    """Per-frame refresh while spectating another golfer's aiming."""
    self.setCamera2Ball()
    self.attachClub(self.currentGolfer, True)
    self.adjustClub()
    self.fixCurrentGolferFeet()
    self.orientCameraRay()
    return task.cont
def __watchTeeTask(self, task):
    """Per-frame camera tracking while another golfer picks a tee."""
    self.setCamera2Ball()
    return task.cont
def curGolfBall(self):
    """Return the current golfer's ODE ball body."""
    entry = self.ballDict[self.currentGolfer]
    return entry['golfBall']
def curGolfBallGeom(self):
    """Return the current golfer's ball geometry node."""
    entry = self.ballDict[self.currentGolfer]
    return entry['golfBallGeom']
def curBallShadow(self):
    """Return the current golfer's ball drop-shadow NodePath."""
    return self.ballShadowDict[self.currentGolfer]
def cleanupGeom(self):
    """Tear down hole geometry, targets and the power bar."""
    for nodePath in (self.targets, self.terrainModel):
        nodePath.remove()
    self.powerBar.destroy()
def cleanupPowerBar(self):
    """Hide (but keep) the power bar HUD."""
    self.powerBar.hide()
def cleanupPhysics(self):
    """No hole-specific physics cleanup needed; hook kept for the base class."""
    pass
def curBall(self):
    """Return the current golfer's ball record entry."""
    entry = self.ballDict[self.currentGolfer]
    return entry['ball']
def curBallANP(self):
    """Return the current golfer's ball actor NodePath."""
    entry = self.ballDict[self.currentGolfer]
    return entry['ballActorNodePath']
def curBallActor(self):
    """Return the current golfer's ball Actor."""
    entry = self.ballDict[self.currentGolfer]
    return entry['ballActor']
def enterAim(self):
    """Enter the Aim state: camera pivot rig, collision probe, power bar,
    countdown timer and aim instructions for the local golfer."""
    self.notify.debug('Aim')
    self.notify.debug('currentGolfer = %s' % self.currentGolfer)
    self.switchToAnimState('GolfPuttLoop', forced=True)
    self.swingInfoSent = False
    self.lastState = self.state
    self.aimMomentum = 0.0
    self.enterAimStart = globalClock.getRealTime()
    taskMgr.add(self.__aimTask, 'Aim Task')
    self.showOnlyCurGolfer()
    strokes = self.golfCourse.getStrokesForCurHole(self.currentGolfer)
    # Three pivots: camPivot probes for obstructions, targetCamPivot is the
    # pitch the probe settles on, curCamPivot eases toward the target.
    self.camPivot = self.ballFollow.attachNewNode('golf-camPivot')
    self.targetCamPivot = self.ballFollow.attachNewNode('golf-targetCamPivot')
    self.targetCamPivot.setP(self.DefaultCamP)
    self.curCamPivot = self.ballFollow.attachNewNode('golf-curCamPivot')
    self.curCamPivot.setP(self.DefaultCamP)
    # Collision segment from the ball toward the camera, used by
    # _adjustCamera to find an unobstructed pitch.
    self.ccTrav = CollisionTraverser('golf.ccTrav')
    self.ccLine = CollisionSegment(0.0, 0.0, 0.0, 1.0, 0.0, 0.0)
    self.ccLineNode = CollisionNode('golf.ccLineNode')
    self.ccLineNode.addSolid(self.ccLine)
    self.ccLineNodePath = self.camPivot.attachNewNode(self.ccLineNode)
    self.ccLineBitMask = BitMask32(1048576)
    self.ccLineNode.setFromCollideMask(self.ccLineBitMask)
    self.ccLineNode.setIntoCollideMask(BitMask32.allOff())
    self.camCollisionQueue = CollisionHandlerQueue()
    self.ccTrav.addCollider(self.ccLineNodePath, self.camCollisionQueue)
    if strokes:
        # Not the first stroke: start aimed at the cup.
        self.ballFollow.headsUp(self.holeBottomNodePath)
    self.camPivot.setP(self.DefaultCamP)
    self._golfBarrierCollection = self.terrainModel.findAllMatches('**/collision?')
    self._camAdjust = ScratchPad()
    self._camAdjust.iters = 0
    self._camAdjust.lower = self.DefaultCamP
    self._camAdjust.upper = self.MaxCamP
    base.camera.setPos(self.camPosBallFollow)
    base.camera.setHpr(self.camHprBallFollow)
    self.camPivot.setP(self.DefaultCamP)
    base.camera.wrtReparentTo(self.camPivot)
    # Probe segment from just off the ball (A) to the camera (B).
    A = Point3(0, 0, 0)
    B = base.camera.getPos()
    AtoB = B - A
    AtoBnorm = Point3(AtoB)
    AtoBnorm.normalize()
    A += AtoBnorm * 0.4
    self.ccLine.setPointA(A)
    self.ccLine.setPointB(B)
    self.camPivot.setP(self.DefaultCamP)
    self._camAdjust.task = taskMgr.add(self._adjustCamera, 'adjustCamera')
    self.resetPowerBar()
    self.powerBar.show()
    self.aimDuration = GolfGlobals.AIM_DURATION
    if not self.unlimitedAimTime:
        self.timer = ToontownTimer.ToontownTimer()
        self.timer.posInTopRightCorner()
        self.timer.setTime(self.aimDuration)
        self.timer.countdown(self.aimDuration, self.timerExpired)
    self.aimInstructions = DirectLabel(text=TTLocalizer.GolfAimInstructions, text_shadow=(0, 0, 0, 1), text_fg=VBase4(1, 1, 1, 1), text_align=TextNode.ACenter, relief=None, pos=(0, 0, -0.8), scale=TTLocalizer.DGHaimInstructions)
    self.skyContact = 1
    self.localToonHitControl = False
    self._adjustCamera()
    return
def exitAim(self):
    """Leave the Aim state: stop tasks/timer, detach the camera pivot rig
    and remove the aim instructions."""
    localAvatar.wrtReparentTo(render)
    taskMgr.remove(self._camAdjust.task)
    taskMgr.remove('Aim Task')
    taskMgr.remove(self.golfPowerTaskName)
    if self.timer:
        self.timer.stop()
        self.timer.destroy()
        self.timer = None
    self.powerBar.hide()
    self.ccLineNodePath.detachNode()
    self.targetCamPivot.detachNode()
    self.curCamPivot.detachNode()
    self.camPivot.detachNode()
    if self.aimInstructions:
        self.aimInstructions.destroy()
        self.aimInstructions = None
    return
def timerExpired(self):
    """Aim countdown ran out: fire the swing with whatever power is charged."""
    taskMgr.remove(self.golfPowerTaskName)
    self.aimStart = None
    self.sendSwingInfo()
    self.resetPowerBar()
def _adjustCamera(self, task=None, first=True):
    """Binary-search the camera pitch between DefaultCamP and MaxCamP for
    an unobstructed view of the ball, then ease the camera toward it.

    Called both as a recurring task (task is not None) and synchronously
    from enterAim (task is None), in which case it iterates to convergence
    immediately.
    """
    if task is None and first:
        # Synchronous call: run iterations until the search resets.
        while 1:
            self._adjustCamera(first=False)
            if self._camAdjust.iters == 0:
                return Task.cont
    MaxIters = 5
    finalP = self._camAdjust.lower
    # Stash the avatar and hole barriers so only real scenery blocks the probe.
    localAvatar.stash()
    for barrier in self._golfBarrierCollection:
        barrier.stash()
    self.ccTrav.traverse(render)
    for barrier in self._golfBarrierCollection:
        barrier.unstash()
    localAvatar.unstash()
    midP = (self._camAdjust.lower + self._camAdjust.upper)/2
    if self.camCollisionQueue.getNumEntries() > 0:
        # Blocked at this pitch: search the steeper half.
        self.camCollisionQueue.sortEntries()
        entry = self.camCollisionQueue.getEntry(0)
        sPoint = entry.getSurfacePoint(self.camPivot)
        self._camAdjust.lower = self.camPivot.getP()
        finalP = midP
        self.camPivot.setP(finalP)
    else:
        # Clear at this pitch: search the shallower half.
        self._camAdjust.upper = self.camPivot.getP()
        finalP = self._camAdjust.upper
        self.camPivot.setP(midP)
    if abs(self._camAdjust.lower - self._camAdjust.upper) < 1.0:
        # Converged: force the iteration counter to terminate the search.
        self._camAdjust.iters = MaxIters
    self._camAdjust.iters += 1
    if self._camAdjust.iters >= MaxIters:
        # Search finished: adopt the clear pitch and reset for next round.
        self.targetCamPivot.setP(self._camAdjust.upper)
        if task is None:
            self.curCamPivot.setP(finalP)
        self._camAdjust.iters = 0
        self._camAdjust.lower = self.DefaultCamP
        self._camAdjust.upper = self.MaxCamP
        self.camPivot.setP(self.DefaultCamP)
    if task is not None:
        # Ease the current pivot toward the target pitch over time.
        self.curCamPivot.setP(self.curCamPivot,
            self.targetCamPivot.getP(self.curCamPivot)*min(1.0, 1.0*globalClock.getDt()))
    curP = self.curCamPivot.getP()
    self.curCamPivot.setP(self.DefaultCamP)
    # Re-derive the camera transform relative to the eased pivot pitch.
    base.camera.reparentTo(self.ballFollow)
    base.camera.setPos(self.camPosBallFollow)
    base.camera.setHpr(self.camHprBallFollow)
    base.camera.wrtReparentTo(self.curCamPivot)
    self.curCamPivot.setP(curP)
    base.camera.wrtReparentTo(self.ballFollow)
    return Task.cont
def enterChooseTee(self):
    """Enter tee selection: show the ball at the tee, start the choose
    task, the countdown timer and the instruction label."""
    self.notify.debug('ChooseTee')
    self.curGolfBallGeom().show()
    self.curBallShadow().show()
    self.lastState = self.state
    taskMgr.add(self.__chooseTeeTask, 'ChooseTee Task')
    self.ballFollow.setH(self.startingTeeHeading)
    self.localAvatarChosenTee = False
    self.localTempTee = 0
    # Default to tee 1 when there is more than one tee to choose from.
    if len(self.teePositions) > 1:
        self.localTempTee = 1
    self.chooseTeeDuration = GolfGlobals.TEE_DURATION
    if not self.unlimitedTeeTime:
        self.teeTimer = ToontownTimer.ToontownTimer()
        self.teeTimer.posInTopRightCorner()
        self.teeTimer.setTime(self.chooseTeeDuration)
        self.teeTimer.countdown(self.chooseTeeDuration, self.teeTimerExpired)
    self.teeInstructions = DirectLabel(text=TTLocalizer.GolfChooseTeeInstructions, text_fg=VBase4(1, 1, 1, 1), text_align=TextNode.ACenter, text_shadow=(0, 0, 0, 1), relief=None, pos=(0, 0, -0.75), scale=TTLocalizer.DGHteeInstructions)
    self.powerBar.hide()
    return
def exitChooseTee(self):
    """Leave tee selection: drop instructions, timers and tasks, and
    bring the power bar back."""
    localAvatar.wrtReparentTo(render)
    instructions = getattr(self, 'teeInstructions', None)
    if instructions:
        instructions.destroy()
        self.teeInstructions = None
    for taskName in ('ChooseTee Task', self.golfPowerTaskName):
        taskMgr.remove(taskName)
    timer = self.teeTimer
    if timer:
        timer.stop()
        timer.destroy()
        self.teeTimer = None
    self.powerBar.show()
def sendTeeInfo(self):
    """Commit the locally chosen tee to the AI and lock further changes."""
    self.sendUpdate('setAvatarTee', [self.localTempTee])
    self.localAvatarChosenTee = True
def __chooseTeeTask(self, task):
    """Per-frame poll: confirm the tee on jump, unless the fly-over is
    still playing; stop once a choice has been committed."""
    if self.localAvatarChosenTee:
        return task.done
    if self.arrowKeys.jumpPressed():
        flyOverBusy = self.flyOverInterval and self.flyOverInterval.isPlaying()
        if not flyOverBusy:
            self.sendTeeInfo()
    return task.cont
def changeTee(self, newTee):
    """Move the current ball onto tee *newTee* and refresh camera, toon
    feet and club."""
    self.curGolfBall().setPosition(self.teePositions[newTee])
    self.setCamera2Ball()
    self.fixCurrentGolferFeet()
    self.adjustClub()
def changeLocalTee(self, newTee):
    """Apply a local tee change and broadcast it to the other clients."""
    self.changeTee(newTee)
    self.sendUpdate('setAvatarTempTee', [localAvatar.doId, newTee])
    self.fixCurrentGolferFeet()
    self.adjustClub()
def __leftArrowPressed(self):
    """Cycle the tee selection backwards (only in the ChooseTee state)."""
    if self.state != 'ChooseTee':
        return
    nextTee = self.localTempTee - 1
    if nextTee < 0:
        nextTee = len(self.teePositions) - 1
    self.localTempTee = nextTee
    self.changeLocalTee(nextTee)
def __rightArrowPressed(self):
    """Cycle the tee selection forwards (only in the ChooseTee state)."""
    if self.state != 'ChooseTee':
        return
    self.localTempTee = (self.localTempTee + 1) % len(self.teePositions)
    self.changeLocalTee(self.localTempTee)
def teeTimerExpired(self):
    """Tee countdown ran out: commit whatever tee is currently selected."""
    self.sendTeeInfo()
def enterWatchAim(self):
    """Spectator state while another golfer aims their shot."""
    self.notify.debug('Watch Aim')
    self.notify.debugStateCall(self)
    self.notify.debug('currentGolfer = %s' % self.currentGolfer)
    # After the first stroke, face the cup like the aiming player does.
    if self.golfCourse.getStrokesForCurHole(self.currentGolfer):
        self.ballFollow.lookAt(self.holeBottomNodePath)
        self.ballFollow.setP(0)
    self.showOnlyCurGolfer()
    taskMgr.add(self.__watchAimTask, 'Watch Aim Task')
def exitWatchAim(self):
    """Stop spectating: detach the golfer from the follow node while
    preserving their world-space pose."""
    self.notify.debugStateCall(self)
    av = base.cr.doId2do.get(self.currentGolfer)
    if av is None:
        self.notify.debug('could not get avId %d' % self.currentGolfer)
    else:
        heading = av.getH(render)
        toonPos = av.getPos(render)
        av.reparentTo(render)
        av.setH(heading)
        av.setPos(toonPos)
        self.notify.debug('av %s now at position %s' % (av.getName(), av.getPos()))
    taskMgr.remove('Watch Aim Task')
def enterWatchTee(self):
    """Spectator state while another golfer chooses a tee."""
    self.notify.debug('Watch Tee')
    self.notify.debugStateCall(self)
    self.curGolfBallGeom().show()
    self.ballFollow.setH(self.startingTeeHeading)
    self.curBallShadow().show()
def exitWatchTee(self):
    """Leave the WatchTee state and stop its tracking task.

    The original body also fetched the current golfer's avatar into an
    unused local (a side-effect-free dict lookup); that dead code is
    removed.
    """
    self.notify.debugStateCall(self)
    taskMgr.remove('Watch Tee Task')
def enterWait(self):
    """Idle state between turns; nothing to set up."""
    self.notify.debug('Wait')
    self.notify.debugStateCall(self)
def exitWait(self):
    """Leave the idle state; nothing to tear down."""
    self.notify.debugStateCall(self)
def removePlayBackDelayDelete(self):
    """Release the DelayDelete keeping the golfer alive during playback."""
    delayDelete = self.playBackDelayDelete
    if delayDelete:
        delayDelete.destroy()
        self.playBackDelayDelete = None
def enterPlayback(self):
    """Enter shot playback: drive the golfer's swing/reaction animations
    and schedule all ball/crowd sound effects against the recorded
    ball-movie timeline.

    All "adjusted" times below shift the recorded event times by
    BALL_CONTACT_TIME, the point in the swing animation where the club
    actually strikes the ball.
    """
    def shiftClubToRightHand():
        # After the swing, the toon carries the club in the right hand.
        club = self.clubs[self.currentGolfer]
        av = base.cr.doId2do.get(self.currentGolfer)
        if av and club:
            club.wrtReparentTo(av.getRightHands()[0])
    av = base.cr.doId2do.get(self.currentGolfer)
    if not av:
        return
    else:
        # Keep the avatar alive for the duration of the playback movie.
        self.removePlayBackDelayDelete()
        self.playBackDelayDelete = DelayDelete.DelayDelete(av, 'GolfHole.enterPlayback')
        self.accept('clientCleanup', self._handleClientCleanup)
        self.inPlayBack = 1
        self.setLookingAtPutt(False)
        # Swing up to ball contact, launch the recorded ball, finish the
        # swing, then move the club and start watching the putt.
        self.swingInterval = Sequence(ActorInterval(av, 'swing-putt', startFrame=0, endFrame=GolfGlobals.BALL_CONTACT_FRAME), Func(self.startBallPlayback), ActorInterval(av, 'swing-putt', startFrame=GolfGlobals.BALL_CONTACT_FRAME, endFrame=23), Func(shiftClubToRightHand), Func(self.setLookingAtPutt, True), Func(self.removePlayBackDelayDelete))
        adjustedBallTouchedHoleTime = self.ballTouchedHoleTime + GolfGlobals.BALL_CONTACT_TIME
        adjustedBallFirstTouchedHoleTime = self.ballFirstTouchedHoleTime + GolfGlobals.BALL_CONTACT_TIME
        adjustedBallDropTime = self.ballDropTime + GolfGlobals.BALL_CONTACT_TIME
        adjustedPlaybackEndTime = self.playbackMovieDuration + GolfGlobals.BALL_CONTACT_TIME
        self.notify.debug('adjustedTimes ballTouched=%.2f ballFirstTouched=%.2f ballDrop=%.2f playbaybackEnd=%.2f' % (adjustedBallTouchedHoleTime,
         adjustedBallFirstTouchedHoleTime,
         adjustedBallDropTime,
         adjustedPlaybackEndTime))
        if self.ballWillGoInHole:
            # Success: look at the putt until the drop, then celebrate.
            curDuration = self.swingInterval.getDuration()
            lookPuttInterval = ActorInterval(av, 'look-putt')
            if curDuration < adjustedBallDropTime:
                self.swingInterval.append(lookPuttInterval)
            curDuration = self.swingInterval.getDuration()
            diffTime = adjustedBallDropTime - curDuration
            if diffTime > 0:
                self.swingInterval.append(ActorInterval(av, 'lookloop-putt', endTime=diffTime))
            self.swingInterval.append(ActorInterval(av, 'good-putt', endTime=self.playbackMovieDuration, loop=1))
        elif self.ballTouchedHoleTime:
            # Rim-out: look until the touch, then react badly.
            self.notify.debug('doing self.ballTouchedHoleTime')
            curDuration = self.swingInterval.getDuration()
            lookPuttInterval = ActorInterval(av, 'look-putt')
            if curDuration < adjustedBallTouchedHoleTime:
                self.swingInterval.append(lookPuttInterval)
            curDuration = self.swingInterval.getDuration()
            diffTime = adjustedBallTouchedHoleTime - curDuration
            if diffTime > 0:
                self.swingInterval.append(ActorInterval(av, 'lookloop-putt', endTime=diffTime))
            self.swingInterval.append(ActorInterval(av, 'bad-putt', endFrame=32))
            self.swingInterval.append(ActorInterval(av, 'badloop-putt', endTime=self.playbackMovieDuration, loop=1))
        else:
            # Miss: just watch the ball for the whole movie.
            self.swingInterval.append(ActorInterval(av, 'look-putt'))
            self.swingInterval.append(ActorInterval(av, 'lookloop-putt', endTime=self.playbackMovieDuration, loop=1))
        # ---- Sound effects, assembled in parallel with the animations ----
        sfxInterval = Parallel()
        ballHitInterval = Sequence(Wait(GolfGlobals.BALL_CONTACT_TIME), SoundInterval(self.hitBallSfx))
        sfxInterval.append(ballHitInterval)
        if self.ballWillGoInHole:
            # Cup rattle: start sfx, optional loop, then rest-in-cup sfx,
            # timed to end exactly with the playback movie.
            ballRattle = Sequence()
            timeToPlayBallRest = adjustedPlaybackEndTime - self.ballGoesToRestSfx.length()
            if adjustedBallFirstTouchedHoleTime < timeToPlayBallRest:
                diffTime = timeToPlayBallRest - adjustedBallFirstTouchedHoleTime
                if self.ballGoesInStartSfx.length() < diffTime:
                    ballRattle.append(Wait(adjustedBallFirstTouchedHoleTime))
                    ballRattle.append(SoundInterval(self.ballGoesInStartSfx))
                    timeToPlayLoop = adjustedBallFirstTouchedHoleTime + self.ballGoesInStartSfx.length()
                    loopTime = timeToPlayBallRest - timeToPlayLoop
                    if self.ballGoesInLoopSfx.length() == 0.0:
                        numLoops = 0
                    else:
                        numLoops = int(loopTime / self.ballGoesInLoopSfx.length())
                    self.notify.debug('numLoops=%d loopTime=%f' % (numLoops, loopTime))
                    if loopTime > 0:
                        ballRattle.append(SoundInterval(self.ballGoesInLoopSfx, loop=1, duration=loopTime, seamlessLoop=True))
                    ballRattle.append(SoundInterval(self.ballGoesToRestSfx))
                    self.notify.debug('playing full rattling')
                else:
                    # Not enough room for the full start sfx: skip into it.
                    self.notify.debug('playing abbreviated rattling')
                    timeToPlayBallGoesIn = adjustedBallFirstTouchedHoleTime
                    ballRattle.append(Wait(timeToPlayBallGoesIn))
                    startTime = self.ballGoesInStartSfx.length() - diffTime
                    self.notify.debug('adjustedBallDropTime=%s diffTime=%s starTime=%s' % (adjustedBallDropTime, diffTime, startTime))
                    ballRattle.append(SoundInterval(self.ballGoesInStartSfx, startTime=startTime))
                    ballRattle.append(SoundInterval(self.ballGoesToRestSfx))
            else:
                # Touch happens very late: skip into the rest-in-cup sfx.
                self.notify.debug('playing abbreviated ball goes to rest')
                ballRattle.append(Wait(adjustedBallFirstTouchedHoleTime))
                diffTime = adjustedPlaybackEndTime - adjustedBallFirstTouchedHoleTime
                startTime = self.ballGoesToRestSfx.length() - diffTime
                self.notify.debug('adjustedBallDropTime=%s diffTime=%s starTime=%s' % (adjustedBallDropTime, diffTime, startTime))
                ballRattle.append(SoundInterval(self.ballGoesToRestSfx, startTime=startTime))
            sfxInterval.append(ballRattle)
        # Crowd reactions use this golfer's private sfx copies.
        crowdBuildupSfx = self.crowdBuildupSfx[self.avIdList.index(self.currentGolfer)]
        crowdApplauseSfx = self.crowdApplauseSfx[self.avIdList.index(self.currentGolfer)]
        crowdMissSfx = self.crowdMissSfx[self.avIdList.index(self.currentGolfer)]
        if self.ballWillGoInHole:
            # Build-up timed to peak as the ball reaches the cup, then applause.
            crowdIval = Sequence()
            buildupLength = crowdBuildupSfx.length()
            self.notify.debug('buildupLength=%s' % buildupLength)
            diffTime = adjustedBallFirstTouchedHoleTime - buildupLength
            if diffTime > 0:
                crowdIval.append(Wait(diffTime))
                crowdIval.append(SoundInterval(crowdBuildupSfx))
                crowdIval.append(SoundInterval(crowdApplauseSfx))
            else:
                startTime = buildupLength - adjustedBallFirstTouchedHoleTime
                self.notify.debug('playing abbreviated crowd build and applause diffTime=%s startTime=%s' % (diffTime, startTime))
                crowdIval.append(SoundInterval(crowdBuildupSfx, startTime=startTime))
                crowdIval.append(SoundInterval(crowdApplauseSfx))
            sfxInterval.append(crowdIval)
        elif self.ballFirstTouchedHoleTime:
            # Near miss: build-up followed by the crowd's groan.
            crowdIval = Sequence()
            buildupLength = crowdBuildupSfx.length()
            self.notify.debug('touched but not going in buildupLength=%s' % buildupLength)
            diffTime = adjustedBallFirstTouchedHoleTime - buildupLength
            if diffTime > 0:
                self.notify.debug('waiting %.2f to play crowd buildup' % diffTime)
                crowdIval.append(Wait(diffTime))
                crowdIval.append(SoundInterval(crowdBuildupSfx))
                crowdIval.append(SoundInterval(crowdMissSfx))
            else:
                startTime = buildupLength - adjustedBallFirstTouchedHoleTime
                self.notify.debug('playing abbreviated crowd build and miss diffTime=%s startTime=%s' % (diffTime, startTime))
                crowdIval.append(SoundInterval(crowdBuildupSfx, startTime=startTime))
                crowdIval.append(SoundInterval(crowdMissSfx))
            sfxInterval.append(crowdIval)
        if self.sfxInterval:
            self.sfxInterval.finish()
        self.sfxInterval = sfxInterval
        self.sfxInterval.start()
        self.swingInterval.start()
def exitPlayback(self):
    """Leave the Playback state: stop the swing interval, restore the
    golfer's animation to match the shot outcome, and tear down
    playback bookkeeping."""
    self.notify.debug('Exiting Playback')
    if self.swingInterval:
        # stop any swing animation still running
        self.swingInterval.pause()
    av = base.cr.doId2do.get(self.currentGolfer)
    if av:
        if self.ballWillGoInHole:
            av.loop('good-putt', restart=0)
        elif self.ballTouchedHoleTime:
            # rim-out: no local loop; the anim state below covers it
            pass
        else:
            av.loop('neutral')
        self.setLookingAtPutt(False)
        # only the local avatar broadcasts its animation state
        if av == base.localAvatar:
            if self.ballWillGoInHole:
                av.b_setAnimState('GolfGoodPutt')
            elif self.ballTouchedHoleTime:
                av.b_setAnimState('GolfBadPutt')
            else:
                av.b_setAnimState('neutral')
    taskMgr.remove('playback task')
    self.curGolfBall().disable()
    # None deactivates every golfer's ball (see readyCurrentGolfer)
    self.readyCurrentGolfer(None)
    self.inPlayBack = 0
    if self.powerReminder:
        self.powerReminder.destroy()
        self.powerReminder = None
    return
def setLookingAtPutt(self, newVal):
    """Record whether the golfer should visually track the ball."""
    self.isLookingAtPutt = newVal
def getLookingAtPutt(self):
    """Return whether the golfer is currently tracking the putt."""
    return self.isLookingAtPutt
def startBallPlayback(self):
    """Prime the playback cursors at the first pair of recorded frames."""
    self.playbackFrameNum = 0
    # position track: source = first recorded frame, dest = second
    self.sourceFrame = self.recording[0]
    self.destFrameNum = 1
    self.destFrame = self.recording[1]
    # the angular-velocity (spin) track mirrors the position track
    self.aVSourceFrame = self.aVRecording[0]
    self.aVDestFrameNum = 1
    self.aVDestFrame = self.aVRecording[1]
    # 2 == "playback running" (consumed by postStep)
    self.inPlayBack = 2
def isCurBallInHole(self):
    """True if the current ball lies within capture distance of any cup."""
    ballPos = self.curGolfBall().getPosition()
    for holePos in self.holePositions:
        gap = (ballPos - holePos).length()
        self.notify.debug('hole %s length=%s' % (holePos, gap))
        if gap <= GolfGlobals.DistanceToBeInHole:
            return True
    return False
def handleBallGoingInHole(self):
par = GolfGlobals.HoleInfo[self.holeId]['par']
unlimitedSwing = False
av = base.cr.doId2do.get(self.currentGolfer)
if av:
unlimitedSwing = av.getUnlimitedSwing()
if not unlimitedSwing:
self.curGolfBall().setPosition(0, 0, -100)
self.ballShadowDict[self.currentGolfer].setPos(0, 0, -100)
self.ballShadowDict[self.currentGolfer].hide()
strokes = 3
if self.golfCourse:
strokes = self.golfCourse.getStrokesForCurHole(self.currentGolfer)
else:
self.notify.warning('self.golfCourse is None')
diff = strokes - par
if diff > 0:
textStr = '+' + str(diff)
else:
textStr = diff
if strokes == 1:
textStr = TTLocalizer.GolfHoleInOne
elif diff in TTLocalizer.GolfShotDesc:
if self.ballWillGoInHole:
textStr = TTLocalizer.GolfShotDesc[diff]
perfectTextSubnode = hidden.attachNewNode(self.__genText(textStr))
perfectText = hidden.attachNewNode('perfectText')
perfectTextSubnode.reparentTo(perfectText)
frame = self.__textGen.getCardActual()
offsetY = -abs(frame[2] + frame[3]) / 2.0 - 1.35
perfectTextSubnode.setPos(0, 0, offsetY)
perfectText.setColor(1, 0.1, 0.1, 1)
def fadeFunc(t, text = perfectText):
text.setColorScale(1, 1, 1, t)
def destroyText(text = perfectText):
text.removeNode()
animTrack = Sequence()
av = base.cr.doId2do.get(self.currentGolfer)
animTrack.append(Func(self.golfCourse.updateScoreBoard))
textTrack = Sequence(Func(perfectText.reparentTo, aspect2d), Parallel(LerpScaleInterval(perfectText, duration=0.5, scale=0.3, startScale=0.0), LerpFunctionInterval(fadeFunc, fromData=0.0, toData=1.0, duration=0.5)), Wait(2.0), Parallel(LerpScaleInterval(perfectText, duration=0.5, scale=1.0), LerpFunctionInterval(fadeFunc, fromData=1.0, toData=0.0, duration=0.5, blendType='easeIn')), Func(destroyText), WaitInterval(0.5), Func(self.sendUpdate, 'turnDone', []))
soundTrack = Sequence()
if strokes == 1:
soundTrack.append(SoundInterval(self.holeInOneSfx))
elif self.hasCurGolferReachedMaxSwing and not self.ballWillGoInHole:
soundTrack.append(SoundInterval(self.kickedOutSfx))
self.perfectIval = Parallel(textTrack, soundTrack, animTrack)
self.perfectIval.start()
def __playbackTask(self, task):
    # Task-manager wrapper around playBackFrame.
    # NOTE(review): playBackFrame() as defined below takes no argument
    # besides self, yet `task` is passed here; as written this call would
    # raise TypeError -- confirm which definition is current.
    return self.playBackFrame(task)
def toonRayCollisionCallback(self, x, y, z):
    """Terrain-ray hit at (x, y, z): settle the golfer's feet onto the
    ground relative to the ball-follow spot."""
    # only meaningful while someone is aiming or choosing a tee
    if self.state not in ('Aim', 'WatchAim', 'ChooseTee', 'WatchTee'):
        return
    probe = render.attachNewNode('temp')
    probe.setPos(x, y, z)
    relPos = probe.getPos(self.ballFollowToonSpot)
    av = base.cr.doId2do.get(self.currentGolfer)
    if av:
        # never sink the toon more than one ball radius below the spot
        floor = 0 - GolfGlobals.GOLF_BALL_RADIUS
        av.setPos(0, 0, max(relPos[2], floor))
    probe.removeNode()
def preStep(self):
    # Physics pre-step hook: only forwarded while a golfer's ball is live.
    if self.currentGolferActive:
        GolfHoleBase.GolfHoleBase.preStep(self)
def postStep(self):
    """Physics post-step hook: advance movie playback and keep visual
    helpers (translucency, golfer gaze) in sync."""
    if self.currentGolferActive:
        GolfHoleBase.GolfHoleBase.postStep(self)
    DistributedPhysicsWorld.DistributedPhysicsWorld.postStep(self)
    if self.inPlayBack == 2:
        # playback running: consume one recorded frame per physics step
        self.playBackFrame()
        self.makeCurGolferLookAtBall()
    elif self.state == 'Playback' and self.inPlayBack == 0:
        # movie finished on an earlier step; fall back to waiting
        self.request('Wait')
    self.updateTranslucentObjects()
def updateTranslucentObjects(self):
    """Fade geometry that started blocking the camera this frame and
    restore geometry that stopped blocking it."""
    previous = self.translucentLastFrame
    current = self.translucentCurFrame
    for nodePath in previous:
        if nodePath not in current:
            # no longer between camera and ball: back to full opacity
            nodePath.setColorScale(1, 1, 1, 1)
    for nodePath in current:
        if nodePath not in previous:
            self.notify.debug('making translucent %s' % nodePath)
            nodePath.setColorScale(1, 1, 1, 0.25)
            nodePath.setTransparency(1)
def makeCurGolferLookAtBall(self):
    """While watching a putt, turn the golfer's head toward the ball."""
    if not self.getLookingAtPutt():
        return
    av = base.cr.doId2do.get(self.currentGolfer)
    if not av:
        return
    ballPos = self.curGolfBall().getPosition()
    av.headsUp(ballPos[0], ballPos[1], ballPos[2])
    # headsUp faces the toon at the ball; offset so the toon stands sideways
    av.setH(av.getH() - 90)
def playBackFrame(self):
    """Advance playback one frame: linearly interpolate the ball position
    and angular velocity between recorded keyframes, and end the turn
    once the recording is exhausted."""
    doPrint = 0
    doAVPrint = 0
    lastFrame = self.recording[len(self.recording) - 1][0]
    # advance the position keyframe cursor once we pass destFrame's time
    if self.playbackFrameNum >= self.destFrame[0]:
        self.sourceFrame = self.destFrame
        self.destFrameNum += 1
        doPrint = 1
        if self.destFrameNum < len(self.recording):
            self.destFrame = self.recording[self.destFrameNum]
        else:
            # recording exhausted: decide how the turn ends
            self.notify.debug('recording length %s' % len(self.recording))
            if self.isCurBallInHole() or self.hasCurGolferReachedMaxSwing():
                self.handleBallGoingInHole()
                self.request('Wait')
            else:
                self.golfCourse.updateScoreBoard()
                self.request('Wait')
                self.sendUpdate('turnDone', [])
            return
    # lerp weight: fraction of the keyframe span still ahead of us
    self.projLength = self.destFrame[0] - self.sourceFrame[0]
    self.projPen = self.destFrame[0] - self.playbackFrameNum
    propSource = float(self.projPen) / float(self.projLength)
    propDest = 1.0 - propSource
    projX = self.sourceFrame[1] * propSource + self.destFrame[1] * propDest
    projY = self.sourceFrame[2] * propSource + self.destFrame[2] * propDest
    projZ = self.sourceFrame[3] * propSource + self.destFrame[3] * propDest
    newPos = Vec3(projX, projY, projZ)
    ball = self.curGolfBall()
    ball.setPosition(newPos)
    # same scheme for the angular-velocity (spin) track
    if self.playbackFrameNum >= self.aVDestFrame[0]:
        self.aVSourceFrame = self.aVDestFrame
        self.aVDestFrameNum += 1
        doAVPrint = 1
        if self.aVDestFrameNum < len(self.aVRecording):
            self.aVDestFrame = self.aVRecording[self.aVDestFrameNum]
    newAV = Vec3(self.aVSourceFrame[1], self.aVSourceFrame[2], self.aVSourceFrame[3])
    self.projLength = self.aVDestFrame[0] - self.aVSourceFrame[0]
    self.projPen = self.aVDestFrame[0] - self.playbackFrameNum
    propSource = float(self.projPen) / float(self.projLength)
    propDest = 1.0 - propSource
    projX = self.aVSourceFrame[1] * propSource + self.aVDestFrame[1] * propDest
    projY = self.aVSourceFrame[2] * propSource + self.aVDestFrame[2] * propDest
    projZ = self.aVSourceFrame[3] * propSource + self.aVDestFrame[3] * propDest
    # overwrites the un-interpolated value computed above
    newAV = Vec3(projX, projY, projZ)
    ball = self.curGolfBall()
    ball.setAngularVel(newAV)
    # keep physics live until just before the final recorded frame
    if self.playbackFrameNum < lastFrame - 1:
        ball.enable()
    else:
        ball.disable()
    self.setCamera2Ball()
    self.placeBodies()
    if doAVPrint:
        pass
    if doPrint:
        self.notify.debug('. %s %s %s %s %s' % (self.playbackFrameNum,
                                                self.sourceFrame[0],
                                                self.destFrame[0],
                                                self.destFrameNum,
                                                newPos))
    self.playbackFrameNum += 1
def enterCleanup(self):
    """Tear down per-hole state: tasks, input handlers, intervals, geometry."""
    taskMgr.remove('update task')
    if hasattr(self, 'arrowKeys'):
        self.arrowKeys.destroy()
        self.arrowKeys = None
    self.ignoreAll()
    # stop and drop both playback intervals, if they exist
    for attrName in ('swingInterval', 'sfxInterval'):
        interval = getattr(self, attrName)
        if interval:
            interval.pause()
            setattr(self, attrName, None)
    self.cleanupGeom()
    return
def exitCleanup(self):
    """Nothing to undo when leaving the Cleanup state."""
    pass
def setCamera2Ball(self):
    """Snap the camera-follow node onto the current ball's position."""
    pos = self.curGolfBall().getPosition()
    self.ballFollow.setPos(Point3(pos[0], pos[1], pos[2]))
def hitBall(self, ball, power, x, y):
    """Perform a swing on `ball` with the given power and aim direction.

    Bug fix: the original called self.performSwing(self, ball, power, x, y),
    passing `self` twice -- performSwing already receives `self` through the
    bound call, so the extra argument raised TypeError.
    """
    self.performSwing(ball, power, x, y)
def ballMovie2Client(self, cycleTime, avId, movie, spinMovie, ballInFrame, ballTouchedHoleFrame, ballFirstTouchedHoleFrame, commonObjectData):
    """Receive the simulated ball movie and start local playback.

    movie/spinMovie are keyframe lists of (frameNum, x, y, z); the *Frame
    arguments are frame indices, converted to times below via DTAStep.
    """
    self.notify.debug('received Movie, number of frames %s %s ballInFrame=%d ballTouchedHoleFrame=%d ballFirstTouchedHoleFrame=%d' % (len(movie),
                                                                                                                                      len(spinMovie),
                                                                                                                                      ballInFrame,
                                                                                                                                      ballTouchedHoleFrame,
                                                                                                                                      ballFirstTouchedHoleFrame))
    if self.state == 'Playback':
        # a movie is already running; drop the duplicate
        self.notify.debug('SMASHED PLAYBACK')
        return
    self.ballShadowDict[avId].show()
    self.holdCycleTime = cycleTime
    self.holdCommonObjectData = commonObjectData
    self.useCommonObjectData(self.holdCommonObjectData)
    self.recording = movie
    self.aVRecording = spinMovie
    endingBallPos = Vec3(movie[-1][1], movie[-1][2], movie[-1][3])
    endingFrame = movie[-1][0]
    self.playbackMovieDuration = endingFrame * self.DTAStep
    self.notify.debug('playback movie duration=%s' % self.playbackMovieDuration)
    # NOTE(review): only holePositions[0] is checked here, unlike
    # isCurBallInHole which scans every hole -- confirm
    displacement = self.holePositions[0] - endingBallPos
    self.ballWillGoInHole = False
    if displacement.length() <= GolfGlobals.DistanceToBeInHole:
        self.ballWillGoInHole = True
    self.notify.debug('endingBallPos=%s, distanceToHole=%s, ballWillGoInHole=%s' % (endingBallPos, displacement.length(), self.ballWillGoInHole))
    self.ballDropTime = ballInFrame * self.DTAStep
    self.ballTouchedHoleTime = ballTouchedHoleFrame * self.DTAStep
    self.ballFirstTouchedHoleTime = ballFirstTouchedHoleFrame * self.DTAStep
    if self.state == 'WatchTee':
        self.request('WatchAim')
    self.request('Playback')
def golfersTurn(self, avId):
    """Start avId's turn: aim locally, or watch if it's another player."""
    self.readyCurrentGolfer(avId)
    self.setCamera2Ball()
    if avId == localAvatar.doId:
        self.request('Aim')
    else:
        self.request('WatchAim')
def readyCurrentGolfer(self, avId):
for index in self.ballDict:
self.ballDict[index]['golfBallOdeGeom'].setCollideBits(BitMask32(0))
self.ballDict[index]['golfBallOdeGeom'].setCategoryBits(BitMask32(0))
self.ballDict[index]['golfBall'].disable()
if avId:
self.currentGolfer = avId
self.currentGolferActive = True
if avId in self.ballDict:
self.ballDict[avId]['golfBallOdeGeom'].setCollideBits(BitMask32(16777215))
self.ballDict[avId]['golfBallOdeGeom'].setCategoryBits(BitMask32(4278190080L))
else:
self.currentGolferActive = False
def setGolferIds(self, avIds):
    """Record the players on this hole and reset their tee choices."""
    self.avIdList = avIds
    self.numPlayers = len(avIds)
    # -1 means "tee not chosen yet"
    self.teeChosen = dict((avId, -1) for avId in avIds)
def setHoleId(self, holeId):
    """Store the hole id and cache its static metadata table."""
    self.holeId = holeId
    self.holeInfo = GolfGlobals.HoleInfo[holeId]
def createBall(self, avId, index = None):
    """Create avId's physics ball plus a drop shadow, hidden and disabled
    until it becomes that player's turn."""
    golfBallGeom, golfBall, odeGeom = self.createSphere(self.world, self.space, GolfGlobals.GOLF_BALL_DENSITY, GolfGlobals.GOLF_BALL_RADIUS, index)
    # prefer the second tee position when more than one exists
    startPos = self.teePositions[0]
    if len(self.teePositions) > 1:
        startPos = self.teePositions[1]
    golfBall.setPosition(startPos)
    golfBallGeom.hide()
    if self.notify.getDebug():
        self.notify.debug('golf ball body id')
        golfBall.write()
        self.notify.debug(' -')
    golfBallGeom.setName('golfBallGeom%s' % avId)
    self.ballDict[avId] = {'golfBall': golfBall,
     'golfBallGeom': golfBallGeom,
     'golfBallOdeGeom': odeGeom}
    # ball stays out of the simulation until readyCurrentGolfer enables it
    golfBall.disable()
    shadow = self.dropShadowModel.copyTo(render)
    shadow.setBin('shadow', 100)
    shadow.setScale(0.09)
    shadow.setDepthWrite(False)
    shadow.setDepthTest(True)
    self.ballShadowDict[avId] = shadow
    shadow.hide()
def setGolfCourseDoId(self, golfCourseDoId):
    """Link to our DistributedGolfCourse, requesting a callback if the
    object has not been generated yet."""
    self.golfCourseDoId = golfCourseDoId
    self.golfCourse = base.cr.doId2do.get(self.golfCourseDoId)
    if not self.golfCourse:
        # cancel any stale request before issuing a new one
        self.cr.relatedObjectMgr.abortRequest(self.golfCourseRequest)
        self.golfCourseRequest = self.cr.relatedObjectMgr.requestObjects([self.golfCourseDoId], eachCallback=self.__gotGolfCourse)
    else:
        self.scoreBoard = self.golfCourse.scoreBoard
        self.scoreBoard.hide()
def __gotGolfCourse(self, golfCourse):
    """relatedObjectMgr callback: the course object now exists."""
    self.golfCourse = golfCourse
    self.golfCourseRequest = None
def __genText(self, text):
    """Render `text` through the shared TextNode and return its geometry."""
    gen = self.__textGen
    gen.setText(text)
    return gen.generate()
def sendBox(self, pos0, pos1, pos2, quat0, quat1, quat2, quat3, anV0, anV1, anV2, lnV0, lnV1, lnV2):
    """Apply a replicated physics snapshot to the swing box body."""
    box = self.swingBox
    box.setPosition(pos0, pos1, pos2)
    box.setQuaternion(Quat(quat0, quat1, quat2, quat3))
    box.setAngularVel(anV0, anV1, anV2)
    box.setLinearVel(lnV0, lnV1, lnV2)
def hasCurGolferReachedMaxSwing(self):
    """True once the current golfer has used up this hole's swing budget.

    (The original's `if retval: pass` no-op is dropped.)
    """
    strokes = self.golfCourse.getStrokesForCurHole(self.currentGolfer)
    return strokes >= self.holeInfo['maxSwing']
def __getGolfPower(self, time):
    """Map elapsed charge time onto an oscillating 0..100 power value."""
    elapsed = max(time - self.aimStart, 0.0)
    # exponent-shaped phase of the charge cycle
    t = math.pow(elapsed / self.golfPowerSpeed, self.golfPowerExponent)
    # triangle wave: ramp up to 100, then back down
    power = int(t * 100) % 200
    if power > 100:
        power = 200 - power
    return power
def __beginTossGolf(self):
    """Input pressed: start charging the swing power meter."""
    # ignore if already charging, not aiming, or the swing was already sent
    if self.aimStart != None:
        return
    if not self.state == 'Aim':
        return
    if self.swingInfoSent:
        return
    self.localToonHitControl = True
    self.aimStart = globalClock.getFrameTime()
    messenger.send('wakeup')
    self.scoreBoard.hide()
    taskMgr.add(self.__updateGolfPower, self.golfPowerTaskName)
    return
def __endTossGolf(self):
    """Input released: stop the meter and fire the swing."""
    if self.aimStart == None:
        return
    if not self.state == 'Aim':
        return
    messenger.send('wakeup')
    taskMgr.remove(self.golfPowerTaskName)
    self.aimStart = None
    self.sendSwingInfo()
    self.resetPowerBar()
    return
def __updateGolfPower(self, task):
    """Per-frame task: refresh the power meter while the swing charges."""
    if not self.powerBar:
        print '### no power bar!!!'
        return Task.done
    newPower = self.__getGolfPower(globalClock.getFrameTime())
    self.power = newPower
    self.powerBar['value'] = newPower
    self.powerBar['text'] = TTLocalizer.GolfPowerBarText % {'power': newPower}
    return Task.cont
def golferChooseTee(self, avId):
    """Begin tee selection for avId, optionally after the hole fly-over."""
    self.readyCurrentGolfer(avId)
    self.putAwayAllToons()
    if self.needToDoFlyOver and self.doFlyOverMovie(avId):
        # fly-over sequence transitions state itself when it finishes
        pass
    else:
        self.setCamera2Ball()
        if avId == localAvatar.doId:
            if not self.state == 'ChooseTee':
                self.request('ChooseTee')
        else:
            self.request('WatchTee')
        self.takeOutToon(self.currentGolfer)
def setAvatarTempTee(self, avId, tempTee):
    """Remote golfer is previewing a tee; mirror it while we watch."""
    if self.state != 'WatchTee':
        return
    if avId != self.currentGolfer:
        self.notify.warning('setAvatarTempTee avId=%s not equal to self.currentGolfer=%s' % (avId, self.currentGolfer))
        return
    self.changeTee(tempTee)
def setAvatarFinalTee(self, avId, finalTee):
    """Remote golfer committed a tee choice; apply it."""
    if avId != self.currentGolfer:
        # NOTE(review): warning text says setAvatarTempTee in the original;
        # kept verbatim since log strings are behavior
        self.notify.warning('setAvatarTempTee avId=%s not equal to self.currentGolfer=%s' % (avId, self.currentGolfer))
        return
    self.changeTee(finalTee)
def setTempAimHeading(self, avId, heading):
    """Mirror a remote golfer's aim heading onto our follow node."""
    if avId != self.currentGolfer:
        self.notify.warning('setAvatarTempTee avId=%s not equal to self.currentGolfer=%s' % (avId, self.currentGolfer))
        return
    if self.state != 'WatchAim':
        return
    if avId != localAvatar.doId:
        self.ballFollow.setH(heading)
def stickToonToBall(self, avId):
    """Parent avId's toon to the spot beside the ball, zeroed out."""
    av = base.cr.doId2do.get(avId)
    if not av:
        return
    av.reparentTo(self.ballFollowToonSpot)
    av.setPos(0, 0, 0)
    av.setH(0)
def putAwayToon(self, avId):
    """Move avId's toon far off-stage (still parented to render)."""
    av = base.cr.doId2do.get(avId)
    if not av:
        return
    av.reparentTo(render)
    av.setPos(0, 0, -1000)
    av.setH(0)
def putAwayAllToons(self):
    """Hide every player's toon off-stage."""
    for avId in self.avIdList:
        self.putAwayToon(avId)
def takeOutToon(self, avId):
    """Stage avId's toon beside the ball: feet fixed, club attached."""
    self.stickToonToBall(avId)
    self.fixCurrentGolferFeet()
    self.attachClub(avId)
def showOnlyCurGolfer(self):
    """Stage only the active golfer and put every other toon away."""
    self.notify.debug('curGolfer = %s' % self.currentGolfer)
    cur = self.currentGolfer
    self.stickToonToBall(cur)
    self.fixCurrentGolferFeet()
    self.attachClub(cur)
    for avId in self.avIdList:
        if avId != cur:
            self.putAwayToon(avId)
def tabKeyPressed(self):
    """Toggle the camera between ball-follow view and the hole top view,
    lerping between them (doInterval) rather than snapping."""
    doPrint = 0  # (unused flag pattern elsewhere; kept for parity)
    doInterval = True
    self.notify.debug('tab key pressed')
    if not hasattr(self, 'ballFollow'):
        return
    if self.flyOverInterval and self.flyOverInterval.isPlaying():
        # don't fight the fly-over movie for the camera
        return
    if self.camInterval and self.camInterval.isPlaying():
        self.camInterval.pause()
    if base.camera.getParent() == self.ballFollow:
        # currently following the ball -> go to the top view
        if doInterval:
            curHpr = camera.getHpr(render)
            # NOTE(review): `angle` is computed but never used
            angle = PythonUtil.closestDestAngle2(curHpr[0], 0)
            self.camInterval = Sequence(Func(base.camera.wrtReparentTo, render), LerpPosHprInterval(base.camera, 2, self.camTopViewPos, self.camTopViewHpr))
            self.camInterval.start()
        else:
            base.camera.reparentTo(render)
            base.camera.setPos(self.camTopViewPos)
            base.camera.setHpr(self.camTopViewHpr)
    elif doInterval:
        # currently in top view -> return to following the ball
        curHpr = camera.getHpr(self.ballFollow)
        angle = PythonUtil.closestDestAngle2(curHpr[0], 0)
        self.camInterval = Sequence(Func(base.camera.wrtReparentTo, self.ballFollow), LerpPosHprInterval(base.camera, 2, self.camPosBallFollow, self.camHprBallFollow))
        self.camInterval.start()
    else:
        base.camera.reparentTo(self.ballFollow)
        base.camera.setPos(self.camPosBallFollow)
        base.camera.setHpr(self.camHprBallFollow)
def doFlyOverMovie(self, avId):
    """Play the hole fly-over camera movie.

    Returns True if the movie started, False when the fly-over assets
    for this hole don't exist (caller falls back to the normal flow).
    """
    title = GolfGlobals.getCourseName(self.golfCourse.courseId) + ' :\n ' + GolfGlobals.getHoleName(self.holeId) + '\n' + TTLocalizer.GolfPar + ' : ' + '%s' % self.holeInfo['par']
    self.titleLabel = DirectLabel(parent=aspect2d, relief=None, pos=(0, 0, 0.8), text_align=TextNode.ACenter, text=title, text_scale=0.12, text_font=ToontownGlobals.getSignFont(), text_fg=(1, 0.8, 0.4, 1))
    self.titleLabel.setBin('opaque', 19)
    self.titleLabel.hide()
    self.needToDoFlyOver = False
    # derive camera model/anim filenames from the terrain model path
    bamFile = self.holeInfo['terrainModel']
    fileName = bamFile.split('/')[-1]
    dotIndex = fileName.find('.')
    baseName = fileName[0:dotIndex]
    camModelName = baseName + '_cammodel.bam'
    cameraName = baseName + '_camera.bam'
    path = bamFile[0:bamFile.find(fileName)]
    camModelFullPath = path + camModelName
    cameraAnimFullPath = path + cameraName
    try:
        self.flyOverActor = Actor.Actor(camModelFullPath, {'camera': cameraAnimFullPath})
    except StandardError:
        # hole has no fly-over assets
        self.notify.debug("Couldn't find flyover %s" % camModelFullPath)
        return False
    base.transitions.noIris()
    self.flyOverActor.reparentTo(render)
    self.flyOverActor.setBlend(frameBlend=True)
    flyOverJoint = self.flyOverActor.find('**/camera1')
    # strip any geometry parented under the camera joint
    children = flyOverJoint.getChildren()
    numChild = children.getNumPaths()
    for i in xrange(numChild):
        childNodePath = children.getPath(i)
        childNodePath.removeNode()
    self.flyOverJoint = flyOverJoint
    self.flyOverInterval = Sequence(Func(base.camera.reparentTo, flyOverJoint), Func(base.camera.clearTransform), Func(self.titleLabel.show), ActorInterval(self.flyOverActor, 'camera'), Func(base.camera.reparentTo, self.ballFollow), Func(base.camera.setPos, self.camPosBallFollow), Func(base.camera.setHpr, self.camHprBallFollow))
    # after the movie, jump straight into the right state for this client
    if avId == localAvatar.doId:
        self.flyOverInterval.append(Func(self.setCamera2Ball))
        self.flyOverInterval.append(Func(self.safeRequestToState, 'ChooseTee'))
    else:
        self.flyOverInterval.append(Func(self.setCamera2Ball))
        self.flyOverInterval.append(Func(self.safeRequestToState, 'WatchTee'))
    self.flyOverInterval.append(Func(self.titleLabel.hide))
    self.flyOverInterval.append(Func(self.takeOutToon, avId))
    self.flyOverInterval.start()
    return True
def avExited(self, avId):
    """A player left; hide their ball unless their shot is still playing."""
    playingTheirShot = self.state == 'Playback' and self.currentGolfer == avId
    if not playingTheirShot:
        self.ballDict[avId]['golfBallGeom'].hide()
def orientCameraRay(self):
    """Point the camera-occlusion ray from the camera toward the ball."""
    pos = base.camera.getPos(self.terrainModel)
    self.cameraRayNodePath.setPos(pos)
    self.cameraRayNodePath.lookAt(self.ballFollow)
    # sanity check: the ray node should sit exactly at the camera
    if self.cameraRayNodePath.getPos(render) != pos:
        self.notify.debug('orientCamerRay this should not happen')
    ballPos = self.ballFollow.getPos(self.terrainModel)
    direction = Vec3(ballPos - pos)
    direction.normalize()
    self.cameraRay.set(pos, direction)
def performSwing(self, ball, power, dirX, dirY):
    """Client-side shot simulation: apply the hit force, step the physics
    world until the ball rests (capped at 2000 frames), then hand the
    recorded movie to ballMovie2Client for playback.

    NOTE(review): `cycleTime` is read below but never assigned in this
    method, and the ballMovie2Client call omits its commonObjectData
    argument -- this path looks stale/dead; confirm before relying on it.
    """
    startTime = globalClock.getRealTime()
    avId = base.localAvatar.doId
    position = ball.getPosition()
    x = position[0]
    y = position[1]
    z = position[2]
    if avId not in self.golfCourse.drivingToons:
        x = position[0]
        y = position[1]
        z = position[2]
    self.swingTime = cycleTime
    lift = 0
    ball = self.ball
    forceMove = 2500
    if power > 50:
        lift = 0
    # reset the ball state and apply the swing impulse
    ball.enable()
    ball.setPosition(x, y, z)
    ball.setLinearVel(0.0, 0.0, 0.0)
    ball.setAngularVel(0.0, 0.0, 0.0)
    ball.addForce(Vec3(dirX * forceMove * power / 100.0, dirY * forceMove * power / 100.0, lift))
    self.initRecord()
    safety = 0
    self.llv = None
    self.record(ball)
    # run the simulation to rest, recording each frame
    while ball.isEnabled() and len(self.recording) < 2000:
        self.preStep()
        self.simulate()
        self.postStep()
        self.record(ball)
        safety += 1
    self.record(ball)
    midTime = globalClock.getRealTime()
    self.processRecording()
    self.processAVRecording()
    self.notify.debug('Recording End time %s cycle %s len %s avLen %s' % (self.timingSimTime,
                                                                          self.getSimCycleTime(),
                                                                          len(self.recording),
                                                                          len(self.aVRecording)))
    self.request('WaitPlayback')
    # remember where the ball came to rest for this player
    length = len(self.recording) - 1
    x = self.recording[length][1]
    y = self.recording[length][2]
    z = self.recording[length][3]
    self.ballPos[avId] = Vec3(x, y, z)
    endTime = globalClock.getRealTime()
    diffTime = endTime - startTime
    fpsTime = self.frame / diffTime
    self.notify.debug('Time Start %s Mid %s End %s Diff %s Fps %s frames %s' % (startTime,
                                                                                midTime,
                                                                                endTime,
                                                                                diffTime,
                                                                                fpsTime,
                                                                                self.frame))
    self.ballMovie2Client(cycleTime, avId, self.recording, self.aVRecording, self.ballInHoleFrame, self.ballTouchedHoleFrame, self.ballFirstTouchedHoleFrame)
    return
def handleBallHitNonGrass(self, c0, c1):
    """Collision callback: play a surface bump sound for the pair of
    collision surface ids (c0, c1), debounced by distance and by
    whether the same sfx is already playing."""
    if not self.inPlayBack:
        return
    golfBallPos = self.curGolfBall().getPosition()
    if self.lastBumpSfxPos == golfBallPos:
        return
    if GolfGlobals.HARD_COLLIDE_ID in [c0, c1]:
        if not self.bumpHardSfx.status() == self.bumpHardSfx.PLAYING:
            # suppress rapid re-triggers while the ball rolls along a wall
            distance = (golfBallPos - self.lastBumpSfxPos).length()
            if distance > 2.0:
                base.playSfx(self.bumpHardSfx)
                self.lastBumpSfxPos = golfBallPos
    elif GolfGlobals.MOVER_COLLIDE_ID in [c0, c1]:
        if not self.bumpMoverSfx.status() == self.bumpMoverSfx.PLAYING:
            base.playSfx(self.bumpMoverSfx)
            self.lastBumpSfxPos = golfBallPos
    elif GolfGlobals.WINDMILL_BASE_COLLIDE_ID in [c0, c1]:
        if not self.bumpWindmillSfx.status() == self.bumpWindmillSfx.PLAYING:
            base.playSfx(self.bumpWindmillSfx)
            self.lastBumpSfxPos = golfBallPos
def safeRequestToState(self, newState):
    """Request newState only if the FSM allows it from the current state;
    otherwise log a warning instead of raising."""
    if self.state in self.defaultTransitions and newState in self.defaultTransitions[self.state]:
        self.request(newState)
    else:
        self.notify.warning('ignoring transition from %s to %s' % (self.state, newState))
def doMagicWordHeading(self, heading):
    """Debug magic word: force the aim heading while in the Aim state."""
    if self.state != 'Aim':
        return
    self.aimMomentum = 0.0
    self.ballFollow.setH(float(heading))
def _handleClientCleanup(self):
    """Client shutdown: release the playback DelayDelete and stop listening."""
    self.removePlayBackDelayDelete()
    self.ignore('clientCleanup')
| |
from __future__ import absolute_import
from __future__ import print_function
import theano
import theano.tensor as T
import numpy as np
import warnings, time, copy, pprint
from six.moves import range
import six
from . import optimizers
from . import objectives
from . import regularizers
from . import constraints
from . import callbacks as cbks
from .utils.layer_utils import container_from_config
from .utils.generic_utils import Progbar, printv
from .layers import containers
def standardize_y(y):
    """Coerce targets to an array with at least 2 dimensions.

    A 1D vector of labels becomes a column of shape (n, 1); anything
    already carrying a `shape` attribute is passed through unconverted.
    """
    arr = y if hasattr(y, 'shape') else np.asarray(y)
    if len(arr.shape) == 1:
        arr = np.expand_dims(arr, 1)
    return arr
def batch_shuffle(index_array, batch_size):
    """Shuffle index_array at whole-batch granularity.

    Indices are grouped into full batches which are shuffled as units;
    the leftover tail (len % batch_size items) keeps its place at the end.
    This is what keeps HDF5-backed inputs readable in sorted batch order.
    """
    batch_count = int(len(index_array) / batch_size)
    cutoff = batch_count * batch_size
    # stash the ragged tail and re-append it after shuffling
    tail = index_array[cutoff:]
    grid = index_array[:cutoff].reshape((batch_count, batch_size))
    np.random.shuffle(grid)
    return np.append(grid.flatten(), tail)
def make_batches(size, batch_size):
    """Return (start, end) index pairs covering range(size) in chunks of
    batch_size; the final pair may be shorter."""
    nb_batch = int(np.ceil(size / float(batch_size)))
    bounds = []
    for i in range(nb_batch):
        bounds.append((i * batch_size, min(size, (i + 1) * batch_size)))
    return bounds
def standardize_X(X):
    """Wrap a single input in a list; pass lists through unchanged."""
    return X if type(X) == list else [X]
def slice_X(X, start=None, stop=None):
    """Slice one array or a list of arrays along the first axis.

    `start` may be an index collection (fancy indexing; `stop` ignored)
    or a plain slice start used together with `stop`.
    """
    fancy = hasattr(start, '__len__')
    if type(X) == list:
        if fancy:
            return [x[start] for x in X]
        return [x[start:stop] for x in X]
    if fancy:
        return X[start]
    return X[start:stop]
def weighted_objective(fn):
    """Wrap objective `fn` so per-sample weights (and an optional mask)
    scale its output, returning the weighted mean over the samples whose
    weight is nonzero."""
    def weighted(y_true, y_pred, weights, mask=None):
        # Drop zero-weight samples up front so 0 * Inf never produces NaN.
        keep = weights.nonzero()[:-1]
        kept_true = y_true[keep]
        kept_pred = y_pred[keep]
        kept_weights = weights[weights.nonzero()]
        scores = fn(kept_true, kept_pred)
        weighted_scores = kept_weights * scores
        if mask is None:
            # normalize by total weight rather than calling mean()
            return weighted_scores.sum() / kept_weights.sum()
        kept_mask = mask[keep]
        return weighted_scores.sum() / (kept_mask * kept_weights).sum()
    return weighted
def standardize_weights(y, sample_weight=None, class_weight=None):
    """Build a per-sample weight array of shape y.shape[:-1] + (1,).

    Priority: explicit sample_weight, then class_weight looked up by each
    sample's target class, then uniform ones.
    """
    if sample_weight is not None:
        return standardize_y(sample_weight)
    if isinstance(class_weight, dict):
        if len(y.shape) > 3:
            raise Exception('class_weight not supported for 4+ dimensional targets.')
        original_shape = y.shape
        # collapse time/sample dims so each row is one target vector
        flat = np.reshape(y, (-1, original_shape[-1]))
        if flat.shape[1] > 1:
            # one-hot targets: class is the argmax
            classes = flat.argmax(axis=1)
        elif flat.shape[1] == 1:
            # single-column targets: the value itself is the class
            classes = np.reshape(flat, flat.shape[0])
        else:
            classes = flat
        weights = np.asarray([class_weight[cls] for cls in classes])
        # uncollapse back to the leading target dims
        return np.reshape(weights, original_shape[:-1] + (1,))
    return np.ones(y.shape[:-1] + (1,))
def model_from_yaml(yaml_string, custom_layers={}):
    '''
    Returns a model generated from a local yaml file,
    which is either created by hand or from to_yaml method of Sequential or Graph

    SECURITY NOTE(review): yaml.load without an explicit Loader can
    construct arbitrary Python objects; only pass trusted configs here
    (yaml.safe_load would suffice for configs produced by to_yaml).
    '''
    import yaml
    config = yaml.load(yaml_string)
    return model_from_config(config, custom_layers=custom_layers)
def model_from_json(json_string, custom_layers={}):
    """Reconstruct a model from a JSON config string (see Model.to_json)."""
    import json
    return model_from_config(json.loads(json_string), custom_layers=custom_layers)
def model_from_config(config, custom_layers={}):
    """Rebuild a Sequential or Graph model from a config dict; when the
    config carries an optimizer the model is compiled as well."""
    model_name = config.get('name')
    if model_name not in {'Graph', 'Sequential'}:
        raise Exception('Unrecognized model:', model_name)
    # Create a container then set class to appropriate model
    model = container_from_config(config, custom_layers=custom_layers)
    if model_name == 'Graph':
        model.__class__ = Graph
    elif model_name == 'Sequential':
        model.__class__ = Sequential
    if 'optimizer' in config:
        # if it has an optimizer, the model is assumed to be compiled
        loss = config.get('loss')
        class_mode = config.get('class_mode')
        theano_mode = config.get('theano_mode')
        optimizer_params = dict([(k, v) for k, v in config.get('optimizer').items()])
        # 'name' selects the optimizer class; the remainder are its kwargs
        optimizer_name = optimizer_params.pop('name')
        optimizer = optimizers.get(optimizer_name, optimizer_params)
        if model_name == 'Sequential':
            model.compile(loss=loss, optimizer=optimizer, class_mode=class_mode, theano_mode=theano_mode)
        elif model_name == 'Graph':
            model.compile(loss=loss, optimizer=optimizer, theano_mode=theano_mode)
    return model
def get_function_name(o):
    """Return `o` itself if it is already a name string, otherwise the
    callable's __name__ (used to serialize losses into configs)."""
    if isinstance(o, six.string_types):
        return o
    return o.__name__
class Model(object):
    """Abstract base for trainable models (Sequential, Graph).

    Provides the generic batching/epoch loops and reporting around the
    compiled theano functions that subclasses create in compile().
    """

    def _fit(self, f, ins, out_labels=[], batch_size=128, nb_epoch=100, verbose=1, callbacks=[],
             val_f=None, val_ins=None, shuffle=True, metrics=[]):
        '''
        Abstract fit function for f(*ins). Assume that f returns a list, labelled by out_labels.
        '''
        do_validation = False
        if val_f and val_ins:
            do_validation = True
            if verbose:
                print("Train on %d samples, validate on %d samples" % (len(ins[0]), len(val_ins[0])))
        nb_train_sample = len(ins[0])
        index_array = np.arange(nb_train_sample)
        history = cbks.History()
        if verbose:
            callbacks = [history, cbks.BaseLogger()] + callbacks
        else:
            callbacks = [history] + callbacks
        callbacks = cbks.CallbackList(callbacks)
        callbacks._set_model(self)
        callbacks._set_params({
            'batch_size': batch_size,
            'nb_epoch': nb_epoch,
            'nb_sample': nb_train_sample,
            'verbose': verbose,
            'do_validation': do_validation,
            'metrics': metrics,
        })
        callbacks.on_train_begin()
        # callbacks may set this to stop early
        self.stop_training = False
        for epoch in range(nb_epoch):
            callbacks.on_epoch_begin(epoch)
            # shuffle == 'batch' keeps batches contiguous (HDF5 inputs)
            if shuffle == 'batch':
                index_array = batch_shuffle(index_array, batch_size)
            elif shuffle:
                np.random.shuffle(index_array)
            batches = make_batches(nb_train_sample, batch_size)
            for batch_index, (batch_start, batch_end) in enumerate(batches):
                batch_ids = index_array[batch_start:batch_end]
                try:
                    ins_batch = slice_X(ins, batch_ids)
                except TypeError as err:
                    # NOTE(review): the original exception `err` is discarded
                    raise Exception('TypeError while preparing batch. \
If using HDF5 input data, pass shuffle="batch".\n')
                batch_logs = {}
                batch_logs['batch'] = batch_index
                batch_logs['size'] = len(batch_ids)
                callbacks.on_batch_begin(batch_index, batch_logs)
                outs = f(*ins_batch)
                if type(outs) != list:
                    outs = [outs]
                for l, o in zip(out_labels, outs):
                    batch_logs[l] = o
                callbacks.on_batch_end(batch_index, batch_logs)
                epoch_logs = {}
                if batch_index == len(batches) - 1:  # last batch
                    # validation
                    if do_validation:
                        # replace with self._evaluate
                        val_outs = self._test_loop(val_f, val_ins, batch_size=batch_size, verbose=0)
                        if type(val_outs) != list:
                            val_outs = [val_outs]
                        # same labels assumed
                        for l, o in zip(out_labels, val_outs):
                            epoch_logs['val_' + l] = o
            callbacks.on_epoch_end(epoch, epoch_logs)
            if self.stop_training:
                break
        callbacks.on_train_end()
        return history

    def _predict_loop(self, f, ins, batch_size=128, verbose=0):
        '''
        Abstract method to loop over some data in batches.
        '''
        # Collects per-batch outputs into preallocated (nb_sample, ...)
        # arrays and returns them as a list.
        nb_sample = len(ins[0])
        outs = []
        if verbose == 1:
            progbar = Progbar(target=nb_sample)
        batches = make_batches(nb_sample, batch_size)
        index_array = np.arange(nb_sample)
        for batch_index, (batch_start, batch_end) in enumerate(batches):
            batch_ids = index_array[batch_start:batch_end]
            ins_batch = slice_X(ins, batch_ids)
            batch_outs = f(*ins_batch)
            if type(batch_outs) != list:
                batch_outs = [batch_outs]
            if batch_index == 0:
                # first batch reveals the output shapes; preallocate here
                for batch_out in batch_outs:
                    shape = (nb_sample,) + batch_out.shape[1:]
                    outs.append(np.zeros(shape))
            for i, batch_out in enumerate(batch_outs):
                outs[i][batch_start:batch_end] = batch_out
            if verbose == 1:
                progbar.update(batch_end)
        return outs

    def _test_loop(self, f, ins, batch_size=128, verbose=0):
        '''
        Abstract method to loop over some data in batches.
        '''
        # Accumulates size-weighted sums of each output and divides by the
        # sample count at the end to obtain per-output means.
        nb_sample = len(ins[0])
        outs = []
        if verbose == 1:
            progbar = Progbar(target=nb_sample)
        batches = make_batches(nb_sample, batch_size)
        index_array = np.arange(nb_sample)
        for batch_index, (batch_start, batch_end) in enumerate(batches):
            batch_ids = index_array[batch_start:batch_end]
            ins_batch = slice_X(ins, batch_ids)
            batch_outs = f(*ins_batch)
            if type(batch_outs) == list:
                if batch_index == 0:
                    # NOTE(review): enumerate() is only used for its length
                    # here; the yielded tuples are unused
                    for batch_out in enumerate(batch_outs):
                        outs.append(0.)
                for i, batch_out in enumerate(batch_outs):
                    outs[i] += batch_out * len(batch_ids)
            else:
                if batch_index == 0:
                    outs.append(0.)
                outs[0] += batch_outs * len(batch_ids)
            if verbose == 1:
                progbar.update(batch_end)
        for i, out in enumerate(outs):
            outs[i] /= nb_sample
        return outs

    def get_config(self, verbose=0):
        """Serializable description of the model, including optimizer and
        loss when the model has been compiled."""
        # NOTE(review): relies on MRO -- object has no get_config, so this
        # only works for subclasses that also inherit a container class
        # (e.g. Sequential(Model, containers.Sequential)).
        config = super(Model, self).get_config()
        for p in ['class_mode', 'theano_mode']:
            if hasattr(self, p):
                config[p] = getattr(self, p)
        if hasattr(self, 'optimizer'):
            config['optimizer'] = self.optimizer.get_config()
        if hasattr(self, 'loss'):
            if type(self.loss) == dict:
                # Graph models keep one loss per output
                config['loss'] = dict([(k, get_function_name(v)) for k, v in self.loss.items()])
            else:
                config['loss'] = get_function_name(self.loss)
        if verbose:
            pp = pprint.PrettyPrinter(indent=4)
            pp.pprint(config)
        return config

    def to_yaml(self, **kwargs):
        # dump model configuration to yaml string
        import yaml
        config = self.get_config()
        return yaml.dump(config, **kwargs)

    def to_json(self, **kwargs):
        # dump model configuration to json string
        import json
        config = self.get_config()
        return json.dumps(config, **kwargs)
class Sequential(Model, containers.Sequential):
    '''
    Linear stack of layers.

    Inherits from Model the following methods:
        - _fit
        - _predict
        - _evaluate
    Inherits from containers.Sequential the following methods:
        - __init__
        - add
        - get_output
        - get_input
        - get_weights
        - set_weights
    '''
    def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None):
        '''Build the compiled theano functions used by training/testing/
        prediction: `_train`, `_train_with_acc`, `_predict`, `_test`,
        `_test_with_acc`.

        # Arguments
            optimizer: name or instance of an optimizer.
            loss: name or instance of an objective function.
            class_mode: "categorical" or "binary"; selects how accuracy
                is computed. Anything else raises Exception.
            theano_mode: optional theano compilation mode.
        '''
        self.optimizer = optimizers.get(optimizer)
        self.loss = objectives.get(loss)
        # Objective wrapped to honor per-sample weights and output masks.
        weighted_loss = weighted_objective(objectives.get(loss))
        # input of model
        self.X_train = self.get_input(train=True)
        self.X_test = self.get_input(train=False)
        self.y_train = self.get_output(train=True)
        self.y_test = self.get_output(train=False)
        # target of model (symbolic placeholder shaped like the output)
        self.y = T.zeros_like(self.y_train)
        # per-sample weights, all ones by default
        self.weights = T.ones_like(self.y_train)
        # the last layer may mask part of the output (e.g. padded timesteps)
        if hasattr(self.layers[-1], "get_output_mask"):
            mask = self.layers[-1].get_output_mask()
        else:
            mask = None
        train_loss = weighted_loss(self.y, self.y_train, self.weights, mask)
        test_loss = weighted_loss(self.y, self.y_test, self.weights, mask)
        train_loss.name = 'train_loss'
        test_loss.name = 'test_loss'
        self.y.name = 'y'
        if class_mode == "categorical":
            train_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_train, axis=-1)))
            test_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_test, axis=-1)))
        elif class_mode == "binary":
            train_accuracy = T.mean(T.eq(self.y, T.round(self.y_train)))
            test_accuracy = T.mean(T.eq(self.y, T.round(self.y_test)))
        else:
            raise Exception("Invalid class mode:" + str(class_mode))
        self.class_mode = class_mode
        self.theano_mode = theano_mode
        # regularizer terms are added to the training loss only
        for r in self.regularizers:
            train_loss = r(train_loss)
        updates = self.optimizer.get_updates(self.params, self.constraints, train_loss)
        updates += self.updates
        # Multi-input models pass a list of arrays; single-input a single one.
        if type(self.X_train) == list:
            train_ins = self.X_train + [self.y, self.weights]
            test_ins = self.X_test + [self.y, self.weights]
            predict_ins = self.X_test
        else:
            train_ins = [self.X_train, self.y, self.weights]
            test_ins = [self.X_test, self.y, self.weights]
            predict_ins = [self.X_test]
        self._train = theano.function(train_ins, train_loss, updates=updates,
                                      allow_input_downcast=True, mode=theano_mode)
        self._train_with_acc = theano.function(train_ins, [train_loss, train_accuracy], updates=updates,
                                               allow_input_downcast=True, mode=theano_mode)
        self._predict = theano.function(predict_ins, self.y_test,
                                        allow_input_downcast=True, mode=theano_mode)
        self._test = theano.function(test_ins, test_loss,
                                     allow_input_downcast=True, mode=theano_mode)
        self._test_with_acc = theano.function(test_ins, [test_loss, test_accuracy],
                                              allow_input_downcast=True, mode=theano_mode)
    def train_on_batch(self, X, y, accuracy=False, class_weight=None, sample_weight=None):
        '''Single gradient update on one batch.

        Returns the training loss, or `[loss, accuracy]` if
        `accuracy=True`.
        '''
        X = standardize_X(X)
        y = standardize_y(y)
        sample_weight = standardize_weights(y, class_weight=class_weight, sample_weight=sample_weight)
        ins = X + [y, sample_weight]
        if accuracy:
            return self._train_with_acc(*ins)
        else:
            return self._train(*ins)
    def test_on_batch(self, X, y, accuracy=False, sample_weight=None):
        '''Evaluate the loss (and optionally accuracy) on one batch,
        without updating any weights.'''
        X = standardize_X(X)
        y = standardize_y(y)
        sample_weight = standardize_weights(y, sample_weight=sample_weight)
        ins = X + [y, sample_weight]
        if accuracy:
            return self._test_with_acc(*ins)
        else:
            return self._test(*ins)
    def predict_on_batch(self, X):
        '''Return the model's output for one batch of samples.'''
        ins = standardize_X(X)
        return self._predict(*ins)
    def fit(self, X, y, batch_size=128, nb_epoch=100, verbose=1, callbacks=[],
            validation_split=0., validation_data=None, shuffle=True, show_accuracy=False,
            class_weight=None, sample_weight=None):
        '''Train the model for `nb_epoch` epochs, in batches.

        `validation_data` may be `(X_val, y_val)` or
        `(X_val, y_val, sample_weight_val)`; alternatively
        `validation_split` carves the validation set off the end of the
        training data. Returns the history object produced by `_fit`.

        NOTE(review): `callbacks=[]` is a mutable default argument,
        shared across calls if ever mutated — kept as-is for
        compatibility.
        '''
        X = standardize_X(X)
        y = standardize_y(y)
        val_f = None
        val_ins = None
        if validation_data or validation_split:
            if show_accuracy:
                val_f = self._test_with_acc
            else:
                val_f = self._test
        if validation_data:
            if len(validation_data) == 2:
                X_val, y_val = validation_data
                X_val = standardize_X(X_val)
                y_val = standardize_y(y_val)
                # no explicit weights given: weight every sample equally
                sample_weight_val = np.ones(y_val.shape[:-1] + (1,))
            elif len(validation_data) == 3:
                X_val, y_val, sample_weight_val = validation_data
                X_val = standardize_X(X_val)
                y_val = standardize_y(y_val)
                sample_weight_val = standardize_weights(y_val, sample_weight=sample_weight_val)
            else:
                raise Exception("Invalid format for validation data; provide a tuple (X_val, y_val) or (X_val, y_val, sample_weight). \
X_val may be a numpy array or a list of numpy arrays depending on your model input.")
            val_ins = X_val + [y_val, sample_weight_val]
        elif 0 < validation_split < 1:
            # hold out the LAST fraction of the (pre-shuffle) data
            split_at = int(len(X[0]) * (1 - validation_split))
            X, X_val = (slice_X(X, 0, split_at), slice_X(X, split_at))
            y, y_val = (slice_X(y, 0, split_at), slice_X(y, split_at))
            if sample_weight is not None:
                sample_weight, sample_weight_val = (slice_X(sample_weight, 0, split_at), slice_X(sample_weight, split_at))
                sample_weight_val = standardize_weights(y_val, sample_weight=sample_weight_val)
            else:
                sample_weight_val = np.ones(y_val.shape[:-1] + (1,))
            val_ins = X_val + [y_val, sample_weight_val]
        if show_accuracy:
            f = self._train_with_acc
            out_labels = ['loss', 'acc']
        else:
            f = self._train
            out_labels = ['loss']
        sample_weight = standardize_weights(y, class_weight=class_weight, sample_weight=sample_weight)
        ins = X + [y, sample_weight]
        metrics = ['loss', 'acc', 'val_loss', 'val_acc']
        return self._fit(f, ins, out_labels=out_labels, batch_size=batch_size, nb_epoch=nb_epoch,
                         verbose=verbose, callbacks=callbacks,
                         val_f=val_f, val_ins=val_ins,
                         shuffle=shuffle, metrics=metrics)
    def predict(self, X, batch_size=128, verbose=0):
        '''Generate output predictions for the input samples, batch by
        batch.'''
        X = standardize_X(X)
        return self._predict_loop(self._predict, X, batch_size, verbose)[0]
    def predict_proba(self, X, batch_size=128, verbose=1):
        '''Like `predict`, but warns if outputs fall outside [0, 1].'''
        preds = self.predict(X, batch_size, verbose)
        if preds.min() < 0 or preds.max() > 1:
            warnings.warn("Network returning invalid probability values.")
        return preds
    def predict_classes(self, X, batch_size=128, verbose=1):
        '''Return class predictions: argmax for categorical mode, a 0.5
        threshold for binary mode.'''
        proba = self.predict(X, batch_size=batch_size, verbose=verbose)
        if self.class_mode == "categorical":
            return proba.argmax(axis=-1)
        else:
            return (proba > 0.5).astype('int32')
    def evaluate(self, X, y, batch_size=128, show_accuracy=False, verbose=1, sample_weight=None):
        '''Compute the loss (and optionally accuracy) on a dataset,
        batch by batch.'''
        X = standardize_X(X)
        y = standardize_y(y)
        sample_weight = standardize_weights(y, sample_weight=sample_weight)
        ins = X + [y, sample_weight]
        if show_accuracy:
            f = self._test_with_acc
        else:
            f = self._test
        outs = self._test_loop(f, ins, batch_size, verbose)
        if show_accuracy:
            return outs
        else:
            return outs[0]
    def save_weights(self, filepath, overwrite=False):
        '''Dump the weights of every layer to an HDF5 file.

        If the file exists and `overwrite` is False, the user is asked
        on stdin before it is clobbered.
        '''
        # Save weights from all layers to HDF5
        import h5py
        import os.path
        # if file exists and should not be overwritten
        if not overwrite and os.path.isfile(filepath):
            import sys
            get_input = input
            if sys.version_info[:2] <= (2, 7):
                # Python 2: `input` would eval the reply; use raw_input
                get_input = raw_input
            overwrite = get_input('[WARNING] %s already exists - overwrite? [y/n]' % (filepath))
            while overwrite not in ['y', 'n']:
                overwrite = get_input('Enter "y" (overwrite) or "n" (cancel).')
            if overwrite == 'n':
                return
            print('[TIP] Next time specify overwrite=True in save_weights!')
        f = h5py.File(filepath, 'w')
        # layout: one HDF5 group per layer, one dataset per parameter
        f.attrs['nb_layers'] = len(self.layers)
        for k, l in enumerate(self.layers):
            g = f.create_group('layer_{}'.format(k))
            weights = l.get_weights()
            g.attrs['nb_params'] = len(weights)
            for n, param in enumerate(weights):
                param_name = 'param_{}'.format(n)
                param_dset = g.create_dataset(param_name, param.shape, dtype=param.dtype)
                param_dset[:] = param
        f.flush()
        f.close()
    def load_weights(self, filepath):
        '''
        Load layer weights from an HDF5 file written by `save_weights`.

        This method does not make use of Sequential.set_weights()
        for backwards compatibility.
        '''
        # Loads weights from HDF5 file
        import h5py
        f = h5py.File(filepath)
        for k in range(f.attrs['nb_layers']):
            g = f['layer_{}'.format(k)]
            weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
            self.layers[k].set_weights(weights)
        f.close()
class Graph(Model, containers.Graph):
    '''Model over an arbitrary graph of layers with named inputs and
    outputs; data is exchanged as dicts keyed by input/output name.'''
    def compile(self, optimizer, loss, theano_mode=None):
        '''Build the compiled `_train`, `_test` and `_predict` functions.

        # Arguments
            optimizer: name or instance of an optimizer.
            loss: dict mapping each output name to an objective
                (name or function).
            theano_mode: optional theano compilation mode.
        '''
        # loss is a dictionary mapping output name to loss functions
        ys = []
        ys_train = []
        ys_test = []
        weights = []
        train_loss = 0.
        test_loss = 0.
        # Sum the weighted losses over every named output.
        for output_name in self.output_order:
            loss_fn = loss[output_name]
            output = self.outputs[output_name]
            y_train = output.get_output(True)
            y_test = output.get_output(False)
            # symbolic target placeholder for this output
            y = T.zeros_like(y_test)
            ys.append(y)
            ys_train.append(y_train)
            ys_test.append(y_test)
            # the output layer may mask part of its output
            if hasattr(output, "get_output_mask"):
                mask = output.get_output_mask()
            else:
                mask = None
            # per-sample weights, all ones by default
            weight = T.ones_like(y_test)
            weights.append(weight)
            weighted_loss = weighted_objective(objectives.get(loss_fn))
            train_loss += weighted_loss(y, y_train, weight, mask)
            test_loss += weighted_loss(y, y_test, weight, mask)
        train_loss.name = 'train_loss'
        test_loss.name = 'test_loss'
        # inputs in declaration order, then targets, then weights
        ins = [self.inputs[name].input for name in self.input_order]
        train_ins = ins + ys + weights
        test_ins = ins + ys + weights
        # regularizer terms are added to the training loss only
        for r in self.regularizers:
            train_loss = r(train_loss)
        self.optimizer = optimizers.get(optimizer)
        updates = self.optimizer.get_updates(self.params, self.constraints, train_loss)
        updates += self.updates
        self.theano_mode = theano_mode
        self.loss = loss
        self._train = theano.function(train_ins, train_loss, updates=updates,
                                      allow_input_downcast=True, mode=theano_mode)
        self._test = theano.function(test_ins, test_loss,
                                     allow_input_downcast=True, mode=theano_mode)
        self._predict = theano.function(inputs=ins, outputs=ys_test,
                                        allow_input_downcast=True, mode=theano_mode)
    def train_on_batch(self, data, class_weight={}, sample_weight={}):
        '''Single gradient update on one batch.

        `data` maps input AND output names to arrays;
        `class_weight`/`sample_weight` map output names to weights.
        '''
        # data is a dictionary mapping output and input names to arrays
        sample_weight = [standardize_weights(data[name],
                                             sample_weight=sample_weight.get(name),
                                             class_weight=class_weight.get(name)) for name in self.output_order]
        ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order] + sample_weight
        return self._train(*ins)
    def test_on_batch(self, data, sample_weight={}):
        '''Evaluate the summed loss on one batch, without weight updates.'''
        # data is a dictionary mapping input names to arrays
        sample_weight = [standardize_weights(data[name],
                                             sample_weight=sample_weight.get(name)) for name in self.output_order]
        ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order] + sample_weight
        return self._test(*ins)
    def predict_on_batch(self, data):
        '''Return the outputs (list, in `output_order`) for one batch.'''
        # data is a dictionary mapping input names to arrays
        ins = [data[name] for name in self.input_order]
        return self._predict(*ins)
    def fit(self, data, batch_size=128, nb_epoch=100, verbose=1, callbacks=[],
            validation_split=0., validation_data=None, shuffle=True, class_weight={}, sample_weight={}):
        '''Train for `nb_epoch` epochs on `data` (a dict keyed by
        input/output name); returns the history from `_fit`.

        NOTE(review): mutable default arguments (`[]`, `{}`) are kept
        as-is for interface compatibility.
        '''
        X = [data[name] for name in self.input_order]
        y = [standardize_y(data[name]) for name in self.output_order]
        sample_weight_list = [standardize_weights(y[i],
                                                  sample_weight=sample_weight.get(self.output_order[i])) for i in range(len(self.output_order))]
        class_weight_list = [class_weight.get(name) for name in self.output_order]
        val_f = None
        val_ins = None
        if validation_data or validation_split:
            val_f = self._test
        if validation_data:
            # can't use sample weights with validation data at this point
            sample_weight = [standardize_weights(validation_data[name]) for name in self.output_order]
            val_ins = [validation_data[name] for name in self.input_order] + [standardize_y(validation_data[name]) for name in self.output_order] + sample_weight
        elif 0 < validation_split < 1:
            # hold out the LAST fraction of the data for validation
            split_at = int(len(X[0]) * (1 - validation_split))
            X, X_val = (slice_X(X, 0, split_at), slice_X(X, split_at))
            y, y_val = (slice_X(y, 0, split_at), slice_X(y, split_at))
            sample_weight_list, sample_weight_list_val = (slice_X(sample_weight_list, 0, split_at), slice_X(sample_weight_list, split_at))
            val_ins = X_val + y_val + sample_weight_list_val
        f = self._train
        out_labels = ['loss']
        metrics = ['loss', 'val_loss']
        # re-standardize to fold in class weights
        sample_weight_list = [standardize_weights(y[i],
                                                  sample_weight=sample_weight_list[i],
                                                  class_weight=class_weight_list[i]) for i in range(len(self.output_order))]
        ins = X + y + sample_weight_list
        history = self._fit(f, ins, out_labels=out_labels, batch_size=batch_size, nb_epoch=nb_epoch,
                            verbose=verbose, callbacks=callbacks,
                            val_f=val_f, val_ins=val_ins,
                            shuffle=shuffle, metrics=metrics)
        return history
    def evaluate(self, data, batch_size=128, verbose=0, sample_weight={}):
        '''Compute the summed loss over `data`, batch by batch.'''
        sample_weight = [standardize_weights(data[name],
                                             sample_weight=sample_weight.get(name)) for name in self.output_order]
        ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order] + sample_weight
        outs = self._test_loop(self._test, ins, batch_size, verbose)
        return outs[0]
    def predict(self, data, batch_size=128, verbose=0):
        '''Generate predictions batch by batch; returns a dict mapping
        output name to array.'''
        ins = [data[name] for name in self.input_order]
        outs = self._predict_loop(self._predict, ins, batch_size, verbose)
        return dict(zip(self.output_order, outs))
    def save_weights(self, filepath, overwrite=False):
        '''Dump all model weights to a single HDF5 group `graph`.

        If the file exists and `overwrite` is False, the user is asked
        on stdin before it is clobbered.
        '''
        # Save weights from all layers to HDF5
        import h5py
        import os.path
        # if file exists and should not be overwritten
        if not overwrite and os.path.isfile(filepath):
            import sys
            get_input = input
            if sys.version_info[:2] <= (2, 7):
                # Python 2: `input` would eval the reply; use raw_input
                get_input = raw_input
            overwrite = get_input('[WARNING] %s already exists - overwrite? [y/n]' % (filepath))
            while overwrite not in ['y', 'n']:
                overwrite = get_input('Enter "y" (overwrite) or "n" (cancel).')
            if overwrite == 'n':
                return
            print('[TIP] Next time specify overwrite=True in save_weights!')
        f = h5py.File(filepath, 'w')
        g = f.create_group('graph')
        weights = self.get_weights()
        g.attrs['nb_params'] = len(weights)
        for n, param in enumerate(weights):
            param_name = 'param_{}'.format(n)
            param_dset = g.create_dataset(param_name, param.shape, dtype=param.dtype)
            param_dset[:] = param
        f.flush()
        f.close()
    def load_weights(self, filepath):
        '''Load model weights from an HDF5 file written by `save_weights`.'''
        # Loads weights from HDF5 file
        import h5py
        f = h5py.File(filepath)
        g = f['graph']
        weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
        self.set_weights(weights)
        f.close()
| |
# -*- coding:utf-8 -*-
import tkinter
from tkinter import *
from tkinter import ttk
import tkinter.messagebox
from PIL import Image, ImageTk
from digraph import draw
class GUI:
    """Tkinter front-end for building an automaton and stepping it
    through an input word.

    The left pane shows the digraph image rendered by ``digraph.draw``
    (GIF files under ``temp/``); the right pane holds the widgets for
    defining states, alphabet, transitions and final states; the
    bottom row holds the input word entry and the run log.

    Fix in this revision: ``_msgBox`` previously called the undefined
    name ``tkMessageBox`` (the Python 2 module name) and raised
    NameError; it now uses the imported ``tkinter.messagebox``.
    """
    def __init__(self, automaton, word):
        self.automaton = automaton
        # 1-based position of the read head while stepping through `word`.
        self.header_position = 1
        self.word = word
        self.win = Tk()
        # NOTE(review): "Turign" looks like a typo for "Turing" — confirm
        # before changing this user-visible title.
        self.win.title("Turign Magic")
        self.win.resizable(0, 0)
        self.contador = 0
        self.pf = Frame(
            self.win, width=850, height=600
        )
        self.df = Frame(
            self.pf, relief=GROOVE, borderwidth=1
        )
        self.df.place(
            relx=0.01, rely=0.02, width=500, height=460
        )
        Label(self.pf, text='Digrafo').place(relx=0.02, rely=0.02, anchor=W)
        self.asf = Frame(
            self.pf, relief=GROOVE, borderwidth=1
        )
        self.asf.place(
            relx=0.62, rely=0.02, width=300, height=580
        )
        Label(self.pf, text='Configuracion Automata').place(
            relx=0.64, rely=0.02, anchor=W
        )
        # start with a blank placeholder digraph image
        self.imgPath = 'temp/blank.gif'
        self.image = Image.open(self.imgPath)
        self.photo = ImageTk.PhotoImage(self.image)
        self.digraph_image = Label(self.pf, image=self.photo, bd=1)
        # keep a reference so the PhotoImage is not garbage-collected
        self.digraph_image.image = self.photo
        # == Componentes ==
        # ----- STATE
        self.new_state_string = StringVar()
        self.new_state = ttk.Entry(
            self.asf, width=17, textvariable=self.new_state_string, justify='center'
        )
        self.btn_new_state = Button(
            self.asf, text="New State", command=self.add_new_state
        )
        self.list_state = Listbox(self.asf, height=4, width=30)
        # selecting a state in the list makes it the initial state
        self.list_state.bind('<<ListboxSelect>>', self.select_init_state)
        # ----- alphabet
        self.new_character_string = StringVar()
        self.new_character = ttk.Entry(
            self.asf, width=14, textvariable=self.new_character_string, justify='center'
        )
        self.btn_new_character = Button(
            self.asf, text="New Character", command=self.add_new_character
        )
        self.list_character = Listbox(self.asf, height=4, width=30)
        # ----- transition
        self.new_tran_orig = StringVar()
        self.new_tran_read = StringVar()
        self.new_tran_dest = StringVar()
        self.new_orig = ttk.Entry(
            self.asf, width=3, textvariable=self.new_tran_orig, justify='center'
        )
        self.new_read = ttk.Entry(
            self.asf, width=3, textvariable=self.new_tran_read, justify='center'
        )
        self.new_dest = ttk.Entry(
            self.asf, width=3, textvariable=self.new_tran_dest, justify='center'
        )
        self.btn_new_transition = Button(
            self.asf, text="New Transition", command=self.add_new_transition
        )
        self.list_transition = Listbox(self.asf, height=4, width=30)
        # ----- final
        self.new_final_state_string = StringVar()
        self.new_final_state = ttk.Entry(
            self.asf, width=14, textvariable=self.new_final_state_string, justify='center'
        )
        self.btn_final_state = Button(
            self.asf, text="New Final", command=self.add_final_state
        )
        self.list_final_state = Listbox(self.asf, height=4, width=30)
        # -----
        self.btn_play = Button(
            self.asf, text="Play", command=self.play_read_string
        )
        self.btn_next = Button(
            self.asf, text=">", command=self.next_read_string
        )
        self.alpha_string = StringVar()
        self.alpha = ttk.Entry(
            self.pf, width=17, textvariable=self.alpha_string, justify='center'
        )
        # validate the input word on every keystroke
        self.alpha.bind('<KeyRelease>', self.test_input)
        self.list_run = Listbox(self.pf, height=6, width=35)
        self.alpha.place(relx=0.01, rely=0.80)
        self.list_run.place(relx=0.22, rely=0.80)
        # == Posiciones ==
        self.digraph_image.place(relx=0.05, rely=0.05)
        self.new_state.place(relx=0.03, rely=0.03)
        self.btn_new_state.place(relx=0.64, rely=0.03)
        self.list_state.place(relx=0.03, rely=0.10)
        self.new_character.place(relx=0.03, rely=0.25)
        self.btn_new_character.place(relx=0.55, rely=0.25)
        self.list_character.place(relx=0.03, rely=0.31)
        self.new_final_state.place(relx=0.03, rely=0.46)
        self.btn_final_state.place(relx=0.55, rely=0.46)
        self.list_final_state.place(relx=0.03, rely=0.52)
        self.new_orig.place(relx=0.03, rely=0.70)
        self.new_read.place(relx=0.22, rely=0.70)
        self.new_dest.place(relx=0.41, rely=0.70)
        self.btn_new_transition.place(relx=0.55, rely=0.70)
        self.list_transition.place(relx=0.03, rely=0.76)
        self.btn_play.place(relx=0.6, rely=0.90)
        self.btn_next.place(relx=0.8, rely=0.90)
        self.pf.pack()
    def test_input(self, evt):
        """Keystroke handler: accept the typed word only if every
        character belongs to the automaton's alphabet; otherwise revert
        the entry to the last valid word."""
        alphabet = self.alpha_string.get()
        result = True
        word_temp = []
        for x in alphabet:
            if x not in self.automaton.alfabet:
                result = False
            else:
                word_temp.append(x)
        if result:
            self.word = word_temp
        else:
            # roll the entry back to the previous valid word
            string_temp = ""
            for y in self.word:
                string_temp += y
            self.alpha_string.set(string_temp)
    def _msgBox(self):
        """Show an informational popup.

        Bug fix: the original called ``tkMessageBox.showinfo`` — the
        Python 2 module name, never imported here — which raised
        NameError. Use ``tkinter.messagebox`` (imported at module top).
        """
        tkinter.messagebox.showinfo('Python Message Info Box', 'A Python GUI created using tkinter:\nThe year is 2015.')
    def add_new_state(self):
        """Add the state typed in the entry to the automaton and list."""
        if self.automaton.add_state(self.new_state_string.get()):
            self.list_state.insert('end', self.new_state_string.get())
    def select_init_state(self, evt):
        """Listbox-select handler: mark the selected state as initial."""
        # Note here that Tkinter passes an event object to onselect()
        w = evt.widget
        index = int(w.curselection()[0])
        value = w.get(index)
        if self.automaton.set_init(value):
            print('ok')
    def add_new_character(self):
        """Add the typed character to the automaton's alphabet and list."""
        if self.automaton.add_character_alfabet(self.new_character_string.get()):
            self.list_character.insert('end', self.new_character_string.get())
    def add_new_transition(self):
        """Add a (origin, read, destination) transition and refresh the
        digraph preview."""
        result = self.automaton.add_transition(
            self.new_tran_orig.get(), self.new_tran_read.get(), self.new_tran_dest.get()
        )
        if result:
            self.list_transition.insert(
                'end',
                'State: %s, Lee: %s -> Cambia: %s ' % (
                    self.new_tran_orig.get(), self.new_tran_read.get(), self.new_tran_dest.get()
                )
            )
            self.view_preview()
    def add_final_state(self):
        """Add the typed state to the automaton's final states and list."""
        if self.automaton.add_final_state(self.new_final_state_string.get()):
            self.list_final_state.insert('end', self.new_final_state_string.get())
    def set_image_in_lbl_digraph(self, image):
        """Replace the digraph label with the image at path `image`."""
        self.imgPath = image
        self.image = Image.open(self.imgPath)
        self.photo = ImageTk.PhotoImage(self.image)
        self.digraph_image = Label(self.pf, image=self.photo, bd=1)
        self.digraph_image.place(relx=0.05, rely=0.05)
        # keep a reference so the PhotoImage is not garbage-collected
        self.digraph_image.image = self.photo
    def view_preview(self):
        """Render the current automaton (head position 0) and show it."""
        draw(
            self.automaton.alfabet,
            self.automaton.state,
            self.automaton.init,
            self.automaton.get_transition_table(),
            self.automaton.final,
            0
        )
        # blank first so the label refreshes even if the filename repeats
        self.set_image_in_lbl_digraph("temp//blank.gif")
        self.set_image_in_lbl_digraph("temp//Digraph_0.gif")
    def play_read_string(self):
        """Consume the whole input word, logging every step, and leave
        the digraph image at the final configuration."""
        self.header_position = 1
        # NOTE(review): select_init_state expects a ListboxSelect event
        # (it reads evt.widget); passing automaton.init here would raise
        # AttributeError at runtime — suspected latent bug, left unchanged.
        self.select_init_state(self.automaton.init)
        for x in self.word:
            # render the word with the head position bracketed, e.g. a[b]c
            new_state_string = ''
            for y in self.word[:self.header_position-1]:
                new_state_string += y
            new_state_string += '[%s]' % x
            for y in self.word[self.header_position:]:
                new_state_string += y
            self.list_run.insert('end', new_state_string)
            draw(
                self.automaton.alfabet,
                self.automaton.get_state_mar_current(),
                self.automaton.init,
                self.automaton.read_character(x),
                self.automaton.final,
                self.header_position
            )
            self.header_position += 1
        self.set_image_in_lbl_digraph("temp//blank.gif")
        self.set_image_in_lbl_digraph(
            "temp//Digraph_" + str(self.header_position-1) + ".gif"
        )
    def next_read_string(self):
        """Consume a single character of the input word and display the
        resulting configuration; wraps around past the end of the word."""
        if (self.header_position - 1) >= len(self.word):
            self.header_position = 1
            # NOTE(review): same suspect call as in play_read_string —
            # select_init_state expects an event object.
            self.select_init_state(self.automaton.init)
        x = self.word[self.header_position-1]
        # render the word with the head position bracketed, e.g. a[b]c
        new_state_string = ''
        for y in self.word[:self.header_position-1]:
            new_state_string += y
        new_state_string += '[%s]' % x
        for y in self.word[self.header_position:]:
            new_state_string += y
        self.list_run.insert('end', new_state_string)
        draw(
            self.automaton.alfabet,
            self.automaton.get_state_mar_current(),
            self.automaton.init,
            self.automaton.read_character(x),
            self.automaton.final,
            self.header_position
        )
        self.set_image_in_lbl_digraph("temp//blank.gif")
        self.set_image_in_lbl_digraph(
            "temp//Digraph_" + str(self.header_position) + ".gif"
        )
        self.header_position += 1
    def show(self):
        """Enter the Tk main loop (blocks until the window closes)."""
        self.win.mainloop()
| |
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cinderclient.v1 import client as cinder_client
from oslotest import mockpatch
from cloudferrylib.os.storage import cinder_storage
from cloudferrylib.utils import utils
from tests import test
# Canned cloudferry configuration used by every test below: fake cloud
# credentials, migration options, MySQL host and a ceph storage backend,
# matching the attribute-style access that utils.ext_dict provides.
FAKE_CONFIG = utils.ext_dict(
    cloud=utils.ext_dict({'user': 'fake_user',
                          'password': 'fake_password',
                          'tenant': 'fake_tenant',
                          'host': '1.1.1.1'}),
    migrate=utils.ext_dict({'speed_limit': '10MB',
                            'retry': '7',
                            'time_wait': '5',
                            'keep_volume_storage': False,
                            'keep_volume_snapshots': False}),
    mysql=utils.ext_dict({'host': '1.1.1.1'}),
    storage=utils.ext_dict({'backend': 'ceph',
                            'rbd_pool': 'volumes',
                            'volume_name_template': 'volume-',
                            'host': '1.1.1.1'}))
class CinderStorageTestCase(test.TestCase):
    """Unit tests for cloudferrylib's CinderStorage resource.

    The cinderclient ``Client`` class is patched with a Mock, so every
    test asserts only on how CinderStorage drives the client (and, for
    the iSCSI-path tests, the MySQL connector), never on a real cloud.
    """
    def setUp(self):
        super(CinderStorageTestCase, self).setUp()
        # Replace cinderclient.v1.client.Client with a Mock for the
        # duration of each test.
        self.mock_client = mock.Mock()
        self.cs_patch = mockpatch.PatchObject(cinder_client, 'Client',
                                              new=self.mock_client)
        self.useFixture(self.cs_patch)
        self.identity_mock = mock.Mock()
        self.compute_mock = mock.Mock()
        self.fake_cloud = mock.Mock()
        self.fake_cloud.mysql_connector = mock.Mock()
        self.fake_cloud.resources = dict(identity=self.identity_mock,
                                         compute=self.compute_mock)
        self.cinder_client = cinder_storage.CinderStorage(FAKE_CONFIG,
                                                          self.fake_cloud)
        self.fake_volume_0 = mock.Mock()
        self.fake_volume_1 = mock.Mock()
        # Any volumes.get() call resolves to fake_volume_0 by default.
        self.mock_client().volumes.get.return_value = self.fake_volume_0
    def test_get_cinder_client(self):
        # To check self.mock_client call only from this test method
        self.mock_client.reset_mock()
        client = self.cinder_client.get_client(FAKE_CONFIG)
        self.mock_client.assert_called_once_with('fake_user', 'fake_password',
                                                 'fake_tenant',
                                                 'http://1.1.1.1:35357/v2.0/')
        self.assertEqual(self.mock_client(), client)
    def test_get_volumes_list(self):
        fake_volume_list = [self.fake_volume_0, self.fake_volume_1]
        self.mock_client().volumes.list.return_value = fake_volume_list
        volumes_list = self.cinder_client.get_volumes_list()
        # (detailed=True, search_opts=None)
        self.mock_client().volumes.list.assert_called_once_with(True, None)
        self.assertEqual(volumes_list, fake_volume_list)
    def test_create_volume(self):
        self.mock_client().volumes.create.return_value = self.fake_volume_0
        volume = self.cinder_client.create_volume(100500, name='fake')
        self.mock_client().volumes.create.assert_called_once_with(100500,
                                                                  name='fake')
        self.assertEqual(self.fake_volume_0, volume)
    def test_get_volume_by_id(self):
        volume = self.cinder_client.get_volume_by_id('fake_id')
        self.mock_client().volumes.get.assert_called_once_with('fake_id')
        self.assertEqual(self.fake_volume_0, volume)
    def test_delete_volume(self):
        # delete_volume is expected to resolve the id to a volume object
        # first, then delete that object.
        self.cinder_client.delete_volume('fake_id')
        self.mock_client().volumes.get.assert_called_once_with('fake_id')
        self.mock_client().volumes.delete.assert_called_once_with(
            self.fake_volume_0)
    def test_update_volume(self):
        self.cinder_client.update_volume('fake_id', name='new_fake_name')
        self.mock_client().volumes.get.assert_called_once_with('fake_id')
        self.mock_client().volumes.update.assert_called_once_with(
            self.fake_volume_0, name='new_fake_name')
    def test_attach_volume(self):
        self.mock_client().volumes.attach.return_value = (
            'fake_response', 'fake_body')
        response, body = self.cinder_client.attach_volume('fake_vol_id',
                                                          'fake_instance_id',
                                                          '/fake/mountpoint')
        test_args = {'instance_uuid': 'fake_instance_id',
                     'mountpoint': '/fake/mountpoint',
                     'mode': 'rw'}
        self.mock_client().volumes.get.assert_called_once_with('fake_vol_id')
        self.mock_client().volumes.attach.assert_called_once_with(
            self.fake_volume_0, **test_args)
        self.assertEqual(('fake_response', 'fake_body'), (response, body))
    def test_detach_volume(self):
        self.mock_client().volumes.detach.return_value = (
            'fake_response', 'fake_body')
        response, body = self.cinder_client.detach_volume('fake_vl_id')
        self.mock_client().volumes.detach.assert_called_once_with('fake_vl_id')
        self.assertEqual(('fake_response', 'fake_body'), (response, body))
    def test_upload_volume_to_image(self):
        # upload_to_image returns (response, body); the body carries the
        # created image id, which CinderStorage is expected to unwrap.
        image = {'os-volume_upload_image': {'image_id': "fake_body"}}
        self.mock_client().volumes.upload_to_image.return_value = (
            'fake_response', image)
        response, body = self.cinder_client.upload_volume_to_image(
            'fake_vol_id', True, 'fake_image_name', 'fake_cont_format',
            'fake_disk_format')
        test_args = {'volume': self.fake_volume_0,
                     'container_format': 'fake_cont_format',
                     'force': True,
                     'image_name': 'fake_image_name',
                     'disk_format': 'fake_disk_format'}
        self.mock_client().volumes.get.assert_called_once_with('fake_vol_id')
        self.mock_client().volumes.upload_to_image.assert_called_once_with(
            **test_args)
        self.assertEqual(('fake_response', 'fake_body'), (response, body))
    def test_read_info(self):
        # Temporarily stub get_volumes_list; restored at the end so other
        # tests keep the real method.
        temp = self.cinder_client.get_volumes_list
        self.cinder_client.get_volumes_list = mock.Mock()
        vol1 = mock.Mock(id="id1",
                         size='size',
                         display_name='display_name',
                         display_description='display_description',
                         availability_zone='availability_zone',
                         volume_type='volume_type',
                         attachments=[{'device': 'device'}],
                         bootable='bootable')
        self.cinder_client.get_volumes_list.return_value = [vol1]
        res = self.cinder_client.read_info(id="id1")
        self.assertIn('volumes', res)
        self.assertEqual(1, len(res['volumes']))
        self.assertEqual(vol1.id, res['volumes']['id1']['volume']['id'])
        self.cinder_client.get_volumes_list = temp
    def test_deploy(self):
        vol = {'volume': {'size': 'size1',
                          'display_name': 'display_name1',
                          'display_description': 'display_description1',
                          'volume_type': 'volume_type1',
                          'availability_zone': 'availability_zone1'},
               'meta': {'image': {'id': 'image_id1'}}}
        info = {'volumes': {'id1': vol}}
        # Stub out every collaborator deploy() touches.
        create_volume = mock.Mock()
        vol_return = mock.Mock(id="id2")
        create_volume.return_value = vol_return
        wait_for_status = mock.Mock()
        finish = mock.Mock()
        attach_vol_to_instance = mock.Mock()
        self.cinder_client.create_volume = create_volume
        self.cinder_client.wait_for_status = wait_for_status
        self.cinder_client.finish = finish
        self.cinder_client.attach_volume_to_instance = attach_vol_to_instance
        res = self.cinder_client.deploy(info)
        self.assertIn(vol_return.id, res)
    def test_get_volume_path_iscsi(self):
        # provider_location row as stored by cinder for an iSCSI volume:
        # "<ip>:<port>,<target> <iqn> <lun>"
        fake_mysql_return = ('fake_ip:fake_port,3 iqn.2010-10.org.openstack:'
                             'volume-fake_volume_id fake_lun',)
        self.fake_cloud.mysql_connector.execute().fetchone.return_value = (
            fake_mysql_return)
        volume_path = self.cinder_client.get_volume_path_iscsi('fake_vol_id')
        expected_volume_path = (
            '/dev/disk/by-path/ip-fake_ip:fake_port-iscsi-iqn.2010-10.org.'
            'openstack:volume-fake_volume_id-lun-fake_lun')
        self.assertEqual(expected_volume_path, volume_path)
        self.fake_cloud.mysql_connector.execute.assert_called_with(
            "SELECT provider_location FROM volumes WHERE id='fake_vol_id';")
    def test_get_volume_path_iscsi_error(self):
        # No row in the DB for the volume id -> the lookup must raise.
        fake_mysql_return = None
        self.fake_cloud.mysql_connector.execute.return_value = (
            fake_mysql_return)
        expected_msg = ('There is no such raw in Cinder DB with the specified '
                        'volume_id=fake_vol_id')
        try:
            volume_path = self.cinder_client.get_volume_path_iscsi(
                'fake_vol_id')
        except Exception as e:
            self.assertEqual(expected_msg, e.message)
            self.fake_cloud.mysql_connector.execute.assert_called_once_with(
                "SELECT provider_location FROM volumes WHERE id='fake_vol_id';")
        self.assertRaises(Exception,
                          self.cinder_client.get_volume_path_iscsi,
                          'fake_vol_id')
| |
# -*- coding: utf-8 -*-
"""
Tests that the file header is properly handled or inferred
during parsing for all of the parsers defined in parsers.py
"""
from collections import namedtuple
import pytest
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Index, MultiIndex
from pandas.compat import StringIO, lrange, u
class HeaderTests(object):
def test_read_with_bad_header(self):
    # A header row index past the end of the file must raise ValueError.
    msg = r"but only \d+ lines in file"
    with tm.assert_raises_regex(ValueError, msg):
        self.read_csv(StringIO(',,'), header=[10])
def test_bool_header_arg(self):
    # see gh-6114
    # `header` must be an int/list of ints/None; booleans are rejected
    # with TypeError by both read_csv and read_table.
    data = """\
MyColumn
a
b
a
b"""
    for arg in [True, False]:
        with pytest.raises(TypeError):
            self.read_csv(StringIO(data), header=arg)
        with pytest.raises(TypeError):
            self.read_table(StringIO(data), header=arg)
def test_no_header_prefix(self):
    # With header=None and a prefix, columns are auto-named
    # prefix0..prefixN and all rows are parsed as data.
    data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
    df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
                              header=None)
    expected = np.array([[1, 2, 3, 4, 5],
                         [6, 7, 8, 9, 10],
                         [11, 12, 13, 14, 15]], dtype=np.int64)
    tm.assert_almost_equal(df_pref.values, expected)
    tm.assert_index_equal(df_pref.columns,
                          Index(['Field0', 'Field1', 'Field2',
                                 'Field3', 'Field4']))
def test_header_with_index_col(self):
    # With explicit `names` of length ncols-1, the leading column is
    # taken implicitly as the index.
    data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
    names = ['A', 'B', 'C']
    df = self.read_csv(StringIO(data), names=names)
    assert list(df.columns) == ['A', 'B', 'C']
    values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    expected = DataFrame(values, index=['foo', 'bar', 'baz'],
                         columns=['A', 'B', 'C'])
    tm.assert_frame_equal(df, expected)
def test_header_not_first_line(self):
    # header=2 must skip the first two rows entirely, so the result
    # matches parsing a file that starts at the header row.
    data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
    data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
    df = self.read_csv(StringIO(data), header=2, index_col=0)
    expected = self.read_csv(StringIO(data2), header=0, index_col=0)
    tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
    # Round-trip a frame with a 4-level column MultiIndex and a 2-level
    # row MultiIndex (the layout DataFrame.to_csv writes), and check the
    # invalid-option combinations raise.
    expected = tm.makeCustomDataframe(
        5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
    data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
    df = self.read_csv(StringIO(data), header=[0, 1, 2, 3],
                       index_col=[0, 1])
    tm.assert_frame_equal(df, expected)
    # skipping lines in the header
    df = self.read_csv(StringIO(data), header=[0, 1, 2, 3],
                       index_col=[0, 1])
    tm.assert_frame_equal(df, expected)
    # INVALID OPTIONS
    # names
    pytest.raises(ValueError, self.read_csv,
                  StringIO(data), header=[0, 1, 2, 3],
                  index_col=[0, 1], names=['foo', 'bar'])
    # usecols
    pytest.raises(ValueError, self.read_csv,
                  StringIO(data), header=[0, 1, 2, 3],
                  index_col=[0, 1], usecols=['foo', 'bar'])
    # non-numeric index_col
    pytest.raises(ValueError, self.read_csv,
                  StringIO(data), header=[0, 1, 2, 3],
                  index_col=['foo', 'bar'])
    def test_header_multiindex_common_format(self):
        # Exercises the "common" two-row MultiIndex header layout that
        # DataFrame.to_csv emits, with and without the blank separator row,
        # with tuple/namedtuple `names`, and malformed-header fallbacks.
        df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
                       index=['one', 'two'],
                       columns=MultiIndex.from_tuples(
                           [('a', 'q'), ('a', 'r'), ('a', 's'),
                            ('b', 't'), ('c', 'u'), ('c', 'v')]))
        # to_csv
        data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
        result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
        tm.assert_frame_equal(df, result)
        # to_csv, tuples
        result = self.read_csv(StringIO(data), skiprows=3,
                               names=[('a', 'q'), ('a', 'r'), ('a', 's'),
                                      ('b', 't'), ('c', 'u'), ('c', 'v')],
                               index_col=0)
        tm.assert_frame_equal(df, result)
        # to_csv, namedtuples
        TestTuple = namedtuple('names', ['first', 'second'])
        result = self.read_csv(
            StringIO(data), skiprows=3, index_col=0,
            names=[TestTuple('a', 'q'), TestTuple('a', 'r'),
                   TestTuple('a', 's'), TestTuple('b', 't'),
                   TestTuple('c', 'u'), TestTuple('c', 'v')])
        tm.assert_frame_equal(df, result)
        # common
        data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
        result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
        tm.assert_frame_equal(df, result)
        # common, tuples
        result = self.read_csv(StringIO(data), skiprows=2,
                               names=[('a', 'q'), ('a', 'r'), ('a', 's'),
                                      ('b', 't'), ('c', 'u'), ('c', 'v')],
                               index_col=0)
        tm.assert_frame_equal(df, result)
        # common, namedtuples
        TestTuple = namedtuple('names', ['first', 'second'])
        result = self.read_csv(
            StringIO(data), skiprows=2, index_col=0,
            names=[TestTuple('a', 'q'), TestTuple('a', 'r'),
                   TestTuple('a', 's'), TestTuple('b', 't'),
                   TestTuple('c', 'u'), TestTuple('c', 'v')])
        tm.assert_frame_equal(df, result)
        # common, no index_col
        data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
        result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
        tm.assert_frame_equal(df.reset_index(drop=True), result)
        # common, no index_col, tuples
        result = self.read_csv(StringIO(data), skiprows=2,
                               names=[('a', 'q'), ('a', 'r'), ('a', 's'),
                                      ('b', 't'), ('c', 'u'), ('c', 'v')],
                               index_col=None)
        tm.assert_frame_equal(df.reset_index(drop=True), result)
        # common, no index_col, namedtuples
        TestTuple = namedtuple('names', ['first', 'second'])
        result = self.read_csv(
            StringIO(data), skiprows=2, index_col=None,
            names=[TestTuple('a', 'q'), TestTuple('a', 'r'),
                   TestTuple('a', 's'), TestTuple('b', 't'),
                   TestTuple('c', 'u'), TestTuple('c', 'v')])
        tm.assert_frame_equal(df.reset_index(drop=True), result)
        # malformed case 1
        expected = DataFrame(np.array(
            [[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype='int64'),
            index=Index([1, 7]),
            columns=MultiIndex(levels=[[u('a'), u('b'), u('c')],
                                       [u('r'), u('s'), u('t'),
                                        u('u'), u('v')]],
                               labels=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],
                               names=[u('a'), u('q')]))
        data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
        result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
        tm.assert_frame_equal(expected, result)
        # malformed case 2
        expected = DataFrame(np.array(
            [[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype='int64'),
            index=Index([1, 7]),
            columns=MultiIndex(levels=[[u('a'), u('b'), u('c')],
                                       [u('r'), u('s'), u('t'),
                                        u('u'), u('v')]],
                               labels=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],
                               names=[None, u('q')]))
        data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
        result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
        tm.assert_frame_equal(expected, result)
        # mi on columns and index (malformed)
        expected = DataFrame(np.array(
            [[3, 4, 5, 6], [9, 10, 11, 12]], dtype='int64'),
            index=MultiIndex(levels=[[1, 7], [2, 8]],
                             labels=[[0, 1], [0, 1]]),
            columns=MultiIndex(levels=[[u('a'), u('b'), u('c')],
                                       [u('s'), u('t'), u('u'), u('v')]],
                               labels=[[0, 1, 2, 2], [0, 1, 2, 3]],
                               names=[None, u('q')]))
        data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
        result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
        tm.assert_frame_equal(expected, result)
    def test_header_names_backward_compat(self):
        # #2539: `names` alone implies header=None; with header=0 the
        # existing header row is replaced by `names`.
        data = '1,2,3\n4,5,6'
        result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
        expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
                                 header=None)
        tm.assert_frame_equal(result, expected)
        data2 = 'foo,bar,baz\n' + data
        result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
                               header=0)
        tm.assert_frame_equal(result, expected)
    def test_read_only_header_no_rows(self):
        # See gh-7773: a header-only file yields an empty frame with the
        # header as columns, regardless of index_col=False.
        expected = DataFrame(columns=['a', 'b', 'c'])
        df = self.read_csv(StringIO('a,b,c'))
        tm.assert_frame_equal(df, expected)
        df = self.read_csv(StringIO('a,b,c'), index_col=False)
        tm.assert_frame_equal(df, expected)
    def test_no_header(self):
        # header=None: default integer columns, `prefix` generates X0..X4,
        # and `names` overrides both.
        data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
        df = self.read_table(StringIO(data), sep=',', header=None)
        df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
                                  header=None)
        names = ['foo', 'bar', 'baz', 'quux', 'panda']
        df2 = self.read_table(StringIO(data), sep=',', names=names)
        expected = np.array([[1, 2, 3, 4, 5],
                             [6, 7, 8, 9, 10],
                             [11, 12, 13, 14, 15]], dtype=np.int64)
        tm.assert_almost_equal(df.values, expected)
        tm.assert_almost_equal(df.values, df2.values)
        tm.assert_index_equal(df_pref.columns,
                              Index(['X0', 'X1', 'X2', 'X3', 'X4']))
        tm.assert_index_equal(df.columns, Index(lrange(5)))
        tm.assert_index_equal(df2.columns, Index(names))
    def test_non_int_header(self):
        # GH 16338: header must be an int or list of ints, never strings.
        msg = 'header must be integer or list of integers'
        data = """1,2\n3,4"""
        with tm.assert_raises_regex(ValueError, msg):
            self.read_csv(StringIO(data), sep=',', header=['a', 'b'])
        with tm.assert_raises_regex(ValueError, msg):
            self.read_csv(StringIO(data), sep=',', header='string_header')
    def test_singleton_header(self):
        # See GH #7757: header=[0] (single-element list) behaves like
        # header=0, producing a flat (non-MultiIndex) column index.
        data = """a,b,c\n0,1,2\n1,2,3"""
        df = self.read_csv(StringIO(data), header=[0])
        expected = DataFrame({"a": [0, 1], "b": [1, 2], "c": [2, 3]})
        tm.assert_frame_equal(df, expected)
    def test_mangles_multi_index(self):
        # See GH 18062: duplicate labels within a MultiIndex header level
        # are de-duplicated with '.1', '.1.1', ... suffixes.
        data = """A,A,A,B\none,one,one,two\n0,40,34,0.1"""
        df = self.read_csv(StringIO(data), header=[0, 1])
        expected = DataFrame([[0, 40, 34, 0.1]],
                             columns=MultiIndex.from_tuples(
                                 [('A', 'one'), ('A', 'one.1'),
                                  ('A', 'one.2'), ('B', 'two')]))
        tm.assert_frame_equal(df, expected)
        data = """A,A,A,B\none,one,one.1,two\n0,40,34,0.1"""
        df = self.read_csv(StringIO(data), header=[0, 1])
        expected = DataFrame([[0, 40, 34, 0.1]],
                             columns=MultiIndex.from_tuples(
                                 [('A', 'one'), ('A', 'one.1'),
                                  ('A', 'one.1.1'), ('B', 'two')]))
        tm.assert_frame_equal(df, expected)
        data = """A,A,A,B,B\none,one,one.1,two,two\n0,40,34,0.1,0.1"""
        df = self.read_csv(StringIO(data), header=[0, 1])
        expected = DataFrame([[0, 40, 34, 0.1, 0.1]],
                             columns=MultiIndex.from_tuples(
                                 [('A', 'one'), ('A', 'one.1'),
                                  ('A', 'one.1.1'), ('B', 'two'),
                                   ('B', 'two.1')]))
        tm.assert_frame_equal(df, expected)
| |
from __future__ import unicode_literals
from time import gmtime
from calendar import timegm
from datetime import datetime
import pytz
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from elasticsearch import Elasticsearch
from website import settings
from website.search.elastic_search import requires_search
from util import generate_color, html_and_illegal_unicode_replace
# Module-level Elasticsearch client for the SHARE index; shared by every
# query helper in this module.
share_es = Elasticsearch(
    settings.SHARE_ELASTIC_URI,
    request_timeout=settings.ELASTIC_TIMEOUT
)
@requires_search
def search(query, raw=False):
    """Execute *query* against the 'share' index.

    Returns the raw Elasticsearch response when ``raw`` is true, otherwise a
    dict with the hit sources under 'results' and the total under 'count'.
    """
    response = share_es.search(index='share', doc_type=None, body=query)
    if raw:
        return response
    hits = response['hits']
    return {
        'results': [hit['_source'] for hit in hits['hits']],
        'count': hits['total'],
    }
@requires_search
def count(query):
    """Return only the total hit count for *query* against the 'share' index.

    Pagination keys ('from'/'size') are not accepted by the count API, so
    they are stripped from the body first.  NOTE: this mutates the caller's
    dict (pre-existing behavior).  Unlike the previous get/del pair, pop()
    also removes a key whose value is explicitly None, which the count API
    would otherwise reject.
    """
    query.pop('from', None)
    query.pop('size', None)
    count = share_es.count(index='share', body=query)

    return {
        'results': [],
        'count': count['count']
    }
@requires_search
def providers():
    """Return every provider document keyed by its short_name."""
    body = {
        'query': {
            'match_all': {}
        },
        'size': 10000
    }
    response = share_es.search(index='share_providers', doc_type=None,
                               body=body)
    provider_map = {}
    for hit in response['hits']['hits']:
        source = hit['_source']
        provider_map[source['short_name']] = source
    return {'providerMap': provider_map}
@requires_search
def stats(query=None):
    """Aggregate SHARE document statistics and return chart-ready data.

    Runs two searches: one with per-source / DOI-presence / age aggregations,
    and a second date-histogram query restricted to the last three months,
    then merges them and converts the result with ``data_for_charts``.
    """
    query = query or {"query": {"match_all": {}}}
    # epoch milliseconds, since ES range filters on dateUpdated use ms
    three_months_ago = timegm((datetime.now() + relativedelta(months=-3)).timetuple()) * 1000
    query['aggs'] = {
        # document count per source (_type); size=0 means "all buckets"
        "sources": {
            "terms": {
                "field": "_type",
                "size": 0,
                "min_doc_count": 0,
            }
        },
        # per-source counts of documents lacking a DOI
        "doisMissing": {
            "filter": {
                "missing": {
                    "field": "id.doi"
                }
            },
            "aggs": {
                "sources": {
                    "terms": {
                        "field": "_type",
                        "size": 0
                    }
                }
            }
        },
        # per-source counts of documents that do have a DOI
        "dois": {
            "filter": {
                "exists": {
                    "field": "id.doi"
                }
            },
            "aggs": {
                "sources": {
                    "terms": {
                        "field": "_type",
                        "size": 0
                    }
                }
            }
        },
        # per-source counts of documents older than three months
        "earlier_documents": {
            "filter": {
                "range": {
                    "dateUpdated": {
                        "lt": three_months_ago
                    }
                }
            },
            "aggs": {
                "sources": {
                    "terms": {
                        "field": "_type",
                        "size": 0,
                        "min_doc_count": 0
                    }
                }
            }
        }
    }
    # second query: same user query, but filtered to the last three months
    date_histogram_query = {
        'query': {
            'filtered': {
                'query': query['query'],
                'filter': {
                    'range': {
                        'dateUpdated': {
                            'gt': three_months_ago
                        }
                    }
                }
            }
        }
    }
    # weekly histogram per source over the three-month window
    date_histogram_query['aggs'] = {
        "date_chunks": {
            "terms": {
                "field": "_type",
                "size": 0,
                "exclude": "of|and|or"
            },
            "aggs": {
                "articles_over_time": {
                    "date_histogram": {
                        "field": "dateUpdated",
                        "interval": "week",
                        "min_doc_count": 0,
                        "extended_bounds": {
                            "min": three_months_ago,
                            "max": timegm(gmtime()) * 1000
                        }
                    }
                }
            }
        }
    }
    results = share_es.search(index='share', body=query)
    date_results = share_es.search(index='share', body=date_histogram_query)
    # graft the histogram buckets onto the main result before charting
    results['aggregations']['date_chunks'] = date_results['aggregations']['date_chunks']
    chart_results = data_for_charts(results)
    return chart_results
def data_for_charts(elastic_results):
    """Convert the aggregated ES response into c3.js-style chart payloads.

    Produces a donut chart (documents per source) and a cumulative
    area-spline time series per source.  NOTE: Python 2 only — relies on
    ``dict.iteritems``, ``generator.next`` and indexable ``dict.keys()``.
    """
    source_data = elastic_results['aggregations']['sources']['buckets']
    for_charts = {}

    ## for the donut graph list of many lists, source and count
    source_and_counts = [[item['key'], item['doc_count']] for item in source_data]
    for_charts['shareDonutGraph'] = source_and_counts

    # color generator; one color is drawn per source below
    r = generate_color()
    stats = {}
    colors = {}
    for bucket in elastic_results['aggregations']['sources']['buckets']:
        stats[bucket['key']] = {
            'doc_count': bucket['doc_count'],
        }
        colors[bucket['key']] = r.next()

    for bucket in elastic_results['aggregations']['earlier_documents']['sources']['buckets']:
        stats[bucket['key']]['earlier_documents'] = bucket['doc_count']

    # keep the last source's buckets as a zero-filled template for sources
    # that have no histogram of their own
    default_buckets = []
    for bucket in elastic_results['aggregations']['date_chunks']['buckets']:
        default_buckets = bucket['articles_over_time']['buckets']
        stats[bucket['key']]['articles_over_time'] = bucket['articles_over_time']['buckets']

    max_len = 0
    # backfill missing fields and find the longest series
    for key, value in stats.iteritems():
        if not stats[key].get('earlier_documents'):
            stats[key]['earlier_documents'] = 0
        if not stats[key].get('articles_over_time'):
            stats[key]['articles_over_time'] = [
                {
                    'key_as_string': item['key_as_string'],
                    'key': item['key'],
                    'doc_count': 0
                }
                for item in default_buckets
            ]
        if len(stats[key]['articles_over_time']) > max_len:
            max_len = len(stats[key]['articles_over_time'])

    # c3 format: first row is the x axis, then one cumulative row per source
    names = ['x']
    numbers = [['x']]
    for date in stats[stats.keys()[0]]['articles_over_time']:
        numbers[0].append(' ')

    for key, value in stats.iteritems():
        try:
            names.append(key)
            x = [item['doc_count'] for item in value['articles_over_time']]
            if len(x) < max_len:
                x += [0] * (max_len - len(x))
            # seed the running total with documents older than the window
            x[0] += stats[key].get('earlier_documents', 0)
            numbers.append([key] + [sum(x[0:i + 1]) for i in range(len(x[0:]))])
        except IndexError:
            pass

    date_totals = {
        'date_numbers': numbers,
        'group_names': names
    }

    # degenerate case: no histogram data at all — emit zero rows
    if date_totals.get('date_numbers') == [[u'x']]:
        for name in date_totals.get('group_names'):
            date_totals.get('date_numbers').append([name, 0])

    for_charts['date_totals'] = date_totals

    all_data = {}
    all_data['raw_aggregations'] = elastic_results['aggregations']

    all_data['charts'] = {
        'shareDonutGraph': {
            'type': 'donut',
            'columns': for_charts['shareDonutGraph'],
            'colors': colors
        },
        'shareTimeGraph': {
            'x': 'x',
            'type': 'area-spline',
            'columns': for_charts['date_totals']['date_numbers'],
            'groups': [for_charts['date_totals']['group_names']],
            'colors': colors
        }
    }
    return all_data
def to_atom(result):
    """Render a SHARE search hit as a dict suitable for an Atom feed entry.

    Text fields are sanitized via ``html_and_illegal_unicode_replace``;
    missing title/description fall back to placeholder strings.
    """
    return {
        'title': html_and_illegal_unicode_replace(result.get('title')) or 'No title provided.',
        'summary': html_and_illegal_unicode_replace(result.get('description')) or 'No summary provided.',
        'id': result['id']['url'],
        'updated': get_date_updated(result),
        'links': [
            {'href': result['id']['url'], 'rel': 'alternate'}
        ],
        'author': format_contributors_for_atom(result['contributors']),
        # documents with no 'tags' field previously raised TypeError when
        # iterating None; treat a missing/None value as an empty tag list
        'categories': [{"term": html_and_illegal_unicode_replace(tag)} for tag in (result.get('tags') or [])],
        'published': parse(result.get('dateUpdated'))
    }
def format_contributors_for_atom(contributors_list):
    """Build Atom author dicts ('name': 'Given Family') from contributors."""
    authors = []
    for entry in contributors_list:
        given = html_and_illegal_unicode_replace(entry['given'])
        family = html_and_illegal_unicode_replace(entry['family'])
        authors.append({'name': '{} {}'.format(given, family)})
    return authors
def get_date_updated(result):
    """Parse the document's dateUpdated, localizing to UTC when possible.

    ``pytz.utc.localize`` raises ValueError for already-aware datetimes, in
    which case the parsed value is returned as-is.
    """
    raw = result.get('dateUpdated')
    try:
        return pytz.utc.localize(parse(raw))
    except ValueError:
        return parse(raw)
| |
"""Visualization of TAFs"""
import datetime
# third party
import requests
import pandas as pd
import numpy as np
import matplotlib.patheffects as PathEffects
from matplotlib.patches import Rectangle
from metpy.units import units
from metpy.calc import wind_components
from pyiem.plot import figure
from pyiem.util import (
get_autoplot_context,
get_dbconn,
get_sqlalchemy_conn,
utc,
)
from pyiem.exceptions import NoDataFound
# Column key for visibility values in the TAF forecast dataframe.
VIS = "visibility"
# Shared matplotlib kwargs for all in-plot text labels.
TEXTARGS = {
    "fontsize": 12,
    "color": "k",
    "ha": "center",
    "va": "center",
    "zorder": 3,
}
# White halo path effect so labels stay legible over colored spans.
PE = [PathEffects.withStroke(linewidth=5, foreground="white")]
def get_description():
    """Return a dict describing how to call this plotter"""
    desc = {
        "defaults": {"_r": "t"},
        "data": True,
        "cache": 600,
    }
    desc["description"] = """
    This app generates infographics for Terminal Aerodome Forecasts (TAF).
    You need not provide an exact valid timestamp for the TAF issuance, the
    app will search backwards in time up to 24 hours to find the nearest
    issuance stored in the database.
    """
    station_arg = dict(
        type="text",
        default="KDSM",
        name="station",
        label="Select station to plot:",
    )
    valid_arg = dict(
        type="datetime",
        name="valid",
        default=utc().strftime("%Y/%m/%d %H%M"),
        label="TAF Issuance/Valid Timestamp (UTC Timezone):",
        min="1995/01/01 0000",
    )
    desc["arguments"] = [station_arg, valid_arg]
    return desc
def get_text(product_id):
    """Fetch the raw NWS product text from the IEM API, best-effort.

    Any network failure or non-200 response yields a placeholder string.
    """
    fallback = "Text Unavailable, Sorry."
    uri = f"https://mesonet.agron.iastate.edu/api/1/nwstext/{product_id}"
    try:
        req = requests.get(uri, timeout=5)
        if req.status_code != 200:
            return fallback
        # strip SOH control chars and the WMO header (first 5 lines)
        body = req.content.decode("ascii", "ignore").replace("\001", "")
        return "\n".join(body.replace("\r", "").split("\n")[5:])
    except Exception:
        return fallback
def taf_search(pgconn, station, valid):
    """Return the newest TAF issuance within 24 hours before *valid*.

    Returns a timezone-aware (UTC) datetime, or None when nothing matches.
    """
    cur = pgconn.cursor()
    cur.execute(
        "SELECT valid at time zone 'UTC' from taf "
        "WHERE station = %s and valid > %s and "
        "valid < %s ORDER by valid DESC",
        (station, valid - datetime.timedelta(hours=24), valid),
    )
    if cur.rowcount == 0:
        return None
    found = cur.fetchone()[0]
    return found.replace(tzinfo=datetime.timezone.utc)
def compute_flight_condition(row):
    """Classify one TAF forecast row as VFR/MVFR/IFR/LIFR.

    Returns None for TEMPO groups that omit both sky and visibility, and
    "UNK" when neither ceiling nor visibility pins down a category.
    """
    # TEMPO may not address sky or vis
    if row["is_tempo"] and (not row["skyc"] or pd.isna(row[VIS])):
        return None
    # Ceiling: OVC layer preferred, falling back to BKN; 10000 = unlimited.
    ceiling = 10000
    if "OVC" in row["skyc"]:
        ceiling = row["skyl"][row["skyc"].index("OVC")]
    if ceiling == 10000 and "BKN" in row["skyc"]:
        ceiling = row["skyl"][row["skyc"].index("BKN")]
    visibility = row[VIS]
    if visibility > 5 and ceiling > 3000:
        return "VFR"
    if ceiling < 500 or visibility < 1:
        return "LIFR"
    if ceiling < 1000 or visibility < 3:
        return "IFR"
    if ceiling <= 3000 or visibility <= 5:
        return "MVFR"
    return "UNK"
def plotter(fdict):
    """Go"""
    ctx = get_autoplot_context(fdict, get_description())
    valid = ctx["valid"].replace(tzinfo=datetime.timezone.utc)
    pgconn = get_dbconn("asos")

    def fetch(ts):
        """Getme data."""
        # forecast rows for the TAF issued exactly at ts
        with get_sqlalchemy_conn("asos") as conn:
            df = pd.read_sql(
                "SELECT f.*, t.product_id from taf t JOIN taf_forecast f on "
                "(t.id = f.taf_id) WHERE t.station = %s and t.valid = %s "
                "ORDER by f.valid ASC",
                conn,
                params=(ctx["station"], ts),
                index_col="valid",
            )
        return df

    df = fetch(valid)
    if df.empty:
        # no exact match; fall back to the nearest issuance in the last 24h
        valid = taf_search(pgconn, ctx["station"], valid)
        if valid is None:
            raise NoDataFound("TAF data was not found!")
        df = fetch(valid)
    df = df.fillna(np.nan)
    # duration until the next forecast group (NaT for the last row)
    df["next_valid"] = (
        df.reset_index().shift(-1)["valid"].values - df.index.values
    )
    product_id = df.iloc[0]["product_id"]
    title = (
        f"{ctx['station']} Terminal Aerodome Forecast by NWS "
        f"{product_id[14:17]}\n"
        f"Valid: {valid.strftime('%-d %b %Y %H:%M UTC')}"
    )
    fig = figure(title=title, apctx=ctx)
    ###
    text = get_text(product_id)
    res = fig.text(0.43, 0.01, text.strip(), va="bottom", fontsize=12)
    bbox = res.get_window_extent(fig.canvas.get_renderer())
    figbbox = fig.get_window_extent()
    # one-two line TAFs cause the legend to go off-screen
    yndc = max([bbox.y1 / figbbox.y1, 0.13])
    # Create the main axes that will hold all our hackery
    ax = fig.add_axes([0.08, yndc + 0.05, 0.9, 0.9 - yndc - 0.05])
    fig.text(0.015, 0.3, "Cloud Coverage & Level", rotation=90)
    # u/v wind components (knots) for barbs; .m strips pint units
    df["u"], df["v"] = [
        x.m
        for x in wind_components(
            units("knot") * df["sknt"].values,
            units("degree") * df["drct"].values,
        )
    ]
    # wind-shear components, plotted in red
    df["ws_u"], df["ws_v"] = [
        x.m
        for x in wind_components(
            units("knot") * df["ws_sknt"].values,
            units("degree") * df["ws_drct"].values,
        )
    ]
    # Initialize a fcond with string type
    df["fcond"] = ""
    sz = len(df.index)
    clevels = []
    clevelx = []
    for valid0, row in df.iterrows():
        valid = valid0
        # center the label within the group's validity window when known
        if not pd.isna(row["end_valid"]):
            valid = valid + (row["end_valid"] - valid) / 2
        # Between 1-3 plot the clouds
        for j, skyc in enumerate(row["skyc"]):
            # map cloud level (capped at 3200ft) onto axis range 1..3
            level = min([3200, row["skyl"][j]]) / 1600 + 1
            if j + 1 == len(row["skyc"]):
                clevelx.append(valid)
                clevels.append(level)
            ax.text(valid, level, skyc, **TEXTARGS).set_path_effects(PE)
        # At 0.9 present weather
        delta = row["next_valid"]
        rotation = 0
        # rotate labels for short-lived groups so neighbors don't collide
        if not pd.isna(delta) and delta < datetime.timedelta(hours=2):
            rotation = 45
        ax.text(
            valid,
            0.9,
            "\n".join(row["presentwx"]),
            rotation=rotation,
            **TEXTARGS,
        ).set_path_effects(PE)
        # Plot wind as text string
        if not pd.isna(row["ws_sknt"]):
            ax.text(
                valid,
                3.8 + (0.5 if row["v"] > 0 else 0.5),
                "WS%g" % (row["ws_sknt"],),
                ha="center",
                fontsize=TEXTARGS["fontsize"],
                va="top" if row["v"] < 0 else "bottom",
                color="r",
            ).set_path_effects(PE)
        text = f"{row['sknt']:.0f}"
        if not pd.isna(row["gust"]) and row["gust"] > 0:
            text += f"G{row['gust']:.0f}"
        if not pd.isna(row["sknt"]):
            ax.text(
                valid,
                3.8 + (0.35 if row["v"] > 0 else 0.35),
                f"{text}KT",
                ha="center",
                fontsize=TEXTARGS["fontsize"],
                color=TEXTARGS["color"],
                va="top" if row["v"] < 0 else "bottom",
            ).set_path_effects(PE)
        df.at[valid0, "fcond"] = compute_flight_condition(row)
        # At 3.25 plot the visibility
        if not pd.isna(row[VIS]):
            pltval = f"{row['visibility']:g}"
            if row["visibility"] > 6:
                pltval = "6+"
            ax.text(valid, 3.25, pltval, **TEXTARGS).set_path_effects(PE)
    if clevels:
        ax.plot(clevelx, clevels, linestyle=":", zorder=2)
    # Between 3.5-4.5 plot the wind arrows
    ax.barbs(
        df.index.values,
        [3.8] * sz,
        df["u"].values,
        df["v"].values,
        zorder=3,
        color="k",
    )
    ax.barbs(
        df.index.values,
        [3.8] * sz,
        df["ws_u"].values,
        df["ws_v"].values,
        zorder=4,
        color="r",
    )
    padding = datetime.timedelta(minutes=60)
    ax.set_xlim(df.index.min() - padding, df.index.max() + padding)
    ax.set_yticks([0.9, 1.5, 2, 2.5, 3, 3.25, 3.8])
    ax.set_yticklabels(
        [
            "WX",
            "800ft",
            "1600ft",
            "2400ft",
            "3200+ft",
            "Vis (mile)",
            "Wind (KT)",
        ]
    )
    ax.set_ylim(0.8, 4.5)
    # thin separators between the WX, visibility and wind bands
    for y in [1, 3.125, 3.375]:
        ax.axhline(
            y,
            color="blue",
            lw=0.5,
        )
    colors = {
        "UNK": "#EEEEEE",
        "VFR": "green",
        "MVFR": "blue",
        "IFR": "red",
        "LIFR": "magenta",
    }
    # Colorize things by flight condition
    xs = df.index.to_list()
    xs[0] = xs[0] - padding
    xs.append(df.index.max() + padding)
    # carry the previous condition forward over groups rated None (TEMPO)
    previous = "VFR"
    for i, val in enumerate(df["fcond"].values):
        if val is None:
            val = previous
        previous = val
        ax.axvspan(
            xs[i],
            xs[i + 1],
            fc=colors.get(val, "white"),
            ec="None",
            alpha=0.5,
            zorder=2,
        )
    rects = []
    for _, item in colors.items():
        rects.append(Rectangle((0, 0), 1, 1, fc=item, alpha=0.5))
    ax.legend(
        rects,
        colors.keys(),
        ncol=3,
        loc="upper left",
        fontsize=14,
        bbox_to_anchor=(0.0, -0.04),
        fancybox=True,
        shadow=True,
    )
    # Need to get rid of timezones
    df = df.reset_index()
    for col in ["valid", "end_valid"]:
        # some rows could be NaN
        df[col] = df[~pd.isna(df[col])][col].apply(
            lambda x: x.strftime("%Y-%m-%d %H:%M")
        )
    return fig, df.drop("next_valid", axis=1)
if __name__ == "__main__":
    # Ad-hoc smoke test against a known station/timestamp.
    plotter(dict(station="KMCK", valid="2021-07-06 1606"))
| |
# -*- encoding: utf-8 -*-
from . import FixtureTest
class IndianShieldTest(FixtureTest):
    """Tests for Indian road shield/network assignment in the 'roads' layer.

    Each test builds OSM-style fixtures (a way, optionally route relations)
    and asserts the resulting feature carries the expected network
    (IN:NH, IN:SH, IN:MDR) and shield text.
    """

    def test_547e_innh(self):
        # NH route relations plus an SH ref:old -> both networks retained.
        import dsl

        z, x, y = (16, 47127, 28829)

        self.generate_fixtures(
            dsl.is_in('IN', z, x, y),
            # https://www.openstreetmap.org/way/28465477
            dsl.way(28465477, dsl.tile_diagonal(z, x, y), {
                'highway': u'trunk',
                'ref': u'NH547E',
                'ref:old': u'SH265',
                'source': u'openstreetmap.org',
            }),
            dsl.relation(1, {
                'network': u'IN:NH:MH',
                'note': u'NH547E in MH',
                'ref': u'NH547E',
                'route': u'road',
                'source': u'openstreetmap.org',
                'type': u'route',
            }, ways=[28465477]),
            dsl.relation(2, {
                'network': u'IN:NH',
                'ref': u'NH547E',
                'route': u'road',
                'source': u'openstreetmap.org',
                'type': u'route',
            }, ways=[28465477]),
            dsl.relation(3, {
                'name': u'SH265',
                'ref': u'SH265',
                'route': u'road',
                'source': u'openstreetmap.org',
                'type': u'route',
            }, ways=[28465477]),
        )

        self.assert_has_feature(
            z, x, y, 'roads', {
                'id': 28465477,
                'network': 'IN:NH',
                'shield_text': '547E',
                'all_networks': ['IN:NH', 'IN:SH'],
                'all_shield_texts': ['547E', '265'],
            })

    def test_161_innh(self):
        # semicolon-separated ref (NH161;SH204) split into two shields.
        import dsl

        z, x, y = (16, 46811, 29105)

        self.generate_fixtures(
            dsl.is_in('IN', z, x, y),
            # https://www.openstreetmap.org/way/22865906
            dsl.way(22865906, dsl.tile_diagonal(z, x, y), {
                'highway': u'trunk',
                'ref': u'NH161;SH204',
                'source': u'openstreetmap.org',
            }),
            dsl.relation(1, {
                'network': u'IN:NH:MH',
                'note': u'NH161 in MH',
                'ref': u'NH161',
                'route': u'road',
                'source': u'openstreetmap.org',
                'type': u'route',
            }, ways=[22865906]),
            dsl.relation(2, {
                'name': u'National Highway 161',
                'network': u'IN:NH',
                'ref': u'NH161',
                'route': u'road',
                'source': u'openstreetmap.org',
                'type': u'route',
            }, ways=[22865906]),
            dsl.relation(3, {
                'name': u'SH204',
                'ref': u'SH204',
                'route': u'road',
                'source': u'openstreetmap.org',
                'type': u'route',
            }, ways=[22865906]),
        )

        self.assert_has_feature(
            z, x, y, 'roads', {
                'id': 22865906,
                'network': 'IN:NH',
                'shield_text': '161',
                'all_networks': ['IN:NH', 'IN:SH'],
                'all_shield_texts': ['161', '204'],
            })

    def test_6a_insh(self):
        # SH inferred from the ref alone; no route relation present.
        import dsl

        z, x, y = (16, 47119, 30547)

        self.generate_fixtures(
            dsl.is_in('IN', z, x, y),
            # https://www.openstreetmap.org/way/22832164
            dsl.way(22832164, dsl.tile_diagonal(z, x, y), {
                'AND:importance_level': u'5',
                'AND_a_nosr_r': u'15061209',
                'highway': u'primary',
                'name': u'Tiruvannamalai - Harur State Highway',
                'ref': u'SH6A',
                'source': u'openstreetmap.org',
            }),
        )

        self.assert_has_feature(
            z, x, y, 'roads', {
                'id': 22832164,
                'network': u'IN:SH',
                'shield_text': u'6A',
            })

    def test_54_inmdr(self):
        # MDR ref wins over the relation's IN:SH:PB network tag.
        import dsl

        z, x, y = (16, 46579, 26841)

        self.generate_fixtures(
            dsl.is_in('IN', z, x, y),
            # https://www.openstreetmap.org/way/11760010
            dsl.way(11760010, dsl.tile_diagonal(z, x, y), {
                'highway': u'secondary',
                'name': u'Rahon Road',
                'ref': u'MDR54',
                'source': u'openstreetmap.org',
            }),
            dsl.relation(1, {
                'network': u'IN:SH:PB',
                'ref': u'MDR54',
                'route': u'road',
                'source': u'openstreetmap.org',
                'type': u'route',
            }, ways=[11760010]),
        )

        self.assert_has_feature(
            z, x, y, 'roads', {
                'id': 11760010,
                'network': u'IN:MDR',
                'shield_text': u'54',
            })

    def test_none_inmdr(self):
        # bare 'MDR' ref: network recognized but no shield number.
        import dsl

        z, x, y = (16, 48428, 27975)

        self.generate_fixtures(
            dsl.is_in('IN', z, x, y),
            # https://www.openstreetmap.org/way/22828011
            dsl.way(22828011, dsl.tile_diagonal(z, x, y), {
                'highway': u'secondary',
                'ref': u'MDR',
                'source': u'openstreetmap.org',
                'surface': u'paved',
            }),
        )

        self.assert_has_feature(
            z, x, y, 'roads', {
                'id': 22828011,
                'network': u'IN:MDR',
                'shield_text': type(None),
            })

    def test_inroads(self):
        # bogus 'IN-roads' relation network must not leak into the output.
        import dsl

        z, x, y = (16, 46765, 26893)

        self.generate_fixtures(
            dsl.is_in('IN', z, x, y),
            # https://www.openstreetmap.org/way/169248989
            dsl.way(169248989, dsl.tile_diagonal(z, x, y), {
                'highway': u'secondary',
                'lanes': u'2',
                'maxspeed': u'50',
                'name': u'Panchkula - Nahan Road',
                'ref': u'MDR118',
                'source': u'openstreetmap.org',
            }),
            dsl.relation(1, {
                'name': u'Haryana Major District Road 118',
                'network': u'IN:SH:HR',
                'ref': u'MDR118',
                'route': u'road',
                'source': u'openstreetmap.org',
                'type': u'route',
            }, ways=[169248989]),
            dsl.relation(2, {
                'name': u'Panchkula - Nahan Road',
                'name:en': u'Panchkula - Nahan Road',
                'network': u'IN-roads',
                'route': u'road',
                'source': u'openstreetmap.org',
                'type': u'route',
            }, ways=[169248989]),
        )

        self.assert_has_feature(
            z, x, y, 'roads', {
                'id': 169248989,
                'network': 'IN:MDR',
                'shield_text': '118',
                'all_networks': ['IN:MDR'],
                'all_shield_texts': ['118'],
            })

    def test_orr_innh_chennai(self):
        # 'ORR' ring-road ref on a motorway classified as IN:NH.
        import dsl

        z, x, y = (16, 47346, 30371)

        self.generate_fixtures(
            dsl.is_in('IN', z, x, y),
            # https://www.openstreetmap.org/way/26807719
            dsl.way(26807719, dsl.tile_diagonal(z, x, y), {
                'highway': u'motorway',
                'lanes': u'3',
                'layer': u'1',
                'maxspeed': u'100',
                'motorroad': u'yes',
                'name': u'Outer Ring Road',
                'oneway': u'yes',
                'ref': u'ORR',
                'source': u'openstreetmap.org',
                'source:lanes': u'DigitalGlobeStandard',
            }),
        )

        self.assert_has_feature(
            z, x, y, 'roads', {
                'id': 26807719,
                'network': u'IN:NH',
                'shield_text': u'ORR',
            })

    def test_orr_innh_hyderabad(self):
        # same 'ORR' handling with network-less route relations present.
        import dsl

        z, x, y = (16, 47010, 29526)

        self.generate_fixtures(
            dsl.is_in('IN', z, x, y),
            # https://www.openstreetmap.org/way/520309418
            dsl.way(520309418, dsl.tile_diagonal(z, x, y), {
                'bicycle': u'no',
                'foot': u'no',
                'highway': u'motorway',
                'horse': u'no',
                'lanes': u'4',
                'maxspeed': u'120',
                'motor_vehicle': u'designated',
                'motorcycle': u'no',
                'name': u'Outer Ring Road',
                'official_name': u'Nehru Outer Ring Road',
                'oneway': u'yes',
                'ref': u'ORR',
                'smoothness': u'excellent',
                'source': u'openstreetmap.org',
                'start_date': u'2011-08-14',
                'surface': u'asphalt',
            }),
            dsl.relation(1, {
                'name': u'Nehru Outer Ring Road',
                'note': u'see other relation',
                'operator': u'HMDA',
                'ref': u'ORR',
                'route': u'road',
                'source': u'openstreetmap.org',
                'type': u'route',
                'wikidata': u'Q7112004',
            }, ways=[520309418]),
            dsl.relation(2, {
                'highway': u'motorway',
                'name': u'Outer Ring Road',
                'official_name': u'Nehru Outer Ring Road',
                'operator': u'HMDA',
                'ref': u'ORR',
                'route': u'road',
                'source': u'openstreetmap.org',
                'type': u'route',
                'wikidata': u'Q7112004',
                'wikipedia': u'en:Outer Ring Road, Hyderabad',
            }, ways=[520309418]),
        )

        self.assert_has_feature(
            z, x, y, 'roads', {
                'id': 520309418,
                'network': u'IN:NH',
                'shield_text': u'ORR',
            })
| |
import numpy as np
from shapely.geometry.polygon import Polygon
import datetime
import netCDF4 as nc
import itertools
import geojson
from shapely.ops import cascaded_union
from openclimategis.util.helpers import get_temp_path
from openclimategis.util.toshp import OpenClimateShp
from shapely.geometry.multipolygon import MultiPolygon, MultiPolygonAdapter
from shapely import prepared, wkt
from shapely.geometry.geo import asShape
class OcgDataset(object):
"""
Wraps and netCDF4-python Dataset object providing extraction methods by
spatial and temporal queries.
dataset -- netCDF4-python Dataset object
**kwds -- arguments for the names of spatial and temporal dimensions.
rowbnds_name
colbnds_name
time_name
time_units
calendar
"""
    def __init__(self,dataset,**kwds):
        # dataset: an open netCDF4-python Dataset; kwds override the default
        # names/units of the spatiotemporal variables (see class docstring).
        self.dataset = dataset

#        self.polygon = kwds.get('polygon')
#        self.temporal = kwds.get('temporal')
#        self.row_name = kwds.get('row_name') or 'latitude'
#        self.col_name = kwds.get('col_name') or 'longitude'

        ## extract the names of the spatiotemporal variables/dimensions from
        ## the keyword arguments.
        self.rowbnds_name = kwds.get('rowbnds_name') or 'bounds_latitude'
        self.colbnds_name = kwds.get('colbnds_name') or 'bounds_longitude'
        self.time_name = kwds.get('time_name') or 'time'
        self.time_units = kwds.get('time_units') or 'days since 1950-01-01 00:00:00'
        self.calendar = kwds.get('calendar') or 'proleptic_gregorian'
#        self.clip = kwds.get('clip') or False
#        self.dissolve = kwds.get('dissolve') or False

#        self.row = self.dataset.variables[self.row_name][:]
#        self.col = self.dataset.variables[self.col_name][:]

        ## extract the row and column bounds from the dataset
        self.row_bnds = self.dataset.variables[self.rowbnds_name][:]
        self.col_bnds = self.dataset.variables[self.colbnds_name][:]
        ## convert the time vector to datetime objects
        ## NOTE(review): nc.netcdftime is the legacy netCDF4 API path —
        ## modern netCDF4 exposes num2date at module level; confirm version.
        self.timevec = nc.netcdftime.num2date(self.dataset.variables[self.time_name][:],
                                              self.time_units,
                                              self.calendar)

        ## these are base numpy arrays used by spatial operations.

        ## four numpy arrays one for each bounding coordinate of a polygon
        self.min_col,self.min_row = np.meshgrid(self.col_bnds[:,0],self.row_bnds[:,0])
        self.max_col,self.max_row = np.meshgrid(self.col_bnds[:,1],self.row_bnds[:,1])
        ## these are the original indices of the row and columns. they are
        ## referenced after the spatial subset to retrieve data from the dataset
        self.real_col,self.real_row = np.meshgrid(np.arange(0,len(self.col_bnds)),
                                                  np.arange(0,len(self.row_bnds)))
def _itr_array_(self,a):
"a -- 2-d ndarray"
ix = a.shape[0]
jx = a.shape[1]
for ii,jj in itertools.product(xrange(ix),xrange(jx)):
yield ii,jj
def _contains_(self,grid,lower,upper):
s1 = grid > lower
s2 = grid < upper
return(s1*s2)
    def _set_overlay_(self,polygon=None,clip=False):
        """
        Perform spatial operations.

        polygon=None -- shapely polygon object
        clip=False -- set to True to perform an intersection

        Populates ``self._igrid`` (per-cell geometries), ``self._weights``
        (area fraction kept per cell) and ``self._mask`` (cells with any
        overlap).
        """
        print('overlay...')
        ## holds polygon objects
        self._igrid = np.empty(self.min_row.shape,dtype=object)
        ## holds weights for area weighting in the case of a dissolve
        self._weights = np.zeros(self.min_row.shape)
        ## initial subsetting to avoid iterating over all polygons unless abso-
        ## lutely necessary
        if polygon is not None:
            # cells whose bounds fall inside the AOI's envelope
            emin_col,emin_row,emax_col,emax_row = polygon.envelope.bounds
            smin_col = self._contains_(self.min_col,emin_col,emax_col)
            smax_col = self._contains_(self.max_col,emin_col,emax_col)
            smin_row = self._contains_(self.min_row,emin_row,emax_row)
            smax_row = self._contains_(self.max_row,emin_row,emax_row)
            include = np.any((smin_col,smax_col),axis=0)*np.any((smin_row,smax_row),axis=0)
        else:
            include = np.empty(self.min_row.shape,dtype=bool)
            include[:,:] = True

#        print('constructing grid...')
#        ## construct the subset of polygon geometries
#        vfunc = np.vectorize(self._make_poly_array_)
#        self._igrid = vfunc(include,
#                            self.min_row,
#                            self.min_col,
#                            self.max_row,
#                            self.max_col,
#                            polygon)
#
#        ## calculate the areas for potential weighting
#        print('calculating area...')
#        def _area(x):
#            if x != None:
#                return(x.area)
#            else:
#                return(0.0)
#        vfunc_area = np.vectorize(_area,otypes=[np.float])
#        preareas = vfunc_area(self._igrid)
#
#        ## if we are clipping the data, modify the geometries and record the weights
#        if clip and polygon:
#            print('clipping...')
##            polys = []
##            for p in self._igrid.reshape(-1):
##                polys.append(self._intersection_(polygon,p))
#            vfunc = np.vectorize(self._intersection_)
#            self._igrid = vfunc(polygon,self._igrid)
#
#        ## calculate weights following intersection
#        areas = vfunc_area(self._igrid)
#        def _weight(x,y):
#            if y == 0:
#                return(0.0)
#            else:
#                return(x/y)
#        self._weights=np.vectorize(_weight)(areas,preareas)
#
#        ## set the mask
#        self._mask = self._weights > 0
#
#        print('overlay done.')

        ## loop for each spatial grid element
        if polygon:
#            prepared_polygon = polygon
            # prepared geometry speeds up the repeated intersects() tests
            prepared_polygon = prepared.prep(polygon)
        for ii,jj in self._itr_array_(include):
            if not include[ii,jj]: continue
            ## create the polygon
            g = self._make_poly_((self.min_row[ii,jj],self.max_row[ii,jj]),
                                 (self.min_col[ii,jj],self.max_col[ii,jj]))
            ## add the polygon if it intersects the aoi of if all data is being
            ## returned.
            if polygon:
                if not prepared_polygon.intersects(g): continue
#            if g.intersects(polygon) or polygon is None:
            ## get the area before the intersection
            prearea = g.area
            ## full intersection in the case of a clip and an aoi is passed
#                if g.overlaps(polygon) and clip is True and polygon is not None:
            if clip is True and polygon is not None:
                ng = g.intersection(polygon)
            ## otherwise, just keep the geometry
            else:
                ng = g
            ## calculate the weight
            w = ng.area/prearea
            ## a polygon can have a true intersects but actually not overlap
            ## i.e. shares a border.
            if w > 0:
                self._igrid[ii,jj] = ng
                self._weights[ii,jj] = w
        ## the mask is used as a subset
        self._mask = self._weights > 0
#        self._weights = self._weights/self._weights.sum()
def _make_poly_(self,rtup,ctup):
"""
rtup = (row min, row max)
ctup = (col min, col max)
"""
return Polygon(((ctup[0],rtup[0]),
(ctup[0],rtup[1]),
(ctup[1],rtup[1]),
(ctup[1],rtup[0])))
@staticmethod
def _make_poly_array_(include,min_row,min_col,max_row,max_col,polygon=None):
ret = None
if include:
poly = Polygon(((min_col,min_row),
(max_col,min_row),
(max_col,max_row),
(min_col,max_row),
(min_col,min_row)))
if polygon != None:
if polygon.intersects(poly):
ret = poly
else:
ret = poly
return(ret)
@staticmethod
def _intersection_(polygon,target):
ret = None
if target != None:
ppp = target.intersection(polygon)
if not ppp.is_empty:
ret = ppp
return(ret)
    def _get_numpy_data_(self,var_name,polygon=None,time_range=None,clip=False):
        """
        Extract a (time, row, col) numpy block for a variable, restricted
        spatially by *polygon* and temporally by *time_range*. Also subsets
        the instance's mask/weight/grid reference arrays in place.

        var_name -- NC variable to extract from
        polygon=None -- shapely polygon object
        time_range=None -- [lower datetime, upper datetime]
        clip=False -- set to True to perform a full intersection
        """
        print('getting numpy data...')
        ## perform the spatial operations (sets self._mask, self._weights,
        ## self._igrid for the overlay)
        self._set_overlay_(polygon=polygon,clip=clip)
        def _u(arg):
            "Pulls unique values and generates an evenly spaced array."
            un = np.unique(arg)
            return(np.arange(un.min(),un.max()+1))
        def _sub(arg):
            "Subset an array."
            return arg[self._idxrow.min():self._idxrow.max()+1,
                       self._idxcol.min():self._idxcol.max()+1]
        ## get the time indices
        if time_range is not None:
            self._idxtime = np.arange(
                0,
                len(self.timevec))[(self.timevec>=time_range[0])*
                                   (self.timevec<=time_range[1])]
        else:
            self._idxtime = np.arange(0,len(self.timevec))
        ## reference the original (world) coordinates of the netCDF when selecting
        ## the spatial subset.
        self._idxrow = _u(self.real_row[self._mask])
        self._idxcol = _u(self.real_col[self._mask])
        ## subset our reference arrays in a similar manner
        self._mask = _sub(self._mask)
        self._weights = _sub(self._weights)
        self._igrid = _sub(self._igrid)
        ## hit the dataset and extract the block
        ## NOTE(review): assumes the variable is dimensioned
        ## (time, row, col) -- confirm against the netCDF schema
        npd = self.dataset.variables[var_name][self._idxtime,self._idxrow,self._idxcol]
        ## add in an extra dummy dimension in the case of one time layer
        if len(npd.shape) == 2:
            npd = npd.reshape(1,npd.shape[0],npd.shape[1])
        print('numpy extraction done.')
        return(npd)
def _is_masked_(self,arg):
"Ensures proper formating of masked data."
if isinstance(arg,np.ma.core.MaskedConstant):
return None
else:
return arg
def extract_elements(self,*args,**kwds):
"""
Merges the geometries and extracted attributes into a GeoJson-like dictionary
list.
var_name -- NC variable to extract from
dissolve=False -- set to True to merge geometries and calculate an
area-weighted average
polygon=None -- shapely polygon object
time_range=None -- [lower datetime, upper datetime]
clip=False -- set to True to perform a full intersection
"""
print('extracting elements...')
## dissolve argument is unique to extract_elements
if 'dissolve' in kwds:
dissolve = kwds.pop('dissolve')
else:
dissolve = False
## pull the variable name from the arguments
var_name = args[0]
## extract numpy data from the nc file
npd = self._get_numpy_data_(*args,**kwds)
## will hold feature dictionaries
features = []
## the unique identified iterator
ids = self._itr_id_()
if dissolve:
## one feature is created for each unique time
for kk in range(len(self._idxtime)):
## check if this is the first iteration. approach assumes that
## masked values are homogenous through the time layers. this
## avoids multiple union operations on the geometries. i.e.
## time 1 = masked, time 2 = masked, time 3 = masked
## vs.
## time 1 = 0.5, time 2 = masked, time 3 = 0.46
if kk == 0:
## on the first iteration:
## 1. make the unioned geometry
## 2. weight the data according to area
## reference layer for the masked data
lyr = npd[kk,:,:]
## select values with spatial overlap and not masked
if hasattr(lyr,'mask'):
select = self._mask*np.invert(lyr.mask)
else:
select = self._mask
## select those geometries
geoms = self._igrid[select]
## union the geometries
unioned = cascaded_union([p for p in geoms])
## select the weight subset and normalize to unity
sub_weights = self._weights*select
self._weights = sub_weights/sub_weights.sum()
## apply the weighting
weighted = npd*self._weights
## generate the feature
feature = dict(
id=ids.next(),
geometry=unioned,
properties=dict({var_name:float(weighted[kk,:,:].sum()),
'timestamp':self.timevec[self._idxtime[kk]]}))
features.append(feature)
else:
## loop for each feature. no dissolving.
for ii,jj in self._itr_array_(self._mask):
## if the data is included, add the feature
if self._mask[ii,jj] == True:
## extract the data and convert any mask values
data = [self._is_masked_(da) for da in npd[:,ii,jj]]
for kk in range(len(data)):
## do not add the feature if the value is a NoneType
if data[kk] == None: continue
feature = dict(
id=ids.next(),
geometry=self._igrid[ii,jj],
properties=dict({var_name:float(data[kk]),
'timestamp':self.timevec[self._idxtime[kk]]}))
features.append(feature)
print('extraction complete.')
return(features)
def _itr_id_(self,start=1):
while True:
try:
yield start
finally:
start += 1
def as_geojson(elements):
    "Serialize feature dictionaries to a GeoJSON string."
    # NOTE: timestamps are stringified in place, mutating the caller's
    # element dictionaries (matches the original behavior)
    feats = []
    for element in elements:
        element['properties']['timestamp'] = str(element['properties']['timestamp'])
        feats.append(geojson.Feature(**element))
    collection = geojson.FeatureCollection(feats)
    return geojson.dumps(collection)
def as_shp(elements,path=None):
    "Write feature dictionaries to a shapefile and return its path."
    # fall back to a temporary file when no destination is given
    if path is None:
        path = get_temp_path(suffix='.shp')
    shp = OpenClimateShp(path,elements)
    shp.write()
    return path
def multipolygon_operation(dataset,var,polygons,time_range=None,clip=None,dissolve=None,ocg_kwds=None):
    """Run extract_elements for every polygon and concatenate the results.

    dataset -- open netCDF dataset
    var -- NC variable name to extract
    polygons -- iterable of shapely polygons
    time_range=None -- [lower datetime, upper datetime]
    clip=None -- forwarded to extract_elements
    dissolve=None -- forwarded to extract_elements
    ocg_kwds=None -- keyword arguments for the OcgDataset constructor

    Returns the combined list of feature dictionaries.
    """
    # a mutable default ({}) is shared across calls; use a None sentinel
    if ocg_kwds is None:
        ocg_kwds = {}
    elements = []
    ncp = OcgDataset(dataset,**ocg_kwds)
    for polygon in polygons:
        elements += ncp.extract_elements(var,
                                         polygon=polygon,
                                         time_range=time_range,
                                         clip=clip,
                                         dissolve=dissolve)
    return(elements)
if __name__ == '__main__':
    ## demo driver: extract precipitation over a set of watershed polygons
    ## and write the result to disk as GeoJSON
#    NC = '/home/bkoziol/git/OpenClimateGIS/bin/climate_data/wcrp_cmip3/pcmdi.ipcc4.bccr_bcm2_0.1pctto2x.run1.monthly.cl_A1_1.nc'
    NC = '/home/bkoziol/git/OpenClimateGIS/bin/climate_data/maurer/bccr_bcm2_0.1.sresa1b.monthly.Prcp.1950.nc'
    ## all
#    POLYINT = Polygon(((-99,39),(-94,38),(-94,40),(-100,39)))
    ## great lakes
#    POLYINT = Polygon(((-90.35,40.55),(-83,43),(-80.80,49.87),(-90.35,49.87)))
    ## return all data
#    POLYINT = None
    ## two areas
    ## (example multi-AOI input: POLYINT = [wkt.loads('POLYGON ((...))'),
    ##  wkt.loads('POLYGON ((...))')] -- two disjoint polygons over Michigan
    ##  and Wisconsin)
    ## watersheds
    path = '/home/bkoziol/git/OpenClimateGIS/bin/geojson/watersheds_4326.geojson'
#    select = ['HURON']
    select = []  # empty list selects every watershed feature
    with open(path,'r') as f:
        data = ''.join(f.readlines())
#        data2 = f.read()
    gj = geojson.loads(data)
    POLYINT = []
    for feature in gj['features']:
        ## when a selection is given, keep only features whose HUCNAME matches
        if select:
            prop = feature['properties']
            if prop['HUCNAME'] in select:
                pass
            else:
                continue
        geom = asShape(feature['geometry'])
        ## flatten multipolygons into a list of simple polygons
        if not isinstance(geom,MultiPolygonAdapter):
            geom = [geom]
        for polygon in geom:
            POLYINT.append(polygon)
    TEMPORAL = [datetime.datetime(1950,2,1),datetime.datetime(1950,4,30)]
#    TEMPORAL = [datetime.datetime(1950,2,1),datetime.datetime(1950,3,1)]
    DISSOLVE = True
    CLIP = True
    VAR = 'Prcp'
    dataset = nc.Dataset(NC,'r')
    ## a single polygon may be passed; normalize to a sequence
    if type(POLYINT) not in (list,tuple): POLYINT = [POLYINT]
    elements = multipolygon_operation(dataset,
                                      VAR,
                                      POLYINT,
                                      time_range=TEMPORAL,
                                      clip=CLIP,
                                      dissolve=DISSOLVE)
#    ncp = OcgDataset(dataset)
#    ncp._set_overlay_(POLYINT)
#    npd = ncp._get_numpy_data_(VAR,POLYINT,TEMPORAL)
#    elements = ncp.extract_elements(VAR,polygon=POLYINT,time_range=TEMPORAL,clip=CLIP,dissolve=DISSOLVE)
#    gj = ncp.as_geojson(elements)
#    out = as_shp(elements)
    out = as_geojson(elements)
#    print(out)
    ## write the serialized GeoJSON result
    with open('/tmp/out','w') as f:
        f.write(out)
| |
""" Class to organize QA for a full DESI production run
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import glob, os
from desispec.io import get_exposures
from desispec.io import get_files
from desispec.io import read_frame
from desispec.log import get_logger
log = get_logger()
class QA_Prod(object):
    """Organize and execute QA for a full DESI production run."""

    def __init__(self, specprod_dir):
        """ Class to organize and execute QA for a DESI production

        Args:
            specprod_dir(str): Path containing the exposures/ directory to use. If the value
                is None, then the value of :func:`specprod_root` is used instead.

        Attributes:
            qa_exps : list
              List of QA_Exposure classes, one per exposure in production
            data : dict
              Nested night -> exposure -> camera QA dictionary
        """
        self.specprod_dir = specprod_dir
        # production name is the last non-empty component of the path
        tmp = specprod_dir.split('/')
        self.prod_name = tmp[-1] if (len(tmp[-1]) > 0) else tmp[-2]
        self.qa_exps = []
        self.data = {}

    def get_qa_array(self, qatype, metric, nights='all', channels='all'):
        """ Generate an array of QA values from .data

        Args:
            qatype: str
              FIBERFLAT, SKYSUB
            metric: str
            nights: str or list of str, optional
            channels: str or list of str, optional
              'b', 'r', 'z'

        Returns:
            array: ndarray
            ne_dict: dict
              dict of nights and exposures contributing to the array
        """
        import pdb
        out_list = []
        ne_dict = {}
        # Nights
        for night in self.data:
            if (night not in nights) and (nights != 'all'):
                continue
            # Exposures
            for expid in self.data[night]:
                # Cameras
                for camera in self.data[night][expid]:
                    # 'flavor' is exposure metadata, not a camera entry
                    if camera == 'flavor':
                        continue
                    # first character of the camera name is the channel
                    if (camera[0] not in channels) and (channels != 'all'):
                        continue
                    # Grab
                    try:
                        val = self.data[night][expid][camera][qatype]['QA'][metric]
                    except KeyError:  # Each exposure has limited qatype
                        pass
                    except TypeError:
                        # NOTE(review): debugging hook retained from original
                        pdb.set_trace()
                    else:
                        # some metrics are stored as sequences; take the first
                        if isinstance(val, (list, tuple)):
                            out_list.append(val[0])
                        else:
                            out_list.append(val)
                        # record the contributing night/exposure
                        if night not in ne_dict:
                            ne_dict[night] = []
                        if expid not in ne_dict[night]:
                            ne_dict[night].append(expid)
        # Return
        return np.array(out_list), ne_dict

    def load_data(self):
        """ Load QA data from disk
        """
        from desispec.io.qa import load_qa_prod
        #
        inroot = self.specprod_dir+'/'+self.prod_name+'_qa'
        self.data = load_qa_prod(inroot)

    def make_frameqa(self, make_plots=False, clobber=True):
        """ Work through the Production and make QA for all frames

        Parameters:
            make_plots: bool, optional
              Remake the plots too?
            clobber: bool, optional
              Overwrite existing QA output files?
        """
        # imports
        from desispec.io import meta
        from desispec.io.qa import load_qa_frame, write_qa_frame
        from desispec.io.fiberflat import read_fiberflat
        from desispec.io.sky import read_sky
        from desispec.io.fluxcalibration import read_flux_calibration
        from desispec.qa import qa_plots
        from desispec.io.fluxcalibration import read_stdstar_models

        # Loop on nights
        path_nights = glob.glob(self.specprod_dir+'/exposures/*')
        nights = [ipathn[ipathn.rfind('/')+1:] for ipathn in path_nights]
        for night in nights:
            for exposure in get_exposures(night, specprod_dir = self.specprod_dir):
                # Object only??
                frames_dict = get_files(filetype = str('frame'), night = night,
                                        expid = exposure, specprod_dir = self.specprod_dir)
                for camera, frame_fil in frames_dict.items():
                    # Load frame
                    frame = read_frame(frame_fil)
                    spectro = int(frame.meta['CAMERA'][-1])
                    # calibration exposures get their own QA type
                    if frame.meta['FLAVOR'] in ['flat', 'arc']:
                        qatype = 'qa_calib'
                    else:
                        qatype = 'qa_data'
                    qafile = meta.findfile(qatype, night=night, camera=camera, expid=exposure, specprod_dir=self.specprod_dir)
                    # logical "and" replaces the original bitwise "&"
                    if (not clobber) and os.path.isfile(qafile):
                        log.info("qafile={:s} exists. Not over-writing. Consider clobber=True".format(qafile))
                        continue
                    # Load
                    qaframe = load_qa_frame(qafile, frame, flavor=frame.meta['FLAVOR'])
                    # Flat QA
                    if frame.meta['FLAVOR'] in ['flat']:
                        fiberflat_fil = meta.findfile('fiberflat', night=night, camera=camera, expid=exposure, specprod_dir=self.specprod_dir)
                        fiberflat = read_fiberflat(fiberflat_fil)
                        qaframe.run_qa('FIBERFLAT', (frame, fiberflat), clobber=clobber)
                        if make_plots:
                            # Do it
                            qafig = meta.findfile('qa_flat_fig', night=night, camera=camera, expid=exposure, specprod_dir=self.specprod_dir)
                            qa_plots.frame_fiberflat(qafig, qaframe, frame, fiberflat)
                    # SkySub QA
                    if qatype == 'qa_data':
                        sky_fil = meta.findfile('sky', night=night, camera=camera, expid=exposure, specprod_dir=self.specprod_dir)
                        skymodel = read_sky(sky_fil)
                        qaframe.run_qa('SKYSUB', (frame, skymodel))
                        if make_plots:
                            qafig = meta.findfile('qa_sky_fig', night=night, camera=camera, expid=exposure, specprod_dir=self.specprod_dir)
                            qa_plots.frame_skyres(qafig, frame, skymodel, qaframe)
                    # FluxCalib QA
                    if qatype == 'qa_data':
                        # Standard stars
                        stdstar_fil = meta.findfile('stdstars', night=night, camera=camera, expid=exposure, specprod_dir=self.specprod_dir,
                                                    spectrograph=spectro)
                        model_tuple = read_stdstar_models(stdstar_fil)
                        flux_fil = meta.findfile('calib', night=night, camera=camera, expid=exposure, specprod_dir=self.specprod_dir)
                        fluxcalib = read_flux_calibration(flux_fil)
                        qaframe.run_qa('FLUXCALIB', (frame, fluxcalib, model_tuple))#, indiv_stars))
                        if make_plots:
                            qafig = meta.findfile('qa_flux_fig', night=night, camera=camera, expid=exposure, specprod_dir=self.specprod_dir)
                            qa_plots.frame_fluxcalib(qafig, qaframe, frame, fluxcalib, model_tuple)
                    # Write
                    write_qa_frame(qafile, qaframe)

    def slurp(self, make_frameqa=False, remove=True, **kwargs):
        """ Slurp all the individual QA files into one master QA file

        Args:
            make_frameqa: bool, optional
              Regenerate the individual QA files (at the frame level) first
            remove: bool, optional
              Passed through to QA_Exposure
        """
        from desispec.qa import QA_Exposure
        from desispec.io import write_qa_prod
        # Remake?
        if make_frameqa:
            self.make_frameqa(**kwargs)
        # Loop on nights
        path_nights = glob.glob(self.specprod_dir+'/exposures/*')
        nights = [ipathn[ipathn.rfind('/')+1:] for ipathn in path_nights]
        # Reset
        log.info("Resetting qa_exps in qa_prod")
        self.qa_exps = []
        # Loop
        for night in nights:
            # Loop on exposures
            for exposure in get_exposures(night, specprod_dir = self.specprod_dir):
                frames_dict = get_files(filetype = str('frame'), night = night,
                                        expid = exposure, specprod_dir = self.specprod_dir)
                if len(frames_dict) == 0:
                    continue
                # Load any frame (for the type); list() is required because
                # dict.keys() is a non-indexable view on Python 3
                key = list(frames_dict.keys())[0]
                frame_fil = frames_dict[key]
                frame = read_frame(frame_fil)
                qa_exp = QA_Exposure(exposure, night, frame.meta['FLAVOR'],
                                     specprod_dir=self.specprod_dir, remove=remove)
                # Append
                self.qa_exps.append(qa_exp)
        # Write
        outroot = self.specprod_dir+'/'+self.prod_name+'_qa'
        write_qa_prod(outroot, self)

    def __repr__(self):
        """ Print formatting
        """
        return ('{:s}: specprod_dir={:s}'.format(self.__class__.__name__, self.specprod_dir))
| |
"""
**run_eval.py**
Least squares anomaly evaluation on static data. After running experiments,
use `generate_latex.py` to create a table of results.
This is a refactored and updated version of
the script in `evaluate_lsanomaly.zip`
(see https://cit.mak.ac.ug/staff/jquinn/software/lsanomaly.html).
**usage**: run_eval.py [-h] --data-dir DATA_DIR --output-json JSON_FILE
Perform evaluation of LSAnomaly on downloaded data-sets. 5-fold cross
validation is performed.
**Arguments**
-h, --help
show this help message and exit
--data-dir DATA_DIR, -d DATA_DIR
directory of stored data-sets in `libsvm` format
--params YML_PARAMS, -p YML_PARAMS
YAML file with evaluation parameters
--output-json JSON_FILE, -o JSON_FILE
path and file name of the results
"""
# The MIT License
#
# Copyright 2019 John Quinn, Chris Skiscim
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import copy
import json
import logging
import math
import os
import time
import numpy as np
import yaml
from sklearn import (
model_selection,
cluster,
metrics,
svm,
neighbors,
preprocessing,
)
from sklearn.datasets import load_svmlight_file
import lsanomaly
# Module-level logger: NullHandler keeps this module quiet when imported as
# a library; basicConfig below turns on DEBUG output for script use.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
fmt = "[%(asctime)s %(levelname)-8s] [%(filename)s:%(lineno)4s - %(funcName)s()] %(message)s"  # noqa
logging.basicConfig(level=logging.DEBUG, format=fmt)
def evaluate(
    X_train,
    y_train,
    X_test,
    y_test,
    outlier_class,
    method_name,
    current_method_aucs,
    sigma,
    rho=0.1,
    nu=0.5,
):
    """
    Evaluation for a method and data set. Calculates the AUC for a single
    evaluation fold.

    Args:
        X_train (numpy.ndarray): independent training variables
        y_train (numpy.ndarray): training labels
        X_test (numpy.ndarray): independent test variables
        y_test (numpy.ndarray): test labels
        outlier_class (int): index of the outlier class
        method_name (str): method being run
        current_method_aucs (list): input to the *results* dictionary
        sigma (float): kernel lengthscale for LSAD and OCSVM
        rho (float): smoothness parameter for LSAD
        nu (float): OCSVM parameter - see *scikit-learn* documentation

    Raises:
        ValueError: if a `NaN` is encountered in the AUC calculation or
            *method_name* is unknown.
    """
    try:
        if method_name == "LSAD":
            lsanomaly_model = lsanomaly.LSAnomaly(
                n_kernels_max=500, gamma=sigma ** -2, rho=rho
            )
            lsanomaly_model.fit(X_train, y_train)
            predictions = lsanomaly_model.predict_proba(X_test)[:, -1]
        elif method_name == "OCSVM":
            svm_anomaly_model = svm.OneClassSVM(gamma=sigma ** -2, nu=nu)
            svm_anomaly_model.fit(X_train)
            # larger decision_function means "more normal"; invert so that
            # larger prediction means "more anomalous"
            predictions = 1 - svm_anomaly_model.decision_function(X_test)
        elif method_name == "KNN":
            # keyword arguments: scikit-learn >= 1.1 removed positional
            # estimator constructor parameters
            anomaly_model = neighbors.NearestNeighbors(n_neighbors=10)
            anomaly_model.fit(X_train)
            dists, idx = anomaly_model.kneighbors(X_test)
            predictions = dists[:, -1]
        elif method_name == "KM":
            km = cluster.KMeans(n_clusters=min(X_train.shape[0], 20))
            km.fit(X_train)
            nn = neighbors.NearestNeighbors(n_neighbors=1)
            nn.fit(km.cluster_centers_)
            dists, idx = nn.kneighbors(X_test)
            predictions = dists[:, 0]
        else:
            raise ValueError("unknown method: {}".format(method_name))

        fpr, tpr, thresholds = metrics.roc_curve(
            y_test == outlier_class, predictions
        )
        metric_auc = metrics.auc(fpr, tpr)
        logger.debug("\tAUC: {:>6.4f}".format(metric_auc))

        if not math.isnan(metric_auc):
            current_method_aucs.append(metric_auc)
        else:
            raise ValueError("NaN encountered in {}".format(method_name))
    except Exception as e:
        # the original listed (IndexError, ValueError, Exception); the
        # Exception member already subsumes the other two
        logger.exception(
            "\t{} {}: {}".format(method_name, type(e), str(e)), exc_info=True
        )
        raise
def gen_data(data_sets):
    """
    Generator to deliver independent, dependent variables and the name
    of the data set.

    Args:
        data_sets (list): data sets read from the data directory

    Yields:
        numpy.ndarray, numpy.ndarray, str: `X`, `y`, `name`
    """
    for dataset in data_sets:
        path, name = os.path.split(dataset)
        try:
            X, y = load_svmlight_file(dataset)
        except Exception as e:
            # Exception already subsumes the ValueError/FileNotFoundError
            # that the original listed alongside it
            logger.error("unable to load {}".format(dataset))
            logger.exception("{}: {}".format(type(e), str(e)), exc_info=True)
            raise
        X = np.array(X.todense())
        scaler = preprocessing.StandardScaler()
        X = scaler.fit_transform(X)
        # restrict to the first two classes encountered
        # NOTE(review): set() iteration order is not guaranteed, so *which*
        # two classes survive is implementation-dependent -- confirm intent
        classes_ = list(set(y))
        first_two_classes = np.logical_or(y == classes_[0], y == classes_[1])
        X = X[first_two_classes, :]
        y = y[first_two_classes]
        yield X, y, name
def gen_dataset(data_dir):
    """
    Generator for the test data file paths. All
    test files must be capable of being loaded by `load_svmlight_file()`. Files
    with extensions `.bz2`, `.csv` are ignored. Any file beginning with `.`
    is also ignored.

    This walks the directory tree starting at `data_dir`, therefore all
    subdirectories will be read.

    Args:
        data_dir (str): Fully qualified path to the data directory

    Yields:
        str: `svmlight` formatted data set paths under `data_dir`

    Raises:
        ValueError: if `data_dir` is not a directory (raised on first
            iteration of the generator).
    """
    if not os.path.isdir(data_dir):
        raise ValueError("not a directory: {}".format(data_dir))
    for root, dirs, files in os.walk(data_dir):
        for filename in files:
            if filename.endswith(".bz2") or filename.endswith(".csv"):
                continue
            if filename.startswith("."):
                continue
            # join against the directory currently being walked; joining
            # against data_dir (as the original did) produced wrong paths
            # for files found inside subdirectories
            ds = os.path.join(root, filename)
            if os.path.isdir(ds):
                continue
            yield ds
def _read_params(param_file):
    """Load the YAML evaluation-parameter file and return it as a dict.

    Args:
        param_file (str): path to the YAML parameter file

    Raises:
        FileNotFoundError: propagated from open().
        yaml.YAMLError/ValueError: propagated from YAML parsing.
    """
    # the original try/except only re-raised the exception unchanged, so
    # the dead handler is dropped; errors propagate to the caller directly
    with open(param_file) as yml_file:
        return yaml.safe_load(yml_file)
def main(data_dir, json_out, param_file, n_splits=5, rho=0.1, nu=0.5):
    """
    The main show. Loop through all the data-sets and methods running
    a 5-fold stratified cross validation. The results are saved to the
    specified `json_out` file for further processing.

    Args:
        data_dir (str): directory holding the downloaded data sets
        json_out (str): path and filename to store the evaluation results
        param_file (str): YAML file with evaluation parameters
        n_splits (int): number of folds in the cross-validation
        rho (float): smoothness parameter forwarded to LSAD
        nu (float): OCSVM parameter forwarded to evaluate()
    """
    params = _read_params(param_file)
    method_names = params["evaluation"]["methods"]
    n_methods = len(method_names)

    results = dict()
    results["auc"] = dict()
    results["time"] = dict()
    # an explicit copy; the original "[:n_methods]" slice was always the
    # whole list
    results["methods"] = list(method_names)
    results["datasize"] = dict()
    results["n_classes"] = dict()

    # chain the generators
    datasets = gen_dataset(data_dir)
    data_gen = gen_data(datasets)

    logger.debug("starting...")
    ot_start = time.time()
    for X, y, dataset_name in data_gen:
        results["auc"][dataset_name] = [None] * n_methods
        results["time"][dataset_name] = [None] * n_methods

        # collapse everything that is not the outlier class into one class
        classes = list(set(y))
        outlier_class = classes[-1]
        y[y != outlier_class] = classes[0]
        classes = list(set(y))

        results["n_classes"][dataset_name] = len(classes)
        results["datasize"][dataset_name] = X.shape

        # kernel lengthscale heuristic shared by LSAD and OCSVM
        sigma = lsanomaly.lengthscale_approx.median_kneighbour_distance(X)

        for method, method_name in enumerate(method_names):
            logger.debug(
                "dataset: {}, method: {}".format(dataset_name, method_name)
            )
            current_method_aucs = list()
            start_time = time.time()
            kf = model_selection.StratifiedKFold(n_splits=n_splits)

            for train, test in kf.split(X, y):
                X_train = X[train, :]
                y_train = y[train]
                # train only on inliers
                X_train = X_train[y_train != outlier_class]
                y_train = y_train[y_train != outlier_class]
                X_test = X[test, :]
                y_test = y[test]
                evaluate(
                    X_train,
                    y_train,
                    X_test,
                    y_test,
                    outlier_class,
                    method_name,
                    current_method_aucs,
                    sigma,
                    rho=rho,
                    nu=nu,
                )

            elapsed = time.time() - start_time
            logger.debug(
                "\tavg AUC : {:>8.4f}".format(np.mean(current_method_aucs))
            )
            logger.debug("\ttotal time : {:>8.4f}s".format(elapsed))
            logger.debug("\tavg time : {:>8.4f}s".format(elapsed / n_splits))
            # record the measured fold time; the original re-read the clock
            # here, silently folding the logging overhead into the figure
            results["time"][dataset_name][method] = elapsed
            results["auc"][dataset_name][method] = copy.copy(
                current_method_aucs
            )

    with open(json_out, "w") as fp:
        json.dump(results, fp)

    logger.debug(
        "Total evaluation time was about {:>4.2f}m".format(
            (time.time() - ot_start) / 60.0
        )
    )
    logger.debug("Results written to {}".format(json_out))
if __name__ == "__main__":
    # Accept command line arguments and kick off the evaluation.
    import argparse

    parser = argparse.ArgumentParser(
        description="Perform evaluation of LSAnomaly on downloaded "
        "data-sets. 5-fold cross validation is performed."
    )
    parser.add_argument(
        "--data-dir",
        "-d",
        dest="data_dir",
        required=True,
        help="directory of stored data-sets in libsvm format",
    )
    parser.add_argument(
        "--params",
        "-p",
        dest="yml_params",
        required=True,
        help="YAML file with evaluation parameters",
    )
    parser.add_argument(
        "--output-json",
        "-o",
        dest="json_file",
        required=True,
        help="path and file name of the results (JSON)",
    )
    args = parser.parse_args()
    # main() always returns None, so the original
    # "sys.exit(main(...))" inside "try/except SystemExit: pass" was a
    # no-op wrapper that also discarded the exit status; call main directly
    main(args.data_dir, args.json_file, args.yml_params, n_splits=5)
| |
# qlearningAgents.py
# ------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from game import *
from learningAgents import ReinforcementAgent
from featureExtractors import *
import random,util,math
import numpy as np
class QLearningAgent(ReinforcementAgent):
    """
      Q-Learning Agent

      Functions you should fill in:
        - computeValueFromQValues
        - computeActionFromQValues
        - getQValue
        - getAction
        - update

      Instance variables you have access to
        - self.epsilon (exploration prob)
        - self.alpha (learning rate)
        - self.discount (discount rate)

      Functions you should use
        - self.getLegalActions(state)
          which returns legal actions for a state
    """
    def __init__(self, **args):
        "You can initialize Q-values here..."
        ReinforcementAgent.__init__(self, **args)
        # Q-values keyed by (state, action); Counter defaults to 0 for
        # unseen pairs, which implements the getQValue contract directly
        self.QValues = util.Counter()

    def getQValue(self, state, action):
        """
          Returns Q(state,action)
          Should return 0.0 if we have never seen a state
          or the Q node value otherwise
        """
        return self.QValues[(state, action)]

    def computeValueFromQValues(self, state):
        """
          Returns max_action Q(state,action)
          where the max is over legal actions.  Note that if
          there are no legal actions, which is the case at the
          terminal state, you should return a value of 0.0.
        """
        # an empty action list covers *any* terminal state; the original
        # compared against the literal string 'TERMINAL_STATE' and would
        # raise on max() of an empty sequence for other terminal encodings
        actions = self.getLegalActions(state)
        if not actions:
            return 0.0
        return max(self.getQValue(state, action) for action in actions)

    def computeActionFromQValues(self, state):
        """
          Compute the best action to take in a state.  Note that if there
          are no legal actions, which is the case at the terminal state,
          you should return None.
        """
        actions = self.getLegalActions(state)
        # the contract requires None at terminal states; the original
        # returned 0.0 here
        if not actions:
            return None
        action_values = [self.getQValue(state, action) for action in actions]
        return actions[np.argmax(action_values)]

    def getAction(self, state):
        """
          Compute the action to take in the current state.  With
          probability self.epsilon, we should take a random action and
          take the best policy action otherwise.  Note that if there are
          no legal actions, which is the case at the terminal state, you
          should choose None as the action.
        """
        legalActions = self.getLegalActions(state)
        # terminal state: no legal actions -> None (the original would
        # crash in np.random.choice on an empty sequence)
        if not legalActions:
            return None
        if np.random.rand() < self.epsilon:
            # explore: uniform random legal action
            return np.random.choice(legalActions)
        # exploit: greedy action w.r.t. the current Q-values
        return self.computeActionFromQValues(state)

    def update(self, state, action, nextState, reward):
        """
          The parent class calls this to observe a
          state = action => nextState and reward transition.
          You should do your Q-Value update here

          NOTE: You should never call this function,
          it will be called on your behalf
        """
        # successor value is 0.0 at terminal states; this replaces the
        # original bare try/except, which silently hid unrelated errors
        nextStateMaxQ = self.computeValueFromQValues(nextState)
        # standard Q-learning update: Q <- Q + alpha * (target - Q)
        target = reward + self.discount * nextStateMaxQ
        self.QValues[(state, action)] += \
            self.alpha * (target - self.getQValue(state, action))

    def getPolicy(self, state):
        return self.computeActionFromQValues(state)

    def getValue(self, state):
        return self.computeValueFromQValues(state)
class PacmanQAgent(QLearningAgent):
    "Exactly the same as QLearningAgent, but with different default parameters"

    def __init__(self, epsilon=0.05,gamma=0.8,alpha=0.2, numTraining=0, **args):
        """
        These default parameters can be changed from the pacman.py command line.
        For example, to change the exploration rate, try:
            python pacman.py -p PacmanQLearningAgent -a epsilon=0.1

        alpha    - learning rate
        epsilon  - exploration rate
        gamma    - discount factor
        numTraining - number of training episodes, i.e. no learning after these many episodes
        """
        # fold the explicit keyword defaults back into the kwargs dict
        # expected by the QLearningAgent constructor
        args.update(epsilon=epsilon, gamma=gamma,
                    alpha=alpha, numTraining=numTraining)
        self.index = 0  # This is always Pacman
        QLearningAgent.__init__(self, **args)

    def getAction(self, state):
        """
        Simply calls the getAction method of QLearningAgent and then
        informs parent of action for Pacman.  Do not change or remove this
        method.
        """
        chosen = QLearningAgent.getAction(self, state)
        self.doAction(state, chosen)
        return chosen
class ApproximateQAgent(PacmanQAgent):
    """
       ApproximateQLearningAgent

       You should only have to overwrite getQValue
       and update.  All other QLearningAgent functions
       should work as is.
    """
    def __init__(self, extractor='IdentityExtractor', **args):
        self.featExtractor = util.lookup(extractor, globals())()
        PacmanQAgent.__init__(self, **args)
        # feature weights; Counter defaults to 0.0 for unseen features
        self.weights = util.Counter()

    def getWeights(self):
        return self.weights

    def getQValue(self, state, action):
        """
          Should return Q(state,action) = w * featureVector
          where * is the dotProduct operator
        """
        feat = self.featExtractor.getFeatures(state, action)
        # dot product of the weight vector and the feature vector
        return sum(self.weights[dim] * feat[dim] for dim in feat)

    def update(self, state, action, nextState, reward):
        """
           Should update your weights based on transition
        """
        # successor value, 0 at terminal states; the original bare
        # "except:" silently mapped *any* error to 0, hiding real bugs
        nextActions = self.getLegalActions(nextState)
        if nextActions:
            nextStateMaxQ = max(self.getQValue(nextState, nextAction)
                                for nextAction in nextActions)
        else:
            nextStateMaxQ = 0
        feat = self.featExtractor.getFeatures(state, action)
        # compute the TD correction once, before any weight is modified, so
        # every feature update uses the pre-update weights (the original
        # achieved the same with a full copy of the weight vector)
        correction = self.alpha * (reward
                                   + self.discount * nextStateMaxQ
                                   - self.getQValue(state, action))
        for dim in feat:
            self.weights[dim] += feat[dim] * correction

    def final(self, state):
        "Called at the end of each game."
        # call the super-class final method
        PacmanQAgent.final(self, state)

        # did we finish training?
        if self.episodesSoFar == self.numTraining:
            # you might want to print your weights here for debugging
            pass
| |
from __future__ import with_statement
from cms.api import create_page
from cms.cms_toolbar import CMSToolbar
from cms.test_utils.testcases import SettingsOverrideTestCase
from cms.test_utils.util.context_managers import SettingsOverride
from cms.toolbar.items import (Anchor, TemplateHTML, Switcher, List, ListItem,
GetButton)
from cms.utils import get_cms_setting
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, User, Permission
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
class ToolbarTestBase(SettingsOverrideTestCase):
    """Shared helpers for toolbar tests: request building and user fixtures."""

    def get_page_request(self, page, user, path=None, edit=False):
        """Build a GET request for *page* (or the explicit *path*),
        optionally with the '?edit' toolbar flag appended."""
        # explicit conditional instead of the "page and ... or path" idiom,
        # which silently fell back to *path* whenever get_absolute_url()
        # returned a falsy value such as ''
        path = page.get_absolute_url() if page else path
        if edit:
            path += '?edit'
        request = RequestFactory().get(path)
        request.session = {}
        request.user = user
        request.current_page = page
        return request

    def get_anon(self):
        """Return an anonymous (not logged in) user."""
        return AnonymousUser()

    def get_staff(self):
        """Create and return a staff user with page-change permission."""
        staff = User(
            username='staff',
            email='staff@staff.org',
            is_active=True,
            is_staff=True,
        )
        staff.set_password('staff')
        staff.save()
        staff.user_permissions.add(Permission.objects.get(codename='change_page'))
        return staff

    def get_nonstaff(self):
        """Create and return a non-staff user with page-change permission."""
        nonstaff = User(
            username='nonstaff',
            email='nonstaff@staff.org',
            is_active=True,
            is_staff=False,
        )
        nonstaff.set_password('nonstaff')
        nonstaff.save()
        nonstaff.user_permissions.add(Permission.objects.get(codename='change_page'))
        return nonstaff

    def get_superuser(self):
        """Create and return a superuser account."""
        superuser = User(
            username='superuser',
            email='superuser@superuser.org',
            is_active=True,
            is_staff=True,
            is_superuser=True,
        )
        superuser.set_password('superuser')
        superuser.save()
        return superuser
class ToolbarTests(ToolbarTestBase):
    """Assertions on CMSToolbar item composition and visibility for the
    different user classes (anonymous, non-staff, staff, superuser), with
    and without a current page / edit mode.

    Item *positions* are asserted by index, so these tests are tightly
    coupled to the toolbar's build order.
    """
    settings_overrides = {'CMS_PERMISSION': False}

    def test_toolbar_no_page_anon(self):
        """Anonymous visitor, no page: only logo and login form."""
        request = self.get_page_request(None, self.get_anon(), '/')
        toolbar = CMSToolbar(request)
        items = toolbar.get_items({})
        self.assertEqual(len(items), 2) # Logo + login
        # check the logo is there
        logo = items[0]
        self.assertIsInstance(logo, Anchor)
        # check the login form is there
        login = items[1]
        self.assertIsInstance(login, TemplateHTML)
        self.assertEqual(login.template, 'cms/toolbar/items/login.html')

    def test_toolbar_no_page_staff(self):
        """Staff user, no page: logo, edit switch (off), admin menu, logout."""
        request = self.get_page_request(None, self.get_staff(), '/')
        toolbar = CMSToolbar(request)
        items = toolbar.get_items({})
        # Logo + edit-mode + admin-menu + logout
        self.assertEqual(len(items), 4)
        # check the logo is there
        logo = items[0]
        self.assertIsInstance(logo, Anchor)
        # check the edit-mode switcher is there and the switcher is turned off
        edit = items[1]
        self.assertIsInstance(edit, Switcher)
        self.assertFalse(toolbar.edit_mode)
        # check the admin-menu
        admin = items[2]
        self.assertIsInstance(admin, List)
        self.assertEqual(len(admin.raw_items), 1) # only the link to main admin
        self.assertIsInstance(admin.raw_items[0], ListItem)
        # check the logout button
        logout = items[-1]
        self.assertIsInstance(logout, GetButton)
        self.assertEqual(logout.url, '?cms-toolbar-logout')

    def test_toolbar_no_page_superuser(self):
        """Superuser, no page: same composition as staff with no page."""
        request = self.get_page_request(None, self.get_superuser(), '/')
        toolbar = CMSToolbar(request)
        items = toolbar.get_items({})
        # Logo + edit-mode + admin-menu + logout
        self.assertEqual(len(items), 4)
        # check the logo is there
        logo = items[0]
        self.assertIsInstance(logo, Anchor)
        # check the edit-mode switcher is there and the switcher is turned off
        edit = items[1]
        self.assertIsInstance(edit, Switcher)
        self.assertFalse(toolbar.edit_mode)
        # check the admin-menu
        admin = items[2]
        self.assertIsInstance(admin, List)
        self.assertEqual(len(admin.raw_items), 1) # only the link to main admin
        self.assertIsInstance(admin.raw_items[0], ListItem)
        # check the logout button
        logout = items[-1]
        self.assertIsInstance(logout, GetButton)
        self.assertEqual(logout.url, '?cms-toolbar-logout')

    def test_toolbar_anon(self):
        """Anonymous visitor on a page: only logo and login form."""
        page = create_page('test', 'nav_playground.html', 'en')
        request = self.get_page_request(page, self.get_anon())
        toolbar = CMSToolbar(request)
        items = toolbar.get_items({})
        self.assertEqual(len(items), 2) # Logo + login
        # check the logo is there
        logo = items[0]
        self.assertIsInstance(logo, Anchor)
        # check the login form is there
        login = items[1]
        self.assertIsInstance(login, TemplateHTML)
        self.assertEqual(login.template, 'cms/toolbar/items/login.html')

    def test_toolbar_nonstaff(self):
        """Non-staff user with change_page: logo, edit switch, logout."""
        page = create_page('test', 'nav_playground.html', 'en', published=True)
        request = self.get_page_request(page, self.get_nonstaff())
        toolbar = CMSToolbar(request)
        items = toolbar.get_items({})
        # Logo + edit-mode + logout
        self.assertEqual(len(items), 3)
        # check the logo is there
        logo = items[0]
        self.assertIsInstance(logo, Anchor)
        # check the edit-mode switcher is there and the switcher is turned off
        edit = items[1]
        self.assertIsInstance(edit, Switcher)
        self.assertFalse(toolbar.edit_mode)
        # check the logout button
        logout = items[-1]
        self.assertIsInstance(logout, GetButton)
        self.assertEqual(logout.url, '?cms-toolbar-logout')

    def test_toolbar_staff(self):
        """Superuser on a published page: full toolbar, including template
        and page menus whose entry URLs are checked one by one."""
        page = create_page('test', 'nav_playground.html', 'en', published=True)
        request = self.get_page_request(page, self.get_superuser())
        toolbar = CMSToolbar(request)
        items = toolbar.get_items({})
        # Logo + edit-mode + templates + page-menu + admin-menu + logout
        self.assertEqual(len(items), 6)
        # check the logo is there
        logo = items[0]
        self.assertIsInstance(logo, Anchor)
        # check the edit-mode switcher is there and the switcher is turned off
        edit = items[1]
        self.assertIsInstance(edit, Switcher)
        self.assertFalse(toolbar.edit_mode)
        # check templates
        templates = items[2]
        self.assertIsInstance(templates, List)
        self.assertEqual(len(templates.raw_items), len(get_cms_setting('TEMPLATES')))
        base = reverse('admin:cms_page_change_template', args=(page.pk,))
        for item, template in zip(templates.raw_items, get_cms_setting('TEMPLATES')):
            self.assertEqual(item.url, '%s?template=%s' % (base, template[0]))
        # check page menu
        pagemenu = items[3]
        self.assertIsInstance(pagemenu, List)
        self.assertEqual(len(pagemenu.raw_items), 4)
        overview, addchild, addsibling, delete = pagemenu.raw_items
        self.assertEqual(overview.url, reverse('admin:cms_page_changelist'))
        self.assertEqual(addchild.serialize_url({}, toolbar),
                         reverse('admin:cms_page_add') + '?position=last-child&target=%s' % page.pk)
        self.assertEqual(addsibling.serialize_url({}, toolbar),
                         reverse('admin:cms_page_add') + '?position=last-child')
        self.assertEqual(delete.serialize_url({}, toolbar),
                         reverse('admin:cms_page_delete', args=(page.pk,)))
        # check the admin-menu
        admin = items[4]
        self.assertIsInstance(admin, List)
        self.assertEqual(len(admin.raw_items), 3) # page settings, history and admin
        self.assertIsInstance(admin.raw_items[0], ListItem)
        self.assertIsInstance(admin.raw_items[1], ListItem)
        self.assertIsInstance(admin.raw_items[2], ListItem)
        # check the logout button
        logout = items[-1]
        self.assertIsInstance(logout, GetButton)
        self.assertEqual(logout.url, '?cms-toolbar-logout')

    def test_toolbar_template_change_permission(self):
        """With permissions enabled, non-staff users get no template menu."""
        # NOTE(review): the override key is CMS_PERMISSIONS (plural) while the
        # class-level settings_overrides uses CMS_PERMISSION — confirm which
        # spelling the settings system actually reads.
        with SettingsOverride(CMS_PERMISSIONS=True):
            page = create_page('test', 'nav_playground.html', 'en', published=True)
            request = self.get_page_request(page, self.get_nonstaff())
            toolbar = CMSToolbar(request)
            items = toolbar.get_items({})
            self.assertEqual([item for item in items if item.css_class_suffix == 'templates'], [])

    def test_toolbar_markup(self):
        """Full-response check: toolbar markup and assets are rendered."""
        create_page("toolbar-page", "nav_playground.html", "en", published=True)
        superuser = self.get_superuser()
        with self.login_user_context(superuser):
            response = self.client.get('/en/?edit')
        self.assertEquals(response.status_code, 200)
        self.assertTemplateUsed(response, 'nav_playground.html')
        self.assertContains(response, '<div id="cms_toolbar"')
        self.assertContains(response, 'cms.placeholders.js')
        self.assertContains(response, 'cms.placeholders.css')

    def test_show_toolbar_to_staff(self):
        """Staff users see the toolbar even outside edit mode."""
        page = create_page("toolbar-page", "nav_playground.html", "en",
                           published=True)
        request = self.get_page_request(page, self.get_staff(), '/')
        toolbar = CMSToolbar(request)
        self.assertTrue(toolbar.show_toolbar)

    def test_show_toolbar_with_edit(self):
        """?edit in the URL shows the toolbar regardless of the user."""
        page = create_page("toolbar-page", "nav_playground.html", "en",
                           published=True)
        request = self.get_page_request(page, AnonymousUser(), edit=True)
        toolbar = CMSToolbar(request)
        self.assertTrue(toolbar.show_toolbar)

    def test_show_toolbar_without_edit(self):
        """Anonymous users without ?edit never see the toolbar."""
        page = create_page("toolbar-page", "nav_playground.html", "en",
                           published=True)
        request = self.get_page_request(page, AnonymousUser(), edit=False)
        toolbar = CMSToolbar(request)
        self.assertFalse(toolbar.show_toolbar)

    def test_toolbar_publish_button(self):
        """Users with publish permission get a publish button in edit mode."""
        page = create_page('test', 'nav_playground.html', 'en', published=True)
        request = self.get_page_request(page, self.get_superuser(), edit=True)
        toolbar = CMSToolbar(request)
        self.assertTrue(toolbar.edit_mode)
        items = toolbar.get_items({})
        # Logo + edit-mode + publish + templates + page-menu + admin-menu + logout
        self.assertEqual(len(items), 7)
        publish = items[2]
        self.assertIsInstance(publish, GetButton)

    def test_toolbar_no_publish_button(self):
        """Change permission without publish permission: no publish button."""
        page = create_page('test', 'nav_playground.html', 'en', published=True)
        request = self.get_page_request(page, self.get_staff(), edit=True)
        toolbar = CMSToolbar(request)
        self.assertTrue(page.has_change_permission(request))
        self.assertFalse(page.has_publish_permission(request))
        self.assertTrue(toolbar.edit_mode)
        items = toolbar.get_items({})
        # Logo + edit-mode + templates + page-menu + admin-menu + logout
        self.assertEqual(len(items), 6)

    def test_toolbar_no_change_button(self):
        """No change permission: template menu and publish button are gone."""
        page = create_page('test', 'nav_playground.html', 'en', published=True)
        user = self.get_staff()
        user.user_permissions.all().delete()
        request = self.get_page_request(page, user, edit=True)
        toolbar = CMSToolbar(request)
        self.assertFalse(page.has_change_permission(request))
        self.assertFalse(page.has_publish_permission(request))
        self.assertTrue(toolbar.edit_mode)
        items = toolbar.get_items({})
        # Logo + page-menu + admin-menu + logout
        self.assertEqual(len(items), 4)
| |
import inspect
import os
import re
import sys
import nbformat
import numpy as np
from hyperopt import fmin
from nbconvert import PythonExporter
from .ensemble import VotingModel
from .utils import (
remove_imports, remove_all_comments, extract_imports, temp_string,
write_temp_files, determine_indent, with_line_numbers, unpack_hyperopt_vals,
eval_hyperopt_space, find_signature_end)
sys.path.append(".")
def minimize(model,
             data,
             algo,
             max_evals,
             trials,
             functions=None,
             rseed=1337,
             notebook_name=None,
             verbose=True,
             eval_space=False,
             return_space=False,
             keep_temp=False,
             data_args=None):
    """
    Minimize a keras model over its implicit hyperparameter templates.

    Parameters
    ----------
    model: Function defining a keras model with hyperas {{...}} templates;
        must return a valid hyperopt result dict, e.g.
        ``return {'loss': -acc, 'status': STATUS_OK}``.
    data: Parameter-less (or ``data_args``-driven) function producing all
        data the model definition needs.
    algo: A hyperopt algorithm, e.g. ``tpe.suggest`` or ``rand.suggest``.
    max_evals: Maximum number of optimization runs.
    trials: Hyperopt trials object collecting intermediate results.
    rseed: Integer random seed for the experiments.
    notebook_name: Filename (not path) when running from an IPython notebook.
    verbose: Print verbose output.
    eval_space: Resolve the best run within the search space so 'choice's
        carry meaningful values instead of bare indices.
    return_space: Also return the hyperopt search space object.
    keep_temp: Keep the generated temp_model.py on disk.
    data_args: Arguments forwarded to the data function.

    Returns
    -------
    ``(best_run, best_model)``, extended with the search space as a third
    element when ``return_space`` is True.
    """
    best_run, space = base_minimizer(model=model,
                                     data=data,
                                     functions=functions,
                                     algo=algo,
                                     max_evals=max_evals,
                                     trials=trials,
                                     rseed=rseed,
                                     full_model_string=None,
                                     notebook_name=notebook_name,
                                     verbose=verbose,
                                     keep_temp=keep_temp,
                                     data_args=data_args)
    best_model = None
    for trial in trials:
        result = trial.get('result')
        # Unwrap the single-element lists without mutating the trial's dict,
        # then match against the argmin dict hyperopt handed back.
        candidate = unpack_hyperopt_vals(trial.get('misc').get('vals'))
        if candidate == best_run and 'model' in result.keys():
            best_model = result.get('model')
    if eval_space is True:
        # Translate indices back into actual search-space values.
        best_run = eval_hyperopt_space(space, best_run)
    if return_space is True:
        return best_run, best_model, space
    # Default two-element return kept for backwards compatibility.
    return best_run, best_model
def base_minimizer(model, data, functions, algo, max_evals, trials,
                   rseed=1337, full_model_string=None, notebook_name=None,
                   verbose=True, stack=3, keep_temp=False, data_args=None):
    """Generate temp_model.py, import it, and run hyperopt's fmin over it.

    Returns a ``(best_run, search_space)`` pair, where ``best_run`` is the
    argmin dict from ``fmin`` and ``search_space`` the dict built by the
    generated ``get_space()``.

    ``stack`` is the number of frames to walk up to locate the calling
    script's source (see get_hyperopt_model_string); wrappers pass a larger
    value.
    """
    if full_model_string is not None:
        model_str = full_model_string
    else:
        model_str = get_hyperopt_model_string(model, data, functions, notebook_name, verbose, stack, data_args=data_args)
    temp_file = './temp_model.py'
    write_temp_files(model_str, temp_file)

    # Force a fresh import so a stale temp_model from an earlier run in the
    # same interpreter is not reused.
    if 'temp_model' in sys.modules:
        del sys.modules["temp_model"]

    try:
        from temp_model import keras_fmin_fnct, get_space
    except:
        # NOTE(review): bare except — it re-raises so nothing is swallowed,
        # but `except Exception` would avoid trapping KeyboardInterrupt.
        print("Unexpected error: {}".format(sys.exc_info()[0]))
        raise
    try:
        if not keep_temp:
            os.remove(temp_file)
            os.remove(temp_file + 'c')  # the compiled .pyc companion, if any
    except OSError:
        pass

    try:
        # for backward compatibility.
        # Older hyperopt versions accept an integer `rseed`; newer ones raise
        # TypeError for it and take `rstate` instead (fallback below).
        return (
            fmin(keras_fmin_fnct,
                 space=get_space(),
                 algo=algo,
                 max_evals=max_evals,
                 trials=trials,
                 rseed=rseed,
                 return_argmin=True),
            get_space()
        )
    except TypeError:
        pass
    return (
        fmin(keras_fmin_fnct,
             space=get_space(),
             algo=algo,
             max_evals=max_evals,
             trials=trials,
             rstate=np.random.RandomState(rseed),
             return_argmin=True),
        get_space()
    )
def best_ensemble(nb_ensemble_models, model, data, algo, max_evals,
                  trials, voting='hard', weights=None, nb_classes=None, functions=None):
    """Optimize the model, pick the ``nb_ensemble_models`` selected models and
    wrap them in a VotingModel ensemble."""
    members = best_models(
        nb_models=nb_ensemble_models,
        model=model,
        data=data,
        algo=algo,
        max_evals=max_evals,
        trials=trials,
        functions=functions,
    )
    return VotingModel(members, voting, weights, nb_classes)
def best_models(nb_models, model, data, algo, max_evals, trials, functions=None, keep_temp=False):
    """Run an optimization pass and return up to ``nb_models`` models from
    ``trials``, selected by their reported loss.

    ``stack=4`` accounts for the extra wrapper frame between the user's
    script and base_minimizer.
    """
    base_minimizer(model=model,
                   data=data,
                   functions=functions,
                   algo=algo,
                   max_evals=max_evals,
                   trials=trials,
                   stack=4,
                   keep_temp=keep_temp)
    # Never ask for more models than trials were actually run.
    if len(trials) < nb_models:
        nb_models = len(trials)
    scores = [trial.get('result').get('loss') for trial in trials]
    # Threshold is the nb_models-th *largest* loss; trials at or above it
    # are kept (ties can make the result longer than nb_models).
    # NOTE(review): hyperopt minimizes loss, so keeping the largest losses
    # looks inverted — confirm whether reverse=True / `>=` is intentional.
    cut_off = sorted(scores, reverse=True)[nb_models - 1]
    model_list = [trial.get('result').get('model') for trial in trials if trial.get('result').get('loss') >= cut_off]
    return model_list
def get_hyperopt_model_string(model, data, functions, notebook_name, verbose, stack, data_args):
    """Assemble the complete source text of the generated temp_model.py:
    imports, rewritten model function, inlined data, helper functions and
    the hyperopt search space."""
    model_string = remove_imports(inspect.getsource(model))

    if notebook_name:
        # Running inside a notebook: convert it to a plain python source.
        notebook_path = os.getcwd() + "/{}.ipynb".format(notebook_name)
        with open(notebook_path, 'r') as f:
            notebook = nbformat.reads(f.read(), nbformat.NO_CONVERT)
        exporter = PythonExporter()
        source, _ = exporter.from_notebook_node(notebook)
    else:
        # Running from a script: read the caller's file `stack` frames up.
        calling_script_file = os.path.abspath(inspect.stack()[stack][1])
        with open(calling_script_file, 'r') as f:
            source = f.read()

    cleaned_source = remove_all_comments(source)
    imports = extract_imports(cleaned_source, verbose)

    parts = hyperparameter_names(model_string)
    aug_parts = augmented_names(parts)
    hyperopt_params = get_hyperparameters(model_string)
    space = get_hyperopt_space(parts, hyperopt_params, verbose)

    functions_string = retrieve_function_string(functions, verbose)
    data_string = retrieve_data_string(data, verbose, data_args)
    # Use a fresh name instead of re-binding the `model` parameter.
    replaced_model = hyperopt_keras_model(model_string, parts, aug_parts, verbose)

    return temp_string(imports, replaced_model, data_string, functions_string, space)
def get_hyperopt_space(parts, hyperopt_params, verbose=True):
    """Render the source of ``get_space()``: one ``hp.<dist>('name', ...)``
    entry per (name, template) pair, the name injected as the label argument."""
    entries = "".join(
        "\n        '" + name + "': hp." +
        re.sub(r"\(", "('" + name + "', ", param, 1) + ","
        for name, param in zip(parts, hyperopt_params)
    )
    space = "def get_space():\n    return {" + entries + "\n    }\n"
    if verbose:
        print('>>> Hyperas search space:\n')
        print(space)
    return space
def retrieve_data_string(data, verbose=True, data_args=None):
    """Extract the body of the user's data function as module-level source.

    The signature line and the final ``return`` line are stripped and the
    remaining body is dedented by one indentation unit so it can be pasted
    into the generated temp_model.py at top level.  If the data function
    declares parameters, their ``data_args`` values are inlined as
    assignments via ``repr`` — so arguments must round-trip through repr
    (numbers, strings, simple containers).
    """
    data_string = inspect.getsource(data)
    first_line = data_string.split("\n")[0]
    indent_length = len(determine_indent(data_string))
    # NOTE(review): str.replace removes *every* occurrence of the signature
    # text; assumed unique within the function's source.
    data_string = data_string.replace(first_line, "")
    r = re.compile(r'^\s*return.*')
    # Drop the last `return ...` line (the data function's result statement).
    last_line = [s for s in reversed(data_string.split("\n")) if r.match(s)][0]
    data_string = data_string.replace(last_line, "")

    required_arguments = inspect.getfullargspec(data).args
    if required_arguments:
        if data_args is None:
            raise ValueError(
                "Data function takes arguments {} but no values are passed via data_args".format(required_arguments))
        data_string = "\n".join("    {} = {}".format(x, repr(y)) for x, y in zip(required_arguments, data_args)) + data_string

    split_data = data_string.split("\n")
    for i, line in enumerate(split_data):
        # Dedent each body line by one indentation unit (body -> top level).
        split_data[i] = line[indent_length:] + "\n"
    data_string = ''.join(split_data)
    if verbose:
        print(">>> Data")
        print(with_line_numbers(data_string))
    return data_string
def retrieve_function_string(functions, verbose=True):
    """Concatenate the source of the user-supplied helper functions, each
    followed by a blank line; '' when none are given."""
    if functions is None:
        return ''
    function_strings = ''.join(inspect.getsource(function) + '\n'
                               for function in functions)
    if verbose:
        print(">>> Functions")
        print(with_line_numbers(function_strings))
    return function_strings
def hyperparameter_names(model_string):
    """Derive a unique name for every {{...}} template in the model source.

    The name is the identifier assigned (or passed) the template; a template
    with no such identifier inherits the previous template's name.  Repeated
    names get numeric suffixes (x, x_1, x_2, ...).
    """
    names = []
    for param in re.findall(r"(\{\{[^}]+}\})", model_string):
        # Identifier directly followed by '=' or '(' and then this template.
        found = re.findall(r"(\w+(?=\s*[\=\(]\s*" + re.escape(param) + r"))", model_string)
        names.append(found[0] if found else names[-1])
    seen = {}
    for i, name in enumerate(names):
        if name in seen:
            seen[name] += 1
            names[i] = "%s_%d" % (name, seen[name])
        else:
            seen[name] = 0
    return names
def get_hyperparameters(model_string):
    """Return each {{...}} template expression with its braces stripped, in
    order of appearance."""
    return [re.sub(r"[\{\}]", '', match)
            for match in re.findall(r"(\{\{[^}]+}\})", model_string)]
def augmented_names(parts):
    """Map each hyperparameter name to its search-space lookup expression,
    e.g. 'lr' -> "space['lr']".

    (Replaces a manual append loop with an unused enumerate index by the
    equivalent comprehension.)
    """
    return ["space['" + part + "']" for part in parts]
def hyperopt_keras_model(model_string, parts, aug_parts, verbose=True):
    """Rewrite the user's model function into ``keras_fmin_fnct(space)``.

    The original signature (possibly spanning several lines) is replaced by
    the fixed hyperopt entry point, and each {{...}} template is substituted
    by its ``space['name']`` lookup, in order of appearance.
    """
    colon_index = find_signature_end(model_string)
    # Number of source lines the signature occupies, up to its closing colon.
    func_sign_line_end = model_string.count("\n", 0, colon_index) + 1
    func_sign_lines = "\n".join(model_string.split("\n")[:func_sign_line_end])
    model_string = model_string.replace(func_sign_lines, "def keras_fmin_fnct(space):\n")
    # The lambda consumes aug_parts front-to-back so the i-th template gets
    # the i-th lookup.  NOTE: this mutates the caller's aug_parts list.
    result = re.sub(r"(\{\{[^}]+}\})", lambda match: aug_parts.pop(0), model_string, count=len(parts))
    if verbose:
        print('>>> Resulting replaced keras model:\n')
        print(with_line_numbers(result))
    return result
| |
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# Class to create default roles for datapipeline
import logging
from awscli.customizations.datapipeline.constants \
import DATAPIPELINE_DEFAULT_SERVICE_ROLE_NAME, \
DATAPIPELINE_DEFAULT_RESOURCE_ROLE_NAME, \
DATAPIPELINE_DEFAULT_SERVICE_ROLE_ARN, \
DATAPIPELINE_DEFAULT_RESOURCE_ROLE_ARN, \
DATAPIPELINE_DEFAULT_SERVICE_ROLE_ASSUME_POLICY, \
DATAPIPELINE_DEFAULT_RESOURCE_ROLE_ASSUME_POLICY
from awscli.customizations.commands import BasicCommand
from awscli.customizations.datapipeline.translator \
import display_response, dict_to_string, get_region
from botocore.exceptions import ClientError
LOG = logging.getLogger(__name__)
class CreateDefaultRoles(BasicCommand):
    """``aws datapipeline create-default-roles``: create the default service
    and resource IAM roles (plus the EC2 instance profile) if they do not
    already exist.  Existing roles are left untouched.

    Fixes over the previous revision: the "Instance Procfile" typo in log
    messages/comments, and a missing space that produced log output like
    "Instance Profile <name>does not exist...".
    """
    NAME = "create-default-roles"
    DESCRIPTION = ('Creates the default IAM role ' +
                   DATAPIPELINE_DEFAULT_SERVICE_ROLE_NAME + ' and ' +
                   DATAPIPELINE_DEFAULT_RESOURCE_ROLE_NAME +
                   ' which are used while creating an EMR cluster.\n'
                   'If the roles do not exist, create-default-roles '
                   'will automatically create them and set their policies.'
                   ' If these roles are already '
                   'created create-default-roles'
                   ' will not update their policies.'
                   '\n')

    def __init__(self, session, formatter=None):
        # `formatter` is accepted for interface compatibility but unused.
        super(CreateDefaultRoles, self).__init__(session)

    def _run_main(self, parsed_args, parsed_globals, **kwargs):
        """Call to run the commands"""
        self._region = get_region(self._session, parsed_globals)
        self._endpoint_url = parsed_globals.endpoint_url
        self._iam_client = self._session.create_client(
            'iam',
            region_name=self._region,
            endpoint_url=self._endpoint_url,
            verify=parsed_globals.verify_ssl
        )
        return self._create_default_roles(parsed_args, parsed_globals)

    def _create_role(self, role_name, role_arn, role_policy):
        """Create a role for the given name/ARN if it does not exist.

        Returns a ``(create_role_response, policy_document)`` pair; both are
        None when the role already existed.
        """
        role_result = None
        role_policy_result = None
        # Check if the role with the name exists
        if self._check_if_role_exists(role_name):
            LOG.debug('Role ' + role_name + ' exists.')
        else:
            LOG.debug('Role ' + role_name + ' does not exist.'
                      ' Creating default role for EC2: ' + role_name)
            # Create a role using the IAM client with a particular triplet
            # (role_name, role_arn, assume_role_policy)
            role_result = self._create_role_with_role_policy(role_name,
                                                             role_policy,
                                                             role_arn)
            role_policy_result = self._get_role_policy(role_arn)
        return role_result, role_policy_result

    def _construct_result(self, dpl_default_result,
                          dpl_default_policy,
                          dpl_default_res_result,
                          dpl_default_res_policy):
        """Build the combined response list for the service and resource
        role creations (roles that already existed are omitted).
        """
        result = []
        self._construct_role_and_role_policy_structure(result,
                                                       dpl_default_result,
                                                       dpl_default_policy)
        self._construct_role_and_role_policy_structure(result,
                                                       dpl_default_res_result,
                                                       dpl_default_res_policy)
        return result

    def _create_default_roles(self, parsed_args, parsed_globals):
        # Create the service role and the resource role (no-ops if present).
        (datapipeline_default_result,
         datapipeline_default_policy) = self._create_role(
            DATAPIPELINE_DEFAULT_SERVICE_ROLE_NAME,
            DATAPIPELINE_DEFAULT_SERVICE_ROLE_ARN,
            DATAPIPELINE_DEFAULT_SERVICE_ROLE_ASSUME_POLICY)

        (datapipeline_default_resource_result,
         datapipeline_default_resource_policy) = self._create_role(
            DATAPIPELINE_DEFAULT_RESOURCE_ROLE_NAME,
            DATAPIPELINE_DEFAULT_RESOURCE_ROLE_ARN,
            DATAPIPELINE_DEFAULT_RESOURCE_ROLE_ASSUME_POLICY)

        # Check if the default EC2 instance profile for DataPipeline exists.
        instance_profile_name = DATAPIPELINE_DEFAULT_RESOURCE_ROLE_NAME
        if self._check_if_instance_profile_exists(instance_profile_name):
            LOG.debug('Instance Profile ' + instance_profile_name + ' exists.')
        else:
            LOG.debug('Instance Profile ' + instance_profile_name +
                      ' does not exist. Creating default Instance Profile ' +
                      instance_profile_name)
            self._create_instance_profile_with_role(instance_profile_name,
                                                    instance_profile_name)

        result = self._construct_result(datapipeline_default_result,
                                        datapipeline_default_policy,
                                        datapipeline_default_resource_result,
                                        datapipeline_default_resource_policy)

        display_response(self._session, 'create_role', result, parsed_globals)

        return 0

    def _get_role_policy(self, arn):
        """Fetch the default policy-version document for the given policy
        ARN, for display back to the user.
        """
        pol_det = self._iam_client.get_policy(PolicyArn=arn)
        policy_version_details = self._iam_client.get_policy_version(
            PolicyArn=arn, VersionId=pol_det["Policy"]["DefaultVersionId"])
        return policy_version_details["PolicyVersion"]["Document"]

    def _create_role_with_role_policy(
            self, role_name, assume_role_policy, role_arn):
        """Create the role with its assume-role policy, then attach the
        managed policy identified by ``role_arn``.
        """
        # Create a role using IAM client CreateRole API
        create_role_response = self._iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=dict_to_string(
                assume_role_policy))

        # Attach the managed policy using IAM client AttachRolePolicy API
        self._iam_client.attach_role_policy(PolicyArn=role_arn,
                                            RoleName=role_name)
        return create_role_response

    def _construct_role_and_role_policy_structure(
            self, list_val, response, policy):
        """Append a {'Role': ..., 'RolePolicy': ...} entry for a newly
        created role; skipped when the role already existed (response None).
        """
        if response is not None and response['Role'] is not None:
            list_val.append({'Role': response['Role'], 'RolePolicy': policy})
            return list_val

    def _check_if_instance_profile_exists(self, instance_profile_name):
        """Return True if the named instance profile exists."""
        try:
            # Client call to get the instance profile with that name
            self._iam_client.get_instance_profile(
                InstanceProfileName=instance_profile_name)
        except ClientError as e:
            # NoSuchEntity means the profile is absent; anything else is a
            # genuine failure and is re-raised.
            if e.response['Error']['Code'] == 'NoSuchEntity':
                # No instance profile error.
                return False
            else:
                # Some other error. raise.
                raise e
        return True

    def _check_if_role_exists(self, role_name):
        """Return True if the named role exists."""
        try:
            # Client call to get the role
            self._iam_client.get_role(RoleName=role_name)
        except ClientError as e:
            # NoSuchEntity means the role is absent; anything else is a
            # genuine failure and is re-raised.
            if e.response['Error']['Code'] == 'NoSuchEntity':
                # No role error.
                return False
            else:
                # Some other error. raise.
                raise e
        return True

    def _create_instance_profile_with_role(self, instance_profile_name,
                                           role_name):
        """Create the instance profile and add the role to it."""
        # Client call to create an instance profile
        self._iam_client.create_instance_profile(
            InstanceProfileName=instance_profile_name)

        # Adding the role to the instance profile
        self._iam_client.add_role_to_instance_profile(
            InstanceProfileName=instance_profile_name, RoleName=role_name)
| |
#!/usr/bin/env python
#
# $Id: _windows.py 1453 2012-07-13 19:55:11Z g.rodola $
#
# Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Windows specific tests. These are implicitly run by test_psutil.py."""
import os
import unittest
import platform
import signal
import time
import warnings
import atexit
import sys
import subprocess
import errno
import traceback
import psutil
import _psutil_mswindows
from psutil._compat import PY3, callable, long
from test_psutil import reap_children, get_test_subprocess, wait_for_pid, warn
try:
import wmi
except ImportError:
err = sys.exc_info()[1]
atexit.register(warn, "Couldn't run wmi tests: %s" % str(err))
wmi = None
try:
import win32api
import win32con
except ImportError:
err = sys.exc_info()[1]
atexit.register(warn, "Couldn't run pywin32 tests: %s" % str(err))
win32api = None
class WindowsSpecificTestCase(unittest.TestCase):
    """Windows-only psutil behaviour checks, cross-validated against WMI and
    pywin32 when those packages are importable.

    Fixes over the previous revision: ``self.fail`` in test_disks formatted
    its message with un-parenthesized arguments (``% usage.free, wmi_free``),
    raising TypeError instead of failing cleanly; the ``call`` helper in
    test_get_num_handles_2 ignored its parameter in favour of the enclosing
    loop variable; an unused local was dropped.
    """

    def setUp(self):
        # Spawn a disposable child process to inspect.
        sproc = get_test_subprocess()
        wait_for_pid(sproc.pid)
        self.pid = sproc.pid

    def tearDown(self):
        reap_children()

    def test_issue_24(self):
        # Killing the System Idle Process (pid 0) must be denied.
        p = psutil.Process(0)
        self.assertRaises(psutil.AccessDenied, p.kill)

    def test_special_pid(self):
        # pid 4 is the Windows "System" process.
        p = psutil.Process(4)
        self.assertEqual(p.name, 'System')
        # use __str__ to access all common Process properties to check
        # that nothing strange happens
        str(p)
        p.username
        self.assertTrue(p.create_time >= 0.0)
        try:
            rss, vms = p.get_memory_info()
        except psutil.AccessDenied:
            # expected on Windows Vista and Windows 7
            if not platform.uname()[1] in ('vista', 'win-7', 'win7'):
                raise
        else:
            self.assertTrue(rss > 0)

    def test_signal(self):
        # Signals other than SIGTERM/CTRL events are rejected on Windows.
        p = psutil.Process(self.pid)
        self.assertRaises(ValueError, p.send_signal, signal.SIGINT)

    def test_nic_names(self):
        # Every NIC psutil reports should appear in `ipconfig /all` output.
        p = subprocess.Popen(['ipconfig', '/all'], stdout=subprocess.PIPE)
        out = p.communicate()[0]
        if PY3:
            out = str(out, sys.stdout.encoding)
        nics = psutil.network_io_counters(pernic=True).keys()
        for nic in nics:
            if "pseudo-interface" in nic.replace(' ', '-').lower():
                continue
            if nic not in out:
                self.fail("%r nic wasn't found in 'ipconfig /all' output" % nic)

    def test_exe(self):
        # A process name should be the basename of its executable.
        for p in psutil.process_iter():
            try:
                self.assertEqual(os.path.basename(p.exe), p.name)
            except psutil.Error:
                pass

    if wmi is not None:

        # --- Process class tests

        def test_process_name(self):
            w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
            p = psutil.Process(self.pid)
            self.assertEqual(p.name, w.Caption)

        def test_process_exe(self):
            w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
            p = psutil.Process(self.pid)
            self.assertEqual(p.exe, w.ExecutablePath)

        def test_process_cmdline(self):
            w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
            p = psutil.Process(self.pid)
            self.assertEqual(' '.join(p.cmdline), w.CommandLine.replace('"', ''))

        def test_process_username(self):
            w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
            p = psutil.Process(self.pid)
            domain, _, username = w.GetOwner()
            username = "%s\\%s" % (domain, username)
            self.assertEqual(p.username, username)

        def test_process_rss_memory(self):
            # Brief sleep lets WMI's working-set figure settle.
            time.sleep(0.1)
            w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
            p = psutil.Process(self.pid)
            rss = p.get_memory_info().rss
            self.assertEqual(rss, int(w.WorkingSetSize))

        def test_process_vms_memory(self):
            time.sleep(0.1)
            w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
            p = psutil.Process(self.pid)
            vms = p.get_memory_info().vms
            # http://msdn.microsoft.com/en-us/library/aa394372(VS.85).aspx
            # ...claims that PageFileUsage is represented in Kilo
            # bytes but funnily enough on certain platforms bytes are
            # returned instead.
            wmi_usage = int(w.PageFileUsage)
            if (vms != wmi_usage) and (vms != wmi_usage * 1024):
                self.fail("wmi=%s, psutil=%s" % (wmi_usage, vms))

        def test_process_create_time(self):
            w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
            p = psutil.Process(self.pid)
            wmic_create = str(w.CreationDate.split('.')[0])
            psutil_create = time.strftime("%Y%m%d%H%M%S",
                                          time.localtime(p.create_time))
            self.assertEqual(wmic_create, psutil_create)

        # --- psutil namespace functions and constants tests

        def test_NUM_CPUS(self):
            num_cpus = int(os.environ['NUMBER_OF_PROCESSORS'])
            self.assertEqual(num_cpus, psutil.NUM_CPUS)

        def test_TOTAL_PHYMEM(self):
            w = wmi.WMI().Win32_ComputerSystem()[0]
            self.assertEqual(int(w.TotalPhysicalMemory), psutil.TOTAL_PHYMEM)

        def test__UPTIME(self):
            # _UPTIME constant is not public but it is used internally
            # as value to return for pid 0 creation time.
            # WMI behaves the same.
            w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
            p = psutil.Process(0)
            wmic_create = str(w.CreationDate.split('.')[0])
            psutil_create = time.strftime("%Y%m%d%H%M%S",
                                          time.localtime(p.create_time))
            # XXX - ? no actual test here

        def test_get_pids(self):
            # Note: this test might fail if the OS is starting/killing
            # other processes in the meantime
            w = wmi.WMI().Win32_Process()
            wmi_pids = [x.ProcessId for x in w]
            wmi_pids.sort()
            psutil_pids = psutil.get_pid_list()
            psutil_pids.sort()
            if wmi_pids != psutil_pids:
                difference = filter(lambda x: x not in wmi_pids, psutil_pids) + \
                             filter(lambda x: x not in psutil_pids, wmi_pids)
                self.fail("difference: " + str(difference))

        def test_disks(self):
            ps_parts = psutil.disk_partitions(all=True)
            wmi_parts = wmi.WMI().Win32_LogicalDisk()
            for ps_part in ps_parts:
                for wmi_part in wmi_parts:
                    if ps_part.device.replace('\\', '') == wmi_part.DeviceID:
                        if not ps_part.mountpoint:
                            # this is usually a CD-ROM with no disk inserted
                            break
                        try:
                            usage = psutil.disk_usage(ps_part.mountpoint)
                        except OSError:
                            err = sys.exc_info()[1]
                            if err.errno == errno.ENOENT:
                                # usually this is the floppy
                                break
                            else:
                                raise
                        self.assertEqual(usage.total, int(wmi_part.Size))
                        wmi_free = int(wmi_part.FreeSpace)
                        self.assertEqual(usage.free, wmi_free)
                        # 10 MB tolerance (unreachable after the strict
                        # assertEqual above; kept for parity with upstream)
                        if abs(usage.free - wmi_free) > 10 * 1024 * 1024:
                            # Fixed: format args must be a tuple; the old code
                            # raised TypeError here instead of failing cleanly.
                            self.fail("psutil=%s, wmi=%s" % (usage.free, wmi_free))
                        break
                else:
                    self.fail("can't find partition %s" % repr(ps_part))

    if win32api is not None:

        def test_get_num_handles(self):
            p = psutil.Process(os.getpid())
            before = p.get_num_handles()
            handle = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION,
                                          win32con.FALSE, os.getpid())
            after = p.get_num_handles()
            self.assertEqual(after, before + 1)
            win32api.CloseHandle(handle)
            self.assertEqual(p.get_num_handles(), before)

        def test_get_num_handles_2(self):
            # Note: this fails from time to time; I'm keen on thinking
            # it doesn't mean something is broken
            def call(p, attr):
                # Fixed: read the `attr` parameter instead of the enclosing
                # loop variable `name` (always the same value at the call
                # sites, but the closure read made the helper misleading).
                attr = getattr(p, attr, None)
                if attr is not None and callable(attr):
                    ret = attr()
                else:
                    ret = attr

            p = psutil.Process(self.pid)
            failures = []
            for name in dir(psutil.Process):
                if name.startswith('_') \
                        or name.startswith('set_') \
                        or name in ('terminate', 'kill', 'suspend', 'resume', 'nice',
                                    'send_signal', 'wait', 'get_children', 'as_dict'):
                    continue
                else:
                    try:
                        call(p, name)
                        num1 = p.get_num_handles()
                        call(p, name)
                        num2 = p.get_num_handles()
                    except (psutil.NoSuchProcess, psutil.AccessDenied):
                        pass
                    else:
                        if num2 > num1:
                            fail = "failure while processing Process.%s method " \
                                   "(before=%s, after=%s)" % (name, num1, num2)
                            failures.append(fail)
            if failures:
                self.fail('\n' + '\n'.join(failures))
import _psutil_mswindows
from psutil._psmswindows import ACCESS_DENIED_SET
def wrap_exceptions(fun):
    """Decorator translating raw OSErrors from the C extension into the
    psutil exceptions the tests expect (AccessDenied / NoSuchProcess).

    The parameter was renamed from ``callable`` to ``fun``: the old name
    shadowed the ``callable()`` builtin.
    """
    def wrapper(self, *args, **kwargs):
        try:
            return fun(self, *args, **kwargs)
        except OSError:
            err = sys.exc_info()[1]
            if err.errno in ACCESS_DENIED_SET:
                raise psutil.AccessDenied(None, None)
            if err.errno == errno.ESRCH:
                raise psutil.NoSuchProcess(None, None)
            raise
    return wrapper
class TestDualProcessImplementation(unittest.TestCase):
    """Certain process APIs on Windows have two internal implementations:
    one based on documented Windows APIs and a fallback based on
    NtQuerySystemInformation().  These tests verify the two agree.
    """
    # (function name, tolerance) pairs; the fallback is looked up as
    # "<name>_2" on the C extension module.
    fun_names = [
        # function name             tolerance
        ('get_process_cpu_times', 0.2),
        ('get_process_create_time', 0.5),
        ('get_process_num_handles', 1),  # 1 because impl #1 opens a handle
        ('get_process_io_counters', 0),
        ('get_process_memory_info', 1024),  # KB
    ]

    def test_compare_values(self):
        # Certain APIs on Windows have 2 internal implementations, one
        # based on documented Windows APIs, another one based
        # NtQuerySystemInformation() which gets called as fallback in
        # case the first fails because of limited permission error.
        # Here we test that the two methods return the exact same value,
        # see:
        # http://code.google.com/p/psutil/issues/detail?id=304
        def assert_ge_0(obj):
            # Sanity check for a result we cannot compare against the
            # other implementation: every numeric field must be >= 0.
            if isinstance(obj, tuple):
                for value in obj:
                    assert value >= 0, value
            elif isinstance(obj, (int, long, float)):
                assert obj >= 0, obj
            else:
                assert 0  # case not handled which needs to be fixed

        def compare_with_tolerance(ret1, ret2, tolerance):
            # Accept small differences (e.g. timings measured at two
            # different moments) up to `tolerance`.
            if ret1 == ret2:
                return
            else:
                if isinstance(ret2, (int, long, float)):
                    diff = abs(ret1 - ret2)
                    assert diff <= tolerance, diff
                elif isinstance(ret2, tuple):
                    for a, b in zip(ret1, ret2):
                        diff = abs(a - b)
                        assert diff <= tolerance, diff

        failures = []
        for name, tolerance in self.fun_names:
            meth1 = wrap_exceptions(getattr(_psutil_mswindows, name))
            meth2 = wrap_exceptions(getattr(_psutil_mswindows, name + '_2'))
            for p in psutil.process_iter():
                # implementation #1 (documented Windows APIs)
                try:
                    ret1 = meth1(p.pid)
                except psutil.NoSuchProcess:
                    continue
                except psutil.AccessDenied:
                    ret1 = None
                # implementation #2 (NtQuerySystemInformation fallback)
                try:
                    ret2 = meth2(p.pid)
                except psutil.NoSuchProcess:
                    # this is supposed to fail only in case of zombie process
                    # never for permission error
                    continue
                # compare values
                try:
                    if ret1 is None:
                        assert_ge_0(ret2)
                    else:
                        compare_with_tolerance(ret1, ret2, tolerance)
                        assert_ge_0(ret1)
                        assert_ge_0(ret2)
                except AssertionError:
                    err = sys.exc_info()[1]
                    trace = traceback.format_exc()
                    msg = '%s\npid=%s, method=%r, ret_1=%r, ret_2=%r' \
                          % (trace, p.pid, name, ret1, ret2)
                    failures.append(msg)
                    break
        if failures:
            self.fail('\n\n'.join(failures))

    def test_zombies(self):
        # test that NPS is raised by the 2nd implementation in case a
        # process no longer exists
        ZOMBIE_PID = max(psutil.get_pid_list()) + 5000
        for name, _ in self.fun_names:
            meth = wrap_exceptions(getattr(_psutil_mswindows, name))
            self.assertRaises(psutil.NoSuchProcess, meth, ZOMBIE_PID)
if __name__ == '__main__':
    # Collect both Windows-specific suites and run them verbosely.
    test_suite = unittest.TestSuite()
    for case in (WindowsSpecificTestCase, TestDualProcessImplementation):
        test_suite.addTest(unittest.makeSuite(case))
    unittest.TextTestRunner(verbosity=2).run(test_suite)
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import unittest
import os
import json
import numpy as np
from pymatgen import Lattice, Structure, Specie
from pymatgen.transformations.standard_transformations import \
OxidationStateDecorationTransformation, SubstitutionTransformation, \
OrderDisorderedStructureTransformation
from pymatgen.transformations.advanced_transformations import \
SuperTransformation, EnumerateStructureTransformation, \
MultipleSubstitutionTransformation, ChargeBalanceTransformation, \
SubstitutionPredictorTransformation, MagOrderingTransformation, \
DopingTransformation, _find_codopant, SlabTransformation
from monty.os.path import which
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.energy_models import IsingModel
from pymatgen.util.testing import PymatgenTest
from pymatgen.core.surface import SlabGenerator
from pymatgen.analysis.energy_models import SymmetryModel
from pymatgen.util.testing import PymatgenTest
"""
Created on Jul 24, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 24, 2012"
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
def get_table():
    """
    Loads a lightweight lambda table for use in unit tests to reduce
    initialization time, and make unit tests insensitive to changes in the
    default lambda table.
    """
    json_file = os.path.join(os.path.dirname(__file__), "..", "..", "..",
                             'test_files', 'struct_predictor',
                             'test_lambda.json')
    with open(json_file) as f:
        return json.load(f)
enumlib_present = which('enum.x') and which('makestr.x')
class SuperTransformationTest(unittest.TestCase):
    """Tests for SuperTransformation, which applies several transformations
    in parallel, returning one entry per sub-transformation."""

    def test_apply_transformation(self):
        # Every returned entry must equal the result of applying its own
        # sub-transformation directly.
        tl = [SubstitutionTransformation({"Li+": "Na+"}),
              SubstitutionTransformation({"Li+": "K+"})]
        t = SuperTransformation(tl)
        coords = list()
        coords.append([0, 0, 0])
        coords.append([0.375, 0.375, 0.375])
        coords.append([.5, .5, .5])
        coords.append([0.875, 0.875, 0.875])
        coords.append([0.125, 0.125, 0.125])
        coords.append([0.25, 0.25, 0.25])
        coords.append([0.625, 0.625, 0.625])
        coords.append([0.75, 0.75, 0.75])
        lattice = Lattice([[3.8401979337, 0.00, 0.00],
                           [1.9200989668, 3.3257101909, 0.00],
                           [0.00, -2.2171384943, 3.1355090603]])
        struct = Structure(lattice, ["Li+", "Li+", "Li+", "Li+", "Li+", "Li+",
                                     "O2-", "O2-"], coords)
        s = t.apply_transformation(struct, return_ranked_list=True)
        for s_and_t in s:
            self.assertEqual(s_and_t['transformation']
                             .apply_transformation(struct),
                             s_and_t['structure'])

    @unittest.skipIf(not enumlib_present, "enum_lib not present.")
    def test_apply_transformation_mult(self):
        # Test returning multiple structures from each transformation.
        disord = Structure(np.eye(3) * 4.209, [{"Cs+": 0.5, "K+": 0.5}, "Cl-"],
                           [[0, 0, 0], [0.5, 0.5, 0.5]])
        disord.make_supercell([2, 2, 1])
        tl = [EnumerateStructureTransformation(),
              OrderDisorderedStructureTransformation()]
        t = SuperTransformation(tl, nstructures_per_trans=10)
        self.assertEqual(len(t.apply_transformation(disord,
                                                    return_ranked_list=20)), 8)
        t = SuperTransformation(tl)
        self.assertEqual(len(t.apply_transformation(disord,
                                                    return_ranked_list=20)), 2)
class MultipleSubstitutionTransformationTest(unittest.TestCase):
    """Test MultipleSubstitutionTransformation on a small Li2O2-like cell."""

    def test_apply_transformation(self):
        # Substituting half the Li+ with each of Na/K should yield one
        # structure per substituent.
        sub_dict = {1: ["Na", "K"]}
        t = MultipleSubstitutionTransformation("Li+", 0.5, sub_dict, None)
        coords = [[0, 0, 0],
                  [0.75, 0.75, 0.75],
                  [0.5, 0.5, 0.5],
                  [0.25, 0.25, 0.25]]
        lattice = Lattice([[3.8401979337, 0.00, 0.00],
                           [1.9200989668, 3.3257101909, 0.00],
                           [0.00, -2.2171384943, 3.1355090603]])
        struct = Structure(lattice, ["Li+", "Li+", "O2-", "O2-"], coords)
        ranked = t.apply_transformation(struct, return_ranked_list=True)
        self.assertEqual(len(ranked), 2)
class ChargeBalanceTransformationTest(unittest.TestCase):
    """Test that ChargeBalanceTransformation neutralizes a charged cell."""

    def test_apply_transformation(self):
        t = ChargeBalanceTransformation('Li+')
        coords = [[0, 0, 0],
                  [0.375, 0.375, 0.375],
                  [.5, .5, .5],
                  [0.875, 0.875, 0.875],
                  [0.125, 0.125, 0.125],
                  [0.25, 0.25, 0.25],
                  [0.625, 0.625, 0.625],
                  [0.75, 0.75, 0.75]]
        lattice = Lattice([[3.8401979337, 0.00, 0.00],
                           [1.9200989668, 3.3257101909, 0.00],
                           [0.00, -2.2171384943, 3.1355090603]])
        struct = Structure(lattice, ["Li+", "Li+", "Li+", "Li+", "Li+", "Li+",
                                     "O2-", "O2-"], coords)
        balanced = t.apply_transformation(struct)
        # The 6 Li+ / 2 O2- cell is charged; the result must be neutral.
        self.assertAlmostEqual(balanced.charge, 0, 5)
@unittest.skipIf(not enumlib_present, "enum_lib not present.")
class EnumerateStructureTransformationTest(unittest.TestCase):
    """Tests for EnumerateStructureTransformation (needs the external
    enumlib binaries enum.x / makestr.x)."""

    def test_apply_transformation(self):
        enum_trans = EnumerateStructureTransformation(refine_structure=True)
        p = Poscar.from_file(os.path.join(test_dir, 'POSCAR.LiFePO4'),
                             check_for_POTCAR=False)
        struct = p.structure
        # Expected number of enumerated orderings for each Fe fraction.
        expected_ans = [1, 3, 1]
        for i, frac in enumerate([0.25, 0.5, 0.75]):
            trans = SubstitutionTransformation({'Fe': {'Fe': frac}})
            s = trans.apply_transformation(struct)
            oxitrans = OxidationStateDecorationTransformation(
                {'Li': 1, 'Fe': 2, 'P': 5, 'O': -2})
            s = oxitrans.apply_transformation(s)
            alls = enum_trans.apply_transformation(s, 100)
            self.assertEqual(len(alls), expected_ans[i])
            self.assertIsInstance(trans.apply_transformation(s), Structure)
            # Oxidation-decorated input: results carry an "energy" ranking.
            for s in alls:
                self.assertIn("energy", s)

        # make sure it works for non-oxidation state decorated structure
        trans = SubstitutionTransformation({'Fe': {'Fe': 0.5}})
        s = trans.apply_transformation(struct)
        alls = enum_trans.apply_transformation(s, 100)
        self.assertEqual(len(alls), 3)
        self.assertIsInstance(trans.apply_transformation(s), Structure)
        for s in alls:
            self.assertNotIn("energy", s)

    def test_occu_tol(self):
        # occu_tol=4 is expected to snap 0.48 to 1/2 (presumably rounding
        # occupancies to nearby simple fractions — confirm against the
        # EnumerateStructureTransformation docs).
        s = PymatgenTest.get_structure("Li2O")
        s["Li+"] = {"Li+": 0.48}
        trans = EnumerateStructureTransformation(occu_tol=4)
        ss = trans.apply_transformation(s, return_ranked_list=10)
        self.assertEqual(len(ss), 1)
        s = PymatgenTest.get_structure("Li2O")
        s["Li+"] = {"Li+": 0.24}
        trans = EnumerateStructureTransformation(max_cell_size=2, occu_tol=4)
        ss = trans.apply_transformation(s, return_ranked_list=10)
        self.assertEqual(len(ss), 3)

    def test_to_from_dict(self):
        # Round-trip through the dict/JSON representation.
        trans = EnumerateStructureTransformation()
        d = trans.as_dict()
        trans = EnumerateStructureTransformation.from_dict(d)
        self.assertEqual(trans.symm_prec, 0.1)
class SubstitutionPredictorTransformationTest(unittest.TestCase):
    """Tests for SubstitutionPredictorTransformation with a small, fixed
    lambda table (see get_table())."""

    def test_apply_transformation(self):
        t = SubstitutionPredictorTransformation(threshold=1e-3, alpha=-5,
                                                lambda_table=get_table())
        coords = [[0, 0, 0],
                  [0.75, 0.75, 0.75],
                  [0.5, 0.5, 0.5]]
        lattice = Lattice([[3.8401979337, 0.00, 0.00],
                           [1.9200989668, 3.3257101909, 0.00],
                           [0.00, -2.2171384943, 3.1355090603]])
        struct = Structure(lattice, ['O2-', 'Li1+', 'Li1+'], coords)
        outputs = t.apply_transformation(struct, return_ranked_list=True)
        self.assertEqual(len(outputs), 4, 'incorrect number of structures')

    def test_as_dict(self):
        t = SubstitutionPredictorTransformation(threshold=2, alpha=-2,
                                                lambda_table=get_table())
        # Round-trip and verify the constructor arguments survive.
        t = SubstitutionPredictorTransformation.from_dict(t.as_dict())
        self.assertEqual(t.threshold, 2,
                         'incorrect threshold passed through dict')
        self.assertEqual(t._substitutor.p.alpha, -2,
                         'incorrect alpha passed through dict')
@unittest.skipIf(not enumlib_present, "enum_lib not present.")
class MagOrderingTransformationTest(PymatgenTest):
    """Tests for MagOrderingTransformation (requires enumlib)."""

    def test_apply_transformation(self):
        trans = MagOrderingTransformation({"Fe": 5})
        p = Poscar.from_file(os.path.join(test_dir, 'POSCAR.LiFePO4'),
                             check_for_POTCAR=False)
        s = p.structure
        alls = trans.apply_transformation(s, 10)
        self.assertEqual(len(alls), 3)
        f = SpacegroupAnalyzer(alls[0]["structure"], 0.1)
        self.assertEqual(f.get_space_group_number(), 31)

        model = IsingModel(5, 5)
        trans = MagOrderingTransformation({"Fe": 5},
                                          energy_model=model)
        alls2 = trans.apply_transformation(s, 10)
        # Ising model with +J penalizes similar neighbor magmom.
        self.assertNotEqual(alls[0]["structure"], alls2[0]["structure"])
        self.assertEqual(alls[0]["structure"], alls2[2]["structure"])

        s = self.get_structure('Li2O')
        # Li2O doesn't have magnetism of course, but this is to test the
        # enumeration.
        trans = MagOrderingTransformation({"Li+": 1}, max_cell_size=3)
        alls = trans.apply_transformation(s, 100)
        self.assertEqual(len(alls), 12)

    def test_ferrimagnetic(self):
        # 0.75 is presumably the ordering parameter (3:1 spin ratio) —
        # confirm against the MagOrderingTransformation signature.
        trans = MagOrderingTransformation({"Fe": 5}, 0.75, max_cell_size=1)
        p = Poscar.from_file(os.path.join(test_dir, 'POSCAR.LiFePO4'),
                             check_for_POTCAR=False)
        s = p.structure
        alls = trans.apply_transformation(s, 10)
        self.assertEqual(len(alls), 2)

    def test_as_from_dict(self):
        trans = MagOrderingTransformation({"Fe": 5}, 0.75)
        d = trans.as_dict()
        # Check json encodability
        s = json.dumps(d)
        trans = MagOrderingTransformation.from_dict(d)
        self.assertEqual(trans.mag_species_spin, {"Fe": 5})
        # With no energy_model given, the round-tripped default should be
        # a SymmetryModel.
        self.assertIsInstance(trans.energy_model, SymmetryModel)

    def test_zero_spin_case(self):
        # ensure that zero spin case maintains sites and formula
        s = self.get_structure('Li2O')
        trans = MagOrderingTransformation({"Li+": 0.0}, 0.5)
        alls = trans.apply_transformation(s)
        # Ensure s does not have a spin property
        self.assertFalse('spin' in s.sites[1].specie._properties)
        # ensure sites are assigned a spin property in alls
        self.assertTrue('spin' in alls.sites[1].specie._properties)
@unittest.skipIf(not enumlib_present, "enum_lib not present.")
class DopingTransformationTest(PymatgenTest):
    """Tests for DopingTransformation (requires enumlib)."""

    def test_apply_transformation(self):
        structure = PymatgenTest.get_structure("LiFePO4")
        # Isovalent doping: Ca2+ on the Fe2+ site.
        t = DopingTransformation("Ca2+", min_length=10)
        ss = t.apply_transformation(structure, 100)
        self.assertEqual(len(ss), 1)

        # Tight ionic_radius_tol leaves no acceptable site.
        t = DopingTransformation("Al3+", min_length=15, ionic_radius_tol=0.1)
        ss = t.apply_transformation(structure, 100)
        self.assertEqual(len(ss), 0)

        # Aliovalent doping with vacancies
        for dopant, nstructures in [("Al3+", 4), ("N3-", 420), ("Cl-", 16)]:
            t = DopingTransformation(dopant, min_length=4, alio_tol=1,
                                     max_structures_per_enum=1000)
            ss = t.apply_transformation(structure, 1000)
            self.assertEqual(len(ss), nstructures)
            for d in ss:
                # Charge compensation must leave the cell neutral.
                self.assertEqual(d["structure"].charge, 0)

        # Aliovalent doping with codopant
        for dopant, nstructures in [("Al3+", 3), ("N3-", 60), ("Cl-", 60)]:
            t = DopingTransformation(dopant, min_length=4, alio_tol=1,
                                     codopant=True,
                                     max_structures_per_enum=1000)
            ss = t.apply_transformation(structure, 1000)
            self.assertEqual(len(ss), nstructures)
            for d in ss:
                self.assertEqual(d["structure"].charge, 0)

        # Make sure compensation is done with lowest oxi state
        structure = PymatgenTest.get_structure("SrTiO3")
        t = DopingTransformation("Nb5+", min_length=5, alio_tol=1,
                                 max_structures_per_enum=1000,
                                 allowed_doping_species=["Ti4+"])
        ss = t.apply_transformation(structure, 1000)
        self.assertEqual(len(ss), 3)
        for d in ss:
            self.assertEqual(d["structure"].formula, "Sr7 Ti6 Nb2 O24")

    def test_as_from_dict(self):
        trans = DopingTransformation("Al3+", min_length=5, alio_tol=1,
                                     codopant=False, max_structures_per_enum=1)
        d = trans.as_dict()
        # Check json encodability
        s = json.dumps(d)
        trans = DopingTransformation.from_dict(d)
        self.assertEqual(str(trans.dopant), "Al3+")
        self.assertEqual(trans.max_structures_per_enum, 1)

    def test_find_codopant(self):
        # _find_codopant picks a charge-compensating co-dopant species.
        self.assertEqual(_find_codopant(Specie("Fe", 2), 1), Specie("Cu", 1))
        self.assertEqual(_find_codopant(Specie("Fe", 2), 3), Specie("In", 3))
class SlabTransformationTest(PymatgenTest):
    """SlabTransformation must reproduce SlabGenerator's output exactly."""

    def test_apply_transformation(self):
        # LiFePO4 (001) slab with a shift: compare against SlabGenerator.
        s = self.get_structure("LiFePO4")
        trans = SlabTransformation([0, 0, 1], 10, 10, shift=0.25)
        gen = SlabGenerator(s, [0, 0, 1], 10, 10)
        slab_from_gen = gen.get_slab(0.25)
        slab_from_trans = trans.apply_transformation(s)
        self.assertArrayAlmostEqual(slab_from_gen.lattice.matrix,
                                    slab_from_trans.lattice.matrix)
        self.assertArrayAlmostEqual(slab_from_gen.cart_coords,
                                    slab_from_trans.cart_coords)

        # FCC Fe (111) slab, default shift.
        fcc = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Fe"],
                                        [[0, 0, 0]])
        trans = SlabTransformation([1, 1, 1], 10, 10)
        slab_from_trans = trans.apply_transformation(fcc)
        gen = SlabGenerator(fcc, [1, 1, 1], 10, 10)
        slab_from_gen = gen.get_slab()
        self.assertArrayAlmostEqual(slab_from_gen.lattice.matrix,
                                    slab_from_trans.lattice.matrix)
        self.assertArrayAlmostEqual(slab_from_gen.cart_coords,
                                    slab_from_trans.cart_coords)
if __name__ == "__main__":
    # Run all the test cases in this module.
    unittest.main()
| |
# vim:fileencoding=utf-8:noet
from __future__ import unicode_literals, absolute_import
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import sys
import errno
from time import sleep
from threading import RLock
from powerline.lib.monotonic import monotonic
from powerline.lib.inotify import INotify, INotifyError
def realpath(path):
    """Return *path* with symlinks resolved and made absolute."""
    resolved = os.path.realpath(path)
    return os.path.abspath(resolved)
class INotifyWatch(INotify):
    """Watch files for modification using Linux inotify.

    ``__call__(path)`` answers "has *path* been modified since the last
    query?".  Watches that have not been queried for ``expire_time``
    minutes are dropped to conserve kernel resources.
    """

    is_stat_based = False

    def __init__(self, expire_time=10):
        """:param expire_time: minutes without queries before a watch expires."""
        super(INotifyWatch, self).__init__()
        self.watches = {}     # path -> inotify watch descriptor
        self.modified = {}    # path -> True if modified since last query
        self.last_query = {}  # path -> monotonic() time of last __call__
        self.lock = RLock()
        self.expire_time = expire_time * 60  # minutes -> seconds

    def expire_watches(self):
        """Drop watches not queried within the last expire_time seconds."""
        now = monotonic()
        for path, last_query in tuple(self.last_query.items()):
            # BUG FIX: this used to test ``last_query - now``, which is
            # never positive (monotonic time only grows), so watches were
            # never expired and kernel watch descriptors leaked.
            if now - last_query > self.expire_time:
                self.unwatch(path)

    def process_event(self, wd, mask, cookie, name):
        """Handle one raw inotify event, updating the modified flags."""
        if wd == -1 and (mask & self.Q_OVERFLOW):
            # We missed some INOTIFY events, so we dont
            # know the state of any tracked files.
            for path in tuple(self.modified):
                if os.path.exists(path):
                    self.modified[path] = True
                else:
                    self.watches.pop(path, None)
                    self.modified.pop(path, None)
                    self.last_query.pop(path, None)
            return
        for path, num in tuple(self.watches.items()):
            if num == wd:
                if mask & self.IGNORED:
                    # Kernel dropped the watch; forget everything about path.
                    self.watches.pop(path, None)
                    self.modified.pop(path, None)
                    self.last_query.pop(path, None)
                else:
                    if mask & self.ATTRIB:
                        # The watched file could have had its inode changed, in
                        # which case we will not get any more events for this
                        # file, so re-register the watch. For example by some
                        # other file being renamed as this file.
                        try:
                            self.unwatch(path)
                        except OSError:
                            pass
                        try:
                            self.watch(path)
                        except OSError as e:
                            if getattr(e, 'errno', None) != errno.ENOENT:
                                raise
                        else:
                            self.modified[path] = True
                    else:
                        self.modified[path] = True

    def unwatch(self, path):
        ''' Remove the watch for path. Raises an OSError if removing the watch
        fails for some reason. '''
        path = realpath(path)
        with self.lock:
            self.modified.pop(path, None)
            self.last_query.pop(path, None)
            wd = self.watches.pop(path, None)
            if wd is not None:
                if self._rm_watch(self._inotify_fd, wd) != 0:
                    self.handle_error()

    def watch(self, path):
        ''' Register a watch for the file/directory named path. Raises an OSError if path
        does not exist. '''
        import ctypes
        path = realpath(path)
        with self.lock:
            if path not in self.watches:
                bpath = path if isinstance(path, bytes) else path.encode(self.fenc)
                flags = self.MOVE_SELF | self.DELETE_SELF
                buf = ctypes.c_char_p(bpath)
                # Try watching path as a directory
                wd = self._add_watch(self._inotify_fd, buf, flags | self.ONLYDIR)
                if wd == -1:
                    eno = ctypes.get_errno()
                    if eno != errno.ENOTDIR:
                        self.handle_error()
                    # Try watching path as a file
                    flags |= (self.MODIFY | self.ATTRIB)
                    wd = self._add_watch(self._inotify_fd, buf, flags)
                    if wd == -1:
                        self.handle_error()
                self.watches[path] = wd
                self.modified[path] = False

    def is_watched(self, path):
        """Return True if path is currently being watched."""
        with self.lock:
            return realpath(path) in self.watches

    def __call__(self, path):
        ''' Return True if path has been modified since the last call. Can
        raise OSError if the path does not exist. '''
        path = realpath(path)
        with self.lock:
            self.last_query[path] = monotonic()
            self.expire_watches()
            if path not in self.watches:
                # Try to re-add the watch, it will fail if the file does not
                # exist/you dont have permission
                self.watch(path)
                return True
            self.read(get_name=False)
            if path not in self.modified:
                # An ignored event was received which means the path has been
                # automatically unwatched
                return True
            ans = self.modified[path]
            if ans:
                self.modified[path] = False
            return ans

    def close(self):
        """Remove all watches and close the inotify file descriptor."""
        with self.lock:
            for path in tuple(self.watches):
                try:
                    self.unwatch(path)
                except OSError:
                    pass
            super(INotifyWatch, self).close()
class StatWatch(object):
    """Fallback watcher that detects changes by comparing mtimes."""

    is_stat_based = True

    def __init__(self):
        self.watches = {}  # resolved path -> last seen mtime
        self.lock = RLock()

    def watch(self, path):
        """Start tracking *path* by recording its current mtime."""
        resolved = realpath(path)
        with self.lock:
            self.watches[resolved] = os.path.getmtime(resolved)

    def unwatch(self, path):
        """Stop tracking *path*; a no-op if it was never watched."""
        with self.lock:
            self.watches.pop(realpath(path), None)

    def is_watched(self, path):
        """Return True if *path* is currently tracked."""
        with self.lock:
            return realpath(path) in self.watches

    def __call__(self, path):
        """Return True if *path* changed since the previous call.

        A path seen for the first time always reports True.
        """
        resolved = realpath(path)
        with self.lock:
            if resolved not in self.watches:
                self.watches[resolved] = os.path.getmtime(resolved)
                return True
            current = os.path.getmtime(resolved)
            if current != self.watches[resolved]:
                self.watches[resolved] = current
                return True
            return False

    def close(self):
        """Forget all tracked paths."""
        with self.lock:
            self.watches.clear()
def create_file_watcher(use_stat=False, expire_time=10):
    '''
    Create an object that can watch for changes to specified files. To use:

    watcher = create_file_watcher()
    watcher(path1) # Will return True if path1 has changed since the last time this was called. Always returns True the first time.
    watcher.unwatch(path1)

    Uses inotify if available, otherwise tracks mtimes. expire_time is the
    number of minutes after the last query for a given path for the inotify
    watch for that path to be automatically removed. This conserves kernel
    resources.
    '''
    if not use_stat:
        try:
            return INotifyWatch(expire_time=expire_time)
        except INotifyError:
            # inotify unavailable (e.g. non-Linux kernel); fall back below.
            pass
    return StatWatch()
if __name__ == '__main__':
    # Manual smoke test: watch the path given as the last command-line
    # argument and report each change once per second until Ctrl-C.
    watcher = create_file_watcher()
    print ('Using watcher: %s' % watcher.__class__.__name__)
    print ('Watching %s, press Ctrl-C to quit' % sys.argv[-1])
    watcher.watch(sys.argv[-1])
    try:
        while True:
            if watcher(sys.argv[-1]):
                print ('%s has changed' % sys.argv[-1])
            sleep(1)
    except KeyboardInterrupt:
        pass
    watcher.close()
| |
import abc
import string
import bitstring
import logging
from .lifter_helper import ParseError
from .syntax_wrapper import VexValue
from ...expr import IRExpr, RdTmp
from .vex_helper import JumpKind, vex_int_class
# Module-level logger for the gymrat instruction lifter.
l = logging.getLogger("instr")
class Instruction(metaclass=abc.ABCMeta):
"""
Base class for an Instruction.
You should make a subclass of this for each instruction you want to lift. These classes will contain the "semantics"
of the instruction, that is, what it _does_, in terms of the VEX IR.
You may want to subclass this for your architecture, and add arch-specific handling for parsing, argument
resolution, etc., and have instructions subclass that instead.
The core parsing functionality is done via ``bin_format``. Each instruction should be a subclass of ``Instruction``
and will be parsed by comparing bits in the provided bitstream to symbols in the ``bin_format`` member of the class.
"Bin formats" are strings of symbols, like those you'd find in an ISA document, such as "0010rrrrddddffmm"
0 or 1 specify hard-coded bits that must match for an instruction to match.
Any letters specify arguments, grouped by letter, which will be parsed and provided as bitstrings in the ``data``
member of the class as a dictionary.
So, in our example, the bits ``0010110101101001``, applied to format string ``0010rrrrddddffmm``
will result in the following in ``self.data``:
{'r': '1101',
'd': '0110',
'f': '10',
'm': '01'}
Implement compute_result to provide the "meat" of what your instruction does.
You can also implement it in your arch-specific subclass of ``Instruction``, to handle things common to all
instructions, and provide instruction implementations elsewhere.
We provide the ``VexValue`` syntax wrapper to make expressing instruction semantics easy.
You first convert the bitstring arguments into ``VexValue``s using the provided convenience methods
(``self.get/put/load/store/etc.``)
This loads the register from the actual registers into a temporary value we can work with.
You can then write it back to a register when you're done.
For example, if you have the register in ``r``, as above, you can make a ``VexValue`` like this:
r = int(self.data['r'], 2) # we get bits corresponding to `r` bits and convert it to an int
r_vv = self.get(r, Type.int_32)
If you then had an instruction to increment ``r``, you could simply:
return r_vv += 1
You could then write it back to the register like this:
self.put(r_vv, r)
Note that most architectures have special flags that get set differently for each instruction, make sure to
implement those as well (override ``set_flags()`` )
Override ``parse()`` to extend parsing.
For example, in MSP430, this allows us to grab extra words from the bitstream
when extra immediate words are present.
All architectures are different enough that there's no magic recipe for how to write a lifter.
See the examples provided by gymrat for ideas of how to use this to build your own lifters quickly and easily.
"""
data = None
irsb_c = None
def __init__(self, bitstrm, arch, addr):
    """
    Create an instance of the instruction

    :param bitstrm: The bitstream to decode instructions from
    :param arch: The architecture object for the code being lifted
        (provides instruction_endness and byte_width)
    :param addr: The address of the instruction to be lifted, used only for jumps and branches
    """
    self.addr = addr
    self.arch = arch
    # Width in bits is dictated by the subclass's bin_format string;
    # parse() consumes exactly this many bits from the stream.
    self.bitwidth = len(self.bin_format)
    self.data = self.parse(bitstrm)
@property
@abc.abstractmethod
def bin_format(self) -> str:
    """
    Read the documentation of the class to understand what a bin format string is

    Its length fixes the instruction's bit width (see __init__).

    :return: str bin format string
    """
@property
@abc.abstractmethod
def name(self) -> str:
    """
    Name of the instruction

    Can be useful to name the instruction when there's an error related to it
    """
def __call__(self, irsb_c, past_instructions, future_instructions):
    # Convenience: calling the instruction object lifts it into irsb_c.
    self.lift(irsb_c, past_instructions, future_instructions)
def mark_instruction_start(self):
    # Emit the IMark delimiting this instruction in the IRSB
    # (address, width in bytes, delta 0).  Must precede all other
    # statements for this instruction (see lift()).
    self.irsb_c.imark(self.addr, self.bytewidth, 0)
def fetch_operands(self):  # pylint: disable=no-self-use
    """
    Get the operands out of memory or registers
    Return a tuple of operands for the instruction

    The default implementation returns no operands; the tuple is
    unpacked into compute_result() by lift().
    """
    return ()
def lift(self, irsb_c, past_instructions, future_instructions):  # pylint: disable=unused-argument
    """
    This is the main body of the "lifting" for the instruction.
    This can/should be overridden to provide the general flow of how instructions in your arch work.
    For example, in MSP430, this is:

    - Figure out what your operands are by parsing the addressing, and load them into temporary registers
    - Do the actual operation, and commit the result, if needed.
    - Compute the flags

    :param irsb_c: The IRSBCustomizer to emit VEX statements into
    """
    self.irsb_c = irsb_c
    # Always call this first!
    self.mark_instruction_start()
    # Then do the actual stuff.
    inputs = self.fetch_operands()
    retval = self.compute_result(*inputs)  # pylint: disable=assignment-from-none
    # A None result means "nothing to write back" (see compute_result).
    if retval is not None:
        self.commit_result(retval)
    # Flags see both the operands and the (possibly None) result.
    vals = list(inputs) + [retval]
    self.compute_flags(*vals)
def commit_result(self, res):
    """
    This where the result of the operation is written to a destination.
    This happens only if compute_result does not return None, and happens before compute_flags is called.
    Override this to specify how to write out the result.
    The results of fetch_operands can be used to resolve various addressing modes for the write outward.
    A common pattern is to return a function from fetch_operands which will be called here to perform the write.

    :param res: the value returned by compute_result
    """
def compute_result(self, *args):  # pylint: disable=unused-argument,no-self-use
    """
    This is where the actual operation performed by your instruction, excluding the calculation of flags, should be
    performed. Return the VexValue of the "result" of the instruction, which may
    be used to calculate the flags later.
    For example, for a simple add, with arguments src and dst, you can simply write:

        return src + dst

    :param args: the operands returned by fetch_operands
    :return: A VexValue containing the "result" of the operation, or
        None (the default) to skip commit_result entirely.
    """
    return None
def compute_flags(self, *args):
    """
    Most CPU architectures have "flags" that should be computed for many instructions.
    Override this to specify how that happens.  One common pattern is to define
    this method to call specific methods to update each flag, which can then be
    overridden in the actual classes for each instruction.

    Called by lift() with the fetched operands followed by the value
    returned from compute_result().
    """
def match_instruction(self, data, bitstrm):  # pylint: disable=unused-argument,no-self-use
    """
    Override this to extend the parsing functionality.
    This is great for if your arch has instruction "formats" that have an opcode that has to match.

    Note: parse() ignores the return value; raise a ParseError here to
    reject a non-matching instruction.

    :param data: dict of bit fields parsed so far
    :param bitstrm: the bitstream being decoded
    :return: data
    """
    return data
def parse(self, bitstrm):
    """
    Parse this instruction's bits out of *bitstrm* according to bin_format.

    :return: dict mapping each format letter to the bit string it matched
    :raises ParseError: if a literal 0/1 in the format does not match
    """
    if self.arch.instruction_endness == 'Iend_LE':
        # This arch stores its instructions in memory endian-flipped compared to the ISA.
        # To enable natural lifter-writing, we let the user write them like in the manual, and correct for
        # endness here.
        instr_bits = self._load_le_instr(bitstrm, self.bitwidth)
    else:
        # peek: do not consume the bits until the match is confirmed below.
        instr_bits = bitstrm.peek("bin:%d" % self.bitwidth)
    data = {c: '' for c in self.bin_format if c in string.ascii_letters}
    for c, b in zip(self.bin_format, instr_bits):
        if c in '01':
            if b != c:
                raise ParseError('Mismatch between format bit %c and instruction bit %c' % (c, b))
        elif c in string.ascii_letters:
            data[c] += b
        else:
            raise ValueError('Invalid bin_format character %c' % c)
    # Hook here for extra matching functionality
    if hasattr(self, 'match_instruction'):
        # Should raise if it's not right
        self.match_instruction(data, bitstrm)
    # Use up the bits once we're sure it's right
    self.rawbits = bitstrm.read('hex:%d' % self.bitwidth)
    # Hook here for extra parsing functionality (e.g., trailers)
    if hasattr(self, '_extra_parsing'):
        data = self._extra_parsing(data, bitstrm)  # pylint: disable=no-member
    return data
@property
def bytewidth(self):
    """Instruction width in bytes; bitwidth must divide evenly."""
    full_bytes, leftover = divmod(self.bitwidth, self.arch.byte_width)
    if leftover:
        raise ValueError("Instruction is not a multiple of bytes wide!")
    return full_bytes
def disassemble(self):
    """
    Return the disassembly of this instruction, as a string.
    Override this in subclasses.

    :return: The address (self.addr), the instruction's name, and a list of its operands, as strings
    """
    # Default: unknown mnemonic, raw hex bits as the only "operand".
    return self.addr, 'UNK', [self.rawbits]
# These methods should be called in subclasses to do register and memory operations
def load(self, addr, ty):
    """
    Load a value from memory into a VEX temporary register.

    :param addr: The VexValue containing the addr to load from.
    :param ty: The Type of the resulting data
    :return: a VexValue
    """
    # Wrap the raw RdTmp so callers can use VexValue operator syntax.
    rdt = self.irsb_c.load(addr.rdt, ty)
    return VexValue(self.irsb_c, rdt)
def constant(self, val, ty):
    """
    Creates a constant as a VexValue

    :param val: The value, as an integer.  Passing an existing VexValue
        or IRExpr is a programming error and raises.
    :param ty: The type of the resulting VexValue
    :return: a VexValue
    """
    # BUG FIX: the original guard was
    #   isinstance(val, VexValue) and not isinstance(val, IRExpr)
    # which (since a VexValue is not an IRExpr) only ever rejected
    # VexValues and silently accepted IRExprs, contradicting the error
    # message below.  Reject both, as the message states.
    if isinstance(val, (VexValue, IRExpr)):
        raise Exception('Constant cannot be made from VexValue or IRExpr')
    rdt = self.irsb_c.mkconst(val, ty)
    return VexValue(self.irsb_c, rdt)
@staticmethod
def _lookup_register(arch, reg):
    """Resolve *reg* (integer index or name string) to the architecture's
    register-file offset."""
    name = reg
    if isinstance(name, int):
        # Integer indices are translated to names via the arch's
        # register_index table when it exists, else via register_list.
        if hasattr(arch, 'register_index'):
            name = arch.register_index[name]
        else:
            name = arch.register_list[name].name
    return arch.get_register_offset(name)
def get(self, reg, ty):
    """
    Load a value from a machine register into a VEX temporary register.
    All values must be loaded out of registers before they can be used with operations, etc
    and stored back into them when the instruction is over. See Put().

    :param reg: Register number as an integer, or register string name
    :param ty: The Type to use.
    :return: A VexValue of the gotten value.
    """
    offset = self._lookup_register(self.irsb_c.irsb.arch, reg)
    if offset == self.irsb_c.irsb.arch.ip_offset:
        # Reads of the instruction pointer return this instruction's
        # address as a constant instead of reading register state.
        return self.constant(self.addr, ty)
    rdt = self.irsb_c.rdreg(offset, ty)
    return VexValue(self.irsb_c, rdt)
def put(self, val, reg):
    """
    Puts a value from a VEX temporary register into a machine register.
    This is how the results of operations done to registers get committed to the machine's state.

    :param val: The VexValue to store (Want to store a constant? See Constant() first)
    :param reg: The integer register number to store into, or register name
    :return: None
    """
    offset = self._lookup_register(self.irsb_c.irsb.arch, reg)
    self.irsb_c.put(val.rdt, offset)
def put_conditional(self, cond, valiftrue, valiffalse, reg):
"""
Like put, except it checks a condition
to decide what to put in the destination register.
:param cond: The VexValue representing the logical expression for the condition
(if your expression only has constants, don't use this method!)
:param valiftrue: the VexValue to put in reg if cond evals as true
:param validfalse: the VexValue to put in reg if cond evals as false
:param reg: The integer register number to store into, or register name
:return: None
"""
val = self.irsb_c.ite(cond.rdt, valiftrue.rdt, valiffalse.rdt)
offset = self._lookup_register(self.irsb_c.irsb.arch, reg)
self.irsb_c.put(val, offset)
def store(self, val, addr):
"""
Store a VexValue in memory at the specified loaction.
:param val: The VexValue of the value to store
:param addr: The VexValue of the address to store into
:return: None
"""
self.irsb_c.store(addr.rdt, val.rdt)
    def jump(self, condition, to_addr, jumpkind=JumpKind.Boring, ip_offset=None):
        """
        Jump to a specified destination, under the specified condition.
        Used for branches, jumps, calls, returns, etc.

        :param condition: The VexValue representing the expression for the guard, or None for an unconditional jump
        :param to_addr: The destination: a VexValue, a plain int, or a raw RdTmp
        :param jumpkind: The JumpKind to use. See the VEX docs for what these are; you only need them for things
                         aren't normal jumps (e.g., calls, interrupts, program exits, etc etc)
        :param ip_offset: register-file offset of the instruction pointer; defaults to the arch's ip_offset
        :return: None
        :raises TypeError: if *to_addr* is none of the accepted types
        """
        to_addr_ty = None
        if isinstance(to_addr, VexValue):
            # Unpack a VV
            to_addr_rdt = to_addr.rdt
            to_addr_ty = to_addr.ty
        elif isinstance(to_addr, int):
            # Direct jump to an int, make an RdT and Ty
            to_addr_ty = vex_int_class(self.irsb_c.irsb.arch.bits).type
            to_addr = self.constant(to_addr, to_addr_ty) # TODO archinfo may be changing
            to_addr_rdt = to_addr.rdt
        elif isinstance(to_addr, RdTmp):
            # An RdT; just get the Ty of the arch's pointer type
            to_addr_ty = vex_int_class(self.irsb_c.irsb.arch.bits).type
            to_addr_rdt = to_addr
        else:
            raise TypeError("Jump destination has unknown type: " + repr(type(to_addr)))
        if not condition:
            # This is the default exit.
            self.irsb_c.irsb.jumpkind = jumpkind
            self.irsb_c.irsb.next = to_addr_rdt
        else:
            # add another exit
            # The added exit fires on the NEGATED condition and targets the
            # fall-through address (addr + instruction size); the block's
            # default next/jumpkind carry the taken branch.
            # EDG says: We should make sure folks set ArchXYZ.ip_offset like they're supposed to
            if ip_offset is None:
                ip_offset = self.arch.ip_offset
            # NOTE(review): this reads self.arch while the rest of the method
            # uses self.irsb_c.irsb.arch -- presumably the same object; confirm.
            assert ip_offset is not None
            negated_condition_rdt = self.ite(condition, self.constant(0, condition.ty), self.constant(1, condition.ty))
            direct_exit_target = self.constant(self.addr + (self.bitwidth // 8), to_addr_ty)
            self.irsb_c.add_exit(negated_condition_rdt, direct_exit_target.rdt, jumpkind, ip_offset)
            self.irsb_c.irsb.jumpkind = jumpkind
            self.irsb_c.irsb.next = to_addr_rdt
def ite(self, cond, t, f):
return self.irsb_c.ite(cond.rdt, t.rdt, f.rdt)
    def ccall(self, ret_type, func_obj, args):
        """
        Creates a CCall operation.
        A CCall is a procedure that calculates a value at *runtime*, not at lift-time.
        You can use these for flags, unresolvable jump targets, etc.
        We caution you to avoid using them when at all possible though.
        For an example of how to write and use a CCall, see gymrat/bf/lift_bf.py

        :param ret_type: The return type of the CCall
        :param func_obj: The function object to eventually call, or its name
                         as a string (looked up in angr's ccall module)
        :param args: List of arguments to the function
        :return: A VexValue of the result.
        """
        # HACK: FIXME: If you're reading this, I'm sorry. It's truly a crime against Python...
        from angr.engines.vex.claripy import ccall
        # Check the args to make sure they're the right type
        # (unwrap VexValues into their raw temporaries before emitting).
        list_args = list(args)
        new_args = []
        for arg in list_args:
            if isinstance(arg, VexValue):
                arg = arg.rdt
            new_args.append(arg)
        args = tuple(new_args)
        # Support calling ccalls via string name
        if isinstance(func_obj, str):
            func_obj = getattr(ccall, func_obj)
        else:
            # ew, monkey-patch in the user-provided CCall so the engine can
            # later find it by name on the ccall module.
            if not hasattr(ccall, func_obj.__name__):
                setattr(ccall, func_obj.__name__, func_obj)
        cc = self.irsb_c.op_ccall(ret_type, func_obj.__name__, args)
        return VexValue(self.irsb_c, cc)
def _load_le_instr(self, bitstream: bitstring.ConstBitStream, numbits: int) -> str:
return bitstring.Bits(uint=bitstream.peek("uintle:%d" % numbits), length=numbits).bin
| |
"""
Custom Authenticator to use GitHub OAuth with JupyterHub
Most of the code c/o Kyle Kelley (@rgbkrk)
"""
import json
import os
import urllib
from tornado.auth import OAuth2Mixin
from tornado.escape import url_escape
from tornado import gen, web
from tornado.httputil import url_concat
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from jupyterhub.handlers import BaseHandler
from jupyterhub.auth import Authenticator, LocalAuthenticator
from jupyterhub.utils import url_path_join
from traitlets import Unicode, Set
class GitHubMixin(OAuth2Mixin):
    # OAuth2 endpoint URLs for GitHub, consumed by tornado's OAuth2Mixin.
    _OAUTH_AUTHORIZE_URL = "https://github.com/login/oauth/authorize"
    _OAUTH_ACCESS_TOKEN_URL = "https://github.com/login/oauth/access_token"
class BitbucketMixin(OAuth2Mixin):
    # OAuth2 endpoint URLs for Bitbucket, consumed by tornado's OAuth2Mixin.
    _OAUTH_AUTHORIZE_URL = "https://bitbucket.org/site/oauth2/authorize"
    _OAUTH_ACCESS_TOKEN_URL = "https://bitbucket.org/site/oauth2/access_token"
class WelcomeHandler(BaseHandler):
    """Render the login page."""
    def _render(self, login_error=None, username=None):
        # Render the login template, passing through any ?next= target.
        return self.render_template('login.html',
                next=url_escape(self.get_argument('next', default='')),
                username=username,
                login_error=login_error,
        )
    def get(self):
        next_url = self.get_argument('next', '')
        if not next_url.startswith('/'):
            # disallow non-absolute next URLs (e.g. full URLs)
            next_url = ''
        user = self.get_current_user()
        if user:
            if not next_url:
                if user.running:
                    next_url = user.server.base_url
                else:
                    next_url = self.hub.server.base_url
            # set new login cookie
            # because single-user cookie may have been cleared or incorrect
            #self.set_login_cookie(self.get_current_user())
            # NOTE(review): next_url computed above is never used -- the
            # redirect below always targets /oauth_login; confirm intent.
            self.redirect('/oauth_login', permanent=False)
        else:
            # Not logged in: show the login page.
            self.finish(self._render())
class OAuthLoginHandler(BaseHandler):
    """Redirect the browser to the OAuth provider's authorization page."""
    def get(self):
        # When no callback URL is configured, guess one from the request.
        callback_path = url_path_join(
            self.hub.server.base_url,
            'oauth_callback'
        )
        guess_uri = '{proto}://{host}{path}'.format(
            proto=self.request.protocol,
            host=self.request.host,
            path=callback_path,
        )
        redirect_uri = self.authenticator.oauth_callback_url or guess_uri
        self.log.info('oauth redirect: %r', redirect_uri)
        self.authorize_redirect(
            redirect_uri=redirect_uri,
            client_id=self.authenticator.client_id,
            scope=[],
            response_type='code')
class GitHubLoginHandler(OAuthLoginHandler, GitHubMixin):
    # OAuthLoginHandler wired to GitHub's OAuth2 endpoints via the mixin.
    pass
class BitbucketLoginHandler(OAuthLoginHandler, BitbucketMixin):
    # OAuthLoginHandler wired to Bitbucket's OAuth2 endpoints via the mixin.
    pass
class GitHubOAuthHandler(BaseHandler):
    """Handle the OAuth callback: authenticate the code and log the user in."""
    @gen.coroutine
    def get(self):
        # TODO: Check if state argument needs to be checked
        username = yield self.authenticator.authenticate(self)
        if username:
            user = self.user_from_username(username)
            self.set_login_cookie(user)
            self.redirect(self.hub.server.base_url)
        else:
            # todo: custom error page?
            raise web.HTTPError(403)
class BitbucketOAuthHandler(GitHubOAuthHandler):
    # Same callback flow as GitHub; only the authenticator differs.
    pass
class GitHubOAuthenticator(Authenticator):
    """Authenticate JupyterHub users against GitHub OAuth2."""
    login_service = "GitHub"
    # Callback URL registered with the GitHub OAuth app; when empty, the
    # login handler guesses one from the incoming request.
    oauth_callback_url = Unicode('', config=True)
    client_id = Unicode(os.environ.get('GITHUB_CLIENT_ID', ''),
                        config=True)
    client_secret = Unicode(os.environ.get('GITHUB_CLIENT_SECRET', ''),
                            config=True)
    def login_url(self, base_url):
        # The login page (WelcomeHandler) is registered at /login below.
        return url_path_join(base_url, 'login')
    def get_handlers(self, app):
        return [
            (r'/login', WelcomeHandler),
            (r'/oauth_login', GitHubLoginHandler),
            (r'/oauth_callback', GitHubOAuthHandler),
        ]
    @gen.coroutine
    def authenticate(self, handler):
        """Exchange the callback ``code`` for a GitHub username, or None."""
        code = handler.get_argument("code", False)
        if not code:
            raise web.HTTPError(400, "oauth callback made without a token")
        # TODO: Configure the curl_httpclient for tornado
        http_client = AsyncHTTPClient()
        # Exchange the OAuth code for a GitHub Access Token
        #
        # See: https://developer.github.com/v3/oauth/
        # GitHub specifies a POST request yet requires URL parameters
        params = dict(
            client_id=self.client_id,
            client_secret=self.client_secret,
            code=code
        )
        url = url_concat("https://github.com/login/oauth/access_token",
                         params)
        req = HTTPRequest(url,
                          method="POST",
                          headers={"Accept": "application/json"},
                          body='' # Body is required for a POST...
                          )
        resp = yield http_client.fetch(req)
        resp_json = json.loads(resp.body.decode('utf8', 'replace'))
        access_token = resp_json['access_token']
        # Determine who the logged in user is
        headers={"Accept": "application/json",
                 "User-Agent": "JupyterHub",
                 "Authorization": "token {}".format(access_token)
        }
        req = HTTPRequest("https://api.github.com/user",
                          method="GET",
                          headers=headers
                          )
        resp = yield http_client.fetch(req)
        resp_json = json.loads(resp.body.decode('utf8', 'replace'))
        username = resp_json["login"]
        if self.whitelist and username not in self.whitelist:
            username = None
        # raise gen.Return(...) is the pre-"return value" way to return a
        # result from a tornado generator coroutine.
        raise gen.Return(username)
class BitbucketOAuthenticator(Authenticator):
    """Authenticate JupyterHub users against Bitbucket OAuth2."""
    login_service = "Bitbucket"
    oauth_callback_url = Unicode(os.environ.get('OAUTH_CALLBACK_URL', ''),
                                 config=True)
    client_id = Unicode(os.environ.get('BITBUCKET_CLIENT_ID', ''),
                        config=True)
    client_secret = Unicode(os.environ.get('BITBUCKET_CLIENT_SECRET', ''),
                            config=True)
    team_whitelist = Set(
        config=True,
        help="Automatically whitelist members of selected teams",
    )
    def login_url(self, base_url):
        return url_path_join(base_url, 'oauth_login')
    def get_handlers(self, app):
        return [
            (r'/oauth_login', BitbucketLoginHandler),
            (r'/oauth_callback', BitbucketOAuthHandler),
        ]
    @gen.coroutine
    def authenticate(self, handler):
        """Exchange the callback ``code`` for a Bitbucket username, or None."""
        code = handler.get_argument("code", False)
        if not code:
            raise web.HTTPError(400, "oauth callback made without a token")
        # TODO: Configure the curl_httpclient for tornado
        http_client = AsyncHTTPClient()
        params = dict(
            client_id=self.client_id,
            client_secret=self.client_secret,
            grant_type="authorization_code",
            code=code,
            redirect_uri=self.oauth_callback_url
        )
        # NOTE(review): the parameters are sent both in the URL and in the
        # urlencoded body below -- presumably redundant but harmless; confirm
        # against the Bitbucket token endpoint.
        url = url_concat(
            "https://bitbucket.org/site/oauth2/access_token", params)
        self.log.info(url)
        bb_header = {"Content-Type":
                     "application/x-www-form-urlencoded;charset=utf-8"}
        req = HTTPRequest(url,
                          method="POST",
                          auth_username=self.client_id,
                          auth_password=self.client_secret,
                          body=urllib.parse.urlencode(params).encode('utf-8'),
                          headers=bb_header
                          )
        resp = yield http_client.fetch(req)
        resp_json = json.loads(resp.body.decode('utf8', 'replace'))
        access_token = resp_json['access_token']
        # Determine who the logged in user is
        headers = {"Accept": "application/json",
                   "User-Agent": "JupyterHub",
                   "Authorization": "Bearer {}".format(access_token)
                   }
        req = HTTPRequest("https://api.bitbucket.org/2.0/user",
                          method="GET",
                          headers=headers
                          )
        resp = yield http_client.fetch(req)
        resp_json = json.loads(resp.body.decode('utf8', 'replace'))
        username = resp_json["username"]
        whitelisted = yield self.check_whitelist(username, headers)
        if not whitelisted:
            username = None
        return username
    def check_whitelist(self, username, headers):
        # Returns a coroutine future; callers must yield it.
        if self.team_whitelist:
            return self._check_group_whitelist(username, headers)
        else:
            return self._check_user_whitelist(username)
    @gen.coroutine
    def _check_user_whitelist(self, user):
        # An empty whitelist admits everyone.
        return (not self.whitelist) or (user in self.whitelist)
    @gen.coroutine
    def _check_group_whitelist(self, username, headers):
        """Collect the user's teams (paging through the API) and intersect
        them with ``team_whitelist``."""
        http_client = AsyncHTTPClient()
        # We verify the team membership by calling teams endpoint.
        # Re-use the headers, change the request.
        next_page = url_concat("https://api.bitbucket.org/2.0/teams",
                               {'role': 'member'})
        user_teams = set()
        while next_page:
            req = HTTPRequest(next_page, method="GET", headers=headers)
            resp = yield http_client.fetch(req)
            resp_json = json.loads(resp.body.decode('utf8', 'replace'))
            next_page = resp_json.get('next', None)
            user_teams |= \
                set([entry["username"] for entry in resp_json["values"]])
        return len(self.team_whitelist & user_teams) > 0
class LocalGitHubOAuthenticator(LocalAuthenticator, GitHubOAuthenticator):
    """A version that mixes in local system user creation"""
    # GitHub OAuth combined with LocalAuthenticator's system-user handling.
    pass
class LocalBitbucketOAuthenticator(LocalAuthenticator,
                                   BitbucketOAuthenticator):
    """A version that mixes in local system user creation"""
    # Bitbucket OAuth combined with LocalAuthenticator's system-user handling.
    pass
| |
#!/usr/bin/env python3
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
Extra supported commands are:
* gen, to generate the classes required for Telethon to run or docs
* pypi, to generate sdist, bdist_wheel, and push to PyPi
"""
import itertools
import json
import os
import re
import shutil
import sys
from pathlib import Path
from subprocess import run
from setuptools import find_packages, setup
# Needed since we're importing local files
sys.path.insert(0, os.path.dirname(__file__))
class TempWorkDir:
    """Context manager: switch the working directory to *new* for the
    duration of the ``with`` block, creating it if necessary, and restore
    the previous working directory on exit. Defaults to the directory
    containing this file.
    """
    def __init__(self, new=None):
        if not new:
            new = str(Path(__file__).parent.resolve())
        self.original = None
        self.new = new

    def __enter__(self):
        # os.chdir does not work with Path in Python 3.5.x
        self.original = str(Path('.').resolve())
        os.makedirs(self.new, exist_ok=True)
        os.chdir(self.new)
        return self

    def __exit__(self, *args):
        os.chdir(self.original)
# Code-generator paths: inputs live under telethon_generator/data, generated
# outputs go into the telethon package (and docs/ for the documentation).
GENERATOR_DIR = Path('telethon_generator')
LIBRARY_DIR = Path('telethon')
ERRORS_IN = GENERATOR_DIR / 'data/errors.csv'
ERRORS_OUT = LIBRARY_DIR / 'errors/_generated.py'
METHODS_IN = GENERATOR_DIR / 'data/methods.csv'
# Which raw API methods are covered by *friendly* methods in the client?
FRIENDLY_IN = GENERATOR_DIR / 'data/friendly.csv'
TLOBJECT_IN_TLS = [Path(x) for x in sorted(GENERATOR_DIR.glob('data/*.tl'))]
TLOBJECT_OUT = LIBRARY_DIR / '_tl'
TLOBJECT_MOD = 'telethon._tl'
DOCS_IN_RES = GENERATOR_DIR / 'data/html'
DOCS_OUT = Path('docs')
def generate(which, action='gen'):
    """Generate (or clean, when ``action == 'clean'``) the requested targets.

    :param which: list of targets among 'tl', 'errors', 'docs', 'json' or
                  'all'; it is mutated in place and any unrecognized
                  entries are left in it and reported. Empty means
                  ('tl', 'errors').
    :param action: 'gen' to generate, 'clean' to delete generated output.
    """
    from telethon_generator.parsers import\
        parse_errors, parse_methods, parse_tl, find_layer
    from telethon_generator.generators import\
        generate_errors, generate_tlobjects, generate_docs, clean_tlobjects
    # The layer number comes from the first .tl file that declares one.
    layer = next(filter(None, map(find_layer, TLOBJECT_IN_TLS)))
    errors = list(parse_errors(ERRORS_IN))
    methods = list(parse_methods(METHODS_IN, FRIENDLY_IN, {e.str_code: e for e in errors}))
    tlobjects = list(itertools.chain(*(
        parse_tl(file, layer, methods) for file in TLOBJECT_IN_TLS)))
    if not which:
        which.extend(('tl', 'errors'))
    clean = action == 'clean'
    action = 'Cleaning' if clean else 'Generating'
    if 'all' in which:
        which.remove('all')
        for x in ('tl', 'errors', 'docs'):
            if x not in which:
                which.append(x)
    if 'tl' in which:
        which.remove('tl')
        print(action, 'TLObjects...')
        if clean:
            clean_tlobjects(TLOBJECT_OUT)
        else:
            generate_tlobjects(tlobjects, layer, TLOBJECT_MOD, TLOBJECT_OUT)
    if 'errors' in which:
        which.remove('errors')
        print(action, 'RPCErrors...')
        if clean:
            if ERRORS_OUT.is_file():
                ERRORS_OUT.unlink()
        else:
            with ERRORS_OUT.open('w') as file:
                generate_errors(errors, file)
    if 'docs' in which:
        which.remove('docs')
        print(action, 'documentation...')
        if clean:
            if DOCS_OUT.is_dir():
                shutil.rmtree(str(DOCS_OUT))
        else:
            # Docs are generated from inside DOCS_OUT, so resolve the input
            # path first and switch directories for the duration.
            in_path = DOCS_IN_RES.resolve()
            with TempWorkDir(DOCS_OUT):
                generate_docs(tlobjects, methods, layer, in_path)
    if 'json' in which:
        which.remove('json')
        print(action, 'JSON schema...')
        json_files = [x.with_suffix('.json') for x in TLOBJECT_IN_TLS]
        if clean:
            for file in json_files:
                if file.is_file():
                    file.unlink()
        else:
            def gen_json(fin, fout):
                # Split the parsed TL objects into functions ("methods")
                # and type constructors, then dump both as one JSON file.
                meths = []
                constructors = []
                for tl in parse_tl(fin, layer):
                    if tl.is_function:
                        meths.append(tl.to_dict())
                    else:
                        constructors.append(tl.to_dict())
                what = {'constructors': constructors, 'methods': meths}
                with open(fout, 'w') as f:
                    json.dump(what, f, indent=2)
            for fs in zip(TLOBJECT_IN_TLS, json_files):
                gen_json(*fs)
    if which:
        # Anything still left in *which* was not a recognized target.
        print(
            'The following items were not understood:', which,
            '\n Consider using only "tl", "errors" and/or "docs".'
            '\n Using only "clean" will clean them. "all" to act on all.'
            '\n For instance "gen tl errors".'
        )
def main(argv):
    """Entry point: dispatch on ``argv[1]``.

    * ``gen`` / ``clean`` -- run the code generator on the given targets.
    * ``pypi`` -- regenerate, build sdist/wheel and upload with twine.
    * anything else -- a normal ``setup.py`` invocation (install, etc.),
      regenerating the code first when the generator sources are present.
    """
    if len(argv) >= 2 and argv[1] in ('gen', 'clean'):
        generate(argv[2:], argv[1])
    elif len(argv) >= 2 and argv[1] == 'pypi':
        # (Re)generate the code to make sure we don't push without it
        generate(['tl', 'errors'])
        # Try importing the telethon module to assert it has no errors
        try:
            import telethon
        except Exception:
            # Was a bare ``except:``, which would also swallow
            # KeyboardInterrupt/SystemExit; any import-time failure
            # still aborts the upload.
            print('Packaging for PyPi aborted, importing the module failed.')
            return
        remove_dirs = ['__pycache__', 'build', 'dist', 'Telethon.egg-info']
        for root, _dirs, _files in os.walk(LIBRARY_DIR, topdown=False):
            # setuptools is including __pycache__ for some reason (#1605)
            if root.endswith('/__pycache__'):
                remove_dirs.append(root)
        for x in remove_dirs:
            shutil.rmtree(x, ignore_errors=True)
        run('python3 setup.py sdist', shell=True)
        run('python3 setup.py bdist_wheel', shell=True)
        run('twine upload dist/*', shell=True)
        for x in ('build', 'dist', 'Telethon.egg-info'):
            shutil.rmtree(x, ignore_errors=True)
    else:
        # e.g. install from GitHub
        if GENERATOR_DIR.is_dir():
            generate(['tl', 'errors'])
        # Get the long description from the README file
        with open('README.rst', 'r', encoding='utf-8') as f:
            long_description = f.read()
        with open('telethon/version.py', 'r', encoding='utf-8') as f:
            version = re.search(r"^__version__\s*=\s*'(.*)'.*$",
                                f.read(), flags=re.MULTILINE).group(1)
        setup(
            name='Telethon',
            version=version,
            description="Full-featured Telegram client library for Python 3",
            long_description=long_description,
            url='https://github.com/LonamiWebs/Telethon',
            download_url='https://github.com/LonamiWebs/Telethon/releases',
            author='Lonami Exo',
            author_email='totufals@hotmail.com',
            license='MIT',
            # See https://stackoverflow.com/a/40300957/4759433
            # -> https://www.python.org/dev/peps/pep-0345/#requires-python
            # -> http://setuptools.readthedocs.io/en/latest/setuptools.html
            python_requires='>=3.7',
            # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
            classifiers=[
                #   3 - Alpha
                #   4 - Beta
                #   5 - Production/Stable
                'Development Status :: 5 - Production/Stable',
                'Intended Audience :: Developers',
                'Topic :: Communications :: Chat',
                'License :: OSI Approved :: MIT License',
                'Programming Language :: Python :: 3',
                'Programming Language :: Python :: 3.7',
                'Programming Language :: Python :: 3.8',
                'Programming Language :: Python :: 3.9',
                'Programming Language :: Python :: 3.10',
            ],
            keywords='telegram api chat client library messaging mtproto',
            packages=find_packages(exclude=[
                'telethon_*', 'tests*'
            ]),
            install_requires=['pyaes', 'rsa'],
            extras_require={
                'cryptg': ['cryptg']
            }
        )
# Always run from the directory containing this file so the relative
# paths used above (README.rst, telethon/, docs/) resolve correctly.
if __name__ == '__main__':
    with TempWorkDir():
        main(sys.argv)
| |
'''Test runner and result class for the regression test suite.
'''
import functools
import io
import sys
import time
import traceback
import unittest
# Brython: xml is not available
# import xml.etree.ElementTree as ET
from datetime import datetime
class RegressionTestResult(unittest.TextTestResult):
    """TextTestResult variant that also accumulates a JUnit-style XML
    summary (see :meth:`get_xml_element`).

    NOTE(review): the module's ``import xml.etree.ElementTree as ET`` is
    commented out above (Brython has no xml module), so every ``ET.``
    reference below raises NameError at runtime -- confirm how ET is meant
    to be supplied in this environment.
    """
    separator1 = '=' * 70 + '\n'
    separator2 = '-' * 70 + '\n'
    def __init__(self, stream, descriptions, verbosity):
        # Parent verbosity is forced to 0; this class does its own
        # per-test reporting via __write when verbosity is truthy.
        super().__init__(stream=stream, descriptions=descriptions, verbosity=0)
        self.buffer = True
        self.__suite = ET.Element('testsuite')
        self.__suite.set('start', datetime.utcnow().isoformat(' '))
        self.__e = None          # current <testcase> element
        self.__start_time = None # perf_counter at startTest
        self.__results = []
        self.__verbose = bool(verbosity)
    @classmethod
    def __getId(cls, test):
        # Best-effort id: call test.id() when possible, fall back to str().
        try:
            test_id = test.id
        except AttributeError:
            return str(test)
        try:
            return test_id()
        except TypeError:
            return str(test_id)
        return repr(test)  # NOTE(review): unreachable -- both paths above return
    def startTest(self, test):
        super().startTest(test)
        # Open a fresh <testcase> element; _add_result fills and detaches it.
        self.__e = e = ET.SubElement(self.__suite, 'testcase')
        self.__start_time = time.perf_counter()
        if self.__verbose:
            self.stream.write(f'{self.getDescription(test)} ... ')
            self.stream.flush()
    def _add_result(self, test, capture=False, **args):
        # Record the outcome of the current test into its <testcase>.
        e = self.__e
        self.__e = None
        if e is None:
            return
        e.set('name', args.pop('name', self.__getId(test)))
        e.set('status', args.pop('status', 'run'))
        e.set('result', args.pop('result', 'completed'))
        if self.__start_time:
            e.set('time', f'{time.perf_counter() - self.__start_time:0.6f}')
        if capture:
            # Attach buffered stdout/stderr (self.buffer=True) to the XML.
            if self._stdout_buffer is not None:
                stdout = self._stdout_buffer.getvalue().rstrip()
                ET.SubElement(e, 'system-out').text = stdout
            if self._stderr_buffer is not None:
                stderr = self._stderr_buffer.getvalue().rstrip()
                ET.SubElement(e, 'system-err').text = stderr
        for k, v in args.items():
            if not k or not v:
                continue
            e2 = ET.SubElement(e, k)
            if hasattr(v, 'items'):
                # Dict values become attributes; the '' key becomes text.
                for k2, v2 in v.items():
                    if k2:
                        e2.set(k2, str(v2))
                    else:
                        e2.text = str(v2)
            else:
                e2.text = str(v)
    def __write(self, c, word):
        # NOTE(review): the short status code *c* is currently unused; only
        # the long *word* is printed, and only in verbose mode.
        if self.__verbose:
            self.stream.write(f'{word}\n')
    @classmethod
    def __makeErrorDict(cls, err_type, err_value, err_tb):
        # Shape an exc_info triple into the dict form _add_result expects:
        # 'type'/'message' attributes plus the traceback as element text.
        if isinstance(err_type, type):
            if err_type.__module__ == 'builtins':
                typename = err_type.__name__
            else:
                typename = f'{err_type.__module__}.{err_type.__name__}'
        else:
            typename = repr(err_type)
        msg = traceback.format_exception(err_type, err_value, None)
        tb = traceback.format_exception(err_type, err_value, err_tb)
        return {
            'type': typename,
            'message': ''.join(msg),
            '': ''.join(tb),
        }
    def addError(self, test, err):
        self._add_result(test, True, error=self.__makeErrorDict(*err))
        super().addError(test, err)
        self.__write('E', 'ERROR')
    def addExpectedFailure(self, test, err):
        self._add_result(test, True, output=self.__makeErrorDict(*err))
        super().addExpectedFailure(test, err)
        self.__write('x', 'expected failure')
    def addFailure(self, test, err):
        self._add_result(test, True, failure=self.__makeErrorDict(*err))
        super().addFailure(test, err)
        self.__write('F', 'FAIL')
    def addSkip(self, test, reason):
        self._add_result(test, skipped=reason)
        super().addSkip(test, reason)
        self.__write('S', f'skipped {reason!r}')
    def addSuccess(self, test):
        self._add_result(test)
        super().addSuccess(test)
        self.__write('.', 'ok')
    def addUnexpectedSuccess(self, test):
        self._add_result(test, outcome='UNEXPECTED_SUCCESS')
        super().addUnexpectedSuccess(test)
        self.__write('u', 'unexpected success')
    def printErrors(self):
        if self.__verbose:
            self.stream.write('\n')
        self.printErrorList('ERROR', self.errors)
        self.printErrorList('FAIL', self.failures)
    def printErrorList(self, flavor, errors):
        for test, err in errors:
            self.stream.write(self.separator1)
            self.stream.write(f'{flavor}: {self.getDescription(test)}\n')
            self.stream.write(self.separator2)
            self.stream.write('%s\n' % err)
    def get_xml_element(self):
        """Finalize and return the <testsuite> element with run counters."""
        e = self.__suite
        e.set('tests', str(self.testsRun))
        e.set('errors', str(len(self.errors)))
        e.set('failures', str(len(self.failures)))
        return e
class QuietRegressionTestRunner:
    """Run tests with no console output, collecting results into a
    RegressionTestResult (verbosity 0)."""
    def __init__(self, stream, buffer=False):
        result = RegressionTestResult(stream, None, 0)
        result.buffer = buffer
        self.result = result

    def run(self, test):
        """Execute *test* against the stored result and return that result."""
        test(self.result)
        return self.result
def get_test_runner_class(verbosity, buffer=False):
    """Return a runner factory: unittest's TextTestRunner when *verbosity*
    is truthy, otherwise the quiet runner."""
    if not verbosity:
        return functools.partial(QuietRegressionTestRunner, buffer=buffer)
    return functools.partial(
        unittest.TextTestRunner,
        resultclass=RegressionTestResult,
        buffer=buffer,
        verbosity=verbosity,
    )
def get_test_runner(stream, verbosity, capture_output=False):
    """Instantiate the appropriate test runner writing to *stream*."""
    runner_factory = get_test_runner_class(verbosity, capture_output)
    return runner_factory(stream)
# Self-test: run a tiny suite through the runner and dump the XML summary.
if __name__ == '__main__':
    class TestTests(unittest.TestCase):
        def test_pass(self):
            pass
        def test_pass_slow(self):
            time.sleep(1.0)
        def test_fail(self):
            print('stdout', file=sys.stdout)
            print('stderr', file=sys.stderr)
            self.fail('failure message')
        def test_error(self):
            print('stdout', file=sys.stdout)
            print('stderr', file=sys.stderr)
            raise RuntimeError('error message')
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(TestTests))
    stream = io.StringIO()
    runner_cls = get_test_runner_class(sum(a == '-v' for a in sys.argv))
    # NOTE(review): the runner writes to sys.stdout while ``stream`` is
    # printed afterwards -- ``stream`` therefore stays empty; confirm intent.
    runner = runner_cls(sys.stdout)
    result = runner.run(suite)
    print('Output:', stream.getvalue())
    print('XML: ', end='')
    for s in ET.tostringlist(result.get_xml_element()):
        print(s.decode(), end='')
    print()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from py4j.java_gateway import JavaClass
from pyspark.sql import since
from pyspark.sql.column import _to_seq
from pyspark.sql.types import *
__all__ = ["DataFrameReader", "DataFrameWriter"]
def to_str(value):
    """
    A wrapper over str() that lower-cases booleans, so the JVM side
    receives 'true'/'false' rather than Python's 'True'/'False'.
    """
    return str(value).lower() if isinstance(value, bool) else str(value)
class DataFrameReader(object):
    """
    Interface used to load a :class:`DataFrame` from external storage systems
    (e.g. file systems, key-value stores, etc). Use :func:`SQLContext.read`
    to access this.

    Builder-style wrapper around the JVM-side DataFrameReader: each setter
    rebinds ``self._jreader`` and returns ``self`` so calls can be chained.

    ::Note: Experimental
    .. versionadded:: 1.4
    """
    def __init__(self, sqlContext):
        self._jreader = sqlContext._ssql_ctx.read()
        self._sqlContext = sqlContext
    def _df(self, jdf):
        # Wrap a py4j DataFrame handle in the Python-side DataFrame class.
        from pyspark.sql.dataframe import DataFrame
        return DataFrame(jdf, self._sqlContext)
    @since(1.4)
    def format(self, source):
        """Specifies the input data source format.
        :param source: string, name of the data source, e.g. 'json', 'parquet'.
        >>> df = sqlContext.read.format('json').load('python/test_support/sql/people.json')
        >>> df.dtypes
        [('age', 'bigint'), ('name', 'string')]
        """
        self._jreader = self._jreader.format(source)
        return self
    @since(1.4)
    def schema(self, schema):
        """Specifies the input schema.
        Some data sources (e.g. JSON) can infer the input schema automatically from data.
        By specifying the schema here, the underlying data source can skip the schema
        inference step, and thus speed up data loading.
        :param schema: a StructType object
        :raises TypeError: if *schema* is not a :class:`StructType`
        """
        if not isinstance(schema, StructType):
            raise TypeError("schema should be StructType")
        # The JVM side takes the schema as a parsed JSON data type.
        jschema = self._sqlContext._ssql_ctx.parseDataType(schema.json())
        self._jreader = self._jreader.schema(jschema)
        return self
    @since(1.5)
    def option(self, key, value):
        """Adds an input option for the underlying data source.
        Values are stringified via to_str (booleans become 'true'/'false').
        """
        self._jreader = self._jreader.option(key, to_str(value))
        return self
    @since(1.4)
    def options(self, **options):
        """Adds input options for the underlying data source.
        """
        for k in options:
            self._jreader = self._jreader.option(k, to_str(options[k]))
        return self
    @since(1.4)
    def load(self, path=None, format=None, schema=None, **options):
        """Loads data from a data source and returns it as a :class`DataFrame`.
        :param path: optional string for file-system backed data sources.
        :param format: optional string for format of the data source. Default to 'parquet'.
        :param schema: optional :class:`StructType` for the input schema.
        :param options: all other string options
        >>> df = sqlContext.read.load('python/test_support/sql/parquet_partitioned', opt1=True,
        ...     opt2=1, opt3='str')
        >>> df.dtypes
        [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
        """
        if format is not None:
            self.format(format)
        if schema is not None:
            self.schema(schema)
        self.options(**options)
        if path is not None:
            return self._df(self._jreader.load(path))
        else:
            return self._df(self._jreader.load())
    @since(1.4)
    def json(self, path, schema=None):
        """
        Loads a JSON file (one object per line) and returns the result as
        a :class`DataFrame`.
        If the ``schema`` parameter is not specified, this function goes
        through the input once to determine the input schema.
        :param path: string, path to the JSON dataset.
        :param schema: an optional :class:`StructType` for the input schema.
        >>> df = sqlContext.read.json('python/test_support/sql/people.json')
        >>> df.dtypes
        [('age', 'bigint'), ('name', 'string')]
        """
        if schema is not None:
            self.schema(schema)
        return self._df(self._jreader.json(path))
    @since(1.4)
    def table(self, tableName):
        """Returns the specified table as a :class:`DataFrame`.
        :param tableName: string, name of the table.
        >>> df = sqlContext.read.parquet('python/test_support/sql/parquet_partitioned')
        >>> df.registerTempTable('tmpTable')
        >>> sqlContext.read.table('tmpTable').dtypes
        [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
        """
        return self._df(self._jreader.table(tableName))
    @since(1.4)
    def parquet(self, *paths):
        """Loads a Parquet file, returning the result as a :class:`DataFrame`.
        >>> df = sqlContext.read.parquet('python/test_support/sql/parquet_partitioned')
        >>> df.dtypes
        [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
        """
        return self._df(self._jreader.parquet(_to_seq(self._sqlContext._sc, paths)))
    @since(1.5)
    def orc(self, path):
        """
        Loads an ORC file, returning the result as a :class:`DataFrame`.
        ::Note: Currently ORC support is only available together with
        :class:`HiveContext`.
        >>> df = hiveContext.read.orc('python/test_support/sql/orc_partitioned')
        >>> df.dtypes
        [('a', 'bigint'), ('b', 'int'), ('c', 'int')]
        """
        return self._df(self._jreader.orc(path))
    @since(1.4)
    def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPartitions=None,
             predicates=None, properties=None):
        """
        Construct a :class:`DataFrame` representing the database table accessible
        via JDBC URL `url` named `table` and connection `properties`.
        The `column` parameter could be used to partition the table, then it will
        be retrieved in parallel based on the parameters passed to this function.
        The `predicates` parameter gives a list expressions suitable for inclusion
        in WHERE clauses; each one defines one partition of the :class:`DataFrame`.
        ::Note: Don't create too many partitions in parallel on a large cluster;
        otherwise Spark might crash your external database systems.
        :param url: a JDBC URL
        :param table: name of table
        :param column: the column used to partition
        :param lowerBound: the lower bound of partition column
        :param upperBound: the upper bound of the partition column
        :param numPartitions: the number of partitions
        :param predicates: a list of expressions
        :param properties: JDBC database connection arguments, a list of arbitrary string
                           tag/value. Normally at least a "user" and "password" property
                           should be included.
        :return: a DataFrame
        """
        if properties is None:
            properties = dict()
        # Copy the Python dict into a java.util.Properties for the JVM call.
        jprop = JavaClass("java.util.Properties", self._sqlContext._sc._gateway._gateway_client)()
        for k in properties:
            jprop.setProperty(k, properties[k])
        if column is not None:
            if numPartitions is None:
                numPartitions = self._sqlContext._sc.defaultParallelism
            # NOTE(review): when ``column`` is given, lowerBound/upperBound
            # must also be provided -- int(None) below raises TypeError.
            return self._df(self._jreader.jdbc(url, table, column, int(lowerBound), int(upperBound),
                                               int(numPartitions), jprop))
        if predicates is not None:
            arr = self._sqlContext._sc._jvm.PythonUtils.toArray(predicates)
            return self._df(self._jreader.jdbc(url, table, arr, jprop))
        return self._df(self._jreader.jdbc(url, table, jprop))
class DataFrameWriter(object):
    """
    Interface used to write a [[DataFrame]] to external storage systems
    (e.g. file systems, key-value stores, etc). Use :func:`DataFrame.write`
    to access this.
    ::Note: Experimental
    .. versionadded:: 1.4
    """
    def __init__(self, df):
        # Keep a handle on the Python DataFrame, its SQLContext, and the
        # JVM-side DataFrameWriter that all methods delegate to.
        self._df = df
        self._sqlContext = df.sql_ctx
        self._jwrite = df._jdf.write()
    @since(1.4)
    def mode(self, saveMode):
        """Specifies the behavior when data or table already exists.
        Options include:
        * `append`: Append contents of this :class:`DataFrame` to existing data.
        * `overwrite`: Overwrite existing data.
        * `error`: Throw an exception if data already exists.
        * `ignore`: Silently ignore this operation if data already exists.
        >>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
        """
        # At the JVM side, the default value of mode is already set to "error".
        # So, if the given saveMode is None, we will not call JVM-side's mode method.
        if saveMode is not None:
            self._jwrite = self._jwrite.mode(saveMode)
        return self
    @since(1.4)
    def format(self, source):
        """Specifies the underlying output data source.
        :param source: string, name of the data source, e.g. 'json', 'parquet'.
        >>> df.write.format('json').save(os.path.join(tempfile.mkdtemp(), 'data'))
        """
        self._jwrite = self._jwrite.format(source)
        return self
    @since(1.5)
    def option(self, key, value):
        """Adds an output option for the underlying data source.
        """
        self._jwrite = self._jwrite.option(key, value)
        return self
    @since(1.4)
    def options(self, **options):
        """Adds output options for the underlying data source.
        """
        for k in options:
            self._jwrite = self._jwrite.option(k, options[k])
        return self
    @since(1.4)
    def partitionBy(self, *cols):
        """Partitions the output by the given columns on the file system.
        If specified, the output is laid out on the file system similar
        to Hive's partitioning scheme.
        :param cols: name of columns
        >>> df.write.partitionBy('year', 'month').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
        """
        # Allow both partitionBy('a', 'b') and partitionBy(['a', 'b']).
        if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
            cols = cols[0]
        self._jwrite = self._jwrite.partitionBy(_to_seq(self._sqlContext._sc, cols))
        return self
    @since(1.4)
    def save(self, path=None, format=None, mode=None, partitionBy=None, **options):
        """Saves the contents of the :class:`DataFrame` to a data source.
        The data source is specified by the ``format`` and a set of ``options``.
        If ``format`` is not specified, the default data source configured by
        ``spark.sql.sources.default`` will be used.
        :param path: the path in a Hadoop supported file system
        :param format: the format used to save
        :param mode: specifies the behavior of the save operation when data already exists.
            * ``append``: Append contents of this :class:`DataFrame` to existing data.
            * ``overwrite``: Overwrite existing data.
            * ``ignore``: Silently ignore this operation if data already exists.
            * ``error`` (default case): Throw an exception if data already exists.
        :param partitionBy: names of partitioning columns
        :param options: all other string options
        >>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
        """
        self.mode(mode).options(**options)
        if partitionBy is not None:
            self.partitionBy(partitionBy)
        if format is not None:
            self.format(format)
        if path is None:
            self._jwrite.save()
        else:
            self._jwrite.save(path)
    @since(1.4)
    def insertInto(self, tableName, overwrite=False):
        """Inserts the content of the :class:`DataFrame` to the specified table.
        It requires that the schema of the class:`DataFrame` is the same as the
        schema of the table.
        Optionally overwriting any existing data.
        """
        self._jwrite.mode("overwrite" if overwrite else "append").insertInto(tableName)
    @since(1.4)
    def saveAsTable(self, name, format=None, mode=None, partitionBy=None, **options):
        """Saves the content of the :class:`DataFrame` as the specified table.
        In the case the table already exists, behavior of this function depends on the
        save mode, specified by the `mode` function (default to throwing an exception).
        When `mode` is `Overwrite`, the schema of the [[DataFrame]] does not need to be
        the same as that of the existing table.
        * `append`: Append contents of this :class:`DataFrame` to existing data.
        * `overwrite`: Overwrite existing data.
        * `error`: Throw an exception if data already exists.
        * `ignore`: Silently ignore this operation if data already exists.
        :param name: the table name
        :param format: the format used to save
        :param mode: one of `append`, `overwrite`, `error`, `ignore` (default: error)
        :param partitionBy: names of partitioning columns
        :param options: all other string options
        """
        self.mode(mode).options(**options)
        if partitionBy is not None:
            self.partitionBy(partitionBy)
        if format is not None:
            self.format(format)
        self._jwrite.saveAsTable(name)
    @since(1.4)
    def json(self, path, mode=None):
        """Saves the content of the :class:`DataFrame` in JSON format at the specified path.
        :param path: the path in any Hadoop supported file system
        :param mode: specifies the behavior of the save operation when data already exists.
            * ``append``: Append contents of this :class:`DataFrame` to existing data.
            * ``overwrite``: Overwrite existing data.
            * ``ignore``: Silently ignore this operation if data already exists.
            * ``error`` (default case): Throw an exception if data already exists.
        >>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data'))
        """
        self.mode(mode)._jwrite.json(path)
    @since(1.4)
    def parquet(self, path, mode=None, partitionBy=None):
        """Saves the content of the :class:`DataFrame` in Parquet format at the specified path.
        :param path: the path in any Hadoop supported file system
        :param mode: specifies the behavior of the save operation when data already exists.
            * ``append``: Append contents of this :class:`DataFrame` to existing data.
            * ``overwrite``: Overwrite existing data.
            * ``ignore``: Silently ignore this operation if data already exists.
            * ``error`` (default case): Throw an exception if data already exists.
        :param partitionBy: names of partitioning columns
        >>> df.write.parquet(os.path.join(tempfile.mkdtemp(), 'data'))
        """
        self.mode(mode)
        if partitionBy is not None:
            self.partitionBy(partitionBy)
        self._jwrite.parquet(path)
    @since(1.5)
    def orc(self, path, mode=None, partitionBy=None):
        """Saves the content of the :class:`DataFrame` in ORC format at the specified path.
        ::Note: Currently ORC support is only available together with
        :class:`HiveContext`.
        :param path: the path in any Hadoop supported file system
        :param mode: specifies the behavior of the save operation when data already exists.
            * ``append``: Append contents of this :class:`DataFrame` to existing data.
            * ``overwrite``: Overwrite existing data.
            * ``ignore``: Silently ignore this operation if data already exists.
            * ``error`` (default case): Throw an exception if data already exists.
        :param partitionBy: names of partitioning columns
        >>> orc_df = hiveContext.read.orc('python/test_support/sql/orc_partitioned')
        >>> orc_df.write.orc(os.path.join(tempfile.mkdtemp(), 'data'))
        """
        self.mode(mode)
        if partitionBy is not None:
            self.partitionBy(partitionBy)
        self._jwrite.orc(path)
    @since(1.4)
    def jdbc(self, url, table, mode=None, properties=None):
        """Saves the content of the :class:`DataFrame` to a external database table via JDBC.
        .. note:: Don't create too many partitions in parallel on a large cluster;\
        otherwise Spark might crash your external database systems.
        :param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
        :param table: Name of the table in the external database.
        :param mode: specifies the behavior of the save operation when data already exists.
            * ``append``: Append contents of this :class:`DataFrame` to existing data.
            * ``overwrite``: Overwrite existing data.
            * ``ignore``: Silently ignore this operation if data already exists.
            * ``error`` (default case): Throw an exception if data already exists.
        :param properties: JDBC database connection arguments, a list of
                           arbitrary string tag/value. Normally at least a
                           "user" and "password" property should be included.
        """
        if properties is None:
            properties = dict()
        jprop = JavaClass("java.util.Properties", self._sqlContext._sc._gateway._gateway_client)()
        for k in properties:
            jprop.setProperty(k, properties[k])
        # Route through self.mode() like the other writers so a None mode
        # keeps the JVM-side default ("error") instead of forwarding null
        # to the JVM mode() method.
        self.mode(mode)._jwrite.jdbc(url, table, jprop)
def _test():
    """Run this module's doctests against a local Spark cluster.

    Requires the SPARK_HOME environment variable and the test-support
    parquet fixtures shipped with the Spark source tree.  Exits the
    process with a non-zero status if any doctest fails.
    """
    import doctest
    import os
    import sys
    import tempfile
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SQLContext, HiveContext
    import pyspark.sql.readwriter
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.sql.readwriter.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['tempfile'] = tempfile
    globs['os'] = os
    globs['sc'] = sc
    globs['sqlContext'] = SQLContext(sc)
    globs['hiveContext'] = HiveContext(sc)
    globs['df'] = globs['sqlContext'].read.parquet('python/test_support/sql/parquet_partitioned')
    try:
        (failure_count, test_count) = doctest.testmod(
            pyspark.sql.readwriter, globs=globs,
            optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    finally:
        # Always release the SparkContext, even if doctest itself raises;
        # otherwise the local cluster leaks across test runs.
        sc.stop()
    if failure_count:
        # sys.exit instead of the site-provided exit(), which is not
        # guaranteed to exist (e.g. under "python -S").
        sys.exit(-1)
# Allow the module to be executed directly to run its doctest suite.
if __name__ == "__main__":
    _test()
| |
# -*- coding: utf-8 -*-
"""
flask.ctx
~~~~~~~~~
Implements the objects required to keep the context.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import sys
from functools import update_wrapper
from werkzeug.exceptions import HTTPException
from .globals import _request_ctx_stack, _app_ctx_stack
from .module import blueprint_is_module
from .signals import appcontext_pushed, appcontext_popped
class _AppCtxGlobals(object):
"""A plain object."""
def get(self, name, default=None):
return self.__dict__.get(name, default)
def __contains__(self, item):
return item in self.__dict__
def __iter__(self):
return iter(self.__dict__)
def __repr__(self):
top = _app_ctx_stack.top
if top is not None:
return '<flask.g of %r>' % top.app.name
return object.__repr__(self)
def after_this_request(f):
    """Register ``f`` to run after the current request.

    The function receives the response object and must return the same or
    a new response.  Useful when something other than the view function
    (for instance a decorator) needs to modify the response::

        @app.route('/')
        def index():
            @after_this_request
            def add_header(response):
                response.headers['X-Foo'] = 'Parachute'
                return response
            return 'Hello World!'

    .. versionadded:: 0.9
    """
    ctx = _request_ctx_stack.top
    ctx._after_request_functions.append(f)
    return f
def copy_current_request_context(f):
    """Decorate ``f`` so it retains the current request context.

    A copy of the request context is taken at decoration time and pushed
    whenever the decorated function is called -- handy when handing work
    to greenlets::

        import gevent
        from flask import copy_current_request_context

        @app.route('/')
        def index():
            @copy_current_request_context
            def do_some_work():
                # can access flask.request as in the view function
                ...
            gevent.spawn(do_some_work)
            return 'Regular response'

    :raises RuntimeError: if no request context is on the stack.

    .. versionadded:: 0.10
    """
    ctx = _request_ctx_stack.top
    if ctx is None:
        raise RuntimeError('This decorator can only be used at local scopes '
            'when a request context is on the stack. For instance within '
            'view functions.')
    snapshot = ctx.copy()
    def wrapper(*args, **kwargs):
        # Re-enter the copied context for the duration of the call.
        with snapshot:
            return f(*args, **kwargs)
    return update_wrapper(wrapper, f)
def has_request_context():
    """Return ``True`` when a request context is currently active.

    Lets code take advantage of request information when available and
    fail silently otherwise::

        class User(db.Model):
            def __init__(self, username, remote_addr=None):
                self.username = username
                if remote_addr is None and has_request_context():
                    remote_addr = request.remote_addr
                self.remote_addr = remote_addr

    Alternatively the context-bound objects (:class:`request`, :class:`g`)
    can themselves be tested for truthiness::

        if remote_addr is None and request:
            remote_addr = request.remote_addr

    .. versionadded:: 0.7
    """
    top = _request_ctx_stack.top
    return top is not None
def has_app_context():
    """Return ``True`` when an application context is currently active.

    Works like :func:`has_request_context` but for the application
    context; a boolean check on :data:`current_app` is equivalent.

    .. versionadded:: 0.9
    """
    top = _app_ctx_stack.top
    return top is not None
class AppContext(object):
    """Implicitly binds an application object to the current thread or
    greenlet, much like :class:`RequestContext` binds the request
    information.  Pushing a request context also creates an application
    context when the right application is not already on top of the
    application context stack.
    """
    def __init__(self, app):
        self.app = app
        self.url_adapter = app.create_url_adapter(None)
        self.g = app.app_ctx_globals_class()
        # App contexts may be pushed several times; a plain reference
        # count is enough to know when the last level has been popped.
        self._refcnt = 0
    def push(self):
        """Binds the app context to the current context."""
        self._refcnt += 1
        _app_ctx_stack.push(self)
        appcontext_pushed.send(self.app)
    def pop(self, exc=None):
        """Pops the app context, running teardown on the last level."""
        self._refcnt -= 1
        if self._refcnt <= 0:
            if exc is None:
                exc = sys.exc_info()[1]
            self.app.do_teardown_appcontext(exc)
        popped = _app_ctx_stack.pop()
        assert popped is self, 'Popped wrong app context. (%r instead of %r)' \
            % (popped, self)
        appcontext_popped.send(self.app)
    def __enter__(self):
        self.push()
        return self
    def __exit__(self, exc_type, exc_value, tb):
        self.pop(exc_value)
class RequestContext(object):
    """The request context contains all request relevant information. It is
    created at the beginning of the request and pushed to the
    `_request_ctx_stack` and removed at the end of it. It will create the
    URL adapter and request object for the WSGI environment provided.
    Do not attempt to use this class directly, instead use
    :meth:`~flask.Flask.test_request_context` and
    :meth:`~flask.Flask.request_context` to create this object.
    When the request context is popped, it will evaluate all the
    functions registered on the application for teardown execution
    (:meth:`~flask.Flask.teardown_request`).
    The request context is automatically popped at the end of the request
    for you. In debug mode the request context is kept around if
    exceptions happen so that interactive debuggers have a chance to
    introspect the data. With 0.4 this can also be forced for requests
    that did not fail and outside of `DEBUG` mode. By setting
    ``'flask._preserve_context'`` to `True` on the WSGI environment the
    context will not pop itself at the end of the request. This is used by
    the :meth:`~flask.Flask.test_client` for example to implement the
    deferred cleanup functionality.
    You might find this helpful for unittests where you need the
    information from the context local around for a little longer. Make
    sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in
    that situation, otherwise your unittests will leak memory.
    """
    def __init__(self, app, environ, request=None):
        # The request object may be given explicitly (used by copy());
        # otherwise it is built from the WSGI environment.
        self.app = app
        if request is None:
            request = app.request_class(environ)
        self.request = request
        self.url_adapter = app.create_url_adapter(self.request)
        self.flashes = None
        self.session = None
        # Request contexts can be pushed multiple times and interleaved with
        # other request contexts. Now only if the last level is popped we
        # get rid of them. Additionally if an application context is missing
        # one is created implicitly so for each level we add this information
        self._implicit_app_ctx_stack = []
        # indicator if the context was preserved. Next time another context
        # is pushed the preserved context is popped.
        self.preserved = False
        # remembers the exception for pop if there is one in case the context
        # preservation kicks in.
        self._preserved_exc = None
        # Functions that should be executed after the request on the response
        # object. These will be called before the regular "after_request"
        # functions.
        self._after_request_functions = []
        # Resolve the URL rule for this request right away.
        self.match_request()
        # XXX: Support for deprecated functionality. This is going away with
        # Flask 1.0
        blueprint = self.request.blueprint
        if blueprint is not None:
            # better safe than sorry, we don't want to break code that
            # already worked
            bp = app.blueprints.get(blueprint)
            if bp is not None and blueprint_is_module(bp):
                self.request._is_old_module = True
    def _get_g(self):
        # ``g`` is proxied to the application context, not stored here.
        return _app_ctx_stack.top.g
    def _set_g(self, value):
        _app_ctx_stack.top.g = value
    g = property(_get_g, _set_g)
    del _get_g, _set_g
    def copy(self):
        """Creates a copy of this request context with the same request object.
        This can be used to move a request context to a different greenlet.
        Because the actual request object is the same this cannot be used to
        move a request context to a different thread unless access to the
        request object is locked.
        .. versionadded:: 0.10
        """
        return self.__class__(self.app,
            environ=self.request.environ,
            request=self.request
        )
    def match_request(self):
        """Can be overridden by a subclass to hook into the matching
        of the request.

        On a routing failure the :class:`~werkzeug.exceptions.HTTPException`
        is stored on ``request.routing_exception`` rather than raised, so
        the error can be rendered later during dispatch.
        """
        try:
            url_rule, self.request.view_args = \
                self.url_adapter.match(return_rule=True)
            self.request.url_rule = url_rule
        except HTTPException as e:
            self.request.routing_exception = e
    def push(self):
        """Binds the request context to the current context."""
        # If an exception occurs in debug mode or if context preservation is
        # activated under exception situations exactly one context stays
        # on the stack. The rationale is that you want to access that
        # information under debug situations. However if someone forgets to
        # pop that context again we want to make sure that on the next push
        # it's invalidated, otherwise we run at risk that something leaks
        # memory. This is usually only a problem in testsuite since this
        # functionality is not active in production environments.
        top = _request_ctx_stack.top
        if top is not None and top.preserved:
            top.pop(top._preserved_exc)
        # Before we push the request context we have to ensure that there
        # is an application context.
        app_ctx = _app_ctx_stack.top
        if app_ctx is None or app_ctx.app != self.app:
            # Record the implicitly created app context so pop() knows to
            # pop it again for this level; None means it was already there.
            app_ctx = self.app.app_context()
            app_ctx.push()
            self._implicit_app_ctx_stack.append(app_ctx)
        else:
            self._implicit_app_ctx_stack.append(None)
        _request_ctx_stack.push(self)
        # Open the session at the moment that the request context is
        # available. This allows a custom open_session method to use the
        # request context (e.g. code that access database information
        # stored on `g` instead of the appcontext).
        self.session = self.app.open_session(self.request)
        if self.session is None:
            self.session = self.app.make_null_session()
    def pop(self, exc=None):
        """Pops the request context and unbinds it by doing that. This will
        also trigger the execution of functions registered by the
        :meth:`~flask.Flask.teardown_request` decorator.
        .. versionchanged:: 0.9
           Added the `exc` argument.
        """
        app_ctx = self._implicit_app_ctx_stack.pop()
        clear_request = False
        # Teardown only runs when the last interleaved level of this
        # context is popped.
        if not self._implicit_app_ctx_stack:
            self.preserved = False
            self._preserved_exc = None
            if exc is None:
                exc = sys.exc_info()[1]
            self.app.do_teardown_request(exc)
            # If this interpreter supports clearing the exception information
            # we do that now. This will only go into effect on Python 2.x,
            # on 3.x it disappears automatically at the end of the exception
            # stack.
            if hasattr(sys, 'exc_clear'):
                sys.exc_clear()
            request_close = getattr(self.request, 'close', None)
            if request_close is not None:
                request_close()
            clear_request = True
        rv = _request_ctx_stack.pop()
        assert rv is self, 'Popped wrong request context. (%r instead of %r)' \
            % (rv, self)
        # get rid of circular dependencies at the end of the request
        # so that we don't require the GC to be active.
        if clear_request:
            rv.request.environ['werkzeug.request'] = None
        # Get rid of the app as well if necessary.
        if app_ctx is not None:
            app_ctx.pop(exc)
    def auto_pop(self, exc):
        """Pop the context, or keep it alive when context preservation
        applies (test client request or exception with preservation on).
        """
        if self.request.environ.get('flask._preserve_context') or \
           (exc is not None and self.app.preserve_context_on_exception):
            self.preserved = True
            self._preserved_exc = exc
        else:
            self.pop(exc)
    def __enter__(self):
        self.push()
        return self
    def __exit__(self, exc_type, exc_value, tb):
        # do not pop the request stack if we are in debug mode and an
        # exception happened. This will allow the debugger to still
        # access the request object in the interactive shell. Furthermore
        # the context can be force kept alive for the test client.
        # See flask.testing for how this works.
        self.auto_pop(exc_value)
    def __repr__(self):
        return '<%s \'%s\' [%s] of %s>' % (
            self.__class__.__name__,
            self.request.url,
            self.request.method,
            self.app.name,
        )
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the S3 File System"""
# pytype: skip-file
from __future__ import absolute_import
import logging
import unittest
import mock
from apache_beam.io.aws.clients.s3 import messages
from apache_beam.io.filesystem import BeamIOError
from apache_beam.io.filesystem import FileMetadata
from apache_beam.options.pipeline_options import PipelineOptions
# Protect against environments where boto3 library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apache_beam.io.aws import s3filesystem
except ImportError:
s3filesystem = None # type: ignore[assignment]
# pylint: enable=wrong-import-order, wrong-import-position
@unittest.skipIf(s3filesystem is None, 'AWS dependencies are not installed')
class S3FileSystemTest(unittest.TestCase):
  """Tests S3FileSystem path handling and its delegation to the S3IO client.

  Client-level calls are intercepted by patching the ``s3io`` module and
  replacing its ``S3IO`` factory with a lambda returning a shared MagicMock.
  """
  def setUp(self):
    # Fresh filesystem per test; options are empty defaults.
    pipeline_options = PipelineOptions()
    self.fs = s3filesystem.S3FileSystem(pipeline_options=pipeline_options)
  def test_scheme(self):
    # Scheme is static -- identical on instance and class.
    self.assertEqual(self.fs.scheme(), 's3')
    self.assertEqual(s3filesystem.S3FileSystem.scheme(), 's3')
  def test_join(self):
    # join() must normalize trailing and leading slashes on either side.
    self.assertEqual(
        's3://bucket/path/to/file',
        self.fs.join('s3://bucket/path', 'to', 'file'))
    self.assertEqual(
        's3://bucket/path/to/file', self.fs.join('s3://bucket/path', 'to/file'))
    self.assertEqual(
        's3://bucket/path/to/file',
        self.fs.join('s3://bucket/path', '/to/file'))
    self.assertEqual(
        's3://bucket/path/to/file',
        self.fs.join('s3://bucket/path/', 'to', 'file'))
    self.assertEqual(
        's3://bucket/path/to/file',
        self.fs.join('s3://bucket/path/', 'to/file'))
    self.assertEqual(
        's3://bucket/path/to/file',
        self.fs.join('s3://bucket/path/', '/to/file'))
    # Paths without the s3:// scheme are rejected.
    with self.assertRaises(ValueError):
      self.fs.join('/bucket/path/', '/to/file')
  def test_split(self):
    self.assertEqual(('s3://foo/bar', 'baz'), self.fs.split('s3://foo/bar/baz'))
    self.assertEqual(('s3://foo', ''), self.fs.split('s3://foo/'))
    self.assertEqual(('s3://foo', ''), self.fs.split('s3://foo'))
    with self.assertRaises(ValueError):
      self.fs.split('/no/s3/prefix')
  @mock.patch('apache_beam.io.aws.s3filesystem.s3io')
  def test_match_multiples(self, unused_mock_arg):
    # Prepare mocks.
    s3io_mock = mock.MagicMock()
    # The patched s3io module hands out our shared mock client.
    s3filesystem.s3io.S3IO = lambda: s3io_mock  # type: ignore[misc]
    # list_prefix returns {path: size}; match() should turn that into
    # FileMetadata entries.
    s3io_mock.list_prefix.return_value = {
        's3://bucket/file1': 1, 's3://bucket/file2': 2
    }
    expected_results = set([
        FileMetadata('s3://bucket/file1', 1),
        FileMetadata('s3://bucket/file2', 2)
    ])
    match_result = self.fs.match(['s3://bucket/'])[0]
    self.assertEqual(set(match_result.metadata_list), expected_results)
    s3io_mock.list_prefix.assert_called_once_with('s3://bucket/')
  @mock.patch('apache_beam.io.aws.s3filesystem.s3io')
  def test_match_multiples_limit(self, unused_mock_arg):
    # Prepare mocks.
    s3io_mock = mock.MagicMock()
    limit = 1
    s3filesystem.s3io.S3IO = lambda: s3io_mock  # type: ignore[misc]
    s3io_mock.list_prefix.return_value = {'s3://bucket/file1': 1}
    expected_results = set([FileMetadata('s3://bucket/file1', 1)])
    # Second argument to match() is a per-pattern limit list.
    match_result = self.fs.match(['s3://bucket/'], [limit])[0]
    self.assertEqual(set(match_result.metadata_list), expected_results)
    self.assertEqual(len(match_result.metadata_list), limit)
    s3io_mock.list_prefix.assert_called_once_with('s3://bucket/')
  @mock.patch('apache_beam.io.aws.s3filesystem.s3io')
  def test_match_multiples_error(self, unused_mock_arg):
    # Prepare mocks.
    s3io_mock = mock.MagicMock()
    s3filesystem.s3io.S3IO = lambda: s3io_mock  # type: ignore[misc]
    # A client failure must surface as a BeamIOError from match().
    exception = IOError('Failed')
    s3io_mock.list_prefix.side_effect = exception
    with self.assertRaises(BeamIOError) as error:
      self.fs.match(['s3://bucket/'])
    self.assertIn('Match operation failed', str(error.exception))
    s3io_mock.list_prefix.assert_called_once_with('s3://bucket/')
  @mock.patch('apache_beam.io.aws.s3filesystem.s3io')
  def test_match_multiple_patterns(self, unused_mock_arg):
    # Prepare mocks.
    s3io_mock = mock.MagicMock()
    s3filesystem.s3io.S3IO = lambda: s3io_mock  # type: ignore[misc]
    # One list_prefix call per pattern, answered in order via side_effect.
    s3io_mock.list_prefix.side_effect = [
        {
            's3://bucket/file1': 1
        },
        {
            's3://bucket/file2': 2
        },
    ]
    expected_results = [[FileMetadata('s3://bucket/file1', 1)],
                        [FileMetadata('s3://bucket/file2', 2)]]
    result = self.fs.match(['s3://bucket/file1*', 's3://bucket/file2*'])
    self.assertEqual([mr.metadata_list for mr in result], expected_results)
  @mock.patch('apache_beam.io.aws.s3filesystem.s3io')
  def test_create(self, unused_mock_arg):
    # Prepare mocks.
    s3io_mock = mock.MagicMock()
    s3filesystem.s3io.S3IO = lambda: s3io_mock  # type: ignore[misc]
    # create() should open the object for binary write.
    _ = self.fs.create('s3://bucket/from1', 'application/octet-stream')
    s3io_mock.open.assert_called_once_with(
        's3://bucket/from1', 'wb', mime_type='application/octet-stream')
  @mock.patch('apache_beam.io.aws.s3filesystem.s3io')
  def test_open(self, unused_mock_arg):
    # Prepare mocks.
    s3io_mock = mock.MagicMock()
    s3filesystem.s3io.S3IO = lambda: s3io_mock  # type: ignore[misc]
    # open() should open the object for binary read.
    _ = self.fs.open('s3://bucket/from1', 'application/octet-stream')
    s3io_mock.open.assert_called_once_with(
        's3://bucket/from1', 'rb', mime_type='application/octet-stream')
  @mock.patch('apache_beam.io.aws.s3filesystem.s3io')
  def test_copy_file(self, unused_mock_arg):
    # Prepare mocks.
    s3io_mock = mock.MagicMock()
    s3filesystem.s3io.S3IO = lambda: s3io_mock  # type: ignore[misc]
    sources = ['s3://bucket/from1', 's3://bucket/from2']
    destinations = ['s3://bucket/to1', 's3://bucket/to2']
    # copy() zips sources with destinations and issues one batch call.
    self.fs.copy(sources, destinations)
    src_dest_pairs = list(zip(sources, destinations))
    s3io_mock.copy_paths.assert_called_once_with(src_dest_pairs)
  @mock.patch('apache_beam.io.aws.s3filesystem.s3io')
  def test_copy_file_error(self, unused_mock_arg):
    # Prepare mocks.
    s3io_mock = mock.MagicMock()
    s3filesystem.s3io.S3IO = lambda: s3io_mock  # type: ignore[misc]
    # Mismatched source/destination lengths must raise before any client call.
    sources = ['s3://bucket/from1', 's3://bucket/from2', 's3://bucket/from3']
    destinations = ['s3://bucket/to1', 's3://bucket/to2']
    with self.assertRaises(BeamIOError):
      self.fs.copy(sources, destinations)
  @mock.patch('apache_beam.io.aws.s3filesystem.s3io')
  def test_delete(self, unused_mock_arg):
    # Prepare mocks.
    s3io_mock = mock.MagicMock()
    s3filesystem.s3io.S3IO = lambda: s3io_mock  # type: ignore[misc]
    s3io_mock.size.return_value = 0
    files = [
        's3://bucket/from1',
        's3://bucket/from2',
        's3://bucket/from3',
    ]
    # Issue batch delete.
    self.fs.delete(files)
    s3io_mock.delete_paths.assert_called_once_with(files)
  @mock.patch('apache_beam.io.aws.s3filesystem.s3io')
  def test_delete_error(self, unused_mock_arg):
    # Prepare mocks.
    s3io_mock = mock.MagicMock()
    s3filesystem.s3io.S3IO = lambda: s3io_mock  # type: ignore[misc]
    problematic_directory = 's3://nonexistent-bucket/tree/'
    exception = messages.S3ClientError('Not found', 404)
    # delete_paths reports per-path outcomes; only the failing path should
    # end up in the BeamIOError's exception_details.
    s3io_mock.delete_paths.return_value = {
        problematic_directory: exception,
        's3://bucket/object1': None,
        's3://bucket/object2': None,
    }
    s3io_mock.size.return_value = 0
    files = [
        problematic_directory,
        's3://bucket/object1',
        's3://bucket/object2',
    ]
    expected_results = {problematic_directory: exception}
    # Issue batch delete.
    with self.assertRaises(BeamIOError) as error:
      self.fs.delete(files)
    self.assertIn('Delete operation failed', str(error.exception))
    self.assertEqual(error.exception.exception_details, expected_results)
    s3io_mock.delete_paths.assert_called()
  @mock.patch('apache_beam.io.aws.s3filesystem.s3io')
  def test_rename(self, unused_mock_arg):
    # Prepare mocks.
    s3io_mock = mock.MagicMock()
    s3filesystem.s3io.S3IO = lambda: s3io_mock  # type: ignore[misc]
    sources = ['s3://bucket/from1', 's3://bucket/from2']
    destinations = ['s3://bucket/to1', 's3://bucket/to2']
    # rename() pairs sources with destinations like copy().
    self.fs.rename(sources, destinations)
    src_dest_pairs = list(zip(sources, destinations))
    s3io_mock.rename_files.assert_called_once_with(src_dest_pairs)
if __name__ == '__main__':
  # Verbose logging when the test module is executed directly.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
| |
#!/usr/bin/python
import argparse, csv, os, sys, re #std python imports
import numpy as np
from sklearn.ensemble import RandomForestClassifier #RF classifier from SKlearn
from sklearn.cross_validation import cross_val_score #validation stats from SKlearn
import itertools
import multiprocessing as mp #allows for parallelization of the classification to speed up script.
#########################
#Args
#########################
# Command line interface: the classifier reads a WHAM VCF and a training
# matrix, annotates each variant with WC/WP INFO fields, and writes the
# resulting VCF to stdout.
parser=argparse.ArgumentParser(description="Runs RandomForest classifier on WHAM output VCF files to classify structural variant type. Appends WC and WP flags for user to explore structural variant calls. The output is a VCF file written to standard out.")
parser.add_argument("VCF", type=str, help="User supplied VCF with WHAM variants; VCF needs AT field data")
parser.add_argument("training_matrix", type=str, help="training dataset for classifier derived from simulated read dataset")
parser.add_argument("--filter", type=str, help="optional arg for filtering type one of : ['sensitive', 'specific']; defaults to output all data if filtering if argument is not supplied.")
parser.add_argument("--proc", type=str, help="optional arg for number of proceses to run with classifier; higher thread number will increase speed of classifier; defaults to 1")
parser.add_argument("--minclassfreq", default=0, type=float, help="optional arg for minimum frequency required for classification, otherwise variant set as unknown (UNK). Default is to classify everything.")
arg=parser.parse_args()
#########################
#Functions
#########################
#class object for processing VCF files.
class vcf:
    """
    class vcf generates an iterator for looping through a vcf
    file. Can add various functionalities here to further process
    the vcf in a number of ways

    Header lines are consumed (and echoed to stdout, with the new WC/WP
    INFO declarations injected) during construction; iteration then yields
    chunks of raw data lines.

    chunksize = number of lines to process at once for parallel applications.
    """
    def __init__(self,file):
        # NOTE(review): 'file' shadows the Python 2 builtin, and the handle
        # is never closed explicitly -- relies on interpreter cleanup.
        self.f = open(file,'r')
        #process all of the header lines of the vcf.
        header = True #boolean to continue looping through header
        info_boolean = False #need a boolean to trigger and append new INFO fields
        while header:
            self.line = self.f.readline()
            line = self.line.strip()
            line = line.split("\t") #split line on tabs
            if line[0][0] == '#': #process header lines
                if re.search("##FORMAT", line[0]) and info_boolean == False: #first instance of ##FORMAT..
                    #need to append new INFO fields for the corresponding data
                    print '##INFO=<ID=WC,Number=1,Type=String,Description="WHAM classifier variant type">'
                    print '##INFO=<ID=WP,Number=4,Type=Float,Description="WHAM probability estimate for each structural variant classification from RandomForest model">'
                    info_boolean = True #only emit the new INFO lines once
                print "\t".join( line ) #print results to stdout
            else:
                header = False #break out of the loop; self.line now holds the first data line
    def __iter__(self):
        return self
    def next(self, chunksize=5000):
        # Python 2 iterator protocol: return up to 'chunksize' raw lines,
        # raising StopIteration once the file is exhausted.
        cnt = 0 #counter for chunking.
        return_array = [] #initialize empty array to store line data.
        #check here if we are currently on last line, and raise StopIteration to exit next()
        if len(self.line) == 0: #
            raise StopIteration
        while cnt < chunksize:
            line = self.line
            if len( line ) == 0:
                # EOF reached mid-chunk: return the partial chunk.
                return( return_array )
                # NOTE(review): the 'break' below is unreachable (follows a return).
                break #break out of loop because we are at last line in file.
            else:
                return_array.append( line )
                self.line = self.f.readline()
                cnt += 1
        return( return_array )
#parse the targets for ML. converts text list of classified data
#into a numerical dataset with links to the classified names
def parse_targets( target ):
    """
    Convert a list of SV type labels (e.g. DEL, DUP, INV) into numerical
    class targets for the scikit-learn classifier.

    target -- iterable of string labels, one per training example.

    Returns a dict with:
      'names'  -- unique label names in np.unique (sorted) order; the
                  integer class i maps back to names[i].
      'target' -- float array of integer class codes aligned with the input.
    """
    target = np.array(target) #convert to np array for ease in processing
    # np.unique with return_inverse yields both the sorted unique labels
    # and, for every element, the index of its label -- a single vectorized
    # pass replacing the original per-name np.where loop.
    names, codes = np.unique( target, return_inverse=True )
    # cast to float to match the historical np.zeros(...)-based output dtype
    target_numerical = codes.astype(float)
    #setup return data structure
    RV = {'names': names, 'target': target_numerical}
    #can use index of SV type in 'names' to get text based variant
    #call from 'target', where they've been converted to integers.
    return( RV )
#method to run observed data through the trained model to output
#a vcf friendly return of classified variant call and the prediction
#probabilities for each call
def classify_data( _x, clf, names, minclassfreq=None ):
    """Classify one observation and format the result for a VCF INFO field.

    _x -- feature values taken from column 8 of the VCF line
    clf -- trained classifier exposing predict() and predict_proba()
    names -- text labels for the classes, zero-indexed by classifier output
    minclassfreq -- optional probability floor; below it the call is 'UKN'

    Returns a string of the form "WC=<label>;WP=<p0,p1,...>" ready to be
    appended to the INFO column.
    """
    features = np.array(_x)
    # Predicted class index links back to the text label in `names`.
    predicted_class = int(clf.predict(features))
    label = names[predicted_class]
    probabilities = clf.predict_proba(features)[0]
    # Demote low-confidence calls to "unknown" when a floor was requested.
    if minclassfreq and probabilities[predicted_class] < minclassfreq:
        label = "UKN"
    probability_csv = ",".join(str(p) for p in probabilities)
    return "WC=" + label + ";WP=" + probability_csv
#A general parser that takes the data in VCF flag field and parses it into a
#dictionary data structure. Can then obtain whatever data needed by using
# RV['key']; ie. RV['GT'] ...
def parse_vcf_data( vdat ):
    """Parse the semicolon-delimited INFO column of a VCF line into a dict.

    vdat -- string; column 8 from a VCF file ("KEY=value;KEY=value;...").

    Returns a dict mapping each INFO key to its (string) value, so callers
    can do RV['GT'], RV['NC'], etc. Flag-style entries that carry no '='
    map to an empty string.

    Fixes: the original wrapped str.split in a try/except that could never
    fire (str.split does not raise), then crashed with IndexError on any
    INFO field lacking '='. partition() handles both shapes safely.
    """
    info = {}
    for field in vdat.split(";"):
        # partition never raises; a missing '=' yields value == "".
        key, _, value = field.partition("=")
        info[key] = value
    return info
#takes vcf field data and runs various filtering specs.
def run_filters( vdat, filtering = None ):
    """Apply the requested filter preset to one parsed VCF INFO dict.

    vdat -- dictionary of INFO fields from a VCF line
    filtering -- None (keep everything), "sensitive", or "specific"

    Returns True when the record passes the preset, False otherwise.
    Raises ValueError for an unrecognised preset. New presets can be
    added as extra branches that return False on any failed criterion.
    """
    if filtering is None:
        # No filtering requested; everything passes.
        return True
    if filtering == "sensitive":
        # Very permissive: only require a minimal supporting call count.
        return int(vdat['NC']) >= 2
    if filtering == "specific":
        # More restrictive: require a defined edit distance, split-read
        # support, and a higher call count.
        if vdat['ED'] == 'nan':
            return False
        split_support = vdat['BE'].split(',')
        if int(split_support[-1]) < 2:
            return False
        return int(vdat['NC']) >= 3
    raise ValueError('Not a valid --filter argumuent\n please try running with --help arg for instructions')
#fuction will process line information and classify variant for a line in VCF file.
def process_vcf( info ):
    """Classify every VCF line in one chunk and return the annotated lines.

    info is one element of the izip'd argument stream:
      info[0] -- chunk of raw VCF lines from the VCF iterator
      info[1] -- trained randomForest classifier object
      info[2] -- dataset dictionary holding the class names
      info[3] -- filter preset chosen by the user
      info[4] -- minimum classification frequency (may be None)

    Returns a list parallel to the input chunk; entries that fail the
    filtering step are represented by None.
    """
    raw_lines = info[0]
    clf = info[1]
    dataset = info[2]
    filter_choice = info[3]
    minclassfreq = info[4]
    annotated = []
    for raw in raw_lines:
        columns = raw.strip().split("\t")
        vdat = parse_vcf_data(columns[7])  # parse all of the appended INFO data
        if run_filters(vdat, filtering=filter_choice):
            # Feature vector lives in the 'AT' field; drop its first attribute.
            features = vdat['AT'].split(",")[1:]
            results = classify_data(features, clf, dataset['target_names'], minclassfreq)
            # Append the classification back onto the INFO column.
            columns[7] = columns[7] + ";" + results
            annotated.append("\t".join(columns))
        else:
            annotated.append(None)
    return annotated
#########################
#MAIN
#########################
# NOTE(review): Python 2 script (print statement, itertools.izip below).
###########
#import and assign training data
###########
#all sklearn data will be in 2D array [ nsamples X nfeatures]
sys.stderr.write("processing training file... \n" )
#iterate over training file. select out the numerical and classifier data
data = []
target = []
with open(arg.training_matrix) as t:
    for line in csv.reader(t,delimiter='\t'):
        if line[0][0] == "#": #add in this statemnt to print error if user supplies files in wrong order.
            raise ValueError('not a valid WHAM training file. perhaps you supplied arguments in the wrong order? \n please try running with --help arg for instructions')
        target.append( line[-1] ) #always have targets [classified SV] as last column
        #exclude first attribute; remaining columns are the numeric features
        d = [ float(i) for i in line[1:-1] ]
        data.append( d )
#populate the training dataset in sciKitLearn friendly structure.
dataset = {} #empty data
dataset[ 'data' ] = np.array( data ) #all training data into 2-D array
#turn our target list into integers and return target names
target_parse = parse_targets( target )
dataset[ 'target' ] = np.array( target_parse['target'] )
dataset[ 'target_names' ] = np.array( target_parse['names'] )
###########
#random forest classification
###########
#setup inital params
clf = RandomForestClassifier( n_estimators=500 )
#run RFC on dataset with target classifiers; runs the model fit
clf = clf.fit( dataset['data'], dataset['target'] )
######
#run some sanity checks here.
######
training_stats = clf.feature_importances_ #array of variable importances for model.
#print training stats to user
train_list = [ str(i) for i in training_stats ] #convert to str for printing to user.
sys.stderr.write("\t Training weights for RandomForest classifier \n\t N = %d training variables\n" %( len(train_list) ) )
sys.stderr.write("\t %s\n" %( "\t".join( train_list ) ) )
#need cross validation here. uses sklearn.cross_validation
scores = cross_val_score( clf, dataset['data'], dataset['target'] )
avg_val = scores.mean() * 100 #average cross validation levels
sys.stderr.write("\t results from cross validation:\n\t %f%s \n" %( avg_val, '%' ) )
######
#prediction and output
######
sys.stderr.write("processing VCF file through classifier... \n" )
sys.stderr.write("...running parent process with job id %d \n can use this ID to exit \n" %(os.getpid() ) )
sys.stderr.write("minclassfreq var is set to = %f \n" %( arg.minclassfreq ) )
#load VCF file into class obj
vcf_file = vcf(arg.VCF)
#parse the number of processes to enact
if arg.proc == None:
    proc_num = 1
else:
    proc_num = int( arg.proc )
###
#setup multiprocessing for the classification of SVs
###
#each worker receives (chunk_of_lines, clf, dataset, filter, minclassfreq)
p = mp.Pool( processes = proc_num )
results = p.imap(process_vcf, itertools.izip( vcf_file, itertools.repeat(clf), itertools.repeat(dataset), itertools.repeat(arg.filter), itertools.repeat(arg.minclassfreq) ) )
#iterate over the results and feed to stdout
for r in results:
    for rv in r: #iterate over the list of returned results
        if rv != None: #only print results that pass filtering specs.
            print rv #write output to std out
#final output to std err that the run has finished.
sys.stderr.write("...classifier finished \n" )
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class policystringmap(base_resource) :
	""" Configuration for string map resource.

	Generated NITRO SDK wrapper: each public API maps a CRUD operation on
	the NetScaler policystringmap resource onto the base_resource helpers.
	"""
	def __init__(self) :
		# Backing fields for the name/comment properties below.
		self._name = ""
		self._comment = ""
		# NOTE(review): three leading underscores, so this identifier is
		# name-mangled inside the class body; count()/count_filtered() read
		# '___count' straight out of __dict__ on deserialized responses --
		# presumably the payload formatter sets it via setattr. Confirm.
		self.___count = 0

	@property
	def name(self) :
		"""Unique name for the string map. Not case sensitive. Must begin with an ASCII letter or underscore (_) character, and must consist only of ASCII alphanumeric or underscore characters. Must not begin with 're' or 'xp' or be a word reserved for use as a default syntax expression qualifier prefix (such as HTTP) or enumeration value (such as ASCII). Must not be the name of an existing named expression, pattern set, dataset, string map, or HTTP callout.<br/>Minimum length = 1.
		"""
		try :
			return self._name
		except Exception as e:
			raise e

	@name.setter
	def name(self, name) :
		"""Unique name for the string map. Not case sensitive. Must begin with an ASCII letter or underscore (_) character, and must consist only of ASCII alphanumeric or underscore characters. Must not begin with 're' or 'xp' or be a word reserved for use as a default syntax expression qualifier prefix (such as HTTP) or enumeration value (such as ASCII). Must not be the name of an existing named expression, pattern set, dataset, string map, or HTTP callout.<br/>Minimum length = 1
		"""
		try :
			self._name = name
		except Exception as e:
			raise e

	@property
	def comment(self) :
		"""Comments associated with the string map.
		"""
		try :
			return self._comment
		except Exception as e:
			raise e

	@comment.setter
	def comment(self, comment) :
		"""Comments associated with the string map.
		"""
		try :
			self._comment = comment
		except Exception as e:
			raise e

	def _get_nitro_response(self, service, response) :
		""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			result = service.payload_formatter.string_to_resource(policystringmap_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# 444: clear the cached session (presumably expired) before
				# surfacing the error to the caller.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.policystringmap
		except Exception as e :
			raise e

	def _get_object_name(self) :
		""" Returns the value of object identifier argument
		"""
		try :
			if (self.name) :
				return str(self.name)
			return None
		except Exception as e :
			raise e

	@classmethod
	def add(cls, client, resource) :
		""" Use this API to add policystringmap.
		"""
		try :
			# Single resource vs. bulk list of resources.
			if type(resource) is not list :
				addresource = policystringmap()
				addresource.name = resource.name
				addresource.comment = resource.comment
				return addresource.add_resource(client)
			else :
				if (resource and len(resource) > 0) :
					addresources = [ policystringmap() for _ in range(len(resource))]
					for i in range(len(resource)) :
						addresources[i].name = resource[i].name
						addresources[i].comment = resource[i].comment
				result = cls.add_bulk_request(client, addresources)
				return result
		except Exception as e :
			raise e

	@classmethod
	def delete(cls, client, resource) :
		""" Use this API to delete policystringmap.
		"""
		try :
			if type(resource) is not list :
				deleteresource = policystringmap()
				# Callers may pass either a plain name or a resource object.
				if type(resource) != type(deleteresource):
					deleteresource.name = resource
				else :
					deleteresource.name = resource.name
				return deleteresource.delete_resource(client)
			else :
				# Bulk form: list of plain names, or list of resource objects.
				if type(resource[0]) != cls :
					if (resource and len(resource) > 0) :
						deleteresources = [ policystringmap() for _ in range(len(resource))]
						for i in range(len(resource)) :
							deleteresources[i].name = resource[i]
				else :
					if (resource and len(resource) > 0) :
						deleteresources = [ policystringmap() for _ in range(len(resource))]
						for i in range(len(resource)) :
							deleteresources[i].name = resource[i].name
				result = cls.delete_bulk_request(client, deleteresources)
				return result
		except Exception as e :
			raise e

	@classmethod
	def update(cls, client, resource) :
		""" Use this API to update policystringmap.
		"""
		try :
			if type(resource) is not list :
				updateresource = policystringmap()
				updateresource.name = resource.name
				updateresource.comment = resource.comment
				return updateresource.update_resource(client)
			else :
				if (resource and len(resource) > 0) :
					updateresources = [ policystringmap() for _ in range(len(resource))]
					for i in range(len(resource)) :
						updateresources[i].name = resource[i].name
						updateresources[i].comment = resource[i].comment
				result = cls.update_bulk_request(client, updateresources)
				return result
		except Exception as e :
			raise e

	@classmethod
	def unset(cls, client, resource, args) :
		""" Use this API to unset the properties of policystringmap resource.
		Properties that need to be unset are specified in args array.
		"""
		try :
			if type(resource) is not list :
				unsetresource = policystringmap()
				# Callers may pass either a plain name or a resource object.
				if type(resource) != type(unsetresource):
					unsetresource.name = resource
				else :
					unsetresource.name = resource.name
				return unsetresource.unset_resource(client, args)
			else :
				if type(resource[0]) != cls :
					if (resource and len(resource) > 0) :
						unsetresources = [ policystringmap() for _ in range(len(resource))]
						for i in range(len(resource)) :
							unsetresources[i].name = resource[i]
				else :
					if (resource and len(resource) > 0) :
						unsetresources = [ policystringmap() for _ in range(len(resource))]
						for i in range(len(resource)) :
							unsetresources[i].name = resource[i].name
				result = cls.unset_bulk_request(client, unsetresources, args)
				return result
		except Exception as e :
			raise e

	@classmethod
	def get(cls, client, name="", option_="") :
		""" Use this API to fetch all the policystringmap resources that are configured on netscaler.
		"""
		try :
			# No name: fetch everything. With name(s): fetch one resource
			# per name; lists fan out into one request per entry.
			if not name :
				obj = policystringmap()
				response = obj.get_resources(client, option_)
			else :
				if type(name) != cls :
					if type(name) is not list :
						obj = policystringmap()
						obj.name = name
						response = obj.get_resource(client, option_)
					else :
						if name and len(name) > 0 :
							response = [policystringmap() for _ in range(len(name))]
							obj = [policystringmap() for _ in range(len(name))]
							for i in range(len(name)) :
								obj[i] = policystringmap()
								obj[i].name = name[i]
								response[i] = obj[i].get_resource(client, option_)
			return response
		except Exception as e :
			raise e

	@classmethod
	def get_filtered(cls, client, filter_) :
		""" Use this API to fetch filtered set of policystringmap resources.
		filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = policystringmap()
			option_ = options()
			option_.filter = filter_
			response = obj.getfiltered(client, option_)
			return response
		except Exception as e :
			raise e

	@classmethod
	def count(cls, client) :
		""" Use this API to count the policystringmap resources configured on NetScaler.
		"""
		try :
			obj = policystringmap()
			option_ = options()
			option_.count = True
			response = obj.get_resources(client, option_)
			if response :
				return response[0].__dict__['___count']
			return 0
		except Exception as e :
			raise e

	@classmethod
	def count_filtered(cls, client, filter_) :
		""" Use this API to count filtered the set of policystringmap resources.
		Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = policystringmap()
			option_ = options()
			option_.count = True
			option_.filter = filter_
			response = obj.getfiltered(client, option_)
			if response :
				return response[0].__dict__['___count']
			return 0
		except Exception as e :
			raise e
class policystringmap_response(base_response) :
	""" Typed NITRO response wrapper holding policystringmap results.

	The payload formatter deserializes API responses into this shape;
	`policystringmap` carries the resource objects, the remaining fields
	carry the NITRO status envelope.
	"""
	def __init__(self, length=1) :
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-size the result list; the payload formatter fills it in.
		# (The original first bound an empty list here that was immediately
		# overwritten -- dead assignment removed.)
		self.policystringmap = [policystringmap() for _ in range(length)]
| |
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MusicVAE LSTM model utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
from tensorflow.contrib import rnn
from tensorflow.contrib import seq2seq
from tensorflow.contrib.cudnn_rnn.python.layers import cudnn_rnn
from tensorflow.python.util import nest
def rnn_cell(rnn_cell_size, dropout_keep_prob, residual, is_training=True):
  """Builds a MultiRNNCell of LSTMBlockCells from the given layer sizes."""
  # Dropout is disabled outside of training.
  keep_prob = dropout_keep_prob if is_training else 1.0
  layers = []
  previous_size = None
  for size in rnn_cell_size:
    layer = rnn.LSTMBlockCell(size)
    if residual:
      layer = rnn.ResidualWrapper(layer)
      # Residual connections need matching input/output widths, so project
      # the input on the first layer and whenever the width changes.
      if previous_size is None or size != previous_size:
        layer = rnn.InputProjectionWrapper(layer, size)
    layer = rnn.DropoutWrapper(layer, input_keep_prob=keep_prob)
    layers.append(layer)
    previous_size = size
  return rnn.MultiRNNCell(layers)
def cudnn_lstm_layer(layer_sizes, dropout_keep_prob, is_training=True,
                     name_or_scope='rnn'):
  """Builds a CudnnLSTM Layer based on the given parameters.

  Args:
    layer_sizes: Sequence of layer widths; CudnnLSTM requires them all equal.
    dropout_keep_prob: Keep probability; ignored (forced to 1.0) when not
      training.
    is_training: Whether the layer is built for training.
    name_or_scope: Name for the CudnnLSTM layer.

  Raises:
    ValueError: If `layer_sizes` contains differing sizes.
  """
  dropout_keep_prob = dropout_keep_prob if is_training else 1.0
  for ls in layer_sizes:
    if ls != layer_sizes[0]:
      raise ValueError(
          'CudnnLSTM does not support layers with differing sizes. Got: %s' %
          layer_sizes)
  lstm = cudnn_rnn.CudnnLSTM(
      num_layers=len(layer_sizes),
      num_units=layer_sizes[0],
      direction='unidirectional',
      dropout=1.0 - dropout_keep_prob,
      name=name_or_scope)

  class BackwardCompatibleCudnnParamsFormatConverterLSTM(
      tf.contrib.cudnn_rnn.CudnnParamsFormatConverterLSTM):
    """Overrides CudnnParamsFormatConverterLSTM for backward-compatibility."""

    def _cudnn_to_tf_biases(self, *cu_biases):
      """Overrides to subtract 1.0 from `forget_bias` (see BasicLSTMCell)."""
      (tf_bias,) = (
          super(BackwardCompatibleCudnnParamsFormatConverterLSTM,
                self)._cudnn_to_tf_biases(*cu_biases))
      i, c, f, o = tf.split(tf_bias, 4)
      # Non-Cudnn LSTM cells add 1.0 to the forget bias variable.
      return (tf.concat([i, c, f - 1.0, o], axis=0),)

    def _tf_to_cudnn_biases(self, *tf_biases):
      """Overrides to add 1.0 to `forget_bias` (see BasicLSTMCell)."""
      (tf_bias,) = tf_biases
      i, c, f, o = tf.split(tf_bias, 4)
      # Non-Cudnn LSTM cells add 1.0 to the forget bias variable.
      return (super(BackwardCompatibleCudnnParamsFormatConverterLSTM,
                    self)._tf_to_cudnn_biases(
                        tf.concat([i, c, f + 1.0, o], axis=0)))

  class BackwardCompatibleCudnnLSTMSaveable(
      tf.contrib.cudnn_rnn.CudnnLSTMSaveable):
    """Overrides CudnnLSTMSaveable for backward-compatibility."""
    _format_converter_cls = BackwardCompatibleCudnnParamsFormatConverterLSTM

    def _tf_canonical_name_prefix(self, layer, is_fwd=True):
      """Overrides for backward-compatible variable names."""
      # Mirrors the variable naming used by MultiRNNCell-based checkpoints so
      # that old checkpoints load into the Cudnn implementation.
      if self._direction == 'unidirectional':
        return 'multi_rnn_cell/cell_%d/lstm_cell' % layer
      else:
        return (
            'cell_%d/bidirectional_rnn/%s/multi_rnn_cell/cell_0/lstm_cell'
            % (layer, 'fw' if is_fwd else 'bw'))

  lstm._saveable_cls = BackwardCompatibleCudnnLSTMSaveable  # pylint:disable=protected-access
  return lstm
def state_tuples_to_cudnn_lstm_state(lstm_state_tuples):
  """Convert a tuple of LSTMStateTuples into CudnnLSTM's (h, c) format."""
  hidden_states = [state.h for state in lstm_state_tuples]
  cell_states = [state.c for state in lstm_state_tuples]
  return (tf.stack(hidden_states), tf.stack(cell_states))
def cudnn_lstm_state_to_state_tuples(cudnn_lstm_state):
  """Convert CudnnLSTM's (h, c) format into a tuple of LSTMStateTuples."""
  h_stacked, c_stacked = cudnn_lstm_state
  paired = zip(tf.unstack(h_stacked), tf.unstack(c_stacked))
  return tuple(rnn.LSTMStateTuple(h=h, c=c) for h, c in paired)
def _get_final_index(sequence_length, time_major=True):
  """Build gather_nd indices pointing at each sequence's final step."""
  last_step = tf.maximum(0, sequence_length - 1)
  batch_range = tf.range(sequence_length.shape[0])
  # Coordinate order is (time, batch) for time-major tensors and
  # (batch, time) for batch-major ones.
  if time_major:
    parts = [last_step, batch_range]
  else:
    parts = [batch_range, last_step]
  return tf.stack(parts, axis=1)
def get_final(sequence, sequence_length, time_major=True):
  """Return the last valid element of every sequence in the batch."""
  # Compute per-sequence coordinates of the final steps, then pick those
  # entries out of `sequence` in a single gather.
  return tf.gather_nd(sequence, _get_final_index(sequence_length, time_major))
def set_final(sequence, sequence_length, values, time_major=False):
  """Sets the final values in a batch of sequences, and clears those after."""
  # Work in batch-major layout; convert back at the end if needed.
  sequence_batch_major = (
      sequence if not time_major else tf.transpose(sequence, [1, 0, 2]))
  final_index = _get_final_index(sequence_length, time_major=False)
  # Mask keeps only the steps strictly before each sequence's final step.
  mask = tf.sequence_mask(
      tf.maximum(0, sequence_length - 1),
      maxlen=sequence_batch_major.shape[1],
      dtype=tf.float32)
  # Zero out the final step and everything after it, then scatter the new
  # final values into place.
  sequence_batch_major = (
      tf.expand_dims(mask, axis=-1) * sequence_batch_major +
      tf.scatter_nd(final_index, values, tf.shape(sequence_batch_major)))
  if time_major:
    return tf.transpose(sequence_batch_major, [1, 0, 2])
  return sequence_batch_major
def initial_cell_state_from_embedding(cell, z, name=None):
  """Computes an initial RNN `cell` state from an embedding, `z`."""
  flat_sizes = nest.flatten(cell.state_size)
  # Project z to the total state width, then carve the projection into one
  # slice per state component.
  projected = tf.layers.dense(
      z,
      sum(flat_sizes),
      activation=tf.tanh,
      kernel_initializer=tf.random_normal_initializer(stddev=0.001),
      name=name)
  state_pieces = tf.split(projected, flat_sizes, axis=1)
  # Repack the flat slices into the cell's (possibly nested) state structure.
  template = cell.zero_state(batch_size=z.shape[0], dtype=tf.float32)
  return nest.pack_sequence_as(template, state_pieces)
def get_sampling_probability(hparams, is_training):
  """Returns the sampling probability as a tensor based on the hparams.

  Supports three sampling schedules (`hparams.sampling_schedule`):
    constant: `hparams.sampling_rate` is the sampling probability. Must be in
      the interval [0, 1].
    exponential: `hparams.sampling_rate` is the base of the decay exponential.
      Must be in the interval (0, 1). Larger values imply a slower increase in
      sampling.
    inverse_sigmoid: `hparams.sampling_rate` is in the interval [1, inf).
      Larger values imply a slower increase in sampling.

  A constant value of 0 is returned if `hparams.sampling_schedule` is
  undefined.

  If not training and a non-0 sampling schedule is defined, a constant value of
  1 is returned since this is assumed to be a test/eval job associated with a
  scheduled sampling trainer.

  Args:
    hparams: An HParams object containing model hyperparameters.
    is_training: Whether or not the model is being used for training.

  Returns:
    A scalar float Tensor with the sampling probability for the current
    global step.

  Raises:
    ValueError: On an invalid `sampling_schedule` or `sampling_rate` hparam.
  """
  if (not hasattr(hparams, 'sampling_schedule') or
      not hparams.sampling_schedule or
      (hparams.sampling_schedule == 'constant' and hparams.sampling_rate == 0)):
    return tf.constant(0.0)

  if not is_training:
    # This is likely an eval/test job associated with a training job using
    # scheduled sampling.
    # NOTE: mutates hparams in place so subsequent calls also see constant:1.0.
    tf.logging.warning(
        'Setting non-training sampling schedule from %s:%f to constant:1.0.',
        hparams.sampling_schedule, hparams.sampling_rate)
    hparams.sampling_schedule = 'constant'
    hparams.sampling_rate = 1.0

  schedule = hparams.sampling_schedule
  rate = hparams.sampling_rate
  # Schedules below are functions of the training step.
  step = tf.to_float(tf.train.get_global_step())

  if schedule == 'constant':
    if not 0 <= rate <= 1:
      raise ValueError(
          '`constant` sampling rate must be in the interval [0, 1]. Got %f.'
          % rate)
    sampling_probability = tf.to_float(rate)
  elif schedule == 'inverse_sigmoid':
    if rate < 1:
      raise ValueError(
          '`inverse_sigmoid` sampling rate must be at least 1. Got %f.' % rate)
    k = tf.to_float(rate)
    # Approaches 1 as step grows; larger k delays the ramp-up.
    sampling_probability = 1.0 - k / (k + tf.exp(step / k))
  elif schedule == 'exponential':
    if not 0 < rate < 1:
      raise ValueError(
          '`exponential` sampling rate must be in the interval (0, 1). Got %f.'
          % hparams.sampling_rate)
    k = tf.to_float(rate)
    # k**step decays toward 0, so sampling probability ramps toward 1.
    sampling_probability = 1.0 - tf.pow(k, step)
  else:
    raise ValueError('Invalid `sampling_schedule`: %s' % schedule)
  tf.summary.scalar('sampling_probability', sampling_probability)
  return sampling_probability
class LstmDecodeResults(
    collections.namedtuple('LstmDecodeResults',
                           ('rnn_input', 'rnn_output', 'samples', 'final_state',
                            'final_sequence_lengths'))):
  """Grouped outputs of an LSTM decoding pass (plain namedtuple)."""
  pass
class Seq2SeqLstmDecoderOutput(
    collections.namedtuple('BasicDecoderOutput',
                           ('rnn_input', 'rnn_output', 'sample_id'))):
  """Per-step decoder output that also carries the step's RNN input."""
  pass
class Seq2SeqLstmDecoder(seq2seq.BasicDecoder):
  """Overrides BaseDecoder to include rnn inputs in the output."""

  def __init__(self, cell, helper, initial_state, input_shape,
               output_layer=None):
    # `input_shape` is kept so output_size can report the rnn_input shape.
    self._input_shape = input_shape
    super(Seq2SeqLstmDecoder, self).__init__(
        cell, helper, initial_state, output_layer)

  @property
  def output_size(self):
    """Shapes of the per-step outputs, including the rnn input."""
    return Seq2SeqLstmDecoderOutput(
        rnn_input=self._input_shape,
        rnn_output=self._rnn_output_size(),
        sample_id=self._helper.sample_ids_shape)

  @property
  def output_dtype(self):
    """Dtypes of the per-step outputs; rnn fields share the state dtype."""
    dtype = nest.flatten(self._initial_state)[0].dtype
    return Seq2SeqLstmDecoderOutput(
        dtype,
        nest.map_structure(lambda _: dtype, self._rnn_output_size()),
        self._helper.sample_ids_dtype)

  def step(self, time, inputs, state, name=None):
    """Runs a BasicDecoder step, re-wrapping its output with the rnn input."""
    results = super(Seq2SeqLstmDecoder, self).step(time, inputs, state, name)
    outputs = Seq2SeqLstmDecoderOutput(
        rnn_input=inputs,
        rnn_output=results[0].rnn_output,
        sample_id=results[0].sample_id)
    return (outputs,) + results[1:]
def maybe_split_sequence_lengths(sequence_length, num_splits, total_length):
  """Validates and splits `sequence_length`, if necessary.

  Returned value must be used in graph for all validations to be executed.

  Args:
    sequence_length: A batch of sequence lengths, either sized `[batch_size]`
      and equal to either 0 or `total_length`, or sized
      `[batch_size, num_splits]`.
    num_splits: The scalar number of splits of the full sequences.
    total_length: The scalar total sequence length (potentially padded).

  Returns:
    sequence_length: If input shape was `[batch_size, num_splits]`, returns the
      same Tensor. Otherwise, returns a Tensor of that shape with each input
      length in the batch divided by `num_splits`.

  Raises:
    ValueError: If `sequence_length` is not shaped `[batch_size]` or
      `[batch_size, num_splits]`.
    tf.errors.InvalidArgumentError: If `sequence_length` is shaped
      `[batch_size]` and all values are not either 0 or `total_length`.
  """
  if sequence_length.shape.ndims == 1:
    if total_length % num_splits != 0:
      raise ValueError(
          '`total_length` must be evenly divisible by `num_splits`.')
    # Vector case: each entry must be all-or-nothing (0 or total_length);
    # the Assert only runs if the returned tensor is used in the graph.
    with tf.control_dependencies(
        [tf.Assert(
            tf.reduce_all(
                tf.logical_or(tf.equal(sequence_length, 0),
                              tf.equal(sequence_length, total_length))),
            data=[sequence_length])]):
      # Spread each full length evenly over the splits.
      sequence_length = (
          tf.tile(tf.expand_dims(sequence_length, axis=1), [1, num_splits]) //
          num_splits)
  elif sequence_length.shape.ndims == 2:
    # Matrix case: already per-split; just bound-check each segment length.
    with tf.control_dependencies([
        tf.assert_less_equal(
            sequence_length,
            tf.constant(total_length // num_splits, tf.int32),
            message='Segment length cannot be more than '
                    '`total_length / num_splits`.')]):
      sequence_length = tf.identity(sequence_length)
    sequence_length.set_shape([sequence_length.shape[0], num_splits])
  else:
    raise ValueError(
        'Sequence lengths must be given as a vector or a 2D Tensor whose '
        'second dimension size matches its initial hierarchical split. Got '
        'shape: %s' % sequence_length.shape.as_list())
  return sequence_length
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Functions to reorder terms within SymbolicOperators'''
import itertools
import numpy
from openfermion.ops.operators import (BosonOperator, FermionOperator,
QuadOperator)
from openfermion.ops.representations import InteractionOperator
def chemist_ordered(fermion_operator):
    r"""Puts a two-body fermion operator in chemist ordering.

    The normal ordering convention for chemists is different. Rather than
    ordering the two-body term as physicists do, as
    $a^\dagger a^\dagger a a$
    the chemist ordering of the two-body term is
    $a^\dagger a a^\dagger a$

    TODO: This routine can be made more efficient.

    Args:
        fermion_operator (FermionOperator): a fermion operator guarenteed to
            have number conserving one- and two-body fermion terms only.

    Returns:
        chemist_ordered_operator (FermionOperator): the input operator
            ordered in the chemistry convention.

    Raises:
        TypeError: Operator is not two-body number conserving.
    """
    # Only operators built from number-conserving one- and two-body terms
    # are supported.
    if not fermion_operator.is_two_body_number_conserving():
        raise TypeError('Operator is not two-body number conserving.')
    normalized = normal_ordered(fermion_operator)
    reordered = FermionOperator()
    for term, coefficient in normalized.terms.items():
        if len(term) in (0, 2):
            # Constant and one-body terms are already in chemist form.
            reordered += FermionOperator(term, coefficient)
            continue
        # Two-body term a^ a^ a a: swapping the middle pair yields the
        # chemist form a^ a a^ a at the cost of a sign flip.
        if term[1][0] == term[2][0]:
            # The swapped operators act on the same mode, so the
            # anticommutator contributes an extra one-body term.
            reordered += FermionOperator((term[0], term[3]), coefficient)
        reordered += FermionOperator((term[0], term[2], term[1], term[3]),
                                     -coefficient)
    return reordered
def normal_ordered(operator, hbar=1.):
    r"""Compute and return the normal ordered form of a FermionOperator,
    BosonOperator, QuadOperator, or InteractionOperator.

    Due to the canonical commutation/anticommutation relations satisfied
    by these operators, there are multiple forms that the same operator
    can take. Here, we define the normal ordered form of each operator,
    providing a distinct representation for distinct operators.

    In our convention, normal ordering implies terms are ordered
    from highest tensor factor (on left) to lowest (on right). In
    addition:

    * FermionOperators: a^\dagger comes before a
    * BosonOperators: b^\dagger comes before b
    * QuadOperators: q operators come before p operators,

    Args:
        operator: an instance of the FermionOperator, BosonOperator,
            QuadOperator, or InteractionOperator classes.
        hbar (float): the value of hbar used in the definition of the
            commutator [q_i, p_j] = i hbar delta_ij. By default hbar=1.
            This argument only applies when normal ordering QuadOperators.
    """
    kwargs = {}
    # Dispatch on operator type: pick the per-term ordering function and
    # the arguments it needs (anticommuting vs. commuting statistics).
    if isinstance(operator, FermionOperator):
        ordered_operator = FermionOperator()
        order_fn = normal_ordered_ladder_term
        kwargs['parity'] = -1
    elif isinstance(operator, BosonOperator):
        ordered_operator = BosonOperator()
        order_fn = normal_ordered_ladder_term
        kwargs['parity'] = 1
    elif isinstance(operator, QuadOperator):
        ordered_operator = QuadOperator()
        order_fn = normal_ordered_quad_term
        kwargs['hbar'] = hbar
    elif isinstance(operator, InteractionOperator):
        # InteractionOperator branch works on the dense tensors directly and
        # returns early; `hbar` does not apply here.
        constant = operator.constant
        n_modes = operator.n_qubits
        one_body_tensor = operator.one_body_tensor.copy()
        two_body_tensor = numpy.zeros_like(operator.two_body_tensor)
        # Enumerate the distinct descending index-pair combinations whose
        # entries must be combined: (pq, pq) diagonal pairs, then the
        # mixed-pair orderings for 3 and 4 distinct modes.
        quadratic_index_pairs = (
            (pq, pq) for pq in itertools.combinations(range(n_modes)[::-1], 2))
        cubic_index_pairs = (
            index_pair
            for p, q, r in itertools.combinations(range(n_modes)[::-1], 3)
            for index_pair in [((p, q), (p, r)), ((p, r), (
                p, q)), ((p, q), (q, r)), ((q, r),
                                           (p, q)), ((p, r),
                                                     (q, r)), ((q, r), (p, r))])
        quartic_index_pairs = (
            index_pair
            for p, q, r, s in itertools.combinations(range(n_modes)[::-1], 4)
            for index_pair in [((p, q), (r, s)), ((r, s), (
                p, q)), ((p, r), (q, s)), ((q, s),
                                           (p, r)), ((p, s),
                                                     (q, r)), ((q, r), (p, s))])
        index_pairs = itertools.chain(quadratic_index_pairs, cubic_index_pairs,
                                      quartic_index_pairs)
        for pq, rs in index_pairs:
            # Combine the four index-reversed entries with alternating signs
            # (s, ss reverse pq and rs respectively when equal to -1).
            two_body_tensor[pq + rs] = sum(
                s * ss * operator.two_body_tensor[pq[::s] + rs[::ss]]
                for s, ss in itertools.product([-1, 1], repeat=2))
        return InteractionOperator(constant, one_body_tensor, two_body_tensor)
    else:
        raise TypeError('Can only normal order FermionOperator, '
                        'BosonOperator, QuadOperator, or InteractionOperator.')

    # Symbolic-operator path: normal order term by term and accumulate.
    for term, coefficient in operator.terms.items():
        ordered_operator += order_fn(term, coefficient, **kwargs)
    return ordered_operator
def normal_ordered_ladder_term(term, coefficient, parity=-1):
    """Return a normal ordered FermionOperator or BosonOperator corresponding
    to single term.

    Args:
        term (list or tuple): A sequence of tuples. The first element of each
            tuple is an integer indicating the mode on which a fermion ladder
            operator acts, starting from zero. The second element of each
            tuple is an integer, either 1 or 0, indicating whether creation
            or annihilation acts on that mode.
        coefficient(complex or float): The coefficient of the term.
        parity (int): parity=-1 corresponds to a Fermionic term that should be
            ordered based on the canonical anti-commutation relations.
            parity=1 corresponds to a Bosonic term that should be ordered based
            on the canonical commutation relations.

    Returns:
        ordered_term: a FermionOperator or BosonOperator instance.
            The normal ordered form of the input.
            Note that this might have more terms.

    In our convention, normal ordering implies terms are ordered
    from highest tensor factor (on left) to lowest (on right).
    Also, ladder operators come first.

    Warning:
        Even assuming that each creation or annihilation operator appears
        at most a constant number of times in the original term, the
        runtime of this method is exponential in the number of qubits.
    """
    # Iterate from left to right across operators and reorder to normal
    # form. Swap terms operators into correct position by moving from
    # left to right across ladder operators.
    term = list(term)
    # NOTE(review): a parity value other than -1 or +1 leaves Op unbound and
    # raises NameError at `Op()` below; callers only pass the two valid values.
    if parity == -1:
        Op = FermionOperator
    elif parity == 1:
        Op = BosonOperator
    ordered_term = Op()
    # Insertion-sort-like sweep: each pass bubbles term[i] leftwards into place.
    for i in range(1, len(term)):
        for j in range(i, 0, -1):
            right_operator = term[j]
            left_operator = term[j - 1]
            # Swap operators if raising on right and lowering on left.
            # Each swap of adjacent a / a^\dagger picks up a factor of parity.
            if right_operator[1] and not left_operator[1]:
                term[j - 1] = right_operator
                term[j] = left_operator
                coefficient *= parity
                # Replace a a^\dagger with 1 + parity*a^\dagger a
                # if indices are the same.
                if right_operator[0] == left_operator[0]:
                    new_term = term[:(j - 1)] + term[(j + 1):]
                    # Recursively add the processed new term (the contraction
                    # from the (anti)commutator).
                    ordered_term += normal_ordered_ladder_term(
                        tuple(new_term), parity * coefficient, parity)
            # Handle case when operator type is the same.
            elif right_operator[1] == left_operator[1]:
                # If same two Fermionic operators are repeated,
                # evaluate to zero (a^2 = (a^\dagger)^2 = 0 for fermions).
                if parity == -1 and right_operator[0] == left_operator[0]:
                    return ordered_term
                # Swap if same ladder type but lower index on left.
                elif right_operator[0] > left_operator[0]:
                    term[j - 1] = right_operator
                    term[j] = left_operator
                    coefficient *= parity
    # Add processed term and return.
    ordered_term += Op(tuple(term), coefficient)
    return ordered_term
def normal_ordered_quad_term(term, coefficient, hbar=1.):
    """Return a normal ordered QuadOperator corresponding to single term.

    Args:
        term: A tuple of tuples. The first element of each tuple is
            an integer indicating the mode on which a boson ladder
            operator acts, starting from zero. The second element of each
            tuple is an integer, either 1 or 0, indicating whether creation
            or annihilation acts on that mode.
        coefficient: The coefficient of the term.
        hbar (float): the value of hbar used in the definition of the
            commutator [q_i, p_j] = i hbar delta_ij. By default hbar=1.

    Returns:
        ordered_term (QuadOperator): The normal ordered form of the input.
            Note that this might have more terms.

    In our convention, normal ordering implies terms are ordered
    from highest tensor factor (on left) to lowest (on right).
    Also, q operators come first.
    """
    # Iterate from left to right across operators and reorder to normal
    # form. Swap terms operators into correct position by moving from
    # left to right across ladder operators.
    term = list(term)
    ordered_term = QuadOperator()
    # Insertion-sort-like sweep, analogous to normal_ordered_ladder_term but
    # for 'q'/'p' quadrature operators instead of raising/lowering operators.
    for i in range(1, len(term)):
        for j in range(i, 0, -1):
            right_operator = term[j]
            left_operator = term[j - 1]
            # Swap operators if q on right and p on left.
            # p q -> q p   (q and p on different modes commute, no factor)
            if right_operator[1] == 'q' and not left_operator[1] == 'q':
                term[j - 1] = right_operator
                term[j] = left_operator
                # Replace p q with i hbar + q p
                # if indices are the same: [q_i, p_i] = i hbar, so the
                # contracted remainder enters with coefficient -i*hbar*coeff.
                if right_operator[0] == left_operator[0]:
                    new_term = term[:(j - 1)] + term[(j + 1)::]
                    # Recursively add the processed new term.
                    ordered_term += normal_ordered_quad_term(
                        tuple(new_term), -coefficient * 1j * hbar)
            # Handle case when operator type is the same.
            elif right_operator[1] == left_operator[1]:
                # Swap if same type but lower index on left
                # (same-type operators on different modes commute freely).
                if right_operator[0] > left_operator[0]:
                    term[j - 1] = right_operator
                    term[j] = left_operator
    # Add processed term and return.
    ordered_term += QuadOperator(tuple(term), coefficient)
    return ordered_term
def reorder(operator, order_function, num_modes=None, reverse=False):
    """Changes the ladder operator order of the Hamiltonian based on the
    provided order_function per mode index.

    Args:
        operator (SymbolicOperator): the operator that will be reordered. must
            be a SymbolicOperator or any type of operator that inherits from
            SymbolicOperator.
        order_function (func): a function per mode that is used to map the
            indexing. must have arguments mode index and num_modes.
        num_modes (int): default None. User can provide the number of modes
            assumed for the system. if None, the number of modes will be
            calculated based on the Operator.
        reverse (bool): default False. if set to True, the mode mapping is
            reversed. reverse = True will not revert back to original if
            num_modes calculated differs from original and reverted.

    Note: Every order function must take in a mode_idx and num_modes.
    """
    # Infer the mode count from the highest mode index present in any term.
    if num_modes is None:
        highest_mode = max(
            [factor[0] for term in operator.terms for factor in term])
        num_modes = highest_mode + 1

    # Forward mapping: old mode index -> new mode index.
    mode_map = {}
    for old_idx in range(num_modes):
        mode_map[old_idx] = order_function(old_idx, num_modes)

    # Invert the mapping when the reversed ordering is requested.
    if reverse:
        mode_map = dict((new_idx, old_idx)
                        for old_idx, new_idx in mode_map.items())

    # Rebuild the operator term by term with remapped mode indices.
    reordered = operator.__class__()
    for term, coefficient in operator.terms.items():
        remapped = tuple((mode_map[factor[0]], factor[1]) for factor in term)
        reordered += operator.__class__(remapped, coefficient)
    return reordered
| |
#!/usr/bin/python2.1
import sys
from xml.dom.ext import SplitQName
from xml.sax.handler import ContentHandler
from xml.sax.saxutils import escape
# Role indices into the decoration tables below (open/close markup pairs).
_ROOT, _STRING, _COMMENT, _NAME, _KEYWORD, _TEXT, _HEAD = 0, 1, 2, 3, 4, 5, 6

# Markup wrappers per syntactic role for DocBook output.
DOCBOOK = {
    _ROOT: ('<programlisting>', '</programlisting>'),
    _STRING: ('<emphasis>', '</emphasis>'),
    _COMMENT: ('<emphasis>', '</emphasis>'),
    _NAME: ('', ''),
    _KEYWORD: ('<emphasis role="bold">', '</emphasis>'),
    _TEXT: ('', ''),
}

# FIX: the original had "} HTML = {" fused onto one line (two statements on
# a single line is a SyntaxError); split into two separate assignments.
# Markup wrappers per syntactic role for HTML output.
HTML = {
    _ROOT: ('<div>', '</div>'),
    _STRING: ('<font color="#004080">', '</font>'),
    _COMMENT: ('<font color="#008000">', '</font>'),
    _NAME: ('', ''),
    _KEYWORD: ('<font color="#C00000">', '</font>'),
    _TEXT: ('', ''),
}
class XmlFormatSaxHandler(ContentHandler):
    ''' format an xmlfile to docbook or html

    SAX content / lexical / declaration handler that echoes the parsed
    document to ``output``, wrapping each syntactic role (names, strings,
    comments, keywords) in the decoration markup chosen via set_format().
    '''

    def __init__(self, head=1, output=sys.stdout, encoding='UTF-8'):
        # ``head`` is unused here; kept for interface compatibility.
        self._out = output
        self._cod = encoding
        self._o_d = DOCBOOK
        self._in_cdata = 0
        self._in_entity = 0

    def set_format(self, format):
        '''select the decoration table: "docbook" (default) or "html"'''
        if format == 'docbook':
            self._o_d = DOCBOOK
        if format == 'html':
            self._o_d = HTML

    ## content handler #####################################################
    def startDocument(self):
        self._out.write(self._o_d[_ROOT][0])

    def endDocument(self):
        self._out.write(self._o_d[_ROOT][1])

    def startElement(self, name, attrs):
        prefix, local = SplitQName(name)
        if prefix:
            self._out.write('<%s%s%s:%s%s%s'.encode(self._cod) % (
                self._o_d[_KEYWORD][0], prefix, self._o_d[_KEYWORD][1],
                self._o_d[_NAME][0], local, self._o_d[_NAME][1]))
        else:
            self._out.write('<%s%s%s'.encode(self._cod) % (
                self._o_d[_NAME][0], local, self._o_d[_NAME][1]))
        for key, val in attrs.items():
            prefix, local = SplitQName(key)
            if prefix:
                ## FIX: a leading space was missing before prefixed
                ## attributes, gluing them to the element name (the
                ## unprefixed branch below already had it).
                self._out.write(' %s%s%s:%s%s%s=%s"%s"%s'.encode(self._cod) % (
                    self._o_d[_KEYWORD][0], prefix, self._o_d[_KEYWORD][1],
                    self._o_d[_NAME][0], local, self._o_d[_NAME][1],
                    self._o_d[_STRING][0], val, self._o_d[_STRING][1]))
            else:
                self._out.write(' %s%s%s=%s"%s"%s'.encode(self._cod) % (
                    self._o_d[_NAME][0], local, self._o_d[_NAME][1],
                    self._o_d[_STRING][0], val, self._o_d[_STRING][1]))
        self._out.write('>')

    def endElement(self, name):
        prefix, local = SplitQName(name)
        if prefix:
            self._out.write('</%s%s%s:%s%s%s>'.encode(self._cod) % (
                self._o_d[_KEYWORD][0], prefix, self._o_d[_KEYWORD][1],
                self._o_d[_NAME][0], local, self._o_d[_NAME][1]))
        else:
            self._out.write('</%s%s%s>'.encode(self._cod) % (
                self._o_d[_NAME][0], local, self._o_d[_NAME][1]))

    def processingInstruction(self, target, data):
        self._out.write('<?%s%s%s %s%s%s>'.encode(self._cod) % (
            self._o_d[_NAME][0], target, self._o_d[_NAME][1],
            self._o_d[_STRING][0], data, self._o_d[_STRING][1]))

    def characters(self, ch):
        # Text inside an entity reference was already written by
        # startEntity; text inside CDATA must not be escaped.
        if self._in_entity:
            return
        elif not self._in_cdata:
            ch = escape(ch)
        self._out.write('%s%s%s' % (
            self._o_d[_TEXT][0], ch.encode(self._cod), self._o_d[_TEXT][1]))

    ## lexical handler #####################################################
    def comment(self, comment):
        ## FIX: the original replace('<', '<') was a no-op (presumably a
        ## mangled '&lt;'); escape '<' so comment text cannot break markup.
        self._out.write('%s<!--%s-->%s' % (
            self._o_d[_COMMENT][0],
            comment.replace('<', '&lt;').encode(self._cod),
            self._o_d[_COMMENT][1]))

    def startCDATA(self):
        self._out.write('<%s[CDATA[%s' % (
            self._o_d[_KEYWORD][0], self._o_d[_KEYWORD][1]))
        self._in_cdata = 1

    def endCDATA(self):
        self._out.write('%s]]%s>' % (
            self._o_d[_KEYWORD][0], self._o_d[_KEYWORD][1]))
        self._in_cdata = 0

    def startDTD(self, name, public_id, system_id):
        self._out.write('<%s!DOCTYPE%s %s'.encode(self._cod) % (
            self._o_d[_KEYWORD][0], self._o_d[_KEYWORD][1], name))
        if public_id:
            self._out.write(' PUBLIC %s"%s"%s %s"%s"%s['.encode(self._cod) % (
                self._o_d[_STRING][0], public_id, self._o_d[_STRING][1],
                self._o_d[_STRING][0], system_id, self._o_d[_STRING][1]))
        else:
            self._out.write(' SYSTEM %s"%s"%s ['.encode(self._cod) % (
                self._o_d[_STRING][0], system_id, self._o_d[_STRING][1]))

    def endDTD(self):
        self._out.write(']>')

    def startEntity(self, name):
        self._out.write('%s&%s;%s'.encode(self._cod) % (
            self._o_d[_NAME][0], name, self._o_d[_NAME][1]))
        self._in_entity = 1

    def endEntity(self, name):
        self._in_entity = 0

    ## decl handler ########################################################
    def internalEntityDecl(self, name, value):
        ## FIX: the original referenced undefined public_id/system_id and
        ## contained an unterminated string literal (SyntaxError). An
        ## internal entity declaration is simply <!ENTITY name "value">.
        self._out.write('<%s!ENTITY%s %s %s"%s"%s>'.encode(self._cod) % (
            self._o_d[_KEYWORD][0], self._o_d[_KEYWORD][1], name,
            self._o_d[_STRING][0], value, self._o_d[_STRING][1]))

    def externalEntityDecl(self, name, public_id, system_id):
        self._out.write('<%s!ENTITY%s %s'.encode(self._cod) % (
            self._o_d[_KEYWORD][0], self._o_d[_KEYWORD][1], name))
        if public_id:
            self._out.write(' PUBLIC %s"%s"%s %s"%s"%s>'.encode(self._cod)%(
                self._o_d[_STRING][0], public_id, self._o_d[_STRING][1],
                self._o_d[_STRING][0], system_id, self._o_d[_STRING][1]))
        else:
            self._out.write(' SYSTEM %s"%s"%s>'.encode(self._cod) % (
                self._o_d[_STRING][0], system_id, self._o_d[_STRING][1]))

    def elementDecl(self, elem_name, content_model):
        c_m = _decode_content_model(content_model)
        self._out.write('<%s!ELEMENT%s %s %s>'.encode(self._cod) % (
            self._o_d[_KEYWORD][0], self._o_d[_KEYWORD][1], elem_name,
            c_m))

    def attributeDecl(self, elem_name, attr_name, type_d, value_def, value):
        import types
        if type(type_d) is types.ListType:
            # Enumerated attribute type: render as (tok1|tok2|...).
            s = ''
            for pos in type_d:
                if not s:
                    s = '(%s' % pos
                else:
                    s = '%s|%s' % (s, pos)
            s = '%s)' % s
            self._out.write('<%s!ATTLIST%s %s %s %s%s>'.encode(self._cod)%(
                self._o_d[_KEYWORD][0], self._o_d[_KEYWORD][1], elem_name,
                attr_name, s, value_def))
        else:
            ## FIX: the original formatted the *builtin* ``type`` instead of
            ## the ``type_d`` argument and dropped ``value_def``; mirror the
            ## enumerated branch above.
            self._out.write('<%s!ATTLIST%s %s %s %s%s>'.encode(self._cod)%(
                self._o_d[_KEYWORD][0], self._o_d[_KEYWORD][1], elem_name,
                attr_name, type_d, value_def))
# Tuple layout of a parsed content model: (operator, children, occurrence).
C_OP, C_VAL, C_NUM = 0, 1, 2


def _decode_content_model(content_m):
    ''' recursively decode a content_model returned by parsers in
    elementDecl '''
    op = content_m[C_OP]
    if op == ',' or op == '|':
        # Sequence (',') joins children with ", "; choice ('|') with "|".
        separator = ', ' if op == ',' else '|'
        children = [_decode_content_model(child) for child in content_m[C_VAL]]
        if not children:
            # Preserves the original's (degenerate) output for an empty
            # child list: no opening parenthesis was ever emitted.
            return ')%s' % content_m[C_NUM]
        return '(%s)%s' % (separator.join(children), content_m[C_NUM])
    # Leaf: a bare name followed by its occurrence indicator.
    return '%s%s' % (op, content_m[-1])
# FIX: the blank line separating the usage block from the options block was
# lost, fusing "and exit" with "Options:" on one line; restore the layout.
USAGE = '''xml2dcbk: format xml source code to xml docbook using roles

Usage: xml2dcbk [options] source.py..., parse XML file(s)
       xml2dcbk -h/--help, print this help message and exit

Options:
  -e/--encoding iso-8859-1, specify encoding to use in outputs
  -d/--docbook, format output as docbook xml (default)
  -w/--html, format output in html instead of docbook
'''
def run(args):
import getopt, os
from xml.sax import make_parser
from xml.sax.handler import property_lexical_handler,\
property_declaration_handler
## get options
(opt, args) = getopt.getopt(args, 'he:dw',
['help', 'encoding=', 'docbook', 'html'])
encod, format = 'UTF-8', 'docbook'
for o in opt:
if o[0] == '-h' or o[0] == '--help':
print USAGE
return
elif o[0] == '-d' or o[0] == '--docbook':
format = 'docbook'
elif o[0] == '-w' or o[0] == '--html':
format = 'html'
elif o[0] == '-e' or o[0] == '--encoding':
encod = o[1]
## transforms source files (xmlproc support property_lexical_handler while
## pyexpat doen't)
p = make_parser(['xml.sax.drivers2.drv_xmlproc'])
for file in args:
source = open(file, 'r')
## prepare handler
if file[-4:] != '.xml':
print >>sys.stderr, 'Unknown extension %s, ignored file %s'%(
file[-4:], file)
continue
dest = open('%s_dcbk.xml' % os.path.basename(file)[:-4], 'w+')
h = XmlFormatSaxHandler(dest, encod)
h.set_format(format)
p.setContentHandler(h)
try:
p.setProperty(property_lexical_handler, h)
except Exception, e:
print e
try:
p.setProperty(property_declaration_handler, h)
except Exception, e:
print e
print >>sys.stderr, "Formatting %s ..." % file
## parse and write colorized version to output file
p.parse(source)
source.close()
dest.close()
if __name__ == "__main__":
run(sys.argv[1:])
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class systemglobal_auditnslogpolicy_binding(base_resource) :
    """ Binding class showing the auditnslogpolicy that can be bound to systemglobal.

    Generated NITRO SDK binding: attribute names must match the JSON payload
    keys used by the payload formatter, so do not rename them.
    """
    def __init__(self) :
        # Backing fields for the properties below.
        self._policyname = ""
        self._priority = 0
        self._builtin = []
        # NOTE(review): the leading double underscores make Python mangle this
        # to '_systemglobal_auditnslogpolicy_binding___count' on the instance,
        # while count()/count_filtered() read the literal '___count' key from
        # the response object's __dict__ (set by the payload formatter) --
        # verify against nitro_util before changing.
        self.___count = 0

    @property
    def priority(self) :
        """The priority of the command policy.
        """
        try :
            return self._priority
        except Exception as e:
            raise e

    @priority.setter
    def priority(self, priority) :
        """The priority of the command policy.
        """
        try :
            self._priority = priority
        except Exception as e:
            raise e

    @property
    def builtin(self) :
        """Indicates that a variable is a built-in (SYSTEM INTERNAL) type.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE, PARTITION_ALL.
        """
        try :
            return self._builtin
        except Exception as e:
            raise e

    @builtin.setter
    def builtin(self, builtin) :
        """Indicates that a variable is a built-in (SYSTEM INTERNAL) type.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE, PARTITION_ALL
        """
        try :
            self._builtin = builtin
        except Exception as e:
            raise e

    @property
    def policyname(self) :
        """The name of the command policy.
        """
        try :
            return self._policyname
        except Exception as e:
            raise e

    @policyname.setter
    def policyname(self, policyname) :
        """The name of the command policy.
        """
        try :
            self._policyname = policyname
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        """ converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(systemglobal_auditnslogpolicy_binding_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # NOTE(review): 444 appears to signal an invalidated session,
                # hence the clear_session call -- confirm against the NITRO
                # error-code reference.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.systemglobal_auditnslogpolicy_binding
        except Exception as e :
            raise e

    def _get_object_name(self) :
        """ Returns the value of object identifier argument
        """
        try :
            # Global bindings have no per-object identifier.
            return None
        except Exception as e :
            raise e

    @classmethod
    def add(cls, client, resource) :
        # Bind one policy (single resource) or many (list) to systemglobal.
        try :
            if resource and type(resource) is not list :
                updateresource = systemglobal_auditnslogpolicy_binding()
                updateresource.policyname = resource.policyname
                return updateresource.update_resource(client)
            else :
                if resource and len(resource) > 0 :
                    updateresources = [systemglobal_auditnslogpolicy_binding() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        updateresources[i].policyname = resource[i].policyname
                return cls.update_bulk_request(client, updateresources)
        except Exception as e :
            raise e

    @classmethod
    def delete(cls, client, resource) :
        # Unbind one policy (single resource) or many (list).
        try :
            if resource and type(resource) is not list :
                deleteresource = systemglobal_auditnslogpolicy_binding()
                deleteresource.policyname = resource.policyname
                return deleteresource.delete_resource(client)
            else :
                if resource and len(resource) > 0 :
                    deleteresources = [systemglobal_auditnslogpolicy_binding() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        deleteresources[i].policyname = resource[i].policyname
                return cls.delete_bulk_request(client, deleteresources)
        except Exception as e :
            raise e

    @classmethod
    def get(cls, service) :
        """ Use this API to fetch a systemglobal_auditnslogpolicy_binding resources.
        """
        try :
            obj = systemglobal_auditnslogpolicy_binding()
            response = obj.get_resources(service)
            return response
        except Exception as e:
            raise e

    @classmethod
    def get_filtered(cls, service, filter_) :
        """ Use this API to fetch filtered set of systemglobal_auditnslogpolicy_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = systemglobal_auditnslogpolicy_binding()
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            return response
        except Exception as e:
            raise e

    @classmethod
    def count(cls, service) :
        """ Use this API to count systemglobal_auditnslogpolicy_binding resources configued on NetScaler.
        """
        try :
            obj = systemglobal_auditnslogpolicy_binding()
            option_ = options()
            option_.count = True
            response = obj.get_resources(service, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    @classmethod
    def count_filtered(cls, service, filter_) :
        """ Use this API to count the filtered set of systemglobal_auditnslogpolicy_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = systemglobal_auditnslogpolicy_binding()
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    class Builtin:
        # Allowed values for the 'builtin' attribute.
        MODIFIABLE = "MODIFIABLE"
        DELETABLE = "DELETABLE"
        IMMUTABLE = "IMMUTABLE"
        PARTITION_ALL = "PARTITION_ALL"
class systemglobal_auditnslogpolicy_binding_response(base_response) :
    """Response wrapper: a list of binding resources plus NITRO status fields
    (errorcode/message/severity/sessionid) filled in by the payload formatter.
    """
    def __init__(self, length=1) :
        # FIX: the original first assigned an empty list to
        # 'systemglobal_auditnslogpolicy_binding' and then immediately
        # overwrote it below; the dead assignment is removed.
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate 'length' empty binding objects for deserialization.
        self.systemglobal_auditnslogpolicy_binding = [systemglobal_auditnslogpolicy_binding() for _ in range(length)]
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# THIRD PARTY
import pytest
# LOCAL
from astropy.cosmology import Cosmology, Planck18
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.io.table import from_table, to_table
from astropy.table import QTable, Table, vstack
from .base import ToFromDirectTestBase, ToFromTestMixinBase
###############################################################################
class ToFromTableTestMixin(ToFromTestMixinBase):
    """
    Tests for a Cosmology[To/From]Format with ``format="astropy.table"``.
    This class will not be directly called by :mod:`pytest` since its name does
    not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
    ``cosmo`` that returns/yields an instance of a |Cosmology|.
    See ``TestCosmology`` for an example.
    """

    def test_to_table_bad_index(self, from_format, to_format):
        """Test if argument ``index`` is incorrect"""
        tbl = to_format("astropy.table")
        # single-row table and has a non-0/None index
        with pytest.raises(IndexError, match="index 2 out of range"):
            from_format(tbl, index=2, format="astropy.table")
        # string index where doesn't match
        with pytest.raises(KeyError, match="No matches found for key"):
            from_format(tbl, index="row 0", format="astropy.table")

    # -----------------------

    def test_to_table_failed_cls(self, to_format):
        """Test failed table type."""
        with pytest.raises(TypeError, match="'cls' must be"):
            to_format('astropy.table', cls=list)

    @pytest.mark.parametrize("tbl_cls", [QTable, Table])
    def test_to_table_cls(self, to_format, tbl_cls):
        # ``cls`` selects the concrete table class of the output.
        tbl = to_format('astropy.table', cls=tbl_cls)
        assert isinstance(tbl, tbl_cls)  # test type

    # -----------------------

    @pytest.mark.parametrize("in_meta", [True, False])
    def test_to_table_in_meta(self, cosmo_cls, to_format, in_meta):
        """Test where the cosmology class is placed."""
        tbl = to_format('astropy.table', cosmology_in_meta=in_meta)
        # if it's in metadata, it's not a column. And vice versa.
        if in_meta:
            assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
            assert "cosmology" not in tbl.colnames  # not also a column
        else:
            assert tbl["cosmology"][0] == cosmo_cls.__qualname__
            assert "cosmology" not in tbl.meta

    # -----------------------

    def test_to_table(self, cosmo_cls, cosmo, to_format):
        """Test cosmology -> astropy.table."""
        tbl = to_format("astropy.table")
        # Test properties of Table.
        assert isinstance(tbl, QTable)
        assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
        assert tbl["name"] == cosmo.name
        assert tbl.indices  # indexed
        # Test each Parameter column has expected information.
        for n in cosmo.__parameters__:
            P = getattr(cosmo_cls, n)  # Parameter
            col = tbl[n]  # Column
            # Compare the two
            assert col.info.name == P.name
            assert col.info.description == P.__doc__
            assert col.info.format == (None if col[0] is None else P.format_spec)
            assert col.info.meta == (cosmo.meta.get(n) or {})

    # -----------------------

    def test_from_not_table(self, cosmo, from_format):
        """Test not passing a Table to the Table parser."""
        with pytest.raises((TypeError, ValueError)):
            from_format("NOT A TABLE", format="astropy.table")

    def test_tofrom_table_instance(self, cosmo_cls, cosmo, from_format, to_format):
        """Test cosmology -> astropy.table -> cosmology."""
        tbl = to_format("astropy.table")
        # add information
        tbl["mismatching"] = "will error"
        # tests are different if the last argument is a **kwarg
        # (4 == inspect.Parameter.VAR_KEYWORD)
        if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
            got = from_format(tbl, format="astropy.table")
            assert got.__class__ is cosmo_cls
            assert got.name == cosmo.name
            assert "mismatching" not in got.meta
            return  # don't continue testing
        # read with mismatching parameters errors
        with pytest.raises(TypeError, match="there are unused parameters"):
            from_format(tbl, format="astropy.table")
        # unless mismatched are moved to meta
        got = from_format(tbl, format="astropy.table", move_to_meta=True)
        assert got == cosmo
        assert got.meta["mismatching"] == "will error"
        # it won't error if everything matches up
        tbl.remove_column("mismatching")
        got = from_format(tbl, format="astropy.table")
        assert got == cosmo
        # and it will also work if the cosmology is a class
        # Note this is not the default output of ``to_format``.
        tbl.meta["cosmology"] = _COSMOLOGY_CLASSES[tbl.meta["cosmology"]]
        got = from_format(tbl, format="astropy.table")
        assert got == cosmo
        # also it auto-identifies 'format'
        got = from_format(tbl)
        assert got == cosmo

    def test_fromformat_table_subclass_partial_info(self, cosmo_cls, cosmo,
                                                    from_format, to_format):
        """
        Test writing from an instance and reading from that class.
        This works with missing information.
        """
        # test to_format
        tbl = to_format("astropy.table")
        assert isinstance(tbl, QTable)
        # partial information
        tbl.meta.pop("cosmology", None)
        del tbl["Tcmb0"]
        # read with the same class that wrote fills in the missing info with
        # the default value
        got = cosmo_cls.from_format(tbl, format="astropy.table")
        got2 = from_format(tbl, format="astropy.table", cosmology=cosmo_cls)
        got3 = from_format(tbl, format="astropy.table", cosmology=cosmo_cls.__qualname__)
        assert (got == got2) and (got2 == got3)  # internal consistency
        # not equal, because Tcmb0 is changed, which also changes m_nu
        assert got != cosmo
        assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default
        assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
        # but the metadata is the same
        assert got.meta == cosmo.meta

    @pytest.mark.parametrize("add_index", [True, False])
    def test_tofrom_table_mutlirow(self, cosmo_cls, cosmo, from_format, add_index):
        """Test if table has multiple rows."""
        # ------------
        # To Table
        cosmo1 = cosmo.clone(name="row 0")
        cosmo2 = cosmo.clone(name="row 2")
        tbl = vstack([c.to_format("astropy.table") for c in (cosmo1, cosmo, cosmo2)],
                     metadata_conflicts='silent')
        assert isinstance(tbl, QTable)
        assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
        assert tbl[1]["name"] == cosmo.name
        # whether to add an index. `from_format` can work with or without.
        if add_index:
            tbl.add_index("name", unique=True)
        # ------------
        # From Table
        # it will error on a multi-row table
        with pytest.raises(ValueError, match="need to select a specific row"):
            from_format(tbl, format="astropy.table")
        # unless the index argument is provided
        got = from_format(tbl, index=1, format="astropy.table")
        assert got == cosmo
        # the index can be a string
        got = from_format(tbl, index=cosmo.name, format="astropy.table")
        assert got == cosmo
        # when there's more than one cosmology found
        tbls = vstack([tbl, tbl], metadata_conflicts="silent")
        with pytest.raises(ValueError, match="more than one"):
            from_format(tbls, index=cosmo.name, format="astropy.table")

    @pytest.mark.parametrize("format", [True, False, None, "astropy.table"])
    def test_is_equivalent_to_table(self, cosmo, to_format, format):
        """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
        This test checks that Cosmology equivalency can be extended to any
        Python object that can be converted to a Cosmology -- in this case
        a |Table|.
        """
        obj = to_format("astropy.table")
        assert not isinstance(obj, Cosmology)
        # Only format=False should disable conversion-based equivalence.
        is_equiv = cosmo.is_equivalent(obj, format=format)
        assert is_equiv is (True if format is not False else False)
class TestToFromTable(ToFromDirectTestBase, ToFromTableTestMixin):
    """Directly test ``to/from_table``."""

    def setup_class(self):
        # The shared mixin dispatches through this mapping so the same tests
        # exercise the module-level functions directly (not via Cosmology
        # to_format/from_format).
        self.functions = {"to": to_table, "from": from_table}
| |
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy as oslo_policy
from oslo_utils import excutils
import six
import webob.exc
from neutron._i18n import _, _LE, _LI
from neutron.api import api_common
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.api.v2 import resource as wsgi_resource
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.db import api as db_api
from neutron import policy
from neutron import quota
from neutron.quota import resource_registry
LOG = logging.getLogger(__name__)

# Translation table from neutron/library exceptions to the webob HTTP error
# classes returned to API clients.
FAULT_MAP = {exceptions.NotFound: webob.exc.HTTPNotFound,
             exceptions.Conflict: webob.exc.HTTPConflict,
             exceptions.InUse: webob.exc.HTTPConflict,
             exceptions.BadRequest: webob.exc.HTTPBadRequest,
             exceptions.ServiceUnavailable: webob.exc.HTTPServiceUnavailable,
             exceptions.NotAuthorized: webob.exc.HTTPForbidden,
             netaddr.AddrFormatError: webob.exc.HTTPBadRequest,
             oslo_policy.PolicyNotAuthorized: webob.exc.HTTPForbidden
             }
class Controller(object):
LIST = 'list'
SHOW = 'show'
CREATE = 'create'
UPDATE = 'update'
DELETE = 'delete'
    def __init__(self, plugin, collection, resource, attr_info,
                 allow_bulk=False, member_actions=None, parent=None,
                 allow_pagination=False, allow_sorting=False):
        """Wire the WSGI controller to its plugin and resource metadata.

        :param plugin: plugin instance implementing the CRUD handlers
        :param collection: plural resource name (as used in URLs)
        :param resource: singular resource name
        :param attr_info: per-attribute metadata dict (policy, visibility, ...)
        :param parent: optional parent resource descriptor for sub-resources
        """
        if member_actions is None:
            member_actions = []
        self._plugin = plugin
        # URL names use dashes; plugin handler names use underscores.
        self._collection = collection.replace('-', '_')
        self._resource = resource.replace('-', '_')
        self._attr_info = attr_info
        self._allow_bulk = allow_bulk
        self._allow_pagination = allow_pagination
        self._allow_sorting = allow_sorting
        # Whether the plugin natively supports bulk/pagination/sorting
        # (advertised via _<PluginClass>__native_*_support attributes).
        self._native_bulk = self._is_native_bulk_supported()
        self._native_pagination = self._is_native_pagination_supported()
        self._native_sorting = self._is_native_sorting_supported()
        # Attributes that authZ policies need even if not requested.
        self._policy_attrs = [name for (name, info) in self._attr_info.items()
                              if info.get('required_by_policy')]
        self._notifier = n_rpc.get_notifier('network')
        # use plugin's dhcp notifier, if this is already instantiated
        agent_notifiers = getattr(plugin, 'agent_notifiers', {})
        self._dhcp_agent_notifier = (
            agent_notifiers.get(const.AGENT_TYPE_DHCP) or
            dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        )
        if cfg.CONF.notify_nova_on_port_data_changes:
            # Imported lazily so nova support stays optional.
            from neutron.notifiers import nova
            self._nova_notifier = nova.Notifier()
        self._member_actions = member_actions
        self._primary_key = self._get_primary_key()
        if self._allow_pagination and self._native_pagination:
            # Native pagination need native sorting support
            if not self._native_sorting:
                raise exceptions.Invalid(
                    _("Native pagination depend on native sorting")
                )
            if not self._allow_sorting:
                LOG.info(_LI("Allow sorting is enabled because native "
                             "pagination requires native sorting"))
                self._allow_sorting = True
        # Sub-resources route through 'get_<parent>_<resource>'-style handlers.
        if parent:
            self._parent_id_name = '%s_id' % parent['member_name']
            parent_part = '_%s' % parent['member_name']
        else:
            self._parent_id_name = None
            parent_part = ''
        self._plugin_handlers = {
            self.LIST: 'get%s_%s' % (parent_part, self._collection),
            self.SHOW: 'get%s_%s' % (parent_part, self._resource)
        }
        for action in [self.CREATE, self.UPDATE, self.DELETE]:
            self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part,
                                                         self._resource)
def _get_primary_key(self, default_primary_key='id'):
for key, value in six.iteritems(self._attr_info):
if value.get('primary_key', False):
return key
return default_primary_key
def _is_native_bulk_supported(self):
native_bulk_attr_name = ("_%s__native_bulk_support"
% self._plugin.__class__.__name__)
return getattr(self._plugin, native_bulk_attr_name, False)
def _is_native_pagination_supported(self):
native_pagination_attr_name = ("_%s__native_pagination_support"
% self._plugin.__class__.__name__)
return getattr(self._plugin, native_pagination_attr_name, False)
def _is_native_sorting_supported(self):
native_sorting_attr_name = ("_%s__native_sorting_support"
% self._plugin.__class__.__name__)
return getattr(self._plugin, native_sorting_attr_name, False)
def _exclude_attributes_by_policy(self, context, data):
"""Identifies attributes to exclude according to authZ policies.
Return a list of attribute names which should be stripped from the
response returned to the user because the user is not authorized
to see them.
"""
attributes_to_exclude = []
for attr_name in data.keys():
attr_data = self._attr_info.get(attr_name)
if attr_data and attr_data['is_visible']:
if policy.check(
context,
'%s:%s' % (self._plugin_handlers[self.SHOW], attr_name),
data,
might_not_exist=True,
pluralized=self._collection):
# this attribute is visible, check next one
continue
# if the code reaches this point then either the policy check
# failed or the attribute was not visible in the first place
attributes_to_exclude.append(attr_name)
return attributes_to_exclude
def _view(self, context, data, fields_to_strip=None):
"""Build a view of an API resource.
:param context: the neutron context
:param data: the object for which a view is being created
:param fields_to_strip: attributes to remove from the view
:returns: a view of the object which includes only attributes
visible according to API resource declaration and authZ policies.
"""
fields_to_strip = ((fields_to_strip or []) +
self._exclude_attributes_by_policy(context, data))
return self._filter_attributes(context, data, fields_to_strip)
def _filter_attributes(self, context, data, fields_to_strip=None):
if not fields_to_strip:
return data
return dict(item for item in six.iteritems(data)
if (item[0] not in fields_to_strip))
def _do_field_list(self, original_fields):
fields_to_add = None
# don't do anything if fields were not specified in the request
if original_fields:
fields_to_add = [attr for attr in self._policy_attrs
if attr not in original_fields]
original_fields.extend(self._policy_attrs)
return original_fields, fields_to_add
def __getattr__(self, name):
if name in self._member_actions:
@db_api.retry_db_errors
def _handle_action(request, id, **kwargs):
arg_list = [request.context, id]
# Ensure policy engine is initialized
policy.init()
# Fetch the resource and verify if the user can access it
try:
parent_id = kwargs.get(self._parent_id_name)
resource = self._item(request,
id,
do_authz=True,
field_list=None,
parent_id=parent_id)
except oslo_policy.PolicyNotAuthorized:
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
body = copy.deepcopy(kwargs.pop('body', None))
# Explicit comparison with None to distinguish from {}
if body is not None:
arg_list.append(body)
# It is ok to raise a 403 because accessibility to the
# object was checked earlier in this method
policy.enforce(request.context,
name,
resource,
pluralized=self._collection)
ret_value = getattr(self._plugin, name)(*arg_list, **kwargs)
# It is simply impossible to predict whether one of this
# actions alters resource usage. For instance a tenant port
# is created when a router interface is added. Therefore it is
# important to mark as dirty resources whose counters have
# been altered by this operation
resource_registry.set_resources_dirty(request.context)
return ret_value
return _handle_action
else:
raise AttributeError()
def _get_pagination_helper(self, request):
if self._allow_pagination and self._native_pagination:
return api_common.PaginationNativeHelper(request,
self._primary_key)
elif self._allow_pagination:
return api_common.PaginationEmulatedHelper(request,
self._primary_key)
return api_common.NoPaginationHelper(request, self._primary_key)
def _get_sorting_helper(self, request):
if self._allow_sorting and self._native_sorting:
return api_common.SortingNativeHelper(request, self._attr_info)
elif self._allow_sorting:
return api_common.SortingEmulatedHelper(request, self._attr_info)
return api_common.NoSortingHelper(request, self._attr_info)
    def _items(self, request, do_authz=False, parent_id=None):
        """Retrieves and formats a list of elements of the requested entity.

        Applies field selection, filtering, sorting, pagination and (when
        *do_authz* is True) per-item policy checks before building the
        response collection.
        """
        # NOTE(salvatore-orlando): The following ensures that fields which
        # are needed for authZ policy validation are not stripped away by the
        # plugin before returning.
        original_fields, fields_to_add = self._do_field_list(
            api_common.list_args(request, 'fields'))
        filters = api_common.get_filters(request, self._attr_info,
                                         ['fields', 'sort_key', 'sort_dir',
                                          'limit', 'marker', 'page_reverse'])
        kwargs = {'filters': filters,
                  'fields': original_fields}
        sorting_helper = self._get_sorting_helper(request)
        pagination_helper = self._get_pagination_helper(request)
        # Helpers may push extra arguments/fields into the plugin call.
        sorting_helper.update_args(kwargs)
        sorting_helper.update_fields(original_fields, fields_to_add)
        pagination_helper.update_args(kwargs)
        pagination_helper.update_fields(original_fields, fields_to_add)
        if parent_id:
            kwargs[self._parent_id_name] = parent_id
        obj_getter = getattr(self._plugin, self._plugin_handlers[self.LIST])
        obj_list = obj_getter(request.context, **kwargs)
        # Sorting/pagination emulation is a no-op when natively supported.
        obj_list = sorting_helper.sort(obj_list)
        obj_list = pagination_helper.paginate(obj_list)
        # Check authz
        if do_authz:
            # FIXME(salvatore-orlando): obj_getter might return references to
            # other resources. Must check authZ on them too.
            # Omit items from list that should not be visible
            obj_list = [obj for obj in obj_list
                        if policy.check(request.context,
                                        self._plugin_handlers[self.SHOW],
                                        obj,
                                        plugin=self._plugin,
                                        pluralized=self._collection)]
        # Use the first element in the list for discriminating which attributes
        # should be filtered out because of authZ policies
        # fields_to_add contains a list of attributes added for request policy
        # checks but that were not required by the user. They should be
        # therefore stripped
        fields_to_strip = fields_to_add or []
        if obj_list:
            fields_to_strip += self._exclude_attributes_by_policy(
                request.context, obj_list[0])
        collection = {self._collection:
                      [self._filter_attributes(
                          request.context, obj,
                          fields_to_strip=fields_to_strip)
                       for obj in obj_list]}
        pagination_links = pagination_helper.get_links(obj_list)
        if pagination_links:
            collection[self._collection + "_links"] = pagination_links
        # Synchronize usage trackers, if needed
        resource_registry.resync_resource(
            request.context, self._resource, request.context.tenant_id)
        return collection
def _item(self, request, id, do_authz=False, field_list=None,
parent_id=None):
"""Retrieves and formats a single element of the requested entity."""
kwargs = {'fields': field_list}
action = self._plugin_handlers[self.SHOW]
if parent_id:
kwargs[self._parent_id_name] = parent_id
obj_getter = getattr(self._plugin, action)
obj = obj_getter(request.context, id, **kwargs)
# Check authz
# FIXME(salvatore-orlando): obj_getter might return references to
# other resources. Must check authZ on them too.
if do_authz:
policy.enforce(request.context,
action,
obj,
pluralized=self._collection)
return obj
def _send_dhcp_notification(self, context, data, methodname):
if cfg.CONF.dhcp_agent_notification:
if self._collection in data:
for body in data[self._collection]:
item = {self._resource: body}
self._dhcp_agent_notifier.notify(context, item, methodname)
else:
self._dhcp_agent_notifier.notify(context, data, methodname)
def _send_nova_notification(self, action, orig, returned):
if hasattr(self, '_nova_notifier'):
self._nova_notifier.send_network_change(action, orig, returned)
@db_api.retry_db_errors
def index(self, request, **kwargs):
"""Returns a list of the requested entity."""
parent_id = kwargs.get(self._parent_id_name)
# Ensure policy engine is initialized
policy.init()
return self._items(request, True, parent_id)
@db_api.retry_db_errors
def show(self, request, id, **kwargs):
"""Returns detailed information about the requested entity."""
try:
# NOTE(salvatore-orlando): The following ensures that fields
# which are needed for authZ policy validation are not stripped
# away by the plugin before returning.
field_list, added_fields = self._do_field_list(
api_common.list_args(request, "fields"))
parent_id = kwargs.get(self._parent_id_name)
# Ensure policy engine is initialized
policy.init()
return {self._resource:
self._view(request.context,
self._item(request,
id,
do_authz=True,
field_list=field_list,
parent_id=parent_id),
fields_to_strip=added_fields)}
except oslo_policy.PolicyNotAuthorized:
# To avoid giving away information, pretend that it
# doesn't exist
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
    def _emulate_bulk_create(self, obj_creator, request, body, parent_id=None):
        """Emulate an atomic bulk create by looping over single creates.

        On any failure, best-effort deletes every object already created in
        this call before re-raising, to approximate atomicity.
        """
        objs = []
        try:
            for item in body[self._collection]:
                kwargs = {self._resource: item}
                if parent_id:
                    kwargs[self._parent_id_name] = parent_id
                fields_to_strip = self._exclude_attributes_by_policy(
                    request.context, item)
                objs.append(self._filter_attributes(
                    request.context,
                    obj_creator(request.context, **kwargs),
                    fields_to_strip=fields_to_strip))
            return objs
        # Note(salvatore-orlando): broad catch as in theory a plugin
        # could raise any kind of exception
        except Exception:
            with excutils.save_and_reraise_exception():
                for obj in objs:
                    obj_deleter = getattr(self._plugin,
                                          self._plugin_handlers[self.DELETE])
                    try:
                        kwargs = ({self._parent_id_name: parent_id}
                                  if parent_id else {})
                        obj_deleter(request.context, obj['id'], **kwargs)
                    except Exception:
                        # broad catch as our only purpose is to log the
                        # exception
                        LOG.exception(_LE("Unable to undo add for "
                                          "%(resource)s %(id)s"),
                                      {'resource': self._resource,
                                       'id': obj['id']})
        # TODO(salvatore-orlando): The object being processed when the
        # plugin raised might have been created or not in the db.
        # We need a way for ensuring that if it has been created,
        # it is then deleted
def create(self, request, body=None, **kwargs):
self._notifier.info(request.context,
self._resource + '.create.start',
body)
return self._create(request, body, **kwargs)
    @db_api.retry_db_errors
    def _create(self, request, body, **kwargs):
        """Creates a new instance of the requested entity.

        Validates and authorizes the request body (single or bulk), makes
        per-tenant quota reservations, delegates creation to the plugin and
        commits the reservations once creation succeeded.
        """
        parent_id = kwargs.get(self._parent_id_name)
        body = Controller.prepare_request_body(request.context,
                                               copy.deepcopy(body), True,
                                               self._resource, self._attr_info,
                                               allow_bulk=self._allow_bulk)
        action = self._plugin_handlers[self.CREATE]
        # Check authz
        if self._collection in body:
            # Have to account for bulk create
            items = body[self._collection]
        else:
            items = [body]
        # Ensure policy engine is initialized
        policy.init()
        # Store requested resource amounts grouping them by tenant
        # This won't work with multiple resources. However because of the
        # current structure of this controller there will hardly be more than
        # one resource for which reservations are being made
        request_deltas = collections.defaultdict(int)
        for item in items:
            self._validate_network_tenant_ownership(request,
                                                    item[self._resource])
            policy.enforce(request.context,
                           action,
                           item[self._resource],
                           pluralized=self._collection)
            if 'tenant_id' not in item[self._resource]:
                # no tenant_id - no quota check
                continue
            tenant_id = item[self._resource]['tenant_id']
            request_deltas[tenant_id] += 1
        # Quota enforcement
        reservations = []
        try:
            for (tenant, delta) in request_deltas.items():
                reservation = quota.QUOTAS.make_reservation(
                    request.context,
                    tenant,
                    {self._resource: delta},
                    self._plugin)
                reservations.append(reservation)
        except exceptions.QuotaResourceUnknown as e:
            # We don't want to quota this resource
            LOG.debug(e)
        def notify(create_result):
            # Ensure usage trackers for all resources affected by this API
            # operation are marked as dirty
            with request.context.session.begin():
                # Commit the reservation(s)
                for reservation in reservations:
                    quota.QUOTAS.commit_reservation(
                        request.context, reservation.reservation_id)
                resource_registry.set_resources_dirty(request.context)
            notifier_method = self._resource + '.create.end'
            self._notifier.info(request.context,
                                notifier_method,
                                create_result)
            self._send_dhcp_notification(request.context,
                                         create_result,
                                         notifier_method)
            return create_result
        def do_create(body, bulk=False, emulated=False):
            # Delegate creation to the plugin, cancelling reservations if
            # the plugin raises.
            kwargs = {self._parent_id_name: parent_id} if parent_id else {}
            if bulk and not emulated:
                obj_creator = getattr(self._plugin, "%s_bulk" % action)
            else:
                obj_creator = getattr(self._plugin, action)
            try:
                if emulated:
                    return self._emulate_bulk_create(obj_creator, request,
                                                     body, parent_id)
                else:
                    if self._collection in body:
                        # This is weird but fixing it requires changes to the
                        # plugin interface
                        kwargs.update({self._collection: body})
                    else:
                        kwargs.update({self._resource: body})
                    return obj_creator(request.context, **kwargs)
            except Exception:
                # In case of failure the plugin will always raise an
                # exception. Cancel the reservation
                with excutils.save_and_reraise_exception():
                    for reservation in reservations:
                        quota.QUOTAS.cancel_reservation(
                            request.context, reservation.reservation_id)
        if self._collection in body and self._native_bulk:
            # plugin does atomic bulk create operations
            objs = do_create(body, bulk=True)
            # Use first element of list to discriminate attributes which
            # should be removed because of authZ policies
            fields_to_strip = self._exclude_attributes_by_policy(
                request.context, objs[0])
            return notify({self._collection: [self._filter_attributes(
                request.context, obj, fields_to_strip=fields_to_strip)
                for obj in objs]})
        else:
            if self._collection in body:
                # Emulate atomic bulk behavior
                objs = do_create(body, bulk=True, emulated=True)
                return notify({self._collection: objs})
            else:
                obj = do_create(body)
                self._send_nova_notification(action, {},
                                             {self._resource: obj})
                return notify({self._resource: self._view(request.context,
                                                          obj)})
def delete(self, request, id, **kwargs):
"""Deletes the specified entity."""
self._notifier.info(request.context,
self._resource + '.delete.start',
{self._resource + '_id': id})
return self._delete(request, id, **kwargs)
    @db_api.retry_db_errors
    def _delete(self, request, id, **kwargs):
        """Delete the specified entity after authZ checks and notify.

        Returns None (the WSGI layer renders a body-less response); the
        *result* view below is built only for the notifications.
        """
        action = self._plugin_handlers[self.DELETE]
        # Check authz
        policy.init()
        parent_id = kwargs.get(self._parent_id_name)
        obj = self._item(request, id, parent_id=parent_id)
        try:
            policy.enforce(request.context,
                           action,
                           obj,
                           pluralized=self._collection)
        except oslo_policy.PolicyNotAuthorized:
            # To avoid giving away information, pretend that it
            # doesn't exist
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)
        obj_deleter = getattr(self._plugin, action)
        obj_deleter(request.context, id, **kwargs)
        # A delete operation usually alters resource usage, so mark affected
        # usage trackers as dirty
        resource_registry.set_resources_dirty(request.context)
        notifier_method = self._resource + '.delete.end'
        self._notifier.info(request.context,
                            notifier_method,
                            {self._resource + '_id': id})
        result = {self._resource: self._view(request.context, obj)}
        self._send_nova_notification(action, {}, result)
        self._send_dhcp_notification(request.context,
                                     result,
                                     notifier_method)
def update(self, request, id, body=None, **kwargs):
"""Updates the specified entity's attributes."""
try:
payload = body.copy()
except AttributeError:
msg = _("Invalid format: %s") % request.body
raise exceptions.BadRequest(resource='body', msg=msg)
payload['id'] = id
self._notifier.info(request.context,
self._resource + '.update.start',
payload)
return self._update(request, id, body, **kwargs)
    @db_api.retry_db_errors
    def _update(self, request, id, body, **kwargs):
        """Update the specified entity after authZ checks and notify.

        The original object is loaded and merged with the request body so
        the policy engine can evaluate both old and new attribute values.
        """
        body = Controller.prepare_request_body(request.context, body, False,
                                               self._resource, self._attr_info,
                                               allow_bulk=self._allow_bulk)
        action = self._plugin_handlers[self.UPDATE]
        # Load object to check authz
        # but pass only attributes in the original body and required
        # by the policy engine to the policy 'brain'
        field_list = [name for (name, value) in six.iteritems(self._attr_info)
                      if (value.get('required_by_policy') or
                          value.get('primary_key') or
                          'default' not in value)]
        # Ensure policy engine is initialized
        policy.init()
        parent_id = kwargs.get(self._parent_id_name)
        orig_obj = self._item(request, id, field_list=field_list,
                              parent_id=parent_id)
        orig_object_copy = copy.copy(orig_obj)
        orig_obj.update(body[self._resource])
        # Make a list of attributes to be updated to inform the policy engine
        # which attributes are set explicitly so that it can distinguish them
        # from the ones that are set to their default values.
        orig_obj[const.ATTRIBUTES_TO_UPDATE] = body[self._resource].keys()
        try:
            policy.enforce(request.context,
                           action,
                           orig_obj,
                           pluralized=self._collection)
        except oslo_policy.PolicyNotAuthorized:
            with excutils.save_and_reraise_exception() as ctxt:
                # If a tenant is modifying it's own object, it's safe to return
                # a 403. Otherwise, pretend that it doesn't exist to avoid
                # giving away information.
                if request.context.tenant_id != orig_obj['tenant_id']:
                    ctxt.reraise = False
                    msg = _('The resource could not be found.')
                    raise webob.exc.HTTPNotFound(msg)
        obj_updater = getattr(self._plugin, action)
        kwargs = {self._resource: body}
        if parent_id:
            kwargs[self._parent_id_name] = parent_id
        obj = obj_updater(request.context, id, **kwargs)
        # Usually an update operation does not alter resource usage, but as
        # there might be side effects it might be worth checking for changes
        # in resource usage here as well (e.g: a tenant port is created when a
        # router interface is added)
        resource_registry.set_resources_dirty(request.context)
        result = {self._resource: self._view(request.context, obj)}
        notifier_method = self._resource + '.update.end'
        self._notifier.info(request.context, notifier_method, result)
        self._send_dhcp_notification(request.context,
                                     result,
                                     notifier_method)
        self._send_nova_notification(action, orig_object_copy, result)
        return result
    @staticmethod
    def prepare_request_body(context, body, is_create, resource, attr_info,
                             allow_bulk=False):
        """Verifies required attributes are in request body.

        Also checking that an attribute is only specified if it is allowed
        for the given operation (create/update).

        Attribute with default values are considered to be optional.

        body argument must be the deserialized body.

        Bulk bodies ({collection: [...]}) are validated by recursively
        invoking this method on each contained item.
        """
        collection = resource + "s"
        if not body:
            raise webob.exc.HTTPBadRequest(_("Resource body required"))
        LOG.debug("Request body: %(body)s", {'body': body})
        try:
            if collection in body:
                if not allow_bulk:
                    raise webob.exc.HTTPBadRequest(_("Bulk operation "
                                                     "not supported"))
                if not body[collection]:
                    raise webob.exc.HTTPBadRequest(_("Resources required"))
                bulk_body = [
                    Controller.prepare_request_body(
                        context, item if resource in item
                        else {resource: item}, is_create, resource, attr_info,
                        allow_bulk) for item in body[collection]
                ]
                return {collection: bulk_body}
            res_dict = body.get(resource)
        except (AttributeError, TypeError):
            # body was not a mapping (or items were not) - malformed request.
            msg = _("Body contains invalid data")
            raise webob.exc.HTTPBadRequest(msg)
        if res_dict is None:
            msg = _("Unable to find '%s' in request body") % resource
            raise webob.exc.HTTPBadRequest(msg)
        attributes.populate_tenant_id(context, res_dict, attr_info, is_create)
        attributes.verify_attributes(res_dict, attr_info)
        if is_create:  # POST
            attributes.fill_default_value(attr_info, res_dict,
                                          webob.exc.HTTPBadRequest)
        else:  # PUT
            for attr, attr_vals in six.iteritems(attr_info):
                if attr in res_dict and not attr_vals['allow_put']:
                    msg = _("Cannot update read-only attribute %s") % attr
                    raise webob.exc.HTTPBadRequest(msg)
        attributes.convert_value(attr_info, res_dict, webob.exc.HTTPBadRequest)
        return body
def _validate_network_tenant_ownership(self, request, resource_item):
# TODO(salvatore-orlando): consider whether this check can be folded
# in the policy engine
if (request.context.is_admin or request.context.is_advsvc or
self._resource not in ('port', 'subnet')):
return
network = self._plugin.get_network(
request.context,
resource_item['network_id'])
# do not perform the check on shared networks
if network.get('shared'):
return
network_owner = network['tenant_id']
if network_owner != resource_item['tenant_id']:
# NOTE(kevinbenton): we raise a 404 to hide the existence of the
# network from the tenant since they don't have access to it.
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
def create_resource(collection, resource, plugin, params, allow_bulk=False,
                    member_actions=None, parent=None, allow_pagination=False,
                    allow_sorting=False):
    """Build a WSGI resource wrapping a Controller for *resource*."""
    controller = Controller(
        plugin, collection, resource, params, allow_bulk,
        member_actions=member_actions,
        parent=parent,
        allow_pagination=allow_pagination,
        allow_sorting=allow_sorting,
    )
    return wsgi_resource.Resource(controller, FAULT_MAP)
| |
#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
#
__all__ = ['Pool']
#
# Imports
#
import threading
import queue
import itertools
import collections
import time
from multiprocessing import Process, cpu_count, TimeoutError
from multiprocessing.util import Finalize, debug
#
# Constants representing the state of a pool
#
RUN = 0        # pool (or handler thread) is accepting new work
CLOSE = 1      # close() called: no new tasks, drain queued work
TERMINATE = 2  # terminate() called: stop as soon as possible
#
# Miscellaneous
#
# Monotonically increasing job ids shared by all result objects, used by
# the result-handler thread to route (job, i, result) triples.
job_counter = itertools.count()
def mapstar(args):
    """Unpack *args* into ``map`` and materialize the result as a list."""
    return [value for value in map(*args)]
#
# Code run by worker processes
#
def worker(inqueue, outqueue, initializer=None, initargs=()):
    """Main loop run by each pool worker.

    Pulls ``(job, i, func, args, kwds)`` tasks from *inqueue*, executes
    them, and pushes ``(job, i, (success, value))`` onto *outqueue*.
    A ``None`` task is the shutdown sentinel.
    """
    put = outqueue.put
    get = inqueue.get
    if hasattr(inqueue, '_writer'):
        # Pipe-backed queues: close the ends this process never uses.
        inqueue._writer.close()
        outqueue._reader.close()
    if initializer is not None:
        initializer(*initargs)
    while True:
        try:
            task = get()
        except (EOFError, IOError):
            debug('worker got EOFError or IOError -- exiting')
            return
        if task is None:
            debug('worker got sentinel -- exiting')
            return
        job, index, func, args, kwds = task
        try:
            outcome = (True, func(*args, **kwds))
        except Exception as exc:
            # Failures are shipped back as values, never raised here.
            outcome = (False, exc)
        put((job, index, outcome))
#
# Class representing a process pool
#
class Pool(object):
    '''
    Class which supports an async version of applying functions to arguments.

    Work flows: caller -> _taskqueue -> task-handler thread -> _inqueue ->
    workers -> _outqueue -> result-handler thread -> result objects that
    are registered in self._cache keyed by job id.
    '''
    # Subclasses (e.g. ThreadPool) override this to change the worker type.
    Process = Process
    def __init__(self, processes=None, initializer=None, initargs=()):
        self._setup_queues()
        self._taskqueue = queue.Queue()
        # job id -> ApplyResult/MapResult/IMapIterator awaiting delivery.
        self._cache = {}
        self._state = RUN
        if processes is None:
            # Default worker count: one per CPU, falling back to 1.
            try:
                processes = cpu_count()
            except NotImplementedError:
                processes = 1
        if initializer is not None and not hasattr(initializer, '__call__'):
            raise TypeError('initializer must be a callable')
        self._pool = []
        for i in range(processes):
            w = self.Process(
                target=worker,
                args=(self._inqueue, self._outqueue, initializer, initargs)
                )
            self._pool.append(w)
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
        # Daemon thread feeding queued tasks to the workers.
        self._task_handler = threading.Thread(
            target=Pool._handle_tasks,
            args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
            )
        self._task_handler.daemon = True
        self._task_handler._state = RUN
        self._task_handler.start()
        # Daemon thread routing worker results back to result objects.
        self._result_handler = threading.Thread(
            target=Pool._handle_results,
            args=(self._outqueue, self._quick_get, self._cache)
            )
        self._result_handler.daemon = True
        self._result_handler._state = RUN
        self._result_handler.start()
        # Cleanup callback registered for interpreter exit / terminate().
        self._terminate = Finalize(
            self, self._terminate_pool,
            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
                  self._task_handler, self._result_handler, self._cache),
            exitpriority=15
            )
    def _setup_queues(self):
        """Create the worker-facing queues and their fast-path accessors."""
        from .queues import SimpleQueue
        self._inqueue = SimpleQueue()
        self._outqueue = SimpleQueue()
        # Talk to the underlying pipe connections directly, skipping the
        # queue wrappers' locking on the single-producer/consumer side.
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv
    def apply(self, func, args=(), kwds={}):
        '''
        Equivalent of `func(*args, **kwds)`.
        '''
        assert self._state == RUN
        return self.apply_async(func, args, kwds).get()
    def map(self, func, iterable, chunksize=None):
        '''
        Apply `func` to each element in `iterable`, collecting the results
        in a list that is returned.
        '''
        assert self._state == RUN
        return self.map_async(func, iterable, chunksize).get()
    def imap(self, func, iterable, chunksize=1):
        '''
        Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
        '''
        assert self._state == RUN
        if chunksize == 1:
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                         for i, x in enumerate(iterable)), result._set_length))
            return result
        else:
            assert chunksize > 1
            # Batch the work; unpack each chunk when yielding to the caller.
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                     for i, x in enumerate(task_batches)), result._set_length))
            return (item for chunk in result for item in chunk)
    def imap_unordered(self, func, iterable, chunksize=1):
        '''
        Like `imap()` method but ordering of results is arbitrary.
        '''
        assert self._state == RUN
        if chunksize == 1:
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                         for i, x in enumerate(iterable)), result._set_length))
            return result
        else:
            assert chunksize > 1
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                     for i, x in enumerate(task_batches)), result._set_length))
            return (item for chunk in result for item in chunk)
    def apply_async(self, func, args=(), kwds={}, callback=None):
        '''
        Asynchronous version of `apply()` method.
        '''
        assert self._state == RUN
        result = ApplyResult(self._cache, callback)
        self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
        return result
    def map_async(self, func, iterable, chunksize=None, callback=None):
        '''
        Asynchronous version of `map()` method.
        '''
        assert self._state == RUN
        if not hasattr(iterable, '__len__'):
            iterable = list(iterable)
        if chunksize is None:
            # Heuristic: roughly 4 chunks per worker.
            chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
            if extra:
                chunksize += 1
        task_batches = Pool._get_tasks(func, iterable, chunksize)
        result = MapResult(self._cache, chunksize, len(iterable), callback)
        self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                              for i, x in enumerate(task_batches)), None))
        return result
    @staticmethod
    def _handle_tasks(taskqueue, put, outqueue, pool):
        """Task-handler thread: move tasks to workers, then send sentinels."""
        thread = threading.current_thread()
        for taskseq, set_length in iter(taskqueue.get, None):
            i = -1
            for i, task in enumerate(taskseq):
                if thread._state:
                    debug('task handler found thread._state != RUN')
                    break
                try:
                    put(task)
                except IOError:
                    debug('could not put task on queue')
                    break
            else:
                if set_length:
                    debug('doing set_length()')
                    # Tell the IMapIterator how many items to expect.
                    set_length(i+1)
                continue
            break
        else:
            debug('task handler got sentinel')
        try:
            # tell result handler to finish when cache is empty
            debug('task handler sending sentinel to result handler')
            outqueue.put(None)
            # tell workers there is no more work
            debug('task handler sending sentinel to workers')
            for p in pool:
                put(None)
        except IOError:
            debug('task handler got IOError when sending sentinels')
        debug('task handler exiting')
    @staticmethod
    def _handle_results(outqueue, get, cache):
        """Result-handler thread: route worker results to result objects."""
        thread = threading.current_thread()
        while 1:
            try:
                task = get()
            except (IOError, EOFError):
                debug('result handler got EOFError/IOError -- exiting')
                return
            if thread._state:
                assert thread._state == TERMINATE
                debug('result handler found thread._state=TERMINATE')
                break
            if task is None:
                debug('result handler got sentinel')
                break
            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                # Result object already discarded (e.g. iterator finished).
                pass
        # Drain remaining results unless a hard terminate was requested.
        while cache and thread._state != TERMINATE:
            try:
                task = get()
            except (IOError, EOFError):
                debug('result handler got EOFError/IOError -- exiting')
                return
            if task is None:
                debug('result handler ignoring extra sentinel')
                continue
            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass
        if hasattr(outqueue, '_reader'):
            debug('ensuring that outqueue is not full')
            # If we don't make room available in outqueue then
            # attempts to add the sentinel (None) to outqueue may
            # block.  There is guaranteed to be no more than 2 sentinels.
            try:
                for i in range(10):
                    if not outqueue._reader.poll():
                        break
                    get()
            except (IOError, EOFError):
                pass
        debug('result handler exiting: len(cache)=%s, thread._state=%s',
              len(cache), thread._state)
    @staticmethod
    def _get_tasks(func, it, size):
        """Yield (func, chunk) pairs, chunking *it* into tuples of *size*."""
        it = iter(it)
        while 1:
            x = tuple(itertools.islice(it, size))
            if not x:
                return
            yield (func, x)
    def __reduce__(self):
        # Pools hold live OS resources and threads; refuse pickling.
        raise NotImplementedError(
              'pool objects cannot be passed between processes or pickled'
              )
    def close(self):
        """Stop accepting tasks; queued work is still completed."""
        debug('closing pool')
        if self._state == RUN:
            self._state = CLOSE
            self._taskqueue.put(None)
    def terminate(self):
        """Stop the pool immediately, discarding outstanding work."""
        debug('terminating pool')
        self._state = TERMINATE
        self._terminate()
    def join(self):
        """Wait for handler threads and workers to exit.

        Must be preceded by close() or terminate().
        """
        debug('joining pool')
        assert self._state in (CLOSE, TERMINATE)
        self._task_handler.join()
        self._result_handler.join()
        for p in self._pool:
            p.join()
    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # task_handler may be blocked trying to put items on inqueue
        debug('removing tasks from inqueue until task handler finished')
        inqueue._rlock.acquire()
        while task_handler.is_alive() and inqueue._reader.poll():
            inqueue._reader.recv()
            time.sleep(0)
    @classmethod
    def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
                        task_handler, result_handler, cache):
        """Finalizer: shut down handlers and workers in a safe order."""
        # this is guaranteed to only be called once
        debug('finalizing pool')
        task_handler._state = TERMINATE
        taskqueue.put(None)                 # sentinel
        debug('helping task handler/workers to finish')
        cls._help_stuff_finish(inqueue, task_handler, len(pool))
        assert result_handler.is_alive() or len(cache) == 0
        result_handler._state = TERMINATE
        outqueue.put(None)                  # sentinel
        if pool and hasattr(pool[0], 'terminate'):
            debug('terminating workers')
            for p in pool:
                p.terminate()
        debug('joining task handler')
        task_handler.join(1e100)
        debug('joining result handler')
        result_handler.join(1e100)
        if pool and hasattr(pool[0], 'terminate'):
            debug('joining pool workers')
            for p in pool:
                p.join()
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
    """Result handle returned by ``Pool.apply_async()``.

    The result-handler thread delivers the outcome via ``_set``; callers
    block on ``get``/``wait``.
    """

    def __init__(self, cache, callback):
        self._cond = threading.Condition(threading.Lock())
        self._job = next(job_counter)
        self._cache = cache
        self._ready = False
        self._callback = callback
        # Register ourselves so the result handler can find us by job id.
        cache[self._job] = self

    def ready(self):
        """Return True once a result (or error) has been delivered."""
        return self._ready

    def successful(self):
        """Return True if the call completed without raising.

        Only valid after the result is ready.
        """
        assert self._ready
        return self._success

    def wait(self, timeout=None):
        """Block until the result arrives or *timeout* seconds elapse."""
        with self._cond:
            if not self._ready:
                self._cond.wait(timeout)

    def get(self, timeout=None):
        """Return the call's value, re-raising its exception on failure."""
        self.wait(timeout)
        if not self._ready:
            raise TimeoutError
        if self._success:
            return self._value
        raise self._value

    def _set(self, i, obj):
        # Called from the result-handler thread with (success, value).
        self._success, self._value = obj
        if self._callback and self._success:
            self._callback(self._value)
        with self._cond:
            self._ready = True
            self._cond.notify()
        del self._cache[self._job]
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
    """Result handle returned by ``Pool.map_async()``.

    Collects *length* items delivered in chunks of *chunksize*; becomes
    ready when every chunk has arrived or as soon as one chunk fails.
    """

    def __init__(self, cache, chunksize, length, callback):
        ApplyResult.__init__(self, cache, callback)
        self._success = True
        self._value = [None] * length
        self._chunksize = chunksize
        if chunksize <= 0:
            # Empty map: no chunks will ever arrive, so it is ready now.
            self._number_left = 0
            self._ready = True
        else:
            self._number_left = length//chunksize + bool(length % chunksize)

    def _set(self, i, success_result):
        """Record chunk *i*; complete the map when all chunks are in.

        On the first failed chunk the stored exception replaces the value
        list and the result completes immediately.
        """
        success, result = success_result
        if success:
            self._value[i*self._chunksize:(i+1)*self._chunksize] = result
            self._number_left -= 1
            if self._number_left == 0:
                if self._callback:
                    self._callback(self._value)
                self._complete()
        else:
            self._success = False
            self._value = result
            self._complete()

    def _complete(self):
        # Shared completion path (was duplicated in both _set branches):
        # drop the cache entry and wake any threads blocked in get()/wait().
        del self._cache[self._job]
        self._cond.acquire()
        try:
            self._ready = True
            self._cond.notify()
        finally:
            self._cond.release()
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
    """Ordered result iterator returned by ``Pool.imap()``.

    Results may arrive out of order; they are buffered in ``_unsorted``
    until the next expected index is available.
    """
    def __init__(self, cache):
        self._cond = threading.Condition(threading.Lock())
        self._job = next(job_counter)
        self._cache = cache
        self._items = collections.deque()   # results ready to be yielded
        self._index = 0                     # next index expected/yielded
        self._length = None                 # total count, set by _set_length
        self._unsorted = {}                 # out-of-order results by index
        cache[self._job] = self
    def __iter__(self):
        return self
    def next(self, timeout=None):
        """Return the next result, blocking up to *timeout* seconds."""
        self._cond.acquire()
        try:
            try:
                item = self._items.popleft()
            except IndexError:
                # Nothing buffered: either exhausted, or wait for delivery.
                if self._index == self._length:
                    raise StopIteration
                self._cond.wait(timeout)
                try:
                    item = self._items.popleft()
                except IndexError:
                    if self._index == self._length:
                        raise StopIteration
                    raise TimeoutError
        finally:
            self._cond.release()
        success, value = item
        if success:
            return value
        raise value
    __next__ = next                    # XXX
    def _set(self, i, obj):
        # Called from the result-handler thread with the i-th outcome.
        self._cond.acquire()
        try:
            if self._index == i:
                self._items.append(obj)
                self._index += 1
                # Flush any buffered results that are now in order.
                while self._index in self._unsorted:
                    obj = self._unsorted.pop(self._index)
                    self._items.append(obj)
                    self._index += 1
                self._cond.notify()
            else:
                self._unsorted[i] = obj
            if self._index == self._length:
                # All results delivered; unregister from the pool cache.
                del self._cache[self._job]
        finally:
            self._cond.release()
    def _set_length(self, length):
        # Called by the task handler once the input size is known.
        self._cond.acquire()
        try:
            self._length = length
            if self._index == self._length:
                self._cond.notify()
                del self._cache[self._job]
        finally:
            self._cond.release()
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
    """Iterator for `Pool.imap_unordered()`: yields results as they arrive."""
    def _set(self, i, obj):
        # No ordering: every delivery is immediately consumable, so the
        # index is just a count of items received.
        self._cond.acquire()
        try:
            self._items.append(obj)
            self._index += 1
            self._cond.notify()
            if self._index == self._length:
                # All items in: detach from the pool's cache.
                del self._cache[self._job]
        finally:
            self._cond.release()
#
#
#
class ThreadPool(Pool):
    """A Pool variant whose workers are threads rather than processes."""
    from .dummy import Process
    def __init__(self, processes=None, initializer=None, initargs=()):
        Pool.__init__(self, processes, initializer, initargs)
    def _setup_queues(self):
        # Plain thread-safe queues; no inter-process pipes are needed.
        self._inqueue = queue.Queue()
        self._outqueue = queue.Queue()
        self._quick_put = self._inqueue.put
        self._quick_get = self._outqueue.get
    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # put sentinels at head of inqueue to make workers finish
        inqueue.not_empty.acquire()
        try:
            # Reach into the queue's internals (holding its lock): drop any
            # pending tasks and leave one None sentinel per worker so every
            # worker thread wakes up and exits.
            inqueue.queue.clear()
            inqueue.queue.extend([None] * size)
            inqueue.not_empty.notify_all()
        finally:
            inqueue.not_empty.release()
| |
from __future__ import unicode_literals
from __future__ import print_function
from ..compat import text_type, number_types, iteritems
from ..context.missing import MoyaAttributeError
import re
import time
def obj_index(obj, key):
    """Fetch *key* from *obj*, preferring item access over attribute access."""
    getter = getattr(obj, "__getitem__", None)
    if getter is None:
        getter = getattr(obj, "__getattribute__")
    return getter(key)
def obj_index_getter(key):
    """Return a callable that extracts *key* from an object."""
    def getter(obj):
        extract = getattr(obj, "__getitem__", None) or getattr(obj, "__getattribute__")
        return extract(key)
    return getter
def get_keys(obj):
    """Return the context keys exposed by *obj*."""
    if not hasattr(obj, "__getitem__"):
        # Plain object: its public attribute names.
        return [name for name in dir(obj) if not name.startswith("_")]
    if hasattr(obj, "keys"):
        # Mapping: its own keys.
        return list(obj.keys())
    # Sequence: positional indices.
    return [index for index, _item in enumerate(obj)]
def get_moya_interface(context, obj):
    """Return *obj*'s Moya context interface when it defines one, else *obj*."""
    if not hasattr(obj, "__moyacontext__"):
        return obj
    return obj.__moyacontext__(context)
def get_moya_attribute(context, obj, key, default=None):
    """Read attribute *key* from *obj*'s Moya interface, trapping failures.

    Any exception raised by the attribute lookup is returned (not raised)
    as a MoyaAttributeError value.
    """
    interface = get_moya_interface(context, obj)
    try:
        return getattr(interface, key, default)
    except Exception as error:
        # Surface the failure as a value rather than letting it propagate.
        return MoyaAttributeError(text_type(error))
def yield_join(seq, join=", "):
    """Iterate over a sequence, inserting *join* text between values.

    Yields nothing for an empty sequence. The original implementation let
    StopIteration escape the generator body, which raises RuntimeError
    under PEP 479 (Python 3.7+) and also crashed on empty input; this
    version terminates cleanly.
    """
    iter_seq = iter(seq)
    try:
        value = next(iter_seq)
    except StopIteration:
        return
    yield value
    for value in iter_seq:
        yield join
        yield value
def to_expression(context, obj, max_size=None, truncate_text=" [...]"):
    """Convert an object to a Moya expression, where possible.

    Args:
        context: Moya context for interface lookups; when None, the active
            pilot context is used if one exists.
        obj: object to render.
        max_size: optional upper bound on the rendered length.
        truncate_text: suffix appended when output is truncated.

    Returns:
        A text expression approximating *obj*. The last-resort ``repr``
        fallback may not be a valid Moya expression.
    """
    if context is None:
        from .. import pilot
        context = pilot.context
    if context is not None:
        obj = get_moya_interface(context, obj)

    def iter_dict(obj, sep=", "):
        # Tokens for dict entries with *sep* between entries. Written as a
        # plain loop: the old `while 1` + next() form let StopIteration
        # escape the generator, which is a RuntimeError under PEP 479.
        first = True
        for k, v in iteritems(obj):
            if not first:
                yield sep
            first = False
            for token in moya_repr(k):
                yield token
            yield ": "
            for token in moya_repr(v):
                yield token

    def iter_seq(obj, sep=", "):
        # Tokens for sequence values with *sep* between values (same
        # PEP 479 fix as iter_dict).
        first = True
        for value in obj:
            if not first:
                yield sep
            first = False
            for token in moya_repr(value):
                yield token

    def moya_repr(obj):
        # Yield the Moya rendering of a single object, token by token.
        if isinstance(obj, text_type):
            yield quote_string(obj)
        elif obj is None:
            yield "None"
        elif isinstance(obj, bool):
            if obj:
                yield "yes"
            else:
                yield "no"
        elif hasattr(obj, "__moyarepr__"):
            yield obj.__moyarepr__(context)
        elif isinstance(obj, number_types):
            # Strip the Python 2 long suffix.
            yield text_type(obj).rstrip("L")
        elif isinstance(obj, (list, tuple)):
            yield "["
            for value in iter_seq(obj):
                yield value
            yield "]"
        elif isinstance(obj, dict):
            yield "{"
            for token in iter_dict(obj):
                yield token
            yield "}"
        elif isinstance(obj, set):
            yield "set:["
            for value in iter_seq(obj):
                yield value
            yield "]"
        else:
            # A last resort, may not be a valid Moya expression
            try:
                yield repr(obj)
            except Exception as error:
                yield "<repr failed '{}'>".format(error)

    if max_size is None:
        return "".join(moya_repr(obj))
    components = []
    append = components.append
    size = 0
    for c in moya_repr(obj):
        append(c)
        size += len(c)
        if size > max_size:
            # Try not to truncate the middle of a token if possible
            if size > 50 and len(components) > 1 and len(components[-1]) < 20:
                components.pop()
            return "".join(components)[:max_size] + truncate_text
    return "".join(components)
def get_app_from_callstack(context):
    """Return the current app from the call stack, else from template data."""
    current_call = context.get(".call", None)
    app = getattr(current_call, "app", None)
    # Truthiness (not an `is None` check) mirrors the original `or` chain.
    if app:
        return app
    return context.get("._t.app", None)
def set_dynamic(context):
    """Set commonly used dynamic items on the stack."""
    from .expressiontime import ExpressionDateTime
    # Dynamic values are re-evaluated on every lookup.
    context.set_dynamic(".clock", lambda c: ExpressionDateTime.moya_utcnow())
    context.set_counter(".counter")
    context.set_dynamic(".app", get_app_from_callstack)
    context.set_dynamic(".time", lambda c: time.time())
    theme_fs = context.get(".fs.themes", None)
    from ..theme import Theme
    # Lazy theme loader; falls back to a dummy when no theme FS is mounted.
    if theme_fs:
        context.set_lazy(".theme", Theme.loader(theme_fs), None)
    else:
        context.set_lazy(".theme", Theme.dummy_loader, None)
# Characters that must be backslash-escaped inside a quoted Moya string,
# mapped to their escape sequences.
STRING_ENCODE = {
    "\a": "\\a",
    "\b": "\\b",
    "\f": "\\f",
    "\n": "\\n",
    "\r": "\\r",
    "\t": "\\t",
    "\v": "\\v",
    "'": "\\'",
    '"': '\\"',
    "\\": "\\\\",
}
# Inverse map: escape sequence back to the literal character.
STRING_DECODE = {v: k for k, v in STRING_ENCODE.items()}
# Regex alternations matching any escapable character / escape sequence.
# NOTE(review): the `c != '"'` filter compares two-character escape
# sequences against a single quote character, so it excludes nothing —
# presumably it was meant to skip the '\\"' sequence; confirm intent.
_encode_string = "|".join(re.escape(c) for c in STRING_ENCODE.keys())
_decode_string = "|".join(re.escape(c) for c in STRING_DECODE.keys() if c != '"')
_re_encode_string = re.compile(_encode_string)
_re_decode_string = re.compile(_decode_string)
def encode_string(s, _re_sub=_re_encode_string.sub, _replace=STRING_ENCODE.__getitem__):
    """Backslash-escape special characters in *s* (defaults bound for speed)."""
    def substitute(match):
        return _replace(match.group(0))
    return _re_sub(substitute, s)
def quote_string(s):
    """Wrap *s* in single quotes, escaping special characters first."""
    return "'" + encode_string(s) + "'"
def decode_string(s, _re_sub=_re_decode_string.sub, _replace=STRING_DECODE.__getitem__):
    """Reverse encode_string: turn escape sequences back into characters."""
    def substitute(match):
        return _replace(match.group(0))
    return _re_sub(substitute, s)
if __name__ == "__main__":
    # Ad-hoc round-trip check of the escape/unescape helpers: each value
    # should survive encode_string -> decode_string unchanged.
    tests = ["test", "hello\nworld", 'you can "quote me" on that']
    for t in tests:
        print()
        print(repr(t))
        enc = encode_string(t)
        print(repr(enc))
        dec = decode_string(enc)
        print(repr(dec))
# if __name__ == "__main__":
# from moya.context import Context
# c = Context()
# c['foo'] = [range(10)] * 10000
# c['bar'] = [{'a': "Hello world!", 'b': range(5)}] * 10000
#
# print(c.to_expr(c['foo']))
# print(c.to_expr(c['bar']))
# if __name__ == "__main__":
# from moya.context.expressionrange import *
# from moya.context.expressiontime import *
# print(to_expression(context, "hello\nworld"))
# from collections import OrderedDict
# print(to_expression(context, OrderedDict()))
# from moya.console import Console
# c = Console()
# c.obj(context, {'a': OrderedDict()})
| |
from __future__ import print_function, division
from sympy.core import S
from sympy.core.compatibility import u
from sympy.core.exprtools import factor_terms
from sympy.core.function import (Function, Derivative, ArgumentIndexError,
AppliedUndef)
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.core.expr import Expr
from sympy.core import Add, Mul
from sympy.core.relational import Eq
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.trigonometric import atan2
###############################################################################
######################### REAL and IMAGINARY PARTS ############################
###############################################################################
class re(Function):
    """
    Returns real part of expression. This function performs only
    elementary analysis and so it will fail to decompose properly
    more complicated expressions. If completely simplified result
    is needed then use Basic.as_real_imag() or perform complex
    expansion on instance of this function.
    Examples
    ========
    >>> from sympy import re, im, I, E
    >>> from sympy.abc import x, y
    >>> re(2*E)
    2*E
    >>> re(2*I + 17)
    17
    >>> re(2*I)
    0
    >>> re(im(x) + x*I + 2)
    2
    See Also
    ========
    im
    """
    is_real = True
    unbranched = True  # implicitly works on the projection to C
    @classmethod
    def eval(cls, arg):
        if arg is S.NaN:
            return S.NaN
        elif arg.is_real:
            return arg
        elif arg.is_imaginary or (S.ImaginaryUnit*arg).is_real:
            return S.Zero
        elif arg.is_Function and arg.func is conjugate:
            # re(conjugate(z)) == re(z)
            return re(arg.args[0])
        else:
            # Split the Add into terms whose real part is known (excluded),
            # pure-imaginary coefficients (reverted) and the rest (included).
            included, reverted, excluded = [], [], []
            args = Add.make_args(arg)
            for term in args:
                coeff = term.as_coefficient(S.ImaginaryUnit)
                if coeff is not None:
                    if not coeff.is_real:
                        reverted.append(coeff)
                elif not term.has(S.ImaginaryUnit) and term.is_real:
                    excluded.append(term)
                else:
                    # Try to do some advanced expansion. If
                    # impossible, don't try to do re(arg) again
                    # (because this is what we are trying to do now).
                    real_imag = term.as_real_imag(ignore=arg)
                    if real_imag:
                        excluded.append(real_imag[0])
                    else:
                        included.append(term)
            if len(args) != len(included):
                a, b, c = (Add(*xs) for xs in [included, reverted, excluded])
                return cls(a) - im(b) + c
    def as_real_imag(self, deep=True, **hints):
        """
        Returns the real number with a zero complex part.
        """
        return (self, S.Zero)
    def _eval_derivative(self, x):
        if x.is_real or self.args[0].is_real:
            return re(Derivative(self.args[0], x, evaluate=True))
        if x.is_imaginary or self.args[0].is_imaginary:
            return -S.ImaginaryUnit \
                * im(Derivative(self.args[0], x, evaluate=True))
    def _eval_rewrite_as_im(self, arg):
        # re(z) == z - I*im(z). The previous form, z - im(z), dropped the
        # factor of I on the imaginary part and was mathematically wrong.
        return self.args[0] - S.ImaginaryUnit*im(self.args[0])
    def _eval_is_algebraic(self):
        return self.args[0].is_algebraic
    def _sage_(self):
        import sage.all as sage
        return sage.real_part(self.args[0]._sage_())
class im(Function):
    """
    Returns imaginary part of expression. This function performs only
    elementary analysis and so it will fail to decompose properly more
    complicated expressions. If completely simplified result is needed then
    use Basic.as_real_imag() or perform complex expansion on instance of
    this function.
    Examples
    ========
    >>> from sympy import re, im, E, I
    >>> from sympy.abc import x, y
    >>> im(2*E)
    0
    >>> re(2*I + 17)
    17
    >>> im(x*I)
    re(x)
    >>> im(re(x) + y)
    im(y)
    See Also
    ========
    re
    """
    is_real = True
    unbranched = True  # implicitly works on the projection to C
    @classmethod
    def eval(cls, arg):
        if arg is S.NaN:
            return S.NaN
        elif arg.is_real:
            return S.Zero
        elif arg.is_imaginary or (S.ImaginaryUnit*arg).is_real:
            # For a pure-imaginary argument, im(z) == -I*z.
            return -S.ImaginaryUnit * arg
        elif arg.is_Function and arg.func is conjugate:
            # im(conjugate(z)) == -im(z)
            return -im(arg.args[0])
        else:
            # Split the Add into terms with a known imaginary part
            # (excluded), non-real I-coefficients (reverted) and the rest.
            included, reverted, excluded = [], [], []
            args = Add.make_args(arg)
            for term in args:
                coeff = term.as_coefficient(S.ImaginaryUnit)
                if coeff is not None:
                    if not coeff.is_real:
                        reverted.append(coeff)
                    else:
                        excluded.append(coeff)
                elif term.has(S.ImaginaryUnit) or not term.is_real:
                    # Try to do some advanced expansion. If
                    # impossible, don't try to do im(arg) again
                    # (because this is what we are trying to do now).
                    real_imag = term.as_real_imag(ignore=arg)
                    if real_imag:
                        excluded.append(real_imag[1])
                    else:
                        included.append(term)
            if len(args) != len(included):
                a, b, c = (Add(*xs) for xs in [included, reverted, excluded])
                return cls(a) + re(b) + c
    def as_real_imag(self, deep=True, **hints):
        """
        Return the imaginary part with a zero real part.
        Examples
        ========
        >>> from sympy.functions import im
        >>> from sympy import I
        >>> im(2 + 3*I).as_real_imag()
        (3, 0)
        """
        return (self, S.Zero)
    def _eval_derivative(self, x):
        if x.is_real or self.args[0].is_real:
            return im(Derivative(self.args[0], x, evaluate=True))
        if x.is_imaginary or self.args[0].is_imaginary:
            return -S.ImaginaryUnit \
                * re(Derivative(self.args[0], x, evaluate=True))
    def _sage_(self):
        import sage.all as sage
        return sage.imag_part(self.args[0]._sage_())
    def _eval_rewrite_as_re(self, arg):
        # im(z) == -I*(z - re(z)). The previous form, z - re(z), equals
        # I*im(z) rather than im(z) and was mathematically wrong.
        return -S.ImaginaryUnit*(self.args[0] - re(self.args[0]))
    def _eval_is_algebraic(self):
        return self.args[0].is_algebraic
###############################################################################
############### SIGN, ABSOLUTE VALUE, ARGUMENT and CONJUGATION ################
###############################################################################
class sign(Function):
    """
    Returns the complex sign of an expression:
    If the expression is real the sign will be:
    * 1 if expression is positive
    * 0 if expression is equal to zero
    * -1 if expression is negative
    If the expression is imaginary the sign will be:
    * I if im(expression) is positive
    * -I if im(expression) is negative
    Otherwise an unevaluated expression will be returned. When evaluated, the
    result (in general) will be ``cos(arg(expr)) + I*sin(arg(expr))``.
    Examples
    ========
    >>> from sympy.functions import sign
    >>> from sympy.core.numbers import I
    >>> sign(-1)
    -1
    >>> sign(0)
    0
    >>> sign(-3*I)
    -I
    >>> sign(1 + I)
    sign(1 + I)
    >>> _.evalf()
    0.707106781186548 + 0.707106781186548*I
    See Also
    ========
    Abs, conjugate
    """
    is_finite = True
    is_complex = True
    def doit(self):
        # For a provably nonzero argument, sign(z) == z/|z|.
        if self.args[0].is_nonzero:
            return self.args[0] / Abs(self.args[0])
        return self
    @classmethod
    def eval(cls, arg):
        # handle what we can
        if arg.is_Mul:
            # Pull the sign out of each factor whose sign is known; leave
            # the undetermined factors inside a residual sign(...) call.
            c, args = arg.as_coeff_mul()
            unk = []
            s = sign(c)
            for a in args:
                if a.is_negative:
                    s = -s
                elif a.is_positive:
                    pass
                else:
                    ai = im(a)
                    if a.is_imaginary and ai.is_comparable:  # i.e. a = I*real
                        s *= S.ImaginaryUnit
                        if ai.is_negative:
                            # can't use sign(ai) here since ai might not be
                            # a Number
                            s = -s
                    else:
                        unk.append(a)
            if c is S.One and len(unk) == len(args):
                # Nothing was determined: return None so the call stays
                # unevaluated instead of recursing forever.
                return None
            return s * cls(arg._new_rawargs(*unk))
        if arg is S.NaN:
            return S.NaN
        if arg.is_zero:  # it may be an Expr that is zero
            return S.Zero
        if arg.is_positive:
            return S.One
        if arg.is_negative:
            return S.NegativeOne
        if arg.is_Function:
            # sign is idempotent: sign(sign(z)) == sign(z).
            if arg.func is sign:
                return arg
        if arg.is_imaginary:
            if arg.is_Pow and arg.exp is S.Half:
                # we catch this because non-trivial sqrt args are not expanded
                # e.g. sqrt(1-sqrt(2)) --x--> to I*sqrt(sqrt(2) - 1)
                return S.ImaginaryUnit
            arg2 = -S.ImaginaryUnit * arg
            if arg2.is_positive:
                return S.ImaginaryUnit
            if arg2.is_negative:
                return -S.ImaginaryUnit
    def _eval_Abs(self):
        # |sign(z)| == 1 whenever z is nonzero.
        if self.args[0].is_nonzero:
            return S.One
    def _eval_conjugate(self):
        return sign(conjugate(self.args[0]))
    def _eval_derivative(self, x):
        # d/dx sign(x) == 2*DiracDelta(x) on the real (or imaginary) line.
        if self.args[0].is_real:
            from sympy.functions.special.delta_functions import DiracDelta
            return 2 * Derivative(self.args[0], x, evaluate=True) \
                * DiracDelta(self.args[0])
        elif self.args[0].is_imaginary:
            from sympy.functions.special.delta_functions import DiracDelta
            return 2 * Derivative(self.args[0], x, evaluate=True) \
                * DiracDelta(-S.ImaginaryUnit * self.args[0])
    def _eval_is_nonnegative(self):
        if self.args[0].is_nonnegative:
            return True
    def _eval_is_nonpositive(self):
        if self.args[0].is_nonpositive:
            return True
    def _eval_is_imaginary(self):
        return self.args[0].is_imaginary
    def _eval_is_integer(self):
        return self.args[0].is_real
    def _eval_is_zero(self):
        return self.args[0].is_zero
    def _eval_power(self, other):
        # sign(z)**(even integer) == 1 for real nonzero z.
        if (
            self.args[0].is_real and
            self.args[0].is_nonzero and
            other.is_integer and
            other.is_even
        ):
            return S.One
    def _sage_(self):
        import sage.all as sage
        return sage.sgn(self.args[0]._sage_())
    def _eval_rewrite_as_Piecewise(self, arg):
        if arg.is_real:
            return Piecewise((1, arg > 0), (-1, arg < 0), (0, True))
    def _eval_rewrite_as_Heaviside(self, arg):
        from sympy import Heaviside
        if arg.is_real:
            return Heaviside(arg)*2-1
    def _eval_simplify(self, ratio, measure):
        return self.func(self.args[0].factor())
class Abs(Function):
    """
    Return the absolute value of the argument.
    This is an extension of the built-in function abs() to accept symbolic
    values. If you pass a SymPy expression to the built-in abs(), it will
    pass it automatically to Abs().
    Examples
    ========
    >>> from sympy import Abs, Symbol, S
    >>> Abs(-1)
    1
    >>> x = Symbol('x', real=True)
    >>> Abs(-x)
    Abs(x)
    >>> Abs(x**2)
    x**2
    >>> abs(-x) # The Python built-in
    Abs(x)
    Note that the Python built-in will return either an Expr or int depending on
    the argument::
    >>> type(abs(-1))
    <... 'int'>
    >>> type(abs(S.NegativeOne))
    <class 'sympy.core.numbers.One'>
    Abs will always return a sympy object.
    See Also
    ========
    sign, conjugate
    """
    is_real = True
    is_negative = False
    unbranched = True
    def fdiff(self, argindex=1):
        """
        Get the first derivative of the argument to Abs().
        Examples
        ========
        >>> from sympy.abc import x
        >>> from sympy.functions import Abs
        >>> Abs(-x).fdiff()
        sign(x)
        """
        if argindex == 1:
            return sign(self.args[0])
        else:
            raise ArgumentIndexError(self, argindex)
    @classmethod
    def eval(cls, arg):
        from sympy.simplify.simplify import signsimp
        # Let the argument supply its own |.| rule first.
        if hasattr(arg, '_eval_Abs'):
            obj = arg._eval_Abs()
            if obj is not None:
                return obj
        if not isinstance(arg, Expr):
            raise TypeError("Bad argument type for Abs(): %s" % type(arg))
        # handle what we can
        arg = signsimp(arg, evaluate=False)
        if arg.is_Mul:
            # |a*b| == |a|*|b|: evaluate the factors we can, keep the rest
            # inside an unevaluated Abs.
            known = []
            unk = []
            for t in arg.args:
                tnew = cls(t)
                if tnew.func is cls:
                    unk.append(tnew.args[0])
                else:
                    known.append(tnew)
            known = Mul(*known)
            unk = cls(Mul(*unk), evaluate=False) if unk else S.One
            return known*unk
        if arg is S.NaN:
            return S.NaN
        if arg.is_Pow:
            base, exponent = arg.as_base_exp()
            if base.is_real:
                if exponent.is_integer:
                    if exponent.is_even:
                        # A real base to an even power is already nonnegative.
                        return arg
                    if base is S.NegativeOne:
                        return S.One
                    if base.func is cls and exponent is S.NegativeOne:
                        return arg
                    return Abs(base)**exponent
                if base.is_positive == True:
                    return base**re(exponent)
                return (-base)**re(exponent)*exp(-S.Pi*im(exponent))
        if isinstance(arg, exp):
            # |exp(z)| == exp(re(z))
            return exp(re(arg.args[0]))
        if arg.is_zero:  # it may be an Expr that is zero
            return S.Zero
        if arg.is_nonnegative:
            return arg
        if arg.is_nonpositive:
            return -arg
        if arg.is_imaginary:
            arg2 = -S.ImaginaryUnit * arg
            if arg2.is_nonnegative:
                return arg2
        if arg.is_Add:
            if arg.has(S.Infinity, S.NegativeInfinity):
                if any(a.is_infinite for a in arg.as_real_imag()):
                    return S.Infinity
            if arg.is_real is None and arg.is_imaginary is None:
                if all(a.is_real or a.is_imaginary or (S.ImaginaryUnit*a).is_real for a in arg.args):
                    from sympy import expand_mul
                    # |z| == sqrt(z*conjugate(z))
                    return sqrt(expand_mul(arg*arg.conjugate()))
        if arg.is_real is False and arg.is_imaginary is False:
            from sympy import expand_mul
            return sqrt(expand_mul(arg*arg.conjugate()))
    def _eval_is_integer(self):
        if self.args[0].is_real:
            return self.args[0].is_integer
    def _eval_is_nonzero(self):
        return self._args[0].is_nonzero
    def _eval_is_positive(self):
        return self.is_nonzero
    def _eval_is_rational(self):
        if self.args[0].is_real:
            return self.args[0].is_rational
    def _eval_is_even(self):
        if self.args[0].is_real:
            return self.args[0].is_even
    def _eval_is_odd(self):
        if self.args[0].is_real:
            return self.args[0].is_odd
    def _eval_is_algebraic(self):
        return self.args[0].is_algebraic
    def _eval_power(self, exponent):
        # |x|**even == x**even for real x; odd powers reduce by one factor.
        if self.args[0].is_real and exponent.is_integer:
            if exponent.is_even:
                return self.args[0]**exponent
            elif exponent is not S.NegativeOne and exponent.is_Integer:
                return self.args[0]**(exponent - 1)*self
        return
    def _eval_nseries(self, x, n, logx):
        # Expand the argument and attach the sign of its leading direction.
        direction = self.args[0].leadterm(x)[0]
        s = self.args[0]._eval_nseries(x, n=n, logx=logx)
        when = Eq(direction, 0)
        return Piecewise(
            ((s.subs(direction, 0)), when),
            (sign(direction)*s, True),
        )
    def _sage_(self):
        import sage.all as sage
        return sage.abs_symbolic(self.args[0]._sage_())
    def _eval_derivative(self, x):
        if self.args[0].is_real or self.args[0].is_imaginary:
            return Derivative(self.args[0], x, evaluate=True) \
                * sign(conjugate(self.args[0]))
        return (re(self.args[0]) * Derivative(re(self.args[0]), x,
            evaluate=True) + im(self.args[0]) * Derivative(im(self.args[0]),
                x, evaluate=True)) / Abs(self.args[0])
    def _eval_rewrite_as_Heaviside(self, arg):
        # Note this only holds for real arg (since Heaviside is not defined
        # for complex arguments).
        from sympy import Heaviside
        if arg.is_real:
            return arg*(Heaviside(arg) - Heaviside(-arg))
    def _eval_rewrite_as_Piecewise(self, arg):
        if arg.is_real:
            return Piecewise((arg, arg >= 0), (-arg, True))
    def _eval_rewrite_as_sign(self, arg):
        from sympy import sign
        return arg/sign(arg)
class arg(Function):
    """
    Returns the argument (in radians) of a complex number. For a real
    number, the argument is always 0.
    Examples
    ========
    >>> from sympy.functions import arg
    >>> from sympy import I, sqrt
    >>> arg(2.0)
    0
    >>> arg(I)
    pi/2
    >>> arg(sqrt(2) + I*sqrt(2))
    pi/4
    """
    is_real = True
    is_finite = True
    @classmethod
    def eval(cls, arg):
        if not arg.is_Atom:
            # Normalize: replace factors of known sign by that sign so the
            # resulting atan2 has a chance to evaluate.
            c, arg_ = factor_terms(arg).as_coeff_Mul()
            if arg_.is_Mul:
                arg_ = Mul(*[a if (sign(a) not in (-1, 1)) else
                    sign(a) for a in arg_.args])
            arg_ = sign(c)*arg_
        else:
            arg_ = arg
        x, y = re(arg_), im(arg_)
        rv = atan2(y, x)
        if rv.is_number and not rv.atoms(AppliedUndef):
            return rv
        if arg_ != arg:
            # Keep the normalized (but unevaluated) form.
            return cls(arg_, evaluate=False)
    def _eval_derivative(self, t):
        # d/dt atan2(y, x) == (x*y' - y*x') / (x**2 + y**2)
        x, y = re(self.args[0]), im(self.args[0])
        return (x * Derivative(y, t, evaluate=True) - y *
            Derivative(x, t, evaluate=True)) / (x**2 + y**2)
    def _eval_rewrite_as_atan2(self, arg):
        x, y = re(self.args[0]), im(self.args[0])
        return atan2(y, x)
class conjugate(Function):
    """
    Returns the `complex conjugate` Ref[1] of an argument.
    In mathematics, the complex conjugate of a complex number
    is given by changing the sign of the imaginary part.
    Thus, the conjugate of the complex number
    :math:`a + ib` (where a and b are real numbers) is :math:`a - ib`
    Examples
    ========
    >>> from sympy import conjugate, I
    >>> conjugate(2)
    2
    >>> conjugate(I)
    -I
    See Also
    ========
    sign, Abs
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Complex_conjugation
    """
    @classmethod
    def eval(cls, arg):
        # Delegate to the argument's own conjugation rule when it has one.
        result = arg._eval_conjugate()
        if result is not None:
            return result
    def _eval_Abs(self):
        # |conjugate(z)| == |z|
        return Abs(self.args[0], evaluate=True)
    def _eval_adjoint(self):
        # adjoint == conjugate-transpose, so this leaves a bare transpose.
        return transpose(self.args[0])
    def _eval_conjugate(self):
        # Conjugation is an involution.
        return self.args[0]
    def _eval_derivative(self, x):
        if x.is_real:
            return conjugate(Derivative(self.args[0], x, evaluate=True))
        if x.is_imaginary:
            return -conjugate(Derivative(self.args[0], x, evaluate=True))
    def _eval_transpose(self):
        return adjoint(self.args[0])
    def _eval_is_algebraic(self):
        return self.args[0].is_algebraic
class transpose(Function):
    """
    Linear map transposition.
    """
    @classmethod
    def eval(cls, arg):
        # Use the argument's own transposition rule when it defines one.
        result = arg._eval_transpose()
        if result is not None:
            return result
    def _eval_adjoint(self):
        # adjoint(transpose(z)) == conjugate(z)
        return conjugate(self.args[0])
    def _eval_conjugate(self):
        # conjugate(transpose(z)) == adjoint(z)
        return adjoint(self.args[0])
    def _eval_transpose(self):
        # Transposition is an involution.
        return self.args[0]
class adjoint(Function):
    """
    Conjugate transpose or Hermite conjugation.
    """
    @classmethod
    def eval(cls, arg):
        # Prefer a dedicated adjoint rule; otherwise fall back to
        # conjugating a transpose rule.
        result = arg._eval_adjoint()
        if result is not None:
            return result
        result = arg._eval_transpose()
        if result is not None:
            return conjugate(result)
    def _eval_adjoint(self):
        # The adjoint is an involution.
        return self.args[0]
    def _eval_conjugate(self):
        return transpose(self.args[0])
    def _eval_transpose(self):
        return conjugate(self.args[0])
    def _latex(self, printer, exp=None, *args):
        # Render as z^\dag, parenthesized when raised to a power.
        body = printer._print(self.args[0])
        tex = r'%s^{\dag}' % body
        if exp:
            tex = r'\left(%s\right)^{%s}' % (tex, printer._print(exp))
        return tex
    def _pretty(self, printer, *args):
        from sympy.printing.pretty.stringpict import prettyForm
        pform = printer._print(self.args[0], *args)
        # Unicode terminals get a dagger; ASCII falls back to '+'.
        if printer._use_unicode:
            pform = pform**prettyForm(u('\N{DAGGER}'))
        else:
            pform = pform**prettyForm('+')
        return pform
###############################################################################
############### HANDLING OF POLAR NUMBERS #####################################
###############################################################################
class polar_lift(Function):
    """
    Lift argument to the Riemann surface of the logarithm, using the
    standard branch.
    >>> from sympy import Symbol, polar_lift, I
    >>> p = Symbol('p', polar=True)
    >>> x = Symbol('x')
    >>> polar_lift(4)
    4*exp_polar(0)
    >>> polar_lift(-4)
    4*exp_polar(I*pi)
    >>> polar_lift(-I)
    exp_polar(-I*pi/2)
    >>> polar_lift(I + 2)
    polar_lift(2 + I)
    >>> polar_lift(4*x)
    4*polar_lift(x)
    >>> polar_lift(4*p)
    4*p
    See Also
    ========
    sympy.functions.elementary.exponential.exp_polar
    periodic_argument
    """
    is_polar = True
    is_comparable = False  # Cannot be evalf'd.
    @classmethod
    def eval(cls, arg):
        from sympy import exp_polar, pi, I, arg as argument
        if arg.is_number:
            ar = argument(arg)
            # In general we want to affirm that something is known,
            # e.g. `not ar.has(argument) and not ar.has(atan)`
            # but for now we will just be more restrictive and
            # see that it has evaluated to one of the known values.
            if ar in (0, pi/2, -pi/2, pi):
                return exp_polar(I*ar)*abs(arg)
        if arg.is_Mul:
            args = arg.args
        else:
            args = [arg]
        # Partition factors: already-polar, known-positive, and the rest.
        included = []
        excluded = []
        positive = []
        for arg in args:
            if arg.is_polar:
                included += [arg]
            elif arg.is_positive:
                positive += [arg]
            else:
                excluded += [arg]
        if len(excluded) < len(args):
            # At least one factor was classified: lift only the remainder.
            if excluded:
                return Mul(*(included + positive))*polar_lift(Mul(*excluded))
            elif included:
                return Mul(*(included + positive))
            else:
                return Mul(*positive)*exp_polar(0)
    def _eval_evalf(self, prec):
        """ Careful! any evalf of polar numbers is flaky """
        return self.args[0]._eval_evalf(prec)
    def _eval_Abs(self):
        return Abs(self.args[0], evaluate=True)
class periodic_argument(Function):
    """
    Represent the argument on a quotient of the Riemann surface of the
    logarithm. That is, given a period P, always return a value in
    (-P/2, P/2], by using exp(P*I) == 1.
    >>> from sympy import exp, exp_polar, periodic_argument, unbranched_argument
    >>> from sympy import I, pi
    >>> unbranched_argument(exp(5*I*pi))
    pi
    >>> unbranched_argument(exp_polar(5*I*pi))
    5*pi
    >>> periodic_argument(exp_polar(5*I*pi), 2*pi)
    pi
    >>> periodic_argument(exp_polar(5*I*pi), 3*pi)
    -pi
    >>> periodic_argument(exp_polar(5*I*pi), pi)
    0
    See Also
    ========
    sympy.functions.elementary.exponential.exp_polar
    polar_lift : Lift argument to the Riemann surface of the logarithm
    principal_branch
    """
    @classmethod
    def _getunbranched(cls, ar):
        # Compute the unbranched (unreduced) argument of a product, or
        # None when some factor's contribution cannot be determined.
        from sympy import exp_polar, log, polar_lift
        if ar.is_Mul:
            args = ar.args
        else:
            args = [ar]
        unbranched = 0
        for a in args:
            if not a.is_polar:
                unbranched += arg(a)
            elif a.func is exp_polar:
                unbranched += a.exp.as_real_imag()[1]
            elif a.is_Pow:
                # NOTE: locally shadows the module-level re/im classes
                # with the real/imaginary parts of the exponent.
                re, im = a.exp.as_real_imag()
                unbranched += re*unbranched_argument(
                    a.base) + im*log(abs(a.base))
            elif a.func is polar_lift:
                unbranched += arg(a.args[0])
            else:
                return None
        return unbranched
    @classmethod
    def eval(cls, ar, period):
        # Our strategy is to evaluate the argument on the Riemann surface of the
        # logarithm, and then reduce.
        # NOTE evidently this means it is a rather bad idea to use this with
        # period != 2*pi and non-polar numbers.
        from sympy import ceiling, oo, atan2, atan, polar_lift, pi, Mul
        if not period.is_positive:
            return None
        if period == oo and isinstance(ar, principal_branch):
            return periodic_argument(*ar.args)
        if ar.func is polar_lift and period >= 2*pi:
            return periodic_argument(ar.args[0], period)
        if ar.is_Mul:
            # Positive factors contribute nothing to the argument.
            newargs = [x for x in ar.args if not x.is_positive]
            if len(newargs) != len(ar.args):
                return periodic_argument(Mul(*newargs), period)
        unbranched = cls._getunbranched(ar)
        if unbranched is None:
            return None
        if unbranched.has(periodic_argument, atan2, arg, atan):
            return None
        if period == oo:
            return unbranched
        if period != oo:
            # Reduce into (-period/2, period/2].
            n = ceiling(unbranched/period - S(1)/2)*period
            if not n.has(ceiling):
                return unbranched - n
    def _eval_evalf(self, prec):
        from sympy import ceiling, oo
        z, period = self.args
        if period == oo:
            unbranched = periodic_argument._getunbranched(z)
            if unbranched is None:
                return self
            return unbranched._eval_evalf(prec)
        # Reduce the numeric unbranched argument into the period window.
        ub = periodic_argument(z, oo)._eval_evalf(prec)
        return (ub - ceiling(ub/period - S(1)/2)*period)._eval_evalf(prec)
def unbranched_argument(arg):
    """Return the argument of ``arg`` on the full Riemann surface of the
    logarithm, i.e. a `periodic_argument` with infinite period (no
    reduction into a branch window)."""
    from sympy import oo
    return periodic_argument(arg, oo)
class principal_branch(Function):
    """
    Represent a polar number reduced to its principal branch on a quotient
    of the Riemann surface of the logarithm.
    This is a function of two arguments. The first argument is a polar
    number `z`, and the second one a positive real number of infinity, `p`.
    The result is "z mod exp_polar(I*p)".
    >>> from sympy import exp_polar, principal_branch, oo, I, pi
    >>> from sympy.abc import z
    >>> principal_branch(z, oo)
    z
    >>> principal_branch(exp_polar(2*pi*I)*3, 2*pi)
    3*exp_polar(0)
    >>> principal_branch(exp_polar(2*pi*I)*3*z, 2*pi)
    3*principal_branch(z, 2*pi)
    See Also
    ========
    sympy.functions.elementary.exponential.exp_polar
    polar_lift : Lift argument to the Riemann surface of the logarithm
    periodic_argument
    """
    is_polar = True
    is_comparable = False  # cannot always be evalf'd
    @classmethod
    def eval(self, x, period):
        # NOTE: decorated with @classmethod but the first parameter is
        # (misleadingly) named `self`; it receives the class.
        from sympy import oo, exp_polar, I, Mul, polar_lift, Symbol
        if isinstance(x, polar_lift):
            return principal_branch(x.args[0], period)
        if period == oo:
            return x
        ub = periodic_argument(x, oo)
        barg = periodic_argument(x, period)
        if ub != barg and not ub.has(periodic_argument) \
                and not barg.has(periodic_argument):
            # Reduction changed the argument: rotate x back into the branch.
            pl = polar_lift(x)
            def mr(expr):
                if not isinstance(expr, Symbol):
                    return polar_lift(expr)
                return expr
            pl = pl.replace(polar_lift, mr)
            if not pl.has(polar_lift):
                res = exp_polar(I*(barg - ub))*pl
                if not res.is_polar and not res.has(exp_polar):
                    res *= exp_polar(0)
                return res
        if not x.free_symbols:
            c, m = x, ()
        else:
            c, m = x.as_coeff_mul(*x.free_symbols)
        # Move positive symbolic factors into the coefficient.
        others = []
        for y in m:
            if y.is_positive:
                c *= y
            else:
                others += [y]
        m = tuple(others)
        arg = periodic_argument(c, period)
        if arg.has(periodic_argument):
            return None
        if arg.is_number and (unbranched_argument(c) != arg or
                (arg == 0 and m != () and c != 1)):
            if arg == 0:
                return abs(c)*principal_branch(Mul(*m), period)
            return principal_branch(exp_polar(I*arg)*Mul(*m), period)*abs(c)
        if arg.is_number and ((abs(arg) < period/2) == True or arg == period/2) \
                and m == ():
            return exp_polar(arg*I)*abs(c)
    def _eval_evalf(self, prec):
        from sympy import exp, pi, I
        z, period = self.args
        p = periodic_argument(z, period)._eval_evalf(prec)
        if abs(p) > pi or p == -pi:
            return self  # Cannot evalf for this argument.
        return (abs(z)*exp(I*p))._eval_evalf(prec)
# /cyclic/
# Register Abs on sympy.core.basic — presumably to break an import cycle
# (the `/cyclic/` marker) so core code can reach Abs without importing
# this module at top level; confirm before removing.
from sympy.core import basic as _
_.abs_ = Abs
del _
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nicira, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import unittest2
import mock
from quantum.agent import l3_agent
from quantum.agent.linux import interface
from quantum.common import config as base_config
from quantum.common import constants as l3_constants
from quantum.openstack.common import cfg
from quantum.openstack.common import uuidutils
_uuid = uuidutils.generate_uuid
HOSTNAME = 'myhost'
class TestBasicRouterOperations(unittest2.TestCase):
    def setUp(self):
        """Build an agent config and patch out every external dependency.

        All system-touching collaborators (ip_lib device checks, command
        execution, external process manager, the interface driver and the
        L3 plugin API) are replaced with mocks so router operations can be
        exercised without root privileges or a running Quantum service.
        """
        self.conf = cfg.CommonConfigOpts()
        self.conf.register_opts(base_config.core_opts)
        self.conf.register_opts(l3_agent.L3NATAgent.OPTS)
        self.conf.register_opts(interface.OPTS)
        self.conf.set_override('interface_driver',
                               'quantum.agent.linux.interface.NullDriver')
        self.conf.root_helper = 'sudo'
        # Patch device-existence checks and shell command execution.
        self.device_exists_p = mock.patch(
            'quantum.agent.linux.ip_lib.device_exists')
        self.device_exists = self.device_exists_p.start()
        self.utils_exec_p = mock.patch(
            'quantum.agent.linux.utils.execute')
        self.utils_exec = self.utils_exec_p.start()
        self.external_process_p = mock.patch(
            'quantum.agent.linux.external_process.ProcessManager')
        self.external_process = self.external_process_p.start()
        # The patched driver class always hands back one shared MagicMock
        # so tests can assert on plug/unplug/init_l3 call counts.
        self.dvr_cls_p = mock.patch('quantum.agent.linux.interface.NullDriver')
        driver_cls = self.dvr_cls_p.start()
        self.mock_driver = mock.MagicMock()
        self.mock_driver.DEV_NAME_LEN = (
            interface.LinuxInterfaceDriver.DEV_NAME_LEN)
        driver_cls.return_value = self.mock_driver
        # Same pattern for the IP wrapper and the L3 plugin RPC API.
        self.ip_cls_p = mock.patch('quantum.agent.linux.ip_lib.IPWrapper')
        ip_cls = self.ip_cls_p.start()
        self.mock_ip = mock.MagicMock()
        ip_cls.return_value = self.mock_ip
        self.l3pluginApi_cls_p = mock.patch(
            'quantum.agent.l3_agent.L3PluginApi')
        l3pluginApi_cls = self.l3pluginApi_cls_p.start()
        self.plugin_api = mock.Mock()
        l3pluginApi_cls.return_value = self.plugin_api
def tearDown(self):
self.device_exists_p.stop()
self.l3pluginApi_cls_p.stop()
self.ip_cls_p.stop()
self.dvr_cls_p.stop()
self.utils_exec_p.stop()
self.external_process_p.stop()
def testRouterInfoCreate(self):
id = _uuid()
ri = l3_agent.RouterInfo(id, self.conf.root_helper,
self.conf.use_namespaces)
self.assertTrue(ri.ns_name().endswith(id))
def testAgentCreate(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
def _test_internal_network_action(self, action):
port_id = _uuid()
router_id = _uuid()
network_id = _uuid()
ri = l3_agent.RouterInfo(router_id, self.conf.root_helper,
self.conf.use_namespaces)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
interface_name = agent.get_internal_device_name(port_id)
cidr = '99.0.1.9/24'
mac = 'ca:fe:de:ad:be:ef'
ex_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30'}]}
if action == 'add':
self.device_exists.return_value = False
agent.internal_network_added(ri, ex_gw_port, network_id,
port_id, cidr, mac)
self.assertEquals(self.mock_driver.plug.call_count, 1)
self.assertEquals(self.mock_driver.init_l3.call_count, 1)
elif action == 'remove':
self.device_exists.return_value = True
agent.internal_network_removed(ri, ex_gw_port, port_id, cidr)
self.assertEquals(self.mock_driver.unplug.call_count, 1)
else:
raise Exception("Invalid action %s" % action)
def testAgentAddInternalNetwork(self):
self._test_internal_network_action('add')
def testAgentRemoveInternalNetwork(self):
self._test_internal_network_action('remove')
def _test_external_gateway_action(self, action):
router_id = _uuid()
ri = l3_agent.RouterInfo(router_id, self.conf.root_helper,
self.conf.use_namespaces)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
internal_cidrs = ['100.0.1.0/24', '200.74.0.0/16']
ex_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': _uuid()}],
'subnet': {'gateway_ip': '20.0.0.1'},
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'ip_cidr': '20.0.0.30/24'}
interface_name = agent.get_external_device_name(ex_gw_port['id'])
if action == 'add':
self.device_exists.return_value = False
agent.external_gateway_added(ri, ex_gw_port, internal_cidrs)
self.assertEquals(self.mock_driver.plug.call_count, 1)
self.assertEquals(self.mock_driver.init_l3.call_count, 1)
arping_cmd = ['arping', '-A', '-U',
'-I', interface_name,
'-c', self.conf.send_arp_for_ha,
'20.0.0.30']
if self.conf.use_namespaces:
self.mock_ip.netns.execute.assert_any_call(
arping_cmd, check_exit_code=True)
else:
self.utils_exec.assert_any_call(
check_exit_code=True, root_helper=self.conf.root_helper)
elif action == 'remove':
self.device_exists.return_value = True
agent.external_gateway_removed(ri, ex_gw_port, internal_cidrs)
self.assertEquals(self.mock_driver.unplug.call_count, 1)
else:
raise Exception("Invalid action %s" % action)
def testAgentAddExternalGateway(self):
self._test_external_gateway_action('add')
def testAgentRemoveExternalGateway(self):
self._test_external_gateway_action('remove')
def _test_floating_ip_action(self, action):
router_id = _uuid()
ri = l3_agent.RouterInfo(router_id, self.conf.root_helper,
self.conf.use_namespaces)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
floating_ip = '20.0.0.100'
fixed_ip = '10.0.0.23'
ex_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': _uuid()}],
'subnet': {'gateway_ip': '20.0.0.1'},
'id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'ip_cidr': '20.0.0.30/24'}
interface_name = agent.get_external_device_name(ex_gw_port['id'])
if action == 'add':
self.device_exists.return_value = False
agent.floating_ip_added(ri, ex_gw_port, floating_ip, fixed_ip)
arping_cmd = ['arping', '-A', '-U',
'-I', interface_name,
'-c', self.conf.send_arp_for_ha,
floating_ip]
if self.conf.use_namespaces:
self.mock_ip.netns.execute.assert_any_call(
arping_cmd, check_exit_code=True)
else:
self.utils_exec.assert_any_call(
check_exit_code=True, root_helper=self.conf.root_helper)
elif action == 'remove':
self.device_exists.return_value = True
agent.floating_ip_removed(ri, ex_gw_port, floating_ip, fixed_ip)
else:
raise Exception("Invalid action %s" % action)
def testAgentAddFloatingIP(self):
self._test_floating_ip_action('add')
def testAgentRemoveFloatingIP(self):
self._test_floating_ip_action('remove')
def testProcessRouter(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router_id = _uuid()
ex_gw_port = {'id': _uuid(),
'network_id': _uuid(),
'fixed_ips': [{'ip_address': '19.4.4.4',
'subnet_id': _uuid()}],
'subnet': {'cidr': '19.4.4.0/24',
'gateway_ip': '19.4.4.1'}}
internal_port = {'id': _uuid(),
'network_id': _uuid(),
'admin_state_up': True,
'fixed_ips': [{'ip_address': '35.4.4.4',
'subnet_id': _uuid()}],
'mac_address': 'ca:fe:de:ad:be:ef',
'subnet': {'cidr': '35.4.4.0/24',
'gateway_ip': '35.4.4.1'}}
fake_floatingips1 = {'floatingips': [
{'id': _uuid(),
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'port_id': _uuid()}]}
router = {
'id': router_id,
l3_constants.FLOATINGIP_KEY: fake_floatingips1['floatingips'],
l3_constants.INTERFACE_KEY: [internal_port],
'gw_port': ex_gw_port}
ri = l3_agent.RouterInfo(router_id, self.conf.root_helper,
self.conf.use_namespaces, router=router)
agent.process_router(ri)
# remap floating IP to a new fixed ip
fake_floatingips2 = copy.deepcopy(fake_floatingips1)
fake_floatingips2['floatingips'][0]['fixed_ip_address'] = '7.7.7.8'
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips2['floatingips']
agent.process_router(ri)
# remove just the floating ips
del router[l3_constants.FLOATINGIP_KEY]
agent.process_router(ri)
# now no ports so state is torn down
del router[l3_constants.INTERFACE_KEY]
del router['gw_port']
agent.process_router(ri)
def testRoutersWithAdminStateDown(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = None
routers = [
{'id': _uuid(),
'admin_state_up': False,
'external_gateway_info': {}}]
agent._process_routers(routers)
self.assertNotIn(routers[0]['id'], agent.router_info)
def testSingleLoopRouterRemoval(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = None
routers = [
{'id': _uuid(),
'admin_state_up': True,
'external_gateway_info': {}}]
agent._process_routers(routers)
agent.router_deleted(None, routers[0]['id'])
# verify that remove is called
self.assertEquals(self.mock_ip.get_devices.call_count, 1)
self.device_exists.assert_has_calls(
[mock.call(self.conf.external_network_bridge)])
def testDestroyNamespace(self):
class FakeDev(object):
def __init__(self, name):
self.name = name
self.mock_ip.get_namespaces.return_value = ['qrouter-foo']
self.mock_ip.get_devices.return_value = [FakeDev('qr-aaaa'),
FakeDev('qgw-aaaa')]
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._destroy_all_router_namespaces()
| |
#!/usr/bin/env python
import base64
import sys, re
from pyparsing import (Regex, Suppress, Combine, Optional, CaselessKeyword,
ZeroOrMore, OneOrMore, removeQuotes, quotedString,
Empty, Literal, NoMatch, Group, oneOf, Forward,
Keyword, ParseExpression, ParseElementEnhance,
ParseException, col, lineno, restOfLine)
import rdflib
from rdflib.term import URIRef
from rdflib.sparql import bison as components
XSD_NS = rdflib.namespace.Namespace(u'http://www.w3.org/2001/XMLSchema#')
# Debug utilities:
DEBUG = False  # set True to name productions and enable the XML parse tracer below
if DEBUG:
def apply_to_pyparser_tree(parser, f, cache=None):
if cache is None:
cache = set()
if parser in cache:
return
else:
cache.add(parser)
f(parser)
if isinstance(parser, ParseElementEnhance):
apply_to_pyparser_tree(parser.expr, f, cache)
elif isinstance(parser, ParseExpression):
for expr in parser.exprs:
apply_to_pyparser_tree(expr, f, cache)
from Ft.Xml import MarkupWriter
    class ParseTracer(object):
        # Wires pyparsing debug actions into an XML trace document
        # (written with 4Suite's MarkupWriter).  Python 2 only: uses
        # `unicode` and `except X, e` syntax.
        def __init__(self):
            pass
        def start_action(self, instring, loc, expr):
            # Open an <attempt> element each time an expression is tried.
            self.writer.startElement(u'attempt', attributes={
                u'class': unicode(expr.__class__.__name__),
                u'loc': unicode(repr(loc)), u'expr': unicode(repr(expr)),
                u'lineno': unicode(lineno(loc, instring)),
                u'col': unicode(col(loc, instring)),})
        def success_action(self, instring, tokensStart, loc, expr, tokens):
            # Close the current <attempt> with a <success/> marker.
            self.writer.simpleElement(u'success')
            self.writer.endElement(u'attempt')
        def exception_action(self, instring, tokensStart, expr, err):
            # Close the current <attempt> with a <fail err="..."/> marker.
            self.writer.simpleElement(u'fail', attributes={
                u'err': unicode(repr(err))})
            self.writer.endElement(u'attempt')
        def set_debug_actions(self, parser):
            parser.setDebugActions(
                self.start_action, self.success_action, self.exception_action)
        def parse(self, parser, input, stream):
            # Instrument the whole grammar, then parse while streaming a
            # <trace> document to *stream*; the document is closed even
            # when parsing raises.
            self.parser = parser
            apply_to_pyparser_tree(self.parser, self.set_debug_actions)
            self.writer = MarkupWriter(indent='yes', stream=stream)
            self.writer.startDocument()
            self.writer.startElement(u'trace')
            try:
                result = self.parser.parseString(input)[0]
            except ParseException, e:
                self.writer.simpleElement(u'fail', attributes={
                    u'err': unicode(repr(e))})
                #self.writer.endElement(u'attempt')
                raise
            finally:
                self.writer.endElement(u'trace')
                self.writer.endDocument()
            return result
def data_dir(thing):
    """Return the non-dunder, non-callable attribute names of *thing*."""
    names = []
    for name in dir(thing):
        if name.startswith('__'):
            continue
        if callable(getattr(thing, name)):
            continue
        names.append(name)
    return names
def struct_data(thing, depth=None, brief=False):
    """Recursively summarize *thing* for debug printing.

    Lists and tuples are mapped element-wise; objects with data
    attributes (per `data_dir`) become dicts keyed by attribute name with
    a '__class__' entry naming the class.  *depth* limits recursion
    ('...' is substituted at the cutoff); *brief* shortens the class name
    to its last dotted component.
    """
    if depth == 0:
        return '...'
    elif depth is not None:
        depth -= 1
    if isinstance(thing, list):
        return [struct_data(item, depth, brief) for item in thing]
    if isinstance(thing, tuple):
        # Bug fix: the original returned a bare generator expression
        # here, so tuples pretty-printed as "<generator object ...>".
        return tuple(struct_data(item, depth, brief) for item in thing)
    d = data_dir(thing)
    if len(d) > 0:
        if brief:
            classname = str(thing.__class__).split('.')[-1]
        else:
            classname = repr(thing.__class__)
        # Bug fix: 'classname' was computed but never used (the raw
        # class object was stored), which made the 'brief' flag a no-op.
        result = {'__class__': classname}
        for key in d:
            result[key] = struct_data(getattr(thing, key), depth, brief)
        return result
    else:
        return thing
from pprint import pprint
def debug(results, text=None):
    # Dump a labelled summary of pyparsing *results* to stderr
    # (Python 2 print-chevron syntax), limited to 3 levels deep.
    if text is None:
        text = ''
    print >> sys.stderr, 'DEBUG (parse success):', text
    pprint(struct_data(results.asList(), 3, True), sys.stderr)
def debug2(s, loc, toks):
    # Parse-action-shaped variant of debug(): dumps the matched string
    # and token summary to stderr (Python 2 print syntax).
    print >> sys.stderr, 'DEBUG (parse success): parse string =', s
    pprint(struct_data(toks.asList(), 3, True), sys.stderr)
def debug_fail(s, loc, expr, err):
    # Failure-action hook: report the failing expression and error
    # to stderr (Python 2 print syntax).
    print >> sys.stderr, 'DEBUG (parse fail): expr =', expr
    print >> sys.stderr, err
def composition(callables):
    """Compose *callables* left-to-right into one single-argument function.

    The returned function feeds its argument through each callable in
    order, each receiving the previous result.
    """
    def composed(arg):
        value = arg
        for fn in callables:
            value = fn(value)
        return value
    return composed
def composition2(callables):
    """Compose *callables* over positional arguments.

    The first callable receives the returned function's positional
    arguments; each subsequent callable receives the previous single
    result as its only argument.
    """
    def composed(*args):
        current = args
        for fn in callables:
            current = [fn(*current)]
        return current[0]
    return composed
def regex_group(regex):
    """Wrap the regex fragment *regex* in a non-capturing group."""
    return '(?:' + regex + ')'
def as_empty(results):
    """Parse action: discard the match and yield a single empty token list."""
    return [list()]
def setPropertyValueList(results):
    """Parse action: attach a property-value list to its collection.

    Expects *results* to hold [collection, property_values]; forwards
    the values to the collection's own setPropertyValueList() and
    returns the collection as the single token.
    """
    items = results.asList()
    target = items[0]
    target.setPropertyValueList(items[1])
    return target
class ProjectionMismatchException(Exception):
    """Raised by refer_component handlers when fewer parse results are
    available than the requested projection indices."""
    pass
def refer_component(component, initial_args=None, projection=None, **kwargs):
    '''
    Create a function to forward parsing results to the appropriate
    constructor.
    The pyparsing library allows us to modify the token stream that is
    returned by a particular expression with the `setParseAction()` method.
    This method sets a handler function that should take a single
    `ParseResults` instance as an argument, and then return a new token or
    list of tokens. Mainly, we want to pass lower level tokens to SPARQL
    parse tree objects; the constructors for these objects take a number of
    positional arguments, so this function builds a new function that will
    forward the pyparsing results to the positional arguments of the
    appropriate constructor.
    This function provides a bit more functionality with its additional
    arguments:
    - `initial_args`: static list of initial arguments to add to the
    beginning of the arguments list before additional processing
    - `projection`: list of integers that reorders the initial arguments
    based on the indices that it contains.
    Finally, any additional keyword arguments passed to this function are
    passed along to the handler that is constructed.
    Note that we always convert pyparsing results to a list with the
    `asList()` method before using those results; this works, but we may
    only need this for testing. To be safe, we include it here, but we
    might want to investigate further whether or not it could be moved only
    to testing code. Also, we might want to investigate whether a list-only
    parsing mode could be added to pyparsing.
    '''
    # Fast path: no prepending or reordering requested, so forward the
    # results positionally as-is.
    if initial_args is None and projection is None:
        def apply(results):
            if DEBUG:
                print >> sys.stderr, component
                debug(results)
            return component(*results.asList(), **kwargs)
    else:
        # Slow path: optionally prepend static args, then optionally
        # reorder via the projection index list before calling.
        def apply(results):
            if DEBUG:
                print >> sys.stderr, component
                debug(results)
            if initial_args is not None:
                results = initial_args + results.asList()
            if projection is not None:
                if len(results) < len(projection):
                    raise ProjectionMismatchException(
                        'Expected at least %d results to make %s, got %d.' %
                        (len(projection), str(component), len(results)))
                projected = []
                for index in projection:
                    projected.append(results[index])
            else:
                projected = results
            return component(*projected, **kwargs)
    return apply
# Productions for terminals, except for those that are really only
# associated with one higher-level production, in which case they are
# defined closer to that production:
LT = Suppress('<')
GT = Suppress('>')
LP = Suppress('(')
RP = Suppress(')')
LB = Suppress('[')
RB = Suppress(']')
LC = Suppress('{')
RC = Suppress('}')
COLON = Literal(':')
SEMICOLON = Suppress(';')
COMMA = Suppress(',')
PERIOD = Suppress('.')
# IRI body: everything except angle brackets, quotes, braces, pipe,
# caret, backtick, backslash, and control characters 0x00-0x20.
IRI = Regex(
    r'[^<>"{}|^`\\%s]*' % ''.join('\\x%02X' % i for i in range(33)))
IRI_REF = LT + IRI + GT
if DEBUG:
    IRI_REF.setName('IRI_REF')
#PN_CHARS_BASE = Regex('[a-zA-Z]')
# Raw regex fragments for prefixed-name character classes; composed via
# regex_group() before compilation.
PN_CHARS_BASE_re = '[a-zA-Z]'
PN_CHARS_U_re = PN_CHARS_BASE_re + '|_'
PN_CHARS_re = PN_CHARS_U_re + '|-|[0-9]'
PN_PREFIX_re = (PN_CHARS_BASE_re +
                '(?:(?:' + PN_CHARS_re + '\\.)*' + PN_CHARS_re + ')?')
PN_PREFIX = Regex(PN_PREFIX_re)
PNAME_NS = Combine(Optional(PN_PREFIX, '') + COLON)
PN_LOCAL = Regex(regex_group(PN_CHARS_U_re + '|[0-9]') +
                 regex_group(
                     regex_group(PN_CHARS_re + '|\\.') + '*' +
                     regex_group(PN_CHARS_re)) + '?')
PNAME_LN = Combine(PNAME_NS + PN_LOCAL)
WS_re = r'[ \t\r\n]*'
NIL = Group(Suppress(Regex(r'\(' + WS_re + r'\)')))
# BaseDecl:
BASE = Suppress(CaselessKeyword('BASE'))
BaseDecl = (BASE + IRI_REF).setParseAction(
    refer_component(components.Bindings.BaseDeclaration))
if DEBUG:
    BaseDecl.setName('BaseDecl')
# PrefixDecl:
PREFIX = Suppress(CaselessKeyword('PREFIX'))
PrefixDecl = (PREFIX + PNAME_NS + IRI_REF).setParseAction(
    refer_component(components.Bindings.PrefixDeclaration))
if DEBUG:
    PrefixDecl.setName('PrefixDecl')
# Prologue: optional BASE followed by any number of PREFIX declarations.
Prologue = (Optional(BaseDecl, None) +
            Group(ZeroOrMore(PrefixDecl))).setParseAction(
    refer_component(components.Query.Prolog))
if DEBUG:
    Prologue.setName('Prologue')
# Var: '?' or '$' sigil followed by a variable name.
QM = Suppress('?')
USD = Suppress('$')
VARNAME = Regex(regex_group(PN_CHARS_U_re + '|[0-9]') +
                regex_group(PN_CHARS_U_re + '|[0-9]') + '*')
Var = ((QM | USD) + VARNAME).setParseAction(
    refer_component(rdflib.term.Variable))
if DEBUG:
    Var.setName('Var')
# PrefixedName:
PrefixedName = PNAME_LN | PNAME_NS
if DEBUG:
    PrefixedName.setName('PrefixedName')
# IRIref: either a bracketed IRI or a prefixed (QName) form.
IRIref = (IRI_REF.setParseAction(refer_component(components.IRIRef.IRIRef)) |
          PrefixedName.setParseAction(refer_component(components.QName.QName)))
if DEBUG:
    IRIref.setName('IRIref')
# DatasetClause:
FROM = Suppress(CaselessKeyword('FROM'))
NAMED = Suppress(CaselessKeyword('NAMED'))
# Question: will this return a list containing a single token, or
# just the single token? I want the latter.
#
# Also, I think there is a bug in IRIRef.* in that they assume that the
# IRIref will be a URIRef, but it could also be a QName.
DatasetClause = (FROM + (
    IRIref.copy().setParseAction(
        refer_component(components.IRIRef.RemoteGraph)) |
    NAMED + IRIref.copy().setParseAction(
        refer_component(components.IRIRef.NamedGraph))))
if DEBUG:
    DatasetClause.setName('DatasetClause')
# String:
#
# TODO: flesh this out to include multiline strings, and also
# investigate a possible bug with Expression.ParsedString; it
# doesn't look like it is properly expanding escaped characters.
String = quotedString.setParseAction(composition2(
    [removeQuotes, components.Expression.ParsedString]))
if DEBUG:
    String.setName('String')
# RDFLiteral: a string with an optional datatype (^^IRI) or language tag.
AT = Suppress('@')
LANGTAG = AT + Regex(PN_CHARS_BASE_re + '+' +
                     regex_group('-[a-zA-Z0-9]+') + '*')
DOUBLE_HAT = Suppress('^^')
RDFLiteral = ((String + DOUBLE_HAT + IRIref).setParseAction(
    refer_component(components.Expression.ParsedDatatypedLiteral)) |
    (String + Optional(LANGTAG, None)).setParseAction(
        refer_component(rdflib.term.Literal)))
if DEBUG:
    RDFLiteral.setName('RDFLiteral')
# NumericLiteral:
#
# TODO: sort this out so that xsd:decimals and xsd:floats are properly
# segregated.
EXPONENT_re = r'(?:[eE][+-]?[0-9]+)'
INT_re = r'[+-]?[0-9]+'
INT = Regex(INT_re).setParseAction(composition(
    [refer_component(int), rdflib.term.Literal]))
INTEGER = Regex(r'[0-9]+').setParseAction(composition(
    [refer_component(int), rdflib.term.Literal]))
FLOAT_re = (r'[+-]?(?:(?:[0-9]+\.[0-9]*%s?)|' +
            r'(?:\.[0-9]+%s?)|(?:[0-9]+%s))') % (
    (EXPONENT_re,) * 3)
FLOAT = Regex(FLOAT_re).setParseAction(composition(
    [refer_component(float), rdflib.term.Literal]))
# FLOAT is tried first so a decimal point is not split by the INT match.
NumericLiteral = (FLOAT | INT)
if DEBUG:
    NumericLiteral.setName('NumericLiteral')
# BooleanLiteral:
BooleanLiteral = (Keyword('true') | Keyword('false')).setParseAction(
    refer_component(rdflib.term.Literal, datatype=XSD_NS.boolean))
if DEBUG:
    BooleanLiteral.setName('BooleanLiteral')
# BlankNode: either a labelled blank node (_:label) or anonymous [].
ANON = Regex(r'\[' + WS_re + r'\]').setParseAction(
    refer_component(rdflib.term.BNode, None, []))
BLANK_NODE_LABEL = (Suppress('_:') + PN_LOCAL).setParseAction(
    refer_component(rdflib.term.BNode))
BlankNode = (BLANK_NODE_LABEL | ANON)
if DEBUG:
    BlankNode.setName('BlankNode')
# GraphTerm:
GraphTerm = (IRIref | RDFLiteral | NumericLiteral | BooleanLiteral |
             BlankNode | NIL)
if DEBUG:
    GraphTerm.setName('GraphTerm')
# VarOrTerm:
VarOrTerm = Var | GraphTerm
if DEBUG:
    VarOrTerm.setName('VarOrTerm')
# VarOrIRIref:
VarOrIRIref = Var | IRIref
if DEBUG:
    VarOrIRIref.setName('VarOrIRIref')
# Verb: a predicate position; the keyword 'a' maps to rdf:type.
Verb = (VarOrIRIref | Keyword('a').setParseAction(
    refer_component(getattr, [rdflib.namespace.RDF, 'type'], [0, 1])))
if DEBUG:
    Verb.setName('Verb')
# Expression: forward-declared because the expression grammar is mutually
# recursive (defined at the bottom of this section).
Expression = Forward()
if DEBUG:
    Expression.setName('Expression')
# BuiltInCall:
STR = Suppress(CaselessKeyword('STR'))
LANG = Suppress(CaselessKeyword('LANG'))
LANGMATCHES = Suppress(CaselessKeyword('LANGMATCHES'))
DATATYPE = Suppress(CaselessKeyword('DATATYPE'))
BOUND = Suppress(CaselessKeyword('BOUND'))
isIRI = Suppress(CaselessKeyword('isIRI'))
isURI = Suppress(CaselessKeyword('isURI'))
isBLANK = Suppress(CaselessKeyword('isBLANK'))
isLITERAL = Suppress(CaselessKeyword('isLITERAL'))
sameTerm = Suppress(CaselessKeyword('sameTERM'))
# RegexExpression
REGEX = Suppress(CaselessKeyword('REGEX'))
RegexExpression = (REGEX + LP + Expression + COMMA + Expression +
                   Optional(COMMA + Expression) + RP).setParseAction(
    refer_component(components.FunctionLibrary.ParsedREGEXInvocation))
if DEBUG:
    RegexExpression.setName('RegexExpression')
# Each alternative binds the matching FunctionLibrary operator constant
# as the first constructor argument via refer_component's initial_args.
BuiltInCall = (
    (STR + LP + Expression + RP).setParseAction(
        refer_component(components.FunctionLibrary.BuiltinFunctionCall,
                        [components.FunctionLibrary.STR])) |
    (LANG + LP + Expression + RP).setParseAction(
        refer_component(components.FunctionLibrary.BuiltinFunctionCall,
                        [components.FunctionLibrary.LANG])) |
    (LANGMATCHES + LP + Expression + COMMA + Expression + RP).setParseAction(
        refer_component(components.FunctionLibrary.BuiltinFunctionCall,
                        [components.FunctionLibrary.LANGMATCHES])) |
    (DATATYPE + LP + Expression + RP).setParseAction(
        refer_component(components.FunctionLibrary.BuiltinFunctionCall,
                        [components.FunctionLibrary.DATATYPE])) |
    (BOUND + LP + Var + RP).setParseAction(
        refer_component(components.FunctionLibrary.BuiltinFunctionCall,
                        [components.FunctionLibrary.BOUND])) |
    (sameTerm + LP + Expression + COMMA + Expression + RP).setParseAction(
        refer_component(components.FunctionLibrary.BuiltinFunctionCall,
                        [components.FunctionLibrary.sameTERM])) |
    (isIRI + LP + Expression + RP).setParseAction(
        refer_component(components.FunctionLibrary.BuiltinFunctionCall,
                        [components.FunctionLibrary.isIRI])) |
    (isURI + LP + Expression + RP).setParseAction(
        refer_component(components.FunctionLibrary.BuiltinFunctionCall,
                        [components.FunctionLibrary.isURI])) |
    (isBLANK + LP + Expression + RP).setParseAction(
        refer_component(components.FunctionLibrary.BuiltinFunctionCall,
                        [components.FunctionLibrary.isBLANK])) |
    (isLITERAL + LP + Expression + RP).setParseAction(
        refer_component(components.FunctionLibrary.BuiltinFunctionCall,
                        [components.FunctionLibrary.isLITERAL])) |
    RegexExpression)
if DEBUG:
    BuiltInCall.setName('BuiltInCall')
ArgList = NIL | Group(LP + Expression + ZeroOrMore(COMMA + Expression) + RP)
if DEBUG:
    ArgList.setName('ArgList')
# FunctionCall:
FunctionCall = (IRIref + ArgList).setParseAction(
    refer_component(components.FunctionLibrary.FunctionCall))
if DEBUG:
    FunctionCall.setName('FunctionCall')
BrackettedExpression = LP + Expression + RP
if DEBUG:
    BrackettedExpression.setName('BrackettedExpression')
PrimaryExpression = (BrackettedExpression | BuiltInCall | FunctionCall |
                     IRIref | RDFLiteral | NumericLiteral |
                     BooleanLiteral | Var)
if DEBUG:
    PrimaryExpression.setName('PrimaryExpression')
UnaryExpression = (
    (Suppress('!') + PrimaryExpression).setParseAction(
        refer_component(components.Operators.LogicalNegation)) |
    (Suppress('+') + PrimaryExpression).setParseAction(
        refer_component(components.Operators.NumericPositive)) |
    (Suppress('-') + PrimaryExpression).setParseAction(
        refer_component(components.Operators.NumericNegative)) |
    PrimaryExpression)
if DEBUG:
    UnaryExpression.setName('UnaryExpression')
# Precedence climbs from multiplicative through additive, relational,
# logical-and, to logical-or below.
MultiplicativeExpression = Group(UnaryExpression + ZeroOrMore(
    (Literal('*') | Literal('/')) + UnaryExpression)).setParseAction(
    refer_component(components.Expression.ParsedMultiplicativeExpressionList))
if DEBUG:
    MultiplicativeExpression.setName('MultiplicativeExpression')
AdditiveExpression = Group(MultiplicativeExpression + ZeroOrMore(
    (Literal('+') | Literal('-')) + MultiplicativeExpression)).setParseAction(
    refer_component(components.Expression.ParsedAdditiveExpressionList))
if DEBUG:
    AdditiveExpression.setName('AdditiveExpression')
NumericExpression = AdditiveExpression
RelationalExpression = (
    (NumericExpression + Suppress('=') +
     NumericExpression).setParseAction(
        refer_component(components.Operators.EqualityOperator)) |
    (NumericExpression + Suppress('!=') +
     NumericExpression).setParseAction(
        refer_component(components.Operators.NotEqualOperator)) |
    (NumericExpression + Suppress('<') +
     NumericExpression).setParseAction(
        refer_component(components.Operators.LessThanOperator)) |
    (NumericExpression + Suppress('>') +
     NumericExpression).setParseAction(
        refer_component(components.Operators.GreaterThanOperator)) |
    (NumericExpression + Suppress('<=') +
     NumericExpression).setParseAction(
        refer_component(components.Operators.LessThanOrEqualOperator)) |
    (NumericExpression + Suppress('>=') +
     NumericExpression).setParseAction(
        refer_component(components.Operators.GreaterThanOrEqualOperator)) |
    NumericExpression)
if DEBUG:
    RelationalExpression.setName('RelationalExpression')
ValueLogical = RelationalExpression
ConditionalAndExpression = Group(ValueLogical +
    ZeroOrMore(Suppress('&&') + ValueLogical)).setParseAction(
    refer_component(components.Expression.ParsedRelationalExpressionList))
if DEBUG:
    ConditionalAndExpression.setName('ConditionalAndExpression')
ConditionalOrExpression = Group(ConditionalAndExpression +
    ZeroOrMore(Suppress('||') +
               ConditionalAndExpression)).setParseAction(
    refer_component(components.Expression.ParsedConditionalAndExpressionList))
if DEBUG:
    ConditionalOrExpression.setName('ConditionalOrExpression')
# Tie the forward declaration to the top of the precedence chain.
Expression << ConditionalOrExpression
# Constraint (used only in Filter):
Constraint = ((BrackettedExpression).setParseAction(
    refer_component(components.Filter.ParsedExpressionFilter)) |
    (BuiltInCall | FunctionCall).setParseAction(
        refer_component(components.Filter.ParsedFunctionFilter)))
if DEBUG:
    Constraint.setName('Constraint')
# Filter:
FILTER = Suppress(CaselessKeyword('FILTER'))
Filter = (FILTER + Constraint).setName('Filter')
# GraphNode is recursively defined in terms of Collection, ObjectList,
# PropertyListNotEmpty, and TriplesNode.
GraphNode = Forward()
if DEBUG:
    GraphNode.setName('GraphNode')
# Collection: an RDF collection '( node node ... )'.
Collection = (LP + Group(OneOrMore(GraphNode)) + RP).setParseAction(
    refer_component(components.Resource.ParsedCollection))
if DEBUG:
    Collection.setName('Collection')
# ObjectList: comma-separated objects sharing one predicate.
ObjectList = Group(GraphNode + ZeroOrMore(COMMA + GraphNode))
if DEBUG:
    ObjectList.setName('ObjectList')
# PropertyListNotEmpty: semicolon-separated predicate/object pairs.
PropertyListItem = (Verb + ObjectList).setParseAction(
    refer_component(components.Triples.PropertyValue))
if DEBUG:
    PropertyListItem.setName('PropertyListItem')
PropertyListNotEmpty = Group(PropertyListItem + ZeroOrMore(
    SEMICOLON + Optional(PropertyListItem)))
if DEBUG:
    PropertyListNotEmpty.setName('PropertyListNotEmpty')
# TriplesNode:
TriplesNode = Collection | (LB + PropertyListNotEmpty + RB).setParseAction(
    refer_component(components.Resource.Resource, [None]))
if DEBUG:
    TriplesNode.setName('TriplesNode')
# GraphNode:
GraphNode << (VarOrTerm | TriplesNode)
# TriplesBlock:
TriplesSameSubject = ((VarOrTerm + PropertyListNotEmpty).setParseAction(
    refer_component(components.Resource.Resource)) |
    (LB + PropertyListNotEmpty + RB +
     Optional(PropertyListNotEmpty, [])).setParseAction(
        refer_component(components.Resource.TwiceReferencedBlankNode)) |
    (Collection + Optional(PropertyListNotEmpty, [])).setParseAction(
        setPropertyValueList))
if DEBUG:
    TriplesSameSubject.setName('TriplesSameSubject')
TriplesBlock = Forward()
TriplesBlock << (TriplesSameSubject + Optional(PERIOD +
                                               Optional(TriplesBlock)))
if DEBUG:
    TriplesBlock.setName('TriplesBlock')
# GroupGraphPattern: forward-declared; OPTIONAL/UNION/GRAPH patterns
# below all recurse into it.
GroupGraphPattern = Forward()
OPTIONAL = Suppress(CaselessKeyword('OPTIONAL'))
OptionalGraphPattern = (OPTIONAL + GroupGraphPattern).setParseAction(
    refer_component(components.GraphPattern.ParsedOptionalGraphPattern))
if DEBUG:
    OptionalGraphPattern.setName('OptionalGraphPattern')
UNION = Suppress(CaselessKeyword('UNION'))
UnionGraphPattern = Group(GroupGraphPattern + OneOrMore(
    UNION + GroupGraphPattern)).setParseAction(
    refer_component(components.GraphPattern.ParsedAlternativeGraphPattern))
if DEBUG:
    UnionGraphPattern.setName('UnionGraphPattern')
GRAPH = Suppress(CaselessKeyword('GRAPH'))
GraphGraphPattern = (GRAPH + VarOrIRIref + GroupGraphPattern).setParseAction(
    refer_component(components.GraphPattern.ParsedGraphGraphPattern))
if DEBUG:
    GraphGraphPattern.setName('GraphGraphPattern')
GraphPatternNotTriples = (OptionalGraphPattern | UnionGraphPattern |
                          GraphGraphPattern | GroupGraphPattern)
if DEBUG:
    GraphPatternNotTriples.setName('GraphPatternNotTriples')
GraphPattern = ((Filter + Optional(PERIOD) +
                 Optional(Group(TriplesBlock))).setParseAction(
    refer_component(components.GraphPattern.GraphPattern, [None])) |
    (GraphPatternNotTriples + Optional(PERIOD) +
     Optional(Group(TriplesBlock), None)).setParseAction(
        refer_component(components.GraphPattern.GraphPattern, [None], [1, 0, 2])))
if DEBUG:
    GraphPattern.setName('GraphPattern')
GroupGraphPattern << (LC + Optional(Group(TriplesBlock), None) +
                      Group(ZeroOrMore(GraphPattern)) + RC).setParseAction(
    refer_component(components.GraphPattern.ParsedGroupGraphPattern))
if DEBUG:
    GroupGraphPattern.setName('GroupGraphPattern')
# WhereClause: the WHERE keyword itself is optional in SPARQL.
WHERE = Suppress(Optional(CaselessKeyword('WHERE')))
WhereClause = (WHERE + GroupGraphPattern).setParseAction(
    refer_component(components.Query.WhereClause))
if DEBUG:
    WhereClause.setName('WhereClause')
# RecurseClause: non-standard extension ('RECUR ?a TO ?b ...').
RECUR = Suppress(CaselessKeyword('RECUR'))
TO = Suppress(CaselessKeyword('TO'))
RecurClause = (RECUR + Group(OneOrMore(Group(Var + TO + Var))) +
               Optional(GroupGraphPattern, None)).setParseAction(
    refer_component(components.Query.RecurClause))
if DEBUG:
    RecurClause.setName('RecurClause')
# SolutionModifier:
# NOTE(review): ASC/DESC (and ORDER/BY/LIMIT/OFFSET below) wrap the
# keyword in Optional inside Suppress, which makes the keyword itself
# optional — looks suspicious, but preserved as-is; confirm intent.
ASC = Suppress(Optional(CaselessKeyword('ASC')))
DESC = Suppress(Optional(CaselessKeyword('DESC')))
OrderCondition = (
    (ASC + BrackettedExpression).setParseAction(
        refer_component(components.SolutionModifier.ParsedOrderConditionExpression,
                        [components.SolutionModifier.ASCENDING_ORDER], [1, 0])) |
    (DESC + BrackettedExpression).setParseAction(
        refer_component(components.SolutionModifier.ParsedOrderConditionExpression,
                        [components.SolutionModifier.DESCENDING_ORDER], [1, 0])) |
    BrackettedExpression.copy().setParseAction(
        refer_component(components.SolutionModifier.ParsedOrderConditionExpression,
                        [components.SolutionModifier.UNSPECIFIED_ORDER], [1, 0])) |
    BuiltInCall | FunctionCall | Var)
if DEBUG:
    OrderCondition.setName('OrderCondition')
ORDER = Suppress(Optional(CaselessKeyword('ORDER')))
BY = Suppress(Optional(CaselessKeyword('BY')))
OrderClause = ORDER + BY + Group(OneOrMore(OrderCondition))
if DEBUG:
    OrderClause.setName('OrderClause')
LIMIT = Suppress(Optional(CaselessKeyword('LIMIT')))
LimitClause = LIMIT + INTEGER
if DEBUG:
    LimitClause.setName('LimitClause')
OFFSET = Suppress(Optional(CaselessKeyword('OFFSET')))
OffsetClause = OFFSET + INTEGER
if DEBUG:
    OffsetClause.setName('OffsetClause')
# Accepts LIMIT/OFFSET in either order; the second alternative projects
# the results back into (order, limit, offset) argument order.
SolutionModifier = (
    (Optional(OrderClause, None) + Optional(LimitClause, None) +
     Optional(OffsetClause, None)).setParseAction(
        refer_component(components.SolutionModifier.SolutionModifier)) |
    (Optional(OrderClause, None) + Optional(OffsetClause, None) +
     Optional(LimitClause, None)).setParseAction(
        refer_component(components.SolutionModifier.SolutionModifier,
                        projection=[0, 2, 1])))
if DEBUG:
    SolutionModifier.setName('SolutionModifier')
# SelectQuery: 'SELECT *' projects as an empty variable list.
SELECT = Suppress(CaselessKeyword('SELECT'))
DISTINCT = Optional(CaselessKeyword('DISTINCT'), None)
SelectQuery = (SELECT + DISTINCT +
               (Group(OneOrMore(Var)) | Literal('*').setParseAction(as_empty)) +
               Group(ZeroOrMore(DatasetClause)) +
               WhereClause + Optional(RecurClause, None) +
               SolutionModifier).setParseAction(
    refer_component(components.Query.SelectQuery,
                    projection=[1, 2, 3, 4, 5, 0]))
if DEBUG:
    SelectQuery.setName('SelectQuery')
# ConstructQuery:
CONSTRUCT = Suppress(CaselessKeyword('CONSTRUCT'))
ConstructTemplate = LC + Optional(Group(TriplesBlock), []) + RC
ConstructQuery = (CONSTRUCT + ConstructTemplate +
                  Group(ZeroOrMore(DatasetClause)) + WhereClause +
                  SolutionModifier).setParseAction(
    refer_component(components.Query.ConstructQuery))
if DEBUG:
    ConstructQuery.setName('ConstructQuery')
# DescribeQuery:
DESCRIBE = Suppress(CaselessKeyword('DESCRIBE'))
DescribeQuery = (DESCRIBE +
                 (Group(OneOrMore(Var)) | Literal('*').setParseAction(as_empty)) +
                 Group(ZeroOrMore(DatasetClause)) + Optional(WhereClause, None) +
                 SolutionModifier).setParseAction(
    refer_component(components.Query.DescribeQuery))
if DEBUG:
    DescribeQuery.setName('DescribeQuery')
# AskQuery:
ASK = Suppress(CaselessKeyword('ASK'))
AskQuery = (ASK + Group(ZeroOrMore(DatasetClause)) +
            WhereClause).setParseAction(
    refer_component(components.Query.AskQuery))
if DEBUG:
    AskQuery.setName('AskQuery')
# Query: top-level entry point; '#' starts a comment to end of line.
Query = (Prologue + (SelectQuery | ConstructQuery |
                     DescribeQuery | AskQuery)).setParseAction(
    refer_component(components.Query.Query))
Query.ignore('#' + restOfLine)
if DEBUG:
    Query.setName('Query')
def parse(stuff):
    """Parse the SPARQL query string *stuff* into a component tree.

    When the module-level DEBUG flag is set, the parse is additionally
    traced through ParseTracer into 'parse-trace.xml'.

    Returns the first (root) parse result.
    """
    if DEBUG:
        tracer = ParseTracer()
        # Use a context manager so the trace file is closed deterministically
        # (the original opened it and never closed the handle).
        with open('parse-trace.xml', 'w') as resultfile:
            return tracer.parse(Query, stuff, resultfile)
    return Query.parseString(stuff)[0]
if __name__ == "__main__":
    # Smoke-test driver: push a set of representative SPARQL queries through
    # the parser and print the decomposed pieces in doctest-like form
    # (output is CGI-flavored, hence the Content-type header below).
    # NOTE(review): _buildQueryArgs is not defined or imported in this chunk
    # of the file -- confirm it exists elsewhere before relying on this block.
    testCases = [
        # basic
        """
SELECT ?name
WHERE { ?a <http://xmlns.com/foaf/0.1/name> ?name }
""",
        # simple prefix
        """
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?name
WHERE { ?a foaf:name ?name }
""",
        # base statement
        """
BASE <http://xmlns.com/foaf/0.1/>
SELECT ?name
WHERE { ?a <name> ?name }
""",
        # prefix and colon-only prefix
        """
PREFIX : <http://xmlns.com/foaf/0.1/>
PREFIX vcard: <http://www.w3.org/2001/vcard-rdf/3.0#>
SELECT ?name ?title
WHERE {
    ?a :name ?name .
    ?a vcard:TITLE ?title
}
""",
        # predicate-object list notation
        """
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?name ?mbox
WHERE {
    ?x foaf:name ?name ;
       foaf:mbox ?mbox .
}
""",
        # object list notation
        """
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?x
WHERE {
    ?x foaf:nick "Alice" ,
       "Alice_" .
}
""",
        # escaped literals
        """
PREFIX tag: <http://xmlns.com/foaf/0.1/>
PREFIX vcard: <http://www.w3.org/2001/vcard-rdf/3.0#>
SELECT ?name
WHERE {
    ?a tag:name ?name ;
       vcard:TITLE "escape test vcard:TITLE " ;
       <tag://test/escaping> "This is a ''' Test \"\"\"" ;
       <tag://test/escaping> ?d
}
""",
        # key word as variable
        """
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?PREFIX ?WHERE
WHERE {
    ?x foaf:name ?PREFIX ;
       foaf:mbox ?WHERE .
}
""",
        # key word as prefix
        """
PREFIX WHERE: <http://xmlns.com/foaf/0.1/>
SELECT ?name ?mbox
WHERE {
    ?x WHERE:name ?name ;
       WHERE:mbox ?mbox .
}
""",
        # some test cases from grammar.py
        "SELECT ?title WHERE { <http://example.org/book/book1> <http://purl.org/dc/elements/1.1/title> ?title . }",
        """PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?name ?mbox
WHERE { ?person foaf:name ?name .
OPTIONAL { ?person foaf:mbox ?mbox}
}""",
        """PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?name ?name2
WHERE { ?person foaf:name ?name .
OPTIONAL { ?person foaf:knows ?p2 . ?p2 foaf:name ?name2 . }
}""",
        """PREFIX foaf: <http://xmlns.com/foaf/0.1/>
#PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
SELECT ?name ?mbox
WHERE
{
    { ?person rdf:type foaf:Person } .
    OPTIONAL { ?person foaf:name ?name } .
    OPTIONAL {?person foaf:mbox ?mbox} .
}"""
    ]

    print "Content-type: text/plain\n\n"
    for query in testCases:
        print "\n-----\n"
        # Render the query as it would appear in an interactive session.
        print '>>> query = """' + query.replace("\n", "\n... ") + '"""'
        print ">>> result = doSPARQL(query, sparqlGr)\n"
        result = _buildQueryArgs(query);
        print "select = ", result["select"], "\n"
        print "where = ", result["where"], "\n"
        print "optional = ", result["optional"], "\n"
        print "result = sparqlGr.query(select, where, optional)"
| |
# Django settings for InitiativeWASH project.
import os

# import djcelery
# djcelery.setup_loader()
# BROKEN_URL = 'django://'

#DATABASE ROUTER
# RAPIDSMS_ROUTER = "rapidsms.router.db.DatabaseRouter"
##############

# The top directory for this project. Contains requirements/, manage.py,
# and README.rst, a InitiativeWASH directory with settings etc (see
# PROJECT_PATH), as well as a directory for each Django app added to this
# project.
PROJECT_ROOT = os.path.dirname(os.path.dirname(__file__))

# The directory with this project's templates, settings, urls, static dir,
# wsgi.py, fixtures, etc.
PROJECT_PATH = os.path.join(PROJECT_ROOT, 'InitiativeWASH')

# NOTE(review): DEBUG must be False in production; it leaks stack traces
# and settings to end users.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

# SQLite file in the working directory -- development-grade storage.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'InitiativeWASH.db',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/public/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'public', 'media')

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/public/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'public', 'static')

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'

# Additional locations of static files to collect
STATICFILES_DIRS = (
    os.path.join(PROJECT_PATH, 'static'),
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
# SECURITY NOTE(review): this key is committed to version control; rotate it
# and load it from the environment before any production deployment.
SECRET_KEY = 'vhi)ty9@61rmo7h$yv8m5o!uuf!op4j2=oxinf&)b&p0#5kz8f'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.contrib.messages.context_processors.messages',
    'django.core.context_processors.debug',
    'django.core.context_processors.media',
    'django.core.context_processors.request',
    'django.core.context_processors.i18n',
    'django.core.context_processors.static',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'InitiativeWASH.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'InitiativeWASH.wsgi.application'

TEMPLATE_DIRS = (
    os.path.join(PROJECT_PATH, 'templates'),
)

FIXTURE_DIRS = (
    os.path.join(PROJECT_PATH, 'fixtures'),
)

# A sample logging configuration.
# This logs all rapidsms messages to the file `rapidsms.log`
# in the project directory. It also sends an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'basic': {
            'format': '%(asctime)s %(name)-20s %(levelname)-8s %(message)s',
        },
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'basic',
        },
        'file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'formatter': 'basic',
            'filename': os.path.join(PROJECT_PATH, 'rapidsms.log'),
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        'rapidsms': {
            'handlers': ['file'],
            'level': 'DEBUG',
            'propagate': True,
        },
    }
}
# Installed Django applications.  Order matters: "rapidsms.contrib.default"
# must stay last so its catch-all handler runs after all others.
# FIX: "gateway" used to appear twice (under "Project apps" and again under
# "DIRECTORY GATEWAY"); the duplicate entry was redundant and duplicate app
# entries are rejected outright by newer Django versions, so it was removed.
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    # External apps
    "django_nose",
    #"djtables", # required by rapidsms.contrib.locations
    "django_tables2",
    "selectable",
    "south",
    # Project apps
    "gateway",
    # RapidSMS
    "rapidsms",
    "rapidsms.backends.database",
    "rapidsms.contrib.handlers",
    "rapidsms.contrib.httptester",
    "rapidsms.contrib.messagelog",
    "rapidsms.contrib.messaging",
    "rapidsms.contrib.registration",
    # "rapidsms.contrib.echo",

    #INSTALLS THE FOLLOWING
    #Celery and djcelery
    # "djcelery",
    # "kombu.transport.django",
    # "rapidsms.router.celery",

    #DatabaseRouter
    # "rapidsms.router.db",

    #TROPO
    # 'rtropo',

    #TWILIO
    "rtwilio",
    #########
    "rapidsms.contrib.default",  # Must be last
)
# Messaging backends for RapidSMS.
# SECURITY FIX: the Telerivet credentials below were hard-coded and committed
# to version control -- they should be rotated.  They are now read from the
# environment, falling back to the historical values so existing deployments
# keep working unchanged.  (`os` is imported at the top of this module.)
INSTALLED_BACKENDS = {
    # Web-based message tester; stores messages in the database.
    "message_tester": {
        "ENGINE": "rapidsms.backends.database.DatabaseBackend",
        # "router.celery.eager": True,
    },
    # # "twilio-backend": {
    # #     "ENGINE": "rtwilio.outgoing.TwilioBackend",
    # #     'config': {
    # #         'account_sid': 'AC8155be9cc294a5aebd5737e7e87058e0', # (required)
    # #         'auth_token': '7476383ef820d9cbaada64b495435131', # (required)
    # #         'number': '(323) 909-4972', # your Twilio phone number (required)
    # #         # 'callback': 'http://herrickc.scripts.mit.edu/wash/backend/twilio/status-callback/', # optional callback URL
    # #         # +13239094972
    # #     }
    # },
    # Telerivet Android-gateway backend.
    "telerivet": {
        "ENGINE": "rapidsms_telerivet.outgoing.TelerivetBackend",
        "project_id": os.environ.get("TELERIVET_PROJECT_ID", "PJ7857fe403c2fa575"),
        "phone_id": os.environ.get("TELERIVET_PHONE_ID", "PNcc002e02c198bd4f"),
        "secret": os.environ.get("TELERIVET_SECRET", "9FT4PNWCMUZ7ZCRA96EPLKZW3ZPZFDFP"),
        "api_key": os.environ.get("TELERIVET_API_KEY", "FTntIwlTyAJKmBJVVqp5XVFbrMMGaUIn"),
    },
    # "my-tropo-backend": {
    #     "ENGINE": "rtropo.outgoing.TropoBackend",
    #     'config': {
    #         # Your Tropo application's outbound token for messaging
    #         'messaging_token': '07d15cd1cbb0ba47a68f05b57f43358be8d9e9c4efe70c368b09e16719fba8c58fa1b15561a3f5f44feb6793',
    #         # Your Tropo application's voice/messaging phone number (including country code)
    #         'number': '+1-857-239-0091',
    #     },
    # },
}
###########LOGGING################
# LOGGING_CONFIG = {
# "rapidsms.router.celery":{
# 'handlers':['file'],
# 'level': DEBUG,
# }
# }
###################################

# After login, send users to the site root.
LOGIN_REDIRECT_URL = '/'

# Keyword handlers loaded by rapidsms.contrib.handlers, in order.
RAPIDSMS_HANDLERS = (
    'gateway.handlers.DataHandler',
    'gateway.handlers.NeighborhoodHandler',
)

# Reply sent when no handler recognizes an incoming message.
DEFAULT_RESPONSE = "Sorry, we didn't understand your message. Please try again."
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import webob
from keystone import config
from keystone.openstack.common import jsonutils
from keystone.openstack.common import timeutils
from keystone import tests
from keystone.tests import default_fixtures
# Global config handle and the domain used for v2 (domain-less) operations.
CONF = config.CONF
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id

# Git locations for fetching python-keystoneclient revisions; presumably
# consumed by subclasses' get_checkout() (not visible in this chunk) via
# tests.checkout_vendor() -- verify against the subclass definitions.
OPENSTACK_REPO = 'https://review.openstack.org/p/openstack'
KEYSTONECLIENT_REPO = '%s/python-keystoneclient.git' % OPENSTACK_REPO
class CompatTestCase(tests.TestCase):
    """Fixture that runs live keystone servers and drives a real client.

    setUp boots public and admin WSGI servers against freshly loaded
    backends/fixtures, then checks out the keystoneclient revision under
    test and puts it on the import path.
    """

    def setUp(self):
        super(CompatTestCase, self).setUp()
        # The backends should be loaded and initialized before the servers are
        # started because the servers use the backends.
        self.load_backends()
        self.load_fixtures(default_fixtures)

        # TODO(termie): add an admin user to the fixtures and use that user
        # override the fixtures, for now
        self.metadata_foobar = self.identity_api.add_role_to_user_and_project(
            self.user_foo['id'],
            self.tenant_bar['id'],
            self.role_admin['id'])

        # Live servers on ephemeral ports (see _public_url/_admin_url).
        self.public_server = self.serveapp('keystone', name='main')
        self.admin_server = self.serveapp('keystone', name='admin')

        # Fetch the client revision under test and make it importable fresh.
        revdir = tests.checkout_vendor(*self.get_checkout())
        self.add_path(revdir)
        self.clear_module('keystoneclient')

    def tearDown(self):
        # Stop the servers started in setUp before base-class cleanup runs.
        self.public_server.kill()
        self.admin_server.kill()
        self.public_server = None
        self.admin_server = None
        super(CompatTestCase, self).tearDown()

    def _public_url(self):
        # Port is assigned dynamically when the server binds its socket.
        public_port = self.public_server.socket_info['socket'][1]
        return "http://localhost:%s/v2.0" % public_port

    def _admin_url(self):
        admin_port = self.admin_server.socket_info['socket'][1]
        return "http://localhost:%s/v2.0" % admin_port

    def _client(self, admin=False, **kwargs):
        """Build an authenticated keystoneclient against this test server."""
        from keystoneclient.v2_0 import client as ks_client
        url = self._admin_url() if admin else self._public_url()
        kc = ks_client.Client(endpoint=url,
                              auth_url=self._public_url(),
                              **kwargs)
        kc.authenticate()
        # have to manually overwrite the management url after authentication
        kc.management_url = url
        return kc

    def get_client(self, user_ref=None, tenant_ref=None, admin=False):
        """Return a client authenticated as user_ref (default: user_foo).

        When tenant_ref is omitted, the user's first tenant from the
        fixtures is used.
        NOTE(review): if user_ref's id is not present in the fixtures,
        tenant_id is never assigned and the call below raises NameError --
        confirm all callers pass fixture users.
        """
        if user_ref is None:
            user_ref = self.user_foo
        if tenant_ref is None:
            for user in default_fixtures.USERS:
                if user['id'] == user_ref['id']:
                    tenant_id = user['tenants'][0]
        else:
            tenant_id = tenant_ref['id']
        return self._client(username=user_ref['name'],
                            password=user_ref['password'],
                            tenant_id=tenant_id,
                            admin=admin)
class KeystoneClientTests(object):
"""Tests for all versions of keystoneclient."""
    def test_authenticate_tenant_name_and_tenants(self):
        # Password auth via the fixture user; scoped tenant shows up in list().
        client = self.get_client()
        tenants = client.tenants.list()
        self.assertEquals(tenants[0].id, self.tenant_bar['id'])

    def test_authenticate_tenant_id_and_tenants(self):
        # Password auth scoped explicitly by tenant id.
        client = self._client(username=self.user_foo['name'],
                              password=self.user_foo['password'],
                              tenant_id='bar')
        tenants = client.tenants.list()
        self.assertEquals(tenants[0].id, self.tenant_bar['id'])

    def test_authenticate_invalid_tenant_id(self):
        # Scoping to a nonexistent tenant id must be rejected.
        from keystoneclient import exceptions as client_exceptions
        self.assertRaises(client_exceptions.Unauthorized,
                          self._client,
                          username=self.user_foo['name'],
                          password=self.user_foo['password'],
                          tenant_id='baz')

    def test_authenticate_token_no_tenant(self):
        # A token obtained via password auth can itself authenticate, unscoped.
        client = self.get_client()
        token = client.auth_token
        token_client = self._client(token=token)
        tenants = token_client.tenants.list()
        self.assertEquals(tenants[0].id, self.tenant_bar['id'])

    def test_authenticate_token_tenant_id(self):
        # Token auth re-scoped to a tenant id.
        client = self.get_client()
        token = client.auth_token
        token_client = self._client(token=token, tenant_id='bar')
        tenants = token_client.tenants.list()
        self.assertEquals(tenants[0].id, self.tenant_bar['id'])

    def test_authenticate_token_invalid_tenant_id(self):
        # Token auth with a bogus tenant id must be rejected.
        from keystoneclient import exceptions as client_exceptions
        client = self.get_client()
        token = client.auth_token
        self.assertRaises(client_exceptions.Unauthorized,
                          self._client, token=token,
                          tenant_id=uuid.uuid4().hex)

    def test_authenticate_token_invalid_tenant_name(self):
        # Token auth with a bogus tenant name must be rejected.
        from keystoneclient import exceptions as client_exceptions
        client = self.get_client()
        token = client.auth_token
        self.assertRaises(client_exceptions.Unauthorized,
                          self._client, token=token,
                          tenant_name=uuid.uuid4().hex)
def test_authenticate_token_tenant_name(self):
client = self.get_client()
token = client.auth_token
token_client = self._client(token=token, tenant_name='BAR')
tenants = token_client.tenants.list()
self.assertEquals(tenants[0].id, self.tenant_bar['id'])
self.assertEquals(tenants[0].id, self.tenant_bar['id'])
    def test_authenticate_and_delete_token(self):
        # A deleted token must no longer authorize requests.
        from keystoneclient import exceptions as client_exceptions
        client = self.get_client(admin=True)
        token = client.auth_token
        token_client = self._client(token=token)
        tenants = token_client.tenants.list()
        self.assertEquals(tenants[0].id, self.tenant_bar['id'])
        client.tokens.delete(token_client.auth_token)
        self.assertRaises(client_exceptions.Unauthorized,
                          token_client.tenants.list)

    def test_authenticate_no_password(self):
        # A missing password fails during client authentication.
        from keystoneclient import exceptions as client_exceptions
        user_ref = self.user_foo.copy()
        user_ref['password'] = None
        self.assertRaises(client_exceptions.AuthorizationFailure,
                          self.get_client,
                          user_ref)

    def test_authenticate_no_username(self):
        # A missing username fails during client authentication.
        from keystoneclient import exceptions as client_exceptions
        user_ref = self.user_foo.copy()
        user_ref['name'] = None
        self.assertRaises(client_exceptions.AuthorizationFailure,
                          self.get_client,
                          user_ref)

    def test_authenticate_disabled_tenant(self):
        # Neither password nor token auth may scope to a disabled tenant.
        from keystoneclient import exceptions as client_exceptions
        admin_client = self.get_client(admin=True)

        # Create a disabled tenant and a user that belongs to it.
        tenant = {
            'name': uuid.uuid4().hex,
            'description': uuid.uuid4().hex,
            'enabled': False,
        }
        tenant_ref = admin_client.tenants.create(
            tenant_name=tenant['name'],
            description=tenant['description'],
            enabled=tenant['enabled'])
        tenant['id'] = tenant_ref.id

        user = {
            'name': uuid.uuid4().hex,
            'password': uuid.uuid4().hex,
            'email': uuid.uuid4().hex,
            'tenant_id': tenant['id'],
        }
        user_ref = admin_client.users.create(
            name=user['name'],
            password=user['password'],
            email=user['email'],
            tenant_id=user['tenant_id'])
        user['id'] = user_ref.id

        # password authentication
        self.assertRaises(
            client_exceptions.Unauthorized,
            self._client,
            username=user['name'],
            password=user['password'],
            tenant_id=tenant['id'])

        # token authentication
        client = self._client(
            username=user['name'],
            password=user['password'])
        self.assertRaises(
            client_exceptions.Unauthorized,
            self._client,
            token=client.auth_token,
            tenant_id=tenant['id'])

    # FIXME(ja): this test should require the "keystone:admin" roled
    #            (probably the role set via --keystone_admin_role flag)
    # FIXME(ja): add a test that admin endpoint is only sent to admin user
    # FIXME(ja): add a test that admin endpoint returns unauthorized if not
    #            admin
def test_tenant_create_update_and_delete(self):
from keystoneclient import exceptions as client_exceptions
tenant_name = 'original_tenant'
tenant_description = 'My original tenant!'
tenant_enabled = True
client = self.get_client(admin=True)
# create, get, and list a tenant
tenant = client.tenants.create(tenant_name=tenant_name,
description=tenant_description,
enabled=tenant_enabled)
self.assertEquals(tenant.name, tenant_name)
self.assertEquals(tenant.description, tenant_description)
self.assertEquals(tenant.enabled, tenant_enabled)
tenant = client.tenants.get(tenant_id=tenant.id)
self.assertEquals(tenant.name, tenant_name)
self.assertEquals(tenant.description, tenant_description)
self.assertEquals(tenant.enabled, tenant_enabled)
tenant = [t for t in client.tenants.list() if t.id == tenant.id].pop()
self.assertEquals(tenant.name, tenant_name)
self.assertEquals(tenant.description, tenant_description)
self.assertEquals(tenant.enabled, tenant_enabled)
# update, get, and list a tenant
tenant_name = 'updated_tenant'
tenant_description = 'Updated tenant!'
tenant_enabled = False
tenant = client.tenants.update(tenant_id=tenant.id,
tenant_name=tenant_name,
enabled=tenant_enabled,
description=tenant_description)
self.assertEquals(tenant.name, tenant_name)
self.assertEquals(tenant.description, tenant_description)
self.assertEquals(tenant.enabled, tenant_enabled)
tenant = client.tenants.get(tenant_id=tenant.id)
self.assertEquals(tenant.name, tenant_name)
self.assertEquals(tenant.description, tenant_description)
self.assertEquals(tenant.enabled, tenant_enabled)
tenant = [t for t in client.tenants.list() if t.id == tenant.id].pop()
self.assertEquals(tenant.name, tenant_name)
self.assertEquals(tenant.description, tenant_description)
self.assertEquals(tenant.enabled, tenant_enabled)
# delete, get, and list a tenant
client.tenants.delete(tenant=tenant.id)
self.assertRaises(client_exceptions.NotFound, client.tenants.get,
tenant.id)
self.assertFalse([t for t in client.tenants.list()
if t.id == tenant.id])
def test_tenant_create_update_and_delete_unicode(self):
from keystoneclient import exceptions as client_exceptions
tenant_name = u'original \u540d\u5b57'
tenant_description = 'My original tenant!'
tenant_enabled = True
client = self.get_client(admin=True)
# create, get, and list a tenant
tenant = client.tenants.create(tenant_name,
description=tenant_description,
enabled=tenant_enabled)
self.assertEqual(tenant.name, tenant_name)
self.assertEqual(tenant.description, tenant_description)
self.assertIs(tenant.enabled, tenant_enabled)
tenant = client.tenants.get(tenant.id)
self.assertEqual(tenant.name, tenant_name)
self.assertEqual(tenant.description, tenant_description)
self.assertIs(tenant.enabled, tenant_enabled)
# multiple tenants exist due to fixtures, so find the one we're testing
tenant = [t for t in client.tenants.list() if t.id == tenant.id].pop()
self.assertEqual(tenant.name, tenant_name)
self.assertEqual(tenant.description, tenant_description)
self.assertIs(tenant.enabled, tenant_enabled)
# update, get, and list a tenant
tenant_name = u'updated \u540d\u5b57'
tenant_description = 'Updated tenant!'
tenant_enabled = False
tenant = client.tenants.update(tenant.id,
tenant_name=tenant_name,
enabled=tenant_enabled,
description=tenant_description)
self.assertEqual(tenant.name, tenant_name)
self.assertEqual(tenant.description, tenant_description)
self.assertIs(tenant.enabled, tenant_enabled)
tenant = client.tenants.get(tenant.id)
self.assertEqual(tenant.name, tenant_name)
self.assertEqual(tenant.description, tenant_description)
self.assertIs(tenant.enabled, tenant_enabled)
tenant = [t for t in client.tenants.list() if t.id == tenant.id].pop()
self.assertEqual(tenant.name, tenant_name)
self.assertEqual(tenant.description, tenant_description)
self.assertIs(tenant.enabled, tenant_enabled)
# delete, get, and list a tenant
client.tenants.delete(tenant.id)
self.assertRaises(client_exceptions.NotFound, client.tenants.get,
tenant.id)
self.assertFalse([t for t in client.tenants.list()
if t.id == tenant.id])
    def test_tenant_create_no_name(self):
        # An empty tenant name is a client error (400).
        from keystoneclient import exceptions as client_exceptions
        client = self.get_client(admin=True)
        self.assertRaises(client_exceptions.BadRequest,
                          client.tenants.create,
                          tenant_name="")

    def test_tenant_delete_404(self):
        from keystoneclient import exceptions as client_exceptions
        client = self.get_client(admin=True)
        self.assertRaises(client_exceptions.NotFound,
                          client.tenants.delete,
                          tenant=uuid.uuid4().hex)

    def test_tenant_get_404(self):
        from keystoneclient import exceptions as client_exceptions
        client = self.get_client(admin=True)
        self.assertRaises(client_exceptions.NotFound,
                          client.tenants.get,
                          tenant_id=uuid.uuid4().hex)

    def test_tenant_update_404(self):
        from keystoneclient import exceptions as client_exceptions
        client = self.get_client(admin=True)
        self.assertRaises(client_exceptions.NotFound,
                          client.tenants.update,
                          tenant_id=uuid.uuid4().hex)

    def test_tenant_list(self):
        # A regular (non-admin) user sees only its own tenant.
        client = self.get_client()
        tenants = client.tenants.list()
        self.assertEquals(len(tenants), 1)

        # Admin endpoint should return *all* tenants
        client = self.get_client(admin=True)
        tenants = client.tenants.list()
        self.assertEquals(len(tenants), len(default_fixtures.TENANTS))

    def test_invalid_password(self):
        # Same user: correct password works, wrong password is rejected.
        from keystoneclient import exceptions as client_exceptions

        good_client = self._client(username=self.user_foo['name'],
                                   password=self.user_foo['password'])
        good_client.tenants.list()

        self.assertRaises(client_exceptions.Unauthorized,
                          self._client,
                          username=self.user_foo['name'],
                          password=uuid.uuid4().hex)

    def test_invalid_user_and_password(self):
        from keystoneclient import exceptions as client_exceptions
        self.assertRaises(client_exceptions.Unauthorized,
                          self._client,
                          username=uuid.uuid4().hex,
                          password=uuid.uuid4().hex)
    def test_change_password_invalidates_token(self):
        # Changing a user's password must revoke previously issued tokens.
        from keystoneclient import exceptions as client_exceptions

        client = self.get_client(admin=True)

        username = uuid.uuid4().hex
        passwd = uuid.uuid4().hex
        user = client.users.create(name=username, password=passwd,
                                   email=uuid.uuid4().hex)

        token_id = client.tokens.authenticate(username=username,
                                              password=passwd).id

        # authenticate with a token should work before a password change
        client.tokens.authenticate(token=token_id)

        client.users.update_password(user=user.id, password=uuid.uuid4().hex)

        # authenticate with a token should not work after a password change
        self.assertRaises(client_exceptions.Unauthorized,
                          client.tokens.authenticate,
                          token=token_id)

    def test_disable_tenant_invalidates_token(self):
        # Disabling a tenant revokes tokens scoped to it and blocks re-auth.
        from keystoneclient import exceptions as client_exceptions

        admin_client = self.get_client(admin=True)
        foo_client = self.get_client(self.user_foo)
        tenant_bar = admin_client.tenants.get(self.tenant_bar['id'])

        # Disable the tenant.
        tenant_bar.update(enabled=False)

        # Test that the token has been removed.
        self.assertRaises(client_exceptions.Unauthorized,
                          foo_client.tokens.authenticate,
                          token=foo_client.auth_token)

        # Test that the user access has been disabled.
        self.assertRaises(client_exceptions.Unauthorized,
                          self.get_client,
                          self.user_foo)

    def test_delete_tenant_invalidates_token(self):
        # Deleting a tenant revokes tokens scoped to it and blocks re-auth.
        from keystoneclient import exceptions as client_exceptions

        admin_client = self.get_client(admin=True)
        foo_client = self.get_client(self.user_foo)
        tenant_bar = admin_client.tenants.get(self.tenant_bar['id'])

        # Delete the tenant.
        tenant_bar.delete()

        # Test that the token has been removed.
        self.assertRaises(client_exceptions.Unauthorized,
                          foo_client.tokens.authenticate,
                          token=foo_client.auth_token)

        # Test that the user access has been disabled.
        self.assertRaises(client_exceptions.Unauthorized,
                          self.get_client,
                          self.user_foo)

    def test_disable_user_invalidates_token(self):
        # Disabling a user revokes that user's live tokens and blocks re-auth.
        from keystoneclient import exceptions as client_exceptions

        admin_client = self.get_client(admin=True)
        foo_client = self.get_client(self.user_foo)

        admin_client.users.update_enabled(user=self.user_foo['id'],
                                          enabled=False)

        self.assertRaises(client_exceptions.Unauthorized,
                          foo_client.tokens.authenticate,
                          token=foo_client.auth_token)

        self.assertRaises(client_exceptions.Unauthorized,
                          self.get_client,
                          self.user_foo)

    def test_delete_user_invalidates_token(self):
        # Deleting a user revokes that user's live tokens.
        from keystoneclient import exceptions as client_exceptions

        admin_client = self.get_client(admin=True)
        client = self.get_client(admin=False)

        username = uuid.uuid4().hex
        password = uuid.uuid4().hex
        user_id = admin_client.users.create(
            name=username, password=password, email=uuid.uuid4().hex).id

        token_id = client.tokens.authenticate(
            username=username, password=password).id

        # token should be usable before the user is deleted
        client.tokens.authenticate(token=token_id)

        admin_client.users.delete(user=user_id)

        # authenticate with a token should not work after the user is deleted
        self.assertRaises(client_exceptions.Unauthorized,
                          client.tokens.authenticate,
                          token=token_id)

    def test_token_expiry_maintained(self):
        # Re-authenticating with a token must keep the original expiry.
        timeutils.set_time_override()
        foo_client = self.get_client(self.user_foo)

        orig_token = foo_client.service_catalog.catalog['token']
        timeutils.advance_time_seconds(1)
        reauthenticated_token = foo_client.tokens.authenticate(
            token=foo_client.auth_token)

        self.assertCloseEnoughForGovernmentWork(
            timeutils.parse_isotime(orig_token['expires']),
            timeutils.parse_isotime(reauthenticated_token.expires))
    def test_user_create_update_delete(self):
        # Full user CRUD: create, get, update fields, disable/enable,
        # change password, change tenant, delete, re-create with tenant.
        from keystoneclient import exceptions as client_exceptions

        test_username = 'new_user'
        client = self.get_client(admin=True)
        user = client.users.create(name=test_username,
                                   password='password',
                                   email='user1@test.com')
        self.assertEquals(user.name, test_username)

        user = client.users.get(user=user.id)
        self.assertEquals(user.name, test_username)

        user = client.users.update(user=user,
                                   name=test_username,
                                   email='user2@test.com')
        self.assertEquals(user.email, 'user2@test.com')

        # NOTE(termie): update_enabled doesn't return anything, probably a bug
        client.users.update_enabled(user=user, enabled=False)
        user = client.users.get(user.id)
        self.assertFalse(user.enabled)

        # A disabled user cannot authenticate.
        self.assertRaises(client_exceptions.Unauthorized,
                          self._client,
                          username=test_username,
                          password='password')
        client.users.update_enabled(user, True)

        user = client.users.update_password(user=user, password='password2')

        self._client(username=test_username,
                     password='password2')

        user = client.users.update_tenant(user=user, tenant='bar')
        # TODO(ja): once keystonelight supports default tenant
        #           when you login without specifying tenant, the
        #           token should be scoped to tenant 'bar'

        client.users.delete(user.id)
        self.assertRaises(client_exceptions.NotFound, client.users.get,
                          user.id)

        # Test creating a user with a tenant (auto-add to tenant)
        user2 = client.users.create(name=test_username,
                                    password='password',
                                    email='user1@test.com',
                                    tenant_id='bar')
        self.assertEquals(user2.name, test_username)

    def test_update_default_tenant_to_existing_value(self):
        # Re-assigning the user's tenant to its current value must succeed.
        client = self.get_client(admin=True)

        user = client.users.create(
            name=uuid.uuid4().hex,
            password=uuid.uuid4().hex,
            email=uuid.uuid4().hex,
            tenant_id=self.tenant_bar['id'])

        # attempting to update the tenant with the existing value should work
        user = client.users.update_tenant(
            user=user, tenant=self.tenant_bar['id'])

    def test_user_create_no_string_password(self):
        # A non-string password is a client error (400).
        from keystoneclient import exceptions as client_exceptions
        client = self.get_client(admin=True)
        self.assertRaises(client_exceptions.BadRequest,
                          client.users.create,
                          name='test_user',
                          password=12345,
                          email=uuid.uuid4().hex)

    def test_user_create_no_name(self):
        from keystoneclient import exceptions as client_exceptions
        client = self.get_client(admin=True)
        self.assertRaises(client_exceptions.BadRequest,
                          client.users.create,
                          name="",
                          password=uuid.uuid4().hex,
                          email=uuid.uuid4().hex)

    def test_user_create_404(self):
        # Creating a user in a nonexistent tenant is a 404.
        from keystoneclient import exceptions as client_exceptions
        client = self.get_client(admin=True)
        self.assertRaises(client_exceptions.NotFound,
                          client.users.create,
                          name=uuid.uuid4().hex,
                          password=uuid.uuid4().hex,
                          email=uuid.uuid4().hex,
                          tenant_id=uuid.uuid4().hex)

    def test_user_get_404(self):
        from keystoneclient import exceptions as client_exceptions
        client = self.get_client(admin=True)
        self.assertRaises(client_exceptions.NotFound,
                          client.users.get,
                          user=uuid.uuid4().hex)

    def test_user_list_404(self):
        from keystoneclient import exceptions as client_exceptions
        client = self.get_client(admin=True)
        self.assertRaises(client_exceptions.NotFound,
                          client.users.list,
                          tenant_id=uuid.uuid4().hex)

    def test_user_update_404(self):
        from keystoneclient import exceptions as client_exceptions
        client = self.get_client(admin=True)
        self.assertRaises(client_exceptions.NotFound,
                          client.users.update,
                          user=uuid.uuid4().hex)

    def test_user_update_tenant_404(self):
        # NOTE(review): the skipTest call makes everything below unreachable;
        # presumably this scenario is not supported -- confirm intent.
        self.skipTest('N/A')
        from keystoneclient import exceptions as client_exceptions
        client = self.get_client(admin=True)
        self.assertRaises(client_exceptions.NotFound,
                          client.users.update,
                          user=self.user_foo['id'],
                          tenant_id=uuid.uuid4().hex)

    def test_user_update_password_404(self):
        from keystoneclient import exceptions as client_exceptions
        client = self.get_client(admin=True)
        self.assertRaises(client_exceptions.NotFound,
                          client.users.update_password,
                          user=uuid.uuid4().hex,
                          password=uuid.uuid4().hex)

    def test_user_delete_404(self):
        from keystoneclient import exceptions as client_exceptions
        client = self.get_client(admin=True)
        self.assertRaises(client_exceptions.NotFound,
                          client.users.delete,
                          user=uuid.uuid4().hex)

    def test_user_list(self):
        # Listed users must never expose their password attribute.
        client = self.get_client(admin=True)
        users = client.users.list()
        self.assertTrue(len(users) > 0)
        user = users[0]
        self.assertRaises(AttributeError, lambda: user.password)

    def test_user_get(self):
        # A fetched user must never expose its password attribute.
        client = self.get_client(admin=True)
        user = client.users.get(user=self.user_foo['id'])
        self.assertRaises(AttributeError, lambda: user.password)
def test_role_get(self):
client = self.get_client(admin=True)
role = client.roles.get(role=self.role_admin['id'])
self.assertEquals(role.id, self.role_admin['id'])
def test_role_crud(self):
from keystoneclient import exceptions as client_exceptions
test_role = 'new_role'
client = self.get_client(admin=True)
role = client.roles.create(name=test_role)
self.assertEquals(role.name, test_role)
role = client.roles.get(role=role.id)
self.assertEquals(role.name, test_role)
client.roles.delete(role=role.id)
self.assertRaises(client_exceptions.NotFound,
client.roles.delete,
role=role.id)
self.assertRaises(client_exceptions.NotFound,
client.roles.get,
role=role.id)
def test_role_create_no_name(self):
    """Creating a role with an empty name is rejected as BadRequest."""
    from keystoneclient import exceptions as client_exceptions
    admin = self.get_client(admin=True)
    self.assertRaises(client_exceptions.BadRequest,
                      admin.roles.create,
                      name="")
def test_role_get_404(self):
    """Fetching a nonexistent role must raise NotFound."""
    from keystoneclient import exceptions as client_exceptions
    admin = self.get_client(admin=True)
    self.assertRaises(client_exceptions.NotFound,
                      admin.roles.get,
                      role=uuid.uuid4().hex)
def test_role_delete_404(self):
    """Deleting a nonexistent role must raise NotFound."""
    from keystoneclient import exceptions as client_exceptions
    admin = self.get_client(admin=True)
    self.assertRaises(client_exceptions.NotFound,
                      admin.roles.delete,
                      role=uuid.uuid4().hex)
def test_role_list_404(self):
    """roles_for_user 404s when either the user or the tenant is unknown."""
    from keystoneclient import exceptions as client_exceptions
    admin = self.get_client(admin=True)
    # unknown user AND unknown tenant
    self.assertRaises(client_exceptions.NotFound,
                      admin.roles.roles_for_user,
                      user=uuid.uuid4().hex,
                      tenant=uuid.uuid4().hex)
    # known user, unknown tenant
    self.assertRaises(client_exceptions.NotFound,
                      admin.roles.roles_for_user,
                      user=self.user_foo['id'],
                      tenant=uuid.uuid4().hex)
    # unknown user, known tenant
    self.assertRaises(client_exceptions.NotFound,
                      admin.roles.roles_for_user,
                      user=uuid.uuid4().hex,
                      tenant=self.tenant_bar['id'])
def test_role_list(self):
    """Role listing returns at least one role."""
    admin = self.get_client(admin=True)
    roles = admin.roles.list()
    # TODO(devcamcar): This assert should be more specific.
    self.assertTrue(len(roles) > 0)
def test_service_crud(self):
    """Service create/read/list/delete round-trip (update is N/A in v2)."""
    from keystoneclient import exceptions as client_exceptions
    admin = self.get_client(admin=True)
    service_name = uuid.uuid4().hex
    service_type = uuid.uuid4().hex
    service_desc = uuid.uuid4().hex

    def check(ref):
        # every read path must echo the attributes we created with
        self.assertEquals(service_name, ref.name)
        self.assertEquals(service_type, ref.type)
        self.assertEquals(service_desc, ref.description)

    # create & read
    service = admin.services.create(name=service_name,
                                    service_type=service_type,
                                    description=service_desc)
    check(service)
    check(admin.services.get(id=service.id))
    check([x for x in admin.services.list() if x.id == service.id][0])
    # update is not supported in API v2...
    # delete & read
    admin.services.delete(id=service.id)
    self.assertRaises(client_exceptions.NotFound,
                      admin.services.get,
                      id=service.id)
    services = [x for x in admin.services.list() if x.id == service.id]
    self.assertEquals(len(services), 0)
def test_service_delete_404(self):
    """Deleting a nonexistent service must raise NotFound."""
    from keystoneclient import exceptions as client_exceptions
    admin = self.get_client(admin=True)
    self.assertRaises(client_exceptions.NotFound,
                      admin.services.delete,
                      id=uuid.uuid4().hex)
def test_service_get_404(self):
    """Fetching a nonexistent service must raise NotFound."""
    from keystoneclient import exceptions as client_exceptions
    admin = self.get_client(admin=True)
    self.assertRaises(client_exceptions.NotFound,
                      admin.services.get,
                      id=uuid.uuid4().hex)
def test_endpoint_delete_404(self):
    """Deleting a nonexistent endpoint must raise NotFound."""
    from keystoneclient import exceptions as client_exceptions
    admin = self.get_client(admin=True)
    self.assertRaises(client_exceptions.NotFound,
                      admin.endpoints.delete,
                      id=uuid.uuid4().hex)
def test_admin_requires_adminness(self):
    """Admin-only CRUD operations must be rejected for a non-admin token."""
    from keystoneclient import exceptions as client_exceptions
    # FIXME(ja): this should be Unauthorized
    exception = client_exceptions.ClientException
    non_admin = self.get_client(self.user_two, admin=True)  # non-admin user
    # USER CRUD
    self.assertRaises(exception, non_admin.users.list)
    self.assertRaises(exception,
                      non_admin.users.get,
                      user=self.user_two['id'])
    self.assertRaises(exception,
                      non_admin.users.create,
                      name='oops',
                      password='password',
                      email='oops@test.com')
    self.assertRaises(exception,
                      non_admin.users.delete,
                      user=self.user_foo['id'])
    # TENANT CRUD
    self.assertRaises(exception, non_admin.tenants.list)
    self.assertRaises(exception,
                      non_admin.tenants.get,
                      tenant_id=self.tenant_bar['id'])
    self.assertRaises(exception,
                      non_admin.tenants.create,
                      tenant_name='oops',
                      description="shouldn't work!",
                      enabled=True)
    self.assertRaises(exception,
                      non_admin.tenants.delete,
                      tenant=self.tenant_baz['id'])
    # ROLE CRUD
    self.assertRaises(exception,
                      non_admin.roles.get,
                      role=self.role_admin['id'])
    self.assertRaises(exception, non_admin.roles.list)
    self.assertRaises(exception,
                      non_admin.roles.create,
                      name='oops')
    self.assertRaises(exception,
                      non_admin.roles.delete,
                      role=self.role_admin['id'])
    # TODO(ja): MEMBERSHIP CRUD
    # TODO(ja): determine what else todo
class KcMasterTestCase(CompatTestCase, KeystoneClientTests):
    """Run the shared KeystoneClientTests suite against keystoneclient master.

    Adds master-only behaviors: role grant/revoke via roles.add_user_role,
    tenant list pagination (marker/limit), and OS-KSCRUD self-service
    password changes exercised at the raw WSGI level.
    """

    def get_checkout(self):
        # (repo, revision) pair consumed by CompatTestCase to check out
        # the client library under test.
        return KEYSTONECLIENT_REPO, 'master'

    def test_tenant_add_and_remove_user(self):
        """Grant a role to user_two on tenant_bar, then revoke it."""
        client = self.get_client(admin=True)
        client.roles.add_user_role(tenant=self.tenant_bar['id'],
                                   user=self.user_two['id'],
                                   role=self.role_other['id'])
        user_refs = client.tenants.list_users(tenant=self.tenant_bar['id'])
        self.assert_(self.user_two['id'] in [x.id for x in user_refs])
        client.roles.remove_user_role(tenant=self.tenant_bar['id'],
                                      user=self.user_two['id'],
                                      role=self.role_other['id'])
        # NOTE(review): this queries user_foo, not user_two — presumably
        # checking the revocation did not bleed onto another user; confirm
        # against the upstream test's intent.
        roles = client.roles.roles_for_user(user=self.user_foo['id'],
                                            tenant=self.tenant_bar['id'])
        self.assertNotIn(self.role_other['id'], roles)
        user_refs = client.tenants.list_users(tenant=self.tenant_bar['id'])
        self.assertNotIn(self.user_two['id'], [x.id for x in user_refs])

    def test_user_role_add_404(self):
        """add_user_role 404s when the tenant, user, or role is unknown."""
        from keystoneclient import exceptions as client_exceptions
        client = self.get_client(admin=True)
        # unknown tenant
        self.assertRaises(client_exceptions.NotFound,
                          client.roles.add_user_role,
                          tenant=uuid.uuid4().hex,
                          user=self.user_foo['id'],
                          role=self.role_member['id'])
        # unknown user
        self.assertRaises(client_exceptions.NotFound,
                          client.roles.add_user_role,
                          tenant=self.tenant_baz['id'],
                          user=uuid.uuid4().hex,
                          role=self.role_member['id'])
        # unknown role
        self.assertRaises(client_exceptions.NotFound,
                          client.roles.add_user_role,
                          tenant=self.tenant_baz['id'],
                          user=self.user_foo['id'],
                          role=uuid.uuid4().hex)

    def test_user_role_remove_404(self):
        """remove_user_role 404s for unknown ids and for unassigned roles."""
        from keystoneclient import exceptions as client_exceptions
        client = self.get_client(admin=True)
        # unknown tenant
        self.assertRaises(client_exceptions.NotFound,
                          client.roles.remove_user_role,
                          tenant=uuid.uuid4().hex,
                          user=self.user_foo['id'],
                          role=self.role_member['id'])
        # unknown user
        self.assertRaises(client_exceptions.NotFound,
                          client.roles.remove_user_role,
                          tenant=self.tenant_baz['id'],
                          user=uuid.uuid4().hex,
                          role=self.role_member['id'])
        # unknown role
        self.assertRaises(client_exceptions.NotFound,
                          client.roles.remove_user_role,
                          tenant=self.tenant_baz['id'],
                          user=self.user_foo['id'],
                          role=uuid.uuid4().hex)
        # valid ids, but the role was never assigned on this tenant
        self.assertRaises(client_exceptions.NotFound,
                          client.roles.remove_user_role,
                          tenant=self.tenant_baz['id'],
                          user=self.user_foo['id'],
                          role=self.role_member['id'])

    def test_tenant_list_marker(self):
        """Pagination: listing with marker skips up to and incl. the marker."""
        client = self.get_client()
        # Add two arbitrary tenants to user for testing purposes
        for i in range(2):
            tenant_id = uuid.uuid4().hex
            tenant = {'name': 'tenant-%s' % tenant_id, 'id': tenant_id,
                      'domain_id': DEFAULT_DOMAIN_ID}
            self.assignment_api.create_project(tenant_id, tenant)
            self.identity_api.add_user_to_project(tenant_id,
                                                  self.user_foo['id'])
        tenants = client.tenants.list()
        self.assertEqual(len(tenants), 3)
        # marker=first tenant -> only the remaining two come back, in order
        tenants_marker = client.tenants.list(marker=tenants[0].id)
        self.assertEqual(len(tenants_marker), 2)
        self.assertEqual(tenants[1].name, tenants_marker[0].name)
        self.assertEqual(tenants[2].name, tenants_marker[1].name)

    def test_tenant_list_marker_not_found(self):
        """An unknown pagination marker is a BadRequest."""
        from keystoneclient import exceptions as client_exceptions
        client = self.get_client()
        self.assertRaises(client_exceptions.BadRequest,
                          client.tenants.list, marker=uuid.uuid4().hex)

    def test_tenant_list_limit(self):
        """Pagination: limit truncates the tenant list from the front."""
        client = self.get_client()
        # Add two arbitrary tenants to user for testing purposes
        for i in range(2):
            tenant_id = uuid.uuid4().hex
            tenant = {'name': 'tenant-%s' % tenant_id, 'id': tenant_id,
                      'domain_id': DEFAULT_DOMAIN_ID}
            self.assignment_api.create_project(tenant_id, tenant)
            self.identity_api.add_user_to_project(tenant_id,
                                                  self.user_foo['id'])
        tenants = client.tenants.list()
        self.assertEqual(len(tenants), 3)
        tenants_limited = client.tenants.list(limit=2)
        self.assertEqual(len(tenants_limited), 2)
        self.assertEqual(tenants[0].name, tenants_limited[0].name)
        self.assertEqual(tenants[1].name, tenants_limited[1].name)

    def test_tenant_list_limit_bad_value(self):
        """Non-integer or negative limits are rejected as BadRequest."""
        from keystoneclient import exceptions as client_exceptions
        client = self.get_client()
        self.assertRaises(client_exceptions.BadRequest,
                          client.tenants.list, limit='a')
        self.assertRaises(client_exceptions.BadRequest,
                          client.tenants.list, limit=-1)

    def test_roles_get_by_user(self):
        """A fixture user has at least one role on the fixture tenant."""
        client = self.get_client(admin=True)
        roles = client.roles.roles_for_user(user=self.user_foo['id'],
                                            tenant=self.tenant_bar['id'])
        self.assertTrue(len(roles) > 0)

    def test_user_can_update_passwd(self):
        """A user may change their own password via OS-KSCRUD."""
        client = self.get_client(self.user_two)
        token_id = client.auth_token
        new_password = uuid.uuid4().hex

        # TODO(derekh): Update to use keystoneclient when available
        class FakeResponse(object):
            # minimal WSGI start_response target capturing status/headers
            def start_fake_response(self, status, headers):
                self.response_status = int(status.split(' ', 1)[0])
                self.response_headers = dict(headers)
        responseobject = FakeResponse()

        req = webob.Request.blank(
            '/v2.0/OS-KSCRUD/users/%s' % self.user_two['id'],
            headers={'X-Auth-Token': token_id})
        req.method = 'PATCH'
        req.body = ('{"user":{"password":"%s","original_password":"%s"}}' %
                    (new_password, self.user_two['password']))
        self.public_server.application(req.environ,
                                       responseobject.start_fake_response)

        # re-authenticating with the new password must succeed
        self.user_two['password'] = new_password
        self.get_client(self.user_two)

    def test_user_cannot_update_other_users_passwd(self):
        """OS-KSCRUD must refuse (403) to change another user's password."""
        from keystoneclient import exceptions as client_exceptions

        client = self.get_client(self.user_two)
        token_id = client.auth_token
        new_password = uuid.uuid4().hex

        # TODO(derekh): Update to use keystoneclient when available
        class FakeResponse(object):
            # minimal WSGI start_response target capturing status/headers
            def start_fake_response(self, status, headers):
                self.response_status = int(status.split(' ', 1)[0])
                self.response_headers = dict(headers)
        responseobject = FakeResponse()

        # user_two's token targets user_foo's password resource
        req = webob.Request.blank(
            '/v2.0/OS-KSCRUD/users/%s' % self.user_foo['id'],
            headers={'X-Auth-Token': token_id})
        req.method = 'PATCH'
        req.body = ('{"user":{"password":"%s","original_password":"%s"}}' %
                    (new_password, self.user_two['password']))
        self.public_server.application(req.environ,
                                       responseobject.start_fake_response)
        self.assertEquals(403, responseobject.response_status)

        # the attempted password must NOT have been applied anywhere
        self.user_two['password'] = new_password
        self.assertRaises(client_exceptions.Unauthorized,
                          self.get_client, self.user_two)

    def test_tokens_after_user_update_passwd(self):
        """Changing a password invalidates old tokens and issues a new one."""
        from keystoneclient import exceptions as client_exceptions

        client = self.get_client(self.user_two)
        token_id = client.auth_token
        new_password = uuid.uuid4().hex

        # TODO(derekh): Update to use keystoneclient when available
        class FakeResponse(object):
            # minimal WSGI start_response target capturing status/headers
            def start_fake_response(self, status, headers):
                self.response_status = int(status.split(' ', 1)[0])
                self.response_headers = dict(headers)
        responseobject = FakeResponse()

        req = webob.Request.blank(
            '/v2.0/OS-KSCRUD/users/%s' % self.user_two['id'],
            headers={'X-Auth-Token': token_id})
        req.method = 'PATCH'
        req.body = ('{"user":{"password":"%s","original_password":"%s"}}' %
                    (new_password, self.user_two['password']))
        rv = self.public_server.application(
            req.environ,
            responseobject.start_fake_response)
        # the response body carries the replacement token
        response_json = jsonutils.loads(rv.pop())
        new_token_id = response_json['access']['token']['id']

        # old token is dead, new token works
        self.assertRaises(client_exceptions.Unauthorized, client.tenants.list)
        client.auth_token = new_token_id
        client.tenants.list()
class KcEssex3TestCase(CompatTestCase, KeystoneClientTests):
    """Run the shared suite against the older essex-3 keystoneclient.

    essex-3 exposes the legacy roleref API (add_user_to_tenant /
    get_user_role_refs / remove_user_from_tenant) instead of the
    roles.add_user_role interface used by master; several master-only
    tests are skipped as N/A.
    """

    def get_checkout(self):
        # (repo, revision) pair consumed by CompatTestCase.
        return KEYSTONECLIENT_REPO, 'essex-3'

    def test_tenant_add_and_remove_user(self):
        """Grant and revoke a tenant role via the essex-3 roleref API."""
        client = self.get_client(admin=True)
        client.roles.add_user_to_tenant(tenant_id=self.tenant_bar['id'],
                                        user_id=self.user_two['id'],
                                        role_id=self.role_member['id'])
        role_refs = client.roles.get_user_role_refs(
            user_id=self.user_two['id'])
        # NOTE(review): the grant above targets tenant_bar, yet this asserts
        # tenant_baz appears in the refs — presumably seeded by the test
        # fixtures; confirm against the fixture data.
        self.assert_(self.tenant_baz['id'] in [x.tenantId for x in role_refs])

        # get the "role_refs" so we get the proper id, this is how the clients
        # do it
        roleref_refs = client.roles.get_user_role_refs(
            user_id=self.user_two['id'])
        for roleref_ref in roleref_refs:
            if (roleref_ref.roleId == self.role_member['id']
                    and roleref_ref.tenantId == self.tenant_baz['id']):
                # use python's scope fall through to leave roleref_ref set
                break
        # removal takes the roleref id found by the loop above
        client.roles.remove_user_from_tenant(tenant_id=self.tenant_bar['id'],
                                             user_id=self.user_two['id'],
                                             role_id=roleref_ref.id)

        role_refs = client.roles.get_user_role_refs(
            user_id=self.user_two['id'])
        self.assert_(self.tenant_baz['id'] not in
                     [x.tenantId for x in role_refs])

    def test_roles_get_by_user(self):
        """get_user_role_refs returns at least one ref for a fixture user."""
        client = self.get_client(admin=True)
        roles = client.roles.get_user_role_refs(user_id='foo')
        self.assertTrue(len(roles) > 0)

    def test_role_list_404(self):
        self.skipTest('N/A')

    def test_authenticate_and_delete_token(self):
        self.skipTest('N/A')

    def test_user_create_update_delete(self):
        """Full user lifecycle via the essex-3 per-field update calls."""
        from keystoneclient import exceptions as client_exceptions

        test_username = 'new_user'
        client = self.get_client(admin=True)
        # create & read back
        user = client.users.create(name=test_username,
                                   password='password',
                                   email='user1@test.com')
        self.assertEquals(user.name, test_username)

        user = client.users.get(user=user.id)
        self.assertEquals(user.name, test_username)

        user = client.users.update_email(user=user, email='user2@test.com')
        self.assertEquals(user.email, 'user2@test.com')

        # NOTE(termie): update_enabled doesn't return anything, probably a bug
        client.users.update_enabled(user=user, enabled=False)
        user = client.users.get(user.id)
        self.assertFalse(user.enabled)

        # a disabled user cannot authenticate
        self.assertRaises(client_exceptions.Unauthorized,
                          self._client,
                          username=test_username,
                          password='password')
        client.users.update_enabled(user, True)

        user = client.users.update_password(user=user, password='password2')
        # the new password authenticates
        self._client(username=test_username,
                     password='password2')

        user = client.users.update_tenant(user=user, tenant='bar')
        # TODO(ja): once keystonelight supports default tenant
        #           when you login without specifying tenant, the
        #           token should be scoped to tenant 'bar'

        client.users.delete(user.id)
        self.assertRaises(client_exceptions.NotFound, client.users.get,
                          user.id)

    def test_user_update_404(self):
        self.skipTest('N/A')

    def test_endpoint_create_404(self):
        self.skipTest('N/A')

    def test_endpoint_delete_404(self):
        self.skipTest('N/A')

    def test_policy_crud(self):
        self.skipTest('N/A due to lack of endpoint CRUD')

    def test_disable_tenant_invalidates_token(self):
        self.skipTest('N/A')

    def test_delete_tenant_invalidates_token(self):
        self.skipTest('N/A')
class Kc11TestCase(CompatTestCase, KeystoneClientTests):
    """Run the shared suite against keystoneclient release 0.1.1."""

    def get_checkout(self):
        # (repo, revision) pair consumed by CompatTestCase.
        return KEYSTONECLIENT_REPO, '0.1.1'

    def test_policy_crud(self):
        self.skipTest('N/A')
| |
"""
Various bayesian regression
"""
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from ._base import LinearModel, _rescale_data
from ..base import RegressorMixin
from ._base import _deprecate_normalize
from ..utils.extmath import fast_logdet
from scipy.linalg import pinvh
from ..utils.validation import _check_sample_weight
###############################################################################
# BayesianRidge regression
class BayesianRidge(RegressorMixin, LinearModel):
    """Bayesian ridge regression.

    Fit a Bayesian ridge model. See the Notes section for details on this
    implementation and the optimization of the regularization parameters
    lambda (precision of the weights) and alpha (precision of the noise).

    Read more in the :ref:`User Guide <bayesian_regression>`.

    Parameters
    ----------
    n_iter : int, default=300
        Maximum number of iterations. Should be greater than or equal to 1.

    tol : float, default=1e-3
        Stop the algorithm if w has converged.

    alpha_1 : float, default=1e-6
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the alpha parameter.

    alpha_2 : float, default=1e-6
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the alpha parameter.

    lambda_1 : float, default=1e-6
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the lambda parameter.

    lambda_2 : float, default=1e-6
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the lambda parameter.

    alpha_init : float, default=None
        Initial value for alpha (precision of the noise).
        If not set, alpha_init is 1/Var(y).

        .. versionadded:: 0.22

    lambda_init : float, default=None
        Initial value for lambda (precision of the weights).
        If not set, lambda_init is 1.

        .. versionadded:: 0.22

    compute_score : bool, default=False
        If True, compute the log marginal likelihood at each iteration of the
        optimization.

    fit_intercept : bool, default=True
        Whether to calculate the intercept for this model.
        The intercept is not treated as a probabilistic parameter
        and thus has no associated variance. If set
        to False, no intercept will be used in calculations
        (i.e. data is expected to be centered).

    normalize : bool, default=False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

        .. deprecated:: 1.0
            ``normalize`` was deprecated in version 1.0 and will be removed in
            1.2.

    copy_X : bool, default=True
        If True, X will be copied; else, it may be overwritten.

    verbose : bool, default=False
        Verbose mode when fitting the model.

    Attributes
    ----------
    coef_ : array-like of shape (n_features,)
        Coefficients of the regression model (mean of distribution)

    intercept_ : float
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.

    alpha_ : float
        Estimated precision of the noise.

    lambda_ : float
        Estimated precision of the weights.

    sigma_ : array-like of shape (n_features, n_features)
        Estimated variance-covariance matrix of the weights

    scores_ : array-like of shape (n_iter_+1,)
        If computed_score is True, value of the log marginal likelihood (to be
        maximized) at each iteration of the optimization. The array starts
        with the value of the log marginal likelihood obtained for the initial
        values of alpha and lambda and ends with the value obtained for the
        estimated alpha and lambda.

    n_iter_ : int
        The actual number of iterations to reach the stopping criterion.

    X_offset_ : float
        If `normalize=True`, offset subtracted for centering data to a
        zero mean.

    X_scale_ : float
        If `normalize=True`, parameter used to scale data to a unit
        standard deviation.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.BayesianRidge()
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    BayesianRidge()
    >>> clf.predict([[1, 1]])
    array([1.])

    Notes
    -----
    There exist several strategies to perform Bayesian ridge regression. This
    implementation is based on the algorithm described in Appendix A of
    (Tipping, 2001) where updates of the regularization parameters are done as
    suggested in (MacKay, 1992). Note that according to A New
    View of Automatic Relevance Determination (Wipf and Nagarajan, 2008) these
    update rules do not guarantee that the marginal likelihood is increasing
    between two consecutive iterations of the optimization.

    References
    ----------
    D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems,
    Vol. 4, No. 3, 1992.

    M. E. Tipping, Sparse Bayesian Learning and the Relevance Vector Machine,
    Journal of Machine Learning Research, Vol. 1, 2001.
    """

    def __init__(self, *, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
                 lambda_1=1.e-6, lambda_2=1.e-6, alpha_init=None,
                 lambda_init=None, compute_score=False, fit_intercept=True,
                 normalize='deprecated', copy_X=True, verbose=False):
        # Hyper-parameters are stored untouched; validation happens in fit()
        # (scikit-learn convention).
        self.n_iter = n_iter
        self.tol = tol
        self.alpha_1 = alpha_1
        self.alpha_2 = alpha_2
        self.lambda_1 = lambda_1
        self.lambda_2 = lambda_2
        self.alpha_init = alpha_init
        self.lambda_init = lambda_init
        self.compute_score = compute_score
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.verbose = verbose

    def fit(self, X, y, sample_weight=None):
        """Fit the model

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Training data
        y : ndarray of shape (n_samples,)
            Target values. Will be cast to X's dtype if necessary

        sample_weight : ndarray of shape (n_samples,), default=None
            Individual weights for each sample

            .. versionadded:: 0.20
               parameter *sample_weight* support to BayesianRidge.

        Returns
        -------
        self : returns an instance of self.
        """
        self._normalize = _deprecate_normalize(
            self.normalize, default=False,
            estimator_name=self.__class__.__name__
        )

        if self.n_iter < 1:
            raise ValueError('n_iter should be greater than or equal to 1.'
                             ' Got {!r}.'.format(self.n_iter))

        X, y = self._validate_data(X, y, dtype=np.float64, y_numeric=True)

        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X,
                                                 dtype=X.dtype)

        X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
            X, y, self.fit_intercept, self._normalize, self.copy_X,
            sample_weight=sample_weight)

        if sample_weight is not None:
            # Sample weight can be implemented via a simple rescaling.
            X, y = _rescale_data(X, y, sample_weight)

        self.X_offset_ = X_offset_
        self.X_scale_ = X_scale_
        n_samples, n_features = X.shape

        # Initialization of the values of the parameters
        eps = np.finfo(np.float64).eps
        # Add `eps` in the denominator to omit division by zero if `np.var(y)`
        # is zero
        alpha_ = self.alpha_init
        lambda_ = self.lambda_init
        if alpha_ is None:
            alpha_ = 1. / (np.var(y) + eps)
        if lambda_ is None:
            lambda_ = 1.

        verbose = self.verbose
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2

        self.scores_ = list()
        coef_old_ = None

        # The SVD of X is computed once; all per-iteration coefficient
        # updates reuse U, S, Vh instead of refactorizing.
        XT_y = np.dot(X.T, y)
        U, S, Vh = linalg.svd(X, full_matrices=False)
        eigen_vals_ = S ** 2

        # Convergence loop of the bayesian ridge regression
        for iter_ in range(self.n_iter):

            # update posterior mean coef_ based on alpha_ and lambda_ and
            # compute corresponding rmse
            coef_, rmse_ = self._update_coef_(X, y, n_samples, n_features,
                                              XT_y, U, Vh, eigen_vals_,
                                              alpha_, lambda_)
            if self.compute_score:
                # compute the log marginal likelihood
                s = self._log_marginal_likelihood(n_samples, n_features,
                                                  eigen_vals_,
                                                  alpha_, lambda_,
                                                  coef_, rmse_)
                self.scores_.append(s)

            # Update alpha and lambda according to (MacKay, 1992)
            # gamma_ is the effective number of parameters.
            gamma_ = np.sum((alpha_ * eigen_vals_) /
                            (lambda_ + alpha_ * eigen_vals_))
            lambda_ = ((gamma_ + 2 * lambda_1) /
                       (np.sum(coef_ ** 2) + 2 * lambda_2))
            alpha_ = ((n_samples - gamma_ + 2 * alpha_1) /
                      (rmse_ + 2 * alpha_2))

            # Check for convergence (L1 change of the coefficients)
            if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Convergence after ", str(iter_), " iterations")
                break
            coef_old_ = np.copy(coef_)

        self.n_iter_ = iter_ + 1

        # return regularization parameters and corresponding posterior mean,
        # log marginal likelihood and posterior covariance
        self.alpha_ = alpha_
        self.lambda_ = lambda_
        self.coef_, rmse_ = self._update_coef_(X, y, n_samples, n_features,
                                               XT_y, U, Vh, eigen_vals_,
                                               alpha_, lambda_)
        if self.compute_score:
            # compute the log marginal likelihood
            # NOTE(review): this passes `coef_` from the last loop iteration,
            # not the freshly recomputed `self.coef_` — looks intentional in
            # upstream but verify before relying on the final score value.
            s = self._log_marginal_likelihood(n_samples, n_features,
                                              eigen_vals_,
                                              alpha_, lambda_,
                                              coef_, rmse_)
            self.scores_.append(s)
            self.scores_ = np.array(self.scores_)

        # posterior covariance is given by 1/alpha_ * scaled_sigma_
        scaled_sigma_ = np.dot(Vh.T,
                               Vh / (eigen_vals_ +
                                     lambda_ / alpha_)[:, np.newaxis])
        self.sigma_ = (1. / alpha_) * scaled_sigma_

        self._set_intercept(X_offset_, y_offset_, X_scale_)

        return self

    def predict(self, X, return_std=False):
        """Predict using the linear model.

        In addition to the mean of the predictive distribution, also its
        standard deviation can be returned.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Samples.

        return_std : bool, default=False
            Whether to return the standard deviation of posterior prediction.

        Returns
        -------
        y_mean : array-like of shape (n_samples,)
            Mean of predictive distribution of query points.

        y_std : array-like of shape (n_samples,)
            Standard deviation of predictive distribution of query points.
        """
        y_mean = self._decision_function(X)
        if return_std is False:
            return y_mean
        else:
            # sigma_ lives in the (possibly normalized) training space, so X
            # must be transformed the same way before the quadratic form.
            if self._normalize:
                X = (X - self.X_offset_) / self.X_scale_
            sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
            y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
            return y_mean, y_std

    def _update_coef_(self, X, y, n_samples, n_features, XT_y, U, Vh,
                      eigen_vals_, alpha_, lambda_):
        """Update posterior mean and compute corresponding rmse.

        Posterior mean is given by coef_ = scaled_sigma_ * X.T * y where
        scaled_sigma_ = (lambda_/alpha_ * np.eye(n_features)
                         + np.dot(X.T, X))^-1
        """
        # Two algebraically equivalent forms, chosen so the inverted system
        # is the smaller of (n_features x n_features) / (n_samples x n_samples).
        if n_samples > n_features:
            coef_ = np.linalg.multi_dot([Vh.T,
                                         Vh / (eigen_vals_ + lambda_ /
                                               alpha_)[:, np.newaxis],
                                         XT_y])
        else:
            coef_ = np.linalg.multi_dot([X.T,
                                         U / (eigen_vals_ + lambda_ /
                                              alpha_)[None, :],
                                         U.T, y])

        # rmse_ is actually the residual sum of squares (no mean, no root).
        rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)

        return coef_, rmse_

    def _log_marginal_likelihood(self, n_samples, n_features, eigen_vals,
                                 alpha_, lambda_, coef, rmse):
        """Log marginal likelihood."""
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2

        # compute the log of the determinant of the posterior covariance.
        # posterior covariance is given by
        # sigma = (lambda_ * np.eye(n_features) + alpha_ * np.dot(X.T, X))^-1
        if n_samples > n_features:
            logdet_sigma = - np.sum(np.log(lambda_ + alpha_ * eigen_vals))
        else:
            # only n_samples eigenvalues exist; the remaining directions
            # contribute lambda_ alone.
            logdet_sigma = np.full(n_features, lambda_,
                                   dtype=np.array(lambda_).dtype)
            logdet_sigma[:n_samples] += alpha_ * eigen_vals
            logdet_sigma = - np.sum(np.log(logdet_sigma))

        score = lambda_1 * log(lambda_) - lambda_2 * lambda_
        score += alpha_1 * log(alpha_) - alpha_2 * alpha_
        score += 0.5 * (n_features * log(lambda_) +
                        n_samples * log(alpha_) -
                        alpha_ * rmse -
                        lambda_ * np.sum(coef ** 2) +
                        logdet_sigma -
                        n_samples * log(2 * np.pi))

        return score
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(RegressorMixin, LinearModel):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedures (Evidence Maximization)
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, default=300
Maximum number of iterations.
tol : float, default=1e-3
Stop the algorithm if w has converged.
alpha_1 : float, default=1e-6
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter.
alpha_2 : float, default=1e-6
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
lambda_1 : float, default=1e-6
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter.
lambda_2 : float, default=1e-6
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
compute_score : bool, default=False
If True, compute the objective function at each step of the model.
threshold_lambda : float, default=10 000
threshold for removing (pruning) weights with high precision from
the computation.
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
.. deprecated:: 1.0
``normalize`` was deprecated in version 1.0 and will be removed in
1.2.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
verbose : bool, default=False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array-like of shape (n_features,)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array-like of shape (n_features,)
estimated precisions of the weights.
sigma_ : array-like of shape (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
intercept_ : float
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
X_offset_ : float
If `normalize=True`, offset subtracted for centering data to a
zero mean.
X_scale_ : float
If `normalize=True`, parameter used to scale data to a unit
standard deviation.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
ARDRegression()
>>> clf.predict([[1, 1]])
array([1.])
Notes
-----
For an example, see :ref:`examples/linear_model/plot_ard.py
<sphx_glr_auto_examples_linear_model_plot_ard.py>`.
References
----------
D. J. C. MacKay, Bayesian nonlinear modeling for the prediction
competition, ASHRAE Transactions, 1994.
R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
Their beta is our ``self.alpha_``
Their alpha is our ``self.lambda_``
ARD is a little different than the slide: only dimensions/features for
which ``self.lambda_ < self.threshold_lambda`` are kept and the rest are
discarded.
"""
def __init__(self, *, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True,
normalize='deprecated', copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
    def fit(self, X, y):
        """Fit the ARDRegression model according to the given training data
        and parameters.

        Iterative procedure to maximize the evidence.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like of shape (n_samples,)
            Target values (integers). Will be cast to X's dtype if necessary.

        Returns
        -------
        self : returns an instance of self.
        """
        # `_deprecate_normalize`, `log` and `fast_logdet` are module-level
        # helpers imported at the top of the file (not visible in this
        # excerpt).
        self._normalize = _deprecate_normalize(
            self.normalize, default=False,
            estimator_name=self.__class__.__name__
        )
        X, y = self._validate_data(X, y, dtype=np.float64, y_numeric=True,
                                   ensure_min_samples=2)
        n_samples, n_features = X.shape
        coef_ = np.zeros(n_features)
        X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
            X, y, self.fit_intercept, self._normalize, self.copy_X)
        # Kept for predict(return_std=True), which must undo the same
        # centering/scaling on query points.
        self.X_offset_ = X_offset_
        self.X_scale_ = X_scale_
        # Launch the convergence loop
        keep_lambda = np.ones(n_features, dtype=bool)
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        verbose = self.verbose
        # Initialization of the values of the parameters
        eps = np.finfo(np.float64).eps
        # Add `eps` in the denominator to omit division by zero if `np.var(y)`
        # is zero
        alpha_ = 1. / (np.var(y) + eps)
        lambda_ = np.ones(n_features)
        self.scores_ = list()
        coef_old_ = None
        # Posterior mean of the weights, restricted to the kept features.
        def update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_):
            coef_[keep_lambda] = alpha_ * np.linalg.multi_dot([
                sigma_, X[:, keep_lambda].T, y])
            return coef_
        # The Woodbury variant inverts an (n_samples, n_samples) matrix and
        # is cheaper when there are more features than samples.
        update_sigma = (self._update_sigma if n_samples >= n_features
                        else self._update_sigma_woodbury)
        # Iterative procedure of ARDRegression
        for iter_ in range(self.n_iter):
            sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)
            coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_)
            # Update alpha and lambda
            rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
            # gamma_ measures how well-determined each kept weight is.
            gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
            lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) /
                                    ((coef_[keep_lambda]) ** 2 +
                                     2. * lambda_2))
            alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) /
                      (rmse_ + 2. * alpha_2))
            # Prune the weights with a precision over a threshold
            keep_lambda = lambda_ < self.threshold_lambda
            coef_[~keep_lambda] = 0
            # Compute the objective function
            if self.compute_score:
                s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
                s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) +
                            np.sum(np.log(lambda_)))
                s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
                self.scores_.append(s)
            # Check for convergence
            if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Converged after %s iterations" % iter_)
                break
            coef_old_ = np.copy(coef_)
            if not keep_lambda.any():
                break
        if keep_lambda.any():
            # update sigma and mu using updated params from the last iteration
            sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)
            coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_)
        else:
            # Every feature was pruned: empty covariance, zero coefficients.
            sigma_ = np.array([]).reshape(0, 0)
        self.coef_ = coef_
        self.alpha_ = alpha_
        self.sigma_ = sigma_
        self.lambda_ = lambda_
        self._set_intercept(X_offset_, y_offset_, X_scale_)
        return self
def _update_sigma_woodbury(self, X, alpha_, lambda_, keep_lambda):
# See slides as referenced in the docstring note
# this function is used when n_samples < n_features and will invert
# a matrix of shape (n_samples, n_samples) making use of the
# woodbury formula:
# https://en.wikipedia.org/wiki/Woodbury_matrix_identity
n_samples = X.shape[0]
X_keep = X[:, keep_lambda]
inv_lambda = 1 / lambda_[keep_lambda].reshape(1, -1)
sigma_ = pinvh(
np.eye(n_samples) / alpha_ + np.dot(X_keep * inv_lambda, X_keep.T)
)
sigma_ = np.dot(sigma_, X_keep * inv_lambda)
sigma_ = - np.dot(inv_lambda.reshape(-1, 1) * X_keep.T, sigma_)
sigma_[np.diag_indices(sigma_.shape[1])] += 1. / lambda_[keep_lambda]
return sigma_
def _update_sigma(self, X, alpha_, lambda_, keep_lambda):
# See slides as referenced in the docstring note
# this function is used when n_samples >= n_features and will
# invert a matrix of shape (n_features, n_features)
X_keep = X[:, keep_lambda]
gram = np.dot(X_keep.T, X_keep)
eye = np.eye(gram.shape[0])
sigma_inv = lambda_[keep_lambda] * eye + alpha_ * gram
sigma_ = pinvh(sigma_inv)
return sigma_
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, also its
standard deviation can be returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Samples.
return_std : bool, default=False
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array-like of shape (n_samples,)
Mean of predictive distribution of query points.
y_std : array-like of shape (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self._normalize:
X = (X - self.X_offset_) / self.X_scale_
X = X[:, self.lambda_ < self.threshold_lambda]
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
return y_mean, y_std
| |
import re
import socket
import subprocess
import time
from os import mkdir
from shutil import rmtree
import mechanicalsoup
import requests
import unittest2 as unittest
from bs4 import BeautifulSoup
from django_docker_engine.docker_utils import (DockerClientRunWrapper,
DockerClientSpec,
DockerContainerSpec)
from tests import ALPINE_IMAGE, ECHO_IMAGE, NGINX_IMAGE
def free_port():
    """Return, as a string, a TCP port number that was free at call time.

    Binding to port 0 lets the OS pick an unused port. The socket is
    closed before returning so the caller can bind the port itself.
    (There is an inherent race: another process could claim the port
    between this call and the caller's own bind.)
    """
    s = socket.socket()
    try:
        s.bind(('', 0))
        return str(s.getsockname()[1])
    finally:
        # Fix: the original leaked the socket (never closed it).
        s.close()
def wait_for_server(url):
    """Block until *url* answers an HTTP GET, retrying once per second.

    Raises Exception after 5 failed attempts.
    """
    for _ in range(5):
        try:
            requests.get(url)
            return
        # Fix: the original used a bare ``except:``, which also swallows
        # KeyboardInterrupt/SystemExit. Catch only request-level failures
        # (connection refused, timeout, ...).
        except requests.exceptions.RequestException:
            time.sleep(1)
    raise Exception('{} never responded'.format(url))
class PathRoutingMechanicalSoupTests(unittest.TestCase):
    """Browser-level tests against a dev server started with
    ``./manage.py runserver`` on a free port, using path-based routing.
    """
    def setUp(self):
        # Start a fresh dev server for each test on an OS-assigned port.
        self.port = free_port()
        self.process = subprocess.Popen(
            ['./manage.py', 'runserver', self.port])
        self.home = 'http://localhost:{}/'.format(self.port)
        wait_for_server(self.home)
    def tearDown(self):
        self.process.kill()
    def assert_tool(self, tool, expected):
        """Launch *tool* through the web form, check that *expected*
        appears in the resulting page's h1 or title, then kill the
        container through the UI.
        """
        browser = mechanicalsoup.StatefulBrowser()
        browser.open(self.home)
        browser.select_form('#launch')
        browser['tool'] = tool
        container_name = 'test-{}-{}'.format(tool, self.port)
        browser['parameters_json'] = '[]'
        browser['container_name'] = container_name
        browser.submit_selected()
        # The container may take a moment to come up; poll up to 5 times.
        for i in range(5):
            response = browser.refresh()
            if response.status_code == 200:
                break
            else:
                time.sleep(1)
        self.assertEqual(browser.get_url(),
                         '{}docker/{}/'.format(self.home, container_name))
        current_page = browser.get_current_page()
        # NOTE(review): h1/title are bs4 Tag objects; ``assertIn`` on a Tag
        # tests membership among the tag's children, not substring-of-text.
        # Confirm this matches the intended check.
        if current_page.h1:
            self.assertIn(expected, current_page.h1)
        elif current_page.title:
            self.assertIn(expected, current_page.title)
        else:
            self.fail('No title or h1 in {}'.format(current_page))
        # Kill the container via its per-container form on the home page.
        browser.open(self.home)
        browser.select_form('#kill-' + container_name)
        browser.submit_selected()
        self.assertEqual(browser.get_url(), self.home)
    def testDebuggerLaunch(self):
        self.assert_tool('debugger', 'Tool Launch Data')
    def testLineupLaunch(self):
        self.assert_tool('lineup', 'LineUp')
class PathRoutingClientTests(unittest.TestCase):
    """
    Check that the basic functionality works from end-to-end,
    starting the django server as you would from the command-line.
    """
    # Python 2's unittest lacks ``assertRegex``; install a shim only when
    # it is really missing. (Fix: the original evaluated the bare name
    # ``assertRegex`` in the class body, which always raises NameError --
    # even on Python 3, where the method exists on TestCase but not as a
    # name in this scope -- so the deprecated assertRegexpMatches-based
    # fallback unconditionally shadowed the modern method.)
    if not hasattr(unittest.TestCase, 'assertRegex'):  # Python 2 fallback
        def assertRegex(self, s, re):
            self.assertRegexpMatches(s, re)
    def setUp(self):
        # Start a dev server and prepare a docker client limited to 35MB.
        self.port = free_port()
        self.process = subprocess.Popen(
            ['./manage.py', 'runserver', self.port])
        home = 'http://localhost:' + self.port
        wait_for_server(home)
        self.container_name = 'test-' + self.port
        self.url = '{}/docker/{}/'.format(home, self.container_name)
        self.tmp_dir = '/tmp/test-' + self.port
        mkdir(self.tmp_dir)
        # TODO: Might use mkdtemp, but Docker couldn't see the directory?
        # self.tmp_dir = mkdtemp()
        # chmod(self.tmp_dir, 0777)
        spec = DockerClientSpec(do_input_json_envvar=True)
        self.client = DockerClientRunWrapper(spec, mem_limit_mb=35)
        # 35MB is enough for two nginx containers.
    def tearDown(self):
        self.process.kill()
        rmtree(self.tmp_dir)
        self.client.purge_by_label('subprocess-test-label')
    def test_please_wait(self):
        """A container that never serves HTTP shows the please-wait page."""
        self.client.run(
            DockerContainerSpec(
                image_name=ALPINE_IMAGE,  # Will never response to HTTP
                container_name=self.container_name,
                labels={'subprocess-test-label': 'True'},
                mem_reservation_mb=15
            )
        )
        r = requests.get(self.url)
        self.assert_in_html('Please wait', r.content)
        self.assertIn('Container not yet available', r.reason)
        # There is more, but it varies, depending on startup phase:
        # possibly: "Max retries exceeded"
        # or: "On container container-name, port 80 is not available"
    def assert_in_html(self, substring, html):
        # Looks for substring in the text content of html.
        soup = BeautifulSoup(html, 'html.parser', from_encoding='latin-1')
        # Python error page may be misencoded?
        # Pick "latin-1" because it's forgiving.
        text = soup.get_text()
        text = re.sub(
            r'.*(Environment:.*?)\s*Request information.*',
            r'\1\n\n(NOTE: More info is available; This is abbreviated.)',
            text, flags=re.DOTALL)
        # If it's the Django error page, try to just get the stack trace.
        if substring not in text:
            self.fail('"{}" not found in text of html:\n{}'
                      .format(substring, text))
    def test_not_enough_memory(self):
        """Requesting more memory than the limit logs warnings but still
        starts the container.
        """
        with self.assertLogs() as log:
            self.client.run(
                DockerContainerSpec(
                    image_name=NGINX_IMAGE,
                    container_name=self.container_name,
                    labels={'subprocess-test-label': 'True'},
                    mem_reservation_mb=50  # Artificially inflated
                )
            )
            self.assertEqual(
                log.output,
                ['WARNING:django_docker_engine.docker_utils:50MB requested '
                 '+ 0MB in use - 35MB limit = 15MB > 0',
                 'WARNING:django_docker_engine.docker_utils:No more '
                 'containers to kill, but we still do not have the requested '
                 'memory; Starting anyway!'])
    def test_nginx_container(self):
        """Happy path: nginx serves /, 404s a bad path, and the proxy
        records both requests in its history.
        """
        self.client.run(
            DockerContainerSpec(
                image_name=NGINX_IMAGE,
                container_name=self.container_name,
                labels={'subprocess-test-label': 'True'},
                mem_reservation_mb=15
            )
        )
        time.sleep(1)  # TODO: Race condition kludge!
        r_good = requests.get(self.url)
        self.assert_in_html('Welcome to nginx', r_good.content)
        r_bad = requests.get(self.url + 'bad-path')
        self.assert_in_html('Not Found', r_bad.content)
        self.assertEqual(404, r_bad.status_code)
        logs = self.client.logs(self.container_name).decode('utf-8')
        self.assertIn('"GET / HTTP/1.1" 200', logs)
        history = self.client.history(self.container_name)
        self.assertEqual([event[1] for event in history], ['/', '/bad-path'])
    def test_multi_container_lru_killer(self):
        """Under the memory limit, starting a third container evicts the
        least-recently-used one.
        """
        self.assertEqual(len(self.client.list()), 0)
        for i in range(3):
            self.client.run(
                DockerContainerSpec(
                    image_name=NGINX_IMAGE,
                    container_name='{}-{}'.format(self.container_name, i),
                    mem_reservation_mb=15,
                    labels={'subprocess-test-label': 'True'}
                )
            )
        self.assertEqual(len(self.client.list()), 2)
        # ie, one less than the number of containers started.
        # File-system timestamps that Historian relies on only have
        # one-second resolution, so we can't say which container will
        # have been killed.
    def assert_http_verb(self, verb):
        response = requests.__dict__[verb.lower()](self.url)
        self.assert_in_html('HTTP/1.1 {} /'.format(verb), response.content)
        # Response shouldn't be HTML, but if it fails and we get the
        # Django error page, this will make it much more readable.
    def test_http_echo_verbs(self):
        """The echo container reflects each HTTP method the proxy forwards."""
        self.client.run(
            DockerContainerSpec(
                image_name=ECHO_IMAGE,
                container_port=8080,  # and/or set PORT envvar
                container_name=self.container_name,
                labels={'subprocess-test-label': 'True'},
                mem_reservation_mb=15
            )
        )
        time.sleep(1)  # TODO: Race condition kludge!
        # https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods
        self.assert_http_verb('GET')
        # HEAD has no body, understandably
        # self.assert_http_verb('HEAD')
        self.assert_http_verb('POST')
        self.assert_http_verb('PUT')
        self.assert_http_verb('DELETE')
        # CONNECT not supported by Requests
        # self.assert_http_verb('CONNECT')
        self.assert_http_verb('OPTIONS')
        # TRACE not supported by Requests
        # self.assert_http_verb('TRACE')
        self.assert_http_verb('PATCH')
    def assert_http_body(self, verb):
        body = verb + '/body'
        response = requests.__dict__[verb.lower()](self.url, data=body)
        self.assert_in_html('HTTP/1.1 {} /'.format(verb), response.content)
        self.assert_in_html(body, response.content)
    def test_http_echo_body(self):
        """Request bodies are forwarded to the container unchanged."""
        self.client.run(
            DockerContainerSpec(
                image_name=ECHO_IMAGE,
                container_port=8080,  # and/or set PORT envvar
                container_name=self.container_name,
                labels={'subprocess-test-label': 'True'},
                mem_reservation_mb=15
            )
        )
        self.assert_http_body('POST')
        self.assert_http_body('PUT')
    def test_url(self):
        self.assertRegex(
            self.url, r'http://localhost:\d+/docker/test-\d+/')
class HostRoutingClientTests(PathRoutingClientTests):
    """Re-runs the inherited end-to-end tests, but with host-based routing:
    containers are addressed as ``container-name.docker.localhost``
    instead of a ``/docker/...`` path.
    """
    def setUp(self):
        self.container_name = 'container-name'
        hostname = self.container_name + '.docker.localhost'
        # Host routing only works if the test machine resolves the
        # container hostname; fail fast with instructions otherwise.
        with open('/etc/hosts') as f:
            etc_hosts = f.read()
        if hostname not in etc_hosts:
            self.fail('In /etc/hosts add entry for "{}"; currently: {}'.format(
                hostname, etc_hosts
            ))
        self.port = free_port()
        self.url = 'http://{}:{}/'.format(hostname, self.port)
        self.tmp_dir = '/tmp/test-' + self.port
        mkdir(self.tmp_dir)
        # Wanted to use mkdtemp, but Docker couldn't see the directory?
        # self.tmp_dir = mkdtemp()
        # chmod(self.tmp_dir, 0777)
        self.process = subprocess.Popen([
            './manage.py', 'runserver', self.port,
            '--settings', 'demo_host_routing.settings'
        ])
        home = 'http://localhost:' + self.port
        wait_for_server(home)
        spec = DockerClientSpec(do_input_json_envvar=True)
        self.client = DockerClientRunWrapper(spec, mem_limit_mb=35)
    # Tests from superclass are run
    def test_url(self):
        self.assertRegex(
            self.url, r'http://container-name.docker.localhost:\d+/')
| |
# -*- coding: utf-8 -*-
# pylint: disable=too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""The classes in this module represent objects sent and received from
the XMS REST API.
"""
from __future__ import absolute_import, division, print_function
class ReportType(object):
    """Collection of known delivery report types.

    These values are valid in :py:attr:`MtSmsBatch.delivery_report`,
    though the XMS API may accept others.
    """
    NONE = 'none'
    SUMMARY = 'summary'
    FULL = 'full'
    PER_RECIPIENT = 'per_recipient'
class DeliveryStatus(object):
    """Known delivery statuses.

    The XMS API may introduce new statuses over time, so code should be
    prepared to encounter values not listed here.
    """
    QUEUED = "Queued"
    """The REST API has queued the message; it will be dispatched
    according to the account's rate."""
    DISPATCHED = "Dispatched"
    """The message was dispatched and accepted for delivery by the SMSC."""
    ABORTED = "Aborted"
    """Delivery was aborted before the message reached the SMSC."""
    REJECTED = "Rejected"
    """The SMSC rejected the message."""
    DELIVERED = "Delivered"
    """The message reached its destination."""
    FAILED = "Failed"
    """Delivery of the message failed."""
    EXPIRED = "Expired"
    """The message expired before it could be delivered."""
    UNKNOWN = "Unknown"
    """Whether the message was delivered cannot be determined."""
class DeliveryReportType(object):
    """The flavors of batch delivery report that can be retrieved."""
    SUMMARY = "summary"
    """A summary batch delivery report: aggregated delivery statistics
    only, without per-recipient results."""
    FULL = "full"
    """A full batch delivery report, including per-recipient delivery
    results. For batches with many destinations such reports may be very
    large."""
class Reset(object):
    """Marker class whose instances mean "reset this value".

    Used when updating previously created XMS objects. There is normally
    no need to create new instances of this type; use the module
    constant :const:`.RESET` instead.
    """
    def __init__(self):
        pass
RESET = Reset()
"""Sentinel indicating that a XMS field should be reset to its default
value."""
class MtBatchSms(object):
    """Common base for all SMS batch classes (create and response).

    .. attribute:: recipients

      One or more MSISDNs indicating the batch recipients.

      :type: set[str]

    .. attribute:: sender

      The batch originator, typically a short code or long number.

      :type: str

    .. attribute:: delivery_report

      The type of delivery report requested for this batch.

      :type: str

    .. attribute:: send_at

      When this batch should be sent.

      :type: datetime

    .. attribute:: expire_at

      When this batch should expire.

      :type: datetime

    .. attribute:: callback_url

      The URL where callbacks are delivered.

      :type: str
    """
    def __init__(self):
        self.sender = None
        self.recipients = set()
        self.delivery_report = None
        self.callback_url = None
        self.send_at = None
        self.expire_at = None
class MtBatchSmsCreate(MtBatchSms):
    """Describes parameters available during batch creation.
    We can create two kinds of batches, textual and binary, described
    in the child classes :py:class:`MtBatchTextSmsCreate` and
    :py:class:`MtBatchBinarySmsCreate`, respectively.
    .. attribute:: tags
      The initial set of tags to give the batch.
      :type: set[str]
    """
    def __init__(self):
        MtBatchSms.__init__(self)
        self.tags = set()
class MtBatchTextSmsCreate(MtBatchSmsCreate):
    """Class whose fields describe a text batch.
    .. attribute:: body
      The message body or template.
      :type: str
    .. attribute:: parameters
      The template parameters.
      This property is only relevant if the :py:attr:`body` property
      is a template. This is expected to be an associative array
      mapping parameter keys to associative arrays themselves mapping
      recipient numbers to substitution strings.
      More concretely we may have for the parameterized message
      "Hello, ${name}!" have::
        batch.parameters = {
            'name': {
                '123456789': 'Mary',
                '987654321': 'Joe',
                'default': 'valued customer'
            }
        }
      And the recipient with MSISDN "123456789" would then receive the
      message "Hello, Mary!".
      Note the use of "default" to indicate the substitution for
      recipients not explicitly given. For example, the recipient
      "555555555" would receive the message "Hello, valued customer!".
      :type: dict[str, dict[str, str]]
    """
    def __init__(self):
        MtBatchSmsCreate.__init__(self)
        self.body = None
        self.parameters = {}
class MtBatchBinarySmsCreate(MtBatchSmsCreate):
    """Parameters available when creating a binary SMS batch.

    .. attribute:: body

      The binary message body.

      :type: bytes

    .. attribute:: udh

      The User Data Header of the binary message.

      :type: bytes
    """
    def __init__(self):
        MtBatchSmsCreate.__init__(self)
        self.udh = None
        self.body = None
class MtBatchSmsUpdate(object):
    """Updates applicable to both text and binary SMS batches.

    For ``sender``, ``delivery_report``, ``send_at``, ``expire_at`` and
    ``callback_url`` the update semantics are: ``None`` keeps the current
    value, :const:`.RESET` restores the XMS default, and any other value
    replaces the stored one.

    .. attribute:: recipient_insertions

      Zero or more MSISDNs to add to the batch.

      :type: set[str]

    .. attribute:: recipient_removals

      Zero or more MSISDNs to remove from the batch.

      :type: set[str]

    .. attribute:: sender

      The message originator, as a long number or short code.

      :type: str or None or Reset

    .. attribute:: delivery_report

      The delivery report type; see :class:`ReportType` for valid
      report types.

      :type: str or None or Reset

    .. attribute:: send_at

      The scheduled send time.

      :type: datetime or None or Reset

    .. attribute:: expire_at

      The batch expiry time.

      :type: datetime or None or Reset

    .. attribute:: callback_url

      The batch callback URL.

      :type: str or None or Reset
    """
    def __init__(self):
        self.sender = None
        self.delivery_report = None
        self.send_at = None
        self.expire_at = None
        self.callback_url = None
        self.recipient_insertions = set()
        self.recipient_removals = set()
class MtBatchTextSmsUpdate(MtBatchSmsUpdate):
    """Update operations available for a text batch.

    .. attribute:: body

      The new batch message body; ``None`` keeps the current one.

      :type: str or None

    .. attribute:: parameters

      Template parameter update: ``None`` keeps the current value,
      :const:`.RESET` restores the XMS default, and a dictionary
      replaces the stored parameters.

      :type: dict or None or Reset
    """
    def __init__(self):
        MtBatchSmsUpdate.__init__(self)
        self.parameters = None
        self.body = None
class MtBatchBinarySmsUpdate(MtBatchSmsUpdate):
    """Update operations available for a binary SMS batch.

    .. attribute:: body

      The new binary body; ``None`` leaves the existing body as-is.

      :type: bytes or None

    .. attribute:: udh

      The new User Data Header; ``None`` leaves the existing UDH as-is.

      :type: bytes or None
    """
    def __init__(self):
        MtBatchSmsUpdate.__init__(self)
        self.udh = None
        self.body = None
class MtBatchSmsResult(MtBatchSms):
    """Fields shared by text and binary batch responses.

    .. attribute:: batch_id

      The unique batch identifier.

      :type: str

    .. attribute:: created_at

      Time when this batch was created.

      :type: datetime

    .. attribute:: modified_at

      Time when this batch was last modified.

      :type: datetime

    .. attribute:: canceled

      Whether this batch has been canceled.

      :type: bool
    """
    def __init__(self):
        MtBatchSms.__init__(self)
        self.canceled = None
        self.batch_id = None
        self.created_at = None
        self.modified_at = None
class MtBatchTextSmsResult(MtBatchSmsResult):
    """A textual batch as returned by the XMS endpoint.
    This differs from the batch creation definition by the addition
    of, for example, the batch identifier and the creation time.
    .. attribute:: body
      The message body or template. See
      :py:attr:`MtBatchTextSmsCreate.parameters`.
      :type: str
    .. attribute:: parameters
      The template parameters.
      :type: dict[str, dict[str, str]]
    """
    def __init__(self):
        MtBatchSmsResult.__init__(self)
        self.body = None
        self.parameters = None
class MtBatchBinarySmsResult(MtBatchSmsResult):
    """A binary SMS batch as returned by XMS.

    .. attribute:: body

      The binary message body.

      :type: bytes

    .. attribute:: udh

      The User Data Header of the binary message.

      :type: bytes
    """
    def __init__(self):
        MtBatchSmsResult.__init__(self)
        self.udh = None
        self.body = None
class BatchDeliveryReport(object):
    """Batch delivery report.

    A report is divided into a number of *buckets*, each holding
    statistics about the batch messages that share a delivery status;
    the buckets live in :py:attr:`statuses`.

    .. attribute:: batch_id

      Identifier of the batch that this report covers.

      :type: str

    .. attribute:: total_message_count

      Total number of messages sent as part of the batch.

      :type: int

    .. attribute:: statuses

      The status buckets, one per (delivery status, delivery code)
      combination seen in the batch.

      :type: list[BatchDeliveryReportStatus]
    """
    def __init__(self):
        self.statuses = []
        self.batch_id = None
        self.total_message_count = None
class BatchDeliveryReportStatus(object):
    """Aggregated statistics for one bucket of a batch delivery report.

    See :py:class:`BatchDeliveryReport`.

    .. attribute:: code

      Delivery status code of this recipient bucket.

      :type: int

    .. attribute:: status

      Delivery status of this recipient bucket.

      :type: str

    .. attribute:: count

      Number of recipients in this bucket.

      :type: int

    .. attribute:: recipients

      The recipients having this status. Non-empty only when a `full`
      delivery report was requested.

      :type: set[str]
    """
    def __init__(self):
        self.recipients = set()
        self.code = None
        self.status = None
        self.count = None
class BatchRecipientDeliveryReport(object):
    """A delivery report for an individual batch recipient.
    .. attribute:: batch_id
      The batch identifier.
      :type: string
    .. attribute:: recipient
      The recipient address.
      :type: string
    .. attribute:: code
      The delivery code.
      :type: int
    .. attribute:: status
      The delivery status.
      :type: int
    .. attribute:: status_message
      The delivery status message. The status message is not always
      available and the attribute is set to *None* in those cases.
      :type: string or None
    .. attribute:: operator
      The recipient's mobile operator. If the operator is not known,
      then this is set to *None*.
      :type: string or None
    .. attribute:: status_at
      The time at delivery.
      :type: datetime
    .. attribute:: operator_status_at
      The time of delivery as reported by operator.
      :type: datetime or None
    """
    def __init__(self):
        self.batch_id = None
        self.recipient = None
        self.code = None
        # NOTE(review): the docstring declares ``status`` as *int*, but
        # delivery statuses elsewhere in this module (DeliveryStatus) are
        # strings -- confirm which type this field actually holds.
        self.status = None
        self.status_message = None
        self.operator = None
        self.status_at = None
        self.operator_status_at = None
class Error(object):
    """An error response given by XMS.

    :param str code: the error code
    :param str text: the human readable error text

    .. attribute:: code

      Code usable to programmatically recognize the error.

      :type: str

    .. attribute:: text

      Human readable description of the error.

      :type: str
    """
    def __init__(self, code, text):
        self.text = text
        self.code = code
class MtBatchDryRunResult(object):
    """A batch dry run report.
    .. attribute:: number_of_recipients
      The number of recipients that would receive the batch message.
      :type: int
    .. attribute:: number_of_messages
      The number of messages that will be sent.
      :type: int
    .. attribute:: per_recipient
      The per-recipient dry-run result.
      :type: list[DryRunPerRecipient]
    """
    def __init__(self):
        self.number_of_recipients = None
        self.number_of_messages = None
        self.per_recipient = []
class DryRunPerRecipient(object):
    """Per-recipient dry-run result.

    Instances only occur within dry-run results; see
    :class:`MtBatchDryRunResult`.

    .. attribute:: recipient

      The recipient.

      :type: str

    .. attribute:: number_of_parts

      Number of message parts needed for the recipient.

      :type: int

    .. attribute:: body

      Message body sent to this recipient.

      :type: str

    .. attribute:: encoding

      Text encoding used for this recipient: one of "text" or
      "unicode" (see :const:`ENCODING_TEXT` and
      :const:`ENCODING_UNICODE`).

      :type: str
    """
    ENCODING_TEXT = "text"
    """Constant indicating non-unicode encoding."""
    ENCODING_UNICODE = "unicode"
    """Constant indicating unicode encoding."""
    def __init__(self):
        self.encoding = None
        self.body = None
        self.number_of_parts = None
        self.recipient = None
class GroupAutoUpdate(object):
    """A rule describing automatic updates of group membership.

    The rule triggers on mobile originated messages sent to
    ``recipient``: when the MO's first and/or second keyword match the
    ``add`` arguments, the sender MSISDN is added to the group; when
    they match the ``remove`` arguments, the sender is removed.

    Keywords left as ``None`` do not take part in matching. Thus::

        GroupAutoUpdate(
            recipient='12345',
            add_first_word='add',
            remove_first_word='remove')

    triggers on the first keyword alone, while::

        GroupAutoUpdate(
            recipient='12345',
            add_first_word='alert',
            add_second_word='add',
            remove_first_word='alert',
            remove_second_word='remove')

    requires both the first and second keyword of the MO message.

    :param str recipient: recipient that triggers this rule
    :param add_first_word: first ``add`` keyword, default is `None`.
    :type add_first_word: str or None
    :param add_second_word: second ``add`` keyword, default is `None`.
    :type add_second_word: str or None
    :param remove_first_word: first ``remove`` keyword, default is `None`.
    :type remove_first_word: str or None
    :param remove_second_word: second ``remove`` keyword, default is `None`.
    :type remove_second_word: str or None

    .. attribute:: recipient

      The short code or long number receiving the MO messages.

      :type: str

    .. attribute:: add_word_pair

      Pair ``(first, second)`` of keywords that add the MO sender to
      the group.

      :type: tuple[str or None, str or None]

    .. attribute:: remove_word_pair

      Pair ``(first, second)`` of keywords that remove the MO sender
      from the group.

      :type: tuple[str or None, str or None]
    """
    def __init__(self,
                 recipient,
                 add_first_word=None,
                 add_second_word=None,
                 remove_first_word=None,
                 remove_second_word=None):
        self.recipient = recipient
        self.add_word_pair = (add_first_word, add_second_word)
        self.remove_word_pair = (remove_first_word, remove_second_word)
class GroupCreate(object):
    """The fields necessary to create a group.

    .. attribute:: name

      The group name.

      :type: str

    .. attribute:: members

      MSISDNs belonging to this group.

      :type: set[str]

    .. attribute:: child_groups

      Groups that in turn belong to this group.

      :type: set[str]

    .. attribute:: auto_update

      How this group should be auto updated; ``None`` when no auto
      updating should be performed.

      :type: GroupAutoUpdate or None

    .. attribute:: tags

      The tags associated to this group.

      :type: set[str]
    """
    def __init__(self):
        self.name = None
        self.auto_update = None
        self.members = set()
        self.child_groups = set()
        self.tags = set()
class GroupResult(object):
    """The result of a group fetch operation.

    Used both standalone and as an element of a paged result.

    .. attribute:: group_id

      The unique group identifier.

      :type: str

    .. attribute:: name

      The group name.

      :type: str

    .. attribute:: size

      The number of members of this group.

      :type: int

    .. attribute:: child_groups

      Groups that in turn belong to this group.

      :type: set[str]

    .. attribute:: auto_update

      How this group should be auto updated; ``None`` when no auto
      updating should be performed.

      :type: GroupAutoUpdate or None

    .. attribute:: created_at

      Time at which this group was created.

      :type: datetime

    .. attribute:: modified_at

      Time when this group was last modified.

      :type: datetime
    """
    def __init__(self):
        self.group_id = None
        self.name = None
        self.size = None
        self.auto_update = None
        self.created_at = None
        self.modified_at = None
        self.child_groups = set()
class GroupUpdate(object):
    """Updates that can be performed on a group.

    For ``name`` and ``auto_update`` the update semantics are: ``None``
    keeps the current value, :const:`.RESET` restores the XMS default,
    and any other value replaces the stored one.

    .. attribute:: name

      The new group name.

      :type: None or str or Reset

    .. attribute:: member_insertions

      MSISDNs to add to this group.

      :type: set[str]

    .. attribute:: member_removals

      MSISDNs to remove from this group.

      :type: set[str]

    .. attribute:: child_group_insertions

      Child groups to add to this group.

      :type: set[str]

    .. attribute:: child_group_removals

      Child groups to remove from this group.

      :type: set[str]

    .. attribute:: add_from_group

      Identifier of a group whose members should be added to this group.

      :type: str

    .. attribute:: remove_from_group

      Identifier of a group whose members should be removed from this
      group.

      :type: str

    .. attribute:: auto_update

      The new auto update rule for this group.

      :type: None or GroupAutoUpdate or Reset
    """
    def __init__(self):
        self.name = None
        self.add_from_group = None
        self.remove_from_group = None
        self.auto_update = None
        self.member_insertions = set()
        self.member_removals = set()
        self.child_group_insertions = set()
        self.child_group_removals = set()
class MoSms(object):
    """Base class for SMS mobile originated (MO) messages.

    Holds the fields common to the textual and binary MO classes.

    .. attribute:: message_id

      The message identifier.

      :type: str

    .. attribute:: recipient

      The message recipient, a short code or long number.

      :type: str

    .. attribute:: sender

      The message sender, an MSISDN.

      :type: str

    .. attribute:: operator

      The MCCMNC of the originating operator, if available.

      :type: str or None

    .. attribute:: sent_at

      When this message was sent, if available.

      :type: datetime or None

    .. attribute:: received_at

      When the messaging system received this message.

      :type: datetime
    """
    def __init__(self):
        self.message_id = None
        self.sender = None
        self.recipient = None
        self.operator = None
        self.sent_at = None
        self.received_at = None
class MoTextSms(MoSms):
    """An SMS mobile originated message with textual content.

    .. attribute:: body

      The textual message body.

      :type: str

    .. attribute:: keyword

      The message keyword, when one is present.

      :type: str or None
    """
    def __init__(self):
        MoSms.__init__(self)
        self.keyword = None
        self.body = None
class MoBinarySms(MoSms):
    """An SMS mobile originated message carrying binary content.
    .. attribute:: body
      The binary message body.
      :type: bytes
    .. attribute:: udh
      The user data header.
      :type: bytes
    """
    def __init__(self):
        super(MoBinarySms, self).__init__()
        self.body = None
        self.udh = None
class Page(object):
    """A page of elements.
    The element type depends on the type of page that has been
    retrieved. Typically it is one of :class:`MtSmsBatchResponse` or
    :class:`GroupResponse`.
    .. attribute:: page
      The page number, starting from zero.
      :type: int
    .. attribute:: size
      The number of elements on this page.
      :type: int
    .. attribute:: total_size
      The total number of elements across all fetched pages.
      :type: int
    .. attribute:: content
      The page elements.
      :type: list[obj]
    """
    def __init__(self):
        self.page = None
        self.size = None
        self.total_size = None
        self.content = None
    def __iter__(self):
        """Returns an iterator over the content of this page.
        For example, if the page is the result of a batch listing then
        this iterator will yield batch results.
        :returns: the page iterator
        :rtype: iterator
        """
        return iter(self.content)
class Pages(object):
    """A paged result.
    Allows fetching individual pages as well as iterating over every
    page in sequence.
    :param worker: worker function that fetches pages
    """
    def __init__(self, worker):
        # The worker maps a page number to a Page object.
        self._worker = worker

    def get(self, page):
        """Downloads a specific page.
        :param int page: number of the page to fetch
        :return: a page
        :rtype: Page
        """
        return self._worker(page)

    def __iter__(self):
        """Returns a lazy iterator across all pages."""
        return PagesIterator(self)
class PagesIterator(object):
    """An iterator over a paged result.
    Pages are fetched on demand from the underlying :class:`Pages`
    object; iteration stops once an empty page is returned.
    :param Pages pages: the pages that we are iterating over
    """
    def __init__(self, pages):
        self._pages = pages
        self._position = 0
        self._cur_page = None

    def next(self):
        # Python 2 compatibility alias for __next__.
        return self.__next__()

    def __next__(self):
        """Steps this iterator to the next page."""
        stale = not self._cur_page or self._cur_page.page != self._position
        if stale:
            self._cur_page = self._pages.get(self._position)
            self._position += 1
        # An empty page marks the end of the result set.
        if self._cur_page.size <= 0:
            raise StopIteration
        return self._cur_page
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 6 10:24:07 2020
@author: TeSolva
"""
import numpy as np
import pandas as pd
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import os
import re
#%%
def my_int(my_string):
    """Convert *my_string* to int, returning -9999 as a sentinel on failure.

    The sentinel value lets callers filter out non-numeric tokens with
    a simple ``> -9999`` comparison (see func_nummer / func_id).
    """
    try:
        out = int(my_string)
    except (TypeError, ValueError):
        # Only conversion failures map to the sentinel; the original
        # bare ``except`` also swallowed KeyboardInterrupt/SystemExit.
        out = -9999
    return out
#%%
def func_nummer(x):
    """Return the first number embedded in URL string *x*, or False.

    Tokenizes the string, converts each token with my_int() and keeps
    only real numbers (the -9999 sentinel is filtered out).  Returns
    the first number when at least two were found, otherwise False.
    """
    tokens = re.findall(r"[\w']+", x)
    converted = np.array([my_int(tok) for tok in tokens])
    numbers = converted[converted > -9999]
    if len(numbers) > 1:
        return numbers[0]
    return False
#%%
def func_id(x):
    """Return the second number embedded in URL string *x*, or False.

    Same tokenization as func_nummer(), but yields the second numeric
    token; False when fewer than two numbers are present.
    """
    tokens = re.findall(r"[\w']+", x)
    converted = np.array([my_int(tok) for tok in tokens])
    numbers = converted[converted > -9999]
    if len(numbers) > 1:
        return numbers[1]
    return False
#%%
def outlier_1d_mad_based(sample, thresh=3.5):
    """Flag outliers in *sample* using the MAD-based modified z-score.

    Parameters
    ----------
    sample : array_like
        A (numobservations,) or (numobservations, numdimensions) array.
    thresh : float
        Modified z-score threshold; observations scoring above it are
        classified as outliers.

    Returns
    -------
    numpy.ndarray
        Boolean array of length numobservations, True for outliers.

    Examples
    --------
    >>> sample = np.r_[np.random.normal(0, 0.5, 50), -3, -10, 12]
    >>> out = outlier_1d_mad_based(sample)

    References
    ----------
    Boris Iglewicz and David Hoaglin (1993), "Volume 16: How to Detect
    and Handle Outliers", The ASQC Basic References in Quality Control:
    Statistical Techniques, Edward F. Mykytka, Ph.D., Editor.
    """
    if len(sample.shape) == 1:
        # Promote to a column vector so the axis arithmetic below is
        # uniform for 1-D and n-D input.
        sample = sample[:, None]
    center = np.median(sample, axis=0)
    distance = np.sqrt(np.sum((sample - center) ** 2, axis=-1))
    mad = np.median(distance)
    # 0.6745 rescales the MAD to be consistent with the standard
    # deviation of a normal distribution (see reference).
    modified_z = 0.6745 * distance / mad
    return modified_z > thresh
#%%
# #path = "PATH\"
# # define all filenames in current directory
# path = os.getcwd() # path to this file here
# list_dir = os.listdir(os.getcwd()) # all filenames in the directory
# file_set = []
# for i in list_dir:
# if i.endswith(".csv"):
# file_set.append(i)
#%%
# define file name
file1 = "chronicreplay-audiothek-appapi.tesolva.dev_2020-08-08_07-29-08.csv"
file2 = "chronicreplay-audiothek-appapi.solr.tesolva.dev_2020-08-11_08-47-32.csv"
#
file_set = [file1, file2]
###############################################################################
#%% pre-process
# Analyse the second capture file; output files go into a directory
# named after the file (created below if missing).
X_file = file_set[1]
filename = os.path.splitext(X_file)[0]
path = os.getcwd()
outputPath = path + '/' + filename + '/' + filename
os.makedirs(filename, exist_ok=True)
df = pd.read_csv(X_file,sep='\t')
df.StartTime = pd.to_datetime(df.StartTime)
# create new columns
df['Request_No']=df.Request
df['Request_ID']=df.Request
# add values (No and ID) from URL to new columns
df.Request_No = df.Request_No.apply(func_nummer)
df.Request_ID = df.Request_ID.apply(func_id)
# Requests without /health and /metric
# (func_nummer returns False for URLs without enough embedded numbers,
# so those rows are dropped here)
df = df[df['Request_No'] != False]
#%%
# Threshold to find URLs with high values of request time
THLD = 1000 # Threshold
# NOTE(review): the column name hard-codes 1000 and will be misleading
# if THLD is ever changed.
df['Duration_greaterThan_1000'] = (df.Duration>THLD).astype(int)
#%%
# Scatter plot: request Duration over the capture window, with points
# above vs. below the THLD boundary drawn as separate series.
plt.figure(figsize = (8.5,11))
myFmt = mdates.DateFormatter('%H:%M')
plt.gca().xaxis.set_major_formatter(myFmt)
plt.scatter(df.StartTime[df.Duration<THLD].values, df.Duration[df.Duration<THLD].values, marker='^', label='Duration < THLD [ms]',
            alpha=0.3, edgecolors='none')
plt.scatter(df.StartTime[df.Duration>THLD].values, df.Duration[df.Duration>THLD].values, marker='o', label='Duration > THLD [ms]',
            alpha=0.3, edgecolors='none')
# Show the boundary between the regions:
plt.plot(df.StartTime.values, np.ones(len(df))*THLD,'-.k')
plt.legend()
plt.ylabel("Duration of request time (ms)")
plt.xlabel("Time")
plt.title("file:" + X_file)
# y-axis clipped to twice the threshold so the bulk of the data stays
# readable; extreme outliers fall outside the frame.
plt.axis([None, None, 0, THLD*2])
plt.savefig(outputPath + '-Duration.png')
#%%
# Same plot as above, but showing the Difference column (still split by
# the Duration threshold).
plt.figure(figsize = (8.5,11))
myFmt = mdates.DateFormatter('%H:%M')
plt.gca().xaxis.set_major_formatter(myFmt)
plt.scatter(df.StartTime[df.Duration<THLD].values, df.Difference[df.Duration<THLD].values, marker='^', label='Duration < THLD [ms]',
            alpha=0.3, edgecolors='none')
plt.scatter(df.StartTime[df.Duration>THLD].values, df.Difference[df.Duration>THLD].values, marker='o', label='Duration > THLD [ms]',
            alpha=0.3, edgecolors='none')
# Show the boundary between the regions:
plt.plot(df.StartTime.values, np.ones(len(df))*THLD,'-.k')
plt.legend()
plt.ylabel("Difference of request time (ms)")
plt.xlabel("Time")
plt.title("file:" + X_file)
plt.axis([None, None, 0, THLD*2])
plt.savefig(outputPath + '-Difference.png')
#%%
# only interested in URLs with request time greater than THLD
# .copy() detaches each slice from df: the column assignments below
# would otherwise trigger pandas' SettingWithCopyWarning and could
# silently fail to write when pandas returns a view.
df_Upper = df[df.Duration>THLD].copy()
df_Upper['Percent'] = len(df_Upper)/len(df.index)*100
df_Upper['Mean'] = df_Upper["Duration"].mean()
# export csv files
df_Upper[['Duration', 'Request_No', 'Request_ID', 'Percent','Mean', 'Request']].to_csv(outputPath + '-URLs-request-time-Upper.csv', index=False)
#%%
# count Request_No for Requests time > THLD
df_temp = (df_Upper.drop_duplicates().Request_No.value_counts())
df_Counts = pd.DataFrame({'Request_No':df_temp.index, 'Counts':df_temp.values})
df_Counts.to_csv(outputPath + '-RequestsNo-Counts-Upper.csv', index=False)
#%%
# values between 500 and 1000 ms
# NOTE(review): pandas >= 1.3 expects inclusive="both" here; the bool
# form is kept for compatibility with the pandas version in use.
df_Between = df[df['Duration'].between(500, 1000, inclusive=True)].copy()
# export csv files
df_Between['Percent'] = len(df_Between)/len(df.index)*100
df_Between['Mean'] = df_Between["Duration"].mean()
df_Between[['Duration', 'Request_No', 'Request_ID', 'Percent','Mean', 'Request']].to_csv(outputPath + '-URLs-request-time-Between.csv', index=False)
# count Request_No for Requests time 500 < time < 1000
df_temp2 = (df_Between.drop_duplicates().Request_No.value_counts())
df_CountsBetween = pd.DataFrame({'RequestNo':df_temp2.index, 'Counts':df_temp2.values})
df_CountsBetween.to_csv(outputPath + '-RequestsNo-Counts-Between.csv', index=False)
#%%
# values lower than 500 ms
df_Lower = df[df['Duration'].between(0, 500, inclusive=False)].copy()
# export csv files
df_Lower['Percent'] = len(df_Lower)/len(df.index)*100
df_Lower['Mean'] = df_Lower["Duration"].mean()
df_Lower[['Duration', 'Request_No', 'Request_ID', 'Percent','Mean', 'Request']].to_csv(outputPath + '-URLs-request-time-Lower.csv', index=False)
# count Request_No for Requests time < 500
df_temp2 = (df_Lower.drop_duplicates().Request_No.value_counts())
df_CountsLower = pd.DataFrame({'RequestNo':df_temp2.index, 'Counts':df_temp2.values})
df_CountsLower.to_csv(outputPath + '-RequestsNo-Counts-Lower.csv', index=False)
#%%
# df_notSameStatus = df[df.SameStatus==False]
# # export csv files
# df_notSameStatus.to_csv(outputPath + '-URLs-wo-same-status.csv', index=False)
#%%
del(X_file,file1, file2, file_set) # delete the variable from workspace
#%%
| |
from __future__ import print_function
from numpy import dot, sum
import numpy.testing as npt
from proteus.EGeometry import (X,Y,Z)
from proteus.Quadrature import (SimplexGaussQuadrature,
SimplexLobattoQuadrature,
CubeGaussQuadrature,
LobattoEdgeAlt,
CompositeTrapezoidalEdge,
FaceBarycenterEdge,
CompositeTrapezoidalTriangle,
CompositeTriangle,
FaceBarycenterTriangle)
# Quadrature rules under test, one per reference element type.  All are
# created at order 1; the individual tests re-set the order as needed.
gaussPoint=SimplexGaussQuadrature(nd=0, order=1)
gaussEdge=SimplexGaussQuadrature(nd=1, order=1)
compositeTrapezoidalEdge = CompositeTrapezoidalEdge(order=1)
faceBarycenterEdge = FaceBarycenterEdge(order=1)
gaussTriangle=SimplexGaussQuadrature(nd=2, order=1)
compositeTrapezoidalTriangle = CompositeTrapezoidalTriangle(order=1)
faceBarycenterTriangle = FaceBarycenterTriangle(order=1)
gaussTetrahedron=SimplexGaussQuadrature(nd=3, order=1)
gaussSquare=CubeGaussQuadrature(nd=2, order=1)
gaussCube=CubeGaussQuadrature(nd=3, order=1)
#define some simple functions to integrate
# Arbitrary distinct coefficients so an integration error in any one
# coordinate direction shows up in the result.
a=1.1
b=0.92
c=1.34
def f0(x):
    """Constant polynomial: 1.0 at every quadrature point in *x*."""
    return [1.0 for _ in x]
def f1(x):
    """Linear polynomial 1 + a*x + b*y + c*z at each point."""
    return [1.0 + a*p[X] + b*p[Y] + c*p[Z] for p in x]
def f2(x):
    """Quadratic polynomial 1 + a*x**2 + c*y**2 + b*z**2 at each point."""
    return [1.0 + a*p[X]**2 + c*p[Y]**2 + b*p[Z]**2 for p in x]
def f3(x):
    """Cubic polynomial 1 + b*x**3 + a*y**3 + c*z**3 at each point."""
    return [1.0 + b*p[X]**3 + a*p[Y]**3 + c*p[Z]**3 for p in x]
def f4(x):
    """Quartic polynomial 1 + c*x**4 + b*y**4 + a*z**4 at each point."""
    return [1.0 + c*p[X]**4 + b*p[Y]**4 + a*p[Z]**4 for p in x]
def test_gauss_point():
    """A point rule is exact for any integrand; order must not matter."""
    vals = []
    for order in (1, 2):
        gaussPoint.setOrder(order)
        vals.append(dot(f4(gaussPoint.points), gaussPoint.weights))
        print(vals[-1])
    assert vals[0] == vals[1]
def test_gauss_tri4():
    """Gauss rule on the triangle: orders 4-6 must agree on a quartic."""
    print("4th Order Polynomial")
    print("Triangle")
    vals = []
    for order in range(1, 7):
        gaussTriangle.setOrder(order)
        vals.append(dot(f4(gaussTriangle.points), gaussTriangle.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[3], vals[4])
    npt.assert_almost_equal(vals[4], vals[5])
def test_gauss_tet4():
    """Gauss rule on the tetrahedron: orders 4-6 must agree on a quartic."""
    print("4th Order Polynomial")
    print("Tetrahedron")
    vals = []
    for order in range(1, 7):
        gaussTetrahedron.setOrder(order)
        vals.append(dot(f4(gaussTetrahedron.points), gaussTetrahedron.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[3], vals[4])
    npt.assert_almost_equal(vals[4], vals[5])
def test_gauss_edge3():
    """Gauss rule on the edge: orders 3 and 4 must agree on a cubic."""
    print("3rd Order Polynomial")
    print("Edge")
    vals = []
    for order in range(1, 5):
        gaussEdge.setOrder(order)
        vals.append(dot(f3(gaussEdge.points), gaussEdge.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[2], vals[3])
def test_gauss_tri3():
    """Gauss rule on the triangle: orders 3 and 4 must agree on a cubic."""
    print("3rd Order Polynomial")
    print("Triangle")
    vals = []
    for order in range(1, 5):
        gaussTriangle.setOrder(order)
        vals.append(dot(f3(gaussTriangle.points), gaussTriangle.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[2], vals[3])
def test_gauss_tet3():
    """Gauss rule on the tetrahedron: orders 3 and 4 must agree on a cubic."""
    print("3rd Order Polynomial")
    print("Tetrahedron")
    vals = []
    for order in range(1, 5):
        gaussTetrahedron.setOrder(order)
        vals.append(dot(f3(gaussTetrahedron.points), gaussTetrahedron.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[2], vals[3])
def test_gauss_edge2():
    """Gauss rule on the edge: orders 2 and 3 must agree on a quadratic."""
    print("2nd Order Polynomial")
    print("Edge")
    vals = []
    for order in range(1, 4):
        gaussEdge.setOrder(order)
        vals.append(dot(f2(gaussEdge.points), gaussEdge.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[1], vals[2])
def test_gauss_tri2():
    """Gauss rule on the triangle: orders 2 and 3 must agree on a quadratic."""
    print("2nd Order Polynomial")
    print("Triangle")
    vals = []
    for order in range(1, 4):
        gaussTriangle.setOrder(order)
        vals.append(dot(f2(gaussTriangle.points), gaussTriangle.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[1], vals[2])
def test_gauss_tet2():
    """Gauss rule on the tetrahedron: orders 2 and 3 must agree on a quadratic."""
    print("2nd Order Polynomial")
    print("Tetrahedron")
    vals = []
    for order in range(1, 4):
        gaussTetrahedron.setOrder(order)
        vals.append(dot(f2(gaussTetrahedron.points), gaussTetrahedron.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[1], vals[2])
def test_gauss_edge1():
    """Gauss rule on the edge: all orders must agree on a linear.

    Fixes the original copy-paste bug where the order-3 result printed
    ``int1_f1`` instead of ``int2_f1``.
    """
    print("1st Order Polynomial")
    print("Edge")
    vals = []
    for order in range(1, 4):
        gaussEdge.setOrder(order)
        vals.append(dot(f1(gaussEdge.points), gaussEdge.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[0], vals[1])
    npt.assert_almost_equal(vals[1], vals[2])
def test_gauss_tri1():
    """Gauss rule on the triangle: all orders must agree on a linear.

    Fixes the original copy-paste bug where the order-3 result printed
    ``int1_f1`` instead of ``int2_f1``.
    """
    print("1st Order Polynomial")
    print("Triangle")
    vals = []
    for order in range(1, 4):
        gaussTriangle.setOrder(order)
        vals.append(dot(f1(gaussTriangle.points), gaussTriangle.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[0], vals[1])
    npt.assert_almost_equal(vals[1], vals[2])
def test_gauss_tet1():
    """Gauss rule on the tetrahedron: all orders must agree on a linear."""
    print("1st Order Polynomial")
    print("Tetrahedron")
    vals = []
    for order in range(1, 4):
        gaussTetrahedron.setOrder(order)
        vals.append(dot(f1(gaussTetrahedron.points), gaussTetrahedron.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[0], vals[1])
    npt.assert_almost_equal(vals[1], vals[2])
def test_gauss_edge0():
    """Gauss rule on the edge: all orders must agree on a constant."""
    print("0th Order Polynomial")
    print("Edge")
    vals = []
    for order in range(1, 4):
        gaussEdge.setOrder(order)
        vals.append(dot(f0(gaussEdge.points), gaussEdge.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[0], vals[1])
    npt.assert_almost_equal(vals[1], vals[2])
def test_gauss_tri0():
    """Gauss rule on the triangle: all orders must agree on a constant."""
    print("0th Order Polynomial")
    print("Triangle")
    vals = []
    for order in range(1, 4):
        gaussTriangle.setOrder(order)
        vals.append(dot(f0(gaussTriangle.points), gaussTriangle.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[0], vals[1])
    npt.assert_almost_equal(vals[1], vals[2])
def test_gauss_tet0():
    """Gauss rule on the tetrahedron: all orders must agree on a constant."""
    print("0th Order Polynomial")
    print("Tetrahedron")
    vals = []
    for order in range(1, 4):
        gaussTetrahedron.setOrder(order)
        vals.append(dot(f0(gaussTetrahedron.points), gaussTetrahedron.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[0], vals[1])
    npt.assert_almost_equal(vals[1], vals[2])
def test_gauss_square4():
    """Gauss rule on the square: orders 4 and 5 must agree on a quartic."""
    print("4th Order Polynomial")
    print("Square")
    vals = []
    for order in range(1, 6):
        gaussSquare.setOrder(order)
        vals.append(dot(f4(gaussSquare.points), gaussSquare.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[3], vals[4])
def test_gauss_cube4():
    """Gauss rule on the cube: orders 4 and 5 must agree on a quartic."""
    print("4th Order Polynomial")
    print("Cube")
    vals = []
    for order in range(1, 6):
        gaussCube.setOrder(order)
        vals.append(dot(f4(gaussCube.points), gaussCube.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[3], vals[4])
def test_gauss_square3():
    """Gauss rule on the square: orders 3 and 4 must agree on a cubic."""
    print("3rd Order Polynomial")
    print("Square")
    vals = []
    for order in range(1, 5):
        gaussSquare.setOrder(order)
        vals.append(dot(f3(gaussSquare.points), gaussSquare.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[2], vals[3])
def test_gauss_cube3():
    """Gauss rule on the cube: orders 3 and 4 must agree on a cubic."""
    print("3rd Order Polynomial")
    print("Cube")
    vals = []
    for order in range(1, 5):
        gaussCube.setOrder(order)
        vals.append(dot(f3(gaussCube.points), gaussCube.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[2], vals[3])
def test_gauss_square2():
    """Gauss rule on the square: orders 2 and 3 must agree on a quadratic."""
    print("2nd Order Polynomial")
    print("Square")
    vals = []
    for order in range(1, 4):
        gaussSquare.setOrder(order)
        vals.append(dot(f2(gaussSquare.points), gaussSquare.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[1], vals[2])
def test_gauss_cube2():
    """Gauss rule on the cube: orders 2 and 3 must agree on a quadratic."""
    print("2nd Order Polynomial")
    print("Cube")
    vals = []
    for order in range(1, 4):
        gaussCube.setOrder(order)
        vals.append(dot(f2(gaussCube.points), gaussCube.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[1], vals[2])
def test_gauss_square1():
    """Gauss rule on the square: all orders must agree on a linear.

    Fixes the original copy-paste bug where the order-3 result printed
    ``int1_f1`` instead of ``int2_f1``.
    """
    print("1st Order Polynomial")
    print("Square")
    vals = []
    for order in range(1, 4):
        gaussSquare.setOrder(order)
        vals.append(dot(f1(gaussSquare.points), gaussSquare.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[0], vals[1])
    npt.assert_almost_equal(vals[1], vals[2])
def test_gauss_cube1():
    """Gauss rule on the cube: all orders must agree on a linear."""
    print("1st Order Polynomial")
    print("Cube")
    vals = []
    for order in range(1, 4):
        gaussCube.setOrder(order)
        vals.append(dot(f1(gaussCube.points), gaussCube.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[0], vals[1])
    npt.assert_almost_equal(vals[1], vals[2])
def test_gauss_square0():
    """Gauss rule on the square: all orders must agree on a constant."""
    print("0th Order Polynomial")
    print("Square")
    vals = []
    for order in range(1, 4):
        gaussSquare.setOrder(order)
        vals.append(dot(f0(gaussSquare.points), gaussSquare.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[0], vals[1])
    npt.assert_almost_equal(vals[1], vals[2])
def test_gauss_cube0():
    """Gauss rule on the cube: all orders must agree on a constant."""
    print("0th Order Polynomial")
    print("Cube")
    vals = []
    for order in range(1, 4):
        gaussCube.setOrder(order)
        vals.append(dot(f0(gaussCube.points), gaussCube.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[0], vals[1])
    npt.assert_almost_equal(vals[1], vals[2])
def test_compositeTrapezoidal_edge1():
    """Composite trapezoidal edge rule: orders 4 and 5 agree on a linear."""
    print("1st Order Polynomial")
    print("Edge")
    vals = []
    for order in range(1, 6):
        compositeTrapezoidalEdge.setOrder(order)
        vals.append(dot(f1(compositeTrapezoidalEdge.points), compositeTrapezoidalEdge.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[3], vals[4])
def test_faceBarycenter_edge1():
    """Face-barycenter edge rule: orders 4 and 5 agree on a linear."""
    print("1st Order Polynomial")
    print("Edge")
    vals = []
    for order in range(1, 6):
        faceBarycenterEdge.setOrder(order)
        vals.append(dot(f1(faceBarycenterEdge.points), faceBarycenterEdge.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[3], vals[4])
def test_compositeTrapezoidal_triangle1():
    """Composite trapezoidal triangle rule: orders 4 and 5 agree on a linear.

    Also prints the weight sum per order as a sanity check on the
    reference-element measure.
    """
    print("1st Order Polynomial")
    print("Triangle")
    vals = []
    for order in range(1, 6):
        compositeTrapezoidalTriangle.setOrder(order)
        vals.append(dot(f1(compositeTrapezoidalTriangle.points), compositeTrapezoidalTriangle.weights))
        print(sum(compositeTrapezoidalTriangle.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[3], vals[4])
def test_faceBarycenter_triangle1():
    """Face-barycenter triangle rule: orders 4 and 5 agree on a linear.

    Also prints the weight sum per order as a sanity check on the
    reference-element measure.
    """
    print("1st Order Polynomial")
    print("Triangle")
    vals = []
    for order in range(1, 6):
        faceBarycenterTriangle.setOrder(order)
        vals.append(dot(f1(faceBarycenterTriangle.points), faceBarycenterTriangle.weights))
        print(sum(faceBarycenterTriangle.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[3], vals[4])
# Lobatto quadrature rules, exercised by the tests below; like the
# Gauss rules above, these start at order 1 and are re-ordered inside
# the individual tests.
lobattoPoint=SimplexLobattoQuadrature(nd=0, order=1)
lobattoEdge=SimplexLobattoQuadrature(nd=1, order=1)
lobattoEdgeAlt=LobattoEdgeAlt(order=1)
lobattoTriangle=SimplexLobattoQuadrature(nd=2, order=1)
lobattoTetrahedron=SimplexLobattoQuadrature(nd=3, order=1)
def test_lobatto_point():
    """A point rule is exact for any integrand; order must not matter."""
    vals = []
    for order in (1, 2):
        lobattoPoint.setOrder(order)
        vals.append(dot(f4(lobattoPoint.points), lobattoPoint.weights))
        print(vals[-1])
    assert vals[0] == vals[1]
def test_lobatto_edge1():
    """Lobatto rule on the edge: all orders must agree on a linear.

    Fixes the original copy-paste bug where the order-3 result printed
    ``int1_f1`` instead of ``int2_f1``.
    """
    print("1st Order Polynomial")
    print("Edge")
    vals = []
    for order in range(1, 4):
        lobattoEdge.setOrder(order)
        vals.append(dot(f1(lobattoEdge.points), lobattoEdge.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[0], vals[1])
    npt.assert_almost_equal(vals[1], vals[2])
def test_lobatto_edgeAlt1():
    """Alternate Lobatto edge rule: all orders must agree on a linear.

    Fixes two bugs in the original: the order-3 value printed the
    order-2 result, and the final assertion compared ``int3_f1`` with
    itself (vacuously true) instead of against ``int2_f1``.
    """
    print("1st Order Polynomial")
    print("Edge")
    vals = []
    for order in range(1, 5):
        lobattoEdgeAlt.setOrder(order)
        vals.append(dot(f1(lobattoEdgeAlt.points), lobattoEdgeAlt.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[0], vals[1])
    npt.assert_almost_equal(vals[1], vals[2])
    npt.assert_almost_equal(vals[2], vals[3])
def test_lobatto_tri1():
    """Lobatto rule on the triangle: all orders must agree on a linear.

    Fixes the original copy-paste bug where the order-3 result printed
    ``int1_f1`` instead of ``int2_f1``.
    """
    print("1st Order Polynomial")
    print("Triangle")
    vals = []
    for order in range(1, 4):
        lobattoTriangle.setOrder(order)
        vals.append(dot(f1(lobattoTriangle.points), lobattoTriangle.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[0], vals[1])
    npt.assert_almost_equal(vals[1], vals[2])
def test_lobatto_tet1():
    """Lobatto rule on the tetrahedron: all orders must agree on a linear."""
    print("1st Order Polynomial")
    print("Tetrahedron")
    vals = []
    for order in range(1, 4):
        lobattoTetrahedron.setOrder(order)
        vals.append(dot(f1(lobattoTetrahedron.points), lobattoTetrahedron.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[0], vals[1])
    npt.assert_almost_equal(vals[1], vals[2])
def test_lobatto_edge0():
    """Lobatto rule on the edge: all orders must agree on a constant."""
    print("0th Order Polynomial")
    print("Edge")
    vals = []
    for order in range(1, 4):
        lobattoEdge.setOrder(order)
        vals.append(dot(f0(lobattoEdge.points), lobattoEdge.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[0], vals[1])
    npt.assert_almost_equal(vals[1], vals[2])
def test_lobatto_tri0():
    """Lobatto rule on the triangle: all orders must agree on a constant."""
    print("0th Order Polynomial")
    print("Triangle")
    vals = []
    for order in range(1, 4):
        lobattoTriangle.setOrder(order)
        vals.append(dot(f0(lobattoTriangle.points), lobattoTriangle.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[0], vals[1])
    npt.assert_almost_equal(vals[1], vals[2])
def test_lobatto_tet0():
    """Lobatto rule on the tetrahedron: all orders must agree on a constant."""
    print("0th Order Polynomial")
    print("Tetrahedron")
    vals = []
    for order in range(1, 4):
        lobattoTetrahedron.setOrder(order)
        vals.append(dot(f0(lobattoTetrahedron.points), lobattoTetrahedron.weights))
        print(vals[-1])
    npt.assert_almost_equal(vals[0], vals[1])
    npt.assert_almost_equal(vals[1], vals[2])
if __name__ == '__main__':
    # Bug fix: the module never imported unittest, so running this file
    # as a script raised NameError.  Import it locally here.
    # NOTE(review): the tests above are plain pytest-style functions,
    # not TestCase subclasses, so unittest discovers nothing -- run the
    # file through pytest to actually execute them.
    import unittest
    unittest.main(verbosity=2)
| |
import numpy as np
class Node(object):
    '''
    A base class for a node object.
    Each node has:
    - a name and description
    - properties
    - incoming and outgoing data
    '''
    def __init__(self, name):
        self._name = name
        self._inputs = dict()
        self._outputs = dict()
        self._props = dict()
        # _data aliases _props until a refresh() computes something else.
        self._data = self._props
        #TODO - kwargs -> data -> methods
        #TODO - set node type based on class name

    def name(self):
        '''Return the node name.'''
        return self._name

    def inputs(self):
        '''Return the dict of named input connections.'''
        return self._inputs

    def setInput(self, inputName, newInput):
        '''Connect *newInput* under *inputName*.'''
        #TODO - exception class
        self._inputs[inputName] = newInput

    def outputs(self):
        '''Return the dict of named output connections.'''
        return self._outputs

    def setOutput(self, outputName, newOutput):
        '''Connect *newOutput* under *outputName*.'''
        self._outputs[outputName] = newOutput

    def data(self):
        '''Return this node's current data payload.'''
        return self._data

    def refresh(self, parentData):
        '''Recompute data from *parentData*; the base node just re-exposes its props.'''
        self._data = self._props
#--------------- Viewer nodes ------------------#
class Node_Info(Node):
    '''
    Prints information about incoming nodes.
    '''
    def __init__(self, name):
        super(Node_Info, self).__init__(name)
    def refresh(self, parentData):
        # Dump the name and current data of every connected input node
        # to stdout (Python 2 print statements); parentData is unused.
        for inputName in self._inputs.keys():
            print "------------------------"
            print "Nodegraph Results:"
            print self._inputs[inputName].name() + ":"
            print self._inputs[inputName].data()
            print "------------------------"
#--------------- Math nodes ------------------#
class Node_Integer(Node):
    '''
    Integers
    '''
    def __init__(self, name, value=1):
        super(Node_Integer, self).__init__(name)
        # Expose the integer through the standard props/data dicts.
        self._props = dict(value=value)
        self._data = self._props
class Node_Math(Node):
    '''
    Basic math node. "plus" "minus" "multiply" "divide"
    '''
    def __init__(self, name, operation="plus"):
        super(Node_Math, self).__init__(name)
        # NOTE(review): refresh() only implements "plus"; the other
        # operations named in the docstring are currently ignored.
        self._operation = operation
    def refresh(self, parentData):
        # Sum the "value" field of every parent node's data dict.
        result = 0
        for inVal in parentData:
            if self._operation == "plus":
                try:
                    result += parentData[inVal]["value"]
                except KeyError:
                    # A parent without a "value" field is a wiring error.
                    #TODO - graceful error checking
                    raise
        self._data["value"] = result
#--------------- Layout nodes ------------------#
class Node_Camera(Node):
    '''
    Camera node holding a position and a field of view.
    '''
    def __init__(self, name, pos=(0, 0, 1), fov=45):
        super(Node_Camera, self).__init__(name)
        self._props = {"fov": fov, "position": pos}
class Node_Vector_Screen(Node):
    '''
    A position vector expressed in screen space.
    '''
    def __init__(self, name, pos=(0, 0, 1, 1)):
        super(Node_Vector_Screen, self).__init__(name)
        self._props = dict(position=pos)
class Node_Screen_to_Layout(Node):
    '''
    Project the 2D screen coordinates into 3D space.
    '''
    def __init__(self, name):
        super(Node_Screen_to_Layout, self).__init__(name)
    def refresh(self, parentData):
        '''Pass the screen-space position straight through (projection TODO).'''
        # TODO: implement the real screen -> layout projection using the
        # camera parameters below (read now so missing keys fail early).
        screen_position = parentData["obj"]["position"]
        camera = parentData["cam"]
        cam_pos = camera["position"]
        cam_fov = camera["fov"]
        self._data = screen_position
class Node_Vector_CamXYZ(Node):
    '''
    A point in space, useful for debugging.
    '''
    def __init__(self, name, pos=((0, 0, 0), (1, 1, 1))):
        super(Node_Vector_CamXYZ, self).__init__(name)
        self._props = dict(position=pos)
class Node_Layout_to_Screen(Node):
    '''
    Project the XYZ coordinates into 2D screen space.
    '''
    def __init__(self, name):
        super(Node_Layout_to_Screen, self).__init__(name)
    def refresh(self, parentData):
        '''Pass the layout-space position through (projection still TODO).'''
        #TODO: write this shit
        layout_pos = parentData["obj"]["position"]
        cam_pos = parentData["cam"]["position"]
        cam_fov = parentData["cam"]["fov"]
        # BUG FIX: the original assigned from the undefined name
        # 'layout_position' (NameError at runtime); the variable is layout_pos.
        result = layout_pos
        self._data = result
#--------------- Color nodes ------------------#
class Node_Constant_Color(Node):
    '''
    A constant color image plane.
    '''
    def __init__(self, name, color=(255, 255, 255, 255), resolution=(10, 10)):
        super(Node_Constant_Color, self).__init__(name)
        self._props["framebuffer"] = {}
        # Store the framebuffer as a (height, width, channels) numpy array.
        # BUG FIX: ndarray.fill() only accepts a scalar, so filling a 2-D
        # array with an RGBA tuple raised ValueError. np.full broadcasts the
        # per-channel color across a trailing channel axis instead.
        pixelData = np.full(tuple(resolution) + (len(color),), color)
        self._props["framebuffer"]["pixels"] = pixelData
        self._props["framebuffer"]["resolution"] = resolution
class Node_Read_Image(Node):
    '''
    Read an image file from disk into the framebuffer props.
    '''
    # NOTE(review): uses the legacy Python 2 'Image' module (pre-'PIL.Image'
    # namespace) and Python 2 print statements.
    def __init__(self, name, file=''):
        super(Node_Read_Image, self).__init__(name)
        import Image
        img = Image.open(file)
        print img.info
        self._props["framebuffer"] = {}
        self._props["framebuffer"]["resolution"] = img.size
        print img.getbands()
        # RGB images get an alpha channel added so downstream consumers
        # always see four bands.
        if img.getbands() == ("R", "G", "B"):
            img.putalpha(0)
        self._props["framebuffer"]["pixels"] = np.array(img)
class Node_Composite(Node):
    '''
    Perform compositing operations.
    TODO - operation should be a mode function pass in, e.g. nodegraph.add, etc
    '''
    def __init__(self, name, operation="add"):
        super(Node_Composite, self).__init__(name)
        # BUG FIX: the operation argument was previously discarded; store it
        # like Node_Math does so the node remembers its configured mode.
        self._operation = operation
    def refresh(self, parentData):
        '''Additively accumulate the input framebuffers into one output.'''
        self._props["framebuffer"] = {}
        # Sort by input name so the compositing order is deterministic.
        # (items() replaces Python-2-only iteritems(); sorted() materializes
        # the pairs either way.)
        for inputName, data in sorted(parentData.items()):
            print(inputName)
            framebuffer = data["framebuffer"]
            self._props["framebuffer"]["resolution"] = framebuffer["resolution"]
            if "pixels" in self._props["framebuffer"]:
                print()
                # Blend with what has been accumulated so far.
                self._props["framebuffer"]["pixels"] = np.add(
                    self._props["framebuffer"]["pixels"], framebuffer["pixels"])
            else:
                self._props["framebuffer"]["pixels"] = framebuffer["pixels"]
class Node_View_Image(Node):
    '''
    View the image.
    Displays the incoming framebuffer with PIL's default image viewer.
    '''
    def __init__(self, name):
        super(Node_View_Image, self).__init__(name)
    def refresh(self, parentData):
        # Only act when the "in" input actually carries a framebuffer.
        if "framebuffer" in parentData["in"].keys():
            inFramebuffer = parentData["in"]["framebuffer"]
            resolution = inFramebuffer["resolution"]  # NOTE(review): unused here
            from PIL import Image
            img = Image.fromarray(inFramebuffer["pixels"])
            img.show()
            # import OpenEXR
            # import Imath
            # pixelsR = inFramebuffer["red"].astype(np.float16).tostring()
            # pixelsG = inFramebuffer["green"].astype(np.float16).tostring()
            # pixelsB = inFramebuffer["blue"].astype(np.float16).tostring()
            # HEADER = OpenEXR.Header(resolution[1], resolution[0])
            # half_chan = Imath.Channel(Imath.PixelType(Imath.PixelType.HALF))
            # HEADER['channels'] = dict([(c, half_chan) for c in "RGB"])
            # exr = OpenEXR.OutputFile("out.exr", HEADER)
            # exr.writePixels({'R': pixelsR, 'G': pixelsG, 'B': pixelsB})
            # exr.close()
| |
# -*- coding: utf-8 -*-
""" PolymorphicModel Meta Class
Please see README.rst or DOCS.rst or
http://chrisglass.github.com/django_polymorphic/
"""
from __future__ import absolute_import
import sys
import inspect
import django
from django.db import models
from django.db.models.base import ModelBase
from django.db.models.manager import ManagerDescriptor
from .manager import PolymorphicManager
from .query import PolymorphicQuerySet
# PolymorphicQuerySet Q objects (and filter()) support these additional key
# words.
# These are forbidden as field names (a descriptive exception is raised)
POLYMORPHIC_SPECIAL_Q_KWORDS = ['instance_of', 'not_instance_of']
# Django 1.5 introduced AbstractManagerDescriptor; on older versions it does
# not exist, and code below tests it against None to branch accordingly.
try:
    from django.db.models.manager import AbstractManagerDescriptor  # dj15
except ImportError:
    AbstractManagerDescriptor = None
class PolymorphicModelBase(ModelBase):
    """
    Manager inheritance is a pretty complex topic which may need
    more thought regarding how this should be handled for polymorphic
    models.
    In any case, we probably should propagate 'objects' and 'base_objects'
    from PolymorphicModel to every subclass. We also want to somehow
    inherit/propagate _default_manager as well, as it needs to be polymorphic.
    The current implementation below is an experiment to solve this
    problem with a very simplistic approach: We unconditionally
    inherit/propagate any and all managers (using _copy_to_model),
    as long as they are defined on polymorphic models
    (the others are left alone).
    Like Django ModelBase, we special-case _default_manager:
    if there are any user-defined managers, it is set to the first of these.
    We also require that _default_manager as well as any user defined
    polymorphic managers produce querysets that are derived from
    PolymorphicQuerySet.
    """
    # NOTE(review): this is a metaclass; by this module's convention the
    # first parameter of these methods is named 'self' even though it
    # receives the (meta)class, not an instance.
    def __new__(self, model_name, bases, attrs):
        # print; print '###', model_name, '- bases:', bases
        # Workaround compatibility issue with six.with_metaclass() and custom
        # Django model metaclasses:
        if not attrs and model_name == 'NewBase':
            if django.VERSION < (1, 5):
                # Let Django fully ignore the class which is inserted in
                # between. Django 1.5 fixed this, see
                # https://code.djangoproject.com/ticket/19688
                attrs['__module__'] = 'django.utils.six'
                attrs['Meta'] = type('Meta', (), {'abstract': True})
            return super(PolymorphicModelBase, self).__new__(
                self, model_name, bases, attrs
            )
        # create new model
        new_class = self.call_superclass_new_method(model_name, bases, attrs)
        # check if the model fields are all allowed
        self.validate_model_fields(new_class)
        # create list of all managers to be inherited from the base classes
        inherited_managers = new_class.get_inherited_managers(attrs)
        # add the managers to the new model
        for source_name, mgr_name, manager in inherited_managers:
            new_manager = manager._copy_to_model(new_class)
            if mgr_name == '_default_manager':
                new_class._default_manager = new_manager
            else:
                new_class.add_to_class(mgr_name, new_manager)
        # get first user defined manager; if there is one, make it the
        # _default_manager
        # this value is used by the related objects, restoring access to custom
        # queryset methods on related objects.
        user_manager = self.get_first_user_defined_manager(new_class)
        if user_manager:
            new_class._default_manager = user_manager._copy_to_model(new_class)
            # the default mgr was defined by the user, not inherited
            new_class._default_manager._inherited = False
        # validate resulting default manager
        self.validate_model_manager(
            new_class._default_manager, model_name, '_default_manager'
        )
        # for __init__ function of this class (monkeypatching inheritance
        # accessors)
        new_class.polymorphic_super_sub_accessors_replaced = False
        # determine the name of the primary key field and store it into the
        # class variable polymorphic_primary_key_name (it is needed by
        # query.py)
        for f in new_class._meta.fields:
            if f.primary_key and type(f) != models.OneToOneField:
                new_class.polymorphic_primary_key_name = f.name
                break
        return new_class
    def get_inherited_managers(self, attrs):
        """
        Return list of all managers to be inherited/propagated from the base
        classes; use correct mro, only use managers with _inherited==False
        (they are of no use), skip managers that are overwritten by the user
        with same-named class attributes (in attrs)
        """
        # Each entry is a (base class name, manager attribute name, manager)
        # triple; the key set prevents duplicate attribute names across bases.
        add_managers = []
        add_managers_keys = set()
        for base in self.__mro__[1:]:
            if not issubclass(base, models.Model):
                continue
            if not getattr(base, 'polymorphic_model_marker', None):
                continue  # leave managers of non-polym. models alone
            for key, manager in base.__dict__.items():
                if type(manager) == models.manager.ManagerDescriptor:
                    manager = manager.manager
                if AbstractManagerDescriptor is not None:
                    # Django 1.4 unconditionally assigned managers to a model.
                    # As of Django 1.5 however, the abstract models don't get
                    # any managers, only a AbstractManagerDescriptor as
                    # substitute.
                    # Pretend that the manager is still there, so all code
                    # works like it used to.
                    if type(manager) == AbstractManagerDescriptor and \
                            base.__name__ == 'PolymorphicModel':
                        model = manager.model
                        if key == 'objects':
                            manager = PolymorphicManager()
                            manager.model = model
                        elif key == 'base_objects':
                            manager = models.Manager()
                            manager.model = model
                if not isinstance(manager, models.Manager):
                    continue
                if key == '_base_manager':
                    continue  # let Django handle _base_manager
                if key in attrs:
                    continue
                if key in add_managers_keys:
                    continue  # manager with that name already added, skip
                if manager._inherited:
                    # inherited managers (on the bases) have no significance,
                    # they are just copies:
                    continue
                # validate any inherited polymorphic managers:
                if isinstance(manager, PolymorphicManager):
                    self.validate_model_manager(manager, self.__name__, key)
                add_managers.append((base.__name__, key, manager))
                add_managers_keys.add(key)
        # The ordering in the base.__dict__ may randomly change depending on
        # which method is added.
        # Make sure base_objects is on top, and 'objects' and
        # '_default_manager' follow afterwards.
        # This makes sure that the _base_manager is also assigned properly.
        add_managers = sorted(
            add_managers, key=lambda item: (item[1].startswith('_'), item[1])
        )
        return add_managers
    @classmethod
    def get_first_user_defined_manager(mcs, new_class):
        # See if there is a manager attribute directly stored at this
        # inheritance level.
        mgr_list = []
        for key, val in new_class.__dict__.items():
            if isinstance(val, ManagerDescriptor):
                val = val.manager
            # Only user-defined *subclasses* of PolymorphicManager count;
            # a plain PolymorphicManager is the propagated default.
            if not isinstance(val, PolymorphicManager) or \
                    type(val) is PolymorphicManager:
                continue
            mgr_list.append((val.creation_counter, key, val))
        # if there are user defined managers, use first one as _default_manager
        if mgr_list:
            # Sorting on creation_counter picks the manager declared first.
            _, manager_name, manager = sorted(mgr_list)[0]
            return manager
        return None
    @classmethod
    def call_superclass_new_method(self, model_name, bases, attrs):
        """call __new__ method of super class and return the newly created class.
        Also work around a limitation in Django's ModelBase."""
        # There seems to be a general limitation in Django's app_label handling
        # regarding abstract models (in ModelBase). See issue 1 on github -
        # TODO: propose patch for Django We run into this problem if
        # polymorphic.py is located in a top-level directory which is directly
        # in the python path. To work around this we temporarily set app_label
        # here for PolymorphicModel.
        meta = attrs.get('Meta', None)
        do_app_label_workaround = (
            meta and
            attrs['__module__'] == 'polymorphic' and
            model_name == 'PolymorphicModel' and
            getattr(meta, 'app_label', None) is None
        )
        if do_app_label_workaround:
            meta.app_label = 'poly_dummy_app_label'
        new_class = super(PolymorphicModelBase, self).__new__(
            self, model_name, bases, attrs
        )
        if do_app_label_workaround:
            del(meta.app_label)
        return new_class
    @classmethod
    def validate_model_fields(self, new_class):
        """check if all fields names are allowed (i.e. not in
        POLYMORPHIC_SPECIAL_Q_KWORDS)"""
        for f in new_class._meta.fields:
            if f.name in POLYMORPHIC_SPECIAL_Q_KWORDS:
                # NOTE(review): the concatenated message is missing a space
                # before 'is not allowed' — confirm before relying on it.
                e = 'PolymorphicModel: "%s" - field name "%s"' + \
                    'is not allowed in polymorphic models'
                raise AssertionError(e % (new_class.__name__, f.name))
    @classmethod
    def validate_model_manager(self, manager, model_name, manager_name):
        """check if the manager is derived from PolymorphicManager
        and its querysets from PolymorphicQuerySet - throw AssertionError if
        not"""
        if not issubclass(type(manager), PolymorphicManager):
            e = 'PolymorphicModel: "' + model_name + '.' + manager_name + \
                '" manager is of type "' + type(manager).__name__
            e += '", but must be a subclass of PolymorphicManager'
            raise AssertionError(e)
        if not getattr(manager, 'queryset_class', None) or \
                not issubclass(manager.queryset_class, PolymorphicQuerySet):
            e = 'PolymorphicModel: "' + model_name + '.' + manager_name + \
                '" (PolymorphicManager) has been instantiated with a ' + \
                'queryset class which is'
            e += ' not a subclass of PolymorphicQuerySet (which is required)'
            raise AssertionError(e)
        return manager
    # hack: a small patch to Django would be a better solution. Django's
    # management command 'dumpdata' relies on non-polymorphic behaviour of the
    # _default_manager. Therefore, we catch any access to _default_manager here
    # and return the non-polymorphic default manager instead if we are called
    # from 'dumpdata.py' Otherwise, the base objects will be upcasted to
    # polymorphic models, and be outputted as such. (non-polymorphic default
    # manager is 'base_objects' for polymorphic models). This way we don't
    # need to patch django.core.management.commands.dumpdata for all supported
    # Django versions.
    if len(sys.argv) > 1 and sys.argv[1] == 'dumpdata':
        # manage.py dumpdata is running
        def __getattribute__(self, name):
            if name == '_default_manager':
                # frm[1] is caller file name, frm[3] is caller function name
                frm = inspect.stack()[1]
                if 'django/core/management/commands/dumpdata.py' in frm[1]:
                    return self.base_objects
            return super(PolymorphicModelBase, self).__getattribute__(name)
    # TODO: investigate Django how this can be avoided
| |
# coding: utf-8
import pytest # noqa: 401
from mock import mock, patch
from sitebackup import main
from backup.utils.mail import Sender, Recipient
def testHelp():
    """Asking for --help must terminate the program with exit code 0."""
    with pytest.raises(SystemExit) as excinfo:
        main(["-h"])
    assert excinfo.value.code == 0
def setupWP(mock):
    """Instantiate the patched WP class once, then clear the recorded call
    so later assert_called_once_with checks start from a clean slate."""
    instance = mock()
    mock.reset_mock()
    return instance
@patch('sitebackup.os.path.isdir', return_value=True)
@patch('sitebackup.Mailer')
@patch('sitebackup.WP')
@patch('sitebackup.Backup')
def testWithNoArguments(patchedBackup, patchedWP, patchedMailer, *args):
    """With only a path, every DB option is None and all backup features
    (database, filesystem, thinning, attic, dry-run) stay disabled."""
    mailer = patchedMailer()
    wp = setupWP(patchedWP)
    bup = patchedBackup()
    main(["path_for_test_with_no_arguments"])
    # WP must be constructed from the path alone, with no DB overrides.
    patchedWP.assert_called_once_with(
        'path_for_test_with_no_arguments',
        dbhost=None,
        dbname=None,
        dbpass=None,
        dbport=None,
        dbprefix=None,
        dbuser=None
    )
    # calls to backup
    patchedBackup.assert_called_with(wp, mailer=mailer, quiet=False)
    bup.execute.assert_called_once_with(
        targets=[],
        database=False,
        filesystem=False,
        thinning=None,
        attic=None,
        dry=False
    )
@patch('sitebackup.os.path.isdir', return_value=True)
@patch('sitebackup.Mailer')
@patch('sitebackup.WP')
@patch('sitebackup.Backup')
def testWithArguments(patchedBackup, patchedWP, patchedMailer, *args):
    """Exercise four invocations of main(): DB overrides, mail reporting,
    --database with a bare --attic, and --filesystem with --attic=PATH."""
    mailer = patchedMailer()
    wp = setupWP(patchedWP)
    # Give the fake WP an admin address for the --mail-to-admin case below.
    wp.email = "michael@localhost"
    bup = patchedBackup()
    # test 1: overwrite database configuration
    main([
        "--db=wpdb",
        "--dbhost=localhost",
        "--dbuser=michael",
        "--dbpass=123456",
        "--dbprefix=wp",
        "path_for_test_with_db_arguments"
    ])
    # configuration should be taken from arguments
    patchedWP.assert_called_once_with(
        'path_for_test_with_db_arguments',
        dbhost='localhost',
        dbname='wpdb',
        dbpass='123456',
        dbport=None,
        dbprefix='wp',
        dbuser='michael'
    )
    # calls to backup
    patchedBackup.assert_called_with(wp, mailer=mailer, quiet=False)
    bup.execute.assert_called_with(
        targets=[],
        database=False,
        filesystem=False,
        thinning=None,
        attic=None,
        dry=False
    )
    # test 2: configure mail reporting
    main([
        "-q",
        "--mail-to-admin",
        "--mail-from=admin@localhost",
        "--mail-to=example@localhost",
        "."
    ])
    # configuration should be taken from arguments
    mailer.setSender.assert_called_with(
        Sender("admin@localhost")
    )
    # --mail-to-admin adds the WP admin first, then the explicit --mail-to.
    assert[
        mock.call(Recipient("michael@localhost")),
        mock.call(Recipient("example@localhost"))
    ] == mailer.addRecipient.mock_calls
    # calls to backup
    patchedBackup.assert_called_with(wp, mailer=mailer, quiet=True)
    bup.execute.assert_called_with(
        targets=[],
        database=False,
        filesystem=False,
        thinning=None,
        attic=None,
        dry=False
    )
    # test 3: switch on database processing and configure attic with no parameter
    main([
        "--database",
        "--attic",
        "--",
        "."
    ])
    # calls to backup
    patchedBackup.assert_called_with(wp, mailer=mailer, quiet=False
    )
    bup.execute.assert_called_with(
        targets=[],
        database=True,
        filesystem=False,
        thinning=None,
        attic=".",
        dry=False
    )
    # test 4: switch on filesystem processing and configure attic with parameter
    main([
        "--filesystem",
        "--attic=path_to_attic",
        "."
    ])
    # calls to backup
    patchedBackup.assert_called_with(wp, mailer=mailer, quiet=False
    )
    bup.execute.assert_called_with(
        targets=[],
        database=False,
        filesystem=True,
        thinning=None,
        attic="path_to_attic",
        dry=False
    )
@patch('sitebackup.os.path.isdir', return_value=True)
@patch('sitebackup.WP')
@patch('sitebackup.S3')
@patch('sitebackup.Backup')
def testWithS3Arguments(patchedBackup, patchedS3, patchedWP, *args):
    """S3 options must construct an S3 target; without --s3bucket the
    bucket name falls back to the WP instance slug."""
    wp = setupWP(patchedWP)
    wp.slug = "wordpress-instance-to-backup"
    s3 = patchedS3()
    bup = patchedBackup()
    # test: configure s3 targets with bucket
    main([
        "--s3=s3.host.com",
        "--s3accesskey=ABCDEF",
        "--s3secretkey=000000",
        "--s3bucket=bucket",
        "."
    ])
    patchedS3.assert_called_with(
        "s3.host.com", "ABCDEF", "000000", "bucket"
    )
    bup.execute.assert_called_once_with(
        targets=[s3],
        database=False,
        filesystem=False,
        thinning=None,
        attic=None,
        dry=False
    )
    # test: configure s3 target with no bucket (bucket should be wp.slug)
    main([
        "--s3=s3.host.com",
        "--s3accesskey=ABCDEF",
        "--s3secretkey=000000",
        "."
    ])
    patchedS3.assert_called_with(
        "s3.host.com", "ABCDEF", "000000", "wordpress-instance-to-backup"
    )
    bup.execute.assert_called_with(
        targets=[s3],
        database=False,
        filesystem=False,
        thinning=None,
        attic=None,
        dry=False
    )
| |
# Copyright (c) 2013 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from u2flib_server.jsapi import (RegisterRequest, RegisterResponse,
SignRequest, SignResponse, DeviceRegistration)
from u2flib_server.utils import (certificate_from_der, pub_key_from_der,
subject_from_certificate, websafe_decode,
websafe_encode, rand_bytes,
verify_ecdsa_signature)
from u2flib_server.yubicommon.compat import byte2int
import codecs
import struct
from cryptography.hazmat.primitives.serialization import Encoding
# Public API of this module.
__all__ = [
    'start_register',
    'complete_register',
    'start_authenticate',
    'verify_authenticate'
]
# Protocol version reported in register/sign requests.
VERSION = 'U2F_V2'
# Attestation-certificate subjects whose signatures need the unused-bits
# byte zeroed before verification (see RawRegistrationResponse._fixsig).
FIXSIG = [
    'CN=Yubico U2F EE Serial 776137165',
    'CN=Yubico U2F EE Serial 1086591525',
    'CN=Yubico U2F EE Serial 1973679733',
    'CN=Yubico U2F EE Serial 13503277888',
    'CN=Yubico U2F EE Serial 13831167861',
    'CN=Yubico U2F EE Serial 14803321578'
]
class RawRegistrationResponse(object):
    """
    Object representing a raw registration response.
    registrationData = 0x05, pubkey, kh_len, key_handle, cert, signature
    """
    # Byte length of the public key field in registrationData.
    PUBKEY_LEN = 65
    def __init__(self, app_param, chal_param, data):
        self.app_param = app_param
        self.chal_param = chal_param
        self.data = data
        # The leading reserved byte must be 0x05 per the layout above.
        if byte2int(data[0]) != 0x05:
            raise ValueError("Invalid data: %r" % (data,))
        data = data[1:]
        self.pub_key = data[:self.PUBKEY_LEN]
        data = data[self.PUBKEY_LEN:]
        # One length byte, then the key handle itself.
        kh_len = byte2int(data[0])
        data = data[1:]
        self.key_handle = data[:kh_len]
        data = data[kh_len:]
        # The remainder is the attestation certificate followed by the
        # signature; the cert's length is recovered by re-encoding it as DER.
        self.certificate = self._fixsig(certificate_from_der(data))
        self.signature = data[len(self.certificate.public_bytes(Encoding.DER)):]
    def __str__(self):
        # N.B. Ensure this returns a str() on both Python 2 and Python 3
        hex_bytes = codecs.encode(self.data, 'hex_codec')
        hex_text = codecs.decode(hex_bytes, 'ascii')
        return str(hex_text)
    def verify_csr_signature(self):
        """Verify the attestation signature over the registration fields."""
        data = (b'\x00' + self.app_param + self.chal_param +
                self.key_handle + self.pub_key)
        pub_key = self.certificate.public_key()
        verify_ecdsa_signature(data, pub_key, self.signature)
    def _fixsig(self, cert):
        """Zero the unused-bits byte for known-buggy FIXSIG certificates."""
        subject = 'CN=' + subject_from_certificate(cert)
        if subject in FIXSIG:  # Set unused bits in signature to 0
            der = list(cert.public_bytes(Encoding.DER))
            # NOTE(review): on Python 3 list(bytes) yields ints, so assigning
            # b'\x00' here mixes types — confirm against upstream behavior.
            der[-257] = b'\x00'
            cert = certificate_from_der(der)
        return cert
    def serialize(self):
        """Websafe-b64 encode app param + challenge param + raw data."""
        return websafe_encode(self.app_param + self.chal_param + self.data)
    @classmethod
    def deserialize(cls, serialized):
        """Inverse of serialize(): split params (32 bytes each) and data."""
        data = websafe_decode(serialized)
        return cls(data[:32], data[32:64], data[64:])
class RawAuthenticationResponse(object):
    """
    Raw U2F authentication response.
    authenticationData layout: user-presence byte, 4-byte big-endian
    counter, then the ECDSA signature.
    """
    def __init__(self, app_param, chal_param, data):
        self.app_param = app_param
        self.chal_param = chal_param
        self.data = data
        # Split the raw payload into its fixed-offset fields.
        self.user_presence = data[0:1]
        self.counter = data[1:5]
        self.counter_int = struct.unpack('>I', self.counter)[0]
        self.signature = data[5:]
    def __str__(self):
        # Hex-encode the payload; decoding as ASCII yields a native str on
        # both Python 2 and Python 3.
        encoded = codecs.encode(self.data, 'hex_codec')
        return str(codecs.decode(encoded, 'ascii'))
    def verify_signature(self, pubkey):
        """Raise unless the signature covers app param, presence, counter
        and challenge param with the given DER-encoded public key."""
        signed = (self.app_param + self.user_presence + self.counter +
                  self.chal_param)
        key = pub_key_from_der(pubkey)
        verify_ecdsa_signature(signed, key, self.signature)
    def serialize(self):
        """Websafe-b64 encode app param + challenge param + raw data."""
        return websafe_encode(self.app_param + self.chal_param + self.data)
    @classmethod
    def deserialize(cls, serialized):
        """Inverse of serialize(): split params (32 bytes each) and data."""
        raw = websafe_decode(serialized)
        return cls(raw[:32], raw[32:64], raw[64:])
def _validate_client_data(client_data, challenge, typ, valid_facets):
"""
Validate the client data.
clientData = {
"typ": string,
"challenge": string, //b64 encoded challenge.
"origin": string, //Facet used
}
"""
if client_data.typ != typ:
raise ValueError("Wrong type! Was: %r, expecting: %r" % (
client_data.typ, typ))
if challenge != client_data.challenge:
raise ValueError("Wrong challenge! Was: %r, expecting: %r" % (
client_data.challenge, challenge))
if valid_facets is not None and client_data.origin not in valid_facets:
raise ValueError("Invalid facet! Was: %r, expecting one of: %r" % (
client_data.origin, valid_facets))
def start_register(app_id, challenge=None):
    """Build a RegisterRequest for *app_id*.
    A random 32-byte challenge is generated when none is supplied.
    """
    chal = rand_bytes(32) if challenge is None else challenge
    return RegisterRequest(
        version=VERSION,
        appId=app_id,
        challenge=websafe_encode(chal)
    )
def complete_register(request, response, valid_facets=None):
    """Finish a registration: validate client data, check the attestation
    signature, and return (DeviceRegistration, attestation certificate)."""
    req = RegisterRequest.wrap(request)
    resp = RegisterResponse.wrap(response)
    _validate_client_data(resp.clientData, req.challenge,
                          "navigator.id.finishEnrollment", valid_facets)
    raw = RawRegistrationResponse(
        req.appParam,
        resp.clientParam,
        resp.registrationData
    )
    raw.verify_csr_signature()
    registration = DeviceRegistration(
        appId=req.appId,
        keyHandle=websafe_encode(raw.key_handle),
        publicKey=websafe_encode(raw.pub_key)
    )
    return registration, raw.certificate
def start_authenticate(device, challenge=None):
    """Build a SignRequest for the registered *device*.
    A random 32-byte challenge is generated when none is supplied.
    """
    dev = DeviceRegistration.wrap(device)
    chal = rand_bytes(32) if challenge is None else challenge
    return SignRequest(
        version=VERSION,
        appId=dev.appId,
        keyHandle=dev.keyHandle,
        challenge=websafe_encode(chal)
    )
def verify_authenticate(device, request, response, valid_facets=None):
    """Verify a sign response against the request and the device key.
    Returns (counter, user_presence) on success; raises on any failure.
    """
    dev = DeviceRegistration.wrap(device)
    req = SignRequest.wrap(request)
    resp = SignResponse.wrap(response)
    _validate_client_data(resp.clientData, req.challenge,
                          "navigator.id.getAssertion", valid_facets)
    raw = RawAuthenticationResponse(
        dev.appParam,
        resp.clientParam,
        resp.signatureData
    )
    raw.verify_signature(websafe_decode(dev.publicKey))
    return raw.counter_int, raw.user_presence
| |
r"""
Arithmatex.
pymdownx.arithmatex
Extension that preserves the following for MathJax use:
~~~.tex
$Equation$, \(Equation\)
$$
Display Equations
$$
\[
Display Equations
\]
\begin{align}
Display Equations
\end{align}
~~~
and `$Inline MathJax Equations$`
Inline and display equations are converted to script tags. You can optionally generate previews.
MIT license.
Copyright (c) 2014 - 2017 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import unicode_literals
from markdown import Extension
from markdown.inlinepatterns import Pattern
from markdown.blockprocessors import BlockProcessor
from markdown import util as md_util
from . import util
import re
from .util import PymdownxDeprecationWarning
import warnings
# Emitted via warnings.warn() when the deprecated 'insert_as_script' option
# is enabled.
DEPRECATION_WARN = """'insert_as_script' is deprecated and is now unnecessary if using MathJax
as it is the default format for MathJax. If you wish to use a generic math output, use 'generic'.
Please discontinue using this option as it will be removed in the future.
See documentation to see why this have been deprecated."""
# Inline math delimited by single dollars; the "smart" form forbids
# whitespace immediately inside the dollars. Both forms also match runs of
# doubled backslashes before a '$' so escapes can be passed through.
RE_SMART_DOLLAR_INLINE = r'(?:(?<!\\)((?:\\{2})+)(?=\$)|(?<!\\)(\$)(?!\s)((?:\\.|[^\$])+?)(?<!\s)(?:\$))'
RE_DOLLAR_INLINE = r'(?:(?<!\\)((?:\\{2})+)(?=\$)|(?<!\\)(\$)((?:\\.|[^\$])+?)(?:\$))'
# Inline math delimited by \( ... \).
RE_BRACKET_INLINE = r'(?:(?<!\\)((?:\\{2})+?)(?=\\\()|(?<!\\)(\\\()((?:\\[^)]|[^\\])+?)(?:\\\)))'
# Block (display) math: $$...$$, \begin{env}...\end{env}, and \[...\].
RE_DOLLAR_BLOCK = r'(?P<dollar>[$]{2})(?P<math>.+?)(?P=dollar)'
RE_TEX_BLOCK = r'(?P<math2>\\begin\{(?P<env>[a-z]+\*?)\}.+?\\end\{(?P=env)\})'
RE_BRACKET_BLOCK = r'\\\[(?P<math3>(?:\\[^\]]|[^\\])+?)\\\]'
class InlineArithmatexPattern(Pattern):
    """Arithmatex inline pattern handler."""
    # Markdown's internal placeholder sequence for an escaped backslash.
    ESCAPED_BSLASH = '%s%s%s' % (md_util.STX, ord('\\'), md_util.ETX)
    def __init__(self, pattern, config):
        """Initialize."""
        # Generic setup
        self.generic = config.get('generic', False)
        wrap = config.get('tex_inline_wrap', ["\\(", "\\)"])
        # Template used to wrap the math when emitting generic output.
        self.wrap = wrap[0] + '%s' + wrap[1]
        # Default setup
        self.preview = config.get('preview', True)
        Pattern.__init__(self, pattern)
    def mathjax_output(self, math):
        """Default MathJax output."""
        if self.preview:
            # Visible preview span plus the math/tex script MathJax consumes.
            el = md_util.etree.Element('span')
            preview = md_util.etree.SubElement(el, 'span', {'class': 'MathJax_Preview'})
            preview.text = md_util.AtomicString(math)
            script = md_util.etree.SubElement(el, 'script', {'type': 'math/tex'})
            script.text = md_util.AtomicString(math)
        else:
            el = md_util.etree.Element('script', {'type': 'math/tex'})
            el.text = md_util.AtomicString(math)
        return el
    def generic_output(self, math):
        """Generic output."""
        el = md_util.etree.Element('span', {'class': 'arithmatex'})
        el.text = md_util.AtomicString(self.wrap % math)
        return el
    def handleMatch(self, m):
        """Handle notations and switch them to something that will be more detectable in HTML."""
        # NOTE(review): group indices 2/5 (escapes) and 4/7 (math) assume the
        # combined dollar|bracket pattern built in extendMarkdown plus the
        # extra leading group Markdown's Pattern wrapper adds — confirm if
        # the pattern construction changes.
        # Handle escapes
        escapes = m.group(2)
        if not escapes:
            escapes = m.group(5)
        if escapes:
            return escapes.replace('\\\\', self.ESCAPED_BSLASH)
        # Handle Tex
        math = m.group(4)
        if not math:
            math = m.group(7)
        return self.generic_output(math) if self.generic else self.mathjax_output(math)
class BlockArithmatexProcessor(BlockProcessor):
    """MathJax block processor to find $$MathJax$$ content."""
    def __init__(self, pattern, config, md):
        """Initialize."""
        # Generic setup
        self.generic = config.get('generic', False)
        wrap = config.get('tex_block_wrap', ['\\[', '\\]'])
        # Template used to wrap the math when emitting generic output.
        self.wrap = wrap[0] + '%s' + wrap[1]
        # Default setup
        self.preview = config.get('preview', False)
        self.match = None
        self.pattern = re.compile(pattern)
        BlockProcessor.__init__(self, md.parser)
    def test(self, parent, block):
        """Return 'True' for future Python Markdown block compatibility."""
        # Cache the match so run() can reuse it without re-matching.
        self.match = self.pattern.match(block) if self.pattern is not None else None
        return self.match is not None
    def mathjax_output(self, parent, math):
        """Default MathJax output."""
        if self.preview:
            # Wrap preview div and script together in a fresh container div.
            grandparent = parent
            parent = md_util.etree.SubElement(grandparent, 'div')
            preview = md_util.etree.SubElement(parent, 'div', {'class': 'MathJax_Preview'})
            preview.text = md_util.AtomicString(math)
        el = md_util.etree.SubElement(parent, 'script', {'type': 'math/tex; mode=display'})
        el.text = md_util.AtomicString(math)
    def generic_output(self, parent, math):
        """Generic output."""
        el = md_util.etree.SubElement(parent, 'div', {'class': 'arithmatex'})
        el.text = md_util.AtomicString(self.wrap % math)
    def run(self, parent, blocks):
        """Find and handle block content."""
        blocks.pop(0)
        # Pick whichever named group matched: 'math' ($$...$$),
        # 'math2' (\begin...\end) or 'math3' (\[...\]).
        # NOTE(review): assumes all enabled groups exist in the compiled
        # pattern — confirm when block_syntax is customized.
        math = self.match.group('math')
        if not math:
            math = self.match.group('math2')
        if not math:
            math = self.match.group('math3')
        if self.generic:
            self.generic_output(parent, math)
        else:
            self.mathjax_output(parent, math)
        return True
class ArithmatexExtension(Extension):
    """Add the Arithmatex math extension to the Markdown class."""
    # FIX(review): corrected the copy-pasted docstring ("delete extension"),
    # the "blick" typo, the inline_syntax help that described block syntax
    # with the wrong delimiters, and the stale default values quoted in the
    # option descriptions. Code behavior is unchanged.
    def __init__(self, *args, **kwargs):
        """Initialize the extension's configuration options."""
        self.config = {
            'tex_inline_wrap': [
                ["\\(", "\\)"],
                "Wrap inline content with the provided text ['open', 'close'] "
                "- Default: ['\\(', '\\)']"
            ],
            'tex_block_wrap': [
                ["\\[", "\\]"],
                "Wrap block content with the provided text ['open', 'close'] "
                "- Default: ['\\[', '\\]']"
            ],
            "smart_dollar": [True, "Use Arithmatex's smart dollars - Default True"],
            "block_syntax": [
                ['dollar', 'square', 'begin'],
                'Enable block syntax: "dollar" ($$...$$), "square" (\\[...\\]), and '
                '"begin" (\\begin{env}...\\end{env}). - Default: ["dollar", "square", "begin"]'
            ],
            "inline_syntax": [
                ['dollar', 'round'],
                'Enable inline syntax: "dollar" ($...$), "round" (\\(...\\))'
                ' - Default: ["dollar", "round"]'
            ],
            'generic': [False, "Output in a generic format for non MathJax libraries - Default: False"],
            'insert_as_script': [False, "Deprecated"],
            'preview': [
                True,
                "Insert a preview for scripts. - Default: True"
            ]
        }
        super(ArithmatexExtension, self).__init__(*args, **kwargs)
    def extendMarkdown(self, md, md_globals):
        """Extend the inline and block processor objects."""
        md.registerExtension(self)
        util.escape_chars(md, ['$'])
        config = self.getConfigs()
        # Warn (once) about the deprecated option but keep accepting it.
        if config.get('insert_as_script'):  # pragma: no cover
            warnings.warn(DEPRECATION_WARN, PymdownxDeprecationWarning)
        # Inline patterns
        allowed_inline = set(config.get('inline_syntax', ['dollar', 'round']))
        smart_dollar = config.get('smart_dollar', True)
        inline_patterns = []
        if 'dollar' in allowed_inline:
            inline_patterns.append(RE_SMART_DOLLAR_INLINE if smart_dollar else RE_DOLLAR_INLINE)
        if 'round' in allowed_inline:
            inline_patterns.append(RE_BRACKET_INLINE)
        if inline_patterns:
            inline = InlineArithmatexPattern('(?:%s)' % '|'.join(inline_patterns), config)
            md.inlinePatterns.add("arithmatex-inline", inline, ">backtick")
        # Block patterns
        allowed_block = set(config.get('block_syntax', ['dollar', 'square', 'begin']))
        block_pattern = []
        if 'dollar' in allowed_block:
            block_pattern.append(RE_DOLLAR_BLOCK)
        if 'square' in allowed_block:
            block_pattern.append(RE_BRACKET_BLOCK)
        if 'begin' in allowed_block:
            block_pattern.append(RE_TEX_BLOCK)
        if block_pattern:
            block = BlockArithmatexProcessor(r'(?s)^(?:%s)[ ]*$' % '|'.join(block_pattern), config, md)
            md.parser.blockprocessors.add('arithmatex-block', block, "<code")
def makeExtension(*args, **kwargs):
    """Build and return the extension instance (Markdown entry point)."""
    extension = ArithmatexExtension(*args, **kwargs)
    return extension
| |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Renderers for various kinds of annotations that can be added to
Bokeh plots
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..core.enums import (
AngleUnits,
Dimension,
FontStyle,
LegendClickPolicy,
LegendLocation,
Orientation,
RenderMode,
SpatialUnits,
TextAlign,
TooltipAttachment,
VerticalAlign,
)
from ..core.has_props import abstract
from ..core.properties import (
Angle,
AngleSpec,
Auto,
Bool,
ColorSpec,
Datetime,
Dict,
Either,
Enum,
Float,
FontSizeSpec,
Include,
Instance,
Int,
List,
NumberSpec,
Override,
PropertyUnitsSpec,
Seq,
String,
StringSpec,
Tuple,
value,
)
from ..core.property_mixins import (
FillProps,
LineProps,
ScalarFillProps,
ScalarLineProps,
ScalarTextProps,
TextProps,
)
from ..core.validation import error
from ..core.validation.errors import (
BAD_COLUMN_NAME,
NON_MATCHING_DATA_SOURCES_ON_LEGEND_ITEM_RENDERERS,
)
from ..model import Model
from ..util.serialization import convert_datetime_type
from .formatters import BasicTickFormatter, TickFormatter
from .mappers import ContinuousColorMapper
from .renderers import GlyphRenderer, Renderer
from .sources import ColumnDataSource, DataSource
from .tickers import BasicTicker, ContinuousTicker
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Public names exported by this module (consumed by ``from ... import *``
# and by Bokeh's documentation tooling).
__all__ = (
    'Annotation',
    'Arrow',
    'Band',
    'BoxAnnotation',
    'ColorBar',
    'Label',
    'LabelSet',
    'Legend',
    'LegendItem',
    'PolyAnnotation',
    'Slope',
    'Span',
    'TextAnnotation',
    'Title',
    'Tooltip',
    'ToolbarPanel',
    'Whisker',
)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
# This only exists to prevent a circular import.
def _DEFAULT_ARROW():
    # Import is deferred to call time to avoid a circular import with
    # ``arrow_heads`` (see comment above).
    from .arrow_heads import OpenHead
    return OpenHead()
# This only exists to prevent a circular import.
def _DEFAULT_TEE():
    # Import is deferred to call time to avoid a circular import with
    # ``arrow_heads`` (see comment above).
    from .arrow_heads import TeeHead
    return TeeHead(level="underlay", size=10)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@abstract
class Annotation(Renderer):
    ''' Base class for all annotation models.
    '''
    # All annotations default to the "annotation" render level.
    level = Override(default="annotation")
@abstract
class TextAnnotation(Annotation):
    ''' Base class for text annotation models such as labels and titles.
    '''
    # Shared by all text annotations: selects native canvas drawing or an
    # overlaid CSS element (with the limitations described below).
    render_mode = Enum(RenderMode, default="canvas", help="""
    Specifies whether the text is rendered as a canvas element or as a
    CSS element overlaid on the canvas. The default mode is "canvas".
    .. note::
        The CSS labels won't be present in the output using the "save" tool.
    .. warning::
        Not all visual styling properties are supported if the render_mode is
        set to "css". The border_line_dash property isn't fully supported and
        border_line_dash_offset isn't supported at all. Setting text_alpha will
        modify the opacity of the entire background box and border in addition
        to the text. Finally, clipping Label annotations inside of the plot
        area isn't supported in "css" mode.
    """)
class LegendItem(Model):
    ''' A single entry (label plus associated glyph renderers) in a ``Legend``.
    '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if isinstance(self.label, str):
            # Allow convenience of setting label as a string
            self.label = value(self.label)
    label = StringSpec(default=None, help="""
    A label for this legend. Can be a string, or a column of a
    ColumnDataSource. If ``label`` is a field, then it must
    be in the renderers' data_source.
    """)
    renderers = List(Instance(GlyphRenderer), help="""
    A list of the glyph renderers to draw in the legend. If ``label`` is a field,
    then all data_sources of renderers must be the same.
    """)
    index = Int(default=None, help="""
    The column data index to use for drawing the representative items.
    If None (the default), then Bokeh will automatically choose an index to
    use. If the label does not refer to a data column name, this is typically
    the first data point in the data source. Otherwise, if the label does
    refer to a column name, the legend will have "groupby" behavior, and will
    choose and display representative points from every "group" in the column.
    If set to a number, Bokeh will use that number as the index in all cases.
    """)
    # Validation: a field-based label is looked up in the renderers' data
    # source, so all renderers must share exactly one source.
    @error(NON_MATCHING_DATA_SOURCES_ON_LEGEND_ITEM_RENDERERS)
    def _check_data_sources_on_renderers(self):
        if self.label and 'field' in self.label:
            if len({r.data_source for r in self.renderers}) != 1:
                return str(self)
    # Validation: a field-based label must name a real column in the (shared)
    # data source of the first renderer.
    @error(BAD_COLUMN_NAME)
    def _check_field_label_on_data_source(self):
        if self.label and 'field' in self.label:
            if len(self.renderers) < 1:
                return str(self)
            source = self.renderers[0].data_source
            if self.label.get('field') not in source.column_names:
                return str(self)
class Legend(Annotation):
    ''' Render informational legends for a plot.
    '''
    location = Either(Enum(LegendLocation), Tuple(Float, Float), default="top_right", help="""
    The location where the legend should draw itself. It's either one of
    ``bokeh.core.enums.LegendLocation``'s enumerated values, or a ``(x, y)``
    tuple indicating an absolute location in screen
    coordinates (pixels from the bottom-left corner).
    """)
    orientation = Enum(Orientation, default="vertical", help="""
    Whether the legend entries should be placed vertically or horizontally
    when they are drawn.
    """)
    title = String(help="""
    The title text to render.
    """)
    title_props = Include(ScalarTextProps, help="""
    The %s values for the title text.
    """)
    title_text_font_size = Override(default="13px")
    title_text_font_style = Override(default="italic")
    title_standoff = Int(5, help="""
    The distance (in pixels) to separate the title from the legend.
    """)
    border_props = Include(ScalarLineProps, help="""
    The %s for the legend border outline.
    """)
    border_line_color = Override(default="#e5e5e5")
    border_line_alpha = Override(default=0.5)
    background_props = Include(ScalarFillProps, help="""
    The %s for the legend background style.
    """)
    inactive_props = Include(ScalarFillProps, help="""
    The %s for the legend item style when inactive. These control an overlay
    on the item that can be used to obscure it when the corresponding glyph
    is inactive (e.g. by making it semi-transparent).
    """)
    click_policy = Enum(LegendClickPolicy, default="none", help="""
    Defines what happens when a legend's item is clicked.
    """)
    background_fill_color = Override(default="#ffffff")
    background_fill_alpha = Override(default=0.95)
    inactive_fill_color = Override(default="white")
    inactive_fill_alpha = Override(default=0.7)
    label_props = Include(ScalarTextProps, help="""
    The %s for the legend labels.
    """)
    label_text_baseline = Override(default='middle')
    label_text_font_size = Override(default='13px')
    label_standoff = Int(5, help="""
    The distance (in pixels) to separate the label from its associated glyph.
    """)
    label_height = Int(20, help="""
    The minimum height (in pixels) of the area that legend labels should occupy.
    """)
    label_width = Int(20, help="""
    The minimum width (in pixels) of the area that legend labels should occupy.
    """)
    glyph_height = Int(20, help="""
    The height (in pixels) that the rendered legend glyph should occupy.
    """)
    glyph_width = Int(20, help="""
    The width (in pixels) that the rendered legend glyph should occupy.
    """)
    margin = Int(10, help="""
    Amount of margin around the legend.
    """)
    padding = Int(10, help="""
    Amount of padding around the contents of the legend. Only applicable
    when border is visible, otherwise collapses to 0.
    """)
    spacing = Int(3, help="""
    Amount of spacing (in pixels) between legend entries.
    """)
    items = List(Instance(LegendItem), help="""
    A list of :class:`~bokeh.model.annotations.LegendItem` instances to be
    rendered in the legend.
    This can be specified explicitly, for instance:
    .. code-block:: python
        legend = Legend(items=[
            LegendItem(label="sin(x)" , renderers=[r0, r1]),
            LegendItem(label="2*sin(x)" , renderers=[r2]),
            LegendItem(label="3*sin(x)" , renderers=[r3, r4])
        ])
    But as a convenience, can also be given more compactly as a list of tuples:
    .. code-block:: python
        legend = Legend(items=[
            ("sin(x)" , [r0, r1]),
            ("2*sin(x)" , [r2]),
            ("3*sin(x)" , [r3, r4])
        ])
    where each tuple is of the form: *(label, renderers)*.
    """).accepts(List(Tuple(String, List(Instance(GlyphRenderer)))), lambda items: [LegendItem(label=item[0], renderers=item[1]) for item in items])
class ColorBar(Annotation):
    ''' Render a color bar based on a color mapper.
    '''
    location = Either(Enum(LegendLocation), Tuple(Float, Float),
        default="top_right", help="""
    The location where the color bar should draw itself. It's either one of
    ``bokeh.core.enums.LegendLocation``'s enumerated values, or a ``(x, y)``
    tuple indicating an absolute location in screen
    coordinates (pixels from the bottom-left corner).
    .. warning::
        If the color bar is placed in a side panel, the location will likely
        have to be set to `(0,0)`.
    """)
    orientation = Enum(Orientation, default="vertical", help="""
    Whether the color bar should be oriented vertically or horizontally.
    """)
    height = Either(Auto, Int(), help="""
    The height (in pixels) that the color scale should occupy.
    """)
    width = Either(Auto, Int(), help="""
    The width (in pixels) that the color scale should occupy.
    """)
    scale_alpha = Float(1.0, help="""
    The alpha with which to render the color scale.
    """)
    title = String(help="""
    The title text to render.
    """)
    title_props = Include(ScalarTextProps, help="""
    The %s values for the title text.
    """)
    title_text_font_size = Override(default="13px")
    title_text_font_style = Override(default="italic")
    title_standoff = Int(2, help="""
    The distance (in pixels) to separate the title from the color bar.
    """)
    ticker = Instance(ContinuousTicker, default=lambda: BasicTicker(), help="""
    A Ticker to use for computing locations of axis components.
    """)
    formatter = Instance(TickFormatter, default=lambda: BasicTickFormatter(), help="""
    A ``TickFormatter`` to use for formatting the visual appearance of ticks.
    """)
    major_label_overrides = Dict(Either(Float, String), String, default={}, help="""
    Provide explicit tick label values for specific tick locations that
    override normal formatting.
    """)
    color_mapper = Instance(ContinuousColorMapper, help="""
    A continuous color mapper containing a color palette to render.
    .. warning::
        If the `low` and `high` attributes of the ``ColorMapper`` aren't set, ticks
        and tick labels won't be rendered. Additionally, if a ``LogTicker`` is
        passed to the `ticker` argument and either or both of the logarithms
        of `low` and `high` values of the color_mapper are non-numeric
        (i.e. `low=0`), the tick and tick labels won't be rendered.
    """)
    margin = Int(30, help="""
    Amount of margin (in pixels) around the outside of the color bar.
    """)
    padding = Int(10, help="""
    Amount of padding (in pixels) between the color scale and color bar border.
    """)
    major_label_props = Include(ScalarTextProps, help="""
    The %s of the major tick labels.
    """)
    major_label_text_align = Override(default="center")
    major_label_text_baseline = Override(default="middle")
    major_label_text_font_size = Override(default="11px")
    label_standoff = Int(5, help="""
    The distance (in pixels) to separate the tick labels from the color bar.
    """)
    major_tick_props = Include(ScalarLineProps, help="""
    The %s of the major ticks.
    """)
    major_tick_line_color = Override(default="#ffffff")
    major_tick_in = Int(default=5, help="""
    The distance (in pixels) that major ticks should extend into the
    main plot area.
    """)
    major_tick_out = Int(default=0, help="""
    The distance (in pixels) that major ticks should extend out of the
    main plot area.
    """)
    minor_tick_props = Include(ScalarLineProps, help="""
    The %s of the minor ticks.
    """)
    minor_tick_line_color = Override(default=None)
    minor_tick_in = Int(default=0, help="""
    The distance (in pixels) that minor ticks should extend into the
    main plot area.
    """)
    minor_tick_out = Int(default=0, help="""
    The distance (in pixels) that minor ticks should extend out of the
    main plot area.
    """)
    bar_props = Include(LineProps, help="""
    The %s for the color scale bar outline.
    """)
    bar_line_color = Override(default=None)
    border_props = Include(ScalarLineProps, help="""
    The %s for the color bar border outline.
    """)
    border_line_color = Override(default=None)
    background_props = Include(ScalarFillProps, help="""
    The %s for the color bar background style.
    """)
    background_fill_color = Override(default="#ffffff")
    background_fill_alpha = Override(default=0.95)
class Arrow(Annotation):
    ''' Render arrows as an annotation.
    '''
    x_start = NumberSpec(help="""
    The x-coordinates to locate the start of the arrows.
    """)
    y_start = NumberSpec(help="""
    The y-coordinates to locate the start of the arrows.
    """)
    start_units = Enum(SpatialUnits, default='data', help="""
    The unit type for the x_start and y_start attributes. Interpreted as "data
    space" units by default.
    """)
    start = Instance('.models.arrow_heads.ArrowHead', default=None, help="""
    Instance of ``ArrowHead``.
    """)
    x_end = NumberSpec(help="""
    The x-coordinates to locate the end of the arrows.
    """)
    y_end = NumberSpec(help="""
    The y-coordinates to locate the end of the arrows.
    """)
    end_units = Enum(SpatialUnits, default='data', help="""
    The unit type for the x_end and y_end attributes. Interpreted as "data
    space" units by default.
    """)
    end = Instance('.models.arrow_heads.ArrowHead', default=_DEFAULT_ARROW, help="""
    Instance of ``ArrowHead``.
    """)
    body_props = Include(LineProps, use_prefix=False, help="""
    The %s values for the arrow body.
    """)
    source = Instance(DataSource, help="""
    Local data source to use when rendering annotations on the plot.
    """)
    x_range_name = String('default', help="""
    A particular (named) x-range to use for computing screen locations when
    rendering annotations on the plot. If unset, use the default x-range.
    """)
    y_range_name = String('default', help="""
    A particular (named) y-range to use for computing screen locations when
    rendering annotations on the plot. If unset, use the default y-range.
    """)
class BoxAnnotation(Annotation):
    ''' Render a shaded rectangular region as an annotation.
    '''
    left = Either(Auto, NumberSpec(), default=None, help="""
    The x-coordinates of the left edge of the box annotation.
    Datetime values are also accepted, but note that they are immediately
    converted to milliseconds-since-epoch.
    """)
    left_units = Enum(SpatialUnits, default='data', help="""
    The unit type for the left attribute. Interpreted as "data space" units
    by default.
    """)
    right = Either(Auto, NumberSpec(), default=None, help="""
    The x-coordinates of the right edge of the box annotation.
    Datetime values are also accepted, but note that they are immediately
    converted to milliseconds-since-epoch.
    """)
    right_units = Enum(SpatialUnits, default='data', help="""
    The unit type for the right attribute. Interpreted as "data space" units
    by default.
    """)
    bottom = Either(Auto, NumberSpec(), default=None, help="""
    The y-coordinates of the bottom edge of the box annotation.
    Datetime values are also accepted, but note that they are immediately
    converted to milliseconds-since-epoch.
    """)
    bottom_units = Enum(SpatialUnits, default='data', help="""
    The unit type for the bottom attribute. Interpreted as "data space" units
    by default.
    """)
    top = Either(Auto, NumberSpec(), default=None, help="""
    The y-coordinates of the top edge of the box annotation.
    Datetime values are also accepted, but note that they are immediately
    converted to milliseconds-since-epoch.
    """)
    top_units = Enum(SpatialUnits, default='data', help="""
    The unit type for the top attribute. Interpreted as "data space" units
    by default.
    """)
    x_range_name = String('default', help="""
    A particular (named) x-range to use for computing screen locations when
    rendering box annotations on the plot. If unset, use the default x-range.
    """)
    y_range_name = String('default', help="""
    A particular (named) y-range to use for computing screen locations when
    rendering box annotations on the plot. If unset, use the default y-range.
    """)
    line_props = Include(ScalarLineProps, use_prefix=False, help="""
    The %s values for the box.
    """)
    line_alpha = Override(default=0.3)
    line_color = Override(default="#cccccc")
    fill_props = Include(ScalarFillProps, use_prefix=False, help="""
    The %s values for the box.
    """)
    fill_alpha = Override(default=0.4)
    fill_color = Override(default="#fff9ba")
    render_mode = Enum(RenderMode, default="canvas", help="""
    Specifies whether the box is rendered as a canvas element or as an
    css element overlaid on the canvas. The default mode is "canvas".
    .. note::
        This property is deprecated and will be removed in bokeh 3.0.
    .. warning::
        The line_dash and line_dash_offset attributes aren't supported if
        the render_mode is set to "css"
    """)
class Band(Annotation):
    ''' Render a filled area band along a dimension.
    '''
    lower = PropertyUnitsSpec(default=None, units_type=Enum(SpatialUnits), units_default="data", help="""
    The coordinates of the lower portion of the filled area band.
    """)
    upper = PropertyUnitsSpec(default=None, units_type=Enum(SpatialUnits), units_default="data", help="""
    The coordinates of the upper portion of the filled area band.
    """)
    base = PropertyUnitsSpec(default=None, units_type=Enum(SpatialUnits), units_default="data", help="""
    The orthogonal coordinates of the upper and lower values.
    """)
    dimension = Enum(Dimension, default='height', help="""
    The direction of the band can be specified by setting this property
    to "height" (``y`` direction) or "width" (``x`` direction).
    """)
    source = Instance(DataSource, default=lambda: ColumnDataSource(), help="""
    Local data source to use when rendering annotations on the plot.
    """)
    x_range_name = String('default', help="""
    A particular (named) x-range to use for computing screen locations when
    rendering annotations on the plot. If unset, use the default x-range.
    """)
    y_range_name = String('default', help="""
    A particular (named) y-range to use for computing screen locations when
    rendering annotations on the plot. If unset, use the default y-range.
    """)
    line_props = Include(ScalarLineProps, use_prefix=False, help="""
    The %s values for the band.
    """)
    # Defaults below draw a faint grey outline over a pale, semi-transparent
    # yellow fill.
    line_alpha = Override(default=0.3)
    line_color = Override(default="#cccccc")
    fill_props = Include(ScalarFillProps, use_prefix=False, help="""
    The %s values for the band.
    """)
    fill_alpha = Override(default=0.4)
    fill_color = Override(default="#fff9ba")
class Label(TextAnnotation):
    ''' Render a single text label as an annotation.
    ``Label`` will render a single text label at given ``x`` and ``y``
    coordinates, which can be in either screen (pixel) space, or data (axis
    range) space.
    The label can also be configured with a screen space offset from ``x`` and
    ``y``, by using the ``x_offset`` and ``y_offset`` properties.
    Additionally, the label can be rotated with the ``angle`` property.
    There are also standard text, fill, and line properties to control the
    appearance of the text, its background, as well as the rectangular bounding
    box border.
    '''
    x = Float(help="""
    The x-coordinate in screen coordinates to locate the text anchors.
    Datetime values are also accepted, but note that they are immediately
    converted to milliseconds-since-epoch.
    """).accepts(Datetime, convert_datetime_type)
    # NOTE(review): the help text above says "screen coordinates", but
    # ``x_units`` defaults to 'data' — confirm which wording is intended.
    x_units = Enum(SpatialUnits, default='data', help="""
    The unit type for the x attribute. Interpreted as "data space" units
    by default.
    """)
    y = Float(help="""
    The y-coordinate in screen coordinates to locate the text anchors.
    Datetime values are also accepted, but note that they are immediately
    converted to milliseconds-since-epoch.
    """).accepts(Datetime, convert_datetime_type)
    y_units = Enum(SpatialUnits, default='data', help="""
    The unit type for the y attribute. Interpreted as "data space" units
    by default.
    """)
    text = String(help="""
    The text value to render.
    """)
    angle = Angle(default=0, help="""
    The angle to rotate the text, as measured from the horizontal.
    .. warning::
        The center of rotation for canvas and css render_modes is different.
        For `render_mode="canvas"` the label is rotated from the top-left
        corner of the annotation, while for `render_mode="css"` the annotation
        is rotated around it's center.
    """)
    angle_units = Enum(AngleUnits, default='rad', help="""
    Acceptable values for units are ``"rad"`` and ``"deg"``
    """)
    x_offset = Float(default=0, help="""
    Offset value to apply to the x-coordinate.
    This is useful, for instance, if it is desired to "float" text a fixed
    distance in screen units from a given data position.
    """)
    y_offset = Float(default=0, help="""
    Offset value to apply to the y-coordinate.
    This is useful, for instance, if it is desired to "float" text a fixed
    distance in screen units from a given data position.
    """)
    text_props = Include(ScalarTextProps, use_prefix=False, help="""
    The %s values for the text.
    """)
    background_props = Include(ScalarFillProps, use_prefix=True, help="""
    The %s values for the text bounding box.
    """)
    # Background and border are invisible unless explicitly colored.
    background_fill_color = Override(default=None)
    border_props = Include(ScalarLineProps, use_prefix=True, help="""
    The %s values for the text bounding box.
    """)
    border_line_color = Override(default=None)
    x_range_name = String('default', help="""
    A particular (named) x-range to use for computing screen location when
    rendering an annotation on the plot. If unset, use the default x-range.
    """)
    y_range_name = String('default', help="""
    A particular (named) y-range to use for computing screen location when
    rendering an annotation on the plot. If unset, use the default y-range.
    """)
class LabelSet(TextAnnotation):
    ''' Render multiple text labels as annotations.
    ``LabelSet`` will render multiple text labels at given ``x`` and ``y``
    coordinates, which can be in either screen (pixel) space, or data (axis
    range) space. In this case (as opposed to the single ``Label`` model),
    ``x`` and ``y`` can also be the name of a column from a
    :class:`~bokeh.models.sources.ColumnDataSource`, in which case the labels
    will be "vectorized" using coordinate values from the specified columns.
    The label can also be configured with a screen space offset from ``x`` and
    ``y``, by using the ``x_offset`` and ``y_offset`` properties. These offsets
    may be vectorized by giving the name of a data source column.
    Additionally, the label can be rotated with the ``angle`` property (which
    may also be a column name.)
    There are also standard text, fill, and line properties to control the
    appearance of the text, its background, as well as the rectangular bounding
    box border.
    The data source is provided by setting the ``source`` property.
    '''
    x = NumberSpec(help="""
    The x-coordinates to locate the text anchors.
    """)
    x_units = Enum(SpatialUnits, default='data', help="""
    The unit type for the ``x`` attribute. Interpreted as "data space" units
    by default.
    """)
    y = NumberSpec(help="""
    The y-coordinates to locate the text anchors.
    """)
    y_units = Enum(SpatialUnits, default='data', help="""
    The unit type for the ``y`` attribute. Interpreted as "data space" units
    by default.
    """)
    text = StringSpec("text", help="""
    The text values to render.
    """)
    angle = AngleSpec(default=0, help="""
    The angles to rotate the text, as measured from the horizontal.
    .. warning::
        The center of rotation for canvas and css render_modes is different.
        For `render_mode="canvas"` the label is rotated from the top-left
        corner of the annotation, while for `render_mode="css"` the annotation
        is rotated around it's center.
    """)
    x_offset = NumberSpec(default=0, help="""
    Offset values to apply to the x-coordinates.
    This is useful, for instance, if it is desired to "float" text a fixed
    distance in screen units from a given data position.
    """)
    y_offset = NumberSpec(default=0, help="""
    Offset values to apply to the y-coordinates.
    This is useful, for instance, if it is desired to "float" text a fixed
    distance in screen units from a given data position.
    """)
    text_props = Include(TextProps, use_prefix=False, help="""
    The %s values for the text.
    """)
    background_props = Include(FillProps, use_prefix=True, help="""
    The %s values for the text bounding box.
    """)
    background_fill_color = Override(default=None)
    border_props = Include(LineProps, use_prefix=True, help="""
    The %s values for the text bounding box.
    """)
    border_line_color = Override(default=None)
    source = Instance(DataSource, default=lambda: ColumnDataSource(), help="""
    Local data source to use when rendering annotations on the plot.
    """)
    x_range_name = String('default', help="""
    A particular (named) x-range to use for computing screen locations when
    rendering annotations on the plot. If unset, use the default x-range.
    """)
    y_range_name = String('default', help="""
    A particular (named) y-range to use for computing screen locations when
    rendering annotations on the plot. If unset, use the default y-range.
    """)
class PolyAnnotation(Annotation):
    ''' Render a shaded polygonal region as an annotation.
    '''
    xs = Seq(Float, default=[], help="""
    The x-coordinates of the region to draw.
    """)
    xs_units = Enum(SpatialUnits, default='data', help="""
    The unit type for the ``xs`` attribute. Interpreted as "data space" units
    by default.
    """)
    ys = Seq(Float, default=[], help="""
    The y-coordinates of the region to draw.
    """)
    ys_units = Enum(SpatialUnits, default='data', help="""
    The unit type for the ``ys`` attribute. Interpreted as "data space" units
    by default.
    """)
    x_range_name = String('default', help="""
    A particular (named) x-range to use for computing screen locations when
    rendering box annotations on the plot. If unset, use the default x-range.
    """)
    y_range_name = String('default', help="""
    A particular (named) y-range to use for computing screen locations when
    rendering box annotations on the plot. If unset, use the default y-range.
    """)
    line_props = Include(ScalarLineProps, use_prefix=False, help="""
    The %s values for the polygon.
    """)
    # Default styling matches BoxAnnotation: faint grey outline over a pale,
    # semi-transparent yellow fill.
    line_alpha = Override(default=0.3)
    line_color = Override(default="#cccccc")
    fill_props = Include(ScalarFillProps, use_prefix=False, help="""
    The %s values for the polygon.
    """)
    fill_alpha = Override(default=0.4)
    fill_color = Override(default="#fff9ba")
class Slope(Annotation):
    """ Render a sloped line as an annotation.
    """
    # The line is described by ``y = gradient * x + y_intercept``,
    # with both parameters given in data units.
    gradient = Float(help="""
    The gradient of the line, in data units
    """)
    y_intercept = Float(help="""
    The y intercept of the line, in data units
    """)
    x_range_name = String('default', help="""
    A particular (named) x-range to use for computing screen locations when
    rendering annotations on the plot. If unset, use the default x-range.
    """)
    y_range_name = String('default', help="""
    A particular (named) y-range to use for computing screen locations when
    rendering annotations on the plot. If unset, use the default y-range.
    """)
    line_props = Include(ScalarLineProps, use_prefix=False, help="""
    The %s values for the line.
    """)
class Span(Annotation):
    """ Render a horizontal or vertical line span.
    """
    location = Float(help="""
    The location of the span, along ``dimension``.
    Datetime values are also accepted, but note that they are immediately
    converted to milliseconds-since-epoch.
    """).accepts(Datetime, convert_datetime_type)
    location_units = Enum(SpatialUnits, default='data', help="""
    The unit type for the location attribute. Interpreted as "data space"
    units by default.
    """)
    dimension = Enum(Dimension, default='width', help="""
    The direction of the span can be specified by setting this property
    to "height" (``y`` direction) or "width" (``x`` direction).
    """)
    x_range_name = String('default', help="""
    A particular (named) x-range to use for computing screen locations when
    rendering annotations on the plot. If unset, use the default x-range.
    """)
    y_range_name = String('default', help="""
    A particular (named) y-range to use for computing screen locations when
    rendering annotations on the plot. If unset, use the default y-range.
    """)
    render_mode = Enum(RenderMode, default="canvas", help="""
    Specifies whether the span is rendered as a canvas element or as a
    CSS element overlaid on the canvas. The default mode is "canvas".
    .. note::
        This property is deprecated and will be removed in bokeh 3.0.
    .. warning::
        The line_dash and line_dash_offset attributes aren't supported if
        the render_mode is set to "css"
    """)
    line_props = Include(ScalarLineProps, use_prefix=False, help="""
    The %s values for the span.
    """)
class Title(TextAnnotation):
    ''' Render a single title box as an annotation.
    '''
    text = String(help="""
    The text value to render.
    """)
    vertical_align = Enum(VerticalAlign, default='bottom', help="""
    Alignment of the text in its enclosing space, *across* the direction of the text.
    """)
    align = Enum(TextAlign, default='left', help="""
    Alignment of the text in its enclosing space, *along* the direction of the text.
    """)
    text_line_height = Float(default=1.0, help="""
    How much additional space should be allocated for the title. The value is provided
    as a number, but should be treated as a percentage of font size. The default is
    100%, which means no additional space will be used.
    """)
    offset = Float(default=0, help="""
    Offset the text by a number of pixels (can be positive or negative). Shifts the text in
    different directions based on the location of the title:
    * above: shifts title right
    * right: shifts title down
    * below: shifts title right
    * left: shifts title up
    """)
    text_font = String(default="helvetica", help="""
    Name of a font to use for rendering text, e.g., ``'times'``,
    ``'helvetica'``.
    """)
    text_font_size = FontSizeSpec(default="13px")
    text_font_style = Enum(FontStyle, default="bold", help="""
    A style to use for rendering text.
    Acceptable values are:
    - ``'normal'`` normal text
    - ``'italic'`` *italic text*
    - ``'bold'`` **bold text**
    """)
    text_color = ColorSpec(default="#444444", help="""
    A color to use to fill text with.
    Acceptable values are:
    - any of the 147 named `CSS colors`_, e.g ``'green'``, ``'indigo'``
    - an RGB(A) hex value, e.g., ``'#FF0000'``, ``'#44444444'``
    - a 3-tuple of integers (r,g,b) between 0 and 255
    - a 4-tuple of (r,g,b,a) where r,g,b are integers between 0..255 and a is between 0..1
    .. _CSS colors: http://www.w3schools.com/cssref/css_colornames.asp
    """)
    text_alpha = NumberSpec(default=1.0, help="""
    An alpha value to use to fill text with.
    Acceptable values are floating point numbers between 0 (transparent)
    and 1 (opaque).
    """)
    background_props = Include(ScalarFillProps, use_prefix=True, help="""
    The %s values for the text bounding box.
    """)
    # Background and border are invisible unless explicitly colored.
    background_fill_color = Override(default=None)
    border_props = Include(ScalarLineProps, use_prefix=True, help="""
    The %s values for the text bounding box.
    """)
    border_line_color = Override(default=None)
class Tooltip(Annotation):
    ''' Render a tooltip.

    .. note::
        This model is currently managed by BokehJS and is not useful
        directly from python.

    '''

    # Tooltips render on the "overlay" level so they are drawn on top of
    # all other plot content.
    level = Override(default="overlay")

    attachment = Enum(TooltipAttachment, help="""
    Whether the tooltip should be displayed to the left or right of the cursor
    position or above or below it, or if it should be automatically placed
    in the horizontal or vertical dimension.
    """)

    inner_only = Bool(default=True, help="""
    Whether to display outside a central plot frame area.
    """)

    show_arrow = Bool(default=True, help="""
    Whether tooltip's arrow should be shown.
    """)
class Whisker(Annotation):
    ''' Render a whisker along a dimension.

    '''

    lower = PropertyUnitsSpec(default=None, units_type=Enum(SpatialUnits), units_default="data", help="""
    The coordinates of the lower end of the whiskers.
    """)

    lower_head = Instance('.models.arrow_heads.ArrowHead', default=_DEFAULT_TEE, help="""
    Instance of ``ArrowHead``.
    """)

    upper = PropertyUnitsSpec(default=None, units_type=Enum(SpatialUnits), units_default="data", help="""
    The coordinates of the upper end of the whiskers.
    """)

    upper_head = Instance('.models.arrow_heads.ArrowHead', default=_DEFAULT_TEE, help="""
    Instance of ``ArrowHead``.
    """)

    base = PropertyUnitsSpec(default=None, units_type=Enum(SpatialUnits), units_default="data", help="""
    The orthogonal coordinates of the upper and lower values.
    """)

    dimension = Enum(Dimension, default='height', help="""
    The direction of the whisker can be specified by setting this property
    to "height" (``y`` direction) or "width" (``x`` direction).
    """)

    # NOTE: the default is a lambda so each Whisker instance gets its own
    # fresh ColumnDataSource rather than sharing one mutable instance.
    source = Instance(DataSource, default=lambda: ColumnDataSource(), help="""
    Local data source to use when rendering annotations on the plot.
    """)

    x_range_name = String('default', help="""
    A particular (named) x-range to use for computing screen locations when
    rendering annotations on the plot. If unset, use the default x-range.
    """)

    y_range_name = String('default', help="""
    A particular (named) y-range to use for computing screen locations when
    rendering annotations on the plot. If unset, use the default y-range.
    """)

    line_props = Include(LineProps, use_prefix=False, help="""
    The %s values for the whisker body.
    """)

    # Whiskers default to the "underlay" render level.
    level = Override(default="underlay")
class ToolbarPanel(Annotation): # TODO: this shouldn't be an annotation
    ''' Display a toolbar inside a plot, positioned as an annotation. '''

    toolbar = Instance(".models.tools.Toolbar", help="""
    A toolbar to display.
    """)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| |
"""
Copyright 2016 ElasticBox All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from bson.objectid import ObjectId
import cgi
import json
import logging
import random
import re
import string
import urllib
import urlparse
from datetime import datetime, timedelta
import jwt
from lxml import etree
from onelogin.saml2.auth import OneLogin_Saml2_Auth
from onelogin.saml2.constants import OneLogin_Saml2_Constants
from onelogin.saml2.metadata import OneLogin_Saml2_Metadata
from passlib.hash import sha512_crypt
from tornado.auth import GoogleOAuth2Mixin
from tornado.gen import coroutine, Return
from tornado.web import RequestHandler, HTTPError
from api.v1 import ELASTICKUBE_TOKEN_HEADER, ELASTICKUBE_VALIDATION_TOKEN_HEADER
from api.v1.actions import emails
from data.query import Query
# Work factor for sha512_crypt; also embedded in the stored "rounds" prefix.
ROUNDS = 40000


def _generate_hashed_password(password):
    """Salt and hash *password* with sha512_crypt.

    Returns a dict splitting the crypt digest into its "hash" tail, the
    leading "$...$rounds=...$" prefix (stored under "rounds"), and the
    random "salt" that was appended to the password before hashing.
    """
    # 64-character salt drawn from a CSPRNG (uppercase letters + digits).
    rng = random.SystemRandom()
    alphabet = string.ascii_uppercase + string.digits
    salt = "".join(rng.choice(alphabet) for _ in range(64))

    digest = sha512_crypt.encrypt((password + salt).encode("utf-8"), rounds=ROUNDS)

    # Split the digest around the "$rounds=N$" marker so the two halves can
    # be stored separately and re-joined at verification time.
    marker = "$rounds={0}$".format(ROUNDS)
    prefix, _, tail = digest.partition(marker)

    return {
        "hash": tail,
        "rounds": "{0}{1}".format(prefix, marker),
        "salt": salt,
    }
def _fill_signup_invitation_request(document, firstname, lastname, password=None):
document["firstname"] = firstname
document["lastname"] = lastname
document["email_validated_at"] = datetime.utcnow()
if password is not None:
document["password"] = _generate_hashed_password(password)
class AuthHandler(RequestHandler):
    """Base handler providing the shared session-issuing logic."""

    @coroutine
    def authenticate_user(self, user):
        """Record a login for *user*, mint a signed JWT session token,
        set it as a cookie, and return it via tornado's ``Return``.
        """
        logging.info("Authenticating user '%(username)s'", user)

        # Claims embedded in the session token; "exp" gives it a
        # 30-day lifetime.
        claims = {
            "id": str(user["_id"]),
            "username": user["username"],
            "firstname": user["firstname"],
            "lastname": user["lastname"],
            "email": user["email"],
            "role": user["role"],
            "created": datetime.utcnow().isoformat(),
            "exp": datetime.utcnow() + timedelta(30),
        }

        # Persist the login timestamp before handing out the token.
        user["last_login"] = datetime.utcnow()
        yield self.settings["database"].Users.update({"_id": user["_id"]}, user)

        encoded = jwt.encode(claims, self.settings["secret"], algorithm="HS256")
        self.set_cookie(ELASTICKUBE_TOKEN_HEADER, encoded)

        logging.info("User '%(username)s' authenticated.", user)
        raise Return(encoded)
class AuthProvidersHandler(RequestHandler):
    """Report which authentication providers are currently enabled."""

    @coroutine
    def get(self):
        """Write a JSON map of enabled providers.

        An empty object is returned while no user exists yet, which the
        frontend uses to enable the initial signup flow.
        """
        # If there are no users created then we need to return an empty list
        # of providers to enable the signup flow
        if (yield Query(self.settings["database"], "Users").find_one()) is None:
            self.write({})
            return

        settings = yield Query(self.settings["database"], "Settings").find_one()
        auth_settings = settings["authentication"]

        providers = dict()
        if "google_oauth" in auth_settings:
            providers['google'] = dict(auth_url="/api/v1/auth/google")

        if "saml" in auth_settings:
            providers['saml'] = dict(auth_url="/api/v1/auth/saml")

        if "password" in auth_settings:
            providers['password'] = dict(regex=auth_settings["password"]["regex"])

        # Invited users get their e-mail echoed back so the signup form
        # can be pre-filled.
        validation_token = self.request.headers.get(ELASTICKUBE_VALIDATION_TOKEN_HEADER)
        if validation_token is not None:
            user = yield Query(self.settings["database"], "Users").find_one({"invite_token": validation_token})
            if user is not None and 'email_validated_at' not in user:
                providers['email'] = user[u'email']

        self.write(providers)
class SignupHandler(AuthHandler):
    """Create the very first (admin) user, or complete an invitation signup."""

    @staticmethod
    def _validate_signup_data(data):
        """Ensure the signup payload carries every mandatory field; 400 otherwise."""
        if "email" not in data:
            raise HTTPError(400, reason="Email is required.")

        if "password" not in data:
            raise HTTPError(400, reason="Password is required.")

        if "firstname" not in data:
            raise HTTPError(400, reason="First name is required.")

        if "lastname" not in data:
            raise HTTPError(400, reason="Last name is required.")

        return True

    @coroutine
    def _update_invited_user(self, validation_token, data):
        """Finish signup for a previously invited user.

        Materializes the pending namespace memberships, fills in the
        invitation fields, and returns the updated user document.
        Raises 403 when the token/e-mail pair matches no open invitation.
        """
        user = yield Query(self.settings["database"], "Users").find_one(
            {"invite_token": validation_token, "email": data["email"]})

        if user is not None and "email_validated_at" not in user:
            # Turn the invitation's namespace list into actual memberships.
            for namespace_name in user["namespaces"]:
                namespace = yield Query(self.settings["database"], "Namespaces").find_one({"name": namespace_name})
                if namespace is None:
                    logging.warn("Cannot find namespace %s", namespace_name)
                else:
                    if "members" in namespace:
                        namespace["members"].append(user["username"])
                    else:
                        namespace["members"] = [user["username"]]

                    yield Query(self.settings["database"], "Namespaces").update(namespace)

            del user["namespaces"]
            _fill_signup_invitation_request(
                user, firstname=data["firstname"], lastname=data["lastname"],
                password=data["password"])

            raise Return(user)
        else:
            # BUGFIX: was `message=...`, a kwarg tornado's HTTPError silently
            # ignores; `reason=` is what actually reaches the client.
            raise HTTPError(403, reason="Invitation not found.")

    @coroutine
    def post(self):
        """Handle both invited-user signup and first-admin onboarding."""
        try:
            data = json.loads(self.request.body)
        except Exception:
            # BUGFIX: `reason=` instead of the ignored `message=` kwarg.
            raise HTTPError(400, reason='Invalid JSON')

        validation_token = self.request.headers.get(ELASTICKUBE_VALIDATION_TOKEN_HEADER)
        if validation_token is not None:
            self._validate_signup_data(data)
            user = yield self._update_invited_user(validation_token, data)
            token = yield self.authenticate_user(user)
            self.write(token)
            self.flush()

        # Signup can be used only the first time
        elif (yield Query(self.settings["database"], "Users").find_one()) is not None:
            # BUGFIX: `reason=` instead of the ignored `message=` kwarg.
            raise HTTPError(403, reason="Onboarding already completed.")
        else:
            self._validate_signup_data(data)
            user = dict(
                email=data["email"],
                username=data["email"],
                password=_generate_hashed_password(data["password"]),
                firstname=data["firstname"],
                lastname=data["lastname"],
                role="administrator",
                schema="http://elasticbox.net/schemas/user",
                email_validated_at=datetime.utcnow().isoformat()
            )

            signup_user = yield Query(self.settings["database"], "Users").insert(user)
            token = yield self.authenticate_user(signup_user)
            self.write(token)
            self.flush()
class RequestInviteHandler(AuthHandler):
    """Let a visitor ask the administrator for an invitation by e-mail."""

    @coroutine
    def post(self):
        """Mail the administrator an invite request for the given address."""
        logging.info("Initiating RequestInviteHandler post")

        data = json.loads(self.request.body)
        if "email" not in data:
            raise HTTPError(400, reason="Missing email in body request.")

        settings = yield self.settings["database"].Settings.find_one()
        if "mail" not in settings:
            logging.warning("Mail settings not added")
            raise HTTPError(412, reason="Request invite not available.")

        mail_settings = settings["mail"]
        origin_user = {
            'name': data.get('name', ''),
            'email': data['email']
        }

        # Link the admin can click to pre-fill the invite form.
        invite_address = "{0}/admin/users?invite={1}".format(
            settings["hostname"], cgi.escape(origin_user['email'], quote=True))

        try:
            admin = yield Query(self.settings["database"], "Users").find_one({"role": "administrator"})
            yield emails.send_request_invite_link(
                mail_settings, admin["email"], origin_user, invite_address, settings)
        except Exception:
            logging.exception("Error sending request invite.")
            raise HTTPError(500, reason='Error sending request invite.')

        # Raising HTTPError(200) is this API's way of finishing with a
        # bare status code.
        raise HTTPError(200)
class ResetPasswordHandler(AuthHandler):
    """E-mail a short-lived password-reset link."""

    @coroutine
    def post(self):
        """Send a reset link for the given address.

        Unknown addresses still get a 200 response (only a debug log),
        so the endpoint does not reveal which e-mails are registered.
        """
        logging.info("Initiating ResetPasswordHandler post")

        data = json.loads(self.request.body)
        if "email" not in data:
            raise HTTPError(400, reason="Missing email in body request.")

        email = data["email"]
        user = yield self.settings["database"].Users.find_one({"email": email})
        if not user:
            logging.debug("User with email '%s' not found.", email)
            raise HTTPError(200)

        settings = yield self.settings["database"].Settings.find_one()
        if "mail" not in settings:
            logging.warning("Mail settings not added")
            raise HTTPError(412, reason="Mail settings not added.")

        mail_settings = settings["mail"]

        # The token lives 10 minutes and embeds a fragment of the current
        # password hash, so any password change invalidates it.
        claims = dict(
            id=str(user["_id"]),
            hash=user['password']['hash'][:-16],
            exp=datetime.utcnow() + timedelta(minutes=10)
        )
        reset_token = jwt.encode(claims, self.settings["secret"], algorithm="HS256")

        user_data = {
            'name': user.get('firstname'),
            'email': user['email'],
            'token': reset_token
        }

        try:
            yield emails.send_reset_password_link(mail_settings, user_data, settings)
        except Exception:
            raise HTTPError(500, reason='Error sending reset password email.')

        raise HTTPError(200)
class ChangePasswordHandler(AuthHandler):
    """Apply a new password given a valid reset token."""

    @coroutine
    def post(self):
        """Validate the reset token and store the new password hash."""
        logging.info("Initiating ChangePasswordHandler post")

        data = json.loads(self.request.body)
        if "password" not in data:
            raise HTTPError(400, reason="Missing arguments in change password request.")

        if "token" not in data:
            raise HTTPError(400, reason="Missing arguments in change password request.")

        password = data["password"]
        try:
            # BUGFIX: PyJWT's decode takes `algorithms` (a list); the old
            # `algorithm='HS256'` kwarg was silently ignored, so the
            # accepted-algorithm pin never applied.
            token = jwt.decode(data["token"], self.settings['secret'], algorithms=['HS256'])
        except Exception:
            raise HTTPError(400, reason="Invalid token or token has expired")

        user = yield self.settings["database"].Users.find_one({"_id": ObjectId(token["id"])})
        if not user:
            logging.error("Error trying to change user password for token: '%s'.", token)
            raise HTTPError(200)

        # The token embeds a fragment of the hash it was issued against;
        # a mismatch means the password changed since issuance.
        if not user["password"]["hash"][:-16] == token["hash"]:
            raise HTTPError(400, reason="Invalid token or token has expired")

        user["password"] = _generate_hashed_password(password)
        yield Query(self.settings["database"], "Users").update_fields({"_id": user["_id"]}, {
            "password": user["password"]
        })

        raise HTTPError(200)
class PasswordHandler(AuthHandler):
    """Authenticate with username/password credentials."""

    @coroutine
    def post(self):
        """Verify the credentials and start a session on success."""
        logging.info("Initiating PasswordHandler post")

        data = json.loads(self.request.body)
        for field in ("username", "password"):
            if field not in data:
                raise HTTPError(400, reason="Missing {0} in body request.".format(field))

        username = data["username"]
        password = data["password"]

        user = yield self.settings["database"].Users.find_one({"username": username})
        if not user:
            logging.info("Username '%s' not found.", username)
            raise HTTPError(302, reason='/request-invite')

        if 'email_validated_at' not in user:
            logging.info("Username '%s' not validated.", username)
            raise HTTPError(302, reason='/request-invite')

        if 'password' not in user:
            logging.info("User '%s' has not password.", username)
            raise HTTPError(401, reason="Invalid username or password.")

        # Rebuild the full crypt string from its stored pieces and verify
        # the salted candidate against it.
        stored = user["password"]
        reference = '{0}{1}'.format(stored["rounds"], stored["hash"])
        candidate = (password + stored["salt"]).encode("utf-8")

        if not sha512_crypt.verify(candidate, reference):
            logging.info("Invalid password for user '%s'.", username)
            raise HTTPError(401, reason="Invalid username or password.")

        token = yield self.authenticate_user(user)
        self.write(token)
        self.flush()
class GoogleOAuth2LoginHandler(AuthHandler, GoogleOAuth2Mixin):
    """Google OAuth2 login: redirect to Google, then consume the code."""

    @coroutine
    def get(self):
        """Handle both legs of the OAuth2 dance on the same URL."""
        logging.info("Initiating Google OAuth.")

        settings = yield Query(self.settings["database"], "Settings").find_one()
        google_oauth = settings[u'authentication'].get('google_oauth', None)
        if google_oauth is None:
            raise HTTPError(403, 'Forbidden request')

        # Add OAuth settings for GoogleOAuth2Mixin
        self.settings['google_oauth'] = {
            'key': google_oauth['key'],
            'secret': google_oauth['secret']
        }

        code = self.get_argument('code', False)
        redirect_uri = "{0}/api/v1/auth/google".format(settings["hostname"])
        if code:
            logging.debug("Google redirect received.")

            # Exchange the code for tokens, then fetch the user's profile.
            auth_data = yield self.get_authenticated_user(
                redirect_uri=redirect_uri,
                code=code)

            auth_user = yield self.oauth2_request(
                "https://www.googleapis.com/oauth2/v1/userinfo",
                access_token=auth_data['access_token'])

            if auth_user["verified_email"]:
                user = yield self.settings["database"].Users.find_one({"email": auth_user["email"]})

                # BUGFIX: the name fields come from the userinfo response
                # (auth_user); auth_data is the token-exchange response
                # (access_token etc.) and never carries them, so these were
                # previously always empty.
                firstname = auth_user.get('given_name', auth_user.get('name', ""))
                lastname = auth_user.get('family_name', "")

                # Validate the account if it signed up via OAuth2.
                if user and 'email_validated_at' not in user:
                    logging.debug('User validated via OAuth2 %s', auth_user["email"])
                    _fill_signup_invitation_request(user, firstname=firstname, lastname=lastname, password=None)
                    user = yield Query(self.settings["database"], 'Users').update(user)

                if user:
                    yield self.authenticate_user(user)
                    self.redirect('/')
                else:
                    logging.debug("User '%s' not found", auth_user["email"])
                    self.redirect('/request-invite?account={0}&name={1}'.format(
                        cgi.escape(auth_user["email"], quote=True),
                        cgi.escape("{0} {1}".format(firstname, lastname), quote=True)))
            else:
                logging.info("User email '%s' not verified.", auth_user["email"])
                raise HTTPError(400, "Email is not verified.")
        else:
            logging.debug("Redirecting to google for authentication.")
            yield self.authorize_redirect(
                redirect_uri=redirect_uri,
                client_id=google_oauth['key'],
                scope=['profile', 'email'],
                response_type='code',
                extra_params={'approval_prompt': 'auto'})
class Saml2MetadataHandler(RequestHandler):
    """Serve the SAML Service Provider metadata XML."""

    @coroutine
    def get(self):
        """Write this installation's SP metadata document."""
        logging.info("Initiating SAML 2.0 Metadata get.")

        settings = yield Query(self.settings["database"], "Settings").find_one()
        saml_settings = Saml2MetadataHandler.get_saml_settings(settings)
        security = saml_settings['security']

        self.set_header('Content-Type', 'text/xml')
        self.write(OneLogin_Saml2_Metadata.builder(
            sp=saml_settings['sp'],
            authnsign=security['authnRequestsSigned'],
            wsign=security['wantAssertionsSigned'])
        )
        self.flush()

    @staticmethod
    def get_saml_settings(settings, saml_config=None):
        """Build the python-saml settings dict for this deployment.

        When *saml_config* is provided, the IdP section is filled in from
        it as well.
        """
        hostname = settings["hostname"]
        saml_settings = {
            'sp': {
                'entityId': urlparse.urlparse(hostname).netloc,
                'assertionConsumerService': {
                    'url': "{0}/api/v1/auth/saml".format(hostname),
                    'binding': OneLogin_Saml2_Constants.BINDING_HTTP_POST,
                },
                'NameIDFormat': OneLogin_Saml2_Constants.NAMEID_UNSPECIFIED,
                'attributeConsumingService': {
                    'serviceName': "ElasticKube SAML",
                    'serviceDescription': "ElasticKube SAML Service Provider",
                    'requestedAttributes': [],
                },
            },
            'security': {
                'authnRequestsSigned': False,
                'wantAssertionsSigned': True,
                'wantNameId': True,
            },
        }

        if saml_config is not None:
            saml_settings['idp'] = {
                'entityId': saml_config['idp_entity_id'],
                'singleSignOnService': {
                    'url': saml_config['idp_sso'],
                    'binding': OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT,
                },
                'x509cert': saml_config['idp_cert'],
            }

        return saml_settings
class Saml2LoginHandler(AuthHandler):
    """SAML 2.0 login: redirects to the IdP and consumes its response."""

    NS_IDENTITY_CLAIMS = 'http://schemas.xmlsoap.org/ws/2005/05/identity/claims/'

    # Attribute names probed, in order, for each profile field; different
    # IdPs use different naming schemes.
    EMAIL_ATTRIBUTES = ('email', 'Email', 'User.Email', NS_IDENTITY_CLAIMS + 'email')
    FIRST_NAME_ATTRIBUTES = ('firstname', 'FirstName', 'User.FirstName', NS_IDENTITY_CLAIMS + 'givenname')
    LAST_NAME_ATTRIBUTES = ('lastname', 'LastName', 'User.LastName', NS_IDENTITY_CLAIMS + 'surname')

    # XPath-style locations inside the IdP metadata document.
    IDP_CERT_PATH = "md:IDPSSODescriptor/md:KeyDescriptor[@use='signing']/ds:KeyInfo/ds:X509Data/ds:X509Certificate"
    IDP_SSO_PATH = "md:IDPSSODescriptor/md:SingleSignOnService[@Binding='{0}']".format(
        OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT)

    @staticmethod
    def get_metadata_info(metadata):
        """Extract (entity_id, domain, signing cert, SSO URL) from IdP metadata XML."""
        metadata_xml = etree.fromstring(str(metadata))
        if metadata_xml.tag.endswith('EntitiesDescriptor'):
            metadata_xml = metadata_xml.find("md:EntityDescriptor", namespaces=OneLogin_Saml2_Constants.NSMAP)

        idp_entity_id = metadata_xml.attrib['entityID']
        idp_domain = urlparse.urlparse(idp_entity_id).netloc
        idp_cert = metadata_xml.find(Saml2LoginHandler.IDP_CERT_PATH, namespaces=OneLogin_Saml2_Constants.NSMAP).text
        idp_sso = metadata_xml.find(
            Saml2LoginHandler.IDP_SSO_PATH, namespaces=OneLogin_Saml2_Constants.NSMAP).attrib['Location']

        return (idp_entity_id, idp_domain, idp_cert, idp_sso)

    @coroutine
    def _get_saml_auth(self, request):
        """Build the OneLogin auth object for *request*; 403 if SAML is unconfigured."""
        settings = yield Query(self.settings["database"], "Settings").find_one()
        saml_config = settings[u'authentication'].get('saml', None)
        if saml_config is None:
            raise HTTPError(403, 'Forbidden request')

        netloc = urlparse.urlparse(settings["hostname"]).netloc
        host, _, port = netloc.partition(':')
        saml_request = dict(
            http_host=host,
            script_name=request.path,
            get_data={k: v[0] if len(v) == 1 else v for k, v in request.query_arguments.items()},
            post_data={k: v[0] if len(v) == 1 else v for k, v in request.body_arguments.items()}
        )

        if port:
            saml_request['server_port'] = port

        saml_settings = Saml2MetadataHandler.get_saml_settings(settings, saml_config)
        raise Return(
            (OneLogin_Saml2_Auth(saml_request, saml_settings), "{0}/api/v1/auth/saml".format(settings["hostname"]))
        )

    def _get_attribute(self, attributes, mappings):
        """Return the first non-empty attribute among *mappings*, or ""."""
        for mapping in mappings:
            values = attributes.get(mapping, [])
            if len(values) > 0:
                return values[0].encode('utf8')

        return ""

    @coroutine
    def get(self):
        """Kick off SP-initiated login by redirecting to the IdP."""
        logging.info("Initiating SAML 2.0 Auth.")

        auth, return_to = yield self._get_saml_auth(self.request)
        logging.info("Redirecting to SAML for authentication.")
        self.redirect(auth.login(return_to=return_to))

    @coroutine
    def post(self):
        """Consume the IdP response and sign the user in (or invite them)."""
        logging.info("SAML redirect received.")

        auth, _ = yield self._get_saml_auth(self.request)
        auth.process_response()

        errors = auth.get_errors()
        if len(errors) > 0:
            logging.info("SAML authentication error: '%s'.", auth.get_last_error_reason())
            raise HTTPError(401, reason=auth.get_last_error_reason())

        if not auth.is_authenticated():
            logging.info("SAML user not authenticated.")
            raise HTTPError(401, reason="SAML user not authenticated.")

        attributes = auth.get_attributes()
        logging.debug('SAML Attributes received: {0}'.format(attributes))

        first_name = self._get_attribute(attributes, self.FIRST_NAME_ATTRIBUTES)
        last_name = self._get_attribute(attributes, self.LAST_NAME_ATTRIBUTES)

        # (Dropped an unused Settings/saml fetch here; _get_saml_auth above
        # already enforces that SAML is configured.)
        name_id = auth.get_nameid()
        user_email = self._get_attribute(attributes, self.EMAIL_ATTRIBUTES).lower()
        if not user_email:
            raise HTTPError(401, reason="SAML email attribute is missing.")

        user = yield self.settings["database"].Users.find_one({"saml_id": name_id})

        user_updated = False
        if user and user["email"] != user_email:
            logging.info("User email changed!")
            user["email"] = user_email
            user_updated = True
        elif not user:
            # BUGFIX: escape the address before building the regex — e-mails
            # contain regex metacharacters ('.', '+') that would otherwise
            # corrupt the case-insensitive lookup.
            user = yield self.settings["database"].Users.find_one(
                {"email": re.compile(re.escape(user_email), re.IGNORECASE)})
            if user:
                user["saml_id"] = name_id
                user_updated = True

        # Validate the account if it signed up via SAML.
        if user and 'email_validated_at' not in user:
            logging.debug('User %s (%s) validated via SAML', user_email, name_id)
            user = yield self._update_invited_user(user, attributes)
            user_updated = True

        if user:
            if user_updated:
                user = yield Query(self.settings["database"], 'Users').update(user)

            yield self.authenticate_user(user)
            self.redirect('/')
        else:
            logging.debug("User '%s' (%s) not found", user_email, name_id)

            escaped_name = cgi.escape("{0} {1}".format(first_name, last_name), quote=True)
            if not escaped_name:
                escaped_name = cgi.escape(name_id, quote=True)

            # BUGFIX: removed the stray '&&' so both query parameters parse.
            self.redirect('/request-invite?account={0}&name={1}'.format(
                cgi.escape(user_email, quote=True),
                escaped_name))

    @coroutine
    def _update_invited_user(self, user, attributes):
        """Materialize the invitation's namespace memberships and fill in
        the user's profile from the SAML attributes."""
        for namespace_name in user["namespaces"]:
            namespace = yield Query(self.settings["database"], "Namespaces").find_one({"name": namespace_name})
            if namespace is None:
                logging.warn("Cannot find namespace %s", namespace_name)
            else:
                if "members" in namespace:
                    namespace["members"].append(user["username"])
                else:
                    namespace["members"] = [user["username"]]

                yield Query(self.settings["database"], "Namespaces").update(namespace)

        del user["namespaces"]

        first_name = self._get_attribute(attributes, self.FIRST_NAME_ATTRIBUTES)
        last_name = self._get_attribute(attributes, self.LAST_NAME_ATTRIBUTES)
        _fill_signup_invitation_request(user, firstname=first_name, lastname=last_name, password=None)

        raise Return(user)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.