import oelite.fetch
import oelite.git
import oelite.util
import os
import re
import warnings
import string
import sys
import hashlib
class GitFetcher():
SUPPORTED_SCHEMES = ("git")
SHA1_RE = re.compile("([0-9a-f]{1,40})$")
def __init__(self, uri, d):
if uri.scheme not in self.SUPPORTED_SCHEMES:
raise Exception(
"Scheme %s not supported by oelite.fetch.GitFetcher"%(uri.scheme))
uri.fdepends.append("native:git")
self.uri = uri
try:
protocol = uri.params["protocol"]
except KeyError:
protocol = "git"
self.url = "%s://%s"%(protocol, uri.location)
repo_name = protocol + "_" + \
self.uri.location.rstrip("/").translate(string.maketrans("/", "_"))
if protocol == "file":
self.is_local = True
self.repo = uri.location
else:
self.is_local = False
self.repo = os.path.join(
uri.ingredients, uri.isubdir, 'git', repo_name)
self.mirror_name = repo_name
if self.mirror_name.endswith(".git"):
self.mirror_name = self.mirror_name[:-4]
self.commit = None
self.tag = None
self.branch = None
if "commit" in uri.params:
self.commit = uri.params["commit"]
if not self.SHA1_RE.match(self.commit):
raise oelite.fetch.InvalidURI(
self.uri, "invalid commit id %s"%(repr(self.commit)))
if "tag" in uri.params:
self.tag = uri.params["tag"]
self.signature_name = "git://" + uri.location.replace(
d.get('TOPDIR'), '${TOPDIR}', 1)
if protocol != "git":
self.signature_name += ";protocol=" + protocol
self.signature_name += ";tag=" + self.tag
if "branch" in uri.params:
self.branch = uri.params["branch"]
i = bool(self.commit) + bool(self.tag) + bool(self.branch)
if i == 0:
self.branch = "HEAD"
i = 1
if self.is_local and not hasattr(self, 'dirty'):
self.dirty = True
if "dirty" in uri.params:
if uri.params["dirty"] == "1":
if not self.is_local:
raise oelite.fetch.InvalidURI(
self.uri, "cannot fetch git dirty content from remote")
if self.branch != "HEAD":
raise oelite.fetch.InvalidURI(
self.uri, "can only fetch git dirty content from HEAD")
self.dirty = True
else:
self.dirty = False
elif i != 1:
raise oelite.fetch.InvalidURI(
self.uri, "cannot mix commit, tag and branch parameters")
if self.is_local and self.branch:
uri.dont_cache = True
if not hasattr(self, 'dirty'):
self.dirty = False
elif self.dirty:
assert self.branch == 'HEAD'
repo = oelite.git.GitRepository(self.repo)
dirt = repo.get_dirt()
if not dirt:
self.dirty = False
if self.dirty:
m = hashlib.sha1()
m.update(dirt)
self.dirty_signature = m.hexdigest()
self.dirty_file = os.path.join(
uri.ingredients, uri.isubdir, 'git', "%s~dirty"%(repo_name),
"%s.diff"%(self.dirty_signature))
oelite.util.makedirs(os.path.dirname(self.dirty_file))
with open(self.dirty_file, 'w') as f:
f.write(dirt)
if "track" in uri.params:
self.track = uri.params["track"].split(",")
warnings.warn("track parameter not implemented yet")
else:
self.track = None
repo_name = uri.location.strip("/").split("/")[-1]
if repo_name.endswith(".git"):
repo_name = repo_name[:-4]
if "subdir" in uri.params:
self.dest = uri.params["subdir"]
if self.dest[-1] == "/":
self.dest += repo_name
else:
self.dest = repo_name
self.signatures = d.get("FILE") + ".sig"
self.fetch_signatures = d["__fetch_signatures"]
return
def signature(self):
if self.commit:
return self.commit
elif self.tag:
try:
self._signature = self.fetch_signatures[self.signature_name]
return self._signature
except KeyError:
raise oelite.fetch.NoSignature(self.uri, "signature unknown")
elif self.branch and self.is_local:
repo = oelite.git.GitRepository(self.repo)
if self.branch == 'HEAD':
signature = repo.current_head()
else:
signature = repo.get_head(self.branch)
if signature is None:
raise Exception('no such branch: %s'%(self.branch))
if self.dirty:
m = hashlib.sha1(signature)
m.update(self.dirty_signature)
signature = m.hexdigest()
return signature
elif self.branch:
warnings.warn("fetching git branch head, causing source signature to not be sufficient for proper signature handling (%s)"%(self.uri))
return ""
raise Exception("this should not be reached")
def fetch(self):
if not os.path.exists(self.repo):
if self.is_local:
print "Error: git repository not found: %s"%(self.repo)
return False
if not self.fetch_clone():
return False
repo = oelite.git.GitRepository(self.repo)
if self.is_local:
if not self.has_rev(repo):
return False
else:
if self.branch or not self.has_rev(repo):
if not self.fetch_update(repo):
return False
if self.tag:
commit = repo.get_tag(self.tag)
if not commit:
raise oelite.fetch.FetchError(
self.uri, "unknown tag: %s"%(self.tag))
if not "_signature" in dir(self):
return (self.signature_name, commit)
if (commit != self._signature):
print "Error signature mismatch "+self.tag
print " expected: %s"%self._signature
print " obtained: %s"%commit
return commit == self._signature
return self.has_rev(repo)
def fetch_clone(self):
basedir = os.path.dirname(self.repo)
repodir = os.path.basename(self.repo)
oelite.util.makedirs(basedir)
options = ['--mirror']
if self.uri.params.get('recursive', '0') != '0':
options.append('--recursive')
for url in self.uri.premirrors + [self.url] + self.uri.mirrors:
if not isinstance(url, basestring):
if url[0].endswith("//"):
url = os.path.join(url[0].rstrip("/"), self.mirror_name)
url += ".git"
else:
url = os.path.join(url[0], url[1])
if not self.uri.allow_url(url):
print "Skipping", url
continue
cmd = ['git', 'clone'] + options + [ url, repodir ]
print "Cloning from", url
if oelite.util.shcmd(cmd, dir=basedir) is True:
return True
print "fetching from %s failed"%(url)
print "Error: git clone failed"
return False
def fetch_update(self, repo):
print "Fetching from: %s"%(self.uri.mirrors)
for url in self.uri.premirrors + [self.url] + self.uri.mirrors:
if not isinstance(url, basestring):
if url[0].endswith("//"):
url = os.path.join(url[0].rstrip("/"), self.mirror_name)
url += ".git"
else:
url = os.path.join(url[0], url[1])
if not self.uri.allow_url(url):
print "Skipping", url
continue
print "Updating from %s"%(url)
if not repo.remote_update(url):
continue
if self.has_rev(repo):
return True
print "Error: git update failed (required rev could not be fetched)"
return False
def has_rev(self, repo):
if self.commit:
print "Checking git revision '%s'"%(self.commit)
has_commit = repo.has_commit(self.commit)
if not has_commit:
print "Error: revision not found"
return False
return has_commit
elif self.tag:
print "Checking git tag '%s'"%(self.tag)
has_tag = repo.has_tag(self.tag)
if not has_tag:
print "Error: tag not found"
return False
return has_tag
elif self.branch == 'HEAD':
return True
elif self.branch:
print "Checking branch '%s'"%(self.branch)
has_head = repo.has_head(self.branch)
if not has_head:
print "Error: branch not found"
return False
return has_head
print("Revision not found")
return False
def unpack(self, d):
wc = os.path.join(d.get("SRCDIR"), self.dest)
basedir = os.path.dirname(wc)
oelite.util.makedirs(basedir)
repo = oelite.git.GitRepository(self.repo)
if self.is_local:
clone_cmd = "git clone"
else:
clone_cmd = "git clone --shared"
if self.branch:
branch = repo.resolve_head(self.branch)
if branch:
clone_cmd += " -b %s"%(branch)
elif not (self.is_local and self.branch == 'HEAD'):
print "Error: unable to resolve branch: %s"%(self.branch)
return False
cmd = "%s %s %s"%(clone_cmd, self.repo, wc)
if not oelite.util.shcmd(cmd):
print "Error: git clone failed"
return False
if self.dirty:
cmd = "git apply %s"%(self.dirty_file)
if not oelite.util.shcmd(cmd, wc):
print "Error: git apply of dirty diff file failed"
return False
return True
cmd = "%s --no-checkout %s %s"%(clone_cmd, self.repo, wc)
if not oelite.util.shcmd(cmd):
print "Error: git clone failed"
return False
cmd = "git checkout -q "
if self.commit:
cmd += self.commit
elif self.tag:
cmd += "refs/tags/%s"%(self.tag)
else:
print "Error: WTF! no commit, tag or branch!!"
return False
if not oelite.util.shcmd(cmd, dir=wc):
print "Error: git checkout failed"
return False
return True
def mirror(self, mirror=None):
if mirror is None:
mirror = os.getcwd()
if self.is_local:
# Don't mirror if the source is local. Concept taken from fetch
print "Info: Skipping creating mirror for local path %s"%(self.uri)
return True
path = os.path.join(self.uri.isubdir, "git", self.mirror_name) + ".git"
basedir = os.path.dirname(path)
if not os.path.exists(path):
print "Creating git mirror", path
oelite.util.makedirs(basedir)
options = ['--mirror']
if self.uri.params.get('recursive', '0') != '0':
options.append('--recursive')
cmd = ['git', 'clone'] + options + [self.repo, path]
return oelite.util.shcmd(cmd) is True
else:
print "Updating git mirror", path
cmd = "git remote update --prune"
return oelite.util.shcmd(cmd, dir=path) is True
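# Illustration (not part of the original module; the helper name below is
# hypothetical): a standalone sketch of the mirror repo-name mapping done in
# GitFetcher.__init__ above.  Python 2 only, matching this module
# (string.maketrans was removed from the string module in Python 3).
def _example_repo_name(protocol, location):
    # "/" becomes "_" so the repository can live under a flat directory name
    return protocol + "_" + \
        location.rstrip("/").translate(string.maketrans("/", "_"))

# _example_repo_name("git", "github.com/example/repo.git")
#   -> "git_github.com_example_repo.git"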
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import solve_banded # banded solver
import sys
''' Mixed Hybrid Finite Element solver for moment equations (drift diffusion) '''
class MHFEM:
def __init__(self, xe, Sigmaa, Sigmat, BCL=0, BCR=1, CENT=0):
''' Solves drift diffusion with MHFEM
Inputs:
xe: array of cell edges
Sigmaa: absorption XS function
Sigmat: total XS function
BCL: left boundary
0: reflecting
1: marshak
BCR: right boundary
0: reflecting
1: marshak
CENT: return phi on
0: edges only
1: centers only
2: edges and centers
'''
self.N = np.shape(xe)[0] # number of cell edges
self.n = 2*self.N - 1 # number of rows and columns of A
self.xc = np.zeros(self.N-1) # store cell centers
self.xe = xe # cell edges
# get cell centers
for i in range(self.N-1):
# midpoint between cell edges
self.xc[i] = (self.xe[i] + self.xe[i+1])/2
# combine edge and center points
self.x = np.sort(np.concatenate((self.xc, self.xe)))
# material properties
self.Sigmaa = Sigmaa
self.Sigmat = Sigmat
# boundary conditions
self.BCL = BCL
self.BCR = BCR
self.CENT = CENT
# create banded coefficient matrix, bandwidth 5
# upper diagonals have leading zeros, lower have trailing zeros
# A[0,:] = 2nd upper
# A[1,:] = 1st upper
# A[2,:] = diagonal
# A[3,:] = 1st lower
# A[4,:] = 2nd lower
self.A = np.zeros((5, self.n))
def discretize(self, mu2, B):
''' setup coefficient matrix with MHFEM equations
Inputs:
mu2: array of cell edge and center Eddington factors (set to 1/3 for diffusion)
B: boundary value (set to 1/2 for marshak)
'''
# build equations
for i in range(1, self.n, 2):
hi = self.x[i+1] - self.x[i-1] # cell width, x_i+1/2 - x_i-1/2
beta = 2/(self.Sigmat(self.x[i])*hi)
# balance equation
# lower diagonal
self.A[3,i-1] = -3 * beta * mu2[i-1]
# diagonal term
self.A[2,i] = 6*beta*mu2[i] + self.Sigmaa(self.x[i])*hi
# upper diagonal
self.A[1,i+1] = -3*beta*mu2[i+1]
# phi_i+1/2 equation
if (i != self.n-2):
# cell i+1 width, x_i+3/2 - x_i+1/2
h1 = self.x[i+3] - self.x[i+1]
beta1 = 2/(self.Sigmat(self.x[i+2])*h1)
# second lower (phi_i-1/2)
self.A[4,i-1] = -beta*mu2[i-1]
# first lower (phi_i)
self.A[3,i] = 3*beta*mu2[i]
# diagonal term (phi_i+1/2)
self.A[2,i+1] = -2*(beta + beta1)*mu2[i+1]
# first upper (phi_i+1)
self.A[1,i+2] = 3*beta1*mu2[i+2]
# second upper (phi_i+3/2)
self.A[0,i+3] = -beta1*mu2[i+3]
# boundary conditions
# left
if (self.BCL == 0): # reflecting
h1 = self.x[2] - self.x[0]
beta1 = 2/(self.Sigmat(self.x[1])*h1)
# J_1L = 0
# diagonal (phi_1/2)
self.A[2,0] = -2*beta1*mu2[0]
# first upper (phi_1)
self.A[1,1] = 3*beta1*mu2[1]
# second upper (phi_3/2)
self.A[0,2] = -beta1*mu2[2]
elif (self.BCL == 1): # marshak
h1 = self.x[2] - self.x[0]
beta1 = 2/(self.Sigmat(self.x[1])*h1)
# diagonal (phi_1/2)
self.A[2,0] = -B[0] - 2*beta1*mu2[0]
# first upper (phi_1)
self.A[1,1] = 3*beta1*mu2[1]
# second upper (phi_3/2)
self.A[0,2] = -beta1*mu2[2]
else:
print('\n--- FATAL ERROR: MHFEM left boundary condition not defined ---\n')
sys.exit()
# right
if (self.BCR == 0): # reflecting
hN = self.x[-1] - self.x[-3] # cell N width
betaN = 2/(self.Sigmat(self.x[-2])*hN)
# J_NR = 0
# second lower (phi_N-1/2)
self.A[4,-3] = betaN*mu2[-3]
# first lower (phi_N)
self.A[3,-2] = -3*betaN*mu2[-2]
# diagonal (phi_N+1/2)
self.A[2,-1] = 2*betaN*mu2[-1]
elif (self.BCR == 1): # marshak
hN = self.x[-1] - self.x[-3] # cell N width
betaN = 2/(self.Sigmat(self.x[-2])*hN)
# second lower (phi_N-1/2)
self.A[4,-3] = betaN*mu2[-3]
# first lower (phi_N)
self.A[3,-2] = -3*betaN*mu2[-2]
# diagonal (phi_N+1/2)
self.A[2,-1] = B[-1] + 2*betaN*mu2[-1]
else:
print('\n --- FATAL ERROR: MHFEM right boundary condition not defined ---\n')
sys.exit()
def getEdges(self, phi):
''' Convert a combined edge and center array to edges only '''
# get edge values
phiEdge = np.zeros(self.N)
ii = 0
for i in range(0, self.n, 2):
phiEdge[ii] = phi[i]
ii += 1
return phiEdge
def getCenters(self, phi):
''' Convert a combined edge and center array to centers only '''
# get center values
phiCent = np.zeros(self.N-1)
ii = 0
for i in range(1, self.n, 2):
phiCent[ii] = phi[i]
ii += 1
return phiCent
def solve(self, q, qq):
''' Compute phi = A^-1 q with banded solver
Inputs:
q: cell centered array of source terms
qq: cell centered array of first moment of source
'''
# check q and qq are cell centered
if (np.shape(q)[0] != self.N-1):
print('\n--- FATAL ERROR: MHFEM q must be cell centered ---\n')
sys.exit()
if (np.shape(qq)[0] != self.N-1):
print('\n--- FATAL ERROR: MHFEM qq must be cell centered ---\n')
sys.exit()
ii = 0 # store iterations of q
b = np.zeros(self.n) # store source vector
# set odd equations to the source, leave even as zero
for i in range(1, self.n, 2):
b[i] = q[ii] * (self.x[i+1] - self.x[i-1])
ii += 1
# set even equations to use first moment of q
ii = 0
for i in range(1, self.n-2, 2):
beta = 2/(self.Sigmat(self.x[i]))
beta1 = 2/(self.Sigmat(self.x[i+2]))
b[i+1] = .5*(beta1*qq[ii+1] - beta*qq[ii])
ii += 1
# set boundary b
beta1 = 2/(self.Sigmat(self.x[1]))
b[0] = beta1/2*qq[0]
betaN = 2/(self.Sigmat(self.x[-2]))
b[-1] = betaN/2*qq[-1]
# solve for flux
# solve banded matrix
phi = solve_banded((2,2), self.A, b)
# check solution
# self.checkSolution(phi, self.mu2f(self.x), q)
if (self.CENT == 0): # return edges only
return self.xe, self.getEdges(phi)
elif (self.CENT == 1): # return centers only
return self.xc, self.getCenters(phi)
else: # return edges and centers
return self.x, phi
def checkSolution(self, phi, mu2, q):
''' Check for continuity and conservation
Only supports isotropic q (qq = 0)
'''
# continuity
Jl = np.zeros(self.N - 1)
Jr = np.zeros(self.N - 1)
phiEdge = self.getEdges(phi)
muEdge = self.getEdges(mu2)
Fedge = phiEdge * muEdge
phiCent = self.getCenters(phi)
muCent = self.getCenters(mu2)
Fcent = phiCent * muCent
for i in range(self.N-1):
h = self.xe[i+1] - self.xe[i]
Jr[i] = -2/(self.Sigmat(self.xc[i])*h) * (
Fedge[i] - 3*Fcent[i] + 2*Fedge[i+1])
Jl[i] = -2/(self.Sigmat(self.xc[i])*h) * (
-2*Fedge[i] + 3*Fcent[i] - Fedge[i+1])
cont = np.zeros(self.N - 2)
for i in range(1, self.N - 1):
cont[i-1] = np.fabs((Jl[i] - Jr[i-1])/Jr[i-1])
# conservation
balance = np.zeros(self.N-1)
for i in range(self.N-1):
h = self.xe[i+1] - self.xe[i]
qq = q[i] * h # source in cell i (q is cell centered, length N-1)
balance[i] = np.fabs(Jr[i] - Jl[i] +
self.Sigmaa(self.xc[i])*phiCent[i]*h - qq)
balance[i] /= qq
tol = 1e-3
if (np.max(cont) > tol):
print('--- WARNING: MHFEM continuity of current broken ---')
if (np.max(balance) > tol):
print('--- WARNING: MHFEM conservation broken ---')
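# A minimal usage sketch (assumed, not part of the original file): a
# homogeneous slab with a flat isotropic source.  In the diffusion limit
# every Eddington factor is 1/3 and the Marshak boundary constant is 1/2,
# as the discretize docstring above notes.
if __name__ == '__main__':
    xe = np.linspace(0, 10, 51)                 # 50 cells
    solver = MHFEM(xe, lambda x: 0.1, lambda x: 1.0, BCL=0, BCR=1, CENT=0)
    solver.discretize(np.ones(solver.n)/3, np.ones(solver.n)/2)
    q = np.ones(solver.N - 1)                   # cell centered source
    qq = np.zeros(solver.N - 1)                 # isotropic: no first moment
    x, phi = solver.solve(q, qq)
    plt.plot(x, phi)
    plt.xlabel('x')
    plt.ylabel('phi')
    plt.show()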
"""Unittests for heapq."""
import random
import unittest
from test import support
import sys
# We do a bit of trickery here to be able to test both the C implementation
# and the Python implementation of the module.
import heapq as c_heapq
py_heapq = support.import_fresh_module('heapq', blocked=['_heapq'])
class TestHeap(unittest.TestCase):
module = None
def test_push_pop(self):
# 1) Push 256 random numbers and pop them off, verifying all's OK.
heap = []
data = []
self.check_invariant(heap)
for i in range(256):
item = random.random()
data.append(item)
self.module.heappush(heap, item)
self.check_invariant(heap)
results = []
while heap:
item = self.module.heappop(heap)
self.check_invariant(heap)
results.append(item)
data_sorted = data[:]
data_sorted.sort()
self.assertEqual(data_sorted, results)
# 2) Check that the invariant holds for a sorted array
self.check_invariant(results)
self.assertRaises(TypeError, self.module.heappush, [])
try:
self.assertRaises(TypeError, self.module.heappush, None, None)
self.assertRaises(TypeError, self.module.heappop, None)
except AttributeError:
pass
def check_invariant(self, heap):
# Check the heap invariant.
for pos, item in enumerate(heap):
if pos: # pos 0 has no parent
parentpos = (pos-1) >> 1
self.assertTrue(heap[parentpos] <= item)
def test_heapify(self):
for size in range(30):
heap = [random.random() for dummy in range(size)]
self.module.heapify(heap)
self.check_invariant(heap)
self.assertRaises(TypeError, self.module.heapify, None)
def test_naive_nbest(self):
data = [random.randrange(2000) for i in range(1000)]
heap = []
for item in data:
self.module.heappush(heap, item)
if len(heap) > 10:
self.module.heappop(heap)
heap.sort()
self.assertEqual(heap, sorted(data)[-10:])
def heapiter(self, heap):
# An iterator returning a heap's elements, smallest-first.
try:
while 1:
yield self.module.heappop(heap)
except IndexError:
pass
def test_nbest(self):
# Less-naive "N-best" algorithm, much faster (if len(data) is big
# enough <wink>) than sorting all of data. However, if we had a max
# heap instead of a min heap, it could go faster still via
# heapify'ing all of data (linear time), then doing 10 heappops
# (10 log-time steps).
data = [random.randrange(2000) for i in range(1000)]
heap = data[:10]
self.module.heapify(heap)
for item in data[10:]:
if item > heap[0]: # this gets rarer the longer we run
self.module.heapreplace(heap, item)
self.assertEqual(list(self.heapiter(heap)), sorted(data)[-10:])
self.assertRaises(TypeError, self.module.heapreplace, None)
self.assertRaises(TypeError, self.module.heapreplace, None, None)
self.assertRaises(IndexError, self.module.heapreplace, [], None)
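# A sketch (illustration only, not part of the original suite) of the
# faster max-heap variant described in the comment in test_nbest above:
# heapify a negated copy in linear time, then pop the n largest in n
# log-time steps.
def nbest_via_negation(self, data, n=10):
    neg = [-x for x in data]           # min heap over negated values
    self.module.heapify(neg)           # O(len(data))
    return [-self.module.heappop(neg) for _ in range(n)]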
def test_nbest_with_pushpop(self):
data = [random.randrange(2000) for i in range(1000)]
heap = data[:10]
self.module.heapify(heap)
for item in data[10:]:
self.module.heappushpop(heap, item)
self.assertEqual(list(self.heapiter(heap)), sorted(data)[-10:])
self.assertEqual(self.module.heappushpop([], 'x'), 'x')
def test_heappushpop(self):
h = []
x = self.module.heappushpop(h, 10)
self.assertEqual((h, x), ([], 10))
h = [10]
x = self.module.heappushpop(h, 10.0)
self.assertEqual((h, x), ([10], 10.0))
self.assertEqual(type(h[0]), int)
self.assertEqual(type(x), float)
h = [10]
x = self.module.heappushpop(h, 9)
self.assertEqual((h, x), ([10], 9))
h = [10]
x = self.module.heappushpop(h, 11)
self.assertEqual((h, x), ([11], 10))
def test_heapsort(self):
# Exercise everything with repeated heapsort checks
for trial in range(100):
size = random.randrange(50)
data = [random.randrange(25) for i in range(size)]
if trial & 1: # Half of the time, use heapify
heap = data[:]
self.module.heapify(heap)
else: # The rest of the time, use heappush
heap = []
for item in data:
self.module.heappush(heap, item)
heap_sorted = [self.module.heappop(heap) for i in range(size)]
self.assertEqual(heap_sorted, sorted(data))
def test_merge(self):
inputs = []
for i in range(random.randrange(5)):
row = sorted(random.randrange(1000) for j in range(random.randrange(10)))
inputs.append(row)
self.assertEqual(sorted(chain(*inputs)), list(self.module.merge(*inputs)))
self.assertEqual(list(self.module.merge()), [])
def test_merge_stability(self):
class Int(int):
pass
inputs = [[], [], [], []]
for i in range(20000):
stream = random.randrange(4)
x = random.randrange(500)
obj = Int(x)
obj.pair = (x, stream)
inputs[stream].append(obj)
for stream in inputs:
stream.sort()
result = [i.pair for i in self.module.merge(*inputs)]
self.assertEqual(result, sorted(result))
def test_nsmallest(self):
data = [(random.randrange(2000), i) for i in range(1000)]
for f in (None, lambda x: x[0] * 547 % 2000):
for n in (0, 1, 2, 10, 100, 400, 999, 1000, 1100):
self.assertEqual(list(self.module.nsmallest(n, data)),
sorted(data)[:n])
self.assertEqual(list(self.module.nsmallest(n, data, key=f)),
sorted(data, key=f)[:n])
def test_nlargest(self):
data = [(random.randrange(2000), i) for i in range(1000)]
for f in (None, lambda x: x[0] * 547 % 2000):
for n in (0, 1, 2, 10, 100, 400, 999, 1000, 1100):
self.assertEqual(list(self.module.nlargest(n, data)),
sorted(data, reverse=True)[:n])
self.assertEqual(list(self.module.nlargest(n, data, key=f)),
sorted(data, key=f, reverse=True)[:n])
class TestHeapPython(TestHeap):
module = py_heapq
# As an early adopter, we sanity check the
# test.support.import_fresh_module utility function
def test_pure_python(self):
self.assertFalse(sys.modules['heapq'] is self.module)
self.assertTrue(hasattr(self.module.heapify, '__code__'))
class TestHeapC(TestHeap):
module = c_heapq
def test_comparison_operator(self):
# Issue 3501: Make sure heapq works with classes defining only __lt__;
# for Python 3.0, __le__ alone is not enough.
def hsort(data, comp):
data = [comp(x) for x in data]
self.module.heapify(data)
return [self.module.heappop(data).x for i in range(len(data))]
class LT:
def __init__(self, x):
self.x = x
def __lt__(self, other):
return self.x > other.x
class LE:
def __init__(self, x):
self.x = x
def __le__(self, other):
return self.x >= other.x
data = [random.random() for i in range(100)]
target = sorted(data, reverse=True)
self.assertEqual(hsort(data, LT), target)
self.assertRaises(TypeError, hsort, data, LE)
# As an early adopter, we sanity check the
# test.support.import_fresh_module utility function
def test_accelerated(self):
self.assertTrue(sys.modules['heapq'] is self.module)
self.assertFalse(hasattr(self.module.heapify, '__code__'))
#==============================================================================
class LenOnly:
"Dummy sequence class defining __len__ but not __getitem__."
def __len__(self):
return 10
class GetOnly:
"Dummy sequence class defining __getitem__ but not __len__."
def __getitem__(self, ndx):
return 10
class CmpErr:
"Dummy element that always raises an error during comparison"
def __eq__(self, other):
raise ZeroDivisionError
__ne__ = __lt__ = __le__ = __gt__ = __ge__ = __eq__
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
from itertools import chain
def L(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, R(Ig(G(seqn)))))
class TestErrorHandling(unittest.TestCase):
# only for C implementation
module = c_heapq
def test_non_sequence(self):
for f in (self.module.heapify, self.module.heappop):
self.assertRaises(TypeError, f, 10)
for f in (self.module.heappush, self.module.heapreplace,
self.module.nlargest, self.module.nsmallest):
self.assertRaises(TypeError, f, 10, 10)
def test_len_only(self):
for f in (self.module.heapify, self.module.heappop):
self.assertRaises(TypeError, f, LenOnly())
for f in (self.module.heappush, self.module.heapreplace):
self.assertRaises(TypeError, f, LenOnly(), 10)
for f in (self.module.nlargest, self.module.nsmallest):
self.assertRaises(TypeError, f, 2, LenOnly())
def test_get_only(self):
for f in (self.module.heapify, self.module.heappop):
self.assertRaises(TypeError, f, GetOnly())
for f in (self.module.heappush, self.module.heapreplace):
self.assertRaises(TypeError, f, GetOnly(), 10)
for f in (self.module.nlargest, self.module.nsmallest):
self.assertRaises(TypeError, f, 2, GetOnly())
def test_cmp_err(self):
seq = [CmpErr(), CmpErr(), CmpErr()]
for f in (self.module.heapify, self.module.heappop):
self.assertRaises(ZeroDivisionError, f, seq)
for f in (self.module.heappush, self.module.heapreplace):
self.assertRaises(ZeroDivisionError, f, seq, 10)
for f in (self.module.nlargest, self.module.nsmallest):
self.assertRaises(ZeroDivisionError, f, 2, seq)
def test_arg_parsing(self):
for f in (self.module.heapify, self.module.heappop,
self.module.heappush, self.module.heapreplace,
self.module.nlargest, self.module.nsmallest):
self.assertRaises(TypeError, f, 10)
def test_iterable_args(self):
for f in (self.module.nlargest, self.module.nsmallest):
for s in ("123", "", range(1000), (1, 1.2), range(2000,2200,5)):
for g in (G, I, Ig, L, R):
self.assertEqual(list(f(2, g(s))), list(f(2,s)))
self.assertEqual(list(f(2, S(s))), [])
self.assertRaises(TypeError, f, 2, X(s))
self.assertRaises(TypeError, f, 2, N(s))
self.assertRaises(ZeroDivisionError, f, 2, E(s))
#==============================================================================
def test_main(verbose=None):
test_classes = [TestHeapPython, TestHeapC, TestErrorHandling]
support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
if __name__ == "__main__":
test_main(verbose=True)
# -*- coding: utf-8 -*-
"""
Examples of plots and calculations using the tmm package.
"""
from __future__ import division, print_function, absolute_import
from tmm.tmm_core import (coh_tmm, unpolarized_RT, ellips,
position_resolved, find_in_structure_with_inf)
from numpy import pi, linspace, inf, array, sqrt
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
try:
import colorpy.illuminants
import colorpy.colormodels
from . import color
colors_were_imported = True
except ImportError:
# without colorpy, you can't run sample5(), but everything else is fine.
colors_were_imported = False
# "5 * degree" is 5 degrees expressed in radians
# "1.2 / degree" is 1.2 radians expressed in degrees
degree = pi/180
def sample0():
"""
Here's a thick absorbing layer between two non-absorbing media. Plots the
s, p, and unpolarized reflected and transmitted fractions versus wavenumber
at 80 degrees incidence.
"""
# list of layer thicknesses in nm
d_list = [inf, 10*1.0E3, inf]
# list of refractive indices (equivalent to eps = 2+0.1j)
# refractive_index = 1.414655159+0.035344j
# epsilon = refractive_index*refractive_index
# print('refractive index',refractive_index, 'epsilon ',epsilon)
n_list = [sqrt(1.1), 1.414655159+0.035344j, sqrt(1.5)]
# list of wavenumbers to plot in nm^-1
cm_1 = linspace(100.0,400.0,100)
ks = cm_1 / 1e7
# initialize lists of y-values to plot
R_s = []
R_p = []
T_s = []
T_p = []
R_sp = []
T_sp = []
print('Frequency_cm1,', 'R_s,','R_p,','T_s,','T_p')
for k in ks:
# s and p polarizations at 80 degrees incidence
result_s = coh_tmm('s', n_list, d_list, 80*degree, 1/k)
result_p = coh_tmm('p', n_list, d_list, 80*degree, 1/k)
result_sp = unpolarized_RT(n_list, d_list, 80*degree, 1/k)
# print(k,result_s)
R_s.append(result_s['R'])
T_s.append(result_s['T'])
R_p.append(result_p['R'])
T_p.append(result_p['T'])
R_sp.append(result_sp['R'])
T_sp.append(result_sp['T'])
print(k*1e7,',', R_s[-1],',',R_p[-1],',',T_s[-1],',',T_p[-1])
# Rnorm.append(coh_tmm('s', n_list, d_list, 0, 1/k)['R'])
# R45.append(unpolarized_RT(n_list, d_list, 45*degree, 1/k)['R'])
kcm = ks * 1e7 #ks in cm^-1 rather than nm^-1
plt.figure()
plt.plot(kcm, R_s, 'blue', kcm, R_p, 'purple',kcm, R_sp, 'black')
plt.xlabel('k (cm$^{-1}$)')
plt.ylabel('Fraction reflected')
plt.title('Reflection of light at 80$^\circ$: R_s (blue), '
'R_p (purple), R_sp (black)')
plt.show()
def sample1():
"""
Here's a thin non-absorbing layer, on top of a thick absorbing layer, with
air on both sides. Plotting reflected intensity versus wavenumber, at two
different incident angles.
"""
# list of layer thicknesses in nm
d_list = [inf, 100, 300, inf]
# list of refractive indices
n_list = [1, 2.2, 3.3+0.3j, 1]
# list of wavenumbers to plot in nm^-1
ks = linspace(0.0001, .01, num=400)
# initialize lists of y-values to plot
Rnorm = []
R45 = []
for k in ks:
# For normal incidence, s and p polarizations are identical.
# I arbitrarily decided to use 's'.
Rnorm.append(coh_tmm('s', n_list, d_list, 0, 1/k)['R'])
R45.append(unpolarized_RT(n_list, d_list, 45*degree, 1/k)['R'])
kcm = ks * 1e7 #ks in cm^-1 rather than nm^-1
plt.figure()
plt.plot(kcm, Rnorm, 'blue', kcm, R45, 'purple')
plt.xlabel('k (cm$^{-1}$)')
plt.ylabel('Fraction reflected')
plt.title('Reflection of unpolarized light at 0$^\circ$ incidence (blue), '
'45$^\circ$ (purple)')
plt.show()
def sample2():
"""
Here's the transmitted intensity versus wavelength through a single-layer
film which has some complicated wavelength-dependent index of refraction.
(I made these numbers up, but in real life they could be read out of a
graph / table published in the literature.) Air is on both sides of the
film, and the light is normally incident.
"""
#index of refraction of my material: wavelength in nm versus index.
material_nk_data = array([[200, 2.1+0.1j],
[300, 2.4+0.3j],
[400, 2.3+0.4j],
[500, 2.2+0.4j],
[750, 2.2+0.5j]])
material_nk_fn = interp1d(material_nk_data[:,0].real,
material_nk_data[:,1], kind='quadratic')
d_list = [inf, 300, inf] #in nm
lambda_list = linspace(200, 750, 400) #in nm
T_list = []
for lambda_vac in lambda_list:
n_list = [1, material_nk_fn(lambda_vac), 1]
T_list.append(coh_tmm('s', n_list, d_list, 0, lambda_vac)['T'])
plt.figure()
plt.plot(lambda_list, T_list)
plt.xlabel('Wavelength (nm)')
plt.ylabel('Fraction of power transmitted')
plt.title('Transmission at normal incidence')
plt.show()
def sample3():
"""
Here is a calculation of the psi and Delta parameters measured in
ellipsometry. This reproduces Fig. 1.14 in Handbook of Ellipsometry by
Tompkins, 2005.
"""
n_list = [1, 1.46, 3.87+0.02j]
ds = linspace(0, 1000, num=100) #in nm
psis = []
Deltas = []
for d in ds:
e_data = ellips(n_list, [inf, d, inf], 70*degree, 633) #in nm
psis.append(e_data['psi']/degree) # angle in degrees
Deltas.append(e_data['Delta']/degree) # angle in degrees
plt.figure()
plt.plot(ds, psis, ds, Deltas)
plt.xlabel('SiO2 thickness (nm)')
plt.ylabel('Ellipsometric angles (degrees)')
plt.title('Ellipsometric parameters for air/SiO2/Si, varying '
'SiO2 thickness.\n'
'@ 70$^\circ$, 633nm. '
'Should agree with Handbook of Ellipsometry Fig. 1.14')
plt.show()
def sample4():
"""
Here is an example where we plot absorption and Poynting vector
as a function of depth.
"""
d_list = [inf, 100, 300, inf] #in nm
n_list = [1, 2.2+0.2j, 3.3+0.3j, 1]
th_0 = pi/4
lam_vac = 400
pol = 'p'
coh_tmm_data = coh_tmm(pol, n_list, d_list, th_0, lam_vac)
ds = linspace(-50, 400, num=1000) #position in structure
poyn = []
absor = []
for d in ds:
layer, d_in_layer = find_in_structure_with_inf(d_list, d)
data = position_resolved(layer, d_in_layer, coh_tmm_data)
poyn.append(data['poyn'])
absor.append(data['absor'])
# convert data to numpy arrays for easy scaling in the plot
poyn = array(poyn)
absor = array(absor)
plt.figure()
plt.plot(ds, poyn, 'blue', ds, 200*absor, 'purple')
plt.xlabel('depth (nm)')
plt.ylabel('AU')
plt.title('Local absorption (purple), Poynting vector (blue)')
plt.show()
def sample5():
"""
Color calculations: What color is an air / thin SiO2 / Si wafer?
"""
if not colors_were_imported:
print('Colorpy was not detected (or perhaps an error occurred when',
'loading it). You cannot do color calculations, sorry!',
'Original version is at http://pypi.python.org/pypi/colorpy',
'A Python 3 compatible edit is at https://github.com/fish2000/ColorPy/')
return
# Crystalline silicon refractive index. Data from Palik via
# http://refractiveindex.info, I haven't checked it, but this is just for
# demonstration purposes anyway.
Si_n_data = [[400, 5.57 + 0.387j],
[450, 4.67 + 0.145j],
[500, 4.30 + 7.28e-2j],
[550, 4.08 + 4.06e-2j],
[600, 3.95 + 2.57e-2j],
[650, 3.85 + 1.64e-2j],
[700, 3.78 + 1.26e-2j]]
Si_n_data = array(Si_n_data)
Si_n_fn = interp1d(Si_n_data[:,0], Si_n_data[:,1], kind='linear')
# SiO2 refractive index (approximate): 1.46 regardless of wavelength
SiO2_n_fn = lambda wavelength : 1.46
# air refractive index
air_n_fn = lambda wavelength : 1
n_fn_list = [air_n_fn, SiO2_n_fn, Si_n_fn]
th_0 = 0
# Print the colors, and show plots, for the special case of 300nm-thick SiO2
d_list = [inf, 300, inf]
reflectances = color.calc_reflectances(n_fn_list, d_list, th_0)
illuminant = colorpy.illuminants.get_illuminant_D65()
spectrum = color.calc_spectrum(reflectances, illuminant)
color_dict = color.calc_color(spectrum)
print('air / 300nm SiO2 / Si --- rgb =', color_dict['rgb'], ', xyY =', color_dict['xyY'])
plt.figure()
color.plot_reflectances(reflectances,
title='air / 300nm SiO2 / Si -- '
'Fraction reflected at each wavelength')
plt.figure()
color.plot_spectrum(spectrum,
title='air / 300nm SiO2 / Si -- '
'Reflected spectrum under D65 illumination')
# Calculate irgb color (i.e. gamma-corrected sRGB display color rounded to
# integers 0-255) versus thickness of SiO2
max_SiO2_thickness = 600
SiO2_thickness_list = linspace(0, max_SiO2_thickness, num=80)
irgb_list = []
for SiO2_d in SiO2_thickness_list:
d_list = [inf, SiO2_d, inf]
reflectances = color.calc_reflectances(n_fn_list, d_list, th_0)
illuminant = colorpy.illuminants.get_illuminant_D65()
spectrum = color.calc_spectrum(reflectances, illuminant)
color_dict = color.calc_color(spectrum)
irgb_list.append(color_dict['irgb'])
# Plot those colors
print('Making color vs SiO2 thickness graph. Compare to (for example)')
print('http://www.htelabs.com/appnotes/sio2_color_chart_thermal_silicon_dioxide.htm')
plt.figure()
plt.plot([0, max_SiO2_thickness], [1, 1])
plt.xlim(0, max_SiO2_thickness)
plt.ylim(0, 1)
plt.xlabel('SiO2 thickness (nm)')
plt.yticks([])
plt.title('Air / SiO2 / Si color vs SiO2 thickness')
for i in range(len(SiO2_thickness_list)):
# One strip of each color, centered at x=SiO2_thickness_list[i]
if i == 0:
x0 = 0
else:
x0 = (SiO2_thickness_list[i] + SiO2_thickness_list[i-1]) / 2
if i == len(SiO2_thickness_list) - 1:
x1 = max_SiO2_thickness
else:
x1 = (SiO2_thickness_list[i] + SiO2_thickness_list[i+1]) / 2
y0 = 0
y1 = 1
poly_x = [x0, x1, x1, x0]
poly_y = [y0, y0, y1, y1]
color_string = colorpy.colormodels.irgb_string_from_irgb(irgb_list[i])
plt.fill(poly_x, poly_y, color_string, edgecolor=color_string)
plt.show()
def sample6():
"""
An example reflection plot with a surface plasmon resonance (SPR) dip.
Compare with http://doi.org/10.2320/matertrans.M2010003 ("Spectral and
Angular Responses of Surface Plasmon Resonance Based on the Kretschmann
Prism Configuration") Fig 6a
"""
# list of layer thicknesses in nm
d_list = [inf, 5, 30, inf]
# list of refractive indices
n_list = [1.517, 3.719+4.362j, 0.130+3.162j, 1]
# wavelength in nm
lam_vac = 633
# list of angles to plot
theta_list = linspace(30*degree, 60*degree, num=300)
# initialize lists of y-values to plot
Rp = []
for theta in theta_list:
Rp.append(coh_tmm('p', n_list, d_list, theta, lam_vac)['R'])
plt.figure()
plt.plot(theta_list/degree, Rp, 'blue')
plt.xlabel('theta (degree)')
plt.ylabel('Fraction reflected')
plt.xlim(30, 60)
plt.ylim(0, 1)
plt.title('Reflection of p-polarized light with Surface Plasmon Resonance\n'
'Compare with http://doi.org/10.2320/matertrans.M2010003 Fig 6a')
plt.show()
sample0()
"""
Note: test_email_function is skipped below; unclear how to implement it.
"""
import unittest
from django.contrib.auth.models import User, Group
from django.test import TestCase
from theme.models import UserProfile
from django.core.exceptions import ValidationError
from hs_core import hydroshare
from hs_dictionary.models import UncategorizedTerm
class CreateAccountTest(TestCase):
def setUp(self):
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
def test_basic_superuser(self):
username, first_name, last_name, password = 'shaunjl', 'shaun','joseph','mypass'
user = hydroshare.create_account(
'shaun@gmail.com',
username=username,
first_name=first_name,
last_name=last_name,
superuser=True,
password=password,
active=True
)
users_in_db = User.objects.all()
db_user = users_in_db[0]
self.assertEqual(user.email, db_user.email)
self.assertEqual(user.username, db_user.username)
self.assertEqual(user.first_name, db_user.first_name)
self.assertEqual(user.last_name, db_user.last_name)
self.assertEqual(user.password, db_user.password)
self.assertEqual(user.is_superuser, db_user.is_superuser)
self.assertEqual(user.is_active, db_user.is_active)
self.assertTrue(db_user.is_active)
self.assertTrue(user.is_active)
self.assertTrue(db_user.is_superuser)
self.assertTrue(user.is_superuser)
def test_basic_user(self):
username, first_name, last_name, password = 'shaunjl', 'shaun','joseph','mypass'
user = hydroshare.create_account(
'shaun@gmail.com',
username=username,
first_name=first_name,
last_name=last_name,
superuser=False,
password=password,
active=True
)
users_in_db = User.objects.all()
db_user = users_in_db[0]
self.assertEqual(user.email, db_user.email)
self.assertEqual(user.username, db_user.username)
self.assertEqual(user.first_name, db_user.first_name)
self.assertEqual(user.last_name, db_user.last_name)
self.assertEqual(user.password, db_user.password)
self.assertEqual(user.is_superuser, db_user.is_superuser)
self.assertEqual(user.is_active, db_user.is_active)
self.assertTrue(db_user.is_active)
self.assertTrue(user.is_active)
self.assertFalse(db_user.is_superuser)
self.assertFalse(user.is_superuser)
def test_with_groups(self):
groups = []
username, first_name, last_name, password = 'shaunjl', 'shaun', 'joseph', 'mypass'
user = hydroshare.create_account(
'shaun@gmail.com',
username=username,
first_name=first_name,
last_name=last_name,
groups=groups
)
g0 = user.uaccess.create_group(title='group0', description='This is group0')
g1 = user.uaccess.create_group(title='group1', description='This is group1')
g2 = user.uaccess.create_group(title='group2', description='This is group2')
# Group membership has no guaranteed order, so compare as sets
# (addresses the TODO from @alvacouch about the order assumption).
user_groups = set(Group.objects.filter(g2ugp__user=user))
self.assertEqual({g0, g1, g2}, user_groups)
def test_with_organizations(self):
organizations = ['org with, comma', 'another org', 'single']
organization = ';'.join(organizations)
username, first_name, last_name, password = 'shaunjl', 'shaun', 'joseph', 'mypass'
hydroshare.create_account(
'shaun@gmail.com',
username=username,
first_name=first_name,
last_name=last_name,
organization=organization
)
user = UserProfile.objects.filter(user__username='shaunjl').first()
self.assertEqual(user.organization, 'org with, comma;another org;single')
terms = UncategorizedTerm.objects.all()
self.assertEqual(3, terms.count())
for term in terms:
self.assertTrue(term.name in organizations)
def test_with_usertype_country_state(self):
username = 'testuser'
usertype = 'University Faculty'
country = 'United States'
state = 'NC'
hydroshare.create_account(
'testuser@gmail.com',
username=username,
first_name='Test',
last_name='User',
organization='TestOrg',
user_type=usertype,
country=country,
state=state
)
user = UserProfile.objects.filter(user__username=username).first()
self.assertEqual(user.user_type, usertype)
self.assertEqual(user.country, country)
self.assertEqual(user.state, state)
def test_case_in_username(self):
username, first_name, last_name, password = 'shaunjl', 'shaun','joseph','mypass'
user = hydroshare.create_account(
'shaun@gmail.com',
username=username,
first_name=first_name,
last_name=last_name,
superuser=False,
password=password,
active=True
)
users_in_db = User.objects.all()
db_user = users_in_db[0]
self.assertEqual(user.email, db_user.email)
self.assertEqual(user.username, db_user.username)
self.assertEqual(user.first_name, db_user.first_name)
self.assertEqual(user.last_name, db_user.last_name)
self.assertEqual(user.password, db_user.password)
self.assertEqual(user.is_superuser, db_user.is_superuser)
self.assertEqual(user.is_active, db_user.is_active)
self.assertTrue(db_user.is_active)
self.assertTrue(user.is_active)
self.assertFalse(db_user.is_superuser)
self.assertFalse(user.is_superuser)
username, first_name, last_name, password = 'sHaUnJl', 'shaun', 'joseph', 'mypass'
try:
user = hydroshare.create_account(
'other@gmail.com',
username=username,
first_name=first_name,
last_name=last_name,
superuser=False,
password=password,
active=True
)
self.fail("Should not be able to create an account with case insensitivie matching "
"usernames")
except ValidationError as v:
self.assertEqual("['User with provided username already exists.']", str(v))
pass
def test_case_in_email(self):
username, first_name, last_name, password = 'shaunjl', 'shaun','joseph','mypass'
user = hydroshare.create_account(
'shaun@gmail.com',
username=username,
first_name=first_name,
last_name=last_name,
superuser=False,
password=password,
active=True
)
users_in_db = User.objects.all()
db_user = users_in_db[0]
self.assertEqual(user.email, db_user.email)
self.assertEqual(user.username, db_user.username)
self.assertEqual(user.first_name, db_user.first_name)
self.assertEqual(user.last_name, db_user.last_name)
self.assertEqual(user.password, db_user.password)
self.assertEqual(user.is_superuser, db_user.is_superuser)
self.assertEqual(user.is_active, db_user.is_active)
self.assertTrue(db_user.is_active)
self.assertTrue(user.is_active)
self.assertFalse(db_user.is_superuser)
self.assertFalse(user.is_superuser)
username, first_name, last_name, password = 'other', 'shaun', 'joseph', 'mypass'
try:
user = hydroshare.create_account(
'ShAuN@gmail.com',
username=username,
first_name=first_name,
last_name=last_name,
superuser=False,
password=password,
active=True
)
self.fail("Should not be able to create an account with case insensitive matching "
"emails")
except ValidationError as v:
self.assertEqual("['User with provided email already exists.']", str(v))
pass
@unittest.skip
def test_email_function(self):
pass
import unittest
import jsmin
import sys
class JsTests(unittest.TestCase):
def _minify(self, js):
return jsmin.jsmin(js)
def assertEqual(self, thing1, thing2):
if thing1 != thing2:
raise AssertionError("%r != %r" % (thing1, thing2))
return True
def assertMinified(self, js_input, expected, **kwargs):
minified = jsmin.jsmin(js_input, **kwargs)
assert minified == expected, "%r != %r" % (minified, expected)
def testQuoted(self):
js = r'''
Object.extend(String, {
interpret: function(value) {
return value == null ? '' : String(value);
},
specialChar: {
'\b': '\\b',
'\t': '\\t',
'\n': '\\n',
'\f': '\\f',
'\r': '\\r',
'\\': '\\\\'
}
});
'''
expected = r"""Object.extend(String,{interpret:function(value){return value==null?'':String(value);},specialChar:{'\b':'\\b','\t':'\\t','\n':'\\n','\f':'\\f','\r':'\\r','\\':'\\\\'}});"""
self.assertMinified(js, expected)
def testSingleComment(self):
js = r'''// use native browser JS 1.6 implementation if available
if (Object.isFunction(Array.prototype.forEach))
Array.prototype._each = Array.prototype.forEach;
if (!Array.prototype.indexOf) Array.prototype.indexOf = function(item, i) {
// hey there
function() {// testing comment
foo;
//something something
location = 'http://foo.com;'; // goodbye
}
//bye
'''
expected = r"""
if(Object.isFunction(Array.prototype.forEach))
Array.prototype._each=Array.prototype.forEach;if(!Array.prototype.indexOf)Array.prototype.indexOf=function(item,i){ function(){ foo; location='http://foo.com;';}"""
# print expected
self.assertMinified(js, expected)
def testEmpty(self):
self.assertMinified('', '')
self.assertMinified(' ', '')
self.assertMinified('\n', '')
self.assertMinified('\r\n', '')
self.assertMinified('\t', '')
def testMultiComment(self):
js = r"""
function foo() {
print('hey');
}
/*
if(this.options.zindex) {
this.originalZ = parseInt(Element.getStyle(this.element,'z-index') || 0);
this.element.style.zIndex = this.options.zindex;
}
*/
another thing;
"""
expected = r"""function foo(){print('hey');}
another thing;"""
self.assertMinified(js, expected)
def testLeadingComment(self):
js = r"""/* here is a comment at the top
it ends here */
function foo() {
alert('crud');
}
"""
expected = r"""function foo(){alert('crud');}"""
self.assertMinified(js, expected)
def testBlockCommentStartingWithSlash(self):
self.assertMinified('A; /*/ comment */ B', 'A;B')
def testBlockCommentEndingWithSlash(self):
self.assertMinified('A; /* comment /*/ B', 'A;B')
def testLeadingBlockCommentStartingWithSlash(self):
self.assertMinified('/*/ comment */ A', 'A')
def testLeadingBlockCommentEndingWithSlash(self):
self.assertMinified('/* comment /*/ A', 'A')
def testEmptyBlockComment(self):
self.assertMinified('/**/ A', 'A')
def testBlockCommentMultipleOpen(self):
self.assertMinified('/* A /* B */ C', 'C')
def testJustAComment(self):
self.assertMinified(' // a comment', '')
def test_issue_10(self):
js = '''
files = [{name: value.replace(/^.*\\\\/, '')}];
// comment
A
'''
expected = '''files=[{name:value.replace(/^.*\\\\/,'')}]; A'''
self.assertMinified(js, expected)
def testRe(self):
js = r'''
var str = this.replace(/\\./g, '@').replace(/"[^"\\\n\r]*"/g, '');
return (/^[,:{}\[\]0-9.\-+Eaeflnr-u \n\r\t]*$/).test(str);
});'''
expected = r"""var str=this.replace(/\\./g,'@').replace(/"[^"\\\n\r]*"/g,'');return(/^[,:{}\[\]0-9.\-+Eaeflnr-u \n\r\t]*$/).test(str);});"""
self.assertMinified(js, expected)
def testIgnoreComment(self):
js = r"""
var options_for_droppable = {
overlap: options.overlap,
containment: options.containment,
tree: options.tree,
hoverclass: options.hoverclass,
onHover: Sortable.onHover
}
var options_for_tree = {
onHover: Sortable.onEmptyHover,
overlap: options.overlap,
containment: options.containment,
hoverclass: options.hoverclass
}
// fix for gecko engine
Element.cleanWhitespace(element);
"""
expected = r"""var options_for_droppable={overlap:options.overlap,containment:options.containment,tree:options.tree,hoverclass:options.hoverclass,onHover:Sortable.onHover}
var options_for_tree={onHover:Sortable.onEmptyHover,overlap:options.overlap,containment:options.containment,hoverclass:options.hoverclass}
Element.cleanWhitespace(element);"""
self.assertMinified(js, expected)
def testHairyRe(self):
js = r"""
inspect: function(useDoubleQuotes) {
var escapedString = this.gsub(/[\x00-\x1f\\]/, function(match) {
var character = String.specialChar[match[0]];
return character ? character : '\\u00' + match[0].charCodeAt().toPaddedString(2, 16);
});
if (useDoubleQuotes) return '"' + escapedString.replace(/"/g, '\\"') + '"';
return "'" + escapedString.replace(/'/g, '\\\'') + "'";
},
toJSON: function() {
return this.inspect(true);
},
unfilterJSON: function(filter) {
return this.sub(filter || Prototype.JSONFilter, '#{1}');
},
"""
expected = r"""inspect:function(useDoubleQuotes){var escapedString=this.gsub(/[\x00-\x1f\\]/,function(match){var character=String.specialChar[match[0]];return character?character:'\\u00'+match[0].charCodeAt().toPaddedString(2,16);});if(useDoubleQuotes)return'"'+escapedString.replace(/"/g,'\\"')+'"';return"'"+escapedString.replace(/'/g,'\\\'')+"'";},toJSON:function(){return this.inspect(true);},unfilterJSON:function(filter){return this.sub(filter||Prototype.JSONFilter,'#{1}');},"""
self.assertMinified(js, expected)
def testLiteralRe(self):
js = r"""
myString.replace(/\\/g, '/');
console.log("hi");
"""
expected = r"""myString.replace(/\\/g,'/');console.log("hi");"""
self.assertMinified(js, expected)
js = r''' return /^data:image\//i.test(url) ||
/^(https?|ftp|file|about|chrome|resource):/.test(url);
'''
expected = r'''return /^data:image\//i.test(url)||/^(https?|ftp|file|about|chrome|resource):/.test(url);'''
self.assertMinified(js, expected)
def testNoBracesWithComment(self):
js = r"""
onSuccess: function(transport) {
var js = transport.responseText.strip();
if (!/^\[.*\]$/.test(js)) // TODO: improve sanity check
throw 'Server returned an invalid collection representation.';
this._collection = eval(js);
this.checkForExternalText();
}.bind(this),
onFailure: this.onFailure
});
"""
expected = r"""onSuccess:function(transport){var js=transport.responseText.strip();if(!/^\[.*\]$/.test(js))
throw'Server returned an invalid collection representation.';this._collection=eval(js);this.checkForExternalText();}.bind(this),onFailure:this.onFailure});"""
self.assertMinified(js, expected)
def testSpaceInRe(self):
js = r"""
num = num.replace(/ /g,'');
"""
self.assertMinified(js, "num=num.replace(/ /g,'');")
def testEmptyString(self):
js = r'''
function foo('') {
}
'''
self.assertMinified(js, "function foo(''){}")
def testDoubleSpace(self):
js = r'''
var foo = "hey";
'''
self.assertMinified(js, 'var foo="hey";')
def testLeadingRegex(self):
js = r'/[d]+/g '
self.assertMinified(js, js.strip())
def testLeadingString(self):
js = r"'a string in the middle of nowhere'; // and a comment"
self.assertMinified(js, "'a string in the middle of nowhere';")
def testSingleCommentEnd(self):
js = r'// a comment\n'
self.assertMinified(js, '')
def testInputStream(self):
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
ins = StringIO(r'''
function foo('') {
}
''')
outs = StringIO()
m = jsmin.JavascriptMinify()
m.minify(ins, outs)
output = outs.getvalue()
assert output == "function foo(''){}"
def testUnicode(self):
instr = u'\u4000 //foo'
expected = u'\u4000'
output = jsmin.jsmin(instr)
self.assertEqual(output, expected)
def testCommentBeforeEOF(self):
self.assertMinified("//test\r\n", "")
def testCommentInObj(self):
self.assertMinified("""{
a: 1,//comment
}""", "{a:1,}")
def testCommentInObj2(self):
self.assertMinified("{a: 1//comment\r\n}", "{a:1\n}")
def testImplicitSemicolon(self):
# return \n 1 is equivalent with return; 1
# so best make sure jsmin retains the newline
self.assertMinified("return;//comment\r\na", "return;a")
def testImplicitSemicolon2(self):
self.assertMinified("return//comment...\r\na", "return\na")
def testSingleComment2(self):
self.assertMinified('x.replace(/\//, "_")// slash to underscore',
'x.replace(/\//,"_")')
def testSlashesNearComments(self):
original = '''
{ a: n / 2, }
// comment
'''
expected = '''{a:n/2,}'''
self.assertMinified(original, expected)
def testReturn(self):
original = '''
return foo;//comment
return bar;'''
expected = 'return foo; return bar;'
self.assertMinified(original, expected)
def test_space_plus(self):
original = '"s" + ++e + "s"'
expected = '"s"+ ++e+"s"'
self.assertMinified(original, expected)
def test_no_final_newline(self):
original = '"s"'
expected = '"s"'
self.assertMinified(original, expected)
def test_space_with_regex_repeats(self):
original = '/(NaN| {2}|^$)/.test(a)&&(a="M 0 0");'
self.assertMinified(original, original) # there should be nothing jsmin can do here
def test_space_with_regex_repeats_not_at_start(self):
original = 'aaa;/(NaN| {2}|^$)/.test(a)&&(a="M 0 0");'
self.assertMinified(original, original) # there should be nothing jsmin can do here
def test_space_in_regex(self):
original = '/a (a)/.test("a")'
self.assertMinified(original, original)
def test_angular_1(self):
original = '''var /** holds major version number for IE or NaN for real browsers */
msie,
jqLite, // delay binding since jQuery could be loaded after us.'''
minified = jsmin.jsmin(original)
self.assertTrue('var msie' in minified)
def test_angular_2(self):
original = 'var/* comment */msie;'
expected = 'var msie;'
self.assertMinified(original, expected)
def test_angular_3(self):
original = 'var /* comment */msie;'
expected = 'var msie;'
self.assertMinified(original, expected)
def test_angular_4(self):
original = 'var /* comment */ msie;'
expected = 'var msie;'
self.assertMinified(original, expected)
def test_angular_5(self):
original = 'a/b'
self.assertMinified(original, original)
def testBackticks(self):
original = '`test`'
self.assertMinified(original, original, quote_chars="'\"`")
original = '` test with leading whitespace`'
self.assertMinified(original, original, quote_chars="'\"`")
original = '`test with trailing whitespace `'
self.assertMinified(original, original, quote_chars="'\"`")
original = '''`test
with a new line`'''
self.assertMinified(original, original, quote_chars="'\"`")
original = '''dumpAvStats: function(stats) {
var statsString = "";
if (stats.mozAvSyncDelay) {
statsString += `A/V sync: ${stats.mozAvSyncDelay} ms `;
}
if (stats.mozJitterBufferDelay) {
statsString += `Jitter-buffer delay: ${stats.mozJitterBufferDelay} ms`;
}
return React.DOM.div(null, statsString);'''
expected = 'dumpAvStats:function(stats){var statsString="";if(stats.mozAvSyncDelay){statsString+=`A/V sync: ${stats.mozAvSyncDelay} ms `;}\nif(stats.mozJitterBufferDelay){statsString+=`Jitter-buffer delay: ${stats.mozJitterBufferDelay} ms`;}\nreturn React.DOM.div(null,statsString);'
self.assertMinified(original, expected, quote_chars="'\"`")
def testBackticksExpressions(self):
original = '`Fifteen is ${a + b} and not ${2 * a + b}.`'
self.assertMinified(original, original, quote_chars="'\"`")
original = '''`Fifteen is ${a +
b} and not ${2 * a + "b"}.`'''
self.assertMinified(original, original, quote_chars="'\"`")
def testBackticksTagged(self):
original = 'tag`Hello ${ a + b } world ${ a * b}`;'
self.assertMinified(original, original, quote_chars="'\"`")
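# Quick usage sketch (illustrative; behaviour inferred from the tests above,
# e.g. testDoubleSpace and testLeadingString):
#   >>> import jsmin
#   >>> jsmin.jsmin('var foo = "hey"; // comment')
#   'var foo="hey";'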
if __name__ == '__main__':
unittest.main()
"""
Component to count within automations.
For more details about this component, please refer to the documentation
at https://home-assistant.io/components/counter/
"""
import asyncio
import logging
import os
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.config import load_yaml_config_file
from homeassistant.const import (ATTR_ENTITY_ID, CONF_ICON, CONF_NAME)
from homeassistant.core import callback
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import async_get_last_state
from homeassistant.loader import bind_hass
_LOGGER = logging.getLogger(__name__)
ATTR_INITIAL = 'initial'
ATTR_STEP = 'step'
CONF_INITIAL = 'initial'
CONF_STEP = 'step'
DEFAULT_INITIAL = 0
DEFAULT_STEP = 1
DOMAIN = 'counter'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
SERVICE_DECREMENT = 'decrement'
SERVICE_INCREMENT = 'increment'
SERVICE_RESET = 'reset'
SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
cv.slug: vol.Any({
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(CONF_INITIAL, default=DEFAULT_INITIAL):
cv.positive_int,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_STEP, default=DEFAULT_STEP): cv.positive_int,
}, None)
})
}, extra=vol.ALLOW_EXTRA)
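# Example configuration.yaml entry (illustrative values; keys taken from the
# schema above):
#
# counter:
#   my_counter:
#     name: "My counter"
#     initial: 0
#     step: 1
#     icon: mdi:counter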
@bind_hass
def increment(hass, entity_id):
"""Increment a counter."""
hass.add_job(async_increment, hass, entity_id)
@callback
@bind_hass
def async_increment(hass, entity_id):
"""Increment a counter."""
hass.async_add_job(hass.services.async_call(
DOMAIN, SERVICE_INCREMENT, {ATTR_ENTITY_ID: entity_id}))
@bind_hass
def decrement(hass, entity_id):
"""Decrement a counter."""
hass.add_job(async_decrement, hass, entity_id)
@callback
@bind_hass
def async_decrement(hass, entity_id):
"""Decrement a counter."""
hass.async_add_job(hass.services.async_call(
DOMAIN, SERVICE_DECREMENT, {ATTR_ENTITY_ID: entity_id}))
@bind_hass
def reset(hass, entity_id):
"""Reset a counter."""
hass.add_job(async_reset, hass, entity_id)
@callback
@bind_hass
def async_reset(hass, entity_id):
"""Reset a counter."""
hass.async_add_job(hass.services.async_call(
DOMAIN, SERVICE_RESET, {ATTR_ENTITY_ID: entity_id}))
@asyncio.coroutine
def async_setup(hass, config):
"""Set up a counter."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entities = []
for object_id, cfg in config[DOMAIN].items():
if not cfg:
cfg = {}
name = cfg.get(CONF_NAME)
initial = cfg.get(CONF_INITIAL)
step = cfg.get(CONF_STEP)
icon = cfg.get(CONF_ICON)
entities.append(Counter(object_id, name, initial, step, icon))
if not entities:
return False
@asyncio.coroutine
def async_handler_service(service):
"""Handle a call to the counter services."""
target_counters = component.async_extract_from_service(service)
if service.service == SERVICE_INCREMENT:
attr = 'async_increment'
elif service.service == SERVICE_DECREMENT:
attr = 'async_decrement'
elif service.service == SERVICE_RESET:
attr = 'async_reset'
tasks = [getattr(counter, attr)() for counter in target_counters]
if tasks:
yield from asyncio.wait(tasks, loop=hass.loop)
descriptions = yield from hass.async_add_job(
load_yaml_config_file, os.path.join(
os.path.dirname(__file__), 'services.yaml')
)
hass.services.async_register(
DOMAIN, SERVICE_INCREMENT, async_handler_service,
descriptions[SERVICE_INCREMENT], SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_DECREMENT, async_handler_service,
descriptions[SERVICE_DECREMENT], SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_RESET, async_handler_service,
descriptions[SERVICE_RESET], SERVICE_SCHEMA)
yield from component.async_add_entities(entities)
return True
class Counter(Entity):
"""Representation of a counter."""
def __init__(self, object_id, name, initial, step, icon):
"""Initialize a counter."""
self.entity_id = ENTITY_ID_FORMAT.format(object_id)
self._name = name
self._step = step
self._state = self._initial = initial
self._icon = icon
@property
def should_poll(self):
"""If entity should be polled."""
return False
@property
def name(self):
"""Return name of the counter."""
return self._name
@property
def icon(self):
"""Return the icon to be used for this entity."""
return self._icon
@property
def state(self):
"""Return the current value of the counter."""
return self._state
@property
def state_attributes(self):
"""Return the state attributes."""
return {
ATTR_INITIAL: self._initial,
ATTR_STEP: self._step,
}
@asyncio.coroutine
def async_added_to_hass(self):
"""Call when entity about to be added to Home Assistant."""
# If not None, we got an initial value.
if self._state is not None:
return
        state = yield from async_get_last_state(self.hass, self.entity_id)
        # Restore the previous value; the stored state is a string.
        if state is not None:
            self._state = int(state.state)
@asyncio.coroutine
def async_decrement(self):
"""Decrement the counter."""
self._state -= self._step
yield from self.async_update_ha_state()
@asyncio.coroutine
def async_increment(self):
"""Increment a counter."""
self._state += self._step
yield from self.async_update_ha_state()
@asyncio.coroutine
def async_reset(self):
"""Reset a counter."""
self._state = self._initial
yield from self.async_update_ha_state()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Methods to run plink commands on the 1000genomes phase 3 dataset.
"""
import os as _os
import re as _re
import sys as _sys
import pickle as _pickle
from tempfile import mkstemp as _temp
from . import _run
# Set data directories
DATA_DIR = "/godot/1000genomes/1000GP_Phase3"
POPULATIONS = ["ALL", "AFR", "AMR", "EAS", "EUR", "SAS", "ACB", "ASW", "BEB",
"CDX", "CEU", "CHB", "CHS", "CLM", "ESN", "FIN", "GBR", "GIH",
"GWD", "IBS", "ITU", "JPT", "KHV", "LWK", "MSL", "MXL", "PEL",
"PJL", "PUR", "STU", "TSI", "YRI"]
__all__ = ["PLINK", "read_bim"]
###############################################################################
#                   Run plink jobs without repetitive code                    #
###############################################################################
class PLINK(object):
"""A reusable object to run plink jobs on 1000genomes files quickly."""
written_files = {}
bims = {}
def __init__(self, data=None, pop_file=None, plink='plink'):
"""Load population information.
Parameters
----------
data : str, optional
Path to the 1000genomes data files
(e.g. ALL.chr16.phase3_shapeit2_mvncall_integrated_v5a.20130502.genotypes.{bed,bim})
Default set in DATA_DIR hardcoded in this file.
pop_file : str, optional
Path to the 1000 genomes population file
plink : str, optional
Path to plink executable, otherwise searches PATH
"""
self.plink = plink
data = data if data else DATA_DIR
if not _os.path.isdir(data):
            raise ValueError('{} is not an existing directory'.format(data))
self.path = _os.path.abspath(data)
if not pop_file:
pop_file = _os.path.join(
self.path, 'integrated_call_samples_v3.20130502.ALL.panel'
)
individuals = {}
with open(pop_file) as fin:
assert fin.readline() == 'sample\tpop\tsuper_pop\tgender\t\t\n'
for line in fin:
ind, pop, _, _ = line.split('\t')
if pop not in individuals:
individuals[pop] = []
individuals[pop].append(ind)
self.individuals = individuals
self.files = {}
for fl in _os.listdir(self.path):
if 'phase3_' not in fl:
continue
if not fl.endswith('bed'):
continue
if not fl.startswith('ALL'):
continue
chrom = fl.split('.')[1]
if not chrom.startswith('chr'):
continue
fl = _os.path.abspath(_os.path.join(self.path, fl))
root = '.'.join(fl.split('.')[:-1])
bed = _os.path.abspath('{}.bed'.format(root))
bim = _os.path.abspath('{}.bim'.format(root))
fam = _os.path.abspath('{}.fam'.format(root))
assert _os.path.isfile(bed)
assert _os.path.isfile(bim)
assert _os.path.isfile(fam)
c1 = chrom if chrom.startswith('chr') else 'chr' + str(chrom)
c2 = c1[3:]
for c in [c1, c2]:
self.files[c] = {
'root': root,
'bed': bed,
'bim': bim,
'fam': fam,
}
def pop_file(self, populations=None, outfile=None):
"""Write temp file with a list of individuals in population."""
populations = populations if populations else self.individuals.keys()
if isinstance(populations, str):
populations = [populations]
populations = list(populations)
if outfile and _os.path.isfile(outfile):
outfile = _os.path.abspath(outfile)
self.written_files[','.join(populations)] = outfile
return outfile
if ','.join(populations) in self.written_files:
fl = self.written_files[','.join(populations)]
if _os.path.isfile(fl):
return fl
pop_ids = []
bad_pops = []
for pop_i in populations:
if pop_i in self.individuals:
pop_ids += self.individuals[pop_i]
else:
bad_pops.append(pop_i)
if bad_pops:
err = (
"{} are not ancestral populations. Choose one of the following "
"ancestral populations: {}"
).format(bad_pops, POPULATIONS)
raise ValueError(err)
pop_ids = sorted(set(pop_ids))
if not outfile:
_, outfile = _temp(prefix='-'.join(populations), dir='/tmp')
with open(outfile, 'w') as fout:
fout.write('\n'.join(
[' '.join(x) for x in zip(pop_ids, pop_ids)]
))
self.written_files[','.join(populations)] = outfile
return outfile
def bim_file(self, chrom, snps, outfile=None):
"""Filter a bim file to only include SNPs in snps.
Parameters
----------
chrom : str
A chromosome name for the file to work on.
snps : list_of_tuple
A list of snps from list_to_rsid_and_locs():
(rsid, chrom, loc)
outfile : str, optional
A file to write to. A tempfile will be created if none is provided.
Returns
-------
file_path : str
Absolute path to the newly created file.
"""
        if chrom not in self.files:
            raise ValueError(
                '{} is not in our bim files:\n{}'.format(
                    chrom,
                    '\n'.join([i['bim'] for i in self.files.values()])
                )
            )
snps = [(str(name), int(loc)) for name, loc in snps]
if not outfile:
_, outfile = _temp(
prefix='filtered_bim.', suffix='.bim', dir='/tmp'
)
outfile = _os.path.abspath(outfile)
with _run.open_zipped(outfile, 'w') as fout:
for c, name, cm, pos, a1, a2 in read_bim(self.files[chrom]['bim']):
if (name, pos) in snps:
fout.write('\t'.join([c, name, cm, str(pos), a1, a2]) + '\n')
return outfile
def bim_snps(self, chrom, bim_file=None):
"""Return and cache all SNPs in a bim file.
        Note: rsids are only cached when no bim_file is given.
Parameters
----------
chrom : str
bim_file : str, optional
Bim file to work on, otherwise the chromosome is used to pick the
complete file.
Returns
-------
rsids : frozenset
"""
if not bim_file:
if chrom in self.bims:
with open(self.bims[chrom], 'rb') as fin:
return _pickle.load(fin)
try:
bim_file = self.files[chrom]['bim']
except KeyError:
raise KeyError('{} is not in the bim file list'.format(chrom))
_, pfl = _temp()
else:
pfl = None
rsids = []
with open(bim_file) as fin:
for line in fin:
if not line.strip():
continue
rsids.append(line.strip().split('\t')[1])
rsids = frozenset(rsids)
if pfl:
with open(pfl, 'wb') as fout:
_pickle.dump(rsids, fout)
self.bims[chrom] = pfl
return rsids
def many_to_many(self, snps, comp_list, chrom, r2=0.6, populations=None,
window_size=50000, outfile=None,
keep_int_files=False, raise_on_error=False,
logfile=_sys.stderr):
"""Get many-to-many LD information using plink.
Will do a single lookup for all SNPs in snps using plink on a filtered
bim file generated to only contain the SNPs in comp_list.
Parameters
----------
snps : list_of_tuple
list of rsIDs to query in the format:
(rsid, loc) (from pairs_to_lists())
comp_list : list_of_tuple
list of rsIDs to compare to in the format:
(rsid, loc) (from pairs_to_lists())
chrom : str
which chromosome to search
r2 : float, optional
r-squared level to use for filtering
populations : list_of_str, optional
list of populations to include in the analysis
window_size : int, optional
Integer number for window size, default 50kb.
outfile : str, optional
A file root for plink to write to, output will have '.ld' appended
to it. If not provided, a temp file is used and deleted after use.
keep_int_files : bool, optional
Do not delete intermediate SNP files
raise_on_error : bool, optional
if False, will return None if primary SNP missing from bim file
logfile : filehandle, optional
A file like object to write to
Returns
-------
matching_snps : dict
For every matching SNP that beats the r-squared: {
snp: {r2: r-squared, dprime: d-prime, phased: phased-alleles}
}
If plink job fails, returns an empty dictionary.
"""
window_size = _run.get_length(window_size)
if not snps:
logfile.write(
'ERROR: SNPS list for chrom {} empty, skipping.\n'.format(
chrom
)
)
return {}
if not comp_list:
logfile.write(
'ERROR: COMP list for chrom {} empty, skipping.\n'.format(
chrom
)
)
return {}
snps = set(snps)
comp_list = set(comp_list)
bsnps = self.bim_snps(chrom)
all_snps = [i for i, _ in snps | comp_list if i in bsnps]
good = []
bad = []
rsids = {}
for rsid, loc in snps:
if rsid in bsnps:
good.append(rsid)
rsids[rsid] = int(loc)
else:
bad.append(rsid)
if bad:
_sys.stderr.write(
'The following SNPs are not in the bim file and cannot be ' +
'queried:\n{}\n'.format(bad)
)
        del bsnps
# Initialize necessary files
bfile = self.files[chrom]['root']
pop_file = self.pop_file(populations)
if outfile:
del_file = False
else:
_, outfile = _temp(prefix='plink', dir='/tmp')
_os.remove(outfile)
del_file = True
_, snp_file = _temp(prefix='plinksnps', dir='/tmp')
with open(snp_file, 'w') as fout:
fout.write('\n'.join(sorted([snp for snp, _ in snps])) + '\n')
_, comp_file = _temp(prefix='plinkcomp', dir='/tmp')
with open(comp_file, 'w') as fout:
fout.write('\n'.join(sorted(all_snps)) + '\n')
# Build the command
plink_cmnd = (
'{plink} --bfile {bfile} --r2 in-phase dprime '
'--ld-snp-list {snp_file} --extract {comp_file} '
'--ld-window {window} --keep {ind_file} --out {out}'
).format(
plink=self.plink, bfile=bfile, window=window_size,
snp_file=snp_file, comp_file=comp_file, ind_file=pop_file,
out=outfile
)
# Run it
stdout, stderr, code = _run.run(plink_cmnd, raise_on_error)
# Parse the output file
if code != 0:
            logfile.write(
                '{}: plink command failed\n'.format(chrom) +
                'Command: {}\nExit Code: {}\nSTDOUT:\n{}\nSTDERR:\n{}\n'
                .format(plink_cmnd, code, stdout, stderr)
            )
return {}
results = {}
with open(outfile + '.ld') as fin:
# Check header
line = fin.readline().strip()
assert _re.split(r' +', line) == [
'CHR_A', 'BP_A', 'SNP_A', 'CHR_B', 'BP_B',
'SNP_B', 'PHASE', 'R2', 'DP'
]
for line in fin:
f = _re.split(r' +', line.strip())
snp1, snp2, phased = f[2], f[5], f[6]
loc1, loc2 = int(f[1]), int(f[4])
rsquared, dprime = float(f[7]), float(f[8])
if snp1 not in good:
continue
if snp1 == snp2:
continue
if loc1 != rsids[snp1]:
continue
if rsquared < r2:
continue
if snp1 not in results:
results[snp1] = {
'chrom': chrom, 'loc': loc1, 'matches': {}
}
try:
p1, p2 = phased.split('/')
s1a, s2a = p1
s1b, s2b = p2
lookup = {snp1: {s1a: s2a, s1b: s2b},
snp2: {s2a: s1a, s2b: s1b}}
except ValueError:
lookup = {}
results[snp1]['matches'][snp2] = {
'r2': rsquared, 'dprime': dprime, 'loc': loc2,
'phased': phased, 'lookup': lookup
}
if del_file:
for fl in [_os.path.join('/tmp', f) for f in _os.listdir('/tmp')]:
if fl.startswith(outfile):
_os.remove(fl)
if not keep_int_files:
_os.remove(snp_file)
_os.remove(comp_file)
return results
def one_to_many(self, snp, comp_list, chrom, r2=0.6, populations=None,
raise_on_error=False, logfile=_sys.stderr):
"""Get one-to-many LD information using plink.
Parameters
----------
snp : str
rsID of a SNP to query
comp_list : list_of_str
list of rsIDs to compare to
chrom : str
which chromosome to search
r2 : float, optional
r-squared level to use for filtering
populations : list_of_str, optional
list of populations to include in the analysis
raise_on_error : bool, optional
if False, will return None if primary SNP missing from bim file
logfile : filehandle, optional
A file like object to write to
Returns
-------
matching_snps : dict
For every matching SNP that beats the r-squared: {
snp: {r2: r-squared, dprime: d-prime, phased: phased-alleles}
}
If plink job fails, returns an empty dictionary.
"""
_, temp_file = _temp(prefix='plink', dir='/tmp')
pop_file = self.pop_file(populations)
bfile = _os.path.join(
DATA_DIR,
'ALL.{}.phase3_shapeit2_mvncall_integrated_v5a.20130502.genotypes'
.format(chrom)
)
bim = bfile + '.bim'
# We need to include the query SNP in the lookup list
comp_list = list(comp_list)
comp_list.append(snp)
comp_list = sorted(set(comp_list))
# Filter SNPs not in the bim
bim_snps = self.bim_snps(chrom)
bad = []
if snp not in bim_snps:
err = ('Primary SNP {} is not in BIM {}, cannot continue.'
.format(snp, bim))
if raise_on_error:
raise BadSNPError(err)
else:
logfile.write(err + '\n')
return None
        # Iterate over a copy: removing from the list being iterated over
        # would skip elements.
        for s in list(comp_list):
            if s not in bim_snps:
                bad.append(s)
                comp_list.remove(s)
if bad:
_sys.stderr.write(('{} removed from comparison list as not in ' +
'bim file\n').format(bad))
        del bim_snps
# Build the command
plink_cmnd = (
'{plink} --bfile {bfile} --r2 in-phase dprime --ld-snp {snp} '
'--snps {comp_list} --keep {ind_file} --out {tmp}'
).format(
plink=self.plink,
bfile=bfile,
snp=snp, comp_list=' '.join(comp_list),
ind_file=pop_file, tmp=temp_file
)
# Run it
stdout, stderr, code = _run.run(plink_cmnd, raise_on_error)
# Parse the output file
if code != 0:
            logfile.write(
                '{}: plink command failed\n'.format(snp) +
                'Command: {}\nExit Code: {}\nSTDOUT:\n{}\nSTDERR:\n{}\n'
                .format(plink_cmnd, code, stdout, stderr)
            )
return {}
results = {}
with open(temp_file + '.ld') as fin:
# Check header
line = fin.readline().strip()
assert _re.split(r' +', line) == [
'CHR_A', 'BP_A', 'SNP_A', 'CHR_B', 'BP_B',
'SNP_B', 'PHASE', 'R2', 'DP'
]
for line in fin:
f = _re.split(r' +', line.strip())
snp2, phased = f[5], f[6]
rsquared, dprime = float(f[7]), float(f[8])
if snp2 == snp:
continue
if rsquared < r2:
continue
try:
p1, p2 = phased.split('/')
s1a, s2a = p1
s1b, s2b = p2
lookup = {snp: {s1a: s2a, s1b: s2b},
snp2: {s2a: s1a, s2b: s1b}}
except ValueError:
lookup = {}
results[snp2] = {'r2': rsquared, 'dprime': dprime,
'phased': phased, 'lookup': lookup}
_os.remove(temp_file + '.ld')
return results
###############################################################################
# Helper Functions #
###############################################################################
class MissingSNPError(Exception):
    """Exception raised for missing SNPs."""
    pass
class BadSNPError(Exception):
    """Exception raised for bad (unusable) SNPs."""
    pass
def read_bim(bim_file):
"""Yields a tuple for each line in bim_file.
Parameters
----------
bim_file : str
Path to a bim file, can be zipped or an open file handle.
Yields
------
    chromosome : str
name : str
cm : str
Position in centimorgans, usually 0
position : int
allele_1 : str
allele_2 : str
"""
with _run.open_zipped(bim_file) as fin:
for line in fin:
            # Strip the trailing newline so allele_2 comes back clean
            chrom, name, cm, loc, a1, a2 = line.rstrip('\n').split('\t')
yield chrom, name, cm, int(loc), a1, a2
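if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the library): assumes
    # the hardcoded DATA_DIR layout exists and that plink is on PATH. The
    # rsIDs and positions below are hypothetical placeholders.
    plink = PLINK()
    pops = plink.pop_file(['CEU', 'GBR'])    # individuals file for plink --keep
    rsids = plink.bim_snps('chr16')          # frozenset of all rsIDs on chr16
    print('{} SNPs on chr16, population file at {}'.format(len(rsids), pops))
    results = plink.many_to_many(
        snps=[('rs0000001', 1000000)],       # hypothetical query SNP
        comp_list=[('rs0000002', 1005000)],  # hypothetical comparison SNP
        chrom='chr16', r2=0.8, populations=['CEU', 'GBR']
    )
    print(results)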
|
|
from math import floor
import os
from osgeo import ogr
from osgeo import osr
from core import gdalProperties, ShapeDataError
ogr.UseExceptions()
def getfieldindex(layer, fieldname):
index = None
layerdefn = layer.GetLayerDefn()
for i in xrange(layerdefn.GetFieldCount()):
        if layerdefn.GetFieldDefn(i).GetName() == fieldname:
index = i
break
return index
def load_points(shapefile, outSpatialRef=None, fieldtoread=None):
"""
    Returns a list of coordinates from the points in an input shapefile.
Required Argument(s):
- shapefile: The path to a point-geometry shapefile
Optional Argument(s):
- None
Returns:
- points: A list of tuples with the x, y coords for each point in the input shapefile
"""
#TODO Clean up commenting
    value = None  # to prevent reference before assignment
    coordTrans = None  # only set when a reprojection is requested
    if outSpatialRef:
        inSpatialRef = get_ref_from_shapefile(shapefile)
        coordTrans = osr.CoordinateTransformation(inSpatialRef, outSpatialRef)
# Open shapeData
shapeData = ogr.Open(validateShapePath(shapefile))
# Validate shapeData
validateShapeData(shapeData)
# Get the first layer
layer = shapeData.GetLayer()
# if fieldtoread:
    #     fieldindex = getfieldindex(layer, fieldtoread)
# if fieldindex is None:
# raise ShapeDataError("fieldtoread was not found in shape data.")
# Initialize
points = []
# For each point,
for index in xrange(layer.GetFeatureCount()):
# Get
feature = layer.GetFeature(index)
if fieldtoread:
value = feature.GetField(fieldtoread)
geometry = feature.GetGeometryRef()
if coordTrans:
geometry.Transform(coordTrans)
# Make sure that it is a point
if geometry.GetGeometryType() != ogr.wkbPoint:
raise ShapeDataError('This function only accepts point geometry.')
        if value is not None:
pointCoordinates = geometry.GetX(), geometry.GetY(), value
else:
pointCoordinates = geometry.GetX(), geometry.GetY()
points.append(pointCoordinates)
# Cleanup
feature.Destroy()
# Cleanup
shapeData.Destroy()
# Return
return points
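# e.g. (illustrative; the path and field name below are made up):
#     points = load_points('sites.shp', fieldtoread='ELEV')
#     # -> [(x1, y1, elev1), (x2, y2, elev2), ...]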
def read_shapefile_to_points(shapefile, outSpatialRef=None):
shapeData = ogr.Open(validateShapePath(shapefile))
validateShapeData(shapeData)
layer = shapeData.GetLayer()
poly = layer.GetNextFeature()
geom = poly.GetGeometryRef()
if outSpatialRef:
inSpatialRef = get_ref_from_shapefile(shapefile)
coordTrans = osr.CoordinateTransformation(inSpatialRef, outSpatialRef)
geom.Transform(coordTrans)
extent = geom.GetEnvelope()
points = geom.GetGeometryRef(0)
return extent, points.Clone()
def get_ref_from_shapefile(shapefile):
"""
Gets a spatial reference from an input shapefile.
    Required Argument(s):
- shapefile: The path to a shapefile
Optional Argument(s):
- None
Returns:
    - spatialref: The spatial reference of the input shapefile (an osr.SpatialReference object)
"""
# Open shapeData
shapeData = ogr.Open(validateShapePath(shapefile))
# Validate shapeData
validateShapeData(shapeData)
# Get the first layer
layer = shapeData.GetLayer()
    # Get the layer's spatial reference
spatialref = layer.GetSpatialRef()
return spatialref
def getSpatialReferenceFromProj4(proj4):
"""Return GDAL spatial reference object from proj4 string"""
#TODO: docstrings
    # ImportFromProj4 returns an error code, not the reference itself
    spatialref = osr.SpatialReference()
    spatialref.ImportFromProj4(proj4)
    return spatialref
def validateShapePath(shapePath):
"""Validate shapefile extension"""
#TODO: docstrings
return os.path.splitext(str(shapePath))[0] + '.shp'
def validateShapeData(shapeData):
"""Make sure we can access the shapefile"""
#TODO: docstrings
# Make sure the shapefile exists
if not shapeData:
raise ShapeDataError('The shapefile is invalid')
# Make sure there is exactly one layer
if shapeData.GetLayerCount() != 1:
raise ShapeDataError('The shapefile must have exactly one layer')
def check_spatial_refs(srs1, srs2):
    # True when the references differ (i.e. a transformation is needed)
    return srs1 != srs2
def get_px_coords_from_geographic_coords(gdalPropertiesObject, pointcoords):
"""
"""
#TODO docstrings
image = gdalPropertiesObject
# get raster edge coords
left = image.geotransform[0]
top = image.geotransform[3]
right = image.cols * image.geotransform[1] + image.geotransform[0]
bottom = image.rows * image.geotransform[5] + image.geotransform[3]
# calc px coords for each set of point coords
pxcoords = []
for coords in pointcoords:
col = int(floor(image.cols * (coords[0] - left) / (right - left)))
row = int(floor(image.rows * (coords[1] - top) / (bottom - top)))
pxcoords.append((row, col))
return pxcoords
def get_geographic_coords_from_px_coords(gdalPropertiesObject, pxcoords):
"""
uses (row, col) format
"""
#TODO docstrings
image = gdalPropertiesObject
# get raster edge coords
left = image.geotransform[0]
top = image.geotransform[3]
horz_px_size = image.geotransform[1]
vert_px_size = image.geotransform[5]
# calc geo coords for each set of px coords
pointcoords = []
for coord in pxcoords:
x = left + coord[1] * horz_px_size
y = top + coord[0] * vert_px_size
pointcoords.append((x, y))
return pointcoords
def get_px_coords_from_shapefile(raster, shapefile):
"""
Takes geographic coordinates from a shapefile and finds the corresponding pixel coordinates on a raster.
rst = "/Users/phoetrymaster/Documents/School/Geography/Thesis/Data/MODIS_KANSAS_2007-2012/reprojected/clips/KansasEVI_2012_clip1.tif"
#rst = "/Users/phoetrymaster/Documents/School/Geography/Thesis/Data/polygonclip_20130929223024_325071991/resampled/newclips/2012clip1.tif"
shp = "/Users/phoetrymaster/Documents/School/Geography/Thesis/Data/MODIS_KANSAS_2007-2012/SampleAreas/samplepoints2012_clip1_new.shp"
print get_px_coords_from_shapefile(rst, shp)
"""
#TODO docstrings
from imageFunctions import openImage
# open, close image file and get properties
raster = openImage(raster)
imageproperties = gdalProperties(raster)
rasterwkt = raster.GetProjectionRef()
oSRSop = osr.SpatialReference()
oSRSop.ImportFromWkt(rasterwkt)
raster = None
shppoints = load_points(shapefile, oSRSop)
# get pixel coords from point coords
pxcoords = get_px_coords_from_geographic_coords(imageproperties, shppoints)
return pxcoords
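if __name__ == '__main__':
    # Quick sanity check of the pixel/geographic round trip above. _FakeProps
    # is a hypothetical stand-in exposing only the attributes these helpers
    # read from a gdalProperties object (geotransform, cols, rows).
    class _FakeProps(object):
        geotransform = (100.0, 10.0, 0.0, 500.0, 0.0, -10.0)
        cols = 50
        rows = 40
    px = get_px_coords_from_geographic_coords(_FakeProps(), [(125.0, 475.0)])
    print(px)   # [(2, 2)]
    # Going back yields the pixel's origin corner, not the original point:
    print(get_geographic_coords_from_px_coords(_FakeProps(), px))  # [(120.0, 480.0)]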
|
|
from datetime import datetime
from django.urls import reverse
from django.test import RequestFactory
from mixer.backend.django import mixer
from members.views.ajax_views import *
import pytest
@pytest.fixture
def user_request(db):
user = mixer.blend(User)
request = RequestFactory().get("", {}, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
request.user = user
return request
@pytest.fixture
def search_request(db, request):
mixer.blend(Settings, membership_year=2018)
user = mixer.blend(User)
path = reverse("ajax-people")
request = RequestFactory().get(path, {"term": request.param}, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
request.user = user
return request
@pytest.fixture
def adults_with_sub(db):
for i in range(2):
mixer.blend(
Person,
first_name="Adult",
membership__is_adult=True,
membership__description="Full",
state=Person.ACTIVE,
sub__paid=True,
)
return
@pytest.fixture
def junior_with_sub(db):
person = mixer.blend(Person, first_name="Junior", membership__is_adult=False, state=Person.ACTIVE, sub__paid=True)
return person
@pytest.mark.parametrize("search_request", ["i s", "ian", "ste", "ist"], indirect=True)
def test_ajax_people_finds_person(search_request, db):
mixer.blend(Person, first_name="Ian", last_name="Stewart")
request = search_request
response = ajax_people(request)
assert response.status_code == 200
assert b"Ian Stewart" in response.content
@pytest.mark.parametrize("search_request", ["aaa", "iii"], indirect=True)
def test_ajax_people_bad_names(search_request, db):
mixer.blend(Person, first_name="Ian", last_name="Stewart")
request = search_request
response = ajax_people(request)
assert response.status_code == 200
assert b"Ian Stewart" not in response.content
def test_ajax_paid_adults_returns_adults(user_request, adults_with_sub, junior_with_sub):
request = user_request
request.path = reverse("ajax-adults")
response = ajax_adults(request)
assert response.status_code == 200
result = json.loads(response.content)
assert len(result) == 2
assert "Adult" in result[0]["value"]
assert "Adult" in result[1]["value"]
def test_ajax_person_returns_details(db):
person = mixer.blend(
Person,
first_name="Ian",
last_name="Stewart",
allow_phone=True,
allow_email=False,
address__home_phone="1234",
membership__description="Full",
)
request = RequestFactory().get(reverse("ajax-person"), {"id": person.id}, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
request.user = mixer.blend(User)
response = ajax_person(request)
assert response.status_code == 200
result = json.loads(response.content)
assert len(result) == 5
assert result["name"] == "Ian Stewart"
assert result["phone"] == "1234"
assert result["membership"] == "Full"
assert "not shared" in result["email"]
def test_ajax_password_pin_correct(db):
person = mixer.blend(Person)
person.set_pin(1234)
request = RequestFactory().post(
reverse("ajax-password"), {"person_id": person.id, "pin": 1234}, HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = mixer.blend(User)
request.session = {}
response = ajax_password(request)
assert response.status_code == 200
result = json.loads(response.content)
assert result["authenticated"] == True
def test_ajax_password_pin_wrong(db):
person = mixer.blend(Person)
person.set_pin(4567)
request = RequestFactory().post(
reverse("ajax-password"), {"person_id": person.id, "pin": 1234}, HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = mixer.blend(User)
request.session = {}
response = ajax_password(request)
assert response.status_code == 200
result = json.loads(response.content)
assert result["authenticated"] == False
def test_ajax_password_password_correct(db):
user = mixer.blend(User)
user.set_password("abc123")
user.save()
person = mixer.blend(Person, auth_id=user.id)
request = RequestFactory().post(
reverse("ajax-password"),
{"person_id": person.id, "password": "abc123"},
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
request.user = mixer.blend(User)
request.session = {}
response = ajax_password(request)
assert response.status_code == 200
result = json.loads(response.content)
assert result["authenticated"] == True
def test_ajax_password_password_wrong(db):
user = mixer.blend(User)
user.set_password("abc123")
user.save()
person = mixer.blend(Person, auth_id=user.id)
request = RequestFactory().post(
reverse("ajax-password"), {"person_id": person.id, "password": "abc"}, HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = mixer.blend(User)
request.session = {}
response = ajax_password(request)
assert response.status_code == 200
result = json.loads(response.content)
assert result["authenticated"] == False
def test_ajax_dob_returns_dob(db, junior_with_sub):
person = junior_with_sub
person.dob = datetime(2010, 12, 25).date()
person.save()
request = RequestFactory().post(
reverse("ajax-dob"), {"person_id": person.id, "dob": "25/12/10"}, HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = mixer.blend(User)
response = ajax_dob(request)
assert response.status_code == 200
assert response.content == b"OK"
def test_ajax_postcode_correct(db):
person = mixer.blend(Person, mobile_phone="1234", address__home_phone="5678", address__post_code="KT8 2LA")
request = RequestFactory().post(
reverse("ajax-postcode"),
{"person_id": person.id, "postcode": "kt82la", "phone": 1234},
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
request.user = mixer.blend(User)
response = ajax_postcode(request)
assert response.status_code == 200
assert response.content == b"OK"
def test_ajax_postcode_wrongphone(db):
person = mixer.blend(Person, mobile_phone="1234", address__home_phone="5678", address__post_code="KT8 2LA")
request = RequestFactory().post(
reverse("ajax-postcode"),
{"person_id": person.id, "postcode": "kt82la", "phone": 9999},
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
request.user = mixer.blend(User)
response = ajax_postcode(request)
assert response.status_code == 200
assert response.content != b"OK"
def test_ajax_set_pin_passes(db):
person = mixer.blend(Person, mobile_phone="1234", address__home_phone="5678", address__post_code="KT8 2LA")
request = RequestFactory().post(
reverse("ajax-set-pin"), {"person_id": person.id}, HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = mixer.blend(User)
response = ajax_set_pin(request)
assert response.status_code == 200
assert response.content == b"Pin set"
def test_ajax_set_pin_fails_bad_person(db):
request = RequestFactory().post(
reverse("ajax-set-pin"), {"person_id": 9999}, HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = mixer.blend(User)
response = ajax_set_pin(request)
assert response.status_code == 404
def test_ajax_task_status(db):
Settings.set_task_busy(True)
request = RequestFactory().post(
reverse("ajax-task-status"), {"person_id": 9999}, HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = mixer.blend(User)
response = ajax_task_status(request)
assert response.status_code == 200
result = json.loads(response.content)
assert result["busy"] == True
|
|
import unittest
from exprail.classifier import Classifier
from exprail.grammar import Grammar
from exprail.parser import Parser
from exprail.source import SourceString
class NumberClassifier(Classifier):
"""Classify number symbol sets"""
@staticmethod
def is_in_class(token_class, token):
"""
Distinguish digits, signs and the floating point.
:param token_class: 'empty', '0', '0-9', '1-9', '.', '+', '-'
:param token: the considered token
:return: True, when the token is in the class, else False
"""
if token.type == 'char':
if token_class == '0-9':
return token.value.isdigit()
elif token_class == '1-9':
return token.value in '123456789'
elif len(token_class) == 1:
return token.value == token_class
elif token_class == 'empty':
return False
else:
raise ValueError('Unhandled token class "{}"!'.format(token_class))
elif token.type == 'empty':
return token_class == 'empty'
else:
return False
class NumberParser(Parser):
"""Parse the input floating point number"""
def __init__(self, grammar, source):
super(NumberParser, self).__init__(grammar, source)
self._result = {}
@property
def result(self):
return self._result
def operate(self, operation, token):
"""Print the token value on print operation."""
if operation == 'negative':
self._result['sign'] = '-'
elif operation == 'non-negative':
self._result['sign'] = '+'
elif operation == 'save':
self._result['integer'] = ''.join(self._stacks['integer'])
if 'fraction' in self._stacks:
self._result['fraction'] = ''.join(self._stacks['fraction'])
if 'exponent' in self._stacks:
self._result['exponent'] = ''.join(self._stacks['exponent'])
else:
raise ValueError('The "{}" is an invalid operation!'.format(operation))
def show_error(self, message, token):
"""Show error in the parsing process."""
raise ValueError(message)
class NumberGrammarTest(unittest.TestCase):
"""Number grammar tests with examples"""
def test_empty_source(self):
number_classifier = NumberClassifier()
grammar = Grammar(filename='grammars/number.grammar', classifier=number_classifier)
source = SourceString(r'')
parser = NumberParser(grammar, source)
try:
parser.parse()
except ValueError as error:
self.assertEqual(str(error), 'Digit required!')
else:
            self.fail('The expected ValueError was not raised!')
def test_leading_plus_sign(self):
number_classifier = NumberClassifier()
grammar = Grammar(filename='grammars/number.grammar', classifier=number_classifier)
source = SourceString(r'+1234')
parser = NumberParser(grammar, source)
try:
parser.parse()
except ValueError as error:
self.assertEqual(str(error), 'Unnecessary plus sign!')
else:
            self.fail('The expected ValueError was not raised!')
def test_missing_digit(self):
number_classifier = NumberClassifier()
grammar = Grammar(filename='grammars/number.grammar', classifier=number_classifier)
source = SourceString(r'abc')
parser = NumberParser(grammar, source)
try:
parser.parse()
except ValueError as error:
self.assertEqual(str(error), 'Digit required!')
else:
            self.fail('The expected ValueError was not raised!')
def test_zero(self):
number_classifier = NumberClassifier()
grammar = Grammar(filename='grammars/number.grammar', classifier=number_classifier)
source = SourceString(r'0')
parser = NumberParser(grammar, source)
parser.parse()
expected_result = {
'sign': '+',
'integer': '0'
}
self.assertEqual(parser.result, expected_result)
def test_positive_integer(self):
number_classifier = NumberClassifier()
grammar = Grammar(filename='grammars/number.grammar', classifier=number_classifier)
source = SourceString(r'1234')
parser = NumberParser(grammar, source)
parser.parse()
expected_result = {
'sign': '+',
'integer': '1234'
}
self.assertEqual(parser.result, expected_result)
def test_negative_integer(self):
number_classifier = NumberClassifier()
grammar = Grammar(filename='grammars/number.grammar', classifier=number_classifier)
source = SourceString(r'-1234')
parser = NumberParser(grammar, source)
parser.parse()
expected_result = {
'sign': '-',
'integer': '1234'
}
self.assertEqual(parser.result, expected_result)
def test_only_fraction(self):
number_classifier = NumberClassifier()
grammar = Grammar(filename='grammars/number.grammar', classifier=number_classifier)
source = SourceString(r'0.5678')
parser = NumberParser(grammar, source)
parser.parse()
expected_result = {
'sign': '+',
'integer': '0',
'fraction': '5678'
}
self.assertEqual(parser.result, expected_result)
def test_integer_and_fraction(self):
number_classifier = NumberClassifier()
grammar = Grammar(filename='grammars/number.grammar', classifier=number_classifier)
source = SourceString(r'1234.5678')
parser = NumberParser(grammar, source)
parser.parse()
expected_result = {
'sign': '+',
'integer': '1234',
'fraction': '5678'
}
self.assertEqual(parser.result, expected_result)
def test_missing_fraction(self):
number_classifier = NumberClassifier()
grammar = Grammar(filename='grammars/number.grammar', classifier=number_classifier)
source = SourceString(r'1234.e')
parser = NumberParser(grammar, source)
try:
parser.parse()
except ValueError as error:
self.assertEqual(str(error), 'Missing fraction!')
else:
            self.fail('The expected ValueError was not raised!')
def test_only_exponent(self):
number_classifier = NumberClassifier()
grammar = Grammar(filename='grammars/number.grammar', classifier=number_classifier)
source = SourceString(r'0e100')
parser = NumberParser(grammar, source)
parser.parse()
expected_result = {
'sign': '+',
'integer': '0',
'exponent': '100'
}
self.assertEqual(parser.result, expected_result)
def test_negative_exponent(self):
number_classifier = NumberClassifier()
grammar = Grammar(filename='grammars/number.grammar', classifier=number_classifier)
source = SourceString(r'0E-100')
parser = NumberParser(grammar, source)
parser.parse()
expected_result = {
'sign': '+',
'integer': '0',
'exponent': '-100'
}
self.assertEqual(parser.result, expected_result)
def test_general_cases(self):
number_classifier = NumberClassifier()
grammar = Grammar(filename='grammars/number.grammar', classifier=number_classifier)
source = SourceString(r'1234.5678e9')
parser = NumberParser(grammar, source)
parser.parse()
expected_result = {
'sign': '+',
'integer': '1234',
'fraction': '5678',
'exponent': '9'
}
self.assertEqual(parser.result, expected_result)
def test_negated_cases(self):
number_classifier = NumberClassifier()
grammar = Grammar(filename='grammars/number.grammar', classifier=number_classifier)
source = SourceString(r'-1234.5678e-9')
parser = NumberParser(grammar, source)
parser.parse()
expected_result = {
'sign': '-',
'integer': '1234',
'fraction': '5678',
'exponent': '-9'
}
self.assertEqual(parser.result, expected_result)
def test_invalid_format(self):
number_classifier = NumberClassifier()
grammar = Grammar(filename='grammars/number.grammar', classifier=number_classifier)
source = SourceString(r'-1234.5678e-9a')
parser = NumberParser(grammar, source)
try:
parser.parse()
except ValueError as error:
self.assertEqual(str(error), 'Invalid number format!')
else:
            self.fail('The expected ValueError was not raised!')
|
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier. All rights reserved.
# Distributed under the terms of the new BSD License.
# -----------------------------------------------------------------------------
import re
import math
import numpy as np
# ------------------------------------------------------------------ Matrix ---
class Matrix(object):
def __init__(self, a=1, b=0, c=0, d=1, e=0, f=0):
self._matrix = np.array([[a, c, e],
[b, d, f],
[0, 0, 1]], dtype=float)
@property
def matrix(self):
return self._matrix
def __array__(self, *args):
return self._matrix
def __repr__(self):
a, c, e = self._matrix[0]
b, d, f = self._matrix[1]
return "Matrix(%g,%g,%g,%g,%g,%g)" % (a, b, c, d, e, f)
# ---------------------------------------------------------------- Identity ---
class Identity(Matrix):
def __init__(self):
Matrix.__init__(self)
self._matrix[...] = ([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
def __repr__(self):
return "Identity()"
# --------------------------------------------------------------- Translate ---
class Translate(Matrix):
"""
Translation is equivalent to the matrix [1 0 0 1 tx ty], where tx and ty
are the distances to translate coordinates in X and Y, respectively.
"""
def __init__(self, x, y=0):
Matrix.__init__(self)
self._x, self._y = x, y
self._matrix[...] = ([[1, 0, x],
[0, 1, y],
[0, 0, 1]])
def __repr__(self):
return "Translate(%g,%g)" % (self._x, self._y)
# ------------------------------------------------------------------- Scale ---
class Scale(Matrix):
"""
Scaling is equivalent to the matrix [sx 0 0 sy 0 0]. One unit in the X and
Y directions in the new coordinate system equals sx and sy units in the
previous coordinate system, respectively.
"""
    def __init__(self, x, y=0):
        Matrix.__init__(self)
        self._x = x
        self._y = y or x
        # Use the resolved values so scale(s) means scale(s, s)
        self._matrix[...] = ([[self._x, 0, 0],
                              [0, self._y, 0],
                              [0, 0, 1]])
def __repr__(self):
return "Scale(%g,%g)" % (self._x, self._y)
# ------------------------------------------------------------------ Rotate ---
class Rotate(Matrix):
"""
Rotation about the origin is equivalent to the matrix [cos(a) sin(a)
-sin(a) cos(a) 0 0], which has the effect of rotating the coordinate system
axes by angle a.
"""
def __init__(self, angle, x=0, y=0):
Matrix.__init__(self)
self._angle = angle
self._x = x
self._y = y
angle = math.pi * angle / 180.0
rotate = np.array([[math.cos(angle), -math.sin(angle), 0],
[math.sin(angle), math.cos(angle), 0],
[0, 0, 1]], dtype=float)
forward = np.array([[1, 0, x],
[0, 1, y],
[0, 0, 1]], dtype=float)
inverse = np.array([[1, 0, -x],
[0, 1, -y],
[0, 0, 1]], dtype=float)
        # Rotate about (x, y): translate to the origin, rotate, translate back
        self._matrix = np.dot(forward, np.dot(rotate, inverse))
def __repr__(self):
return "Rotate(%g,%g,%g)" % (self._angle, self._x, self._y)
# ------------------------------------------------------------------- SkewX ---
class SkewX(Matrix):
"""
A skew transformation along the x-axis is equivalent to the matrix [1 0
tan(a) 1 0 0], which has the effect of skewing X coordinates by angle a.
"""
def __init__(self, angle):
Matrix.__init__(self)
self._angle = angle
angle = math.pi * angle / 180.0
self._matrix[...] = ([[1, math.tan(angle), 0],
[0, 1, 0],
[0, 0, 1]])
def __repr__(self):
return "SkewX(%g)" % (self._angle)
# ------------------------------------------------------------------- SkewY ---
class SkewY(Matrix):
"""
A skew transformation along the y-axis is equivalent to the matrix [1
tan(a) 0 1 0 0], which has the effect of skewing Y coordinates by angle a.
"""
def __init__(self, angle):
Matrix.__init__(self)
self._angle = angle
angle = math.pi * angle / 180.0
self._matrix[...] = ([[1, 0, 0],
[math.tan(angle), 1, 0],
[0, 0, 1]])
def __repr__(self):
return "SkewY(%g)" % (self._angle)
# --------------------------------------------------------------- Transform ---
class Transform(object):
"""
A Transform is defined as a list of transform definitions, which are
applied in the order provided. The individual transform definitions are
separated by whitespace and/or a comma.
"""
def __init__(self, content=""):
self._transforms = []
if not content:
return
converters = {"matrix": Matrix,
"scale": Scale,
"rotate": Rotate,
"translate": Translate,
"skewx": SkewX,
"skewy": SkewY}
keys = "|".join(converters.keys())
pattern = r"(?P<name>%s)\s*\((?P<args>[^)]*)\)" % keys
for match in re.finditer(pattern, content):
name = match.group("name").strip()
args = match.group("args").strip().replace(',', ' ')
args = [float(value) for value in args.split()]
transform = converters[name](*args)
self._transforms.append(transform)
def __add__(self, other):
T = Transform()
T._transforms.extend(self._transforms)
T._transforms.extend(other._transforms)
return T
    def __radd__(self, other):
        # other + self: build a new Transform instead of mutating self,
        # keeping other's transforms first.
        T = Transform()
        T._transforms.extend(other._transforms)
        T._transforms.extend(self._transforms)
        return T
@property
def matrix(self):
M = np.eye(3)
for transform in self._transforms:
M = np.dot(M, transform)
return M
def __array__(self, *args):
        return self.matrix
def __repr__(self):
s = ""
for i in range(len(self._transforms)):
s += repr(self._transforms[i])
if i < len(self._transforms) - 1:
s += ", "
return s
@property
def xml(self):
return self._xml()
def _xml(self, prefix=""):
identity = True
for transform in self._transforms:
if not isinstance(transform, Identity):
identity = False
break
if identity:
return ""
return 'transform="%s" ' % repr(self)
|
|
# -*- coding: utf-8 -*-
from os import path
from gluon import current
from gluon.html import *
from gluon.storage import Storage
# =============================================================================
class index():
""" Custom Home Page """
def __call__(self):
T = current.T
response = current.response
response.title = current.deployment_settings.get_system_name()
view = path.join(current.request.folder, "private", "templates",
"IFRC", "views", "index.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP("404", "Unable to open Custom View: %s" % view)
script = '''
$('.marker').mouseover(function(){
$(this).children('.marker-window').show();
})
$('.marker').mouseout(function(){
$(this).children('.marker-window').hide();
})'''
response.s3.jquery_ready.append(script)
markers = [
Storage(name = "Afghan Red Crescent Society",
direction = "right",
top = 109,
left = 271),
Storage(name = "Australian Red Cross",
direction = "right",
top = 349,
left = 478),
Storage(name = "Bangladesh Red Crescent Society",
direction = "right",
top = 142,
left = 326),
Storage(name = "Brunei Darussalam Red Crescent Society",
direction = "right",
top = 205,
left = 402),
Storage(name = "Cambodian Red Cross Society",
direction = "right",
top = 181,
left = 374),
Storage(name = "Cook Islands Red Cross",
direction = "right",
top = 291,
left = 652),
Storage(name = "Fiji Red Cross Society",
direction = "right",
top = 278,
left = 590),
Storage(name = "Hong Kong Red Cross Society",
direction = "right",
top = 146,
left = 398),
Storage(name = "Indian Red Cross Society",
direction = "right",
top = 129,
left = 287),
Storage(name = "Indonesian Red Cross Society",
direction = "right",
top = 235,
left = 378),
Storage(name = "Japanese Red Cross Society",
direction = "right",
top = 94,
left = 463),
Storage(name = "Kiribati Red Cross Society",
direction = "left",
top = 214,
left = 564),
Storage(name = "Lao Red Cross Society",
direction = "right",
top = 159,
left = 366),
Storage(name = "Malaysian Red Crescent Society",
direction = "right",
top = 207,
left = 367),
Storage(name = "Maldivian Red Crescent",
direction = "right",
top = 205,
left = 278),
Storage(name = "Marshall Islands Red Cross Society",
direction = "left",
top = 200,
left = 561),
Storage(name = "Micronesia Red Cross Society",
direction = "left",
top = 200,
left = 532),
Storage(name = "Mongolian Red Cross Society",
direction = "right",
top = 54,
left = 372),
Storage(name = "Myanmar Red Cross Society",
direction = "right",
top = 165,
left = 349),
Storage(name = "Nepal Red Cross Society",
direction = "right",
top = 133,
left = 308),
Storage(name = "New Zealand Red Cross",
direction = "right",
top = 368,
left = 562),
Storage(name = "Pakistan Red Crescent Society",
direction = "right",
top = 115,
left = 278),
Storage(name = "Palau Red Cross Society",
direction = "right",
top = 197,
left = 463),
Storage(name = "Papua New Guinea Red Cross Society",
direction = "right",
top = 247,
left = 504),
Storage(name = "Philippine National Red Cross",
direction = "right",
top = 170,
left = 421),
Storage(name = "Red Cross of Viet Nam",
direction = "right",
top = 150,
left = 373),
Storage(name = "Red Cross Society of China",
direction = "right",
top = 81,
left = 399),
Storage(name = "Red Cross Society of the Democratic People's Republic of Korea",
direction = "right",
top = 82,
left = 423),
Storage(name = "Republic of Korea National Red Cross",
direction = "right",
top = 87,
left = 426),
Storage(name = "Samoa Red Cross Society",
direction = "left",
top = 261,
left = 621),
Storage(name = "Singapore Red Cross Society",
direction = "right",
top = 214,
left = 376),
Storage(name = "Solomon Islands Red Cross",
direction = "right",
top = 247,
left = 537),
Storage(name = "Sri Lanka Red Cross Society",
direction = "right",
top = 197,
left = 303),
Storage(name = "Thai Red Cross Society",
direction = "right",
top = 172,
left = 360),
Storage(name = "Timor-Leste Red Cross Society",
direction = "right",
top = 245,
left = 435),
Storage(name = "Tonga Red Cross Society",
direction = "right",
top = 291,
left = 563),
Storage(name = "Tuvalu Red Cross Society",
direction = "right",
top = 245,
left = 591),
Storage(name = "Vanuatu Red Cross Society",
direction = "right",
top = 276,
left = 559),
]
map = DIV(A(T("Go to Functional Map"),
_href=URL(c="gis", f="index"),
_class="map-click"),
_id="map-home")
append = map.append
for marker in markers:
append(DIV(A("",
_href=URL(c="org", f="organisation", args="read",
vars={"organisation.name": marker.name})),
DIV(SPAN(marker.name),
SPAN(_class="marker-plus"),
_class="marker-window %s" % marker.direction),
_class="marker",
_style="top:%ipx;left:%ipx;" % (marker.top,
marker.left)))
append(DIV(SPAN(T("Click anywhere on the map for full functionality")),
_class="map-tip"))
current.menu.breadcrumbs = None
return dict(map=map)
# END =========================================================================
|
|
import sys
import zmq
import pymongo
import os
import threading
import logging
from logging.handlers import RotatingFileHandler
#from logging.config import dictConfig
from bson import ObjectId
from m4ed.util.settings import parse_asset_settings
from m4ed.util.image import ImageProcessor
try:
import configparser
except ImportError: # pragma: no cover
import ConfigParser as configparser
log = logging.getLogger(__name__)
def test_logger(number, frame):
log.info('HUP Received! LOGGING IT! {} {}'.format(number, repr(frame)))
def main(config_ini):
print 'Starting up main'
_settings = configparser.SafeConfigParser()
with open(config_ini) as fp:
_settings.readfp(fp)
settings = dict(_settings.items('app:main'))
#formatting = dict(_settings.items('formatter_generic'))
logger_settings = dict(_settings.items('logger_uploader'))
settings.update(parse_asset_settings(settings, dev_ini_path=config_ini))
fh = RotatingFileHandler(
logger_settings.get('filename', 'uploader.log'),
maxBytes=int(logger_settings.get('maxBytes', 10000)),
encoding='utf-8'
)
lvl = logger_settings.get('level', 'DEBUG').upper()
    lvl = getattr(logging, lvl)
fh.setLevel(lvl)
formatter = logging.Formatter('%(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s')
fh.setFormatter(formatter)
log.addHandler(fh)
log.setLevel(lvl)
url_worker = settings['zmq.worker_socket']
url_client = settings['zmq.socket']
context = zmq.Context(1)
# The 'frontend' facing the clients where new jobs are being sent
clients = context.socket(zmq.PULL)
clients.bind(url_client)
# The 'backend' facing the workers where received jobs are being pushed
workers = context.socket(zmq.PUSH)
workers.bind(url_worker)
conn = pymongo.Connection(
host=settings['db.mongo.host'],
port=int(settings['db.mongo.port'])
)
db = conn[settings['db.mongo.collection_name']]
cloud = None
if not settings['store_locally']:
log.info('Initializing cloud connection...')
cloud = settings['service'](**settings)
cloud.connect()
save_path = settings['save_path']
for i in range(int(settings['zmq.workers'])):
worker = UploadWorker(
name='[worker-thread-{}]'.format(i),
imager=ImageProcessor(
db=db,
image_save_path=settings['save_path']
),
context=context,
worker_url=url_worker,
db=db,
cloud=cloud,
save_path=save_path
)
worker.start()
try:
log.info('Starting zmq streamer')
log.info('Now waiting for jobs...')
zmq.device(zmq.STREAMER, clients, workers)
except KeyboardInterrupt:
pass
# We never get here... but if we do, shut down!
log.info('Shutting down...')
clients.close()
workers.close()
context.term()
class UploadWorker(threading.Thread):
def __init__(self, name, imager, context, worker_url, db, save_path, cloud=None):
        # Pass the name through so log messages identify the worker thread
        threading.Thread.__init__(self, name=name)
self.imager = imager
self.db = db
self.cloud = cloud
self.socket = context.socket(zmq.PULL)
# Setting linger to 0 kills the socket right away when
# it's closed and context is terminated
self.socket.setsockopt(zmq.LINGER, 0)
self.socket.connect(worker_url)
self.save_path = save_path
log.info('{} spawned!'.format(self.name))
# def _print(self, msg):
# log.info('{} - {}'.format(self.name, msg))
def run(self):
while True:
try:
s = self.socket.recv_string()
log.info('Received a job!')
except zmq.ZMQError:
self.socket.close()
break
log.info('Handling the job...')
log.info(s)
            args = s.split(':', 1)  # command:argument; keep colons in the arg
getattr(self, args[0])(args[1])
def save(self, file_path, **kw):
resource_uri = kw.pop('resource_uri', '/static/tmp')
try:
r = self.imager.process(file_path)
        except (IOError, TypeError):
# When the image format isn't supported by pil an IOError is raised
# TypeError is raised when the image doesn't match our whitelist
# return {'result': 'error', 'why': 'image format not supported'}
return
finally:
os.unlink(file_path)
# TODO: Might be unnecessary?
if len(r) == 0:
return
#return {'result': 'error', 'why': 'filetype not allowed'}
data = dict(
id=r.get('id'),
size=r.get('size'),
name=r.get('name'),
delete_url='/api/assets/{}'.format(r.get('id')),
delete_type='DELETE',
type=r.get('type'),
format=r.get('format'),
desc='This is an image',
status='local'
)
frames = r.get('frames')
if frames:
data['frames'] = frames
if self.cloud:
_id = self.db.assets.insert(data, safe=True)
self._cloud_save(str(_id))
else:
dir_name = r.get('name').rsplit('.', 1)[0]
#api_url = '/api/assets/{}'.format(r.get('id'))
#if resource_uri != '':
resource_uri = self.save_path
log.info('Saving the image to ' + resource_uri)
data['url'] = (resource_uri +
'/{directory}/{full}').format(
directory=dir_name,
full=r.get('full')
)
data['thumbnail_url'] = (resource_uri +
'/{directory}/{thumb}').format(
directory=dir_name,
thumb=r.get('thumb')
)
self.db.assets.insert(data, safe=True)
def _cloud_save(self, _id):
asset = self.db.assets.find_and_modify(
query={'_id': ObjectId(_id), 'status': 'local'},
update={'$set': {'status': 'processing'}}
)
# If there is no asset returned, assume it has been deleted
# before it entered the save queue
if not asset:
return
log.info(asset)
name = asset.get('name')
directory = asset.get('id')
_type = asset.get('type')
format = asset.get('format')
thumb_name = '_s.'.join(name.rsplit('.', 1))
full_save_path = os.path.join(
self.save_path, directory, name
)
thumb_save_path = os.path.join(
self.save_path, directory, thumb_name
)
url = self.cloud.save(
path=full_save_path,
_type=_type,
format=format
)
os.unlink(full_save_path)
thumb_url = self.cloud.save(
path=thumb_save_path,
_type=_type,
format=format
)
os.unlink(thumb_save_path)
if _type == 'anim':
filename, file_extension = name.rsplit('.', 1)
for i in range(1, asset.get('frames')):
p = os.path.join(
self.save_path, directory, '{filename}_{index}.{extension}'.format(
index=i, filename=filename, extension=file_extension)
)
self.cloud.save(
path=p,
_type=_type,
format=format
)
os.unlink(p)
os.rmdir(os.path.join(self.save_path, directory))
result = self.db.assets.find_and_modify(
query={'_id': ObjectId(_id)},
update={'$set': {
'url': url,
'thumbnail_url': thumb_url,
'status': 'cloud'
}},
safe=True
)
# If for some strange reason the object is not returned from mongo
# assume it has been deleted by someone while it was uploading.
# 'Revert' the changes by deleting the files from cloud.
if not result:
self._cloud_delete(name)
def delete(self, _id):
asset = self.db.assets.find_and_modify(
query={'_id': ObjectId(_id)},
remove=True,
safe=True
)
        # find_and_modify returns None when the document no longer exists
        if not asset:
            return
        log.info('Proceeding to delete ' + str(asset))
        if asset.get('type') == 'anim' and asset.get('status') == 'cloud':
anim_frames = asset.get('frames')
self._cloud_delete(asset.get('name'), anim_frames)
def _cloud_delete(self, name, anim_frames=0):
self.cloud.delete(name)
# A hack to derive the thumb name from full name
filename, file_extension = name.rsplit('.', 1)
thumb_name = '{filename}_s.{file_extension}'.format(
filename=filename, file_extension=file_extension)
self.cloud.delete(thumb_name)
for i in range(1, anim_frames):
self.cloud.delete(
'{filename}_{index}.{extension}'.format(
filename=filename, index=i, extension=file_extension
)
)
if __name__ == '__main__':
main(sys.argv[1])
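# Example of the config sections main() expects (illustrative; the option
# names are taken from the lookups above, the values are made up). Note that
# ConfigParser returns every value as a string, so flags such as
# store_locally need to be normalised elsewhere.
#
# [app:main]
# zmq.socket = tcp://127.0.0.1:5555
# zmq.worker_socket = inproc://workers
# zmq.workers = 4
# db.mongo.host = localhost
# db.mongo.port = 27017
# db.mongo.collection_name = m4ed
# store_locally = true
# save_path = /tmp/uploads
#
# [logger_uploader]
# level = DEBUG
# filename = uploader.log
# maxBytes = 10000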
|
|
import bottom
import datetime
import asyncio
from .backend import Backend
from .models import Event, Channel, User, ChannelMessage, UserMessage
def parse_prefixnick(prefixnick):
possible_prefix = prefixnick[0]
if possible_prefix in "+@":
return possible_prefix, prefixnick[1:]
return "", prefixnick
class IRCBackend(Backend):
    """
    It would be nice if we could somehow confirm our nickname back
    from the server. It's in each reply but 'bottom' filters it from
    the events.
    """
    type = "irc"
def setup(self, nick, channels, host, port=6667,
ssl=True, **rest):
self.nick = nick
self.autojoin_channels = channels
self.ircserver = host
self.ircport = port
self.ssl = ssl
self.realname = "Hey there"
self.client = None
self.firstconnect = False
# local bookkeeping. Assumed to be generic -> Backend?
self.users = {} # ordered dict to preserve irc order?
self.channels = {}
def save(self):
data = super().save()
data['options'].update({
"nick": self.nick,
"host": self.ircserver,
"port": self.ircport,
"ssl": self.ssl,
"channels": [
c.name for c in self.channels.values()
]
})
return data
def isme(self, nick):
return nick.lower() == self.nick.lower()
async def run(self):
self.client = bottom.Client(host=self.ircserver, port=self.ircport,
ssl=self.ssl)
self.client.on('CLIENT_CONNECT', self.handle_connect)
self.client.on('CLIENT_DISCONNECT', self.handle_disconnect)
self.client.on('RPL_WELCOME', self.handle_welcome)
self.client.on('RPL_MYINFO', self.handle_myinfo)
self.client.on('PING', self.handle_ping)
self.client.on('PRIVMSG', self.handle_privmsg)
self.client.on('JOIN', self.handle_join)
self.client.on('PART', self.handle_part)
self.client.on('KICK', self.handle_kick)
self.client.on('QUIT', self.handle_quit)
self.client.on('NICK', self.handle_nick)
self.client.on('RPL_NAMREPLY', self.handle_namreply)
await self.client.connect()
async def handle_connect(self, **kwargs):
print("CONNECT", kwargs)
await self.enqueue(Event.CONNECT)
self.client.send('NICK', nick=self.nick)
self.client.send('USER', user=self.nick,
realname=self.realname)
# The first connection will join the configured channels,
# later connections (e.g. after disconnect+reconnect) will
# join active channels
if not self.firstconnect:
self.firstconnect = True
for channel in self.autojoin_channels:
self.client.send('JOIN', channel=channel)
else:
for channel in self.channels.values():
channel.reset()
self.client.send('JOIN', channel=channel.name, key=channel.key)
def reset(self):
""" reset state. E.g. after disconnect """
self.users = {}
async def handle_disconnect(self, **kwargs):
print("Disconnect")
self.reset()
await self.enqueue(Event.DISCONNECT)
await asyncio.sleep(3)
print("Attempt reconnect")
await self.client.connect()
await self.client.wait("client_connect")
print("Connected again!")
async def handle_nick(self, nick, user, newnick, host, **args):
isme = self.isme(nick)
if isme:
self.nick = newnick
user = self.get_create_user(nick)
self.change_nick(user, newnick)
for channel in self.channels.values():
if user not in channel.users:
continue
await self.enqueue(Event.NICKCHANGE, channel=channel,
user=user, isme=isme)
async def handle_namreply(self, channel, names, **kwargs):
channel = self.get_create_channel(channel)
for name in names:
# a name is a nick, optionally prefixed with '+' or '@'.
            # available prefixes may have been reported through a 005
# reply when connecting
prefix, nick = parse_prefixnick(name)
# define a IRCUser that knows about op-ed and voiced users
user = self.get_create_user(nick)
channel.add_user(user)
await self.enqueue(Event.USERJOIN, channel=channel, user=user,
isme=self.isme(nick))
async def enqueue(self, type, **args):
await self.queue.put(Event(type, self, **args))
async def handle_part(self, nick, user, channel, host, **kwargs):
# XXX Was it me? -> update channel state
user = self.get_create_user(nick)
channel = self.get_create_channel(channel)
channel.remove_user(user)
await self.enqueue(Event.USERPART, isme=self.isme(nick),
channel=channel, user=user)
    async def handle_kick(self, nick, user, channel, target, host, message,
                          **kwargs):
        # XXX Was it me? -> update channel state
        # 'target' is the nick being kicked; 'nick' is the kicker
        kicked = self.get_create_user(target)
        channel = self.get_create_channel(channel)
        channel.remove_user(kicked)
        await self.enqueue(Event.USERPART, isme=self.isme(target),
                           channel=channel, user=kicked)
async def handle_quit(self, nick, host, user, message, **kwargs):
""" A quit is not related to a specific channel, it can apply
to all """
user = self.get_create_user(nick)
for channel in self.channels.values():
if channel.remove_user(user):
await self.enqueue(Event.USERPART, channel=channel, user=user,
isme=self.isme(nick))
def handle_welcome(self, **kwargs):
print("WELCOME", kwargs)
def handle_myinfo(self, **kwargs):
print("MYINFO", kwargs)
def handle_ping(self, message, **kwargs):
self.client.send('PONG', message=message)
async def handle_join(self, nick, user, channel, **kwargs):
print("JOIN {0} {1} {2} {3}".format(nick, user, channel, kwargs))
channel = self.get_create_channel(channel)
user = self.get_create_user(nick)
if self.isme(nick):
print("JOIN it appears I joined {0}".format(channel))
await self.enqueue(Event.CREATE_CHANNEL, channel=channel)
channel.add_user(user)
await self.enqueue(Event.USERJOIN, channel=channel, user=user,
isme=self.isme(nick))
def change_nick(self, user, newnick):
""" update the local user registry and the user itself """
self.users.pop(user.name.lower())
user.name = newnick
self.users[user.name.lower()] = user
def get_create_user(self, name):
"""
        'name' (nick) can change, which means updating
        the dictionary. We may need to consider indexing
        by id instead.
"""
u = self.users.get(name.lower())
if u is None:
u = User(name)
self.users[name.lower()] = u
return u
def get_user_by_id(self, id):
for u in self.users.values():
if u.id == id:
return u
return None
def get_create_channel(self, name):
c = self.channels.get(name.lower())
if c is None:
c = Channel(name)
self.channels[name.lower()] = c
return c
async def handle_privmsg(self, nick, target, message, **kwargs):
print("PRIVMSG", nick, target, message)
user = self.get_create_user(nick)
channel = self.get_create_channel(target) # can also be user!
channel._dump_users()
print(str(user))
await self.enqueue(Event.CHANNEL_MESSAGE,
message=ChannelMessage(
message=message,
target=channel,
user=user
))
# client commands
def find_channel_by_id(self, channelid):
for c in self.channels.values():
if c.id == channelid:
return c
return None
async def channel_message(self, channelid, message):
""" Handle message send by the client """
channel = self.find_channel_by_id(channelid)
user = self.get_create_user(self.nick)
if not (channel and user):
return
self.client.send('PRIVMSG',
target=channel.name,
message=message)
await self.enqueue(Event.CHANNEL_MESSAGE,
message=ChannelMessage(
message=message,
target=channel,
user=user
))
async def channel_join(self, name, key=None):
""" handle client JOIN command """
self.client.send('JOIN', channel=name, key=key)
async def channel_part(self, channelid, key=None):
""" handle clent PART command """
channel = self.find_channel_by_id(channelid)
if channel:
self.client.send('PART', channel=channel.name)
Backend.register(IRCBackend)
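# A minimal, self-contained sketch of the bottom.Client wiring pattern used by
# IRCBackend.run() above. The server, port and nick below are placeholder
# assumptions, not values taken from this module.
if __name__ == '__main__':
    import asyncio
    import bottom

    demo = bottom.Client(host='irc.example.org', port=6697, ssl=True)

    @demo.on('CLIENT_CONNECT')
    async def demo_connect(**kwargs):
        # register with the server, mirroring handle_connect() above
        demo.send('NICK', nick='demo-nick')
        demo.send('USER', user='demo-nick', realname='demo')

    @demo.on('PING')
    def demo_keepalive(message, **kwargs):
        # answer server keepalives, mirroring handle_ping() above
        demo.send('PONG', message=message)

    loop = asyncio.get_event_loop()
    loop.create_task(demo.connect())
    loop.run_forever()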
|
|
import os
import sys
import gzip
import fbt_format_pb2
import cairo
import math
from optparse import OptionParser
def parse_args():
parser = OptionParser()
parser.add_option("-i", "--input-file", dest="input_file", default="trace.fbt", help="Input FBT file.")
parser.add_option("-o", "--output-file", dest="output_file", default="trace.pdf",
help="Output file.")
parser.add_option("-g", "--gl-times", action="store_true", dest="use_gl_times", help="Use GL events instead of CPU events")
parser.add_option("-f", "--in-point", dest="in_point", default=0, help="In-point into frames.")
parser.add_option("-n", "--frames-count", dest="number_of_frames", default=0, help="How many frames to visualize.")
parser.add_option("-t", "--title", dest="title", help="Overrides the title of the diagram.")
parser.add_option("-w", "--width", dest="width", help="Forces width to normalize to.")
return parser.parse_args()
def open_session(filepath):
# Open the input file
if not os.path.exists(filepath):
raise RuntimeError("Input file " + filepath + " does not exist.")
trace_session = fbt_format_pb2.TraceSession()
    input_file = gzip.GzipFile(filepath, 'rb')
trace_session.ParseFromString(input_file.read())
input_file.close()
return trace_session
def inch_to_mm(inch):
return inch*25.4
def mm_to_inch(mm):
return mm * (1/25.4)
def ns_to_ms(ns):
return ns * 1.0e-6
def ns_to_mus(ns):
return ns * 1.0e-3
def color_8bit_normalize(r, g, b):
return r/255.0, g/255.0, b/255.0
def draw_rounded_rect(ctx, x,y,w,h, r):
if h < r * 2:
r = h/2
if w < r * 2:
r = w/2
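    # The radius is clamped above so adjacent corners never overlap. The
    # outline is traced clockwise, one quarter-circle arc per corner, then
    # filled here; cairo's fill() consumes the path, so any extra fill()
    # in a caller is a harmless no-op.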
ctx.move_to(x, y+r)
ctx.arc(x+r,y+r, r, math.pi, math.pi * 1.5)
ctx.line_to(x+w-r, y)
ctx.arc(x+w-r,y+r, r, math.pi * -0.5, math.pi * 0.0)
ctx.line_to(x+w, y+h-r)
ctx.arc(x+w-r, y+h-r, r, math.pi * 0.0, math.pi * 0.5)
ctx.line_to(x+r, y+h)
ctx.arc(x+r, y+h-r, r, math.pi * 0.5, math.pi * 1.0)
#ctx.line_to(x, y+r)
ctx.close_path()
ctx.fill()
def select_font_for_stage_medians(ctx):
ctx.select_font_face ("Helvetica",
cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_BOLD)
ctx.set_font_size(14)
def select_font_for_axis_labels(ctx):
ctx.select_font_face ("Helvetica",
cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_BOLD)
ctx.set_font_size(18)
def select_font_for_title(ctx):
ctx.select_font_face ("Helvetica",
cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_BOLD)
ctx.set_font_size(24)
def select_font_for_hud(ctx):
ctx.select_font_face ("Helvetica",
cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_BOLD)
ctx.set_font_size(16)
def calculate_max_pipeline_axis_width(ctx, stage_traces):
max_text_width = 0
select_font_for_axis_labels(ctx)
for stage_trace in stage_traces:
_, _, width, height, _, _= ctx.text_extents(stage_trace.name)
if height > POINTS_HEIGHT_PER_STAGE:
print "Warning: Pipeline stage labeling is larger than pipeline height in points."
max_text_width = max(max_text_width, width)
return max_text_width
def draw_text_centered(ctx, text, location_x, location_y):
x_bearing, y_bearing, width, height, x_advance, y_advance = ctx.text_extents(text)
x = location_x-(width/2 + x_bearing)
y = location_y-(height/2 + y_bearing)
ctx.move_to(x, y)
ctx.show_text(text)
def draw_text_right_aligned_vert_centered(ctx, text, location_x, location_y):
x_bearing, y_bearing, width, height, x_advance, y_advance = ctx.text_extents(text)
x = location_x-(width + x_bearing)
y = location_y-(height/2 + y_bearing)
ctx.move_to(x, y)
ctx.show_text(text)
def draw_text_left_aligned_vert_centered(ctx, text, location_x, location_y):
x_bearing, y_bearing, width, height, x_advance, y_advance = ctx.text_extents(text)
x = location_x
y = location_y-(height/2 + y_bearing)
ctx.move_to(x, y)
ctx.show_text(text)
def calculate_pipeline_canvas_extent(stage_traces):
global POINTS_WIDTH_PER_MILLI_SECOND
first_stage_trace = stage_traces[0]
last_stage_trace = stage_traces[-1]
    begin_traces = [x for x in first_stage_trace.event_traces if (x.type == BEGIN_EVENT)]
    if not begin_traces:
        raise RuntimeError("Could not get begin trace.")
    begin_trace = begin_traces[0]
    begin_time_ns = begin_trace.trace_times_ns[CONFIG_FRAME_IN_POINT]
    begin_time_ms = ns_to_ms(begin_time_ns)
    end_traces = [x for x in last_stage_trace.event_traces if (x.type == END_EVENT)]
    if not end_traces:
        raise RuntimeError("Could not get end trace.")
    end_trace = end_traces[0]
end_time_ns = end_trace.trace_times_ns[CONFIG_FRAME_IN_POINT + CONFIG_NUMBER_OF_FRAMES_TO_VISUALIZE-1]
end_time_ms = ns_to_ms(end_time_ns)
time_span_ms = end_time_ms - begin_time_ms
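    # Widen the drawn frame range beyond [in-point, in-point + count) to
    # include neighbouring frames whose phases overlap the visualized time
    # window; draw_stage_phases() renders those extra frames greyed out.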
begin_draw_frame_num = CONFIG_FRAME_IN_POINT
while begin_draw_frame_num > 0 and end_trace.trace_times_ns[begin_draw_frame_num] > begin_time_ns:
begin_draw_frame_num -= 1
end_draw_frame_num = CONFIG_FRAME_IN_POINT + CONFIG_NUMBER_OF_FRAMES_TO_VISUALIZE-1
while end_draw_frame_num < len(begin_trace.trace_times_ns) and begin_trace.trace_times_ns[end_draw_frame_num] < end_time_ns:
end_draw_frame_num += 1
if opts.width is not None:
TARGET_WIDTH = int(opts.width)
POINTS_WIDTH_PER_MILLI_SECOND = TARGET_WIDTH / time_span_ms
else:
POINTS_WIDTH_PER_MILLI_SECOND = 40
width_pt = time_span_ms * POINTS_WIDTH_PER_MILLI_SECOND + POINTS_TIME_CANVAS_PADDING_RIGHT
height_pt = len(stage_traces) * POINTS_COMPLETE_HEIGHT_STAGE
return width_pt, height_pt, begin_time_ms, end_time_ms, begin_draw_frame_num, end_draw_frame_num
def calculate_title_bar_height(ctx, session):
select_font_for_title(ctx)
_, _, _, height, _, _= ctx.text_extents(session.name)
height = height + 2*POINTS_VERT_PADDING_TITLE_LABEL
return height
def calculate_time_axis_label_height(ctx):
select_font_for_axis_labels(ctx)
_, _, _, height, _, _= ctx.text_extents(PIPELINE_TIME_AXIS_LABEL)
height = height + 2*POINTS_VERT_PADDING_TIME_AXIS_LABELS
return height
def draw_stage_phases(ctx, stage_traces, pts_origin_x, pts_origin_y):
# fill background
# ctx.rectangle( pts_origin_x, pts_origin_y, POINTS_PIPELINE_CANVAS_EXTENTS_WIDTH, POINTS_PIPELINE_CANVAS_EXTENTS_HEIGHT)
# ctx.set_source_rgb(0.9, 0.9, 0.9)
# ctx.fill()
ctx.save()
ctx.rectangle(pts_origin_x, pts_origin_y, POINTS_PIPELINE_CANVAS_EXTENTS_WIDTH, POINTS_PIPELINE_CANVAS_EXTENTS_HEIGHT)
ctx.clip()
stage_pos_y = POINTS_COMPLETE_HEIGHT_STAGE * len(stage_traces) - POINTS_COMPLETE_HEIGHT_STAGE
ctx.set_source_rgb(1.0, 0.0, 0.0)
for stage_trace in stage_traces:
        begin_traces = [x for x in stage_trace.event_traces if (x.type == BEGIN_EVENT)]
        if not begin_traces:
            raise RuntimeError("Could not get begin trace.")
        begin_trace = begin_traces[0]
        end_traces = [x for x in stage_trace.event_traces if (x.type == END_EVENT)]
        if not end_traces:
            raise RuntimeError("Could not get end trace.")
        end_trace = end_traces[0]
for nr in range(BEGIN_DRAW_FRAME_NUM, END_DRAW_FRAME_NUM):
origin_y = stage_pos_y + POINTS_STAGE_VERT_PADDING
alpha = 1.0
if nr in range(CONFIG_FRAME_IN_POINT, CONFIG_FRAME_IN_POINT + CONFIG_NUMBER_OF_FRAMES_TO_VISUALIZE):
color = STAGE_COLORS[nr % len(STAGE_COLORS)]
else:
color = color_8bit_normalize(220, 220, 220)
alpha = 0.5
rel_begin_time_ms = ns_to_ms(begin_trace.trace_times_ns[nr]) - TIME_ORIGIN_MS
rel_end_time_ms = ns_to_ms(end_trace.trace_times_ns[nr]) - TIME_ORIGIN_MS
# Hacky padding for minimum time
MINIMUM_WIDTH = 2
phase_pt_width = (rel_end_time_ms - rel_begin_time_ms) * POINTS_WIDTH_PER_MILLI_SECOND
if phase_pt_width < MINIMUM_WIDTH:
# How much is missing?
padding_time = (MINIMUM_WIDTH - phase_pt_width)/POINTS_WIDTH_PER_MILLI_SECOND
rel_begin_time_ms -= padding_time/2
rel_end_time_ms += padding_time/2
ctx.set_source_rgba(color[0], color[1], color[2], alpha)
# TODO: make nice functions here
draw_rounded_rect( ctx,
pts_origin_x + rel_begin_time_ms * POINTS_WIDTH_PER_MILLI_SECOND,
pts_origin_y + origin_y,
(rel_end_time_ms - rel_begin_time_ms) * POINTS_WIDTH_PER_MILLI_SECOND,
POINTS_HEIGHT_PER_STAGE,
3)
ctx.fill()
stage_pos_y -= POINTS_COMPLETE_HEIGHT_STAGE
ctx.restore()
def draw_axes(ctx):
ARROW_LENGTH = 8
ctx.set_source_rgb(0.1, 0.1, 0.1)
ctx.set_line_width(2.0)
ctx.set_line_cap(cairo.LINE_CAP_ROUND)
ctx.set_line_join(cairo.LINE_JOIN_ROUND)
ctx.move_to(POINTS_STAGE_DIAGRAM_ORIGIN_X, POINTS_STAGE_DIAGRAM_ORIGIN_Y + POINTS_PIPELINE_CANVAS_EXTENTS_HEIGHT)
ctx.rel_line_to(POINTS_PIPELINE_CANVAS_EXTENTS_WIDTH, 0)
ctx.rel_line_to(-ARROW_LENGTH, ARROW_LENGTH)
ctx.move_to(POINTS_STAGE_DIAGRAM_ORIGIN_X + POINTS_PIPELINE_CANVAS_EXTENTS_WIDTH, POINTS_STAGE_DIAGRAM_ORIGIN_Y + POINTS_PIPELINE_CANVAS_EXTENTS_HEIGHT)
ctx.rel_line_to(-ARROW_LENGTH, -ARROW_LENGTH)
ctx.stroke()
ctx.move_to(POINTS_STAGE_DIAGRAM_ORIGIN_X, POINTS_STAGE_DIAGRAM_ORIGIN_Y + POINTS_PIPELINE_CANVAS_EXTENTS_HEIGHT)
ctx.rel_line_to(0, -POINTS_PIPELINE_CANVAS_EXTENTS_HEIGHT)
# ctx.rel_line_to(-ARROW_LENGTH, ARROW_LENGTH)
# ctx.move_to(POINTS_STAGE_DIAGRAM_ORIGIN_X, POINTS_STAGE_DIAGRAM_ORIGIN_Y)
# ctx.rel_line_to(ARROW_LENGTH, ARROW_LENGTH)
ctx.stroke()
def draw_labels(ctx, stage_traces, origin_x, origin_y):
stage_pos_y = POINTS_COMPLETE_HEIGHT_STAGE * len(stage_traces) - POINTS_COMPLETE_HEIGHT_STAGE
for stage_trace in stage_traces:
statistic = [x for x in stage_trace.delta_statistics if (x.begin_event == BEGIN_EVENT and x.end_event == END_EVENT)][0]
text_coord_x = origin_x + POINTS_PIPELINE_LABEL_COLUMN_WIDTH - POINTS_HORIZ_PADDING_PIPELINE_AXIS_LABELS
text_coord_y = origin_y + stage_pos_y + POINTS_COMPLETE_HEIGHT_STAGE/2
ctx.set_source_rgb(0.6, 0.6, 0.6)
select_font_for_stage_medians(ctx)
draw_text_right_aligned_vert_centered(ctx, (("%.0f") % ns_to_mus(statistic.median_ns)) + u" \u03BC" + "s", text_coord_x+3, text_coord_y)
select_font_for_axis_labels(ctx)
ctx.set_source_rgb(0.2, 0.2, 0.2)
draw_text_right_aligned_vert_centered(ctx,
stage_trace.name,
text_coord_x - 50,
origin_y + stage_pos_y + POINTS_COMPLETE_HEIGHT_STAGE/2)
stage_pos_y -= POINTS_COMPLETE_HEIGHT_STAGE
draw_text_centered(ctx, PIPELINE_TIME_AXIS_LABEL, origin_x + POINTS_PIPELINE_LABEL_COLUMN_WIDTH + POINTS_PIPELINE_CANVAS_EXTENTS_WIDTH/2, origin_y + POINTS_PIPELINE_CANVAS_EXTENTS_HEIGHT + POINTS_TIME_AXIS_LABEL_HEIGHT/2)
def draw_grid(ctx, origin_x, origin_y, start_time_ms, end_time_ms):
time_ms = start_time_ms
ctx.set_source_rgb(0.85,0.85,0.85)
ctx.set_line_width(1.0)
ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
ctx.set_line_join(cairo.LINE_JOIN_MITER)
while (time_ms < end_time_ms):
ctx.move_to(origin_x + (time_ms - TIME_ORIGIN_MS) * POINTS_WIDTH_PER_MILLI_SECOND, origin_y)
ctx.rel_line_to(0, POINTS_PIPELINE_CANVAS_EXTENTS_HEIGHT)
ctx.stroke()
time_ms += 1.0
def draw_title(ctx, session):
select_font_for_title(ctx)
text = ""
if opts.title is not None:
text = opts.title
else:
ctx.set_font_size(12)
text = session.name
draw_text_centered(ctx, text, POINTS_STAGE_DIAGRAM_ORIGIN_X + POINTS_PIPELINE_CANVAS_EXTENTS_WIDTH / 2, POINTS_VERT_PADDING_TITLE_LABEL + POINTS_TITLE_BAR_HEIGHT/2)
def draw_statistics_hud(ctx, stats):
    select_font_for_hud(ctx)
    loc_x = 2
    loc_y = 20
    # TODO: calculate avg. processing time from the full trace?
    draw_text_left_aligned_vert_centered(ctx, "Avg. frame time: " + ("%.2f" % stats.avg_millisecs_per_frame) + " ms", loc_x, loc_y)
    loc_y += 18
    draw_text_left_aligned_vert_centered(ctx, "Avg. throughput: " + ("%.2f" % stats.avg_throughput_mb_per_sec) + " MB/s", loc_x, loc_y)
    loc_y += 18
    draw_text_left_aligned_vert_centered(ctx, "Median latency: " + ("%.2f" % ns_to_ms(stats.med_frame_processing_time_per_frame_ns)) + " ms", loc_x, loc_y)
    loc_y += 18
    draw_text_left_aligned_vert_centered(ctx, "Renderer: " + session.opengl_info.renderer, loc_x, loc_y)
# PIPELINE_TIME_AXIS_LABEL += ", + ", median of ms/frame = "+ ("%.2f" % ns_to_ms(stat.med_frame_processing_time_per_frame_ns))
####
## MAIN script execution
####
# Here are all the constants for drawing we can tweak
DPI = 72
POINTS_WIDTH_PER_MILLI_SECOND = 25
POINTS_HEIGHT_PER_STAGE = 28
POINTS_STAGE_VERT_PADDING = 4
POINTS_COMPLETE_HEIGHT_STAGE = POINTS_HEIGHT_PER_STAGE + 2*POINTS_STAGE_VERT_PADDING
POINTS_HORIZ_PADDING_PIPELINE_AXIS_LABELS = 10
POINTS_VERT_PADDING_TIME_AXIS_LABELS = 10
POINTS_VERT_PADDING_TITLE_LABEL = 10
POINTS_HORIZ_PADDING_COMPLETE_DIAGRAM = 4
POINTS_PIPELINE_LABEL_COLUMN_WIDTH = 204
POINTS_TIME_AXIS_LABEL_HEIGHT = 50
POINTS_TITLE_BAR_HEIGHT = 100
POINTS_TIME_CANVAS_PADDING_RIGHT = 10
(opts, args) = parse_args()
# Here are some constants which should come from user options
CONFIG_FRAME_IN_POINT = int(opts.in_point)
CONFIG_NUMBER_OF_FRAMES_TO_VISUALIZE = int(opts.number_of_frames)
SKIP_STAGES = ["FrameInput", "FrameOutput"]
show_gl_timer_query_traces = bool(opts.use_gl_times)
timer_name = ""
if show_gl_timer_query_traces:
BEGIN_EVENT = 7
END_EVENT = 8
timer_name = "GPU time"
else:
BEGIN_EVENT = 4
END_EVENT = 5
timer_name = "CPU time"
output_dir = os.path.dirname(opts.output_file)
if output_dir != "" and not os.path.exists(output_dir):
os.makedirs(output_dir)
session = open_session(opts.input_file)
if not session.HasField("session_statistic"):
raise RuntimeError("Expecting a session statistic to be present.")
stat = session.session_statistic
# AUX constants
PIPELINE_TIME_AXIS_LABEL = "Slice #" + str(CONFIG_FRAME_IN_POINT) + " - #" + str(CONFIG_FRAME_IN_POINT + CONFIG_NUMBER_OF_FRAMES_TO_VISUALIZE) + " / " + str(int(stat.number_of_frames_processed)) + " frames (" + timer_name + " in milliseconds)"
stage_traces = []
for stage in session.stage_traces:
    if stage.name in SKIP_STAGES:
        continue
    begin_traces = [x for x in stage.event_traces if (x.type == BEGIN_EVENT)]
    if len(begin_traces) == 0:
        continue
stage_traces.append(stage)
if len(stage_traces) == 0:
print ("Stage trace is empty, canceling drawing")
exit(0)
POINTS_PIPELINE_CANVAS_EXTENTS_WIDTH, POINTS_PIPELINE_CANVAS_EXTENTS_HEIGHT, BEGIN_TIME_MS, END_TIME_MS, BEGIN_DRAW_FRAME_NUM, END_DRAW_FRAME_NUM = calculate_pipeline_canvas_extent(stage_traces)
# Shift the time origin back slightly (0.1 ms) so the first phase is not flush with the y-axis
TIME_ORIGIN_MS = BEGIN_TIME_MS - 0.1
POINTS_COMPLETE_WIDTH = POINTS_PIPELINE_CANVAS_EXTENTS_WIDTH + POINTS_PIPELINE_LABEL_COLUMN_WIDTH + POINTS_HORIZ_PADDING_COMPLETE_DIAGRAM*2
POINTS_COMPLETE_HEIGHT = POINTS_PIPELINE_CANVAS_EXTENTS_HEIGHT + POINTS_TIME_AXIS_LABEL_HEIGHT + POINTS_TITLE_BAR_HEIGHT
print "Writing to '" + opts.output_file + "'"
surface = cairo.PDFSurface(opts.output_file, POINTS_COMPLETE_WIDTH, POINTS_COMPLETE_HEIGHT)
# surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, int(POINTS_COMPLETE_WIDTH)*4, int(POINTS_COMPLETE_HEIGHT)*4)
ctx = cairo.Context(surface)
# ctx.scale(4, 4)
font_options = ctx.get_font_options()
font_options.set_antialias(cairo.ANTIALIAS_SUBPIXEL)
ctx.set_font_options(font_options)
POINTS_STAGE_DIAGRAM_ORIGIN_X, POINTS_STAGE_DIAGRAM_ORIGIN_Y = POINTS_PIPELINE_LABEL_COLUMN_WIDTH + POINTS_HORIZ_PADDING_COMPLETE_DIAGRAM, POINTS_TITLE_BAR_HEIGHT
draw_grid(ctx, POINTS_STAGE_DIAGRAM_ORIGIN_X, POINTS_STAGE_DIAGRAM_ORIGIN_Y, TIME_ORIGIN_MS, TIME_ORIGIN_MS + (END_TIME_MS - BEGIN_TIME_MS))
STAGE_COLORS = []
STAGE_COLORS.append(color_8bit_normalize(15, 128, 140))
STAGE_COLORS.append(color_8bit_normalize(108, 140, 38))
STAGE_COLORS.append(color_8bit_normalize(242, 167, 27))
STAGE_COLORS.append(color_8bit_normalize(242, 106, 27))
STAGE_COLORS.append(color_8bit_normalize(217, 24, 24))
draw_stage_phases(ctx, stage_traces, POINTS_STAGE_DIAGRAM_ORIGIN_X, POINTS_STAGE_DIAGRAM_ORIGIN_Y)
draw_axes(ctx)
draw_labels(ctx, stage_traces, POINTS_HORIZ_PADDING_COMPLETE_DIAGRAM, POINTS_TITLE_BAR_HEIGHT)
draw_title(ctx, session)
draw_statistics_hud(ctx, stat)
# surface.write_to_png(file(opts.output_file, 'wb'))
surface.finish()
|
|
"""Migrating IPython < 4.0 to Jupyter
This *copies* configuration and resources to their new locations in Jupyter
Migrations:
- .ipython/
- nbextensions -> JUPYTER_DATA_DIR/nbextensions
- kernels -> JUPYTER_DATA_DIR/kernels
- .ipython/profile_default/
- static/custom -> .jupyter/custom
- nbconfig -> .jupyter/nbconfig
- security/
- notebook_secret, notebook_cookie_secret, nbsignatures.db -> JUPYTER_DATA_DIR
- ipython_{notebook,nbconvert,qtconsole}_config.py -> .jupyter/jupyter_{name}_config.py
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import re
import shutil
from datetime import datetime
from traitlets.config import PyFileConfigLoader, JSONFileConfigLoader
from traitlets.log import get_logger
from ipython_genutils.path import ensure_dir_exists
try:
from IPython.paths import get_ipython_dir
except ImportError:
# IPython < 4
try:
from IPython.utils.path import get_ipython_dir
except ImportError:
def get_ipython_dir():
return os.environ.get('IPYTHONDIR', os.path.expanduser('~/.ipython'))
from .paths import jupyter_config_dir, jupyter_data_dir
from .application import JupyterApp
pjoin = os.path.join
migrations = {
pjoin('{ipython_dir}', 'nbextensions'): pjoin('{jupyter_data}', 'nbextensions'),
pjoin('{ipython_dir}', 'kernels'): pjoin('{jupyter_data}', 'kernels'),
pjoin('{profile}', 'nbconfig'): pjoin('{jupyter_config}', 'nbconfig'),
}
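# For example, with default Linux locations the first entry expands to
# copying ~/.ipython/nbextensions -> ~/.local/share/jupyter/nbextensions
# (the actual paths depend on jupyter_data_dir()/jupyter_config_dir()).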
custom_src_t = pjoin('{profile}', 'static', 'custom')
custom_dst_t = pjoin('{jupyter_config}', 'custom')
for security_file in ('notebook_secret', 'notebook_cookie_secret', 'nbsignatures.db'):
src = pjoin('{profile}', 'security', security_file)
dst = pjoin('{jupyter_data}', security_file)
migrations[src] = dst
config_migrations = ['notebook', 'nbconvert', 'qtconsole']
regex = re.compile
config_substitutions = {
regex(r'\bIPythonQtConsoleApp\b'): 'JupyterQtConsoleApp',
regex(r'\bIPythonWidget\b'): 'JupyterWidget',
regex(r'\bRichIPythonWidget\b'): 'RichJupyterWidget',
regex(r'\bIPython\.html\b'): 'notebook',
regex(r'\bIPython\.nbconvert\b'): 'nbconvert',
}
def migrate_dir(src, dst):
"""Migrate a directory from src to dst"""
log = get_logger()
if not os.listdir(src):
log.debug("No files in %s" % src)
return False
if os.path.exists(dst):
if os.listdir(dst):
# already exists, non-empty
log.debug("%s already exists" % dst)
return False
else:
os.rmdir(dst)
log.info("Copying %s -> %s" % (src, dst))
ensure_dir_exists(os.path.dirname(dst))
shutil.copytree(src, dst, symlinks=True)
return True
def migrate_file(src, dst, substitutions=None):
"""Migrate a single file from src to dst
substitutions is an optional dict of {regex: replacement} for performing replacements on the file.
"""
log = get_logger()
if os.path.exists(dst):
# already exists
log.debug("%s already exists" % dst)
return False
log.info("Copying %s -> %s" % (src, dst))
ensure_dir_exists(os.path.dirname(dst))
shutil.copy(src, dst)
if substitutions:
with open(dst) as f:
text = f.read()
for pat, replacement in substitutions.items():
text = pat.sub(replacement, text)
with open(dst, 'w') as f:
f.write(text)
return True
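# Illustrative only: copy a notebook config while applying the regex
# substitutions defined above, e.g.
#
#   migrate_file(
#       pjoin(get_ipython_dir(), 'profile_default', 'ipython_notebook_config.py'),
#       pjoin(jupyter_config_dir(), 'jupyter_notebook_config.py'),
#       substitutions=config_substitutions,
#   )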
def migrate_one(src, dst):
"""Migrate one item
dispatches to migrate_dir/_file
"""
log = get_logger()
if os.path.isfile(src):
return migrate_file(src, dst)
elif os.path.isdir(src):
return migrate_dir(src, dst)
else:
log.debug("Nothing to migrate for %s" % src)
return False
def migrate_static_custom(src, dst):
"""Migrate non-empty custom.js,css from src to dst
src, dst are 'custom' directories containing custom.{js,css}
"""
log = get_logger()
migrated = False
custom_js = pjoin(src, 'custom.js')
custom_css = pjoin(src, 'custom.css')
# check if custom_js is empty:
custom_js_empty = True
if os.path.isfile(custom_js):
with open(custom_js) as f:
js = f.read().strip()
for line in js.splitlines():
if not (
line.isspace()
or line.strip().startswith(('/*', '*', '//'))
):
custom_js_empty = False
break
# check if custom_css is empty:
custom_css_empty = True
if os.path.isfile(custom_css):
with open(custom_css) as f:
css = f.read().strip()
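        # rough heuristic: the file counts as empty when, after stripping,
        # it is a single /* ... */ block comment, as in the shipped default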
custom_css_empty = css.startswith('/*') and css.endswith('*/')
if custom_js_empty:
log.debug("Ignoring empty %s" % custom_js)
if custom_css_empty:
log.debug("Ignoring empty %s" % custom_css)
if custom_js_empty and custom_css_empty:
# nothing to migrate
return False
    ensure_dir_exists(dst)
if not custom_js_empty:
if migrate_file(custom_js, pjoin(dst, 'custom.js')):
migrated = True
if not custom_css_empty:
if migrate_file(custom_css, pjoin(dst, 'custom.css')):
migrated = True
return migrated
def migrate_config(name, env):
"""Migrate a config file
Includes substitutions for updated configurable names.
"""
log = get_logger()
src_base = pjoin('{profile}', 'ipython_{name}_config').format(name=name, **env)
dst_base = pjoin('{jupyter_config}', 'jupyter_{name}_config').format(name=name, **env)
loaders = {
'.py': PyFileConfigLoader,
'.json': JSONFileConfigLoader,
}
migrated = []
for ext in ('.py', '.json'):
src = src_base + ext
dst = dst_base + ext
if os.path.exists(src):
cfg = loaders[ext](src).load_config()
if cfg:
if migrate_file(src, dst, substitutions=config_substitutions):
migrated.append(src)
else:
# don't migrate empty config files
log.debug("Not migrating empty config file: %s" % src)
return migrated
def migrate():
"""Migrate IPython configuration to Jupyter"""
env = {
'jupyter_data': jupyter_data_dir(),
'jupyter_config': jupyter_config_dir(),
'ipython_dir': get_ipython_dir(),
'profile': os.path.join(get_ipython_dir(), 'profile_default'),
}
migrated = False
for src_t, dst_t in migrations.items():
src = src_t.format(**env)
dst = dst_t.format(**env)
if os.path.exists(src):
if migrate_one(src, dst):
migrated = True
for name in config_migrations:
if migrate_config(name, env):
migrated = True
custom_src = custom_src_t.format(**env)
custom_dst = custom_dst_t.format(**env)
if os.path.exists(custom_src):
if migrate_static_custom(custom_src, custom_dst):
migrated = True
# write a marker to avoid re-running migration checks
ensure_dir_exists(env['jupyter_config'])
with open(os.path.join(env['jupyter_config'], 'migrated'), 'w') as f:
f.write(datetime.utcnow().isoformat())
return migrated
class JupyterMigrate(JupyterApp):
name = 'jupyter-migrate'
description = """
Migrate configuration and data from .ipython prior to 4.0 to Jupyter locations.
This migrates:
- config files in the default profile
- kernels in ~/.ipython/kernels
- notebook javascript extensions in ~/.ipython/extensions
- custom.js/css to .jupyter/custom
to their new Jupyter locations.
All files are copied, not moved.
If the destinations already exist, nothing will be done.
"""
def start(self):
if not migrate():
self.log.info("Found nothing to migrate.")
main = JupyterMigrate.launch_instance
if __name__ == '__main__':
main()
|
|
# coding: utf-8
"""
UsersApi.py
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class UsersApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_users(self, document, **kwargs):
"""
Create some admin-users
Create one or more admin-users.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_users(document, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param Users document: Create a document by sending the paths to be added in the request body. (required)
:param str select: Select which paths will be returned by the query. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#select)
:param str populate: Specify which paths to populate. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#populate)
:param str sort: Set the fields by which to sort. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#sort)
:return: Users
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['document', 'select', 'populate', 'sort']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_users" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'document' is set
if ('document' not in params) or (params['document'] is None):
raise ValueError("Missing the required parameter `document` when calling `create_users`")
resource_path = '/admin-users'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'select' in params:
query_params['select'] = params['select']
if 'populate' in params:
query_params['populate'] = params['populate']
if 'sort' in params:
query_params['sort'] = params['sort']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'document' in params:
body_params = params['document']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'text/html'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey', 'basic']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Users',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_users_by_id(self, id, **kwargs):
"""
Delete a _users by its unique ID
Deletes an existing _users by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_users_by_id(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The identifier of the resource. (required)
:param str select: Select which paths will be returned by the query. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#select)
:param str populate: Specify which paths to populate. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#populate)
:return: Users
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'select', 'populate']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_users_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_users_by_id`")
resource_path = '/admin-users/{id}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'select' in params:
query_params['select'] = params['select']
if 'populate' in params:
query_params['populate'] = params['populate']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'text/html'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey', 'basic']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Users',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_users_by_query(self, **kwargs):
"""
Delete some admin-users by query
Delete all admin-users matching the specified query.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_users_by_query(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str select: Select which paths will be returned by the query. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#select)
:param str populate: Specify which paths to populate. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#populate)
:param str sort: Set the fields by which to sort. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#sort)
:param int skip: How many documents to skip. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#skip)
:param int limit: The maximum number of documents to send. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#limit)
:param str conditions: Set the conditions used to find or remove the document(s). [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#conditions)
:param str distinct: Set to a path name to retrieve an array of distinct values. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#distinct)
:param str hint: Add an index hint to the query (must be enabled per controller). [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#hint)
:param str comment: Add a comment to a query (must be enabled per controller). [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#comment)
:return: list[Users]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['select', 'populate', 'sort', 'skip', 'limit', 'conditions', 'distinct', 'hint', 'comment']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_users_by_query" % key
)
params[key] = val
del params['kwargs']
resource_path = '/admin-users'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'select' in params:
query_params['select'] = params['select']
if 'populate' in params:
query_params['populate'] = params['populate']
if 'sort' in params:
query_params['sort'] = params['sort']
if 'skip' in params:
query_params['skip'] = params['skip']
if 'limit' in params:
query_params['limit'] = params['limit']
if 'conditions' in params:
query_params['conditions'] = params['conditions']
if 'distinct' in params:
query_params['distinct'] = params['distinct']
if 'hint' in params:
query_params['hint'] = params['hint']
if 'comment' in params:
query_params['comment'] = params['comment']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'text/html'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey', 'basic']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Users]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_users_by_id(self, id, **kwargs):
"""
Get a _users by its unique ID
Retrieve a _users by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_users_by_id(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The identifier of the resource. (required)
:param str select: Select which paths will be returned by the query. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#select)
:param str populate: Specify which paths to populate. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#populate)
:return: Users
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'select', 'populate']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_users_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_users_by_id`")
resource_path = '/admin-users/{id}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'select' in params:
query_params['select'] = params['select']
if 'populate' in params:
query_params['populate'] = params['populate']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'text/html'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey', 'basic']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Users',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def query_users(self, **kwargs):
"""
Query some admin-users
Query over admin-users.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.query_users(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str select: Select which paths will be returned by the query. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#select)
:param str populate: Specify which paths to populate. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#populate)
:param str sort: Set the fields by which to sort. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#sort)
:param bool count: Set to true to return count instead of documents. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#count)
:param int skip: How many documents to skip. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#skip)
:param int limit: The maximum number of documents to send. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#limit)
:param str conditions: Set the conditions used to find or remove the document(s). [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#conditions)
:param str distinct: Set to a path name to retrieve an array of distinct values. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#distinct)
:param str hint: Add an index hint to the query (must be enabled per controller). [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#hint)
:param str comment: Add a comment to a query (must be enabled per controller). [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#comment)
:return: list[Users]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['select', 'populate', 'sort', 'count', 'skip', 'limit', 'conditions', 'distinct', 'hint', 'comment']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method query_users" % key
)
params[key] = val
del params['kwargs']
resource_path = '/admin-users'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'select' in params:
query_params['select'] = params['select']
if 'populate' in params:
query_params['populate'] = params['populate']
if 'sort' in params:
query_params['sort'] = params['sort']
if 'count' in params:
query_params['count'] = params['count']
if 'skip' in params:
query_params['skip'] = params['skip']
if 'limit' in params:
query_params['limit'] = params['limit']
if 'conditions' in params:
query_params['conditions'] = params['conditions']
if 'distinct' in params:
query_params['distinct'] = params['distinct']
if 'hint' in params:
query_params['hint'] = params['hint']
if 'comment' in params:
query_params['comment'] = params['comment']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'text/html'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey', 'basic']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Users]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def update_users(self, id, document, **kwargs):
"""
Modify a _users by its unique ID
Update an existing _users by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_users(id, document, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The identifier of the resource. (required)
:param Users document: Update a document by sending the paths to be updated in the request body. (required)
:param str select: Select which paths will be returned by the query. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#select)
:param str populate: Specify which paths to populate. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#populate)
:param str x_baucis_update_operator: **BYPASSES VALIDATION** May be used with PUT to update the document using $push, $pull, or $set. [doc](https://github.com/wprl/baucis/wiki/HTTP-Headers)
:return: Users
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'document', 'select', 'populate', 'x_baucis_update_operator']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_users" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_users`")
# verify the required parameter 'document' is set
if ('document' not in params) or (params['document'] is None):
raise ValueError("Missing the required parameter `document` when calling `update_users`")
resource_path = '/admin-users/{id}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'select' in params:
query_params['select'] = params['select']
if 'populate' in params:
query_params['populate'] = params['populate']
header_params = {}
if 'x_baucis_update_operator' in params:
header_params['X-Baucis-Update-Operator'] = params['x_baucis_update_operator']
form_params = []
local_var_files = {}
body_params = None
if 'document' in params:
body_params = params['document']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'text/html'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey', 'basic']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Users',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
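# --- illustrative usage sketch (not part of the generated client) ---
# Assumes the package's default Configuration/ApiClient and a server
# exposing the /admin-users resource; field names are hypothetical.
#
#   from pprint import pprint
#   api = UsersApi()
#   users = api.query_users(limit=10, sort='-createdAt')
#   pprint(users)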
|
|
from pandac.PandaModules import *
from otp.margins.WhisperPopup import WhisperPopup
from otp.nametag.NametagConstants import CFQuicktalker, CFPageButton, CFQuitButton, CFSpeech, CFThought, CFTimeout
from otp.chat import ChatGarbler
import string
from direct.task import Task
from otp.otpbase import OTPLocalizer
from otp.speedchat import SCDecoders
from direct.showbase import PythonUtil
from otp.avatar import DistributedAvatar
import time
from otp.avatar import Avatar, PlayerBase
from otp.chat import TalkAssistant
from otp.otpbase import OTPGlobals
from otp.avatar.Avatar import teleportNotify
from otp.distributed.TelemetryLimited import TelemetryLimited
from otp.ai.MagicWordGlobal import *
if base.config.GetBool('want-chatfilter-hacks', 0):
from otp.switchboard import badwordpy
import os
badwordpy.init(os.environ.get('OTP') + '\\src\\switchboard\\', '')
class DistributedPlayer(DistributedAvatar.DistributedAvatar, PlayerBase.PlayerBase, TelemetryLimited):
TeleportFailureTimeout = 60.0
chatGarbler = ChatGarbler.ChatGarbler()
def __init__(self, cr):
try:
self.DistributedPlayer_initialized
except:
self.DistributedPlayer_initialized = 1
DistributedAvatar.DistributedAvatar.__init__(self, cr)
PlayerBase.PlayerBase.__init__(self)
TelemetryLimited.__init__(self)
self.__teleportAvailable = 0
self.inventory = None
self.experience = None
self.friendsList = []
self.oldFriendsList = None
self.timeFriendsListChanged = None
self.ignoreList = []
self.lastFailedTeleportMessage = {}
self._districtWeAreGeneratedOn = None
self.DISLname = ''
self.DISLid = 0
self.adminAccess = 0
self.autoRun = 0
self.whiteListEnabled = base.config.GetBool('whitelist-chat-enabled', 1)
return
@staticmethod
def GetPlayerGenerateEvent():
return 'DistributedPlayerGenerateEvent'
@staticmethod
def GetPlayerNetworkDeleteEvent():
return 'DistributedPlayerNetworkDeleteEvent'
@staticmethod
def GetPlayerDeleteEvent():
return 'DistributedPlayerDeleteEvent'
def networkDelete(self):
DistributedAvatar.DistributedAvatar.networkDelete(self)
messenger.send(self.GetPlayerNetworkDeleteEvent(), [self])
def disable(self):
DistributedAvatar.DistributedAvatar.disable(self)
messenger.send(self.GetPlayerDeleteEvent(), [self])
def delete(self):
try:
self.DistributedPlayer_deleted
except:
self.DistributedPlayer_deleted = 1
del self.experience
if self.inventory:
self.inventory.unload()
del self.inventory
DistributedAvatar.DistributedAvatar.delete(self)
def generate(self):
DistributedAvatar.DistributedAvatar.generate(self)
def announceGenerate(self):
DistributedAvatar.DistributedAvatar.announceGenerate(self)
messenger.send(self.GetPlayerGenerateEvent(), [self])
def setLocation(self, parentId, zoneId):
DistributedAvatar.DistributedAvatar.setLocation(self, parentId, zoneId)
if not (parentId in (0, None) and zoneId in (0, None)):
if not self.cr._isValidPlayerLocation(parentId, zoneId):
self.cr.disableDoId(self.doId)
self.cr.deleteObject(self.doId)
return None
def isGeneratedOnDistrict(self, districtId = None):
if districtId is None:
return self._districtWeAreGeneratedOn is not None
else:
return self._districtWeAreGeneratedOn == districtId
return
def getArrivedOnDistrictEvent(self, districtId = None):
if districtId is None:
return 'arrivedOnDistrict'
else:
return 'arrivedOnDistrict-%s' % districtId
return
def arrivedOnDistrict(self, districtId):
curFrameTime = globalClock.getFrameTime()
if hasattr(self, 'frameTimeWeArrivedOnDistrict') and curFrameTime == self.frameTimeWeArrivedOnDistrict:
if districtId == 0 and self._districtWeAreGeneratedOn:
                self.notify.warning('ignoring arrivedOnDistrict 0, since arrivedOnDistrict %d occurred on the same frame' % self._districtWeAreGeneratedOn)
return
self._districtWeAreGeneratedOn = districtId
self.frameTimeWeArrivedOnDistrict = globalClock.getFrameTime()
messenger.send(self.getArrivedOnDistrictEvent(districtId))
messenger.send(self.getArrivedOnDistrictEvent())
def setLeftDistrict(self):
self._districtWeAreGeneratedOn = None
return
def hasParentingRules(self):
if self is localAvatar:
return True
def setAccountName(self, accountName):
self.accountName = accountName
def setSystemMessage(self, aboutId, chatString, whisperType = WhisperPopup.WTSystem):
self.displayWhisper(aboutId, chatString, whisperType)
def displayWhisper(self, fromId, chatString, whisperType):
print 'Whisper type %s from %s: %s' % (whisperType, fromId, chatString)
def displayWhisperPlayer(self, playerId, chatString, whisperType):
print 'WhisperPlayer type %s from %s: %s' % (whisperType, playerId, chatString)
def whisperSCTo(self, msgIndex, sendToId, toPlayer):
if toPlayer:
base.cr.playerFriendsManager.sendSCWhisper(sendToId, msgIndex)
elif sendToId not in base.cr.doId2do:
messenger.send('wakeup')
base.cr.ttrFriendsManager.d_whisperSCTo(sendToId, msgIndex)
else:
messenger.send('wakeup')
self.sendUpdate('setWhisperSCFrom', [self.doId, msgIndex], sendToId)
def setWhisperSCFrom(self, fromId, msgIndex):
handle = base.cr.identifyAvatar(fromId)
if handle == None:
return
if base.cr.avatarFriendsManager.checkIgnored(fromId):
self.d_setWhisperIgnored(fromId)
return
if fromId in self.ignoreList:
self.d_setWhisperIgnored(fromId)
return
chatString = SCDecoders.decodeSCStaticTextMsg(msgIndex)
if chatString:
self.displayWhisper(fromId, chatString, WhisperPopup.WTQuickTalker)
base.talkAssistant.receiveAvatarWhisperSpeedChat(TalkAssistant.SPEEDCHAT_NORMAL, msgIndex, fromId)
return
def whisperSCCustomTo(self, msgIndex, sendToId, toPlayer):
if toPlayer:
base.cr.playerFriendsManager.sendSCCustomWhisper(sendToId, msgIndex)
return
if sendToId not in base.cr.doId2do:
messenger.send('wakeup')
base.cr.ttrFriendsManager.d_whisperSCCustomTo(sendToId, msgIndex)
return
messenger.send('wakeup')
self.sendUpdate('setWhisperSCCustomFrom', [self.doId, msgIndex], sendToId)
def _isValidWhisperSource(self, source):
return True
def setWhisperSCCustomFrom(self, fromId, msgIndex):
handle = base.cr.identifyAvatar(fromId)
if handle == None:
return
if not self._isValidWhisperSource(handle):
self.notify.warning('displayWhisper from non-toon %s' % fromId)
return
if base.cr.avatarFriendsManager.checkIgnored(fromId):
self.d_setWhisperIgnored(fromId)
return
if fromId in self.ignoreList:
self.d_setWhisperIgnored(fromId)
return
chatString = SCDecoders.decodeSCCustomMsg(msgIndex)
if chatString:
self.displayWhisper(fromId, chatString, WhisperPopup.WTQuickTalker)
base.talkAssistant.receiveAvatarWhisperSpeedChat(TalkAssistant.SPEEDCHAT_CUSTOM, msgIndex, fromId)
return
def whisperSCEmoteTo(self, emoteId, sendToId, toPlayer):
print 'whisperSCEmoteTo %s %s %s' % (emoteId, sendToId, toPlayer)
if toPlayer:
base.cr.playerFriendsManager.sendSCEmoteWhisper(sendToId, emoteId)
return
if sendToId not in base.cr.doId2do:
messenger.send('wakeup')
base.cr.ttrFriendsManager.d_whisperSCEmoteTo(sendToId, emoteId)
return
messenger.send('wakeup')
self.sendUpdate('setWhisperSCEmoteFrom', [self.doId, emoteId], sendToId)
def setWhisperSCEmoteFrom(self, fromId, emoteId):
handle = base.cr.identifyAvatar(fromId)
if handle == None:
return
if base.cr.avatarFriendsManager.checkIgnored(fromId):
self.d_setWhisperIgnored(fromId)
return
chatString = SCDecoders.decodeSCEmoteWhisperMsg(emoteId, handle.getName())
if chatString:
self.displayWhisper(fromId, chatString, WhisperPopup.WTEmote)
base.talkAssistant.receiveAvatarWhisperSpeedChat(TalkAssistant.SPEEDCHAT_EMOTE, emoteId, fromId)
return
def d_setWhisperIgnored(self, sendToId):
pass
def setChatAbsolute(self, chatString, chatFlags, dialogue = None, interrupt = 1, quiet = 0):
DistributedAvatar.DistributedAvatar.setChatAbsolute(self, chatString, chatFlags, dialogue, interrupt)
if not quiet:
pass
def b_setChat(self, chatString, chatFlags):
if self.cr.wantMagicWords and len(chatString) > 0 and chatString[0] == '~':
messenger.send('magicWord', [chatString])
else:
if base.config.GetBool('want-chatfilter-hacks', 0):
if base.config.GetBool('want-chatfilter-drop-offending', 0):
if badwordpy.test(chatString):
return
else:
chatString = badwordpy.scrub(chatString)
messenger.send('wakeup')
self.setChatAbsolute(chatString, chatFlags)
self.d_setChat(chatString, chatFlags)
def d_setChat(self, chatString, chatFlags):
self.sendUpdate('setChat', [chatString, chatFlags, 0])
def setTalk(self, fromAV, fromAC, avatarName, chat, mods, flags):
newText, scrubbed = self.scrubTalk(chat, mods)
self.displayTalk(newText)
if base.talkAssistant.isThought(newText):
newText = base.talkAssistant.removeThoughtPrefix(newText)
base.talkAssistant.receiveThought(fromAV, avatarName, fromAC, None, newText, scrubbed)
else:
base.talkAssistant.receiveOpenTalk(fromAV, avatarName, fromAC, None, newText, scrubbed)
return
def setTalkWhisper(self, fromAV, fromAC, avatarName, chat, mods, flags):
newText, scrubbed = self.scrubTalk(chat, mods)
self.displayTalkWhisper(fromAV, avatarName, chat, mods)
base.talkAssistant.receiveWhisperTalk(fromAV, avatarName, fromAC, None, self.doId, self.getName(), newText, scrubbed)
return
def displayTalkWhisper(self, fromId, avatarName, chatString, mods):
print 'TalkWhisper from %s: %s' % (fromId, chatString)
    def scrubTalk(self, chat, mods):
        # subclasses override this; callers expect a (newText, scrubbed) pair
        return chat, False
def setChat(self, chatString, chatFlags, DISLid):
self.notify.error('Should call setTalk')
chatString = base.talkAssistant.whiteListFilterMessage(chatString)
if base.cr.avatarFriendsManager.checkIgnored(self.doId):
return
if base.localAvatar.garbleChat and not self.isUnderstandable():
chatString = self.chatGarbler.garble(self, chatString)
chatFlags &= ~(CFQuicktalker | CFPageButton | CFQuitButton)
if chatFlags & CFThought:
chatFlags &= ~(CFSpeech | CFTimeout)
else:
chatFlags |= CFSpeech | CFTimeout
self.setChatAbsolute(chatString, chatFlags)
def b_setSC(self, msgIndex):
self.setSC(msgIndex)
self.d_setSC(msgIndex)
def d_setSC(self, msgIndex):
messenger.send('wakeup')
self.sendUpdate('setSC', [msgIndex])
def setSC(self, msgIndex):
if base.cr.avatarFriendsManager.checkIgnored(self.doId):
return
if self.doId in base.localAvatar.ignoreList:
return
chatString = SCDecoders.decodeSCStaticTextMsg(msgIndex)
if chatString:
self.setChatAbsolute(chatString, CFSpeech | CFQuicktalker | CFTimeout, quiet=1)
base.talkAssistant.receiveOpenSpeedChat(TalkAssistant.SPEEDCHAT_NORMAL, msgIndex, self.doId)
def b_setSCCustom(self, msgIndex):
self.setSCCustom(msgIndex)
self.d_setSCCustom(msgIndex)
def d_setSCCustom(self, msgIndex):
messenger.send('wakeup')
self.sendUpdate('setSCCustom', [msgIndex])
def setSCCustom(self, msgIndex):
if base.cr.avatarFriendsManager.checkIgnored(self.doId):
return
if self.doId in base.localAvatar.ignoreList:
return
chatString = SCDecoders.decodeSCCustomMsg(msgIndex)
if chatString:
self.setChatAbsolute(chatString, CFSpeech | CFQuicktalker | CFTimeout)
base.talkAssistant.receiveOpenSpeedChat(TalkAssistant.SPEEDCHAT_CUSTOM, msgIndex, self.doId)
def b_setSCEmote(self, emoteId):
self.b_setEmoteState(emoteId, animMultiplier=self.animMultiplier)
def d_friendsNotify(self, avId, status):
self.sendUpdate('friendsNotify', [avId, status])
def friendsNotify(self, avId, status):
avatar = base.cr.identifyFriend(avId)
if avatar != None:
if status == 1:
self.setSystemMessage(avId, OTPLocalizer.WhisperNoLongerFriend % avatar.getName())
elif status == 2:
self.setSystemMessage(avId, OTPLocalizer.WhisperNowSpecialFriend % avatar.getName())
return
def d_teleportQuery(self, requesterId, sendToId = None):
if sendToId in base.cr.doId2do:
teleportNotify.debug('sending teleportQuery%s' % ((requesterId, sendToId),))
self.sendUpdate('teleportQuery', [requesterId], sendToId)
else:
teleportNotify.debug('sending TTRFM teleportQuery%s' % ((requesterId, sendToId),))
base.cr.ttrFriendsManager.d_teleportQuery(sendToId)
def teleportQuery(self, requesterId):
        teleportNotify.debug('received teleportQuery(%s)' % requesterId)
avatar = base.cr.playerFriendsManager.identifyFriend(requesterId)
if avatar != None:
teleportNotify.debug('avatar is not None')
if base.cr.avatarFriendsManager.checkIgnored(requesterId):
teleportNotify.debug('avatar ignored via avatarFriendsManager')
self.d_teleportResponse(self.doId, 2, 0, 0, 0, sendToId=requesterId)
return
if requesterId in self.ignoreList:
teleportNotify.debug('avatar ignored via ignoreList')
self.d_teleportResponse(self.doId, 2, 0, 0, 0, sendToId=requesterId)
return
if hasattr(base, 'distributedParty'):
if base.distributedParty.partyInfo.isPrivate:
if requesterId not in base.distributedParty.inviteeIds:
teleportNotify.debug('avatar not in inviteeIds')
self.d_teleportResponse(self.doId, 0, 0, 0, 0, sendToId=requesterId)
return
if base.distributedParty.isPartyEnding:
teleportNotify.debug('party is ending')
self.d_teleportResponse(self.doId, 0, 0, 0, 0, sendToId=requesterId)
return
if self.__teleportAvailable and not self.ghostMode and base.config.GetBool('can-be-teleported-to', 1):
teleportNotify.debug('teleport initiation successful')
self.setSystemMessage(requesterId, OTPLocalizer.WhisperComingToVisit % avatar.getName())
messenger.send('teleportQuery', [avatar, self])
return
teleportNotify.debug('teleport initiation failed')
if self.failedTeleportMessageOk(requesterId):
self.setSystemMessage(requesterId, OTPLocalizer.WhisperFailedVisit % avatar.getName())
teleportNotify.debug('sending try-again-later message')
self.d_teleportResponse(self.doId, 0, 0, 0, 0, sendToId=requesterId)
return
def failedTeleportMessageOk(self, fromId):
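# Throttle repeated "failed to visit" whispers: a new failure message
# from the same avatar is only shown after TeleportFailureTimeout
# seconds have elapsed.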
now = globalClock.getFrameTime()
lastTime = self.lastFailedTeleportMessage.get(fromId, None)
if lastTime != None:
elapsed = now - lastTime
if elapsed < self.TeleportFailureTimeout:
return 0
self.lastFailedTeleportMessage[fromId] = now
return 1
def d_teleportResponse(self, avId, available, shardId, hoodId, zoneId, sendToId = None):
teleportNotify.debug('sending teleportResponse%s' % ((avId, available, shardId, hoodId, zoneId, sendToId),))
self.sendUpdate('teleportResponse', [avId, available, shardId, hoodId, zoneId], sendToId)
def teleportResponse(self, avId, available, shardId, hoodId, zoneId):
teleportNotify.debug('received teleportResponse%s' % ((avId, available, shardId, hoodId, zoneId),))
messenger.send('teleportResponse', [avId, available, shardId, hoodId, zoneId])
def d_teleportGiveup(self, requesterId, sendToId = None):
teleportNotify.debug('sending teleportGiveup(%s) to %s' % (requesterId, sendToId))
self.sendUpdate('teleportGiveup', [requesterId], sendToId)
def teleportGiveup(self, requesterId):
teleportNotify.debug('received teleportGiveup(%s)' % (requesterId,))
avatar = base.cr.identifyAvatar(requesterId)
if not self._isValidWhisperSource(avatar):
self.notify.warning('teleportGiveup from non-toon %s' % requesterId)
return
if avatar != None:
self.setSystemMessage(requesterId, OTPLocalizer.WhisperGiveupVisit % avatar.getName())
return
def b_teleportGreeting(self, avId):
self.d_teleportGreeting(avId)
self.teleportGreeting(avId)
def d_teleportGreeting(self, avId):
self.sendUpdate('teleportGreeting', [avId])
def teleportGreeting(self, avId):
avatar = base.cr.getDo(avId)
if isinstance(avatar, Avatar.Avatar):
self.setChatAbsolute(OTPLocalizer.TeleportGreeting % avatar.getName(), CFSpeech | CFTimeout)
elif avatar is not None:
self.notify.warning('got teleportGreeting from %s referencing non-toon %s' % (self.doId, avId))
return
def setTeleportAvailable(self, available):
self.__teleportAvailable = available
def getTeleportAvailable(self):
return self.__teleportAvailable
def getFriendsList(self):
return self.friendsList
def setFriendsList(self, friendsList):
self.oldFriendsList = self.friendsList
self.friendsList = friendsList
self.timeFriendsListChanged = globalClock.getFrameTime()
messenger.send('friendsListChanged')
Avatar.reconsiderAllUnderstandable()
def setDISLname(self, name):
self.DISLname = name
def setDISLid(self, id):
self.DISLid = id
def setAdminAccess(self, access):
self.adminAccess = access
if self.isLocal():
self.cr.wantMagicWords = self.adminAccess >= MINIMUM_MAGICWORD_ACCESS
def getAdminAccess(self):
return self.adminAccess
def setAutoRun(self, value):
self.autoRun = value
def getAutoRun(self):
return self.autoRun
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing volumes.
"""
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.usage import quotas
from openstack_dashboard.dashboards.project.volumes \
.volumes import forms as project_forms
from openstack_dashboard.dashboards.project.volumes \
.volumes import tables as project_tables
from openstack_dashboard.dashboards.project.volumes \
.volumes import tabs as project_tabs
class DetailView(tabs.TabView):
tab_group_class = project_tabs.VolumeDetailTabs
template_name = 'project/volumes/volumes/detail.html'
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
context["volume"] = self.get_data()
return context
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
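# Augment each attachment record with its full Nova server object so
# the template can display instance details rather than bare IDs.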
for att in volume.attachments:
att['instance'] = api.nova.server_get(self.request,
att['server_id'])
except Exception:
redirect = reverse('horizon:project:volumes:index')
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=redirect)
return volume
def get_tabs(self, request, *args, **kwargs):
volume = self.get_data()
return self.tab_group_class(request, volume=volume, **kwargs)
class CreateView(forms.ModalFormView):
form_class = project_forms.CreateForm
template_name = 'project/volumes/volumes/create.html'
success_url = reverse_lazy('horizon:project:volumes:volumes_tab')
def get_context_data(self, **kwargs):
context = super(CreateView, self).get_context_data(**kwargs)
try:
context['usages'] = quotas.tenant_limit_usages(self.request)
except Exception:
exceptions.handle(self.request)
return context
class ExtendView(forms.ModalFormView):
form_class = project_forms.ExtendForm
template_name = 'project/volumes/volumes/extend.html'
success_url = reverse_lazy("horizon:project:volumes:index")
def get_object(self):
if not hasattr(self, "_object"):
volume_id = self.kwargs['volume_id']
try:
self._object = cinder.volume_get(self.request, volume_id)
except Exception:
self._object = None
exceptions.handle(self.request,
_('Unable to retrieve volume information.'))
return self._object
def get_context_data(self, **kwargs):
context = super(ExtendView, self).get_context_data(**kwargs)
context['volume'] = self.get_object()
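# Report usage net of this volume's current size, so the quota check
# applies only to the requested size increase.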
try:
usages = quotas.tenant_limit_usages(self.request)
usages['gigabytesUsed'] = (usages['gigabytesUsed']
- context['volume'].size)
context['usages'] = usages
except Exception:
exceptions.handle(self.request)
return context
def get_initial(self):
volume = self.get_object()
return {'id': self.kwargs['volume_id'],
'name': volume.name,
'orig_size': volume.size}
class CreateSnapshotView(forms.ModalFormView):
form_class = project_forms.CreateSnapshotForm
template_name = 'project/volumes/volumes/create_snapshot.html'
success_url = reverse_lazy('horizon:project:volumes:snapshots_tab')
def get_context_data(self, **kwargs):
context = super(CreateSnapshotView, self).get_context_data(**kwargs)
context['volume_id'] = self.kwargs['volume_id']
try:
volume = cinder.volume_get(self.request, context['volume_id'])
if (volume.status == 'in-use'):
context['attached'] = True
context['form'].set_warning(_("This volume is currently "
"attached to an instance. "
"In some cases, creating a "
"snapshot from an attached "
"volume can result in a "
"corrupted snapshot."))
context['usages'] = quotas.tenant_limit_usages(self.request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume information.'))
return context
def get_initial(self):
return {'volume_id': self.kwargs["volume_id"]}
class UpdateView(forms.ModalFormView):
form_class = project_forms.UpdateForm
template_name = 'project/volumes/volumes/update.html'
success_url = reverse_lazy("horizon:project:volumes:index")
def get_object(self):
if not hasattr(self, "_object"):
vol_id = self.kwargs['volume_id']
try:
self._object = cinder.volume_get(self.request, vol_id)
except Exception:
msg = _('Unable to retrieve volume.')
url = reverse('horizon:project:volumes:index')
exceptions.handle(self.request, msg, redirect=url)
return self._object
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
context['volume'] = self.get_object()
return context
def get_initial(self):
volume = self.get_object()
return {'volume_id': self.kwargs["volume_id"],
'name': volume.name,
'description': volume.description}
class EditAttachmentsView(tables.DataTableView, forms.ModalFormView):
table_class = project_tables.AttachmentsTable
form_class = project_forms.AttachForm
template_name = 'project/volumes/volumes/attach.html'
success_url = reverse_lazy("horizon:project:volumes:index")
@memoized.memoized_method
def get_object(self):
volume_id = self.kwargs['volume_id']
try:
return cinder.volume_get(self.request, volume_id)
except Exception:
self._object = None
exceptions.handle(self.request,
_('Unable to retrieve volume information.'))
def get_data(self):
attachments = []
volume = self.get_object()
if volume is not None:
for att in volume.attachments:
att['volume_name'] = getattr(volume, 'name', att['device'])
attachments.append(att)
return attachments
def get_initial(self):
try:
instances, has_more = api.nova.server_list(self.request)
except Exception:
instances = []
exceptions.handle(self.request,
_("Unable to retrieve attachment information."))
return {'volume': self.get_object(),
'instances': instances}
@memoized.memoized_method
def get_form(self):
form_class = self.get_form_class()
return super(EditAttachmentsView, self).get_form(form_class)
def get_context_data(self, **kwargs):
context = super(EditAttachmentsView, self).get_context_data(**kwargs)
context['form'] = self.get_form()
volume = self.get_object()
if volume and volume.status == 'available':
context['show_attach'] = True
else:
context['show_attach'] = False
context['volume'] = volume
if self.request.is_ajax():
context['hide'] = True
return context
def get(self, request, *args, **kwargs):
# Table action handling
handled = self.construct_tables()
if handled:
return handled
return self.render_to_response(self.get_context_data(**kwargs))
def post(self, request, *args, **kwargs):
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.get(request, *args, **kwargs)
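# A minimal urls.py sketch (illustrative only; the pattern regexes and
# names below are assumptions, not part of this module) showing how
# these class-based views are typically wired up in Horizon:
#
#     from django.conf.urls import patterns, url
#     from openstack_dashboard.dashboards.project.volumes.volumes import views
#
#     urlpatterns = patterns('',
#         url(r'^create/$', views.CreateView.as_view(), name='create'),
#         url(r'^(?P<volume_id>[^/]+)/$', views.DetailView.as_view(), name='detail'),
#         url(r'^(?P<volume_id>[^/]+)/extend/$', views.ExtendView.as_view(), name='extend'),
#     )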
|
|
# -*- coding: utf-8 -*-
"""Mappings between the ordering of PyFR nodes, and those of external formats
"""
import numpy as np
class GmshNodeMaps(object):
"""Mappings between the node ordering of PyFR and that of Gmsh
Node mappings are contained within two dictionaries; one maps from
Gmsh node ordering to PyFR, and the other provides the inverse.
Dictionary items are keyed by a tuple of element type (string) and
number of solution points per element (integer).
Each dictionary value is an array of integers that provide mappings
via their index. When arrays in the "to_pyfr" dictionary are indexed
using the Gmsh node number, they return the equivalent PyFR node
number. The reverse is true for the "from_pyfr" dictionary.
:Example: Convert Gmsh node number 4, in a 64 point hexahedron, to
the equivalent node number in PyFR:
>>> from pyfr.readers.nodemaps import GmshNodeMaps
>>> GmshNodeMaps.to_pyfr['hex', 64][4]
48
"""
to_pyfr = {
('tet', 4): np.array([0, 1, 2, 3]),
('tet', 10): np.array([0, 2, 5, 9, 1, 4, 3, 6, 8, 7]),
('tet', 20): np.array([0, 3, 9, 19, 1, 2, 6, 8, 7, 4, 16, 10, 18, 15,
17, 12, 5, 11, 13, 14]),
('tet', 35): np.array([0, 4, 14, 34, 1, 2, 3, 8, 11, 13, 12, 9, 5, 31,
25, 15, 33, 30, 24, 32, 27, 18, 6, 10, 7, 16,
17, 26, 19, 28, 22, 29, 21, 23, 20]),
('tet', 56): np.array([0, 5, 20, 55, 1, 2, 3, 4, 10, 14, 17, 19, 18,
15, 11, 6, 52, 46, 36, 21, 54, 51, 45, 35, 53,
48, 39, 25, 7, 16, 9, 12, 13, 8, 22, 24, 47,
23, 38, 37, 26, 49, 33, 40, 43, 30, 50, 29, 34,
42, 32, 44, 27, 28, 31, 41]),
('tet', 84): np.array([0, 6, 27, 83, 1, 2, 3, 4, 5, 12, 17, 21, 24,
26, 25, 22, 18, 13, 7, 80, 74, 64, 49, 28, 82,
79, 73, 63, 48, 81, 76, 67, 53, 33, 8, 23, 11,
14, 19, 20, 16, 10, 9, 15, 29, 32, 75, 30, 31,
52, 66, 65, 50, 51, 34, 77, 46, 54, 68, 71, 61,
43, 39, 58, 78, 38, 47, 70, 57, 42, 45, 62, 72,
60, 35, 37, 44, 69, 36, 41, 40, 55, 59, 56]),
('pri', 6): np.array([0, 1, 2, 3, 4, 5]),
('pri', 18): np.array([0, 2, 5, 12, 14, 17, 1, 3, 6, 4, 8, 11, 13, 15,
16, 7, 9, 10]),
('pri', 40): np.array([0, 3, 9, 30, 33, 39, 1, 2, 4, 7, 10, 20, 6, 8,
13, 23, 19, 29, 31, 32, 34, 37, 36, 38, 5, 35,
11, 12, 22, 21, 14, 24, 27, 17, 16, 18, 28, 26,
15, 25]),
('pri', 75): np.array([0, 4, 14, 60, 64, 74, 1, 2, 3, 5, 9, 12, 15,
30, 45, 8, 11, 13, 19, 34, 49, 29, 44, 59, 61,
62, 63, 65, 69, 72, 68, 71, 73, 6, 10, 7, 66,
67, 70, 16, 18, 48, 46, 17, 33, 47, 31, 32, 20,
50, 57, 27, 35, 54, 42, 24, 39, 23, 28, 58, 53,
26, 43, 56, 38, 41, 21, 51, 36, 22, 52, 37, 25,
55, 40]),
('pri', 126): np.array([0, 5, 20, 105, 110, 125, 1, 2, 3, 4, 6, 11,
15, 18, 21, 42, 63, 84, 10, 14, 17, 19, 26,
47, 68, 89, 41, 62, 83, 104, 106, 107, 108,
109, 111, 116, 120, 123, 115, 119, 122, 124,
7, 16, 9, 12, 13, 8, 112, 114, 121, 113, 118,
117, 22, 25, 88, 85, 23, 24, 46, 67, 87, 86,
64, 43, 44, 45, 66, 65, 27, 90, 102, 39, 48,
69, 95, 99, 81, 60, 36, 32, 53, 74, 78, 57,
31, 40, 103, 94, 35, 38, 61, 82, 101, 98, 73,
52, 56, 59, 80, 77, 28, 91, 49, 70, 30, 93,
51, 72, 37, 100, 58, 79, 29, 92, 50, 71, 34,
97, 55, 76, 33, 96, 54, 75]),
('pri', 196): np.array([0, 6, 27, 168, 174, 195, 1, 2, 3, 4, 5, 7, 13,
18, 22, 25, 28, 56, 84, 112, 140, 12, 17, 21,
24, 26, 34, 62, 90, 118, 146, 55, 83, 111,
139, 167, 169, 170, 171, 172, 173, 175, 181,
186, 190, 193, 180, 185, 189, 192, 194, 8, 23,
11, 14, 19, 20, 16, 10, 9, 15, 176, 179, 191,
177, 178, 184, 188, 187, 182, 183, 29, 33,
145, 141, 30, 31, 32, 61, 89, 117, 144, 143,
142, 113, 85, 57, 58, 60, 116, 114, 59, 88,
115, 86, 87, 35, 147, 165, 53, 63, 91, 119,
153, 158, 162, 137, 109, 81, 50, 46, 41, 69,
125, 134, 78, 97, 130, 106, 74, 102, 40, 54,
166, 152, 45, 49, 52, 82, 110, 138, 164, 161,
157, 124, 96, 68, 73, 80, 136, 129, 77, 108,
133, 101, 105, 36, 148, 64, 92, 120, 39, 151,
67, 95, 123, 51, 163, 79, 107, 135, 37, 149,
65, 93, 121, 38, 150, 66, 94, 122, 44, 156,
72, 100, 128, 48, 160, 76, 104, 132, 47, 159,
75, 103, 131, 42, 154, 70, 98, 126, 43, 155,
71, 99, 127]),
('pyr', 5): np.array([0, 1, 3, 2, 4]),
('pyr', 14): np.array([0, 2, 8, 6, 13, 1, 3, 9, 5, 10, 7, 12, 11, 4]),
('pyr', 30): np.array([0, 3, 15, 12, 29, 1, 2, 4, 8, 16, 25, 7, 11,
18, 26, 14, 13, 24, 28, 22, 27, 17, 19, 21, 23,
5, 9, 10, 6, 20]),
('pyr', 55): np.array([0, 4, 24, 20, 54, 1, 2, 3, 5, 10, 15, 25, 41,
50, 9, 14, 19, 28, 43, 51, 23, 22, 21, 40, 49,
53, 37, 47, 52, 26, 27, 42, 33, 29, 44, 32, 36,
46, 39, 38, 48, 6, 16, 18, 8, 11, 17, 13, 7,
12, 30, 31, 35, 34, 45]),
('pyr', 91): np.array([0, 5, 35, 30, 90, 1, 2, 3, 4, 6, 12, 18, 24,
36, 61, 77, 86, 11, 17, 23, 29, 40, 64, 79,
87, 34, 33, 32, 31, 60, 76, 85, 89, 56, 73,
83, 88, 37, 39, 78, 38, 63, 62, 51, 41, 80,
46, 65, 69, 45, 55, 82, 50, 72, 68, 59, 57,
84, 58, 74, 75, 7, 25, 28, 10, 13, 19, 26, 27,
22, 16, 9, 8, 14, 20, 21, 15, 42, 44, 54, 52,
81, 43, 47, 66, 49, 67, 53, 71, 70, 48]),
('pyr', 140): np.array([0, 6, 48, 42, 139, 1, 2, 3, 4, 5, 7, 14, 21,
28, 35, 49, 85, 110, 126, 135, 13, 20, 27, 34,
41, 54, 89, 113, 128, 136, 47, 46, 45, 44, 43,
84, 109, 125, 134, 138, 79, 105, 122, 132,
137, 50, 53, 127, 51, 52, 88, 112, 111, 86,
87, 73, 55, 129, 67, 61, 90, 114, 118, 100,
95, 60, 78, 131, 66, 72, 104, 121, 117, 94,
99, 83, 80, 133, 82, 81, 106, 123, 124, 108,
107, 8, 36, 40, 12, 15, 22, 29, 37, 38, 39,
33, 26, 19, 11, 10, 9, 16, 30, 32, 18, 23, 31,
25, 17, 24, 56, 59, 77, 74, 130, 57, 58, 62,
68, 91, 115, 65, 71, 93, 116, 76, 75, 103,
120, 101, 119, 92, 96, 98, 102, 63, 69, 70,
64, 97]),
('hex', 8): np.array([0, 1, 3, 2, 4, 5, 7, 6]),
('hex', 27): np.array([0, 2, 8, 6, 18, 20, 26, 24, 1, 3, 9, 5, 11, 7,
17, 15, 19, 21, 23, 25, 4, 10, 12, 14, 16, 22,
13]),
('hex', 64): np.array([0, 3, 15, 12, 48, 51, 63, 60, 1, 2, 4, 8, 16,
32, 7, 11, 19, 35, 14, 13, 31, 47, 28, 44, 49,
50, 52, 56, 55, 59, 62, 61, 5, 9, 10, 6, 17, 18,
34, 33, 20, 36, 40, 24, 23, 27, 43, 39, 30, 29,
45, 46, 53, 54, 58, 57, 21, 22, 26, 25, 37, 38,
42, 41]),
('hex', 125): np.array([0, 4, 24, 20, 100, 104, 124, 120, 1, 2, 3, 5,
10, 15, 25, 50, 75, 9, 14, 19, 29, 54, 79, 23,
22, 21, 49, 74, 99, 45, 70, 95, 101, 102, 103,
105, 110, 115, 109, 114, 119, 123, 122, 121, 6,
16, 18, 8, 11, 17, 13, 7, 12, 26, 28, 78, 76,
27, 53, 77, 51, 52, 30, 80, 90, 40, 55, 85, 65,
35, 60, 34, 44, 94, 84, 39, 69, 89, 59, 64, 48,
46, 96, 98, 47, 71, 97, 73, 72, 106, 108, 118,
116, 107, 113, 117, 111, 112, 31, 33, 43, 41,
81, 83, 93, 91, 32, 36, 56, 38, 58, 42, 68, 66,
82, 86, 88, 92, 37, 57, 61, 63, 67, 87, 62]),
('hex', 216): np.array([0, 5, 35, 30, 180, 185, 215, 210, 1, 2, 3, 4,
6, 12, 18, 24, 36, 72, 108, 144, 11, 17, 23,
29, 41, 77, 113, 149, 34, 33, 32, 31, 71, 107,
143, 179, 66, 102, 138, 174, 181, 182, 183,
184, 186, 192, 198, 204, 191, 197, 203, 209,
214, 213, 212, 211, 7, 25, 28, 10, 13, 19, 26,
27, 22, 16, 9, 8, 14, 20, 21, 15, 37, 40, 148,
145, 38, 39, 76, 112, 147, 146, 109, 73, 74,
75, 111, 110, 42, 150, 168, 60, 78, 114, 156,
162, 132, 96, 54, 48, 84, 120, 126, 90, 47, 65,
173, 155, 53, 59, 101, 137, 167, 161, 119, 83,
89, 95, 131, 125, 70, 67, 175, 178, 69, 68,
103, 139, 176, 177, 142, 106, 105, 104, 140,
141, 187, 190, 208, 205, 188, 189, 196, 202,
207, 206, 199, 193, 194, 195, 201, 200, 43, 46,
64, 61, 151, 154, 172, 169, 44, 45, 49, 55, 79,
115, 52, 58, 82, 118, 63, 62, 100, 136, 97,
133, 152, 153, 157, 163, 160, 166, 171, 170,
50, 56, 57, 51, 80, 81, 117, 116, 85, 121, 127,
91, 88, 94, 130, 124, 99, 98, 134, 135, 158,
159, 165, 164, 86, 87, 93, 92, 122, 123, 129,
128]),
('tri', 3): np.array([0, 1, 2]),
('tri', 6): np.array([0, 2, 5, 1, 4, 3]),
('tri', 10): np.array([0, 3, 9, 1, 2, 6, 8, 7, 4, 5]),
('tri', 15): np.array([0, 4, 14, 1, 2, 3, 8, 11, 13, 12, 9, 5, 6, 7,
10]),
('tri', 21): np.array([0, 5, 20, 1, 2, 3, 4, 10, 14, 17, 19, 18, 15,
11, 6, 7, 8, 9, 13, 16, 12]),
('quad', 4): np.array([0, 1, 3, 2]),
('quad', 9): np.array([0, 2, 8, 6, 1, 5, 7, 3, 4]),
('quad', 16): np.array([0, 3, 15, 12, 1, 2, 7, 11, 14, 13, 8, 4, 5, 6,
10, 9]),
('quad', 25): np.array([0, 4, 24, 20, 1, 2, 3, 9, 14, 19, 23, 22, 21,
15, 10, 5, 6, 8, 18, 16, 7, 13, 17, 11, 12]),
('quad', 36): np.array([0, 5, 35, 30, 1, 2, 3, 4, 11, 17, 23, 29, 34,
33, 32, 31, 24, 18, 12, 6, 7, 10, 28, 25, 8, 9,
16, 22, 27, 26, 19, 13, 14, 15, 21, 20])
}
from_pyfr = {k: np.argsort(v) for k, v in to_pyfr.items()}
class CGNSNodeMaps(object):
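"""
Mappings between the node ordering of PyFR and that of CGNS, keyed
and indexed in the same way as GmshNodeMaps above.
"""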
to_pyfr = {
('tet', 4): np.array([0, 1, 2, 3]),
('tet', 10): np.array([0, 2, 5, 9, 1, 4, 3, 6, 7, 8]),
('tet', 20): np.array([0, 3, 9, 19, 1, 2, 6, 8, 7, 4, 10, 16, 12, 17,
15, 18, 5, 11, 14, 13]),
('tet', 35): np.array([0, 4, 14, 34, 1, 2, 3, 8, 11, 13, 12, 9, 5, 15,
25, 31, 18, 27, 32, 24, 30, 33, 6, 7, 10, 16,
17, 26, 21, 23, 29, 22, 19, 28, 20]),
('pri', 6): np.array([0, 1, 2, 3, 4, 5]),
('pri', 18): np.array([0, 2, 5, 12, 14, 17, 1, 4, 3, 6, 8, 11, 13, 16,
15, 7, 10, 9]),
('pri', 40): np.array([0, 3, 9, 30, 33, 39, 1, 2, 6, 8, 7, 4, 10, 20,
13, 23, 19, 29, 31, 32, 36, 38, 37, 34, 5, 11,
12, 22, 21, 16, 18, 28, 26, 17, 14, 24, 27, 35,
15, 25]),
('pri', 75): np.array([0, 4, 14, 60, 64, 74, 1, 2, 3, 8, 11, 13, 12, 9,
5, 15, 30, 45, 19, 34, 49, 29, 44, 59, 61, 62,
63, 68, 71, 73, 72, 69, 65, 6, 7, 10, 16, 17,
18, 33, 48, 47, 46, 31, 32, 23, 26, 28, 43, 58,
56, 53, 38, 41, 27, 24, 20, 35, 50, 54, 57, 42,
39, 66, 67, 70, 21, 22, 25, 36, 37, 40, 51, 52,
55]),
('pyr', 5): np.array([0, 1, 3, 2, 4]),
('pyr', 14): np.array([0, 2, 8, 6, 13, 1, 5, 7, 3, 9, 10, 12, 11, 4]),
('pyr', 30): np.array([0, 3, 15, 12, 29, 1, 2, 7, 11, 14, 13, 8, 4, 16,
25, 18, 26, 24, 28, 22, 27, 5, 6, 10, 9, 17, 21,
23, 19, 20]),
('pyr', 55): np.array([0, 4, 24, 20, 54, 1, 2, 3, 9, 14, 19, 23, 22,
21, 15, 10, 5, 25, 41, 50, 28, 43, 51, 40, 49,
53, 37, 47, 52, 6, 7, 8, 13, 18, 17, 16, 11,
12, 26, 27, 42, 32, 36, 46, 39, 38, 48, 33, 29,
44, 30, 31, 35, 34, 45]),
('hex', 8): np.array([0, 1, 3, 2, 4, 5, 7, 6]),
('hex', 27): np.array([0, 2, 8, 6, 18, 20, 26, 24, 1, 5, 7, 3, 9, 11,
17, 15, 19, 23, 25, 21, 4, 10, 14, 16, 12, 22,
13]),
('hex', 64): np.array([0, 3, 15, 12, 48, 51, 63, 60, 1, 2, 7, 11, 14,
13, 8, 4, 16, 32, 19, 35, 31, 47, 28, 44, 49,
50, 55, 59, 62, 61, 56, 52, 5, 6, 10, 9, 17, 18,
34, 33, 23, 27, 43, 39, 30, 29, 45, 46, 24, 20,
36, 40, 53, 54, 58, 57, 21, 22, 26, 25, 37, 38,
42, 41]),
('hex', 125): np.array([0, 4, 24, 20, 100, 104, 124, 120, 1, 2, 3, 9,
14, 19, 23, 22, 21, 15, 10, 5, 25, 50, 75, 29,
54, 79, 49, 74, 99, 45, 70, 95, 101, 102, 103,
109, 114, 119, 123, 122, 121, 115, 110, 105, 6,
7, 8, 13, 18, 17, 16, 11, 12, 26, 27, 28, 53,
78, 77, 76, 51, 52, 34, 39, 44, 69, 94, 89, 84,
59, 64, 48, 47, 46, 71, 96, 97, 98, 73, 72, 40,
35, 30, 55, 80, 85, 90, 65, 60, 106, 107, 108,
113, 118, 117, 116, 111, 112, 31, 32, 33, 38,
43, 42, 41, 36, 37, 56, 57, 58, 63, 68, 67, 66,
61, 62, 81, 82, 83, 88, 93, 92, 91, 86, 87]),
('tri', 3): np.array([0, 1, 2]),
('tri', 6): np.array([0, 2, 5, 1, 4, 3]),
('tri', 10): np.array([0, 3, 9, 1, 2, 6, 8, 7, 4, 5]),
('tri', 15): np.array([0, 4, 14, 1, 2, 3, 8, 11, 13, 12, 9, 5, 6, 7,
10]),
('quad', 4): np.array([0, 1, 3, 2]),
('quad', 9): np.array([0, 2, 8, 6, 1, 5, 7, 3, 4]),
('quad', 16): np.array([0, 3, 15, 12, 1, 2, 7, 11, 14, 13, 8, 4, 5, 6,
10, 9]),
('quad', 25): np.array([0, 4, 24, 20, 1, 2, 3, 9, 14, 19, 23, 22, 21,
15, 10, 5, 6, 8, 18, 16, 7, 13, 17, 11, 12])
}
from_pyfr = {k: np.argsort(v) for k, v in to_pyfr.items()}
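# A minimal self-check sketch (this __main__ guard is an addition for
# illustration, not part of the original module): each map value is a
# permutation, so from_pyfr (its argsort) is the inverse permutation,
# and composing the two must yield the identity ordering.
if __name__ == '__main__':
    for maps in (GmshNodeMaps, CGNSNodeMaps):
        for key, fwd in maps.to_pyfr.items():
            inv = maps.from_pyfr[key]
            assert (fwd[inv] == np.arange(len(fwd))).all()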
|
|
# -*- coding: utf-8 -*-
#Created on Sat Sep 20 11:23:30 2014
#@author: breedlu
import matplotlib as _mpl
import matplotlib.pyplot as _plt
import numpy as _np
from matplotlib.lines import Line2D as _mpl_Line2D
from matplotlib.patches import Polygon as _mpl_Polygon
import matplotlib.text as _mpl_text
import clearplot as _cp
from clearplot import axes as _axes
from clearplot import color_bar as _color_bar
from clearplot import utilities as _utl
class Figure(object):
"""
Figure class
"""
def __init__(self, size = None, dpmm = _cp.params.dpmm):
"""
Instantiates a figure object
Parameters
----------
size: list or tuple, optional
Width and height of figure in mm
dpmm: float, optional
Dots per mm
"""
self._ui_size = size
if size is None:
size_inch = _np.array(_mpl.rcParams['figure.figsize'])
else:
size_inch = _np.array(size) / 25.4
self.mpl_fig = _plt.figure(figsize = size_inch, dpi = dpmm * 25.4)
#Force the figure to be on top of all other windows
self.put_window_on_top()
#Set the figure renderer to None
self._renderer = None
#Set the number of mm to pad the tight bbox
self.tight_bbox_pad = _mpl.rcParams['savefig.pad_inches'] * 25.4
#Initialize the axes container
self.axes = []
#Initialize the color_bar container
self.color_bars = []
def add_axes(self, **kwargs):
"""
Adds a set of generic axes to the figure
Parameters
----------
Keyword arguments get passed to axes.Axes()
Returns
-------
ax : axes object
"""
ax = _axes.Axes(self, **kwargs)
return(ax)
def add_invisible_axes(self, **kwargs):
"""
Adds a set of invisible axes to the figure.
Parameters
----------
Keyword arguments get passed to axes.Invisible_Axes()
Returns
-------
ax : invisible axes object
"""
ax = _axes.Invisible_Axes(self, **kwargs)
return(ax)
# def add_axes_grid(self, n_row = 1, n_col = 1, position = None, **kwargs):
# ax_list = []
# for i in range(n_row):
# if i == 0:
# position = None
# else:
# ax_upper_left = ax_list[0].position
# ax_upper_left[1] = ax_upper_left[1] + ax_list[0].size[1]
# trans = _utl.Offset_From(ax_list[i-1], [0,1], 'mm')
# position = [0, 20]
# ax_list.append(_axes.Axes(self, position, transform = trans, **kwargs))
#
# self.axes.extend(ax_list)
# return(ax_list)
@property
def tight_bbox(self):
"""
Get/set the tight bounding box for the figure, in mm.
"""
#A draw is needed before querying the tight bbox; otherwise the
#reported extents can be stale
self.draw()
bbox_inches = self.mpl_fig.get_tightbbox(self.renderer)
# #NOTE: this is a blatant copy from print_figure() in matplotlib's
# #backend_bases.py, but I made a few changes:
# #1) The padding was changed to mm units.
# #2) The bbox width and height are checked to make sure they are not
# # infinity.
# bbox_inches = self.mpl_fig.get_tightbbox(self.renderer)
# bbox_artists = self.mpl_fig.get_default_bbox_extra_artists()
# bbox_filtered = []
# for a in bbox_artists:
# bbox = a.get_window_extent(self.renderer)
# if a.get_clip_on():
# clip_box = a.get_clip_box()
# if clip_box is not None:
# bbox = _mpl.transforms.Bbox.intersection(bbox, clip_box)
# clip_path = a.get_clip_path()
# if clip_path is not None and bbox is not None:
# clip_path = clip_path.get_fully_transformed_path()
# bbox = _mpl.transforms.Bbox.intersection(bbox,
# clip_path.get_extents())
# if bbox is not None and (bbox.width != 0 or bbox.height != 0) and \
# _np.abs(bbox.width) != _np.inf and \
# _np.abs(bbox.height) != _np.inf:
# bbox_filtered.append(bbox)
# if bbox_filtered:
# _bbox = _mpl.transforms.Bbox.union(bbox_filtered)
# trans = _mpl.transforms.Affine2D().scale(1.0 / self.mpl_fig.dpi)
# bbox_extra = _mpl.transforms.TransformedBbox(_bbox, trans)
# bbox_inches = _mpl.transforms.Bbox.union([bbox_inches, bbox_extra])
# bbox_inches = bbox_inches.padded(self.tight_bbox_pad/25.4)
# #This is the end of the copy from matplotlib's print_figure()
#Convert the bbox from inches to mm
trans = _mpl.transforms.Affine2D()
trans.scale(25.4, 25.4)
bbox = bbox_inches.transformed(trans)
return(bbox)
@property
def mm_to_pix_trans(self):
"""
Gets a transformation object that converts mm to pixels, relative to
the bottom left hand corner of the figure.
"""
fig_size_pix = self.mpl_fig.bbox.size
fig_size = self.size
trans = _mpl.transforms.Affine2D()
trans.scale(fig_size_pix[0] / fig_size[0], fig_size_pix[1] / fig_size[1])
return(trans)
@property
def fig_to_mm_trans(self):
"""
Gets a transformation object that converts normalized figure
coordinates to mm, relative to the bottom left hand corner of the
figure.
"""
fig_size = self.size
trans = _mpl.transforms.Affine2D()
trans.scale(fig_size[0], fig_size[1])
return(trans)
def put_window_on_top(self):
"""
Places the figure window on top of all other windows if interactive
mode is on.
"""
if hasattr(self.mpl_fig.canvas.manager, 'window') and _plt.isinteractive():
#Force the figure to be on top of all other windows
if hasattr(self.mpl_fig.canvas.manager.window, 'attributes'):
#(The command below does not work with the MacOSX or Qt4Agg backend.
#I have only verified it works with the TkAgg backend.)
self.mpl_fig.canvas.manager.window.attributes('-topmost', 1)
#Disable this right afterwards so that other windows can be on top of the
#figure window
self.mpl_fig.canvas.manager.window.attributes('-topmost', 0)
elif hasattr(self.mpl_fig.canvas.manager.window, 'raise_'):
#(The command below does not work with the MacOSX or TkAgg backend.
#I have only verified it works with the Qt4Agg backend.)
self.mpl_fig.canvas.manager.window.raise_()
@property
def renderer(self):
"""
Gets the current renderer
Returns
-------
renderer: renderer object
"""
if self._renderer is None:
if hasattr(self.mpl_fig.canvas, "get_renderer"):
#Some backends, such as TkAgg, have the get_renderer method,
#which makes this easy.
self._renderer = self.mpl_fig.canvas.get_renderer()
elif hasattr(self.mpl_fig.canvas, "print_pdf"):
#Other backends do not have the get_renderer method, so we use a
#workaround to find the renderer: print the figure to a temporary file
#object, and then grab the renderer that was used.
#(I stole this trick from the matplotlib backend_bases.py
#print_figure() method.)
import io
self.mpl_fig.canvas.print_pdf(io.BytesIO())
self._renderer = self.mpl_fig._cachedRenderer
else:
self._renderer = self.mpl_fig.canvas.renderer
return(self._renderer)
#Old method that gives the incorrect renderer
#renderer = _mpl.backend_bases.RendererBase()
#For further information, see this post:
#http://stackoverflow.com/questions/22667224/matplotlib-get-text-bounding-box-independent-of-backend/22689498#22689498
def add_text(self, x, txt, **kwargs):
"""
Adds text to the figure window
Parameters
----------
x: 1x2 numpy array
Coordinates of the text in mm
txt: string
Text to add to the figure
kwargs:
Keyword arguments to matplotlib's text function
Returns
-------
txt_obj: text object
See Also
--------
ax.add_text : adds text to axes
ax.annotate : adds an annotation to the axes
"""
#Convert into normalized figure coordinates
fig_size = self.size
x_nfc = _np.array(x) / fig_size
#Add text to the figure
txt_obj = self.mpl_fig.text(x_nfc[0], x_nfc[1], txt, **kwargs)
return(txt_obj)
def add_line(self, x, **kwargs):
"""
Adds a 2D line to the figure
Parameters
----------
x: 2x2 numpy array
Coordinates of the line in mm. First row contains the first point,
and the second row contains the second point.
kwargs:
Keyword arguments to matplotlib's Line2D class
Returns
-------
line_obj: matplotlib line object
See Also
--------
ax.add_line : adds line to axes
ax.annotate : adds an annotation to the axes
"""
#Convert the coordinates into normalized figure coordinates
fig_size = self.size
x_nfc = _np.array(x)
x_nfc[:,0] = x_nfc[:,0] / fig_size[0]
x_nfc[:,1] = x_nfc[:,1] / fig_size[1]
#Create the line object
line_obj = _mpl_Line2D(x_nfc[:,0], x_nfc[:,1], transform = self.mpl_fig.transFigure, **kwargs)
#Add the line to the figure
self.mpl_fig.lines.append(line_obj)
return(line_obj)
def add_polygon(self, x, **kwargs):
"""
Adds a polygon to the figure
Parameters
----------
x: Nx2 numpy array
Coordinates of the polygon in mm. First row contains the first
point, the second row contains the second point, etc.
kwargs:
Keyword arguments to matplotlib's Polygon class
Returns
-------
patch_obj: matplotlib patch object
"""
#Convert the coordinates into normalized figure coordinates
fig_size = self.size
x_nfc = _np.array(x)
x_nfc[:,0] = x_nfc[:,0] / fig_size[0]
x_nfc[:,1] = x_nfc[:,1] / fig_size[1]
#Create the polygon object
patch_obj = _mpl_Polygon(x_nfc, transform = self.mpl_fig.transFigure, **kwargs)
#Add the polygon to the figure
self.mpl_fig.patches.append(patch_obj)
return(patch_obj)
def add_color_bar(self, data_obj, **kwargs):
"""
Places a color bar in the specified figure
Parameters
----------
data_obj : data object
Object that the color bar pertains to.
Other Parameters
----------------
See parameters in Color_Bar()
"""
bar = _color_bar.Color_Bar(data_obj, **kwargs)
label = kwargs.pop('label', None)
if label is not None:
#Make sure the label is in the correct format
label = _utl.adjust_depth(label, 1)
#Apply the label
bar.label = label
return(bar)
def get_obj_bbox(self, obj):
"""
Finds the coordinates of the bounding box surrounding an object, in mm,
relative to the lower left corner of the figure window
Parameters
----------
obj: graphics primitive object
object you wish to get the bounding box for
Returns
-------
bbox: bounding box object
"""
#Since many objects are defined relative to other objects, it is
#important to draw the object. Otherwise, a call to
#obj.get_window_extent can give incorrect values.
obj.draw(self.renderer)
#Get dimensions of object in pixel units
bbox_pix = obj.get_window_extent(self.renderer)
#Convert to mm units
bbox = bbox_pix.transformed(self.mm_to_pix_trans.inverted())
#For some reason, the bounding box for LaTeX text is too tight in the
#vertical direction. All other text bounding boxes have some padding,
#so LaTeX bounding boxes need to be adjusted.
if _mpl.rcParams['text.usetex']:
if obj.findobj(match=_mpl_text.Text) and obj.get_text() != '':
#Set the amount of adjustment (in mm)
y_adj = 0.7
#Shift the text position up or down, as needed
#(This assumes that position is defined in mm)
[x, y] = obj.get_position()
if obj.get_va() == 'bottom':
y = y + y_adj
elif obj.get_va() == 'top':
y = y - y_adj
obj.set_position([x,y])
#Get the new bounding box
bbox_pix = obj.get_window_extent(self._renderer)
#Convert to mm units
bbox = bbox_pix.transformed(self.mm_to_pix_trans.inverted())
#Adjust the new bounding box
bbox = bbox.from_extents(bbox.x0, bbox.y0 - y_adj, \
bbox.x1, bbox.y1 + y_adj)
# #Verify that you have captured the bounding box
# from matplotlib.patches import Rectangle
# fig_size = self.size
# rect = Rectangle([bbox.x0/fig_size[0], bbox.y0/fig_size[1]], \
# bbox.width/fig_size[0], bbox.height/fig_size[1], \
# linewidth = 0.75, color = [0,0,0], fill = False, \
# transform = self.mpl_fig.transFigure)
# self.mpl_fig.patches.append(rect)
# self.draw()
return(bbox)
@property
def size(self):
"""
Get/set the figure size in mm. When changing the figure size, the
figure content is left the same size.
"""
size = self.mpl_fig.get_size_inches() * 25.4
return(size)
#This method was created because matplotlib's fig.set_size_inches()
#automatically scales the content of the figure window. In clearplot, we
#wish to change the figure window size while the figure content remains
#the same size. See https://stackoverflow.com/questions/25396766/matplotlib-resize-figure-window-without-scaling-figure-content
#for more info.
@size.setter
def size(self, size):
#Get the size of the figure before changing it
#(The _np.copy() command is needed to keep the size data from updating
#when we change the figure size.)
old_fig_size = _np.copy(self.size)
#Change the size of the figure
size_inch = _np.array(size) / 25.4
self.mpl_fig.set_size_inches(size_inch, forward = True)
# #We must do a draw() command because drawing can cause the figure
# #window size to change slightly, so requests for the figure size
# #would be wrong.
# self.draw()
#Scale the figure content back down to its original size
#Get the current size of the figure window
new_fig_size = self.size
sf = new_fig_size / old_fig_size
#Cycle through the figure content and scale them back down to their
#original sizes
#Any axes that share a x-axis or y-axis will both be updated
#simultaneously when either one is scaled. This means we must collect
#all the axes positions first and then scale each of them second.
pos_list = []
for mpl_ax in self.mpl_fig.axes:
pos_list.append(mpl_ax.get_position())
for mpl_ax, pos in zip(self.mpl_fig.axes, pos_list):
mpl_ax.set_position([pos.x0 / sf[0], pos.y0 / sf[1], \
pos.width / sf[0], pos.height / sf[1]])
for text in self.mpl_fig.texts:
pos = _np.array(text.get_position())
text.set_position(pos / sf)
for line in self.mpl_fig.lines:
x = line.get_xdata()
y = line.get_ydata()
line.set_xdata(x / sf[0])
line.set_ydata(y / sf[1])
for patch in self.mpl_fig.patches:
xy = patch.get_xy()
patch.set_xy(xy / sf)
def auto_adjust_layout(self, pad = _cp.params.auto_layout_pad):
"""
If the figure size and axes positions have not been explicitly
specified, this method will resize the figure window and move the
content to center all the content inside the window. If the content
positions within the window have been explicitly specified, then the
figure size will be adjusted, but the content positions relative to the
lower left corner of the window will remain the same.
Parameters
----------
pad : float
Padding on edges of figure, in mm.
"""
#Only adjust figure if the size has not been specified
if self._ui_size is None:
fig_bbox = self.tight_bbox
#Collect the user input axes positions
ui_ax_pos_auto = []
for ax in self.axes:
ui_ax_pos_auto.append(ax._ui_pos is None)
#Treat the colorbars as if they were axes
for bar in self.color_bars:
ui_ax_pos_auto.append(bar._ui_pos is None)
if False not in ui_ax_pos_auto:
#If the axes positions have not been specified then move the
#axes all by the same amount, and resize the figure window.
#(We must immediately reset the user input position to None
#since the position setter method assumes any input was
#supplied by the user.)
dx = - _np.array([fig_bbox.x0 - pad, fig_bbox.y0 - pad])
for ax in self.axes:
ax.position = ax.position + dx
ax._ui_pos = None
for bar in self.color_bars:
bar.position = bar.position + dx
bar._ui_pos = None
self.size = [fig_bbox.width + 2*pad, fig_bbox.height + 2*pad]
else:
#If any of the axes positions have been specified, then just
#resize the figure window. This may end up with content far
#off center, or even outside of the figure window area.
self.size = [fig_bbox.x0 + fig_bbox.width + 2*pad, \
fig_bbox.y0 + fig_bbox.height + 2*pad]
def draw(self):
"""
Convenience method that draws all the content in the figure
window.
"""
self.mpl_fig.canvas.draw()
def update(self):
"""
Convenience method that draws any object that has been added to
the figure window.
"""
try:
self.mpl_fig.canvas.update()
except AttributeError:
#Some backends have no canvas.update() method, so fall back to a full draw
self.mpl_fig.canvas.draw()
#This method is just a wrapper around the matplotlib fig.savefig() method
#It allows the user to work in mm instead of inches, and implements many
#of the defaults clearplot typically uses.
def save(self, file_name, dpmm = _cp.params.dpmm, face_color = [0,0,0,0], \
edge_color = [0,0,0,0], transparent = True, bbox = None, pad = 0):
"""
Save the current figure.
Parameters
----------
file_name : string
A string containing a path to a filename.
The output format is deduced from the extension of `file_name`.
If `file_name` has no extension, the value of the rc parameter
``savefig.format`` is used.
dpmm : [ None | float > 0 ]
The resolution in dots per mm.
face_color : [string | RGB color]
The color of the figure background
edge_color : [string | RGB color]
The color of the figure border
transparent : bool
If *True*, the axes patches will all be transparent; the
figure patch will also be transparent unless facecolor
and/or edgecolor are specified via kwargs.
This is useful, for example, for displaying
a plot on top of a colored background on a web page. The
transparency of these patches will be restored to their
original values upon exit of this function.
bbox : bounding box object
Bounding box of the region to be saved, in mm. If 'tight', then
matplotlib will try to figure out the tightest bounding box that
contains the figure content. If ``None``, then the figure window
will be saved as is.
pad : float
Amount of padding in mm around the figure when `bbox` is 'tight'.
"""
if bbox is None or bbox == 'tight':
bbox_inch = bbox
else:
bbox_inch = _np.array(bbox) / 25.4
self.mpl_fig.savefig(file_name, dpi = dpmm * 25.4, \
facecolor = face_color, edgecolor = edge_color, \
transparent = transparent, bbox_inches = bbox_inch, \
pad_inches = pad / 25.4)
def close(self):
"""
Closes the figure window
"""
_plt.close(self.mpl_fig)
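# A minimal usage sketch (assuming this module is importable as
# clearplot.figure; the file name and sizes are illustrative):
#
#     import clearplot.figure as cp_fig
#     fig = cp_fig.Figure(size=[120, 90])    #120 x 90 mm window
#     fig.add_text([10, 80], 'hello')        #position given in mm
#     fig.auto_adjust_layout()
#     fig.save('demo.pdf')                   #dpmm and colors use the defaults above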
|
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# util __init__.py
from __future__ import unicode_literals
from werkzeug.test import Client
import os, sys, re, urllib
import frappe
# utility functions like cint, int, flt, etc.
from frappe.utils.data import *
default_fields = ['doctype', 'name', 'owner', 'creation', 'modified', 'modified_by',
'parent', 'parentfield', 'parenttype', 'idx', 'docstatus']
# used in import_docs.py
# TODO: deprecate it
def getCSVelement(v):
"""
Returns the CSV-escaped value of `v`: values containing a comma,
newline, or double quote are wrapped in quotes, with embedded
quotes doubled. For example:
* apple stays apple
* hi"there becomes "hi""there"
"""
v = cstr(v)
if not v: return ''
if (',' in v) or ('\n' in v) or ('"' in v):
if '"' in v: v = v.replace('"', '""')
return '"'+v+'"'
else: return v or ''
def get_fullname(user):
"""get the full name (first name + last name) of the user from User"""
if not hasattr(frappe.local, "fullnames"):
frappe.local.fullnames = {}
if not frappe.local.fullnames.get(user):
p = frappe.db.get_value("User", user, ["first_name", "last_name"], as_dict=True)
if p:
frappe.local.fullnames[user] = " ".join(filter(None,
[p.get('first_name'), p.get('last_name')])) or user
else:
frappe.local.fullnames[user] = user
return frappe.local.fullnames.get(user)
def get_formatted_email(user):
"""get email id of user formatted as: John Doe <johndoe@example.com>"""
if user == "Administrator":
return user
from email.utils import formataddr
fullname = get_fullname(user)
return formataddr((fullname, user))
def extract_email_id(email):
"""fetch only the email part of the email id"""
from email.utils import parseaddr
fullname, email_id = parseaddr(email)
if isinstance(email_id, basestring) and not isinstance(email_id, unicode):
email_id = email_id.decode("utf-8", "ignore")
return email_id
def validate_email_add(email_str):
"""Validates the email string"""
email = extract_email_id(email_str)
match = re.match("[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?", email.lower())
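# re.match only anchors at the start of the string, so additionally
# require that the match covers the whole address before accepting it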
if not match:
return False
return match.group(0)==email.lower()
def random_string(length):
"""generate a random string"""
import string
from random import choice
return ''.join([choice(string.letters + string.digits) for i in range(length)])
def get_gravatar(email):
import md5
return "https://secure.gravatar.com/avatar/{hash}?d=retro".format(hash=md5.md5(email).hexdigest())
def get_traceback():
"""
Returns the traceback of the Exception
"""
import traceback
exc_type, value, tb = sys.exc_info()
trace_list = traceback.format_tb(tb, None) + \
traceback.format_exception_only(exc_type, value)
body = "Traceback (innermost last):\n" + "%-20s %s" % \
(unicode((b"").join(trace_list[:-1]), 'utf-8'), unicode(trace_list[-1], 'utf-8'))
if frappe.logger:
frappe.logger.error('Db:'+(frappe.db and frappe.db.cur_db_name or '') \
+ ' - ' + body)
return body
def log(event, details):
frappe.logger.info(details)
def dict_to_str(args, sep='&'):
"""
Converts a dictionary to a URL query string
"""
t = []
for k in args.keys():
t.append(str(k)+'='+urllib.quote(str(args[k] or '')))
return sep.join(t)
# Get Defaults
# ==============================================================================
def get_defaults(key=None):
"""
Get dictionary of default values from the defaults, or a value if key is passed
"""
return frappe.db.get_defaults(key)
def set_default(key, val):
"""
Set / add a default value to defaults
"""
return frappe.db.set_default(key, val)
def remove_blanks(d):
"""
Returns d with empty ('' or None) values stripped
"""
empty_keys = []
for key in d:
if d[key]=='' or d[key]==None:
# deleting keys while iterating raises a RuntimeError, so collect them first
empty_keys.append(key)
for key in empty_keys:
del d[key]
return d
def pprint_dict(d, level=1, no_blanks=True):
"""
Pretty print a dictionary with indents
"""
if no_blanks:
remove_blanks(d)
# make indent
indent, ret = '', ''
for i in range(0,level): indent += '\t'
# add lines
comment, lines = '', []
kl = d.keys()
kl.sort()
# make lines
for key in kl:
if key != '##comment':
tmp = {key: d[key]}
lines.append(indent + str(tmp)[1:-1] )
# add comment string
if '##comment' in kl:
ret = ('\n' + indent) + '# ' + d['##comment'] + '\n'
# open
ret += indent + '{\n'
# lines
ret += indent + ',\n\t'.join(lines)
# close
ret += '\n' + indent + '}'
return ret
def get_common(d1,d2):
"""
returns the list of keys whose values are identical in d1 and d2
"""
return [p for p in d1 if p in d2 and d1[p]==d2[p]]
def get_common_dict(d1, d2):
"""
return common dictionary of d1 and d2
"""
ret = {}
for key in d1:
if key in d2 and d2[key]==d1[key]:
ret[key] = d1[key]
return ret
def get_diff_dict(d1, d2):
"""
return dict of keys (with their d2 values) that are in d2 but not in d1
"""
diff_keys = set(d2.keys()).difference(set(d1.keys()))
ret = {}
for d in diff_keys: ret[d] = d2[d]
return ret
def get_file_timestamp(fn):
"""
Returns timestamp of the given file
"""
from frappe.utils import cint
try:
return str(cint(os.stat(fn).st_mtime))
except OSError, e:
if e.args[0]!=2:
raise
else:
return None
# to be deprecated
def make_esc(esc_chars):
"""
Function generator for Escaping special characters
"""
return lambda s: ''.join(['\\' + c if c in esc_chars else c for c in s])
# esc / unescape characters -- used for command line
def esc(s, esc_chars):
"""
Escape special characters
"""
if not s:
return ""
for c in esc_chars:
esc_str = '\\' + c
s = s.replace(c, esc_str)
return s
def unesc(s, esc_chars):
"""
UnEscape special characters
"""
for c in esc_chars:
esc_str = '\\' + c
s = s.replace(esc_str, c)
return s
def execute_in_shell(cmd, verbose=0):
# using Popen instead of os.system - as recommended by python docs
from subprocess import Popen
import tempfile
with tempfile.TemporaryFile() as stdout:
with tempfile.TemporaryFile() as stderr:
p = Popen(cmd, shell=True, stdout=stdout, stderr=stderr)
p.wait()
stdout.seek(0)
out = stdout.read()
stderr.seek(0)
err = stderr.read()
if verbose:
if err: print err
if out: print out
return err, out
def get_path(*path, **kwargs):
base = kwargs.get('base')
if not base:
base = frappe.local.site_path
return os.path.join(base, *path)
def get_site_base_path(sites_dir=None, hostname=None):
return frappe.local.site_path
def get_site_path(*path):
return get_path(base=get_site_base_path(), *path)
def get_files_path(*path):
return get_site_path("public", "files", *path)
def get_backups_path():
return get_site_path("private", "backups")
def get_request_site_address(full_address=False):
return get_url(full_address=full_address)
def encode_dict(d, encoding="utf-8"):
for key in d:
if isinstance(d[key], basestring) and isinstance(d[key], unicode):
d[key] = d[key].encode(encoding)
return d
def decode_dict(d, encoding="utf-8"):
for key in d:
if isinstance(d[key], basestring) and not isinstance(d[key], unicode):
d[key] = d[key].decode(encoding, "ignore")
return d
def get_site_name(hostname):
return hostname.split(':')[0]
def get_disk_usage():
"""get disk usage of files folder"""
files_path = get_files_path()
if not os.path.exists(files_path):
return 0
err, out = execute_in_shell("du -hsm {files_path}".format(files_path=files_path))
return cint(out.split("\n")[-2].split("\t")[0])
def touch_file(path):
with open(path, 'a'):
os.utime(path, None)
return True
def get_test_client():
from frappe.app import application
return Client(application)
def get_hook_method(hook_name, fallback=None):
method = (frappe.get_hooks().get(hook_name))
if method:
method = frappe.get_attr(method[0])
return method
if fallback:
return fallback
def update_progress_bar(txt, i, l):
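# Redraw a fixed-width progress bar in place: txt is padded to 36
# columns and the bar is 40 columns wide, e.g. halfway through it
# renders '<txt>: [====================                    ]'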
lt = len(txt)
if lt < 36:
txt = txt + " "*(36-lt)
complete = int(float(i+1) / l * 40)
sys.stdout.write("\r{0}: [{1}{2}]".format(txt, "="*complete, " "*(40-complete)))
sys.stdout.flush()
def get_html_format(print_path):
html_format = None
if os.path.exists(print_path):
with open(print_path, "r") as f:
html_format = f.read()
for include_directive, path in re.findall("""({% include ['"]([^'"]*)['"] %})""", html_format):
for app_name in frappe.get_installed_apps():
include_path = frappe.get_app_path(app_name, *path.split(os.path.sep))
if os.path.exists(include_path):
with open(include_path, "r") as f:
html_format = html_format.replace(include_directive, f.read())
break
return html_format
|
|
"""
This class is defined to override standard pickle functionality
The goals of it follow:
-Serialize lambdas and nested functions to compiled byte code
-Deal with main module correctly
-Deal with other non-serializable objects
It does not include an unpickler, as standard python unpickling suffices.
This module was extracted from the `cloud` package, developed by `PiCloud, Inc.
<http://www.picloud.com>`_.
Copyright (c) 2012, Regents of the University of California.
Copyright (c) 2009 `PiCloud, Inc. <http://www.picloud.com>`_.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the University of California, Berkeley nor the
names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import print_function
import operator
import os
import io
import pickle
import struct
import sys
import types
from functools import partial
import itertools
import dis
import traceback
if sys.version < '3':
from pickle import Pickler
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
PY3 = False
else:
types.ClassType = type
from pickle import _Pickler as Pickler
from io import BytesIO as StringIO
PY3 = True
#relevant opcodes
STORE_GLOBAL = dis.opname.index('STORE_GLOBAL')
DELETE_GLOBAL = dis.opname.index('DELETE_GLOBAL')
LOAD_GLOBAL = dis.opname.index('LOAD_GLOBAL')
GLOBAL_OPS = [STORE_GLOBAL, DELETE_GLOBAL, LOAD_GLOBAL]
HAVE_ARGUMENT = dis.HAVE_ARGUMENT
EXTENDED_ARG = dis.EXTENDED_ARG
def islambda(func):
return getattr(func,'__name__') == '<lambda>'
_BUILTIN_TYPE_NAMES = {}
for k, v in types.__dict__.items():
if type(v) is type:
_BUILTIN_TYPE_NAMES[v] = k
def _builtin_type(name):
return getattr(types, name)
class CloudPickler(Pickler):
dispatch = Pickler.dispatch.copy()
def __init__(self, file, protocol=None):
Pickler.__init__(self, file, protocol)
# set of modules to unpickle
self.modules = set()
# map ids to dictionary. used to ensure that functions can share global env
self.globals_ref = {}
def dump(self, obj):
self.inject_addons()
try:
return Pickler.dump(self, obj)
except RuntimeError as e:
if 'recursion' in e.args[0]:
msg = """Could not pickle object as excessively deep recursion required."""
raise pickle.PicklingError(msg)
def save_memoryview(self, obj):
"""Fallback to save_string"""
Pickler.save_string(self, str(obj))
def save_buffer(self, obj):
"""Fallback to save_string"""
Pickler.save_string(self,str(obj))
if PY3:
dispatch[memoryview] = save_memoryview
else:
dispatch[buffer] = save_buffer
def save_unsupported(self, obj):
raise pickle.PicklingError("Cannot pickle objects of type %s" % type(obj))
dispatch[types.GeneratorType] = save_unsupported
# itertools objects do not pickle!
for v in itertools.__dict__.values():
if type(v) is type:
dispatch[v] = save_unsupported
def save_module(self, obj):
"""
Save a module as an import
"""
self.modules.add(obj)
self.save_reduce(subimport, (obj.__name__,), obj=obj)
dispatch[types.ModuleType] = save_module
def save_codeobject(self, obj):
"""
Save a code object
"""
if PY3:
args = (
obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames,
obj.co_filename, obj.co_name, obj.co_firstlineno, obj.co_lnotab, obj.co_freevars,
obj.co_cellvars
)
else:
args = (
obj.co_argcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code,
obj.co_consts, obj.co_names, obj.co_varnames, obj.co_filename, obj.co_name,
obj.co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars
)
self.save_reduce(types.CodeType, args, obj=obj)
dispatch[types.CodeType] = save_codeobject
def save_function(self, obj, name=None):
""" Registered with the dispatch to handle all function types.
Determines what kind of function obj is (e.g. lambda, defined at
interactive prompt, etc) and handles the pickling appropriately.
"""
write = self.write
if name is None:
name = obj.__name__
modname = pickle.whichmodule(obj, name)
# print('which gives %s %s %s' % (modname, obj, name))
try:
themodule = sys.modules[modname]
except KeyError:
# eval'd items such as namedtuple give invalid items for their function __module__
modname = '__main__'
if modname == '__main__':
themodule = None
if themodule:
self.modules.add(themodule)
if getattr(themodule, name, None) is obj:
return self.save_global(obj, name)
# if func is lambda, def'ed at prompt, is in main, or is nested, then
# we'll pickle the actual function object rather than simply saving a
# reference (as is done in default pickler), via save_function_tuple.
if islambda(obj) or obj.__code__.co_filename == '<stdin>' or themodule is None:
#print("save global", islambda(obj), obj.__code__.co_filename, modname, themodule)
self.save_function_tuple(obj)
return
else:
# func is nested
klass = getattr(themodule, name, None)
if klass is None or klass is not obj:
self.save_function_tuple(obj)
return
if obj.__dict__:
# essentially save_reduce, but workaround needed to avoid recursion
self.save(_restore_attr)
write(pickle.MARK + pickle.GLOBAL + modname + '\n' + name + '\n')
self.memoize(obj)
self.save(obj.__dict__)
write(pickle.TUPLE + pickle.REDUCE)
else:
write(pickle.GLOBAL + modname + '\n' + name + '\n')
self.memoize(obj)
dispatch[types.FunctionType] = save_function
def save_function_tuple(self, func):
""" Pickles an actual func object.
A func comprises: code, globals, defaults, closure, and dict. We
extract and save these, injecting reducing functions at certain points
to recreate the func object. Keep in mind that some of these pieces
can contain a ref to the func itself. Thus, a naive save on these
pieces could trigger an infinite loop of save's. To get around that,
we first create a skeleton func object using just the code (this is
safe, since this won't contain a ref to the func), and memoize it as
soon as it's created. The other stuff can then be filled in later.
"""
save = self.save
write = self.write
code, f_globals, defaults, closure, dct, base_globals = self.extract_func_data(func)
save(_fill_function) # skeleton function updater
write(pickle.MARK) # beginning of tuple that _fill_function expects
# create a skeleton function object and memoize it
save(_make_skel_func)
save((code, closure, base_globals))
write(pickle.REDUCE)
self.memoize(func)
# save the rest of the func data needed by _fill_function
save(f_globals)
save(defaults)
save(dct)
save(func.__module__)
write(pickle.TUPLE)
write(pickle.REDUCE) # applies _fill_function on the tuple
@staticmethod
def extract_code_globals(co):
"""
Find all globals names read or written to by codeblock co
"""
code = co.co_code
if not PY3:
code = [ord(c) for c in code]
names = co.co_names
out_names = set()
n = len(code)
i = 0
extended_arg = 0
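# Walk the raw bytecode, decoding (opcode, argument) pairs by hand; any
# STORE_GLOBAL/DELETE_GLOBAL/LOAD_GLOBAL argument indexes co_names and
# names a global that this code object touches.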
while i < n:
op = code[i]
i += 1
if op >= HAVE_ARGUMENT:
oparg = code[i] + code[i+1] * 256 + extended_arg
extended_arg = 0
i += 2
if op == EXTENDED_ARG:
extended_arg = oparg*65536
if op in GLOBAL_OPS:
out_names.add(names[oparg])
# see if nested function have any global refs
if co.co_consts:
for const in co.co_consts:
if type(const) is types.CodeType:
out_names |= CloudPickler.extract_code_globals(const)
return out_names
def extract_func_data(self, func):
"""
Turn the function into a tuple of data necessary to recreate it:
code, globals, defaults, closure, dict
"""
code = func.__code__
# extract all global ref's
func_global_refs = self.extract_code_globals(code)
# process all variables referenced by global environment
f_globals = {}
for var in func_global_refs:
if var in func.__globals__:
f_globals[var] = func.__globals__[var]
# defaults requires no processing
defaults = func.__defaults__
# process closure
closure = [c.cell_contents for c in func.__closure__] if func.__closure__ else []
# save the dict
dct = func.__dict__
base_globals = self.globals_ref.get(id(func.__globals__), {})
self.globals_ref[id(func.__globals__)] = base_globals
return (code, f_globals, defaults, closure, dct, base_globals)
def save_builtin_function(self, obj):
if obj.__module__ == "__builtin__":
return self.save_global(obj)
return self.save_function(obj)
dispatch[types.BuiltinFunctionType] = save_builtin_function
def save_global(self, obj, name=None, pack=struct.pack):
if obj.__module__ == "__builtin__" or obj.__module__ == "builtins":
if obj in _BUILTIN_TYPE_NAMES:
return self.save_reduce(_builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj)
if name is None:
name = obj.__name__
modname = getattr(obj, "__module__", None)
if modname is None:
modname = pickle.whichmodule(obj, name)
if modname == '__main__':
themodule = None
else:
__import__(modname)
themodule = sys.modules[modname]
self.modules.add(themodule)
if hasattr(themodule, name) and getattr(themodule, name) is obj:
return Pickler.save_global(self, obj, name)
typ = type(obj)
if typ is not obj and isinstance(obj, (type, types.ClassType)):
d = dict(obj.__dict__) # copy dict proxy to a dict
if not isinstance(d.get('__dict__', None), property):
# don't extract a __dict__ entry that is actually a property
d.pop('__dict__', None)
d.pop('__weakref__', None)
# hack as __new__ is stored differently in the __dict__
new_override = d.get('__new__', None)
if new_override:
d['__new__'] = obj.__new__
# workaround for namedtuple (hijacked by PySpark)
if getattr(obj, '_is_namedtuple_', False):
self.save_reduce(_load_namedtuple, (obj.__name__, obj._fields))
return
self.save(_load_class)
self.save_reduce(typ, (obj.__name__, obj.__bases__, {"__doc__": obj.__doc__}), obj=obj)
d.pop('__doc__', None)
# handle property and staticmethod
dd = {}
for k, v in d.items():
if isinstance(v, property):
k = ('property', k)
v = (v.fget, v.fset, v.fdel, v.__doc__)
elif isinstance(v, staticmethod) and hasattr(v, '__func__'):
k = ('staticmethod', k)
v = v.__func__
elif isinstance(v, classmethod) and hasattr(v, '__func__'):
k = ('classmethod', k)
v = v.__func__
dd[k] = v
self.save(dd)
self.write(pickle.TUPLE2)
self.write(pickle.REDUCE)
else:
raise pickle.PicklingError("Can't pickle %r" % obj)
dispatch[type] = save_global
dispatch[types.ClassType] = save_global
def save_instancemethod(self, obj):
# Memoization is rarely useful here: Python creates a new bound method object on every attribute access, so the memo seldom gets a hit
if PY3:
self.save_reduce(types.MethodType, (obj.__func__, obj.__self__), obj=obj)
else:
self.save_reduce(types.MethodType, (obj.__func__, obj.__self__, obj.__self__.__class__),
obj=obj)
dispatch[types.MethodType] = save_instancemethod
def save_inst(self, obj):
"""Inner logic to save instance. Based off pickle.save_inst
Supports __transient__"""
cls = obj.__class__
memo = self.memo
write = self.write
save = self.save
if hasattr(obj, '__getinitargs__'):
args = obj.__getinitargs__()
len(args) # XXX Assert it's a sequence
pickle._keep_alive(args, memo)
else:
args = ()
write(pickle.MARK)
if self.bin:
save(cls)
for arg in args:
save(arg)
write(pickle.OBJ)
else:
for arg in args:
save(arg)
write(pickle.INST + cls.__module__ + '\n' + cls.__name__ + '\n')
self.memoize(obj)
try:
getstate = obj.__getstate__
except AttributeError:
stuff = obj.__dict__
#remove items if transient
if hasattr(obj, '__transient__'):
transient = obj.__transient__
stuff = stuff.copy()
for k in list(stuff.keys()):
if k in transient:
del stuff[k]
else:
stuff = getstate()
pickle._keep_alive(stuff, memo)
save(stuff)
write(pickle.BUILD)
if not PY3:
dispatch[types.InstanceType] = save_inst
def save_property(self, obj):
# properties are not serialized correctly by the stock pickler
self.save_reduce(property, (obj.fget, obj.fset, obj.fdel, obj.__doc__), obj=obj)
dispatch[property] = save_property
def save_itemgetter(self, obj):
"""itemgetter serializer (needed for namedtuple support)"""
class Dummy:
def __getitem__(self, item):
return item
items = obj(Dummy())
if not isinstance(items, tuple):
items = (items, )
return self.save_reduce(operator.itemgetter, items)
if type(operator.itemgetter) is type:
dispatch[operator.itemgetter] = save_itemgetter
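    # The Dummy trick above recovers the constructor arguments by probing:
    # operator.itemgetter(2, 'name') applied to Dummy() returns (2, 'name'),
    # which is exactly the tuple needed to rebuild the itemgetter.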
def save_attrgetter(self, obj):
"""attrgetter serializer"""
class Dummy(object):
def __init__(self, attrs, index=None):
self.attrs = attrs
self.index = index
def __getattribute__(self, item):
attrs = object.__getattribute__(self, "attrs")
index = object.__getattribute__(self, "index")
if index is None:
index = len(attrs)
attrs.append(item)
else:
attrs[index] = ".".join([attrs[index], item])
return type(self)(attrs, index)
attrs = []
obj(Dummy(attrs))
return self.save_reduce(operator.attrgetter, tuple(attrs))
if type(operator.attrgetter) is type:
dispatch[operator.attrgetter] = save_attrgetter
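    # Probing works for dotted attributes too: operator.attrgetter('a.b', 'c')
    # applied to Dummy([]) leaves attrs == ['a.b', 'c'], reconstructing the
    # original argument list.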
def save_reduce(self, func, args, state=None,
listitems=None, dictitems=None, obj=None):
"""Modified to support __transient__ on new objects
Change only affects protocol level 2 (which is always used by PiCloud"""
# Assert that args is a tuple or None
if not isinstance(args, tuple):
raise pickle.PicklingError("args from reduce() should be a tuple")
# Assert that func is callable
if not hasattr(func, '__call__'):
raise pickle.PicklingError("func from reduce should be callable")
save = self.save
write = self.write
# Protocol 2 special case: if func's name is __newobj__, use NEWOBJ
if self.proto >= 2 and getattr(func, "__name__", "") == "__newobj__":
#Added fix to allow transient
cls = args[0]
if not hasattr(cls, "__new__"):
raise pickle.PicklingError(
"args[0] from __newobj__ args has no __new__")
if obj is not None and cls is not obj.__class__:
raise pickle.PicklingError(
"args[0] from __newobj__ args has the wrong class")
args = args[1:]
save(cls)
#Don't pickle transient entries
if hasattr(obj, '__transient__'):
transient = obj.__transient__
state = state.copy()
for k in list(state.keys()):
if k in transient:
del state[k]
save(args)
write(pickle.NEWOBJ)
else:
save(func)
save(args)
write(pickle.REDUCE)
if obj is not None:
self.memoize(obj)
# More new special cases (that work with older protocols as
# well): when __reduce__ returns a tuple with 4 or 5 items,
# the 4th and 5th item should be iterators that provide list
# items and dict items (as (key, value) tuples), or None.
if listitems is not None:
self._batch_appends(listitems)
if dictitems is not None:
self._batch_setitems(dictitems)
if state is not None:
save(state)
write(pickle.BUILD)
def save_partial(self, obj):
"""Partial objects do not serialize correctly in python2.x -- this fixes the bugs"""
self.save_reduce(_genpartial, (obj.func, obj.args, obj.keywords))
if sys.version_info < (2,7): # 2.7 supports partial pickling
dispatch[partial] = save_partial
def save_file(self, obj):
"""Save a file"""
try:
import StringIO as pystringIO #we can't use cStringIO as it lacks the name attribute
except ImportError:
import io as pystringIO
if not hasattr(obj, 'name') or not hasattr(obj, 'mode'):
raise pickle.PicklingError("Cannot pickle files that do not map to an actual file")
if obj is sys.stdout:
return self.save_reduce(getattr, (sys,'stdout'), obj=obj)
if obj is sys.stderr:
return self.save_reduce(getattr, (sys,'stderr'), obj=obj)
if obj is sys.stdin:
raise pickle.PicklingError("Cannot pickle standard input")
if hasattr(obj, 'isatty') and obj.isatty():
raise pickle.PicklingError("Cannot pickle files that map to tty objects")
if 'r' not in obj.mode:
raise pickle.PicklingError("Cannot pickle files that are not opened for reading")
name = obj.name
try:
fsize = os.stat(name).st_size
except OSError:
raise pickle.PicklingError("Cannot pickle file %s as it cannot be stat" % name)
if obj.closed:
#create an empty closed string io
retval = pystringIO.StringIO("")
retval.close()
elif not fsize: #empty file
retval = pystringIO.StringIO("")
try:
tmpfile = open(name)  # open() rather than file(): file() exists only on Python 2
tst = tmpfile.read(1)
except IOError:
raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name)
tmpfile.close()
if tst != '':
raise pickle.PicklingError("Cannot pickle file %s as it does not appear to map to a physical, real file" % name)
else:
try:
tmpfile = open(name)
contents = tmpfile.read()
tmpfile.close()
except IOError:
raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name)
retval = pystringIO.StringIO(contents)
curloc = obj.tell()
retval.seek(curloc)
retval.name = name
self.save(retval)
self.memoize(obj)
if PY3:
dispatch[io.TextIOWrapper] = save_file
else:
dispatch[file] = save_file
"""Special functions for Add-on libraries"""
def inject_numpy(self):
numpy = sys.modules.get('numpy')
if not numpy or not hasattr(numpy, 'ufunc'):
return
self.dispatch[numpy.ufunc] = self.__class__.save_ufunc
def save_ufunc(self, obj):
"""Hack function for saving numpy ufunc objects"""
name = obj.__name__
numpy_tst_mods = ['numpy', 'scipy.special']
for tst_mod_name in numpy_tst_mods:
tst_mod = sys.modules.get(tst_mod_name, None)
if tst_mod and name in tst_mod.__dict__:
return self.save_reduce(_getobject, (tst_mod_name, name))
raise pickle.PicklingError('cannot save %s. Cannot resolve what module it is defined in'
% str(obj))
def inject_addons(self):
"""Plug in system. Register additional pickling functions if modules already loaded"""
self.inject_numpy()
# Shorthands for legacy support
def dump(obj, file, protocol=2):
CloudPickler(file, protocol).dump(obj)
def dumps(obj, protocol=2):
file = StringIO()
cp = CloudPickler(file,protocol)
cp.dump(obj)
return file.getvalue()
#hack for __import__ not working as desired
def subimport(name):
__import__(name)
return sys.modules[name]
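# Example: __import__('xml.sax') returns the top-level 'xml' package, whereas
# subimport('xml.sax') returns sys.modules['xml.sax'], the submodule itself.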
# restores function attributes
def _restore_attr(obj, attr):
for key, val in attr.items():
setattr(obj, key, val)
return obj
def _get_module_builtins():
return pickle.__builtins__
def print_exec(stream):
ei = sys.exc_info()
traceback.print_exception(ei[0], ei[1], ei[2], None, stream)
def _modules_to_main(modList):
"""Force every module in modList to be placed into main"""
if not modList:
return
main = sys.modules['__main__']
for modname in modList:
if type(modname) is str:
try:
mod = __import__(modname)
except Exception as e:
sys.stderr.write('warning: could not import %s. '
        'Your function may unexpectedly error due to this import failing; '
        'a version mismatch is likely. Specific error was:\n' % modname)
print_exec(sys.stderr)
else:
setattr(main, mod.__name__, mod)
#object generators:
def _genpartial(func, args, kwds):
if not args:
args = ()
if not kwds:
kwds = {}
return partial(func, *args, **kwds)
def _fill_function(func, globals, defaults, dict, module):
""" Fills in the rest of function data into the skeleton function object
that were created via _make_skel_func().
"""
func.__globals__.update(globals)
func.__defaults__ = defaults
func.__dict__ = dict
func.__module__ = module
return func
def _make_cell(value):
return (lambda: value).__closure__[0]
def _reconstruct_closure(values):
return tuple([_make_cell(v) for v in values])
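# _make_cell wraps a value in a real cell object by capturing it in a lambda:
#   _make_cell(42).cell_contents == 42
# so _reconstruct_closure([1, 2]) yields a tuple of two cells suitable for
# the closure argument of types.FunctionType.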
def _make_skel_func(code, closures, base_globals = None):
""" Creates a skeleton function object that contains just the provided
code and the correct number of cells in func_closure. All other
func attributes (e.g. func_globals) are empty.
"""
closure = _reconstruct_closure(closures) if closures else None
if base_globals is None:
base_globals = {}
base_globals['__builtins__'] = __builtins__
return types.FunctionType(code, base_globals,
None, None, closure)
def _load_class(cls, d):
"""
Loads additional properties into class `cls`.
"""
for k, v in d.items():
if isinstance(k, tuple):
typ, k = k
if typ == 'property':
v = property(*v)
elif typ == 'staticmethod':
v = staticmethod(v)
elif typ == 'classmethod':
v = classmethod(v)
setattr(cls, k, v)
return cls
def _load_namedtuple(name, fields):
"""
Loads a class generated by namedtuple
"""
from collections import namedtuple
return namedtuple(name, fields)
"""Constructors for 3rd party libraries
Note: These can never be renamed due to client compatibility issues"""
def _getobject(modname, attribute):
mod = __import__(modname, fromlist=[attribute])
return mod.__dict__[attribute]
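# A minimal round-trip sketch (assumes Python 2, where dumps() above returns
# a str that the stock pickle can load):
if __name__ == '__main__':
    def make_adder(n):
        def add(x):
            return x + n
        return add
    payload = dumps(make_adder(3))   # closure cells travel via _make_skel_func
    clone = pickle.loads(payload)    # _fill_function restores globals/defaults
    assert clone(4) == 7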
|
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Used to inject faults into the file replication code
#
#
# THIS IMPORT MUST COME FIRST
#
# import mainUtils FIRST to get python version check
from hawqpylib.mainUtils import *
from optparse import Option, OptionGroup, OptionParser, OptionValueError, SUPPRESS_USAGE
from gppylib.gpparseopts import OptParser, OptChecker
from gppylib.utils import toNonNoneString
from gppylib import gplog
from hawqpylib import hawqarray
from gppylib.commands import base
from gppylib.commands import unix
from gppylib.commands import gp
from gppylib.commands import pg
from gppylib.db import catalog
from gppylib.db import dbconn
from gppylib.system import configurationInterface, fileSystemInterface, osInterface
from gppylib import pgconf
from gppylib.testold.testUtils import testOutput
from gppylib.system.environment import GpMasterEnvironment
logger = gplog.get_default_logger()
#-------------------------------------------------------------------------
class HAWQInjectFaultProgram:
#
# Constructor:
#
# @param options the options as returned by the options parser
#
def __init__(self, options):
self.options = options
#
# Build the fault transition message. Fault options themselves will NOT be validated by the
# client -- the server will do that when we send the fault
#
def buildMessage(self) :
# note that we don't validate these strings -- if they contain newlines
# (and so mess up the transition protocol) then the server will error
result = ["faultInject"]
result.append(toNonNoneString(self.options.faultName))
result.append(toNonNoneString(self.options.type))
result.append(toNonNoneString(self.options.ddlStatement))
result.append(toNonNoneString(self.options.databaseName))
result.append(toNonNoneString(self.options.tableName))
result.append(toNonNoneString(self.options.numOccurrences))
result.append(toNonNoneString(self.options.sleepTimeSeconds))
return '\n'.join(result)
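    # Illustrative result (values hypothetical): for faultName='checkpoint'
    # and type='sleep', the message is the newline-joined sequence
    #   faultInject / checkpoint / sleep / <ddl> / <db> / <table> / <n> / <sleep>
    # with one field per line, in the order appended above.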
#
# build a message that will get status of the fault
#
def buildGetStatusMessage(self) :
# note that we don't validate this string; if it is malformed the server may error
result = ["getFaultInjectStatus"]
result.append(toNonNoneString(self.options.faultName))
return '\n'.join(result)
#
# return True if the segment matches the given role, False otherwise
#
def isMatchingRole(self, role, hawqdb):
hdbRole = hawqdb.getRole()
if role == "master":
return hdbRole == 'm'
elif role == "standby":
return hdbRole == 's'
elif role == "primary":
return hdbRole == 'p'
else:
raise ProgramArgumentValidationException("Invalid role specified: %s" % role)
#
# load the segments and filter to the ones we should target
#
def loadTargetSegments(self) :
targetHost = self.options.targetHost
targetRole = self.options.targetRole
targetRegistrationOrder = self.options.targetRegistrationOrder
if targetHost is None and targetRegistrationOrder is None:
raise ProgramArgumentValidationException(\
"neither --host nor --registration_order is specified. " \
"Exactly one should be specified.")
if targetHost is not None and targetRegistrationOrder is not None:
raise ProgramArgumentValidationException(\
"both --host and --registration_order are specified. " \
"Exactly one should be specified.")
if targetHost is not None and targetRole is None:
raise ProgramArgumentValidationException(\
"--role is not specified when --host is specified. " \
"Role is required when targeting a host.")
if targetRegistrationOrder is not None and targetRole is not None:
raise ProgramArgumentValidationException(\
"--role is specified when --registration_order is specified. " \
"Role should not be specified when targeting a single registration_order.")
#
# load from master db
#
masterPort = self.options.masterPort
if masterPort is None:
gpEnv = GpMasterEnvironment(self.options.masterDataDirectory, False)
masterPort = gpEnv.getMasterPort()
conf = configurationInterface.getConfigurationProvider().initializeProvider(masterPort)
hawqArray = conf.loadSystemConfig(useUtilityMode=True)
hawqdbs = hawqArray.getDbList()
#
# prune gpArray according to filter settings
#
if targetHost is not None and targetHost != "ALL":
hawqdbs = [hdb for hdb in hawqdbs if hdb.getHostName() == targetHost]
if targetRegistrationOrder is not None:
hawqdbs = hawqArray.getDbList()
regorder = int(targetRegistrationOrder)
hawqdbs = [hdb for hdb in hawqdbs if hdb.getRegistrationOrder() == regorder]
if targetRole is not None:
hawqdbs = [hdb for hdb in hawqdbs if self.isMatchingRole(targetRole, hdb)]
# error out if any targeted segment is marked down
downhawqdbs = [hdb for hdb in hawqdbs if hdb.getStatus() != 'u']
if len(downhawqdbs) > 0:
downhawqdbStr = "\n Down Segment: "
raise ExceptionNoStackTraceNeeded(
"Unable to inject fault. At least one segment is marked as down in the database.%s%s" %
(downhawqdbStr, downhawqdbStr.join([str(downhdb) for downhdb in downhawqdbs])))
print "### DEBUG: loadTargetSegments"
print "### DEBUG: HAWQDBS "
print hawqdbs
return hawqdbs
#
# write string to a temporary file that will be deleted on completion
#
def writeToTempFile(self, text):
    inputFile = fileSystemInterface.getFileSystemProvider().createNamedTemporaryFile()
    inputFile.write(text)
inputFile.flush()
return inputFile
def injectFaults(self, segments, messageText):
inputFile = self.writeToTempFile(messageText)
logger.info("Injecting fault on %d segment(s)", len(segments))
testOutput("Injecting fault on %d segment(s)" % len(segments))
# run the command in serial to each target
for segment in segments :
logger.info("Injecting fault on %s", segment)
print "### DEBUG: injectFaults -> SendFilerepTransitionMessage inputFile.name = %s" % inputFile.name
print "### DEBUG: injectFaults -> SendFilerepTransitionMessage segment.getPort = %s" % segment.getPort()
print "### DEBUG: injectFaults -> SendFilerepTransitionMessage base.LOCAL = %s" % base.LOCAL
print "### DEBUG: injectFaults -> SendFilerepTransitionMessage segment.getHostName = %s" % segment.getHostName()
# if there is an error then an exception is raised by command execution
cmd = gp.SendFilerepTransitionMessage("Fault Injector", inputFile.name, \
segment.getPort(), base.LOCAL, segment.getHostName())
print "### DEBUG: injectFaults -> Command = %s ###" % cmd
print "### DEBUG: injectFaults -> SendFilerepTransitionMessage ###"
cmd.run(validateAfter=False)
print "### DEBUG: injectFaults -> Run ###"
# validate ourselves
if cmd.results.rc != 0:
    raise ExceptionNoStackTraceNeeded("Injection Failed: %s" % cmd.results.stderr)
elif self.options.type == "status":
    # server side prints nice success messages on status...so print it
    msg = cmd.results.stderr
    if msg.startswith("Success: "):
        msg = msg.replace("Success: ", "", 1)
    logger.info("%s", msg)
inputFile.close()
def waitForFaults(self, segments, statusQueryText):
inputFile = self.writeToTempFile(statusQueryText)
segments = list(segments)  # work on a copy; completed segments are pruned each pass
sleepTimeSec = 0.115610199
sleepTimeMultiplier = 1.5  # sleepTimeSec * sleepTimeMultiplier**11 ~= 10 seconds
logger.info("Awaiting fault on %d segment(s)", len(segments))
while len(segments) > 0 :
logger.info("Sleeping %.2f seconds " % sleepTimeSec)
osInterface.getOsProvider().sleep(sleepTimeSec)
segmentsForNextPass = []
for segment in segments:
logger.info("Checking for fault completion on %s", segment)
cmd = gp.SendFilerepTransitionMessage.local("Fault Injector Status Check", inputFile.name, \
segment.getPort(), segment.getHostName())
resultStr = cmd.results.stderr.strip()
if resultStr == "Success: waitMore":
segmentsForNextPass.append(segment)
elif resultStr != "Success: done":
raise Exception("Unexpected result from server %s" % resultStr)
segments = segmentsForNextPass
sleepTimeSec = sleepTimeSec if sleepTimeSec > 7 else sleepTimeSec * sleepTimeMultiplier
inputFile.close()
def isSyncableFaultType(self):
    faultType = self.options.type
    return faultType != "reset" and faultType != "status"
######
def run(self):
if self.options.masterPort is not None and self.options.masterDataDirectory is not None:
raise ProgramArgumentValidationException("both master port and master data directory options are specified;" \
" at most one should be specified, or specify none to use MASTER_DATA_DIRECTORY environment variable")
print "### DEBUG: Build Message ###"
messageText = self.buildMessage()
print "### DEBUG: Load Target Segments ###"
segments = self.loadTargetSegments()
# inject, maybe wait
print "### DEBUG: Inject Faults ###"
self.injectFaults(segments, messageText)
if self.isSyncableFaultType() :
statusQueryText = self.buildGetStatusMessage()
self.waitForFaults(segments, statusQueryText)
logger.info("DONE")
return 0 # success -- exit code 0!
def cleanup(self):
pass
#-------------------------------------------------------------------------
@staticmethod
def createParser():
description = ("""
This utility is NOT SUPPORTED and is for internal-use only.
Used to inject faults into the file replication code.
""")
help = ["""
Return codes:
0 - Fault injected
non-zero: Error or invalid options
"""]
parser = OptParser(option_class=OptChecker,
description=' '.join(description.split()),
version='%prog version $Revision$')
parser.setHelp(help)
addStandardLoggingAndHelpOptions(parser, False)
# these options are used to determine the target segments
addTo = OptionGroup(parser, 'Target Segment Options: ')
parser.add_option_group(addTo)
addTo.add_option('-r', '--role', dest="targetRole", type='string', metavar="<role>",
help="Role of segments to target: master, standby, primary")
addTo.add_option("-s", "--registration_order", dest="targetRegistrationOrder", type="string", metavar="<registration_order>",
help="The segment registration_order on which fault should be set and triggered.")
addTo.add_option("-H", "--host", dest="targetHost", type="string", metavar="<host>",
help="The hostname on which fault should be set and triggered; pass ALL to target all hosts")
addTo = OptionGroup(parser, 'Master Connection Options')
parser.add_option_group(addTo)
addMasterDirectoryOptionForSingleClusterProgram(addTo)
addTo.add_option("-p", "--master_port", dest="masterPort", type="int", default=None,
metavar="<masterPort>",
help="DEPRECATED, use MASTER_DATA_DIRECTORY environment variable or -d option. " \
"The port number of the master database on localhost, " \
"used to fetch the segment configuration.")
addTo = OptionGroup(parser, 'Client Polling Options: ')
parser.add_option_group(addTo)
addTo.add_option('-m', '--mode', dest="syncMode", type='string', default="async",
metavar="<syncMode>",
help="Synchronization mode : sync (client waits for fault to occur)" \
" or async (client only sets fault request on server)")
# these options are used to build the message for the segments
addTo = OptionGroup(parser, 'Fault Options: ')
parser.add_option_group(addTo)
addTo.add_option('-y', '--type', dest="type", type='string', metavar="<type>",
help="fault type: sleep (insert sleep), fault (report fault to postmaster and fts prober), " \
"fatal (inject FATAL error), panic (inject PANIC error), error (inject ERROR), " \
"infinite_loop, data_curruption (corrupt data in memory and persistent media), " \
"suspend (suspend execution), resume (resume execution that was suspended), " \
"skip (inject skip i.e. skip checkpoint), " \
"memory_full (all memory is consumed when injected), " \
"reset (remove fault injection), status (report fault injection status), " \
"panic_suppress (inject suppressed PANIC in critical section), " \
"segv (inject a SEGV), " \
"interrupt (inject an Interrupt) ")
addTo.add_option("-z", "--sleep_time_s", dest="sleepTimeSeconds", type="int", default="10" ,
metavar="<sleepTime>",
help="For 'sleep' faults, the amount of time for the sleep. Defaults to %default." \
"Min Max Range is [0, 7200 sec] ")
addTo.add_option('-f', '--fault_name', dest="faultName", type='string', metavar="<name>",
help="fault name: " \
"postmaster (inject fault when new connection is accepted in postmaster), " \
"pg_control (inject fault when global/pg_control file is written), " \
"pg_xlog (inject fault when files in pg_xlog directory are written), " \
"start_prepare (inject fault during start prepare transaction), " \
"filerep_consumer (inject fault before data are processed, i.e. if mirror " \
"then before file operation is issued to file system, if primary " \
"then before mirror file operation is acknowledged to backend processes), " \
"filerep_consumer_verificaton (inject fault before ack verification data are processed on primary), " \
"filerep_change_tracking_compacting (inject fault when compacting change tracking log files), " \
"filerep_sender (inject fault before data are sent to network), " \
"filerep_receiver (inject fault after data are received from network), " \
"filerep_flush (inject fault before fsync is issued to file system), " \
"filerep_resync (inject fault while in resync when first relation is ready to be resynchronized), " \
"filerep_resync_in_progress (inject fault while resync is in progress), " \
"filerep_resync_worker (inject fault after write to mirror), " \
"filerep_resync_worker_read (inject fault before read required for resync), " \
"filerep_transition_to_resync (inject fault during transition to InResync before mirror re-create), " \
"filerep_transition_to_resync_mark_recreate (inject fault during transition to InResync before marking re-created), " \
"filerep_transition_to_resync_mark_completed (inject fault during transition to InResync before marking completed), " \
"filerep_transition_to_sync_begin (inject fault before transition to InSync begin), " \
"filerep_transition_to_sync (inject fault during transition to InSync), " \
"filerep_transition_to_sync_before_checkpoint (inject fault during transition to InSync before checkpoint is created), " \
"filerep_transition_to_sync_mark_completed (inject fault during transition to InSync before marking completed), " \
"filerep_transition_to_change_tracking (inject fault during transition to InChangeTracking), " \
"checkpoint (inject fault before checkpoint is taken), " \
"change_tracking_compacting_report (report if compacting is in progress), " \
"change_tracking_disable (inject fault before fsync to Change Tracking log files), " \
"transaction_abort_after_distributed_prepared (abort prepared transaction), " \
"transaction_commit_pass1_from_create_pending_to_created, " \
"transaction_commit_pass1_from_drop_in_memory_to_drop_pending, " \
"transaction_commit_pass1_from_aborting_create_needed_to_aborting_create, " \
"transaction_abort_pass1_from_create_pending_to_aborting_create, " \
"transaction_abort_pass1_from_aborting_create_needed_to_aborting_create, " \
"transaction_commit_pass2_from_drop_in_memory_to_drop_pending, " \
"transaction_commit_pass2_from_aborting_create_needed_to_aborting_create, " \
"transaction_abort_pass2_from_create_pending_to_aborting_create, " \
"transaction_abort_pass2_from_aborting_create_needed_to_aborting_create, " \
"finish_prepared_transaction_commit_pass1_from_create_pending_to_created, " \
"finish_prepared_transaction_commit_pass2_from_create_pending_to_created, " \
"finish_prepared_transaction_abort_pass1_from_create_pending_to_aborting_create, " \
"finish_prepared_transaction_abort_pass2_from_create_pending_to_aborting_create, " \
"finish_prepared_transaction_commit_pass1_from_drop_in_memory_to_drop_pending, " \
"finish_prepared_transaction_commit_pass2_from_drop_in_memory_to_drop_pending, " \
"finish_prepared_transaction_commit_pass1_aborting_create_needed, " \
"finish_prepared_transaction_commit_pass2_aborting_create_needed, " \
"finish_prepared_transaction_abort_pass1_aborting_create_needed, " \
"finish_prepared_transaction_abort_pass2_aborting_create_needed, " \
"twophase_transaction_commit_prepared (inject fault before transaction commit is inserted in xlog), " \
"twophase_transaction_abort_prepared (inject fault before transaction abort is inserted in xlog), " \
"dtm_broadcast_prepare (inject fault after prepare broadcast), " \
"dtm_broadcast_commit_prepared (inject fault after commit broadcast), " \
"dtm_broadcast_abort_prepared (inject fault after abort broadcast), " \
"dtm_xlog_distributed_commit (inject fault after distributed commit was inserted in xlog), " \
"fault_before_pending_delete_relation_entry (inject fault before putting pending delete relation entry, " \
"fault_before_pending_delete_database_entry (inject fault before putting pending delete database entry, " \
"fault_before_pending_delete_tablespace_entry (inject fault before putting pending delete tablespace entry, " \
"fault_before_pending_delete_filespace_entry (inject fault before putting pending delete filespace entry, " \
"dtm_init (inject fault before initializing dtm), " \
"end_prepare_two_phase_sleep (inject sleep after two phase file creation), " \
"segment_transition_request (inject fault after segment receives state transition request), " \
"segment_probe_response (inject fault after segment is probed by FTS), " \
"sync_persistent_table (inject fault to sync persistent table to disk), " \
"xlog_insert (inject fault to skip insert record into xlog), " \
"local_tm_record_transaction_commit (inject fault for local transactions after transaction commit is recorded and flushed in xlog ), " \
"malloc_failure (inject fault to simulate memory allocation failure), " \
"transaction_abort_failure (inject fault to simulate transaction abort failure), " \
"update_committed_eof_in_persistent_table (inject fault before committed EOF is updated in gp_persistent_relation_node for Append Only segment files), " \
"fault_during_exec_dynamic_table_scan (inject fault during scanning of a partition), " \
"internal_flush_error (inject an error during internal_flush), " \
"exec_simple_query_end_command (inject fault before EndCommand in exec_simple_query), " \
"multi_exec_hash_large_vmem (allocate large vmem using palloc inside MultiExecHash to attempt to exceed vmem limit), " \
"execsort_before_sorting (inject fault in nodeSort after receiving all tuples and before sorting), " \
"workfile_cleanup_set (inject fault in workfile manager cleanup set)" \
"execsort_mksort_mergeruns (inject fault in MKSort during the mergeruns phase), " \
"cdb_copy_start_after_dispatch (inject fault in cdbCopyStart after dispatch), " \
"fault_in_background_writer_main (inject fault in BackgroundWriterMain), " \
"exec_hashjoin_new_batch (inject fault before switching to a new batch in Hash Join), " \
"analyze_subxact_error (inject an error during analyze)," \
"opt_task_allocate_string_buffer (inject fault while allocating string buffer), " \
"runaway_cleanup (inject fault before starting the cleanup for a runaway query)" \
"connection_fail_after_gang_creation (inject fault after gang thread creation, set connection null)" \
"create_cdb_dispath_result_object (inject fault when create cdb dispatch result object, set out of memory)" \
"worker_manager_submit_job (inject fault when worker manager submit job , set error)" \
"fail_qe_after_connection (inject fault after connecting to QD, sleep to wait QE fail)" \
"fail_qe_when_do_query (inject fault when QE actually working, set error)" \
"fail_qe_when_begin_parquet_scan (inject fault when begin scan parquet table, set error)"\
"fail_qe_when_parquet_get_next (inject fault when get next, set error)"\
"interconnect_stop_ack_is_lost (inject fault in interconnect to skip sending the stop ack), " \
"all (affects all faults injected, used for 'status' and 'reset'), ")
addTo.add_option("-c", "--ddl_statement", dest="ddlStatement", type="string",
metavar="ddlStatement",
help="The DDL statement on which fault should be set and triggered " \
"(i.e. create_database, drop_database, create_table, drop_table)")
addTo.add_option("-D", "--database_name", dest="databaseName", type="string",
metavar="databaseName",
help="The database name on which fault should be set and triggered.")
addTo.add_option("-t", "--table_name", dest="tableName", type="string",
metavar="tableName",
help="The table name on which fault should be set and triggered.")
addTo.add_option("-o", "--occurrence", dest="numOccurrences", type="int", default=1,
metavar="numOccurrences",
help="The number of occurrence of the DDL statement with the database name " \
"and the table name before fault is triggered. Defaults to %default. Max is 1000. " \
"Fault is triggered always if set to '0'. ")
parser.set_defaults()
return parser
@staticmethod
def createProgram(options, args):
if len(args) > 0 :
raise ProgramArgumentValidationException(\
"too many arguments: only options may be specified")
return HAWQInjectFaultProgram(options)
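# A typical entry point for mainUtils-based utilities would look like the
# sketch below (hedged: it assumes the star import above exposes simple_main
# with the usual gppylib-style (createParser, createProgram) signature):
if __name__ == '__main__':
    simple_main(HAWQInjectFaultProgram.createParser,
                HAWQInjectFaultProgram.createProgram)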
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Interface for database access.
Usage:
>>> from heat import db
>>> db.event_get(context, event_id)
# Event object received
The underlying driver is loaded. SQLAlchemy is currently the only
supported backend.
"""
from oslo_config import cfg
from oslo_db import api
CONF = cfg.CONF
_BACKEND_MAPPING = {'sqlalchemy': 'heat.db.sqlalchemy.api'}
IMPL = api.DBAPI.from_config(CONF, backend_mapping=_BACKEND_MAPPING)
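# Every helper below is a thin pass-through to IMPL; e.g. with the default
# mapping, stack_get(ctxt, sid) forwards to heat.db.sqlalchemy.api.stack_get,
# so backends can be swapped by changing the oslo.db configuration alone.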
def get_engine():
return IMPL.get_engine()
def get_session():
return IMPL.get_session()
def raw_template_get(context, template_id):
return IMPL.raw_template_get(context, template_id)
def raw_template_create(context, values):
return IMPL.raw_template_create(context, values)
def raw_template_update(context, template_id, values):
return IMPL.raw_template_update(context, template_id, values)
def raw_template_delete(context, template_id):
return IMPL.raw_template_delete(context, template_id)
def resource_data_get_all(context, resource_id, data=None):
return IMPL.resource_data_get_all(context, resource_id, data)
def resource_data_get(resource, key):
return IMPL.resource_data_get(resource, key)
def resource_data_set(resource, key, value, redact=False):
return IMPL.resource_data_set(resource, key, value, redact=redact)
def resource_data_get_by_key(context, resource_id, key):
return IMPL.resource_data_get_by_key(context, resource_id, key)
def resource_data_delete(resource, key):
"""Remove a resource_data element associated to a resource."""
return IMPL.resource_data_delete(resource, key)
def stack_tags_set(context, stack_id, tags):
return IMPL.stack_tags_set(context, stack_id, tags)
def stack_tags_delete(context, stack_id):
return IMPL.stack_tags_delete(context, stack_id)
def stack_tags_get(context, stack_id):
return IMPL.stack_tags_get(context, stack_id)
def resource_get(context, resource_id):
return IMPL.resource_get(context, resource_id)
def resource_get_all(context):
return IMPL.resource_get_all(context)
def resource_update(context, resource_id, values, atomic_key,
expected_engine_id=None):
return IMPL.resource_update(context, resource_id, values, atomic_key,
expected_engine_id)
def resource_create(context, values):
return IMPL.resource_create(context, values)
def resource_exchange_stacks(context, resource_id1, resource_id2):
return IMPL.resource_exchange_stacks(context, resource_id1, resource_id2)
def resource_get_all_by_stack(context, stack_id, key_id=False, filters=None):
return IMPL.resource_get_all_by_stack(context, stack_id, key_id, filters)
def resource_get_by_name_and_stack(context, resource_name, stack_id):
return IMPL.resource_get_by_name_and_stack(context,
resource_name, stack_id)
def resource_get_by_physical_resource_id(context, physical_resource_id):
return IMPL.resource_get_by_physical_resource_id(context,
physical_resource_id)
def stack_get(context, stack_id, show_deleted=False, tenant_safe=True,
eager_load=False):
return IMPL.stack_get(context, stack_id, show_deleted=show_deleted,
tenant_safe=tenant_safe,
eager_load=eager_load)
def stack_get_by_name_and_owner_id(context, stack_name, owner_id):
return IMPL.stack_get_by_name_and_owner_id(context, stack_name,
owner_id=owner_id)
def stack_get_by_name(context, stack_name):
return IMPL.stack_get_by_name(context, stack_name)
def stack_get_all(context, limit=None, sort_keys=None, marker=None,
sort_dir=None, filters=None, tenant_safe=True,
show_deleted=False, show_nested=False, show_hidden=False,
tags=None, tags_any=None, not_tags=None,
not_tags_any=None):
return IMPL.stack_get_all(context, limit, sort_keys,
marker, sort_dir, filters, tenant_safe,
show_deleted, show_nested, show_hidden,
tags, tags_any, not_tags, not_tags_any)
def stack_get_all_by_owner_id(context, owner_id):
return IMPL.stack_get_all_by_owner_id(context, owner_id)
def stack_count_all(context, filters=None, tenant_safe=True,
show_deleted=False, show_nested=False, show_hidden=False,
tags=None, tags_any=None, not_tags=None,
not_tags_any=None):
return IMPL.stack_count_all(context, filters=filters,
tenant_safe=tenant_safe,
show_deleted=show_deleted,
show_nested=show_nested,
show_hidden=show_hidden,
tags=tags,
tags_any=tags_any,
not_tags=not_tags,
not_tags_any=not_tags_any)
def stack_create(context, values):
return IMPL.stack_create(context, values)
def stack_update(context, stack_id, values, exp_trvsl=None):
return IMPL.stack_update(context, stack_id, values, exp_trvsl=exp_trvsl)
def stack_delete(context, stack_id):
return IMPL.stack_delete(context, stack_id)
def stack_lock_create(stack_id, engine_id):
return IMPL.stack_lock_create(stack_id, engine_id)
def stack_lock_get_engine_id(stack_id):
return IMPL.stack_lock_get_engine_id(stack_id)
def stack_lock_steal(stack_id, old_engine_id, new_engine_id):
return IMPL.stack_lock_steal(stack_id, old_engine_id, new_engine_id)
def stack_lock_release(stack_id, engine_id):
return IMPL.stack_lock_release(stack_id, engine_id)
def persist_state_and_release_lock(context, stack_id, engine_id, values):
return IMPL.persist_state_and_release_lock(context, stack_id,
engine_id, values)
def stack_get_root_id(context, stack_id):
return IMPL.stack_get_root_id(context, stack_id)
def stack_count_total_resources(context, stack_id):
return IMPL.stack_count_total_resources(context, stack_id)
def user_creds_create(context):
return IMPL.user_creds_create(context)
def user_creds_delete(context, user_creds_id):
return IMPL.user_creds_delete(context, user_creds_id)
def user_creds_get(context_id):
return IMPL.user_creds_get(context_id)
def event_get(context, event_id):
return IMPL.event_get(context, event_id)
def event_get_all(context):
return IMPL.event_get_all(context)
def event_get_all_by_tenant(context, limit=None, marker=None,
sort_keys=None, sort_dir=None, filters=None):
return IMPL.event_get_all_by_tenant(context,
limit=limit,
marker=marker,
sort_keys=sort_keys,
sort_dir=sort_dir,
filters=filters)
def event_get_all_by_stack(context, stack_id, limit=None, marker=None,
sort_keys=None, sort_dir=None, filters=None):
return IMPL.event_get_all_by_stack(context, stack_id,
limit=limit,
marker=marker,
sort_keys=sort_keys,
sort_dir=sort_dir,
filters=filters)
def event_count_all_by_stack(context, stack_id):
return IMPL.event_count_all_by_stack(context, stack_id)
def event_create(context, values):
return IMPL.event_create(context, values)
def watch_rule_get(context, watch_rule_id):
return IMPL.watch_rule_get(context, watch_rule_id)
def watch_rule_get_by_name(context, watch_rule_name):
return IMPL.watch_rule_get_by_name(context, watch_rule_name)
def watch_rule_get_all(context):
return IMPL.watch_rule_get_all(context)
def watch_rule_get_all_by_stack(context, stack_id):
return IMPL.watch_rule_get_all_by_stack(context, stack_id)
def watch_rule_create(context, values):
return IMPL.watch_rule_create(context, values)
def watch_rule_update(context, watch_id, values):
return IMPL.watch_rule_update(context, watch_id, values)
def watch_rule_delete(context, watch_id):
return IMPL.watch_rule_delete(context, watch_id)
def watch_data_create(context, values):
return IMPL.watch_data_create(context, values)
def watch_data_get_all(context):
return IMPL.watch_data_get_all(context)
def watch_data_get_all_by_watch_rule_id(context, watch_rule_id):
return IMPL.watch_data_get_all_by_watch_rule_id(context, watch_rule_id)
def software_config_create(context, values):
return IMPL.software_config_create(context, values)
def software_config_get(context, config_id):
return IMPL.software_config_get(context, config_id)
def software_config_get_all(context, limit=None, marker=None,
tenant_safe=True):
return IMPL.software_config_get_all(context,
limit=limit,
marker=marker,
tenant_safe=tenant_safe)
def software_config_delete(context, config_id):
return IMPL.software_config_delete(context, config_id)
def software_deployment_create(context, values):
return IMPL.software_deployment_create(context, values)
def software_deployment_get(context, deployment_id):
return IMPL.software_deployment_get(context, deployment_id)
def software_deployment_get_all(context, server_id=None):
return IMPL.software_deployment_get_all(context, server_id)
def software_deployment_update(context, deployment_id, values):
return IMPL.software_deployment_update(context, deployment_id, values)
def software_deployment_delete(context, deployment_id):
return IMPL.software_deployment_delete(context, deployment_id)
def snapshot_create(context, values):
return IMPL.snapshot_create(context, values)
def snapshot_get(context, snapshot_id):
return IMPL.snapshot_get(context, snapshot_id)
def snapshot_get_by_stack(context, snapshot_id, stack):
return IMPL.snapshot_get_by_stack(context, snapshot_id, stack)
def snapshot_update(context, snapshot_id, values):
return IMPL.snapshot_update(context, snapshot_id, values)
def snapshot_delete(context, snapshot_id):
return IMPL.snapshot_delete(context, snapshot_id)
def snapshot_get_all(context, stack_id):
return IMPL.snapshot_get_all(context, stack_id)
def service_create(context, values):
return IMPL.service_create(context, values)
def service_update(context, service_id, values):
return IMPL.service_update(context, service_id, values)
def service_delete(context, service_id, soft_delete=True):
return IMPL.service_delete(context, service_id, soft_delete)
def service_get(context, service_id):
return IMPL.service_get(context, service_id)
def service_get_all(context):
return IMPL.service_get_all(context)
def service_get_all_by_args(context, host, binary, hostname):
return IMPL.service_get_all_by_args(context, host, binary, hostname)
def sync_point_delete_all_by_stack_and_traversal(context, stack_id,
traversal_id):
return IMPL.sync_point_delete_all_by_stack_and_traversal(context,
stack_id,
traversal_id)
def sync_point_create(context, values):
return IMPL.sync_point_create(context, values)
def sync_point_get(context, entity_id, traversal_id, is_update):
return IMPL.sync_point_get(context, entity_id, traversal_id, is_update)
def sync_point_update_input_data(context, entity_id,
traversal_id, is_update, atomic_key,
input_data):
return IMPL.sync_point_update_input_data(context, entity_id,
traversal_id, is_update,
atomic_key, input_data)
def db_sync(engine, version=None):
"""Migrate the database to `version` or the most recent version."""
return IMPL.db_sync(engine, version=version)
def db_version(engine):
"""Display the current database version."""
return IMPL.db_version(engine)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests the S3 backend store"""
import hashlib
import StringIO
import boto.s3.connection
import stubout
from glance.common import exception
from glance.openstack.common import uuidutils
from glance.store.location import get_location_from_uri
import glance.store.s3
from glance.store.s3 import Store, get_s3_location
from glance.store import UnsupportedBackend
from glance.tests.unit import base
FAKE_UUID = uuidutils.generate_uuid()
FIVE_KB = (5 * 1024)
S3_CONF = {'verbose': True,
'debug': True,
'default_store': 's3',
's3_store_access_key': 'user',
's3_store_secret_key': 'key',
's3_store_host': 'localhost:8080',
's3_store_bucket': 'glance'}
# We stub out as little as possible to ensure that the code paths
# between glance.store.s3 and boto.s3.connection are tested
# thoroughly
def stub_out_s3(stubs):
class FakeKey:
"""
Acts like a ``boto.s3.key.Key``
"""
def __init__(self, bucket, name):
self.bucket = bucket
self.name = name
self.data = None
self.size = 0
self.BufferSize = 1024
def close(self):
pass
def exists(self):
return self.bucket.exists(self.name)
def delete(self):
self.bucket.delete(self.name)
def compute_md5(self, data):
chunk = data.read(self.BufferSize)
checksum = hashlib.md5()
while chunk:
checksum.update(chunk)
chunk = data.read(self.BufferSize)
checksum_hex = checksum.hexdigest()
return checksum_hex, None
def set_contents_from_file(self, fp, replace=False, **kwargs):
self.data = StringIO.StringIO()
for chunk in fp:
    self.data.write(chunk)
self.size = self.data.len
# Reset the buffer to start
self.data.seek(0)
self.read = self.data.read
def get_file(self):
return self.data
class FakeBucket:
"""
Acts like a ``boto.s3.bucket.Bucket``
"""
def __init__(self, name, keys=None):
self.name = name
self.keys = keys or {}
def __str__(self):
return self.name
def exists(self, key):
return key in self.keys
def delete(self, key):
del self.keys[key]
def get_key(self, key_name, **kwargs):
key = self.keys.get(key_name)
if not key:
return FakeKey(self, key_name)
return key
def new_key(self, key_name):
new_key = FakeKey(self, key_name)
self.keys[key_name] = new_key
return new_key
fixture_buckets = {'glance': FakeBucket('glance')}
b = fixture_buckets['glance']
k = b.new_key(FAKE_UUID)
k.set_contents_from_file(StringIO.StringIO("*" * FIVE_KB))
def fake_connection_constructor(self, *args, **kwargs):
host = kwargs.get('host')
if host.startswith('http://') or host.startswith('https://'):
raise UnsupportedBackend(host)
def fake_get_bucket(conn, bucket_id):
bucket = fixture_buckets.get(bucket_id)
if not bucket:
bucket = FakeBucket(bucket_id)
return bucket
stubs.Set(boto.s3.connection.S3Connection,
'__init__', fake_connection_constructor)
stubs.Set(boto.s3.connection.S3Connection,
'get_bucket', fake_get_bucket)
def format_s3_location(user, key, authurl, bucket, obj):
"""
Helper method that returns a S3 store URI given
the component pieces.
"""
scheme = 's3'
if authurl.startswith('https://'):
scheme = 's3+https'
authurl = authurl[8:]
elif authurl.startswith('http://'):
authurl = authurl[7:]
authurl = authurl.strip('/')
return "%s://%s:%s@%s/%s/%s" % (scheme, user, key, authurl,
bucket, obj)
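# For example (hypothetical credentials):
#   format_s3_location('user', 'key', 'https://s3.example.com', 'glance', 'img1')
#   -> 's3+https://user:key@s3.example.com/glance/img1'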
class TestStore(base.StoreClearingUnitTest):
def setUp(self):
"""Establish a clean test environment"""
self.config(**S3_CONF)
super(TestStore, self).setUp()
self.stubs = stubout.StubOutForTesting()
stub_out_s3(self.stubs)
self.store = Store()
self.addCleanup(self.stubs.UnsetAll)
def test_get(self):
"""Test a "normal" retrieval of an image in chunks"""
loc = get_location_from_uri(
"s3://user:key@auth_address/glance/%s" % FAKE_UUID)
(image_s3, image_size) = self.store.get(loc)
self.assertEqual(image_size, FIVE_KB)
expected_data = "*" * FIVE_KB
data = ""
for chunk in image_s3:
data += chunk
self.assertEqual(expected_data, data)
def test_get_calling_format_path(self):
"""Test a "normal" retrieval of an image in chunks"""
self.config(s3_store_bucket_url_format='path')
def fake_S3Connection_init(*args, **kwargs):
expected_cls = boto.s3.connection.OrdinaryCallingFormat
self.assertTrue(isinstance(kwargs.get('calling_format'),
expected_cls))
self.stubs.Set(boto.s3.connection.S3Connection, '__init__',
fake_S3Connection_init)
loc = get_location_from_uri(
"s3://user:key@auth_address/glance/%s" % FAKE_UUID)
(image_s3, image_size) = self.store.get(loc)
def test_get_calling_format_default(self):
"""Test a "normal" retrieval of an image in chunks"""
def fake_S3Connection_init(*args, **kwargs):
expected_cls = boto.s3.connection.SubdomainCallingFormat
self.assertTrue(isinstance(kwargs.get('calling_format'),
expected_cls))
self.stubs.Set(boto.s3.connection.S3Connection, '__init__',
fake_S3Connection_init)
loc = get_location_from_uri(
"s3://user:key@auth_address/glance/%s" % FAKE_UUID)
(image_s3, image_size) = self.store.get(loc)
def test_get_non_existing(self):
"""
Test that trying to retrieve an image that doesn't exist in the
s3 store raises an error
"""
uri = "s3://user:key@auth_address/badbucket/%s" % FAKE_UUID
loc = get_location_from_uri(uri)
self.assertRaises(exception.NotFound, self.store.get, loc)
uri = "s3://user:key@auth_address/glance/noexist"
loc = get_location_from_uri(uri)
self.assertRaises(exception.NotFound, self.store.get, loc)
def test_add(self):
"""Test that we can add an image via the s3 backend"""
expected_image_id = uuidutils.generate_uuid()
expected_s3_size = FIVE_KB
expected_s3_contents = "*" * expected_s3_size
expected_checksum = hashlib.md5(expected_s3_contents).hexdigest()
expected_location = format_s3_location(
S3_CONF['s3_store_access_key'],
S3_CONF['s3_store_secret_key'],
S3_CONF['s3_store_host'],
S3_CONF['s3_store_bucket'],
expected_image_id)
image_s3 = StringIO.StringIO(expected_s3_contents)
location, size, checksum, _ = self.store.add(expected_image_id,
image_s3,
expected_s3_size)
self.assertEqual(expected_location, location)
self.assertEqual(expected_s3_size, size)
self.assertEqual(expected_checksum, checksum)
loc = get_location_from_uri(expected_location)
(new_image_s3, new_image_size) = self.store.get(loc)
new_image_contents = StringIO.StringIO()
for chunk in new_image_s3:
new_image_contents.write(chunk)
new_image_s3_size = new_image_contents.len
self.assertEqual(expected_s3_contents, new_image_contents.getvalue())
self.assertEqual(expected_s3_size, new_image_s3_size)
def test_add_host_variations(self):
"""
Test that having http(s):// in the s3serviceurl in config
options works as expected.
"""
variations = ['http://localhost:80',
'http://localhost',
'http://localhost/v1',
'http://localhost/v1/',
'https://localhost',
'https://localhost:8080',
'https://localhost/v1',
'https://localhost/v1/',
'localhost',
'localhost:8080/v1']
for variation in variations:
expected_image_id = uuidutils.generate_uuid()
expected_s3_size = FIVE_KB
expected_s3_contents = "*" * expected_s3_size
expected_checksum = hashlib.md5(expected_s3_contents).hexdigest()
new_conf = S3_CONF.copy()
new_conf['s3_store_host'] = variation
expected_location = format_s3_location(
new_conf['s3_store_access_key'],
new_conf['s3_store_secret_key'],
new_conf['s3_store_host'],
new_conf['s3_store_bucket'],
expected_image_id)
image_s3 = StringIO.StringIO(expected_s3_contents)
self.config(**new_conf)
self.store = Store()
location, size, checksum, _ = self.store.add(expected_image_id,
image_s3,
expected_s3_size)
self.assertEqual(expected_location, location)
self.assertEqual(expected_s3_size, size)
self.assertEqual(expected_checksum, checksum)
loc = get_location_from_uri(expected_location)
(new_image_s3, new_image_size) = self.store.get(loc)
new_image_contents = new_image_s3.getvalue()
new_image_s3_size = len(new_image_s3)
self.assertEqual(expected_s3_contents, new_image_contents)
self.assertEqual(expected_s3_size, new_image_s3_size)
def test_add_already_existing(self):
"""
Tests that adding an image with an existing identifier
raises an appropriate exception
"""
image_s3 = StringIO.StringIO("nevergonnamakeit")
self.assertRaises(exception.Duplicate,
self.store.add,
FAKE_UUID, image_s3, 0)
def _option_required(self, key):
conf = S3_CONF.copy()
conf[key] = None
try:
self.config(**conf)
self.store = Store()
return self.store.add == self.store.add_disabled
except Exception:
return False
def test_no_access_key(self):
"""
Tests that options without access key disables the add method
"""
self.assertTrue(self._option_required('s3_store_access_key'))
def test_no_secret_key(self):
"""
Tests that options without secret key disables the add method
"""
self.assertTrue(self._option_required('s3_store_secret_key'))
def test_no_host(self):
"""
Tests that options without host disables the add method
"""
self.assertTrue(self._option_required('s3_store_host'))
def test_delete(self):
"""
Test we can delete an existing image in the s3 store
"""
uri = "s3://user:key@auth_address/glance/%s" % FAKE_UUID
loc = get_location_from_uri(uri)
self.store.delete(loc)
self.assertRaises(exception.NotFound, self.store.get, loc)
def test_delete_non_existing(self):
"""
Test that trying to delete an image that doesn't exist in the
s3 store raises an error
"""
uri = "s3://user:key@auth_address/glance/noexist"
loc = get_location_from_uri(uri)
self.assertRaises(exception.NotFound, self.store.delete, loc)
def _do_test_get_s3_location(self, host, loc):
self.assertEqual(get_s3_location(host), loc)
self.assertEqual(get_s3_location(host + ':80'), loc)
self.assertEqual(get_s3_location('http://' + host), loc)
self.assertEqual(get_s3_location('http://' + host + ':80'), loc)
self.assertEqual(get_s3_location('https://' + host), loc)
self.assertEqual(get_s3_location('https://' + host + ':80'), loc)
def test_get_s3_good_location(self):
"""
Test that the s3 location can be derived from the host
"""
good_locations = [
('s3.amazonaws.com', ''),
('s3-eu-west-1.amazonaws.com', 'EU'),
('s3-us-west-1.amazonaws.com', 'us-west-1'),
('s3-ap-southeast-1.amazonaws.com', 'ap-southeast-1'),
('s3-ap-northeast-1.amazonaws.com', 'ap-northeast-1'),
]
for (url, expected) in good_locations:
self._do_test_get_s3_location(url, expected)
def test_get_s3_bad_location(self):
"""
Test that the s3 location cannot be derived from an unexpected host
"""
bad_locations = [
('', ''),
('s3.amazon.co.uk', ''),
('s3-govcloud.amazonaws.com', ''),
('cloudfiles.rackspace.com', ''),
]
for (url, expected) in bad_locations:
self._do_test_get_s3_location(url, expected)
def test_calling_format_path(self):
self.config(s3_store_bucket_url_format='path')
self.assertTrue(isinstance(glance.store.s3.get_calling_format(),
boto.s3.connection.OrdinaryCallingFormat))
def test_calling_format_subdomain(self):
self.config(s3_store_bucket_url_format='subdomain')
self.assertTrue(isinstance(glance.store.s3.get_calling_format(),
boto.s3.connection.SubdomainCallingFormat))
def test_calling_format_default(self):
self.assertTrue(isinstance(glance.store.s3.get_calling_format(),
boto.s3.connection.SubdomainCallingFormat))
|
|
#!/usr/bin/env python
'Classes and functions for handling XML files in pysilfont scripts'
__url__ = 'http://github.com/silnrsi/pysilfont'
__copyright__ = 'Copyright (c) 2015 SIL International (http://www.sil.org)'
__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
__author__ = 'David Raymond'
from xml.etree import cElementTree as ET
from glob import glob
import silfont.core
import re, sys, os, codecs, argparse, datetime, shutil, csv, collections
_elementprotect = {
'&' : '&',
'<' : '<',
'>' : '>' }
_attribprotect = dict(_elementprotect)
_attribprotect['"'] = '"' # Copy of element protect with double quote added
class ETWriter(object) :
""" General purpose ElementTree pretty printer complete with options for attribute order
beyond simple sorting, and which elements should use cdata """
def __init__(self, etree, namespaces = None, attributeOrder = {}, takesCData = set(),
indentIncr = " ", indentFirst = " ", indentML = False, inlineelem=[], precision = None, numAttribs = []):
self.root = etree
if namespaces is None : namespaces = {}
self.namespaces = namespaces
self.attributeOrder = attributeOrder # Sort order for attributes - just one list for all elements
self.takesCData = takesCData
self.indentIncr = indentIncr # Incremental increase in indent
self.indentFirst = indentFirst # Indent for first level
self.indentML = indentML # Add indent to multi-line strings
self.inlineelem = inlineelem # For supporting in-line elements. Does not work with mix of inline and other subelements in same element
self.precision = precision # Precision to use outputting numeric attribute values
self.numAttribs = numAttribs # List of numeric attributes used with precision
def _protect(self, txt, base=_attribprotect) :
return re.sub(ur'['+ur"".join(base.keys())+ur"]", lambda m: base[m.group(0)], txt)
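    # e.g. self._protect(u'a < "b" & c') returns u'a &lt; &quot;b&quot; &amp; c'
    # with the default (attribute) table; element text passes
    # base=_elementprotect instead, which leaves double quotes alone.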
def serialize_xml(self, write, base = None, indent = '') :
"""Output the object using write() in a normalised way:
If namespaces are used, use serialize_nsxml instead"""
outstr=""
if base is None :
base = self.root
outstr += '<?xml version="1.0" encoding="UTF-8"?>\n'
if '.pi' in base.attrib : # Processing instructions
for pi in base.attrib['.pi'].split(",") : outstr += u'<?{}?>\n'.format(pi)
if '.doctype' in base.attrib : outstr += u'<!DOCTYPE {}>\n'.format(base.attrib['.doctype'])
tag = base.tag
attribs = base.attrib
if '.comments' in attribs :
for c in attribs['.comments'].split(",") : outstr += u'{}<!--{}-->\n'.format(indent, c)
i = indent if tag not in self.inlineelem else ""
outstr += u'{}<{}'.format(i, tag)
for k in sorted(attribs.keys(), cmp=lambda x,y: cmp(self.attributeOrder.get(x, 999), self.attributeOrder.get(y, 999)) or cmp(x, y)) :
            if k[0] != '.' :
att = attribs[k]
if self.precision is not None and k in self.numAttribs :
num = round(float(attribs[k]), self.precision)
att = "{}".format(int(num)) if num == int(num) else "{}".format(num)
outstr += u' {}="{}"'.format(k, att)
if len(base) or (base.text and base.text.strip()) :
outstr += '>'
if base.text and base.text.strip() :
if tag not in self.takesCData :
t = base.text
if self.indentML : t = t.replace('\n', '\n' + indent)
t = self._protect(t, base=_elementprotect)
else :
t = "<![CDATA[\n\t" + indent + base.text.replace('\n', '\n\t' + indent) + "\n" + indent + "]]>"
outstr += t
if len(base) :
if base[0].tag not in self.inlineelem : outstr += '\n'
if base == self.root:
incr = self.indentFirst
else:
incr = self.indentIncr
write(outstr); outstr=""
for b in base : self.serialize_xml(write, base=b, indent=indent + incr)
if base[-1].tag not in self.inlineelem : outstr += indent
outstr += '</{}>'.format(tag)
else :
outstr += '/>'
if base.tail and base.tail.strip() :
outstr += self._protect(base.tail, base=_elementprotect)
if tag not in self.inlineelem : outstr += "\n"
if '.commentsafter' in base.attrib :
for c in base.attrib['.commentsafter'].split(",") : outstr += u'{}<!--{}-->\n'.format(indent, c)
write(outstr)
def _localisens(self, tag) :
if tag[0] == '{' :
ns, localname = tag[1:].split('}', 1)
qname = self.namespaces.get(ns, '')
if qname :
return ('{}:{}'.format(qname, localname), qname, ns)
else :
self.nscount += 1
return (localname, 'ns_' + str(self.nscount), ns)
else :
return (tag, None, None)
def _nsprotectattribs(self, attribs, localattribs, namespaces) :
if attribs is not None :
for k, v in attribs.items() :
(lt, lq, lns) = self._localisens(k)
if lns and lns not in namespaces :
namespaces[lns] = lq
localattribs['xmlns:'+lq] = lns
localattribs[lt] = v
def serialize_nsxml(self, write, base = None, indent = '', topns = True, namespaces = {}) :
        ## Not currently used. Needs amending to mirror changes in serialize_xml for dummy attributes (and efficiency)
"""Output the object using write() in a normalised way:
topns if set puts all namespaces in root element else put them as low as possible"""
if base is None :
base = self.root
write('<?xml version="1.0" encoding="UTF-8"?>\n')
doctype = base.attrib['_doctype'] if '_doctype' in base.attrib else None
if doctype is not None:
del base.attrib["_doctype"]
write(u'<!DOCTYPE {}>\n'.format(doctype))
(tag, q, ns) = self._localisens(base.tag)
localattribs = {}
if ns and ns not in namespaces :
namespaces[ns] = q
localattribs['xmlns:'+q] = ns
if topns :
if base == self.root :
for n,q in self.namespaces.items() :
localattribs['xmlns:'+q] = n
namespaces[n] = q
else :
for c in base :
(lt, lq, lns) = self._localisens(c.tag)
if lns and lns not in namespaces :
namespaces[lns] = q
localattribs['xmlns:'+lq] = lns
self._nsprotectattribs(getattr(base, 'attrib', None), localattribs, namespaces)
if '_comments' in base.attrib :
for c in base.attrib['_comments'].split(",") :
write(u'{}<!--{}-->\n'.format(indent, c))
del base.attrib["_comments"]
write(u'{}<{}'.format(indent, tag))
if len(localattribs) :
maxAts = len(self.attributeOrder) + 1
def cmpattrib(x, y) :
return cmp(self.attributeOrder.get(x, maxAts), self.attributeOrder.get(y, maxAts)) or cmp(x, y)
for k in sorted(localattribs.keys(), cmp=cmpattrib) :
write(u' {}="{}"'.format(self._localisens(k)[0], self._protect(localattribs[k])))
if len(base) :
write('>\n')
for b in base :
if base == self.root:
incr = self.indentFirst
else:
incr = self.indentIncr
self.serialize_nsxml(write, base=b, indent=indent + incr, topns=topns, namespaces=namespaces.copy())
write('{}</{}>\n'.format(indent, tag))
elif base.text :
if base.text.strip() :
if tag not in self.takesCData :
t = base.text
if self.indentML : t = t.replace('\n', '\n' + indent)
t = self._protect(t, base=_elementprotect)
else :
t = "<![CDATA[\n\t" + indent + base.text.replace('\n', '\n\t' + indent) + "\n" + indent + "]]>"
write(u'>{}</{}>\n'.format(t, tag))
else :
write('/>\n')
else :
write('/>\n')
if '_commentsafter' in base.attrib :
for c in base.attrib['_commentsafter'].split(",") :
write(u'{}<!--{}-->\n'.format(indent, c))
del base.attrib["_commentsafter"]
def add_namespace(self, q, ns) :
if ns in self.namespaces : return self.namespaces[ns]
self.namespaces[ns] = q
return q
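
# Illustrative usage of ETWriter (a sketch, not part of pysilfont's API):
# pretty-print a small tree with a fixed attribute order, collecting output in
# a list. makeAttribOrder() is defined at the end of this module.
def _example_etwriter_usage() :
    root = ET.fromstring('<font><glyph width="512" name="a"/></font>')
    writer = ETWriter(root, attributeOrder = makeAttribOrder(["name", "width"]))
    out = []
    writer.serialize_xml(out.append)
    return "".join(out) # "name" is serialized before "width"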
class _container(object) :
# Parent class for other objects
    def __init__(self) :
self._contents = {}
    # Define methods so it acts like an immutable container
# (changes should be made via object functions etc)
def __len__(self):
return len(self._contents)
def __getitem__(self, key):
return self._contents[key]
def __iter__(self):
return iter(self._contents)
def keys(self) :
return self._contents.keys()
class xmlitem(_container):
""" The xml data item for an xml file"""
def __init__(self, dirn = None, filen = None, parse = True, logger=None) :
self.logger = logger if logger else silfont.core.loggerobj()
self._contents = {}
self.dirn = dirn
self.filen = filen
self.inxmlstr = ""
self.outxmlstr = ""
self.etree = None
self.type = None
if filen and dirn :
fulln = os.path.join( dirn, filen)
with open(fulln, "r") as inxml:
for line in inxml.readlines() :
self.inxmlstr = self.inxmlstr + line
if parse :
try:
self.etree = ET.fromstring(self.inxmlstr)
except Exception as e:
self.logger.log("Failed to parse xml for " + fulln, "E")
self.logger.log(str(e), "S")
def write_to_xml(self,text) : # Used by ETWriter.serialize_xml()
self.outxmlstr = self.outxmlstr + text
def write_to_file(self,dirn,filen) :
outfile=codecs.open(os.path.join(dirn,filen),'w','utf-8')
outfile.write(self.outxmlstr)
        outfile.close()
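
# Sketch of a typical xmlitem round trip (directory and file names are
# hypothetical): parse an XML file, re-serialize it via ETWriter, then write
# the result back out.
def _example_xmlitem_roundtrip() :
    item = xmlitem("some_dir", "some_file.xml") # parses on load
    ETWriter(item.etree).serialize_xml(item.write_to_xml)
    item.write_to_file("some_dir", "some_file_out.xml")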
class ETelement(_container):
# Class for an etree element. Mainly used as a parent class
# For each tag in the element, ETelement[tag] returns a list of sub-elements with that tag
# process_subelements can set attributes for each tag based on a supplied spec
def __init__(self,element) :
self.element = element
self._contents = {}
self.reindex()
def reindex(self) :
self._contents = collections.defaultdict(list)
for e in self.element :
self._contents[e.tag].append(e)
def remove(self,subelement) :
self._contents[subelement.tag].remove(subelement)
self.element.remove(subelement)
def append(self,subelement) :
self._contents[subelement.tag].append(subelement)
self.element.append(subelement)
def insert(self,index,subelement) :
self._contents[subelement.tag].insert(index,subelement)
self.element.insert(index,subelement)
def replace(self,index,subelement) :
self._contents[subelement.tag][index] = subelement
self.element[index] = subelement
def process_attributes(self, attrspec, others = False) :
# Process attributes based on list of attributes in the format:
# (element attr name, object attr name, required)
# If attr does not exist and is not required, set to None
# If others is True, attributes not in the list are allowed
# Attributes should be listed in the order they should be output if writing xml out
if not hasattr(self,"parseerrors") or self.parseerrors is None: self.parseerrors=[]
speclist = {}
for (i,spec) in enumerate(attrspec) : speclist[spec[0]] = attrspec[i]
for eaname in speclist :
(eaname,oaname,req) = speclist[eaname]
setattr(self, oaname, getattrib(self.element,eaname))
if req and getattr(self, oaname) is None : self.parseerrors.append("Required attribute " + eaname + " missing")
# check for any other attributes
for att in self.element.attrib :
if att not in speclist :
if others:
setattr(self, att, getattrib(self.element,att))
else :
self.parseerrors.append("Invalid attribute " + att)
def process_subelements(self,subspec, offspec = False) :
# Process all subelements based on spec of expected elements
# subspec is a list of elements, with each list in the format:
        # (element name, attribute name, class name, required, multiple values allowed)
# If cl is set, attribute is set to an object made with that class; otherwise just text of the element
if not hasattr(self,"parseerrors") or self.parseerrors is None : self.parseerrors=[]
def make_obj(self,cl,element) : # Create object from element and cascade parse errors down
if cl is None : return element.text
if cl is ETelement :
obj = cl(element) # ETelement does not require parent object, ie self
else :
obj = cl(self,element)
if hasattr(obj,"parseerrors") and obj.parseerrors != [] :
if hasattr(obj,"name") and obj.name is not None : # Try to find a name for error reporting
name = obj.name
elif hasattr(obj,"label") and obj.label is not None :
name = obj.label
else :
name = ""
self.parseerrors.append("Errors parsing " + element.tag + " element: " + name)
for error in obj.parseerrors :
self.parseerrors.append(" " + error)
return obj
speclist = {}
for (i,spec) in enumerate(subspec) : speclist[spec[0]] = subspec[i]
for ename in speclist :
(ename,aname,cl,req,multi) = speclist[ename]
initval = [] if multi else None
setattr(self,aname,initval)
for ename in self : # Process all elements
if ename in speclist :
(ename,aname,cl,req,multi) = speclist[ename]
elements = self[ename]
if multi :
for elem in elements : getattr(self,aname).append(make_obj(self,cl,elem))
else :
setattr(self,aname,make_obj(self,cl,elements[0]))
if len(elements) > 1 : self.parseerrors.append("Multiple " + ename + " elements not allowed")
else:
                if offspec: # Elements not in spec are allowed so create list of sub-elements.
setattr(self,ename,[])
for elem in elements : getattr(self,ename).append(ETelement(elem))
else :
self.parseerrors.append("Invalid element: " + ename)
for ename in speclist : # Check values exist for required elements etc
(ename,aname,cl,req,multi) = speclist[ename]
val = getattr(self,aname)
if req :
if multi and val == [] : self.parseerrors.append("No " + ename + " elements ")
                if not multi and val is None : self.parseerrors.append("No " + ename + " element")
def makeAttribOrder(attriblist) : # Turn a list of attrib names into an attributeOrder dict for ETWriter
return dict(map(lambda x:(x[1], x[0]), enumerate(attriblist)))
def getattrib(element,attrib) : return element.attrib[attrib] if attrib in element.attrib else None
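
# Illustrative spec formats for ETelement (a sketch; the element and attribute
# names are invented). process_attributes() takes tuples of
# (element attr name, object attr name, required) and process_subelements()
# takes tuples of (element name, attribute name, class, required, multiple).
def _example_etelement_specs() :
    e = ETelement(ET.fromstring('<test name="demo"><note>hello</note></test>'))
    e.process_attributes([("name", "name", True)]) # Sets e.name to "demo"
    e.process_subelements([("note", "note", None, True, False)]) # Sets e.note to "hello"
    return e.parseerrors # Empty list if all required items were present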
|
|
import configparser
import ast
import numbers
import numpy as np
from unipath import Path
from .utils import BASE_DIR, DATA_DIR, DEFAULT_SETTINGS
from .geometry import Geometry
from .mesh import Mesh
from .material import Material
from .initial import Initial
from .source import Source
from .boundary import Boundary
from .solution import Solution
class Model:
''' The model class
'''
def __init__(self, output=DATA_DIR.child("lastSolution.vtu")):
if self.checkOutputFilePath(output):
self.output = output
self.geometry = Geometry()
self.mesh = Mesh()
self.material = Material()
self.initial = Initial()
self.source = Source()
self.boundary = Boundary()
self.solution = Solution()
# The model class is always initialized reading the configuration file
self.readConfig()
def checkOutputFilePath(self, filepath):
'''Check if the output path is valid and add the vtu extension if missing.
'''
try:
            if not filepath.endswith('.vtu'):
                # If the .vtu extension is missing, append it
                filepath = filepath + '.vtu'
with open(filepath, 'w') as f:
pass
self.output = filepath
return True
except:
raise OSError('The output path is not valid.')
def getSettings(self):
'''Generate the settings dictionary for file I/O
'''
settings = {}
settings['geometry'] = self.geometry.getSettings()
settings['mesh'] = self.mesh.getSettings()
settings['material'] = self.material.getSettings()
settings['initial'] = self.initial.getSettings()
settings['source'] = self.source.getSettings()
settings['boundary'] = self.boundary.getSettings()
return settings
def updateModel(self):
        '''Update the model attributes.
'''
pass
def saveSettings(self):
'''Write the current settings to the configuration file.
'''
settings = self.getSettings()
Model.writeConfig(settings, self.output)
@staticmethod
def writeDefaultConfig():
'''Write default configuration file.
'''
settings = DEFAULT_SETTINGS
Model.writeConfig(settings)
@staticmethod
def writeConfig(settings, output=DATA_DIR.child("lastSolution.vtu")):
'''Write or overwrite config.ini using the provided settings.
'''
g = settings['geometry']
m = settings['mesh']
ma = settings['material']
ini = settings['initial']
src = settings['source']
bnd = settings['boundary']
with open(BASE_DIR.child("config.ini"),'w') as cfgfile:
Config = configparser.ConfigParser(allow_no_value = True)
Config.add_section('File')
Config.set('File', '# Path to the solution file.')
Config.set('File', 'path', output)
Config.add_section('Geometry')
Config.set('Geometry', '# Set the geometry centered at the origin. The dimension d = 1, 2 or 3')
Config.set('Geometry', '# l is a vector giving the length of the geometry in meters for each')
            Config.set('Geometry', '# dimension. For now different side lengths are not supported, i.e.')
Config.set('Geometry', '# the geometry can only be a line, a square or a cube of length')
Config.set('Geometry', '# l[0]. l[1] and l[2] are set to l[0] in the code.')
Config.set('Geometry', 'd', str(g[0]))
Config.set('Geometry', 'l', '[{0}, {1}, {2}]'.format(g[1],g[2],g[3]))
Config.add_section('Mesh')
Config.set('Mesh', '# Set the mesh size: "coarse", "fine" or "normal".')
Config.set('Mesh', 'size', str(m))
Config.add_section('Material')
Config.set('Material', '# Set the material properties.')
Config.set('Material', '# name, e.g. Copper')
Config.set('Material', '# density "rho" in kg/m^3.')
Config.set('Material', '# thermal conductivity "k" in W/m/K.')
Config.set('Material', '# heat capacity "Cp" in J/kg/K.')
Config.set('Material', 'name', ma[0])
Config.set('Material', 'rho', str(ma[1]))
Config.set('Material', 'k', str(ma[2]))
Config.set('Material', 'Cp', str(ma[3]))
Config.add_section('Initial')
Config.set('Initial', '# Set the initial temperature distribution.')
Config.set('Initial', '# uniform T(x,y,z) = a.')
            Config.set('Initial', '# linear T(x_i) = a*(x_i/l - b), a >= 0, 0 <= b <= 1.')
Config.set('Initial', '# exponential T(x_i) = a*exp(-(x_i/l - b)/c), c > 0.')
Config.set('Initial', '# gaussian T(x_i) = a*exp(-(x_i/l - b)^2/(2*c^2)).')
Config.set('Initial', '# The values of b, and c are ignored if not used in the distribution.')
Config.set('Initial', 'dist', ini[0])
Config.set('Initial', 'a', str(ini[1]))
Config.set('Initial', 'b', str(ini[2]))
Config.set('Initial', 'c', str(ini[3]))
Config.add_section('Source')
Config.set('Source', '# Set a localized time dependent heat source in the geometry.')
            Config.set('Source', '# location is a vector specifying the center of the source in')
            Config.set('Source', '# units of length "l". Each component must have an absolute value')
            Config.set('Source', '# smaller than or equal to "1/2", e.g. [-1/2, 0, 1/2].')
Config.set('Source', '# fwhm is the full width at half maximum of the source in units of "l".')
Config.set('Source', '# fct is the time dependent function describing the source in W/m^d.')
Config.set('Source', '# uniform Q(t) = a.')
Config.set('Source', '# linear Q(t) = a*t + b.')
Config.set('Source', '# exponential Q(t) = a*exp(-t/b), b > 0.')
            Config.set('Source', '# The value of b is ignored if not used in the function.')
Config.set('Source', 'location', "[{0}, {1}, {2}]".format(src[0], src[1], src[2]))
Config.set('Source', 'fwhm', str(src[3]))
Config.set('Source', 'fct', src[4])
Config.set('Source', 'a', str(src[5]))
Config.set('Source', 'b', str(src[6]))
Config.add_section('Boundary')
Config.set('Boundary', '# Set the boundary conditions for the model.')
            Config.set('Boundary', '# The boundary conditions come in pairs. If d = 1')
            Config.set('Boundary', '# there is only one pair, if d = 2 two pairs, and if d = 3')
            Config.set('Boundary', '# three pairs. There are 5 types of boundary conditions:')
            Config.set('Boundary', '# dirichlet, neumann, robin, mixedI, mixedII.')
            Config.set('Boundary', '# The variables g1 and g2 can vary in time and the following')
            Config.set('Boundary', '# functions can be used to describe them:')
Config.set('Boundary', '# uniform g_i = a.')
Config.set('Boundary', '# linear g_i = a*t + b.')
Config.set('Boundary', '# exponential g_i = a*exp(-t/b), b > 0.')
            Config.set('Boundary', '# The value of b is ignored if not used in the function.')
            Config.set('Boundary', '# The parameters k1 and k2 are used for the robin boundary condition.')
            Config.set('Boundary', '# The values of k1 and k2 are ignored if not used.')
Config.set('Boundary', 'type', "[{0}, {1}, {2}]".format(bnd[0], bnd[1], bnd[2]))
Config.set('Boundary', 'g1', "[{0}, {1}, {2}]".format(bnd[3], bnd[4], bnd[5]))
Config.set('Boundary', 'a1', "[{0}, {1}, {2}]".format(bnd[6], bnd[7], bnd[8]))
Config.set('Boundary', 'b1', "[{0}, {1}, {2}]".format(bnd[9], bnd[10], bnd[11]))
Config.set('Boundary', 'k1', "[{0}, {1}, {2}]".format(bnd[12], bnd[13], bnd[14]))
Config.set('Boundary', 'g2', "[{0}, {1}, {2}]".format(bnd[15], bnd[16], bnd[17]))
Config.set('Boundary', 'a2', "[{0}, {1}, {2}]".format(bnd[18], bnd[19], bnd[20]))
Config.set('Boundary', 'b2', "[{0}, {1}, {2}]".format(bnd[21], bnd[22], bnd[23]))
Config.set('Boundary', 'k2', "[{0}, {1}, {2}]".format(bnd[24], bnd[25], bnd[26]))
Config.write(cfgfile)
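
    # For reference, an abbreviated sketch of the config.ini this method
    # produces (comments omitted, values illustrative):
    #
    #   [File]
    #   path = lastSolution.vtu
    #   [Geometry]
    #   d = 2
    #   l = [1.0, 1.0, 1.0]
    #   [Mesh]
    #   size = normal
    #   [Material]
    #   name = Copper
    #   rho = 8960
    #   k = 401
    #   cp = 385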
def compute(self):
'''Compute: compute the solution.
'''
tArray = self.getTimeList()
alpha = self.material.getAlpha()
Coords = self.mesh.getCoords()
bc = self.boundary.bcType
# Temporary for testing:
print('Computing:')
initTerm = self.initial.compute(bc, Coords, tArray, alpha)
bndTerm = self.boundary.compute(tArray, alpha)
'''
srcTerm = self.source.compute(bc[0], Coords[dim[0]], tArray, alpha)
'''
sol = initTerm + bndTerm # + srcTerm
self.solution.sol = sol # np.random.rand(self.mesh.getNumNodes())
def getTimeList(self, coeff=16040.0, length=101):
'''Return the simulation time list
'''
lmax = self.geometry.getMaxLength()
k = self.material.k
tmax = coeff*lmax**2/(4*k)
return np.linspace(0, tmax, length)
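    # Worked example (illustrative numbers): for copper, k is roughly
    # 401 W/m/K, so a geometry with lmax = 1 m gives
    # tmax = 16040 * 1**2 / (4 * 401) = 10.0, i.e. the default list spans
    # 0..10 s in 101 steps.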
def readConfig(self):
"""Initialize the model attributes
"""
Config = configparser.ConfigParser(allow_no_value = True)
Config.read(BASE_DIR.child("config.ini"))
sections = Config.sections()
opt = [False, False, False, False, False, False]
for section in sections:
if section=='Geometry':
options = Config.options(section)
self.validateGeometry(Config, section, options)
opt[0] = True
elif section=='Mesh':
options = Config.options(section)
self.validateMesh(Config, section, options)
opt[1] = True
elif section=='Material':
options = Config.options(section)
self.validateMaterial(Config, section, options)
opt[2] = True
elif section=='Initial':
options = Config.options(section)
self.validateInitial(Config, section, options)
opt[3] = True
elif section=='Source':
options = Config.options(section)
self.validateSource(Config, section, options)
opt[4] = True
elif section=='Boundary':
options = Config.options(section)
self.validateBoundary(Config, section, options)
opt[5] = True
elif section=='File':
pass
else:
                raise ValueError('Configuration file: [{0}] is not a valid section.'.format(section))
if False in opt:
ind=np.where(np.invert(opt))
s = ['Geometry', 'Mesh', 'Material', 'Initial', 'Source', 'Boundary']
# Display the first missing section
            raise ValueError('The configuration file is not valid: the section [{0}] is missing.'
                             .format(s[ind[0][0]]))
def checkLengthOption(self, section, option, optionList, length):
if not len(optionList)==length:
            raise ValueError('Configuration file [{0}]: Invalid length vector. '\
                             'The option "{1}" must be a list of {2} components.'
                             .format(section, option, length))
def validateGeometry(self, Config, section, options):
"""Initilize the geometry attribute
"""
dic = {}
for option in options:
if not (option=='d' or option=='l'):
                raise ValueError('Configuration file [{0}]: "{1}" is not a valid option.'
.format(section, option))
try:
dic[option] = ast.literal_eval(Config.get(section, option))
if option=='l':
self.checkLengthOption(section, option, dic[option], 3)
except:
raise ValueError("Congiguration file [{0}]: exception on {1}."
.format(section, option))
try:
# For now only lines, square and cube are supported.
self.geometry = Geometry(dic['d'],
dic['l'][0],
dic['l'][0], #[1]
dic['l'][0]) #[2]
except:
raise ValueError('Configuration file [{0}]: One or more options is missing.'
.format(section))
def validateMesh(self, Config, section, options):
"""Initilize the mesh attribute
"""
dic = {}
for option in options:
if not (option=='size'):
                raise ValueError('Configuration file [{0}]: "{1}" is not a valid option.'
.format(section, option))
try:
dic[option] = Config.get(section, option)
except:
raise ValueError("Congiguration file [{0}]: exception on {1}."
.format(section, option))
try:
self.mesh = Mesh(dic['size'], self.geometry)
except:
raise ValueError('Configuration file [{0}]: One or more options is missing.'
.format(section))
def validateMaterial(self, Config, section, options):
"""Initilize the material attribute
"""
dic = {}
for option in options:
if not (option=='name' or option=='rho' or option=='k' or option=='cp'):
                raise ValueError('Configuration file [{0}]: "{1}" is not a valid option.'
.format(section, option))
try:
if option=='name':
dic[option] = Config.get(section, option)
else:
dic[option] = ast.literal_eval(Config.get(section, option))
except:
raise ValueError("Congiguration file [{0}]: exception on {1}."
.format(section, option))
try:
self.material = Material(dic['name'],
dic['rho'],
dic['k'],
dic['cp'])
except:
raise ValueError('Configuration file [{0}]: One or more options is missing.'
.format(section))
def validateInitial(self, Config, section, options):
"""Initilize the initial attribute
"""
dic = {}
for option in options:
if not (option=='dist' or option=='a' or option=='b' or option=='c'):
                raise ValueError('Configuration file [{0}]: "{1}" is not a valid option.'
.format(section, option))
try:
if option=='dist':
dic[option] = Config.get(section, option)
else:
dic[option] = ast.literal_eval(Config.get(section, option))
except:
raise ValueError("Congiguration file [{0}]: exception on {1}."
.format(section, option))
try:
self.initial = Initial(self.mesh,
dic['dist'],
dic['a'],
dic['b'],
dic['c'])
except:
raise ValueError('Configuration file [{0}]: One or more options is missing.'
.format(section))
def validateSource(self, Config, section, options):
        '''Initialize the source attribute
'''
dic = {}
for option in options:
if not (option=='location' or option=='fwhm' or option=='fct' or
option=='a' or option=='b'):
                raise ValueError('Configuration file [{0}]: "{1}" is not a valid option.'
.format(section, option))
try:
if option=='location':
dic[option] = ast.literal_eval(Config.get(section, option))
self.checkLengthOption(section, option, dic[option], 3)
elif option=='fct':
dic[option] = Config.get(section, option)
else: # fwhm, a, b
dic[option] = ast.literal_eval(Config.get(section, option))
except:
raise ValueError("Congiguration file [{0}]: exception on {1}."
.format(section, option))
try:
self.source = Source(self.geometry,
dic['location'],
dic['fwhm'],
dic['fct'],
dic['a'],
dic['b'])
except:
raise ValueError('Configuration file [{0}]: One or more options is missing.'
.format(section))
def validateBoundary(self, Config, section, options):
        '''Initialize the boundary attribute
'''
dic = {}
for option in options:
if not (option=='type' or option=='g1' or option=='a1' or option=='b1' or
option=='g2' or option=='a2' or option=='b2' or
option=='k1' or option=='k2'):
                raise ValueError('Configuration file [{0}]: "{1}" is not a valid option.'
.format(section, option))
try:
if (option=='type' or option=='g1' or option=='g2'):
dic[option] = Config.get(section, option)
dic[option]=dic[option].strip("[]").split(',')
for i in range(0, len(dic[option])):
# remove remaining whitespaces from beginning and end of the string
dic[option][i]=dic[option][i].strip()
else:
dic[option] = ast.literal_eval(Config.get(section, option))
self.checkLengthOption(section, option, dic[option], 3)
except:
raise ValueError("Congiguration file [{0}]: exception on {1}."
.format(section, option))
try:
self.boundary = Boundary(self.mesh,
dic['type'],
dic['g1'],
dic['a1'],
dic['b1'],
dic['k1'],
dic['g2'],
dic['a2'],
dic['b2'],
dic['k2'])
except:
raise ValueError('Configuration file [{0}]: One or more options is missing.'
.format(section))
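
# Minimal usage sketch (assumes a valid config.ini exists under BASE_DIR, as
# readConfig() requires):
#
#   model = Model()      # reads and validates config.ini on construction
#   model.compute()      # fills model.solution.sol
#   model.saveSettings() # writes the current settings back to config.ini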
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import optparse
import mock
from glance.cmd import cache_manage
from glance.common import exception
import glance.common.utils
import glance.image_cache.client
from glance.tests import utils as test_utils
class TestGlanceCmdManage(test_utils.BaseTestCase):
@mock.patch.object(glance.image_cache.client.CacheClient,
'get_cached_images')
@mock.patch.object(glance.common.utils.PrettyTable, 'make_row')
def test_list_cached_images(self, mock_row_create, mock_images):
"""
Verify that list_cached() method correctly processes images with all
filled data and images with not filled 'last_accessed' field.
"""
mock_images.return_value = [
{'last_accessed': float(0),
'last_modified': float(1378985797.124511),
'image_id': '1', 'size': '128', 'hits': '1'},
{'last_accessed': float(1378985797.124511),
'last_modified': float(1378985797.124511),
'image_id': '2', 'size': '255', 'hits': '2'}]
cache_manage.list_cached(mock.Mock(), '')
self.assertEqual(len(mock_images.return_value),
mock_row_create.call_count)
@mock.patch.object(glance.image_cache.client.CacheClient,
'get_cached_images')
def test_list_cached_images_empty(self, mock_images):
"""
Verify that list_cached() method handles a case when no images are
cached without errors.
"""
mock_images.return_value = []
self.assertEqual(cache_manage.SUCCESS,
cache_manage.list_cached(mock.Mock(), ''))
@mock.patch.object(glance.image_cache.client.CacheClient,
'get_queued_images')
@mock.patch.object(glance.common.utils.PrettyTable, 'make_row')
def test_list_queued_images(self, mock_row_create, mock_images):
"""Verify that list_queued() method correctly processes images."""
mock_images.return_value = [
{'image_id': '1'}, {'image_id': '2'}]
cache_manage.list_queued(mock.Mock(), '')
self.assertEqual(len(mock_images.return_value),
mock_row_create.call_count)
@mock.patch.object(glance.image_cache.client.CacheClient,
'get_queued_images')
def test_list_queued_images_empty(self, mock_images):
"""
Verify that list_queued() method handles a case when no images were
queued without errors.
"""
mock_images.return_value = []
self.assertEqual(cache_manage.SUCCESS,
cache_manage.list_queued(mock.Mock(), ''))
def test_queue_image_without_index(self):
self.assertEqual(cache_manage.FAILURE,
cache_manage.queue_image(mock.Mock(), []))
@mock.patch.object(glance.cmd.cache_manage, 'user_confirm')
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_queue_image_not_forced_not_confirmed(self,
mock_client, mock_confirm):
        # options.force set to False and queue confirmation set to False.
mock_confirm.return_value = False
mock_options = mock.Mock()
mock_options.force = False
self.assertEqual(cache_manage.SUCCESS,
cache_manage.queue_image(mock_options, ['img_id']))
self.assertFalse(mock_client.called)
@mock.patch.object(glance.cmd.cache_manage, 'user_confirm')
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_queue_image_not_forced_confirmed(self, mock_client, mock_confirm):
        # options.force set to False and queue confirmation set to True.
mock_confirm.return_value = True
mock_options = mock.Mock()
mock_options.force = False
mock_options.verbose = True # to cover additional condition and line
manager = mock.MagicMock()
manager.attach_mock(mock_client, 'mock_client')
self.assertEqual(cache_manage.SUCCESS,
cache_manage.queue_image(mock_options, ['img_id']))
self.assertTrue(mock_client.called)
self.assertIn(
mock.call.mock_client().queue_image_for_caching('img_id'),
manager.mock_calls)
def test_delete_cached_image_without_index(self):
self.assertEqual(cache_manage.FAILURE,
cache_manage.delete_cached_image(mock.Mock(), []))
@mock.patch.object(glance.cmd.cache_manage, 'user_confirm')
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_delete_cached_image_not_forced_not_confirmed(self,
mock_client,
mock_confirm):
        # options.force set to False and delete confirmation set to False.
mock_confirm.return_value = False
mock_options = mock.Mock()
mock_options.force = False
self.assertEqual(
cache_manage.SUCCESS,
cache_manage.delete_cached_image(mock_options, ['img_id']))
self.assertFalse(mock_client.called)
@mock.patch.object(glance.cmd.cache_manage, 'user_confirm')
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_delete_cached_image_not_forced_confirmed(self, mock_client,
mock_confirm):
        # options.force set to False and delete confirmation set to True.
mock_confirm.return_value = True
mock_options = mock.Mock()
mock_options.force = False
mock_options.verbose = True # to cover additional condition and line
manager = mock.MagicMock()
manager.attach_mock(mock_client, 'mock_client')
self.assertEqual(
cache_manage.SUCCESS,
cache_manage.delete_cached_image(mock_options, ['img_id']))
self.assertIn(
mock.call.mock_client().delete_cached_image('img_id'),
manager.mock_calls)
@mock.patch.object(glance.cmd.cache_manage, 'user_confirm')
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_delete_cached_images_not_forced_not_confirmed(self,
mock_client,
mock_confirm):
        # options.force set to False and delete confirmation set to False.
mock_confirm.return_value = False
mock_options = mock.Mock()
mock_options.force = False
self.assertEqual(
cache_manage.SUCCESS,
cache_manage.delete_all_cached_images(mock_options, None))
self.assertFalse(mock_client.called)
@mock.patch.object(glance.cmd.cache_manage, 'user_confirm')
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_delete_cached_images_not_forced_confirmed(self, mock_client,
mock_confirm):
        # options.force set to False and delete confirmation set to True.
mock_confirm.return_value = True
mock_options = mock.Mock()
mock_options.force = False
mock_options.verbose = True # to cover additional condition and line
manager = mock.MagicMock()
manager.attach_mock(mock_client, 'mock_client')
self.assertEqual(
cache_manage.SUCCESS,
cache_manage.delete_all_cached_images(mock_options, None))
self.assertTrue(mock_client.called)
self.assertIn(
mock.call.mock_client().delete_all_cached_images(),
manager.mock_calls)
def test_delete_queued_image_without_index(self):
self.assertEqual(cache_manage.FAILURE,
cache_manage.delete_queued_image(mock.Mock(), []))
@mock.patch.object(glance.cmd.cache_manage, 'user_confirm')
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_delete_queued_image_not_forced_not_confirmed(self,
mock_client,
mock_confirm):
        # options.force set to False and delete confirmation set to False.
mock_confirm.return_value = False
mock_options = mock.Mock()
mock_options.force = False
self.assertEqual(
cache_manage.SUCCESS,
cache_manage.delete_queued_image(mock_options, ['img_id']))
self.assertFalse(mock_client.called)
@mock.patch.object(glance.cmd.cache_manage, 'user_confirm')
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_delete_queued_image_not_forced_confirmed(self, mock_client,
mock_confirm):
        # options.force set to False and delete confirmation set to True.
mock_confirm.return_value = True
mock_options = mock.Mock()
mock_options.force = False
mock_options.verbose = True # to cover additional condition and line
manager = mock.MagicMock()
manager.attach_mock(mock_client, 'mock_client')
self.assertEqual(
cache_manage.SUCCESS,
cache_manage.delete_queued_image(mock_options, ['img_id']))
self.assertTrue(mock_client.called)
self.assertIn(
mock.call.mock_client().delete_queued_image('img_id'),
manager.mock_calls)
@mock.patch.object(glance.cmd.cache_manage, 'user_confirm')
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_delete_queued_images_not_forced_not_confirmed(self,
mock_client,
mock_confirm):
        # options.force set to False and delete confirmation set to False.
mock_confirm.return_value = False
mock_options = mock.Mock()
mock_options.force = False
self.assertEqual(
cache_manage.SUCCESS,
cache_manage.delete_all_queued_images(mock_options, None))
self.assertFalse(mock_client.called)
@mock.patch.object(glance.cmd.cache_manage, 'user_confirm')
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_delete_queued_images_not_forced_confirmed(self, mock_client,
mock_confirm):
        # options.force set to False and delete confirmation set to True.
mock_confirm.return_value = True
mock_options = mock.Mock()
mock_options.force = False
mock_options.verbose = True # to cover additional condition and line
manager = mock.MagicMock()
manager.attach_mock(mock_client, 'mock_client')
self.assertEqual(
cache_manage.SUCCESS,
cache_manage.delete_all_queued_images(mock_options, None))
self.assertTrue(mock_client.called)
self.assertIn(
mock.call.mock_client().delete_all_queued_images(),
manager.mock_calls)
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_catch_error_not_found(self, mock_function):
mock_function.side_effect = exception.NotFound()
self.assertEqual(cache_manage.FAILURE,
cache_manage.list_cached(mock.Mock(), None))
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_catch_error_forbidden(self, mock_function):
mock_function.side_effect = exception.Forbidden()
self.assertEqual(cache_manage.FAILURE,
cache_manage.list_cached(mock.Mock(), None))
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_catch_error_unhandled(self, mock_function):
mock_function.side_effect = exception.Duplicate()
my_mock = mock.Mock()
my_mock.debug = False
self.assertEqual(cache_manage.FAILURE,
cache_manage.list_cached(my_mock, None))
@mock.patch.object(glance.cmd.cache_manage, 'get_client')
def test_catch_error_unhandled_debug_mode(self, mock_function):
mock_function.side_effect = exception.Duplicate()
my_mock = mock.Mock()
my_mock.debug = True
self.assertRaises(exception.Duplicate,
cache_manage.list_cached, my_mock, None)
def test_cache_manage_env(self):
def_value = 'sometext12345678900987654321'
self.assertNotEqual(def_value,
cache_manage.env('PATH', default=def_value))
def test_cache_manage_env_default(self):
def_value = 'sometext12345678900987654321'
self.assertEqual(def_value,
cache_manage.env('TMPVALUE1234567890',
default=def_value))
def test_create_option(self):
oparser = optparse.OptionParser()
cache_manage.create_options(oparser)
self.assertTrue(len(oparser.option_list) > 0)
@mock.patch.object(glance.cmd.cache_manage, 'lookup_command')
def test_parse_options_no_parameters(self, mock_lookup):
oparser = optparse.OptionParser()
cache_manage.create_options(oparser)
result = self.assertRaises(SystemExit, cache_manage.parse_options,
oparser, [])
self.assertEqual(0, result.code)
self.assertFalse(mock_lookup.called)
@mock.patch.object(optparse.OptionParser, 'print_usage')
def test_parse_options_no_arguments(self, mock_printout):
oparser = optparse.OptionParser()
cache_manage.create_options(oparser)
result = self.assertRaises(SystemExit, cache_manage.parse_options,
oparser, ['-p', '1212'])
self.assertEqual(0, result.code)
self.assertTrue(mock_printout.called)
@mock.patch.object(glance.cmd.cache_manage, 'lookup_command')
def test_parse_options_retrieve_command(self, mock_lookup):
mock_lookup.return_value = True
oparser = optparse.OptionParser()
cache_manage.create_options(oparser)
(options, command, args) = cache_manage.parse_options(oparser,
['-p', '1212',
'list-cached'])
self.assertTrue(command)
def test_lookup_command_unsupported_command(self):
self.assertRaises(SystemExit, cache_manage.lookup_command, mock.Mock(),
'unsupported_command')
def test_lookup_command_supported_command(self):
command = cache_manage.lookup_command(mock.Mock(), 'list-cached')
self.assertEqual(cache_manage.list_cached, command)
|
|
from __future__ import absolute_import, print_function
import logging
from django.conf import settings
from django.db import connections
from django.db.utils import OperationalError
from django.db.models.signals import post_syncdb, post_save
from functools import wraps
from pkg_resources import parse_version as Version
from sentry import options
from sentry.models import (
Organization, OrganizationMemberType, Project, User, Team, ProjectKey,
TagKey, TagValue, GroupTagValue, GroupTagKey, Activity,
Alert
)
from sentry.signals import buffer_incr_complete, regression_signal
from sentry.utils import db
from sentry.utils.safe import safe_execute
PROJECT_SEQUENCE_FIX = """
SELECT setval('sentry_project_id_seq', (
SELECT GREATEST(MAX(id) + 1, nextval('sentry_project_id_seq')) - 1
FROM sentry_project))
"""
def handle_db_failure(func):
@wraps(func)
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except OperationalError:
logging.exception('Failed processing signal %s', func.__name__)
return
return wrapped
def create_default_projects(created_models, verbosity=2, **kwargs):
if Project not in created_models:
return
create_default_project(
id=settings.SENTRY_PROJECT,
name='Internal',
slug='internal',
verbosity=verbosity,
platform='django',
)
if settings.SENTRY_FRONTEND_PROJECT:
        create_default_project(
id=settings.SENTRY_FRONTEND_PROJECT,
name='Frontend',
slug='frontend',
verbosity=verbosity,
platform='javascript'
)
def create_default_project(id, name, slug, verbosity=2, **kwargs):
if Project.objects.filter(id=id).exists():
return
try:
user = User.objects.filter(is_superuser=True)[0]
except IndexError:
user, _ = User.objects.get_or_create(
username='sentry',
defaults={
'email': 'sentry@localhost',
}
)
org, _ = Organization.objects.get_or_create(
slug='sentry',
defaults={
'owner': user,
'name': 'Sentry',
}
)
team, _ = Team.objects.get_or_create(
organization=org,
slug='sentry',
defaults={
'name': 'Sentry',
}
)
project = Project.objects.create(
id=id,
public=False,
name=name,
slug=slug,
team=team,
organization=team.organization,
**kwargs
)
# HACK: manually update the ID after insert due to Postgres
# sequence issues. Seriously, fuck everything about this.
if db.is_postgres(project._state.db):
connection = connections[project._state.db]
cursor = connection.cursor()
cursor.execute(PROJECT_SEQUENCE_FIX)
project.update_option('sentry:origins', ['*'])
if verbosity > 0:
print('Created internal Sentry project (slug=%s, id=%s)' % (project.slug, project.id))
return project
def set_sentry_version(latest=None, **kwargs):
import sentry
current = sentry.VERSION
version = options.get('sentry:latest_version')
for ver in (current, version):
if Version(ver) >= Version(latest):
latest = ver
if latest == version:
return
options.set('sentry:latest_version', (latest or current))
def create_keys_for_project(instance, created, **kwargs):
if not created or kwargs.get('raw'):
return
if not ProjectKey.objects.filter(project=instance).exists():
ProjectKey.objects.create(
project=instance,
label='Default',
)
def create_org_member_for_owner(instance, created, **kwargs):
if not created:
return
if not instance.owner:
return
instance.member_set.get_or_create(
user=instance.owner,
type=OrganizationMemberType.OWNER,
has_global_access=True,
)
@buffer_incr_complete.connect(sender=TagValue, weak=False)
def record_project_tag_count(filters, created, **kwargs):
from sentry import app
if not created:
return
# TODO(dcramer): remove in 7.6.x
project_id = filters.get('project_id')
if not project_id:
project_id = filters['project'].id
app.buffer.incr(TagKey, {
'values_seen': 1,
}, {
'project_id': project_id,
'key': filters['key'],
})
@buffer_incr_complete.connect(sender=GroupTagValue, weak=False)
def record_group_tag_count(filters, created, **kwargs):
from sentry import app
if not created:
return
# TODO(dcramer): remove in 7.6.x
project_id = filters.get('project_id')
if not project_id:
project_id = filters['project'].id
group_id = filters.get('group_id')
if not group_id:
group_id = filters['group'].id
app.buffer.incr(GroupTagKey, {
'values_seen': 1,
}, {
'project_id': project_id,
'group_id': group_id,
'key': filters['key'],
})
@regression_signal.connect(weak=False)
def create_regression_activity(instance, **kwargs):
if instance.times_seen == 1:
# this event is new
return
Activity.objects.create(
project=instance.project,
group=instance,
type=Activity.SET_REGRESSION,
)
def on_alert_creation(instance, **kwargs):
from sentry.plugins import plugins
for plugin in plugins.for_project(instance.project):
safe_execute(plugin.on_alert, alert=instance)
# Anything that relies on default objects that may not exist with default
# fields should be wrapped in handle_db_failure
post_syncdb.connect(
handle_db_failure(create_default_projects),
dispatch_uid="create_default_project",
weak=False,
)
post_save.connect(
handle_db_failure(create_keys_for_project),
sender=Project,
dispatch_uid="create_keys_for_project",
weak=False,
)
post_save.connect(
handle_db_failure(create_org_member_for_owner),
sender=Organization,
dispatch_uid="create_org_member_for_owner",
weak=False,
)
post_save.connect(
on_alert_creation,
sender=Alert,
dispatch_uid="on_alert_creation",
weak=False,
)
|
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Any, Iterator, List, Optional, TYPE_CHECKING, Tuple
from .asset import Asset, AssetMixin
from .utils import SnowflakeList, snowflake_time, MISSING
from .partial_emoji import _EmojiTag, PartialEmoji
from .user import User
# fmt: off
__all__ = (
'Emoji',
)
# fmt: on
if TYPE_CHECKING:
from .types.emoji import Emoji as EmojiPayload
from .guild import Guild
from .state import ConnectionState
from .abc import Snowflake
from .role import Role
from datetime import datetime
class Emoji(_EmojiTag, AssetMixin):
"""Represents a custom emoji.
Depending on the way this object was created, some of the attributes can
have a value of ``None``.
.. container:: operations
.. describe:: x == y
Checks if two emoji are the same.
.. describe:: x != y
Checks if two emoji are not the same.
.. describe:: hash(x)
Return the emoji's hash.
.. describe:: iter(x)
Returns an iterator of ``(field, value)`` pairs. This allows this class
to be used as an iterable in list/dict/etc constructions.
.. describe:: str(x)
Returns the emoji rendered for discord.
Attributes
-----------
name: :class:`str`
The name of the emoji.
id: :class:`int`
The emoji's ID.
require_colons: :class:`bool`
If colons are required to use this emoji in the client (:PJSalt: vs PJSalt).
animated: :class:`bool`
Whether an emoji is animated or not.
managed: :class:`bool`
If this emoji is managed by a Twitch integration.
guild_id: :class:`int`
The guild ID the emoji belongs to.
available: :class:`bool`
Whether the emoji is available for use.
user: Optional[:class:`User`]
The user that created the emoji. This can only be retrieved using :meth:`Guild.fetch_emoji` and
having the :attr:`~Permissions.manage_emojis` permission.
"""
__slots__: Tuple[str, ...] = (
'require_colons',
'animated',
'managed',
'id',
'name',
'_roles',
'guild_id',
'_state',
'user',
'available',
)
def __init__(self, *, guild: Guild, state: ConnectionState, data: EmojiPayload):
self.guild_id: int = guild.id
self._state: ConnectionState = state
self._from_data(data)
def _from_data(self, emoji: EmojiPayload):
self.require_colons: bool = emoji.get('require_colons', False)
self.managed: bool = emoji.get('managed', False)
self.id: int = int(emoji['id']) # type: ignore - This won't be None for full emoji objects.
self.name: str = emoji['name'] # type: ignore - This won't be None for full emoji objects.
self.animated: bool = emoji.get('animated', False)
self.available: bool = emoji.get('available', True)
self._roles: SnowflakeList = SnowflakeList(map(int, emoji.get('roles', [])))
user = emoji.get('user')
self.user: Optional[User] = User(state=self._state, data=user) if user else None
def _to_partial(self) -> PartialEmoji:
return PartialEmoji(name=self.name, animated=self.animated, id=self.id)
def __iter__(self) -> Iterator[Tuple[str, Any]]:
for attr in self.__slots__:
if attr[0] != '_':
value = getattr(self, attr, None)
if value is not None:
yield (attr, value)
def __str__(self) -> str:
if self.animated:
return f'<a:{self.name}:{self.id}>'
return f'<:{self.name}:{self.id}>'
def __repr__(self) -> str:
return f'<Emoji id={self.id} name={self.name!r} animated={self.animated} managed={self.managed}>'
def __eq__(self, other: Any) -> bool:
return isinstance(other, _EmojiTag) and self.id == other.id
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def __hash__(self) -> int:
return self.id >> 22
@property
def created_at(self) -> datetime:
""":class:`datetime.datetime`: Returns the emoji's creation time in UTC."""
return snowflake_time(self.id)
@property
def url(self) -> str:
""":class:`str`: Returns the URL of the emoji."""
fmt = 'gif' if self.animated else 'png'
return f'{Asset.BASE}/emojis/{self.id}.{fmt}'
@property
def roles(self) -> List[Role]:
"""List[:class:`Role`]: A :class:`list` of roles that is allowed to use this emoji.
If roles is empty, the emoji is unrestricted.
"""
guild = self.guild
if guild is None:
return []
return [role for role in guild.roles if self._roles.has(role.id)]
@property
def guild(self) -> Optional[Guild]:
""":class:`Guild`: The guild this emoji belongs to."""
return self._state._get_guild(self.guild_id)
def is_usable(self) -> bool:
""":class:`bool`: Whether the bot can use this emoji.
.. versionadded:: 1.3
"""
if not self.available or not self.guild or self.guild.unavailable:
return False
if not self._roles:
return True
emoji_roles, my_roles = self._roles, self.guild.me._roles
return any(my_roles.has(role_id) for role_id in emoji_roles)
async def delete(self, *, reason: Optional[str] = None) -> None:
"""|coro|
Deletes the custom emoji.
You must have :attr:`~Permissions.manage_emojis` permission to
do this.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for deleting this emoji. Shows up on the audit log.
Raises
-------
Forbidden
You are not allowed to delete emojis.
HTTPException
An error occurred deleting the emoji.
"""
await self._state.http.delete_custom_emoji(self.guild_id, self.id, reason=reason)
async def edit(self, *, name: str = MISSING, roles: List[Snowflake] = MISSING, reason: Optional[str] = None) -> Emoji:
r"""|coro|
Edits the custom emoji.
You must have :attr:`~Permissions.manage_emojis` permission to
do this.
.. versionchanged:: 2.0
The newly updated emoji is returned.
Parameters
-----------
name: :class:`str`
The new emoji name.
roles: List[:class:`~discord.abc.Snowflake`]
A list of roles that can use this emoji. An empty list can be passed to make it available to everyone.
reason: Optional[:class:`str`]
The reason for editing this emoji. Shows up on the audit log.
Raises
-------
Forbidden
You are not allowed to edit emojis.
HTTPException
An error occurred editing the emoji.
Returns
--------
:class:`Emoji`
The newly updated emoji.
"""
payload = {}
if name is not MISSING:
payload['name'] = name
if roles is not MISSING:
payload['roles'] = [role.id for role in roles]
data = await self._state.http.edit_custom_emoji(self.guild_id, self.id, payload=payload, reason=reason)
return Emoji(guild=self.guild, data=data, state=self._state) # type: ignore - if guild is None, the http request would have failed
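
# Illustrative usage inside an async handler (guild, message, and the emoji
# name are hypothetical; this only exercises the surface defined above):
#
#   emoji = discord.utils.get(guild.emojis, name='python3')
#   if emoji is not None and emoji.is_usable():
#       await message.add_reaction(emoji)  # str(emoji) -> '<:python3:1234...>'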
|
|
# -*- coding: iso8859-1 -*-
"""Generic option parser class. This class can be used
to write code that will parse command line options for
an application by invoking one of the standard Python
library command argument parser modules optparse or
getopt.
The class first tries to use optparse. If it is not there
(< Python 2.3), it invokes getopt. However, this is
transparent to the application which uses the class.
The class requires a dictionary with entries of the following
form for each command line option.
'option_var' : ('short=<short option>','long=<long option>',
'help=<help string>', 'meta=<meta variable>',
'default=<default value>', 'type=<option type>')
where, 'option_var' is the key for the option in the final
dictionary of option-value pairs. The value is a tuple of
strings, where each string consists of entries of the form,
'key=value', where 'key' is borrowed from the way optparse
represents each variable for an option setting.
To parse the arguments, call the method 'parse_arguments'.
The return value is a dictionary of the option-value pairs."""
import sys
__author__="Anand Pillai"
class GenericOptionParserError(Exception):
def __init__(self,value):
self.value = value
def __str__(self):
return str(self.value)
class GenericOptionParser:
""" Generic option parser using
either optparse or getopt """
def __init__(self, optmap):
self._optmap = self._parse_optmap(optmap)
self._optdict = {}
self.maxw = 24
def _parse_optmap(self, map):
""" Internal method -> Parse option
map containing tuples and convert the
tuples to a dictionary """
optmap = {}
for key,value in map.items():
d = {}
for item in value:
if not item: continue
var,val=item.split('=')
d[var]=val
optmap[key] = d
return optmap
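    # For example (illustrative), {'infile': ('short=i', 'long=in')} becomes
    # {'infile': {'short': 'i', 'long': 'in'}}.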
def parse_arguments(self):
""" Parse command line arguments and
return a dictionary of option-value pairs """
try:
self.optparse = __import__('optparse')
# For invoking help, when no arguments
# are passed.
if len(sys.argv)==1:
sys.argv.append('-h')
self._parse_arguments1()
except ImportError:
try:
import getopt
self.getopt = __import__('getopt')
self._parse_arguments2()
except ImportError:
                raise GenericOptionParserError('Fatal Error: No optparse or getopt modules found')
return self._optdict
def _parse_arguments1(self):
""" Parse command-line arguments using optparse """
p = self.optparse.OptionParser()
for key,value in self._optmap.items():
# Option destination is the key itself
option = key
# Default action is 'store'
action = 'store'
# Short option string
sopt = value.get('short','')
# Long option string
lopt = value.get('long','')
# Help string
helpstr = value.get('help','')
# Meta var
meta = value.get('meta','')
# Default value
defl = value.get('default','')
# Default type is 'string'
typ = value.get('type','string')
# If bool type...
if typ == 'bool':
action = 'store_true'
defl = bool(str(defl) == 'True')
if sopt: sopt = '-' + sopt
if lopt: lopt = '--' + lopt
# Add option
p.add_option(sopt,lopt,dest=option,help=helpstr,metavar=meta,action=action,
default=defl)
(options,args) = p.parse_args()
self._optdict = options.__dict__
def _parse_arguments2(self):
""" Parse command-line arguments using getopt """
# getopt requires help string to
# be generated.
if len(sys.argv)==1:
sys.exit(self._usage())
shortopt,longopt='h',['help']
# Create short option string and long option
# list for getopt
for key, value in self._optmap.items():
sopt = value.get('short','')
lopt = value.get('long','')
typ = value.get('type','string')
defl = value.get('default','')
# If bool type...
if typ == 'bool':
defl = bool(str(defl) == 'True')
# Set default value
self._optdict[key] = defl
if typ=='bool':
if sopt: shortopt += sopt
if lopt: longopt.append(lopt)
else:
if sopt: shortopt = "".join((shortopt,sopt,':'))
if lopt: longopt.append(lopt+'=')
# Parse
(optlist,args) = self.getopt.getopt(sys.argv[1:],shortopt,longopt)
# Match options
for opt,val in optlist:
# Invoke help
if opt in ('-h','--help'):
sys.exit(self._usage())
for key,value in self._optmap.items():
sopt = '-' + value.get('short','')
lopt = '--' + value.get('long','')
typ = value.get('type','string')
if opt in (sopt,lopt):
if typ=='bool': val = True
self._optdict[key]=val
del self._optmap[key]
break
def _usage(self):
""" Generate and return a help string
for the program, similar to the one
generated by optparse """
usage = ["usage: %s [options]\n\n" % sys.argv[0]]
usage.append("options:\n")
options = [(' -h, --help', 'show this help message and exit\n')]
maxlen = 0
for value in self._optmap.values():
sopt = value.get('short','')
lopt = value.get('long','')
help = value.get('help','')
meta = value.get('meta','')
optstr = ""
if sopt: optstr="".join((' -',sopt,meta))
if lopt: optstr="".join((optstr,', --',lopt))
if meta: optstr="".join((optstr,'=',meta))
l = len(optstr)
if l>maxlen: maxlen=l
options.append((optstr,help))
for x in range(len(options)):
optstr = options[x][0]
helpstr = options[x][1]
if maxlen<self.maxw - 1:
usage.append("".join((optstr,(maxlen-len(optstr) + 2)*' ', helpstr,'\n')))
elif len(optstr)<self.maxw - 1:
usage.append("".join((optstr,(self.maxw-len(optstr))*' ', helpstr,'\n')))
else:
usage.append("".join((optstr,'\n',self.maxw*' ', helpstr,'\n')))
return "".join(usage)
if __name__=="__main__":
d={ 'infile' : ('short=i','long=in','help=Input file for the program',
'meta=IN'),
'outfile': ('short=o','long=out','help=Output file for the program',
'meta=OUT'),
'verbose': ('short=V','long=verbose','help=Be verbose in output',
'type=bool') }
g=GenericOptionParser(d)
optdict = g.parse_arguments()
    for key,value in optdict.items():
        # Use the option and the value in
        # your program
        pass
|
|
"""prawcore.sessions: Provides prawcore.Session and prawcore.session."""
import logging
import random
import time
from copy import deepcopy
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from urllib.parse import urljoin
from requests.exceptions import ChunkedEncodingError, ConnectionError, ReadTimeout
from requests.status_codes import codes
from .auth import BaseAuthorizer
from .const import TIMEOUT
from .exceptions import (
BadJSON,
BadRequest,
Conflict,
InvalidInvocation,
NotFound,
Redirect,
RequestException,
ServerError,
SpecialError,
TooLarge,
TooManyRequests,
UnavailableForLegalReasons,
URITooLong,
)
from .rate_limit import RateLimiter
from .util import authorization_error_class
if TYPE_CHECKING: # pragma: no cover
from io import BufferedReader
from requests.models import Response
from .auth import Authorizer
from .requestor import Requestor
log = logging.getLogger(__package__)
class RetryStrategy(object):
"""An abstract class for scheduling request retries.
The strategy controls both the number and frequency of retry attempts.
Instances of this class are immutable.
"""
def sleep(self) -> None:
"""Sleep until we are ready to attempt the request."""
sleep_seconds = self._sleep_seconds()
if sleep_seconds is not None:
message = f"Sleeping: {sleep_seconds:0.2f} seconds prior to retry"
log.debug(message)
time.sleep(sleep_seconds)
class FiniteRetryStrategy(RetryStrategy):
"""A ``RetryStrategy`` that retries requests a finite number of times."""
def _sleep_seconds(self) -> Optional[float]:
if self._retries < 3:
base = 0 if self._retries == 2 else 2
return base + 2 * random.random()
return None
def __init__(self, retries: int = 3) -> None:
"""Initialize the strategy.
:param retries: Number of times to attempt a request (default: ``3``).
"""
self._retries = retries
def consume_available_retry(self) -> "FiniteRetryStrategy":
"""Allow one fewer retry."""
return type(self)(self._retries - 1)
def should_retry_on_failure(self) -> bool:
"""Return ``True`` if and only if the strategy will allow another retry."""
return self._retries > 1
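    # Illustrative schedule with the default of three attempts: the initial
    # request does not sleep (_retries == 3 -> None), the first retry sleeps
    # 0-2 s (_retries == 2 -> base 0), and the second retry sleeps 2-4 s
    # (_retries == 1 -> base 2).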
class Session(object):
"""The low-level connection interface to Reddit's API."""
RETRY_EXCEPTIONS = (ChunkedEncodingError, ConnectionError, ReadTimeout)
RETRY_STATUSES = {
520,
522,
codes["bad_gateway"],
codes["gateway_timeout"],
codes["internal_server_error"],
codes["request_timeout"],
codes["service_unavailable"],
}
STATUS_EXCEPTIONS = {
codes["bad_gateway"]: ServerError,
codes["bad_request"]: BadRequest,
codes["conflict"]: Conflict,
codes["found"]: Redirect,
codes["forbidden"]: authorization_error_class,
codes["gateway_timeout"]: ServerError,
codes["internal_server_error"]: ServerError,
codes["media_type"]: SpecialError,
codes["moved_permanently"]: Redirect,
codes["not_found"]: NotFound,
codes["request_entity_too_large"]: TooLarge,
codes["request_uri_too_large"]: URITooLong,
codes["service_unavailable"]: ServerError,
codes["too_many_requests"]: TooManyRequests,
codes["unauthorized"]: authorization_error_class,
        codes[
            "unavailable_for_legal_reasons"
        ]: UnavailableForLegalReasons,
        520: ServerError,  # Cloudflare status (not named in requests)
        522: ServerError,  # Cloudflare status (not named in requests)
}
SUCCESS_STATUSES = {codes["accepted"], codes["created"], codes["ok"]}
@staticmethod
def _log_request(
data: Optional[List[Tuple[str, str]]],
method: str,
params: Dict[str, int],
url: str,
) -> None:
log.debug(f"Fetching: {method} {url}")
log.debug(f"Data: {data}")
log.debug(f"Params: {params}")
def __init__(
self,
authorizer: Optional[BaseAuthorizer],
) -> None:
"""Prepare the connection to Reddit's API.
:param authorizer: An instance of :class:`.Authorizer`.
"""
if not isinstance(authorizer, BaseAuthorizer):
raise InvalidInvocation(f"invalid Authorizer: {authorizer}")
self._authorizer = authorizer
self._rate_limiter = RateLimiter()
self._retry_strategy_class = FiniteRetryStrategy
def __enter__(self) -> "Session":
"""Allow this object to be used as a context manager."""
return self
def __exit__(self, *_args) -> None:
"""Allow this object to be used as a context manager."""
self.close()
def _do_retry(
self,
data: List[Tuple[str, Any]],
files: Dict[str, "BufferedReader"],
json: Dict[str, Any],
method: str,
params: Dict[str, int],
response: Optional["Response"],
retry_strategy_state: "FiniteRetryStrategy",
saved_exception: Optional[Exception],
timeout: float,
url: str,
) -> Optional[Union[Dict[Any, Any], str]]:
if saved_exception:
status = repr(saved_exception)
else:
status = response.status_code
log.warning(f"Retrying due to {status} status: {method} {url}")
return self._request_with_retries(
data=data,
files=files,
json=json,
method=method,
params=params,
timeout=timeout,
url=url,
            retry_strategy_state=retry_strategy_state.consume_available_retry(),  # noqa: E501
        )
def _make_request(
self,
data: List[Tuple[str, Any]],
files: Dict[str, "BufferedReader"],
json: Dict[str, Any],
method: str,
params: Dict[str, Any],
retry_strategy_state: "FiniteRetryStrategy",
timeout: float,
url: str,
) -> Union[Tuple["Response", None], Tuple[None, Exception]]:
try:
response = self._rate_limiter.call(
self._requestor.request,
self._set_header_callback,
method,
url,
allow_redirects=False,
data=data,
files=files,
json=json,
params=params,
timeout=timeout,
)
log.debug(
f"Response: {response.status_code}"
f" ({response.headers.get('content-length')} bytes)"
)
return response, None
except RequestException as exception:
if (
not retry_strategy_state.should_retry_on_failure()
or not isinstance( # noqa: E501
exception.original_exception, self.RETRY_EXCEPTIONS
)
):
raise
return None, exception.original_exception
def _request_with_retries(
self,
data: List[Tuple[str, Any]],
files: Dict[str, "BufferedReader"],
json: Dict[str, Any],
method: str,
params: Dict[str, int],
timeout: float,
url: str,
retry_strategy_state: Optional["FiniteRetryStrategy"] = None,
) -> Optional[Union[Dict[Any, Any], str]]:
if retry_strategy_state is None:
retry_strategy_state = self._retry_strategy_class()
retry_strategy_state.sleep()
self._log_request(data, method, params, url)
response, saved_exception = self._make_request(
data,
files,
json,
method,
params,
retry_strategy_state,
timeout,
url,
)
do_retry = False
if response is not None and response.status_code == codes["unauthorized"]:
self._authorizer._clear_access_token()
if hasattr(self._authorizer, "refresh"):
do_retry = True
if retry_strategy_state.should_retry_on_failure() and (
do_retry or response is None or response.status_code in self.RETRY_STATUSES
):
return self._do_retry(
data,
files,
json,
method,
params,
response,
retry_strategy_state,
saved_exception,
timeout,
url,
)
elif response.status_code in self.STATUS_EXCEPTIONS:
raise self.STATUS_EXCEPTIONS[response.status_code](response)
elif response.status_code == codes["no_content"]:
return
assert (
response.status_code in self.SUCCESS_STATUSES
), f"Unexpected status code: {response.status_code}"
if response.headers.get("content-length") == "0":
return ""
try:
return response.json()
except ValueError:
raise BadJSON(response)
def _set_header_callback(self) -> Dict[str, str]:
if not self._authorizer.is_valid() and hasattr(self._authorizer, "refresh"):
self._authorizer.refresh()
return {"Authorization": f"bearer {self._authorizer.access_token}"}
@property
def _requestor(self) -> "Requestor":
return self._authorizer._authenticator._requestor
def close(self) -> None:
"""Close the session and perform any clean up."""
self._requestor.close()
def request(
self,
method: str,
path: str,
data: Optional[Dict[str, Any]] = None,
files: Optional[Dict[str, "BufferedReader"]] = None,
json: Optional[Dict[str, Any]] = None,
params: Optional[Dict[str, Any]] = None,
timeout: float = TIMEOUT,
) -> Optional[Union[Dict[Any, Any], str]]:
"""Return the json content from the resource at ``path``.
:param method: The request verb. E.g., ``"GET"``, ``"POST"``, ``"PUT"``.
:param path: The path of the request. This path will be combined with the
``oauth_url`` of the Requestor.
:param data: Dictionary, bytes, or file-like object to send in the body of the
request.
:param files: Dictionary, mapping ``filename`` to file-like object.
:param json: Object to be serialized to JSON in the body of the request.
:param params: The query parameters to send with the request.
Automatically refreshes the access token if it becomes invalid and a refresh
token is available.
        :raises: :class:`.InvalidInvocation` if the access token becomes
            invalid and no refresh token is available.
"""
params = deepcopy(params) or {}
params["raw_json"] = 1
if isinstance(data, dict):
data = deepcopy(data)
data["api_type"] = "json"
data = sorted(data.items())
if isinstance(json, dict):
json = deepcopy(json)
json["api_type"] = "json"
url = urljoin(self._requestor.oauth_url, path)
return self._request_with_retries(
data=data,
files=files,
json=json,
method=method,
params=params,
timeout=timeout,
url=url,
)
def session(authorizer: Optional["Authorizer"] = None) -> Session:
"""Return a :class:`.Session` instance.
:param authorizer: An instance of :class:`.Authorizer`.
"""
return Session(authorizer=authorizer)
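# Minimal usage sketch (assumes an already-configured prawcore Authorizer
# instance named ``authorizer``; the request path is illustrative):
#
#     with session(authorizer) as reddit_session:
#         me = reddit_session.request("GET", "/api/v1/me")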
|
|
##########################################################################
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Copyright (c) 2010-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import hou
import IECore
import IECoreHoudini
import unittest
class TestCortexConverterSop( IECoreHoudini.TestCase ):
# make sure we can create the op
def testCreateCortexConverter(self) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
op = geo.createNode( "ieOpHolder" )
cl = IECore.ClassLoader.defaultOpLoader().load( "cobReader", 1)()
cl['filename'] = "test/IECoreHoudini/data/torus.cob"
fn = IECoreHoudini.FnOpHolder(op)
fn.setParameterised(cl)
IECoreHoudini.Utils.syncSopParametersWithOp(op)
op.cook()
self.assertEqual( cl.resultParameter().getValue().typeId(), IECore.TypeId.MeshPrimitive )
return (op, fn)
# check it works for points
def testPointConversion(self):
(op,fn) = self.testCreateCortexConverter()
torus = op.createOutputNode( "ieCortexConverter" )
scatter = torus.createOutputNode( "scatter" )
attr = scatter.createOutputNode( "attribcreate", exact_type_name=True )
attr.parm("name").set("testAttribute")
attr.parm("value1").setExpression("$PT")
to_cortex = attr.createOutputNode( "ieOpHolder" )
cl = IECoreHoudini.Utils.op("objectDebug",1)
fn = IECoreHoudini.FnOpHolder(to_cortex)
fn.setParameterised(cl)
to_cortex.parm("parm_quiet").set(True)
to_houdini = to_cortex.createOutputNode("ieCortexConverter")
geo = to_houdini.geometry()
attrNames = [ p.name() for p in geo.pointAttribs() ]
attrNames.sort()
self.assertEqual( attrNames, ["P", "Pw", "testAttribute"] )
self.assertEqual( len(geo.points()), 5000 )
self.assertEqual( len(geo.prims()), 1 )
# check it works for polygons
def testPolygonConversion(self):
(op,fn) = self.testCreateCortexConverter()
torus = op.createOutputNode( "ieCortexConverter" )
geo = torus.geometry()
self.assertEqual( len(geo.points()), 100 )
self.assertEqual( len(geo.prims()), 100 )
attrNames = [ p.name() for p in geo.pointAttribs() ]
attrNames.sort()
self.assertEqual( attrNames, ["P", "Pw"] )
for p in geo.prims():
self.assertEqual( p.numVertices(), 4 )
self.assertEqual( p.type(), hou.primType.Polygon )
n = hou.node("/obj/geo1")
h_torus = n.createNode( "torus" )
h_geo = h_torus.geometry()
self.assertEqual( len(geo.pointAttribs()), len(h_geo.pointAttribs()) )
self.assertEqual( len(geo.prims()), len(h_geo.prims()) )
# test converting a procedural
def testProceduralConversion( self ) :
obj = hou.node( "/obj" )
geo = obj.createNode( "geo", run_init_scripts=False )
holder = geo.createNode( "ieProceduralHolder" )
fn = IECoreHoudini.FnProceduralHolder( holder )
fn.setProcedural( "pointRender", 1 )
holder.parm( "parm_npoints" ).set( 123 )
converter = holder.createOutputNode( "ieCortexConverter" )
geo = converter.geometry()
self.assertEqual( len(geo.points()), 123 )
self.assertEqual( len(geo.prims()), 1 )
fn.setProcedural( "meshRender", 1 )
holder.parm( "parm_path" ).set( "test/IECoreHoudini/data/torus_with_normals.cob" )
geo = converter.geometry()
self.assertEqual( len(geo.points()), 100 )
self.assertEqual( len(geo.prims()), 100 )
self.assertEqual( sorted([ x.name() for x in geo.pointAttribs() ]), [ "N", "P", "Pw" ] )
self.assertTrue( geo.findPointAttrib( "N" ).isTransformedAsNormal() )
def scene( self ) :
geo = hou.node( "/obj" ).createNode( "geo", run_init_scripts=False )
boxA = geo.createNode( "box" )
nameA = boxA.createOutputNode( "name" )
nameA.parm( "name1" ).set( "boxA" )
boxB = geo.createNode( "box" )
nameB = boxB.createOutputNode( "name" )
nameB.parm( "name1" ).set( "boxB" )
torus = geo.createNode( "torus" )
nameC = torus.createOutputNode( "name" )
nameC.parm( "name1" ).set( "torus" )
merge = geo.createNode( "merge" )
merge.setInput( 0, nameA )
merge.setInput( 1, nameB )
merge.setInput( 2, nameC )
return merge.createOutputNode( "ieCortexConverter" )
def testNameFilter( self ) :
node = self.scene()
# it all converts to Cortex prims
node.parm( "resultType" ).set( 0 )
geo = node.geometry()
prims = geo.prims()
self.assertEqual( len(prims), 3 )
self.assertEqual( [ x.type() for x in prims ], [ hou.primType.Custom ] * 3 )
nameAttr = geo.findPrimAttrib( "name" )
self.assertEqual( nameAttr.strings(), tuple( [ 'boxA', 'boxB', 'torus' ] ) )
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == 'boxA' ]), 1 )
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == 'boxB' ]), 1 )
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == 'torus' ]), 1 )
# filter the middle shape only
node.parm( "nameFilter" ).set( "* ^boxB" )
geo = node.geometry()
prims = geo.prims()
self.assertEqual( len(prims), 8 )
self.assertEqual( [ x.type() for x in prims ], [ hou.primType.Custom ] + [ hou.primType.Polygon ] * 6 + [ hou.primType.Custom ] )
nameAttr = geo.findPrimAttrib( "name" )
self.assertEqual( nameAttr.strings(), tuple( [ 'boxA', 'boxB', 'torus' ] ) )
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == 'boxA' ]), 1 )
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == 'boxB' ]), 6 )
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == 'torus' ]), 1 )
# filters work on Cortex Prims as well
back = node.createOutputNode( "ieCortexConverter" )
back.parm( "nameFilter" ).set( "* ^torus" )
geo = back.geometry()
prims = geo.prims()
self.assertEqual( len(prims), 13 )
self.assertEqual( [ x.type() for x in prims ], [ hou.primType.Polygon ] * 12 + [ hou.primType.Custom ] )
nameAttr = geo.findPrimAttrib( "name" )
self.assertEqual( nameAttr.strings(), tuple( [ 'boxA', 'boxB', 'torus' ] ) )
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == 'boxA' ]), 6 )
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == 'boxB' ]), 6 )
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == 'torus' ]), 1 )
# test unnamed shapes
delname = back.createOutputNode( "attribute" )
delname.parm( "primdel" ).set( "name" )
unnamed = delname.createOutputNode( "ieCortexConverter" )
geo = unnamed.geometry()
self.assertEqual( len(geo.prims()), 112 )
prims = geo.prims()
self.assertEqual( [ x.type() for x in prims ], [ hou.primType.Polygon ] * 112 )
self.assertEqual( geo.findPrimAttrib( "name" ), None )
# unnamed with no filter is just a pass through
unnamed.parm( "nameFilter" ).set( "" )
geo = unnamed.geometry()
prims = geo.prims()
self.assertEqual( len(prims), 13 )
self.assertEqual( [ x.type() for x in prims ], [ hou.primType.Polygon ] * 12 + [ hou.primType.Custom ] )
self.assertEqual( geo.findPrimAttrib( "name" ), None )
def testResultType( self ) :
node = self.scene()
# it all passes through
geo = node.geometry()
prims = geo.prims()
self.assertEqual( len(prims), 112 )
self.assertEqual( [ x.type() for x in prims ], [ hou.primType.Polygon ] * 112 )
nameAttr = geo.findPrimAttrib( "name" )
self.assertEqual( nameAttr.strings(), tuple( [ 'boxA', 'boxB', 'torus' ] ) )
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == 'boxA' ]), 6 )
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == 'boxB' ]), 6 )
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == 'torus' ]), 100 )
# it all converts to Cortex prims
node.parm( "resultType" ).set( 0 )
geo = node.geometry()
prims = geo.prims()
self.assertEqual( len(prims), 3 )
self.assertEqual( [ x.type() for x in prims ], [ hou.primType.Custom ] * 3 )
nameAttr = geo.findPrimAttrib( "name" )
self.assertEqual( nameAttr.strings(), tuple( [ 'boxA', 'boxB', 'torus' ] ) )
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == 'boxA' ]), 1 )
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == 'boxB' ]), 1 )
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == 'torus' ]), 1 )
# it all converts back to Houdini geo
back = node.createOutputNode( "ieCortexConverter" )
geo = back.geometry()
prims = geo.prims()
self.assertEqual( len(prims), 112 )
self.assertEqual( [ x.type() for x in prims ], [ hou.primType.Polygon ] * 112 )
nameAttr = geo.findPrimAttrib( "name" )
self.assertEqual( nameAttr.strings(), tuple( [ 'boxA', 'boxB', 'torus' ] ) )
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == 'boxA' ]), 6 )
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == 'boxB' ]), 6 )
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == 'torus' ]), 100 )
def testAttributeFilter( self ) :
torus = hou.node("/obj").createNode("geo", run_init_scripts=False).createNode( "torus" )
color = torus.createOutputNode( "color" )
color.parm( "class" ).set( 3 )
color.parm( "colortype" ).set( 2 )
rest = color.createOutputNode( "rest" )
scale = rest.createOutputNode( "attribcreate" )
scale.parm( "name1" ).set( "pscale" )
scale.parm( "value1v1" ).setExpression( "$PT" )
uvunwrap = scale.createOutputNode( "uvunwrap" )
opHolder = uvunwrap.createOutputNode( "ieOpHolder" )
fn = IECoreHoudini.FnOpHolder( opHolder )
fn.setOp( "parameters/primitives/polyParam" )
out = opHolder.createOutputNode( "ieCortexConverter" )
# verify input
inGeo = uvunwrap.geometry()
self.assertEqual( sorted([ x.name() for x in inGeo.pointAttribs() ]), ['P', 'Pw', 'pscale', 'rest'] )
self.assertEqual( sorted([ x.name() for x in inGeo.primAttribs() ]), [] )
self.assertEqual( sorted([ x.name() for x in inGeo.vertexAttribs() ]), ['Cd', 'uv'] )
self.assertEqual( sorted([ x.name() for x in inGeo.globalAttribs() ]), ['varmap'] )
		# verify output
outGeo = out.geometry()
self.assertEqual( sorted([ x.name() for x in outGeo.pointAttribs() ]), ['P', 'Pw', 'pscale', 'rest'] )
self.assertEqual( sorted([ x.name() for x in outGeo.primAttribs() ]), ["ieMeshInterpolation"] )
self.assertEqual( sorted([ x.name() for x in outGeo.vertexAttribs() ]), ['Cd', 'uv'] )
self.assertEqual( sorted([ x.name() for x in outGeo.globalAttribs() ]), ['varmap'] )
# verify intermediate op result
result = fn.getOp().resultParameter().getValue()
self.assertEqual( result.keys(), [ "Cs", "P", "Pref", "s", "t", "varmap", "width" ] )
self.assertTrue( result.arePrimitiveVariablesValid() )
# make sure P is forced
out.parm( "attributeFilter" ).set( "* ^P" )
outGeo = out.geometry()
self.assertEqual( sorted([ x.name() for x in outGeo.pointAttribs() ]), ['P', 'Pw', 'pscale', 'rest'] )
self.assertEqual( sorted([ x.name() for x in outGeo.primAttribs() ]), ["ieMeshInterpolation"] )
self.assertEqual( sorted([ x.name() for x in outGeo.vertexAttribs() ]), ['Cd', 'uv'] )
self.assertEqual( sorted([ x.name() for x in outGeo.globalAttribs() ]), ['varmap'] )
# have to filter the source attrs s, t and not uv
out.parm( "attributeFilter" ).set( "* ^uv ^pscale ^rest" )
outGeo = out.geometry()
self.assertEqual( sorted([ x.name() for x in outGeo.pointAttribs() ]), ['P', 'Pw', 'pscale', 'rest'] )
self.assertEqual( sorted([ x.name() for x in outGeo.primAttribs() ]), ["ieMeshInterpolation"] )
self.assertEqual( sorted([ x.name() for x in outGeo.vertexAttribs() ]), ['Cd', 'uv'] )
self.assertEqual( sorted([ x.name() for x in outGeo.globalAttribs() ]), ['varmap'] )
out.parm( "attributeFilter" ).set( "* ^s ^t ^width ^Pref" )
outGeo = out.geometry()
self.assertEqual( sorted([ x.name() for x in outGeo.pointAttribs() ]), ['P', 'Pw'] )
self.assertEqual( sorted([ x.name() for x in outGeo.primAttribs() ]), ["ieMeshInterpolation"] )
self.assertEqual( sorted([ x.name() for x in outGeo.vertexAttribs() ]), ['Cd'] )
self.assertEqual( sorted([ x.name() for x in outGeo.globalAttribs() ]), ['varmap'] )
# make sure we can filter on both ends
opHolder.parm( "parm_input_attributeFilter" ).set( "* ^s ^t ^width ^Pref" )
result = fn.getOp().resultParameter().getValue()
self.assertEqual( result.keys(), [ "Cs", "P", "Pref", "s", "t", "varmap", "width" ] )
self.assertTrue( result.arePrimitiveVariablesValid() )
outGeo = out.geometry()
self.assertEqual( sorted([ x.name() for x in outGeo.pointAttribs() ]), ['P', 'Pw'] )
self.assertEqual( sorted([ x.name() for x in outGeo.primAttribs() ]), ["ieMeshInterpolation"] )
self.assertEqual( sorted([ x.name() for x in outGeo.vertexAttribs() ]), ['Cd'] )
self.assertEqual( sorted([ x.name() for x in outGeo.globalAttribs() ]), ['varmap'] )
opHolder.parm( "parm_input_attributeFilter" ).set( "* ^uv ^pscale ^rest" )
opHolder.cook( True )
result = fn.getOp().resultParameter().getValue()
self.assertEqual( result.keys(), [ "Cs", "P", "varmap" ] )
self.assertTrue( result.arePrimitiveVariablesValid() )
outGeo = out.geometry()
self.assertEqual( sorted([ x.name() for x in outGeo.pointAttribs() ]), ['P', 'Pw'] )
self.assertEqual( sorted([ x.name() for x in outGeo.primAttribs() ]), ["ieMeshInterpolation"] )
self.assertEqual( sorted([ x.name() for x in outGeo.vertexAttribs() ]), ['Cd'] )
self.assertEqual( sorted([ x.name() for x in outGeo.globalAttribs() ]), ['varmap'] )
		# since the vars never made it to the op, they never make it out
out.parm( "attributeFilter" ).set( "*" )
outGeo = out.geometry()
self.assertEqual( sorted([ x.name() for x in outGeo.pointAttribs() ]), ['P', 'Pw'] )
self.assertEqual( sorted([ x.name() for x in outGeo.primAttribs() ]), ["ieMeshInterpolation"] )
self.assertEqual( sorted([ x.name() for x in outGeo.vertexAttribs() ]), ['Cd'] )
self.assertEqual( sorted([ x.name() for x in outGeo.globalAttribs() ]), ['varmap'] )
def testStandardAttributeConversion( self ) :
torus = hou.node("/obj").createNode("geo", run_init_scripts=False).createNode( "torus" )
color = torus.createOutputNode( "color" )
color.parm( "class" ).set( 3 )
color.parm( "colortype" ).set( 2 )
rest = color.createOutputNode( "rest" )
scale = rest.createOutputNode( "attribcreate" )
scale.parm( "name1" ).set( "pscale" )
scale.parm( "value1v1" ).setExpression( "$PT" )
uvunwrap = scale.createOutputNode( "uvunwrap" )
opHolder = uvunwrap.createOutputNode( "ieOpHolder" )
fn = IECoreHoudini.FnOpHolder( opHolder )
fn.setOp( "parameters/primitives/polyParam" )
out = opHolder.createOutputNode( "ieCortexConverter" )
# verify input
inGeo = uvunwrap.geometry()
self.assertEqual( sorted([ x.name() for x in inGeo.pointAttribs() ]), ['P', 'Pw', 'pscale', 'rest'] )
self.assertEqual( sorted([ x.name() for x in inGeo.primAttribs() ]), [] )
self.assertEqual( sorted([ x.name() for x in inGeo.vertexAttribs() ]), ['Cd', 'uv'] )
self.assertEqual( sorted([ x.name() for x in inGeo.globalAttribs() ]), ['varmap'] )
		# verify output
outGeo = out.geometry()
self.assertEqual( sorted([ x.name() for x in outGeo.pointAttribs() ]), ['P', 'Pw', 'pscale', 'rest'] )
self.assertEqual( sorted([ x.name() for x in outGeo.primAttribs() ]), ["ieMeshInterpolation"] )
self.assertEqual( sorted([ x.name() for x in outGeo.vertexAttribs() ]), ['Cd', 'uv'] )
self.assertEqual( sorted([ x.name() for x in outGeo.globalAttribs() ]), ['varmap'] )
# verify intermediate op result
result = fn.getOp().resultParameter().getValue()
self.assertEqual( result.keys(), [ "Cs", "P", "Pref", "s", "t", "varmap", "width" ] )
self.assertTrue( result.arePrimitiveVariablesValid() )
self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertEqual( result["Pref"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
sData = result["s"].data
tData = result["t"].data
inUvs = inGeo.findVertexAttrib( "uv" )
outUvs = outGeo.findVertexAttrib( "uv" )
i = 0
for prim in inGeo.prims() :
verts = list(prim.vertices())
verts.reverse()
for vert in verts :
uvValues = vert.attribValue( inUvs )
self.assertAlmostEqual( sData[i], uvValues[0] )
self.assertAlmostEqual( tData[i], 1 - uvValues[1] )
i += 1
i = 0
for prim in outGeo.prims() :
verts = list(prim.vertices())
verts.reverse()
for vert in verts :
uvValues = vert.attribValue( outUvs )
self.assertAlmostEqual( sData[i], uvValues[0] )
self.assertAlmostEqual( tData[i], 1 - uvValues[1] )
i += 1
# turn off half the conversion
opHolder.parm( "parm_input_convertStandardAttributes" ).set( False )
		# verify output
outGeo = out.geometry()
self.assertEqual( sorted([ x.name() for x in outGeo.pointAttribs() ]), ['P', 'Pw', 'pscale', 'rest'] )
self.assertEqual( sorted([ x.name() for x in outGeo.primAttribs() ]), ["ieMeshInterpolation"] )
self.assertEqual( sorted([ x.name() for x in outGeo.vertexAttribs() ]), ['Cd', 'uv'] )
self.assertEqual( sorted([ x.name() for x in outGeo.globalAttribs() ]), ['varmap'] )
# verify intermediate op result
result = fn.getOp().resultParameter().getValue()
self.assertEqual( result.keys(), [ "Cd", "P", "pscale", "rest", "uv", "varmap" ] )
self.assertTrue( result.arePrimitiveVariablesValid() )
self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertEqual( result["rest"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
uvData = result["uv"].data
inUvs = inGeo.findVertexAttrib( "uv" )
outUvs = outGeo.findVertexAttrib( "uv" )
i = 0
for prim in inGeo.prims() :
verts = list(prim.vertices())
verts.reverse()
for vert in verts :
uvValues = vert.attribValue( inUvs )
self.assertAlmostEqual( uvData[i][0], uvValues[0] )
self.assertAlmostEqual( uvData[i][1], uvValues[1] )
i += 1
i = 0
for prim in outGeo.prims() :
verts = list(prim.vertices())
verts.reverse()
for vert in verts :
uvValues = vert.attribValue( outUvs )
self.assertAlmostEqual( uvData[i][0], uvValues[0] )
self.assertAlmostEqual( uvData[i][1], uvValues[1] )
i += 1
# turn off the other half of the conversion
opHolder.parm( "parm_input_convertStandardAttributes" ).set( True )
out.parm( "convertStandardAttributes" ).set( False )
		# verify output
outGeo = out.geometry()
self.assertEqual( sorted([ x.name() for x in outGeo.pointAttribs() ]), ['P', 'Pref', 'Pw', 'width'] )
self.assertEqual( sorted([ x.name() for x in outGeo.primAttribs() ]), ["ieMeshInterpolation"] )
self.assertEqual( sorted([ x.name() for x in outGeo.vertexAttribs() ]), ['Cs', 's', 't'] )
self.assertEqual( sorted([ x.name() for x in outGeo.globalAttribs() ]), ['varmap'] )
# verify intermediate op result
result = fn.getOp().resultParameter().getValue()
self.assertEqual( result.keys(), [ "Cs", "P", "Pref", "s", "t", "varmap", "width" ] )
self.assertTrue( result.arePrimitiveVariablesValid() )
self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertEqual( result["Pref"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
sData = result["s"].data
tData = result["t"].data
inUvs = inGeo.findVertexAttrib( "uv" )
outS = outGeo.findVertexAttrib( "s" )
outT = outGeo.findVertexAttrib( "t" )
i = 0
for prim in inGeo.prims() :
verts = list(prim.vertices())
verts.reverse()
for vert in verts :
uvValues = vert.attribValue( inUvs )
self.assertAlmostEqual( sData[i], uvValues[0] )
self.assertAlmostEqual( tData[i], 1 - uvValues[1] )
i += 1
i = 0
for prim in outGeo.prims() :
verts = list(prim.vertices())
verts.reverse()
for vert in verts :
self.assertAlmostEqual( sData[i], vert.attribValue( outS ) )
self.assertAlmostEqual( tData[i], vert.attribValue( outT ) )
i += 1
if __name__ == "__main__":
unittest.main()
|
|
"""Wrapper/Implementation of the GLU tessellator objects for PyOpenGL"""
from OpenGL.raw import GLU as simple
from OpenGL.platform import GLU,createBaseFunction
from OpenGL.GLU import glustruct
from OpenGL import arrays, constants
from OpenGL.platform import PLATFORM
from OpenGL.lazywrapper import lazy
import ctypes
class GLUtesselator( glustruct.GLUStruct, simple.GLUtesselator):
"""Implementation class for GLUTessellator structures in OpenGL-ctypes"""
FUNCTION_TYPE = PLATFORM.functionTypeFor(PLATFORM.GLU)
CALLBACK_TYPES = {
# mapping from "which" GLU enumeration to a ctypes function type
simple.GLU_TESS_BEGIN: FUNCTION_TYPE( None, simple.GLenum ),
simple.GLU_TESS_BEGIN_DATA: FUNCTION_TYPE(
None, simple.GLenum, ctypes.c_void_p
),
simple.GLU_TESS_EDGE_FLAG: FUNCTION_TYPE( None, simple.GLboolean),
simple.GLU_TESS_EDGE_FLAG_DATA: FUNCTION_TYPE(
None, simple.GLboolean, ctypes.c_void_p
),
simple.GLU_TESS_VERTEX: FUNCTION_TYPE( None, ctypes.c_void_p ),
simple.GLU_TESS_VERTEX_DATA: FUNCTION_TYPE(
None, ctypes.c_void_p, ctypes.c_void_p
),
simple.GLU_TESS_END: FUNCTION_TYPE( None ),
simple.GLU_TESS_END_DATA: FUNCTION_TYPE( None, ctypes.c_void_p),
simple.GLU_TESS_COMBINE: FUNCTION_TYPE(
None,
ctypes.POINTER(simple.GLdouble),
ctypes.POINTER(ctypes.c_void_p),
ctypes.POINTER(simple.GLfloat),
ctypes.POINTER(ctypes.c_void_p)
),
simple.GLU_TESS_COMBINE_DATA: FUNCTION_TYPE(
None,
ctypes.POINTER(simple.GLdouble),
ctypes.POINTER(ctypes.c_void_p),
ctypes.POINTER(simple.GLfloat),
ctypes.POINTER(ctypes.c_void_p),
ctypes.c_void_p,
),
simple.GLU_TESS_ERROR: FUNCTION_TYPE( None, simple.GLenum),
simple.GLU_TESS_ERROR_DATA: FUNCTION_TYPE(
None, simple.GLenum, ctypes.c_void_p
),
simple.GLU_ERROR : FUNCTION_TYPE( None, simple.GLenum )
}
WRAPPER_METHODS = {
simple.GLU_TESS_BEGIN_DATA: 'dataWrapper',
simple.GLU_TESS_EDGE_FLAG_DATA: 'dataWrapper',
simple.GLU_TESS_VERTEX: 'vertexWrapper',
simple.GLU_TESS_VERTEX_DATA: 'vertexWrapper',
simple.GLU_TESS_END_DATA: 'dataWrapper',
simple.GLU_TESS_COMBINE: 'combineWrapper',
simple.GLU_TESS_COMBINE_DATA: 'combineWrapper',
simple.GLU_TESS_ERROR_DATA: 'dataWrapper',
}
def gluTessVertex( self, location, data=None ):
"""Add a vertex to this tessellator, storing data for later lookup"""
vertexCache = getattr( self, 'vertexCache', None )
if vertexCache is None:
self.vertexCache = []
vertexCache = self.vertexCache
location = arrays.GLdoubleArray.asArray( location, constants.GL_DOUBLE )
if arrays.GLdoubleArray.arraySize( location ) != 3:
raise ValueError( """Require 3 doubles for array location, got: %s"""%(location,))
oorValue = self.noteObject(data)
vp = ctypes.c_void_p( oorValue )
self.vertexCache.append( location )
return gluTessVertexBase( self, location, vp )
def gluTessBeginPolygon( self, data ):
"""Note the object pointer to return it as a Python object"""
return simple.gluTessBeginPolygon(
self, ctypes.c_void_p(self.noteObject( data ))
)
def combineWrapper( self, function ):
"""Wrap a Python function with ctypes-compatible wrapper for combine callback
For a Python combine callback, the signature looks like this:
def combine(
GLdouble coords[3],
void *vertex_data[4],
GLfloat weight[4]
):
return data
While the C signature looks like this:
void combine(
GLdouble coords[3],
void *vertex_data[4],
GLfloat weight[4],
void **outData
)
"""
if (function is not None) and (not hasattr( function,'__call__' )):
raise TypeError( """Require a callable callback, got: %s"""%(function,))
def wrap( coords, vertex_data, weight, outData, *args ):
"""The run-time wrapper around the function"""
coords = self.ptrAsArray( coords, 3, arrays.GLdoubleArray )
weight = self.ptrAsArray( weight, 4, arrays.GLfloatArray )
# find the original python objects for vertex data
vertex_data = [ self.originalObject( vertex_data[i] ) for i in range(4) ]
args = tuple( [ self.originalObject( x ) for x in args ] )
try:
result = function( coords, vertex_data, weight, *args )
except Exception, err:
raise err.__class__(
"""Failure during combine callback %r with args( %s,%s,%s,*%s):\n%s"""%(
function, coords, vertex_data, weight, args, str(err),
)
)
outP = ctypes.c_void_p(self.noteObject(result))
outData[0] = outP
return None
return wrap
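    # A matching Python-side combine callback might look like this (an
    # illustrative sketch that reuses the intersection coordinates as the
    # new vertex data and ignores the weights):
    #
    #     def combine(coords, vertex_data, weight):
    #         return coords
    #
    #     tess.addCallback(simple.GLU_TESS_COMBINE, combine)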
def dataWrapper( self, function ):
"""Wrap a function which only has the one data-pointer as last arg"""
if (function is not None) and (not hasattr( function,'__call__' )):
raise TypeError( """Require a callable callback, got: %s"""%(function,))
def wrap( *args ):
"""Just return the original object for polygon_data"""
args = args[:-1] + ( self.originalObject(args[-1]), )
try:
return function( *args )
except Exception, err:
err.args += (function,args)
raise
return wrap
def dataWrapper2( self, function ):
"""Wrap a function which has two data-pointers as last args"""
if (function is not None) and (not hasattr( function,'__call__' )):
raise TypeError( """Require a callable callback, got: %s"""%(function,))
def wrap( *args ):
"""Just return the original object for polygon_data"""
args = args[:-2] + ( self.originalObject(args[-2]), self.originalObject(args[-1]), )
try:
return function( *args )
except Exception, err:
err.args += (function,args)
raise
return wrap
def vertexWrapper( self, function ):
"""Converts a vertex-pointer into an OOR vertex for processing"""
if (function is not None) and (not hasattr( function,'__call__' )):
raise TypeError( """Require a callable callback, got: %s"""%(function,))
def wrap( vertex, data=None ):
"""Just return the original object for polygon_data"""
vertex = self.originalObject(vertex)
try:
if data is not None:
data = self.originalObject(data)
return function( vertex, data )
else:
return function( vertex )
except Exception, err:
err.args += (function,(vertex,data))
raise
return wrap
GLUtesselator.CALLBACK_FUNCTION_REGISTRARS = dict([
(c,createBaseFunction(
'gluTessCallback', dll=GLU, resultType=None,
argTypes=[ctypes.POINTER(GLUtesselator), simple.GLenum,funcType],
doc='gluTessCallback( POINTER(GLUtesselator)(tess), GLenum(which), _GLUfuncptr(CallBackFunc) ) -> None',
argNames=('tess', 'which', 'CallBackFunc'),
))
for (c,funcType) in GLUtesselator.CALLBACK_TYPES.items()
])
try:
del c, funcType
except NameError, err:
pass
def gluTessCallback( tess, which, function ):
"""Set a given gluTessellator callback for the given tessellator"""
return tess.addCallback( which, function )
def gluTessBeginPolygon( tess, data ):
"""Start definition of polygon in the tessellator"""
return tess.gluTessBeginPolygon( data )
def gluTessVertex( tess, location, data=None ):
"""Add a vertex to the tessellator's current polygon"""
return tess.gluTessVertex( location, data )
# /usr/include/GL/glu.h 293
@lazy(
createBaseFunction(
'gluNewTess', dll=GLU, resultType=ctypes.POINTER(GLUtesselator),
doc='gluNewTess( ) -> POINTER(GLUtesselator)',
)
)
def gluNewTess( baseFunction ):
"""Get a new tessellator object (just unpacks the pointer for you)"""
return baseFunction()[0]
@lazy( simple.gluGetTessProperty )
def gluGetTessProperty( baseFunction, tess, which, data=None ):
"""Retrieve single double for a tessellator property"""
if data is None:
data = simple.GLdouble( 0.0 )
baseFunction( tess, which, data )
return data.value
else:
return baseFunction( tess, which, data )
gluTessVertexBase = arrays.setInputArraySizeType(
simple.gluTessVertex,
3,
arrays.GLdoubleArray,
'location',
)
__all__ = (
'gluNewTess',
'gluGetTessProperty',
'gluTessBeginPolygon',
'gluTessCallback',
'gluTessVertex',
)
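# Illustrative tessellation sketch. Assumes a current GL context, glBegin/
# glVertex3dv/glEnd imported from OpenGL.GL, and the contour begin/end
# entry points from the raw GLU bindings (none of which are wrapped in
# this module); ``polygon_points`` is a hypothetical list of 3-double
# vertices:
#
#     tess = gluNewTess()
#     gluTessCallback(tess, simple.GLU_TESS_BEGIN, glBegin)
#     gluTessCallback(tess, simple.GLU_TESS_VERTEX, glVertex3dv)
#     gluTessCallback(tess, simple.GLU_TESS_END, glEnd)
#     gluTessBeginPolygon(tess, None)
#     simple.gluTessBeginContour(tess)
#     for point in polygon_points:
#         gluTessVertex(tess, point, point)
#     simple.gluTessEndContour(tess)
#     simple.gluTessEndPolygon(tess)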
|
|
import logging
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import PageNotAnInteger, EmptyPage
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http.response import HttpResponseRedirect, Http404
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.utils.text import slugify
from django.utils.translation import ugettext as _
from models import Place, Rating, Review
# Instantiate logger.
logger = logging.getLogger(__name__)
def index(request):
"""
Index page.
"""
# Page title.
try:
title = settings.YPLACES['index_title']
except KeyError:
title = 'YPLACES'
# Page description.
try:
description = settings.YPLACES['index_description']
except KeyError:
description = ''
# Top places.
top_rating = Rating.objects.all().order_by('-relative')[:5]
# Fetch latest reviews.
reviews = Review.objects.all().order_by('-date')[:5]
# Render page.
return render_to_response('yplaces/index.html',
{ 'title': title,
'description': description,
'top_rating': top_rating,
'reviews': reviews,
'places_api_url': settings.HOST_URL + reverse(settings.YPLACES['api_url_namespace'] + ':yplaces:index') },
context_instance=RequestContext(request))
@login_required
def add(request):
"""
Add new Place.
"""
return render_to_response('yplaces/edit.html',
{ 'title': _('Add Place'),
'api_url': settings.HOST_URL + reverse(settings.YPLACES['api_url_namespace'] + ':yplaces:index'),
'action': 'POST',
'next': reverse('yplaces:index') },
context_instance=RequestContext(request))
def search(request):
"""
Place search.
"""
# Lets start with all..
results = Place.objects.filter(active=True).order_by('name')
# Search by name.
try:
name = request.GET['name']
results = results.filter(Q(name__icontains=name))
except KeyError:
name = ''
# Search by location.
try:
location = request.GET['location']
results = results.filter(Q(address__icontains=location) | Q(city__icontains=location) | Q(state__icontains=location) | Q(country__icontains=location))
except KeyError:
location = ''
# Fetch X items and paginate.
paginator = Paginator(results, 10)
try:
page = request.GET.get('page')
result_page = paginator.page(page)
page = int(page)
# If page is not an integer, deliver first page.
except PageNotAnInteger:
page = 1
result_page = paginator.page(page)
# If page is out of range (e.g. 9999), deliver last page of results.
except EmptyPage:
page = paginator.num_pages
result_page = paginator.page(page)
# Render page.
return render_to_response('yplaces/search.html',
{ 'search_name': name,
'search_location': location,
'search_results': result_page },
context_instance=RequestContext(request))
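# For example, a request such as (hypothetical URL):
#
#     /yplaces/search/?name=cafe&location=Lisbon&page=2
#
# filters active places by name, matches the location string against the
# address/city/state/country fields, and returns the second page of 10
# results.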
def place_id(request, pk):
"""
Checks if the place with given ID exists and, if it does, redirect to the page with respective slug.
"""
# Check if Place with given ID exists.
try:
place = Place.objects.get(pk=pk)
# **************** IMPORTANT ***************
# Only show inactive places to _staff_ users.
# ******************************************
if not place.active and (not request.user or not request.user.is_staff):
raise Http404
# Redirect to Place slug.
return HttpResponseRedirect(reverse('yplaces:slug', args=[place.pk, slugify(place.name)]))
# Invalid ID.
except ObjectDoesNotExist:
raise Http404
def place_slug(request, pk, slug):
"""
Returns page for the place with the given ID.
"""
# Check if Place with given ID exists.
try:
place = Place.objects.get(pk=pk)
# **************** IMPORTANT ***************
# Only show inactive places to _staff_ users.
# ******************************************
if not place.active and (not request.user or not request.user.is_staff):
raise Http404
# Invalid ID.
except ObjectDoesNotExist:
raise Http404
# Highlighted Photos.
photos = [None, None, None]
no_photos = True
for idx, photo in enumerate(place.photo_set.all()):
if idx < 3:
photos[idx] = photo
no_photos = False
else:
break
# Render page
return render_to_response('yplaces/place.html',
{ 'place': place,
'rating': place.get_rating(),
'photos': photos, 'no_photos': no_photos,
'reviews_api_url': settings.HOST_URL + reverse(settings.YPLACES['api_url_namespace'] + ':yplaces:reviews', args=[place.pk]),
'host_url': settings.HOST_URL },
context_instance=RequestContext(request))
@login_required
def edit(request, pk, slug):
"""
Edit Place's information.
"""
# Only staff members.
if not request.user.is_staff:
raise Http404
# Fetch Place with given ID.
try:
place = Place.objects.get(pk=pk)
except ObjectDoesNotExist:
raise Http404
# Render page.
return render_to_response('yplaces/edit.html',
{ 'place': place,
'title': _('Edit Place'),
'api_url': settings.HOST_URL + reverse(settings.YPLACES['api_url_namespace'] + ':yplaces:id', args=[place.pk]),
'action': 'PUT' },
context_instance=RequestContext(request))
def photos(request, pk, slug):
"""
Renders the Place's photo gallery.
"""
# Fetch Place with given ID.
try:
place = Place.objects.get(pk=pk, active=True)
except ObjectDoesNotExist:
raise Http404
# Render page.
return render_to_response('yplaces/photos.html',
{ 'place': place,
'rating': place.get_rating(),
'photos_api_url': settings.HOST_URL + reverse(settings.YPLACES['api_url_namespace'] + ':yplaces:photos', args=[place.pk]) },
context_instance=RequestContext(request))
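# The views above assume settings of roughly this shape (a sketch; the
# keys are inferred from the lookups in this module, the values are
# placeholders):
#
#     HOST_URL = 'https://example.com'
#     YPLACES = {
#         'index_title': 'YPLACES',
#         'index_description': '',
#         'api_url_namespace': 'api',
#     }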
|
|
'''
Created on Dec 11, 2013
@author: gpratt
'''
from collections import defaultdict, Counter
from itertools import groupby, permutations
from functools import partial
from optparse import OptionParser
import os
import tempfile

import HTSeq
import pandas as pd
import pybedtools
import pysam
def editor(read):
if read.strand == "-":
read.start = read.stop - 1
else:
read.stop = read.start + 1
return read
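# For example, a "+" interval (start=100, stop=200) collapses to the single
# base (100, 101), and a "-" interval collapses to (199, 200): only the 5'
# end of each read is kept for the per-base overlap counting below.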
#assumes properly sorted, barcode collapsed reads, otherwise everything breaks
def tags_to_dict(tags):
"""
Converts the tag set to a dictionary
"""
return {key: value for key, value in tags}
def append_read_group(base, read_group):
try:
base.add(read_group)
return base
    except AttributeError:
        # base is None the first time a position is seen: start a new set
        return set([read_group])
def bedtools_count_contamination(bam_file):
#Hopefully a faster way of doing this...
handle, fn = tempfile.mkstemp()
with pysam.AlignmentFile(bam_file, 'rb') as reads:
with pysam.AlignmentFile(fn, 'wb', template=reads) as writer:
for x, read in enumerate(reads):
read.qname = read.qname + ":" + read.tags[-1][1]
writer.write(read)
bedtool = pybedtools.BedTool(fn).bam_to_bed()
bedtool = bedtool.each(editor).sort().saveas()
    #can be a bit sloppy because I'm processing base by base
    #should build in strandedness, but this should be good enough, probability is still low...
total = Counter()
combinations = defaultdict(Counter)
prev_base = 0
base_dict = defaultdict(list)
for x, interval in enumerate(bedtool):
#reset counter, and process
if interval.start != prev_base:
for randomer, read_groups in base_dict.items():
for rg1, rg2 in permutations(read_groups, 2):
combinations[rg1][rg2] += 1
#processing code goes here
base_dict = defaultdict(list)
name = interval.name.split(":")
randomer, read_group = name[0], name[-1]
base_dict[randomer].append(read_group)
total[read_group] += 1
prev_base = interval.start
return pd.DataFrame(combinations), pd.Series(total)
def genome_count_contamination(bam_file):
#Load all values into dict
reads_at_location = defaultdict(lambda: HTSeq.GenomicArray("auto", typecode="O", stranded=True))
reads = HTSeq.BAM_Reader(bam_file)
for read in reads:
try:
randomer = read.read.name.split(":")[0]
read_group = tags_to_dict(read.optional_fields)['RG']
reads_at_location[randomer][read.iv].apply(partial(append_read_group, read_group=read_group))
except AttributeError as e:
pass
#Get them out
combinations = defaultdict(Counter)
total = Counter()
for randomer in reads_at_location.keys():
#print reads_at_location[randomer]
for genomic_interval, read_groups in reads_at_location[randomer].steps():
if not read_groups:
continue
for read_group in read_groups:
total[read_group] += 1
for rg1, rg2 in permutations(read_groups, 2):
combinations[rg1][rg2] += 1
return pd.DataFrame(combinations), pd.Series(total)
def mark_overlap_for_base(reads):
"""
    Given an iterable of reads, returns a boolean matrix of read groups by barcodes indicating which read group has which barcode
"""
counts = defaultdict(dict)
for read in reads:
randomer = read.qname.split(":")[0]
read_group = tags_to_dict(read.tags)["RG"]
counts[randomer][read_group] = True
return pd.DataFrame(counts).fillna(False)
def reads_starting_at_location(reads, loc):
"""
    given a list of reads, returns all reads that start at the given loc, separated into positive- and negative-strand lists
"""
pos_reads = []
neg_reads = []
for read in reads:
read_start = read.positions[-1] if read.is_reverse else read.positions[0]
if read_start == loc:
if read.is_reverse:
neg_reads.append(read)
else:
pos_reads.append(read)
return pos_reads, neg_reads
def count_contamination(bam_file):
"""
    Given a bam file with read groups and barcodes, counts the number of overlapping barcodes per read group.
    Returns a dataframe of overlaps and a series of total counts.
"""
combinations = defaultdict(Counter)
total = Counter()
with pysam.Samfile(bam_file) as bam_group:
#pipeup ignores multimapped reads by default, be careful
for base in bam_group.pileup():
#given a base, get all reads that start there, either on the positive or negative strands
read_start = base.pos
reads = (read.alignment for read in base.pileups)
pos_reads, neg_reads = reads_starting_at_location(reads, read_start)
for read in pos_reads:
read_group = tags_to_dict(read.tags)["RG"]
total[read_group] += 1
for read in neg_reads:
read_group = tags_to_dict(read.tags)["RG"]
total[read_group] += 1
pos_overlap = mark_overlap_for_base(pos_reads)
neg_overlap = mark_overlap_for_base(neg_reads)
#Count both the negative and positive overlap
for randomer in pos_overlap.columns:
for rg1, rg2 in permutations(pos_overlap[pos_overlap[randomer]].index, 2):
combinations[rg1][rg2] += 1
for randomer in neg_overlap.columns:
for rg1, rg2 in permutations(neg_overlap[neg_overlap[randomer]].index, 2):
combinations[rg1][rg2] += 1
return pd.DataFrame(combinations), pd.Series(total)
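# Sketch of inspecting the counts (the file name is hypothetical);
# ``combinations`` is a read-group-by-read-group table of shared-barcode
# events and ``total`` holds per-read-group read counts:
#
#     overlaps, totals = count_contamination("multiplexed.rg.bam")
#     print(overlaps)
#     print(totals)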
##Need to write up testing code for this
def correlation(bam_1, bam_2, outbam):
"""
bam_1: path to indexed bam file
bam_2: path to indexed bam file
returns number of matched reads between first and second bam file
and total number of reads in the first bam file
"""
total_count = 0
matched_count = 0
with pysam.Samfile(bam_1) as bam_1, pysam.Samfile(bam_2) as bam_2:
outbam = pysam.Samfile(outbam, 'wh', bam_1)
for read in bam_1:
total_count += 1
read_start = read.positions[-1] if read.is_reverse else read.positions[0]
fetched_reads = list(bam_2.fetch(bam_1.getrname(read.tid),
read_start,
read_start + 1))
for fetched_read in fetched_reads:
fetched_start = fetched_read.positions[-1] if fetched_read.is_reverse else fetched_read.positions[0]
if read.qname.split(":")[0] == fetched_read.qname.split(":")[0] and read_start == fetched_start:
matched_count += 1
outbam.write(read)
break
outbam.close()
return matched_count, total_count
if __name__ == '__main__':
usage = """ detects cross contamination between two samples demultiplexed
via the demultiplex barcoded fastq script, after alignment """
parser = OptionParser(usage)
parser.add_option("-f", "--bam_1", dest="bam_1",
help="first (barcoded) bam file to look for cross contamination with")
parser.add_option("-b", "--bam_2", dest="bam_2",
help="second (barcoded) bam file to look for cross contamination with")
parser.add_option("-o", "--out_file", dest="out_file")
(options, args) = parser.parse_args()
    # correlation() opens and closes the output SAM itself, so pass it the path
    out_sam = os.path.splitext(options.out_file)[0] + ".sam"
    matched_count, total_count = correlation(options.bam_1, options.bam_2, out_sam)
name1 = os.path.basename(".".join(options.bam_1.split(".")[:2]))
name2 = os.path.basename(".".join(options.bam_2.split(".")[:2]))
with open(os.path.join(options.out_file), 'w') as outfile:
outfile.write("\t".join(map(str, [name1, name2, matched_count, total_count])) + "\n")
|
|
# Copyright 2015 Lockheed Martin Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Copyright Lockheed Martin 2012
#
# Client Library for laikaboss framework.
#
########################################
import os, sys
import zlib, cPickle as pickle
import logging
from random import randint
import json
import traceback
import uuid
from laikaboss.objectmodel import QuitScanException
from copy import deepcopy as clone_object
REQ_TYPE_PICKLE = '1'
REQ_TYPE_PICKLE_ZLIB = '2'
def dispositionFromResult(result):
'''
    This function examines the DISPOSITIONER module metadata in the scan
    results and returns the sorted list of disposition matches.
'''
try:
matches = result.files[result.rootUID].moduleMetadata['DISPOSITIONER']['Disposition']['Matches']
return sorted(matches)
except QuitScanException:
raise
except:
logging.debug("Unable to disposition the result")
return ['Error']
def finalDispositionFromResult(result):
'''
    This function examines the DISPOSITIONER module metadata in the scan
    results and returns the final disposition result.
'''
try:
return result.files[result.rootUID].moduleMetadata['DISPOSITIONER']['Disposition']['Result']
except QuitScanException:
raise
except:
logging.debug("Unable to disposition the result")
return ['Error']
def getAttachmentList(result):
children = []
rootObject = None
for uid, scanObject in result.files.iteritems():
if not scanObject.parent:
rootObject = uid
for uid, scanObject in result.files.iteritems():
if scanObject.parent == rootObject:
if scanObject.filename:
children.append(scanObject.filename)
return children
def flagRollup(result):
'''
This function takes a fully populated result object and returns a list of flags
which has been sorted and deduplicated.
Arguments:
result -- a fully populated scan result set
Returns:
A sorted/unique list of all flags in the result
'''
flag_rollup = []
for id, scanObject in result.files.iteritems():
flag_rollup.extend(scanObject.flags)
flag_rollup = set(flag_rollup)
return sorted(flag_rollup)
def getRootObject(result):
'''
Returns the ScanObject in a result set that contains no parent (making it the root).
Arguments:
result -- a fully populated scan result set
Returns:
The root ScanObject for the result set.
'''
return result.files[result.rootUID] #ScanObject type
def get_scanObjectUID(scanObject):
'''
Get the UID for a ScanObject instance.
Arguments:
scanObject -- a ScanObject instance
Returns:
A string containing the UID of the object.
'''
return scanObject.uuid
def getJSON(result):
'''
This function takes the result of a scan, and returns the JSON output.
Arguments:
result -- a fully populated scan result set.
Returns:
A string representation of the json formatted output.
'''
resultText = ''
# Build the results portion of the log record. This will be a list of
# dictionaries, where each dictionary is the result of a single buffer's
# scan. The list will contain all of the buffers that were exploded from
# a root buffer's scan in the order they were processed.
buffer_results = [None] * len(result.files)
for scan_object in result.files.itervalues():
# Do not damage the original result -> clone
buffer_result = clone_object(scan_object.__dict__)
# Don't log buffers here, just metadata
if "buffer" in buffer_result:
del buffer_result["buffer"]
buffer_results[buffer_result["order"]] = buffer_result
# Construct the log record with fields useful for log processing and
# routing
log_record = {
'source': result.source,
'scan_result': buffer_results
}
resultText = json.dumps(log_record)
return resultText
class Client:
_CONTEXT = None
_CLIENT = None
_TIMEOUT = None
_POLL = None
_BROKER_HOST = None
_SSH_HOST = None
_USE_SSH = None
_REQUEST_TYPE = None
def __init__(self, brokerHost, context=None, useSSH=False, sshHost=None, async=False, useGevent=False, requestType=REQ_TYPE_PICKLE_ZLIB):
# Initialize Attributes
if useGevent:
#logging.debug("Using Gevent_zmq")
#from gevent_zeromq import zmq
import zmq.green as zmq
else:
import zmq
self.zmq = zmq
self._BROKER_HOST = brokerHost
self._SSH_HOST = sshHost
self._USE_SSH = useSSH
self._POLL = zmq.Poller()
self._ID = randint(1,999)
self._ASYNC = async
self._REQUEST_TYPE = requestType
if context is not None:
self._CONTEXT = context
else:
self._CONTEXT = self.zmq.Context()
# Connect Client
self._connect()
# END __init__
def close(self):
try:
self._disconnect()
self._CONTEXT.term()
except:
raise
def _connect(self):
# Get Context
if self._ASYNC:
self._CLIENT = self._CONTEXT.socket(self.zmq.PUSH)
else:
self._CLIENT = self._CONTEXT.socket(self.zmq.REQ)
# Check if SSH is requested
if self._USE_SSH:
from zmq import ssh
# Ensure there exists an SSH Host
if self._SSH_HOST:
try:
ssh.tunnel_connection(self._CLIENT, self._BROKER_HOST, self._SSH_HOST)
except RuntimeError as e:
raise e
else:
raise AttributeError("No SSH Host.")
else:
self._CLIENT.connect(self._BROKER_HOST)
# Register Poll
self._POLL.register(self._CLIENT, self.zmq.POLLIN)
# END _connect
def _disconnect(self):
self._CLIENT.setsockopt(self.zmq.LINGER, 0)
self._CLIENT.close()
self._POLL.unregister(self._CLIENT)
# END _disconnect
def _send_recv(self, externalObject):
# Serialize and compress the externalObject
zmo = pickle.dumps(externalObject, pickle.HIGHEST_PROTOCOL)
if self._REQUEST_TYPE == REQ_TYPE_PICKLE_ZLIB:
zmo = zlib.compress(zmo)
# Send (if _TIMEOUT=None, there is unlimited time)
try:
self._CLIENT.send_multipart([self._REQUEST_TYPE, '', zmo])
# An error will occur if the ZMQ socket is in the wrong state
# In this case, we disconnect and then reconnect before retrying
#except self.zmq.core.error.ZMQError:
except:
logging.debug("ID %i : ZMQ socket in wrong state, reconnecting." % self._ID)
self._disconnect()
self._connect()
self._CLIENT.send_multipart([self._REQUEST_TYPE, '', zmo])
socks = dict(self._POLL.poll(self._TIMEOUT))
if socks.get(self._CLIENT) == self.zmq.POLLIN:
            # Receive reply
reply = self._CLIENT.recv()
logging.debug("ID %i : got reply" % self._ID)
# Check for non-empty reply
if not reply:
return None
else:
return None
# Decompress and deserialize reply
if self._REQUEST_TYPE == REQ_TYPE_PICKLE_ZLIB:
reply = zlib.decompress(reply)
result = pickle.loads(reply)
# Return the result
return result
# END _send_recv
def _send_only(self, externalObject, timeout=-1):
logging.debug("AED Async Send Timeout: %s" % timeout)
# Serialize and compress the externalObject
zmo = pickle.dumps(externalObject, pickle.HIGHEST_PROTOCOL)
if self._REQUEST_TYPE == REQ_TYPE_PICKLE_ZLIB:
zmo = zlib.compress(zmo)
# Send (if _TIMEOUT=None, there is unlimited time)
try:
if timeout:
tracker = self._CLIENT.send_multipart([self._REQUEST_TYPE, '', zmo], copy=False, track=True)
tracker.wait(timeout)
else:
self._CLIENT.send_multipart([self._REQUEST_TYPE, '', zmo])
# An error will occur if the ZMQ socket is in the wrong state
# In this case, we disconnect and then reconnect before retrying
# If the second attempt fails, return False
except self.zmq.NotDone:
logging.debug("Message sending timed out...")
return False
except:
try:
logging.debug("ID %i : ZMQ socket in wrong state, reconnecting" % self._ID)
self._disconnect()
self._connect()
if timeout:
tracker = self._CLIENT.send_multipart([self._REQUEST_TYPE, '', zmo], copy=False, track=True)
tracker.wait(timeout)
else:
self._CLIENT.send_multipart([self._REQUEST_TYPE, '', zmo])
except:
return False
# Return the result
return True
# END _send_only
def send(self, externalObject, retry=0, timeout=None):
self._TIMEOUT = timeout
retriesLeft = retry
result = None
try:
if self._ASYNC:
result = self._send_only(externalObject, timeout=self._TIMEOUT)
else:
result = self._send_recv(externalObject)
while retriesLeft and not result:
logging.debug("ID %i : No response from broker, retrying..." % self._ID)
self._disconnect()
self._connect()
if self._ASYNC:
result = self._send_only(externalObject, timeout=self._TIMEOUT)
else:
result = self._send_recv(externalObject)
retriesLeft -= 1
return result
except KeyboardInterrupt:
print "Interrupted by user, exiting..."
sys.exit()
except:
raise
# END send
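# Minimal client sketch (assumes a laikaboss broker listening on the given
# ZMQ endpoint and an ExternalObject built elsewhere; endpoint and object
# names are illustrative; the timeout is in milliseconds, matching
# zmq.Poller.poll):
#
#     client = Client('tcp://localhost:5558')
#     result = client.send(external_object, retry=2, timeout=30000)
#     if result:
#         print flagRollup(result)
#     client.close()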
|
|
from __pyjamas__ import JS
#from __future__ import division
#from warnings import warn as _warn
#from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from os import urandom as _urandom
from binascii import hexlify as _hexlify
#__all__ = ["Random","seed","random","uniform","randint","choice","sample",
# "randrange","shuffle","normalvariate","lognormvariate",
# "expovariate","vonmisesvariate","gammavariate","triangular",
# "gauss","betavariate","paretovariate","weibullvariate",
# "getstate","setstate","jumpahead", "WichmannHill", "getrandbits",
# "SystemRandom"]
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)
BPF = 53 # Number of bits in a float
RECIP_BPF = 2**-BPF
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley. Adapted by Raymond Hettinger for use with
# the Mersenne Twister and os.urandom() core generators.
import _random
class Random(_random.Random):
VERSION = 3 # used by getstate/setstate
def __init__(self, x=None):
self.seed(x)
self.gauss_next = None
def seed(self, a=None):
# """Initialize internal state from hashable object.
# None or no argument seeds from current time or from an operating
# system specific randomness source if available.
# If a is not None or an int or long, hash(a) is used instead.
# """
if a is None:
try:
a = long(_hexlify(_urandom(16)), 16)
except NotImplementedError:
import time
a = long(time.time() * 256) # use fractional seconds
super(Random, self).seed(a)
self.gauss_next = None
def getstate(self):
# """Return internal state; can be passed to setstate() later."""
return self.VERSION, super(Random, self).getstate(), self.gauss_next
def setstate(self, state):
# """Restore internal state from object returned by getstate()."""
version = state[0]
if version == 3:
version, internalstate, self.gauss_next = state
super(Random, self).setstate(internalstate)
elif version == 2:
version, internalstate, self.gauss_next = state
# In version 2, the state was saved as signed ints, which causes
# inconsistencies between 32/64-bit systems. The state is
# really unsigned 32-bit ints, so we convert negative ints from
# version 2 to positive longs for version 3.
try:
internalstate = tuple( long(x) % (2**32) for x in internalstate )
except ValueError, e:
raise TypeError, e
super(Random, self).setstate(internalstate)
else:
raise ValueError("state with version %s passed to "
"Random.setstate() of version %s" %
(version, self.VERSION))
## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.
## -------------------- pickle support -------------------
def __getstate__(self): # for pickle
return self.getstate()
def __setstate__(self, state): # for pickle
self.setstate(state)
def __reduce__(self):
return self.__class__, (), self.getstate()
## -------------------- integer methods -------------------
def randrange(self, start, stop=None, step=1, int=int, default=None,
maxwidth=1L<<BPF):
# """Choose a random item from range(start, stop[, step]).
# This fixes the problem with randint() which includes the
# endpoint; in Python this is usually not what you want.
# Do not supply the 'int', 'default', and 'maxwidth' arguments.
# """
# This code is a bit messy to make it fast for the
# common case while still doing adequate error checking.
istart = int(start)
if istart != start:
raise ValueError, "non-integer arg 1 for randrange()"
if stop is default:
if istart > 0:
if istart >= maxwidth:
return self._randbelow(istart)
return int(self.random() * istart)
raise ValueError, "empty range for randrange()"
# stop argument supplied.
istop = int(stop)
if istop != stop:
raise ValueError, "non-integer stop for randrange()"
width = istop - istart
if step == 1 and width > 0:
# Note that
# int(istart + self.random()*width)
# instead would be incorrect. For example, consider istart
# = -2 and istop = 0. Then the guts would be in
# -2.0 to 0.0 exclusive on both ends (ignoring that random()
# might return 0.0), and because int() truncates toward 0, the
# final result would be -1 or 0 (instead of -2 or -1).
# istart + int(self.random()*width)
# would also be incorrect, for a subtler reason: the RHS
# can return a long, and then randrange() would also return
# a long, but we're supposed to return an int (for backward
# compatibility).
if width >= maxwidth:
return int(istart + self._randbelow(width))
return int(istart + int(self.random()*width))
if step == 1:
raise ValueError, "empty range for randrange() (%d,%d, %d)" % (istart, istop, width)
# Non-unit step argument supplied.
istep = int(step)
if istep != step:
raise ValueError, "non-integer step for randrange()"
if istep > 0:
n = (width + istep - 1) // istep
elif istep < 0:
n = (width + istep + 1) // istep
else:
raise ValueError, "zero step for randrange()"
if n <= 0:
raise ValueError, "empty range for randrange()"
if n >= maxwidth:
return istart + istep*self._randbelow(n)
return istart + istep*int(self.random() * n)
def randint(self, a, b):
# """Return random integer in range [a, b], including both end points.
# """
return self.randrange(a, b+1)
def _randbelow(self, n, _log=_log, int=int, _maxwidth=1L<<BPF):
#def _randbelow(self, n, _log=_log, int=int, _maxwidth=1L<<BPF,
# _Method=_MethodType, _BuiltinMethod=_BuiltinMethodType):
# """Return a random int in the range [0,n)
# Handles the case where n has more bits than returned
# by a single call to the underlying generator.
# """
try:
getrandbits = self.getrandbits
except AttributeError:
pass
else:
# Only call self.getrandbits if the original random() builtin method
# has not been overridden or if a new getrandbits() was supplied.
# This assures that the two methods correspond.
#if type(self.random) is _BuiltinMethod or type(getrandbits) is _Method:
if True:
k = int(1.00001 + _log(n-1, 2.0)) # 2**k > n-1 > 2**(k-2)
r = getrandbits(k)
while r >= n:
r = getrandbits(k)
return long(r)
#if n >= _maxwidth:
# _warn("Underlying random() generator does not supply \n"
# "enough bits to choose from a population range this large")
return int(self.random() * n)
## -------------------- sequence methods -------------------
def choice(self, seq):
# """Choose a random element from a non-empty sequence."""
return seq[int(self.random() * len(seq))] # raises IndexError if seq is empty
def shuffle(self, x, random=None, int=int):
# """x, random=random.random -> shuffle list x in place; return None.
# Optional arg random is a 0-argument function returning a random
# float in [0.0, 1.0); by default, the standard random.random.
# """
if random is None:
random = self.random
for i in reversed(xrange(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
j = int(random() * (i+1))
x[i], x[j] = x[j], x[i]
def sample(self, population, k):
# """Chooses k unique random elements from a population sequence.
# Returns a new list containing elements from the population while
# leaving the original population unchanged. The resulting list is
# in selection order so that all sub-slices will also be valid random
# samples. This allows raffle winners (the sample) to be partitioned
# into grand prize and second place winners (the subslices).
#
# Members of the population need not be hashable or unique. If the
# population contains repeats, then each occurrence is a possible
# selection in the sample.
#
# To choose a sample in a range of integers, use xrange as an argument.
# This is especially fast and space efficient for sampling from a
# large population: sample(xrange(10000000), 60)
# """
# XXX Although the documentation says `population` is "a sequence",
# XXX attempts are made to cater to any iterable with a __len__
# XXX method. This has had mixed success. Examples from both
# XXX sides: sets work fine, and should become officially supported;
# XXX dicts are much harder, and have failed in various subtle
# XXX ways across attempts. Support for mapping types should probably
# XXX be dropped (and users should pass mapping.keys() or .values()
# XXX explicitly).
# Sampling without replacement entails tracking either potential
# selections (the pool) in a list or previous selections in a set.
# When the number of selections is small compared to the
# population, then tracking selections is efficient, requiring
# only a small set and an occasional reselection. For
# a larger number of selections, the pool tracking method is
# preferred since the list takes less space than the
# set and it doesn't suffer from frequent reselections.
n = len(population)
if not 0 <= k <= n:
raise ValueError, "sample larger than population"
__random = self.random
_int = int
result = [None] * k
setsize = 21 # size of a small set minus size of an empty list
if k > 5:
setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
if n <= setsize or hasattr(population, "keys"):
# An n-length list is smaller than a k-length set, or this is a
# mapping type so the other algorithm wouldn't work.
pool = list(population)
for i in xrange(k): # invariant: non-selected at [0,n-i)
j = _int(__random() * (n-i))
result[i] = pool[j]
pool[j] = pool[n-i-1] # move non-selected item into vacancy
else:
try:
selected = set()
selected_add = selected.add
for i in xrange(k):
j = _int(__random() * n)
while j in selected:
j = _int(__random() * n)
selected_add(j)
result[i] = population[j]
except (TypeError, KeyError): # handle (at least) sets
if isinstance(population, list):
raise
return self.sample(tuple(population), k)
return result
## -------------------- real-valued distributions -------------------
## -------------------- uniform distribution -------------------
def uniform(self, a, b):
# """Get a random number in the range [a, b)."""
return a + (b-a) * self.random()
## -------------------- triangular --------------------
def triangular(self, low=0.0, high=1.0, mode=None):
# """Triangular distribution.
#
# Continuous distribution bounded by given lower and upper limits,
# and having a given mode value in-between.
#
# http://en.wikipedia.org/wiki/Triangular_distribution
# """
u = self.random()
c = 0.5 if mode is None else (mode - low) / (high - low)
if u > c:
u = 1.0 - u
c = 1.0 - c
low, high = high, low
return low + (high - low) * (u * c) ** 0.5
## -------------------- normal distribution --------------------
def normalvariate(self, mu, sigma):
# """Normal distribution.
# mu is the mean, and sigma is the standard deviation.
# """
# mu = mean, sigma = standard deviation
# Uses Kinderman and Monahan method. Reference: Kinderman,
# A.J. and Monahan, J.F., "Computer generation of random
# variables using the ratio of uniform deviates", ACM Trans
# Math Software, 3, (1977), pp257-260.
__random = self.random
while 1:
u1 = __random()
u2 = 1.0 - __random()
z = NV_MAGICCONST*(u1-0.5)/u2
zz = z*z/4.0
if zz <= -_log(u2):
break
return mu + z*sigma
## -------------------- lognormal distribution --------------------
def lognormvariate(self, mu, sigma):
# """Log normal distribution.
# If you take the natural logarithm of this distribution, you'll get a
# normal distribution with mean mu and standard deviation sigma.
# mu can have any value, and sigma must be greater than zero.
# """
return _exp(self.normalvariate(mu, sigma))
## -------------------- exponential distribution --------------------
def expovariate(self, lambd):
# """Exponential distribution.
# lambd is 1.0 divided by the desired mean. It should be
# nonzero. (The parameter would be called "lambda", but that is
# a reserved word in Python.) Returned values range from 0 to
# positive infinity if lambd is positive, and from negative
# infinity to 0 if lambd is negative.
# """
# lambd: rate lambd = 1/mean
# ('lambda' is a Python reserved word)
__random = self.random
u = __random()
while u <= 1e-7:
u = __random()
return -_log(u)/lambd
## -------------------- von Mises distribution --------------------
def vonmisesvariate(self, mu, kappa):
# """Circular data distribution.
# mu is the mean angle, expressed in radians between 0 and 2*pi, and
# kappa is the concentration parameter, which must be greater than or
# equal to zero. If kappa is equal to zero, this distribution reduces
# to a uniform random angle over the range 0 to 2*pi.
# """
# mu: mean angle (in radians between 0 and 2*pi)
# kappa: concentration parameter kappa (>= 0)
# if kappa = 0 generate uniform random angle
# Based upon an algorithm published in: Fisher, N.I.,
# "Statistical Analysis of Circular Data", Cambridge
# University Press, 1993.
# Thanks to Magnus Kessler for a correction to the
# implementation of step 4.
__random = self.random
if kappa <= 1e-6:
return TWOPI * __random()
a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa)
b = (a - _sqrt(2.0 * a))/(2.0 * kappa)
r = (1.0 + b * b)/(2.0 * b)
while 1:
u1 = __random()
z = _cos(_pi * u1)
f = (1.0 + r * z)/(r + z)
c = kappa * (r - f)
u2 = __random()
if u2 < c * (2.0 - c) or u2 <= c * _exp(1.0 - c):
break
u3 = __random()
if u3 > 0.5:
theta = (mu % TWOPI) + _acos(f)
else:
theta = (mu % TWOPI) - _acos(f)
return theta
## -------------------- gamma distribution --------------------
def gammavariate(self, alpha, beta):
# """Gamma distribution. Not the gamma function!
# Conditions on the parameters are alpha > 0 and beta > 0.
# """
# alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
# Warning: a few older sources define the gamma distribution in terms
# of alpha > -1.0
if alpha <= 0.0 or beta <= 0.0:
raise ValueError, 'gammavariate: alpha and beta must be > 0.0'
__random = self.random
if alpha > 1.0:
# Uses R.C.H. Cheng, "The generation of Gamma
# variables with non-integral shape parameters",
# Applied Statistics, (1977), 26, No. 1, p71-74
ainv = _sqrt(2.0 * alpha - 1.0)
bbb = alpha - LOG4
ccc = alpha + ainv
while 1:
u1 = __random()
if not 1e-7 < u1 < .9999999:
continue
u2 = 1.0 - __random()
v = _log(u1/(1.0-u1))/ainv
x = alpha*_exp(v)
z = u1*u1*u2
r = bbb+ccc*v-x
if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
return x * beta
elif alpha == 1.0:
# expovariate(1)
u = __random()
while u <= 1e-7:
u = __random()
return -_log(u) * beta
else: # alpha is between 0 and 1 (exclusive)
# Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
while 1:
u = __random()
b = (_e + alpha)/_e
p = b*u
if p <= 1.0:
x = p ** (1.0/alpha)
else:
x = -_log((b-p)/alpha)
u1 = __random()
if p > 1.0:
if u1 <= x ** (alpha - 1.0):
break
elif u1 <= _exp(-x):
break
return x * beta
## -------------------- Gauss (faster alternative) --------------------
def gauss(self, mu, sigma):
# """Gaussian distribution.
# mu is the mean, and sigma is the standard deviation. This is
# slightly faster than the normalvariate() function.
# Not thread-safe without a lock around calls.
# """
# When x and y are two variables from [0, 1), uniformly
# distributed, then
#
# cos(2*pi*x)*sqrt(-2*log(1-y))
# sin(2*pi*x)*sqrt(-2*log(1-y))
#
# are two *independent* variables with normal distribution
# (mu = 0, sigma = 1).
# (Lambert Meertens)
# (corrected version; bug discovered by Mike Miller, fixed by LM)
# Multithreading note: When two threads call this function
# simultaneously, it is possible that they will receive the
# same return value. The window is very small though. To
# avoid this, you have to use a lock around all calls. (I
# didn't want to slow this down in the serial case by using a
# lock here.)
__random = self.random
z = self.gauss_next
self.gauss_next = None
if z is None:
x2pi = __random() * TWOPI
g2rad = _sqrt(-2.0 * _log(1.0 - __random()))
z = _cos(x2pi) * g2rad
self.gauss_next = _sin(x2pi) * g2rad
return mu + z*sigma
## -------------------- beta --------------------
## See
## http://sourceforge.net/bugs/?func=detailbug&bug_id=130030&group_id=5470
## for Ivan Frohne's insightful analysis of why the original implementation:
##
## def betavariate(self, alpha, beta):
## # Discrete Event Simulation in C, pp 87-88.
##
## y = self.expovariate(alpha)
## z = self.expovariate(1.0/beta)
## return z/(y+z)
##
## was dead wrong, and how it probably got that way.
def betavariate(self, alpha, beta):
# """Beta distribution.
# Conditions on the parameters are alpha > 0 and beta > 0.
# Returned values range between 0 and 1.
# """
# This version due to Janne Sinkkonen, and matches all the std
# texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
y = self.gammavariate(alpha, 1.)
if y == 0:
return 0.0
else:
return y / (y + self.gammavariate(beta, 1.))
## -------------------- Pareto --------------------
def paretovariate(self, alpha):
# """Pareto distribution. alpha is the shape parameter."""
# Jain, pg. 495
u = 1.0 - self.random()
return 1.0 / pow(u, 1.0/alpha)
## -------------------- Weibull --------------------
def weibullvariate(self, alpha, beta):
# """Weibull distribution.
# alpha is the scale parameter and beta is the shape parameter.
# """
# Jain, pg. 499; bug fix courtesy Bill Arms
u = 1.0 - self.random()
return alpha * pow(-_log(u), 1.0/beta)
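# A quick illustrative exercise of the API above; the behaviour matches
# CPython's random module, of which this file is a pyjamas port.
def _random_demo():
    r = Random()
    r.seed(42)
    r.randrange(0, 10, 2)        # one of 0, 2, 4, 6, 8
    r.randint(1, 6)              # die roll; both endpoints included
    r.sample(range(100), 3)      # three distinct ints, in selection order
    r.gauss(0.0, 1.0)            # Box-Muller: every second call is cached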
## -------------------- Wichmann-Hill -------------------
class WichmannHill(Random):
VERSION = 1 # used by getstate/setstate
def seed(self, a=None):
# """Initialize internal state from hashable object.
#
# None or no argument seeds from current time or from an operating
# system specific randomness source if available.
#
# If a is not None or an int or long, hash(a) is used instead.
#
# If a is an int or long, a is used directly. Distinct values between
# 0 and 27814431486575L inclusive are guaranteed to yield distinct
# internal states (this guarantee is specific to the default
# Wichmann-Hill generator).
# """
if a is None:
try:
a = long(_hexlify(_urandom(16)), 16)
except NotImplementedError:
import time
a = long(time.time() * 256) # use fractional seconds
if not isinstance(a, (int, long)):
a = hash(a)
a, x = divmod(a, 30268)
a, y = divmod(a, 30306)
a, z = divmod(a, 30322)
self._seed = int(x)+1, int(y)+1, int(z)+1
self.gauss_next = None
def random(self):
# """Get the next random number in the range [0.0, 1.0)."""
        # Wichmann-Hill random number generator.
#
# Wichmann, B. A. & Hill, I. D. (1982)
# Algorithm AS 183:
# An efficient and portable pseudo-random number generator
# Applied Statistics 31 (1982) 188-190
#
# see also:
# Correction to Algorithm AS 183
# Applied Statistics 33 (1984) 123
#
# McLeod, A. I. (1985)
# A remark on Algorithm AS 183
# Applied Statistics 34 (1985),198-200
# This part is thread-unsafe:
# BEGIN CRITICAL SECTION
x, y, z = self._seed
x = (171 * x) % 30269
y = (172 * y) % 30307
z = (170 * z) % 30323
self._seed = x, y, z
# END CRITICAL SECTION
# Note: on a platform using IEEE-754 double arithmetic, this can
# never return 0.0 (asserted by Tim; proof too long for a comment).
return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0
def getstate(self):
# """Return internal state; can be passed to setstate() later."""
return self.VERSION, self._seed, self.gauss_next
def setstate(self, state):
# """Restore internal state from object returned by getstate()."""
version = state[0]
if version == 1:
version, self._seed, self.gauss_next = state
else:
raise ValueError("state with version %s passed to "
"Random.setstate() of version %s" %
(version, self.VERSION))
def jumpahead(self, n):
# """Act as if n calls to random() were made, but quickly.
#
# n is an int, greater than or equal to 0.
#
# Example use: If you have 2 threads and know that each will
# consume no more than a million random numbers, create two Random
# objects r1 and r2, then do
# r2.setstate(r1.getstate())
# r2.jumpahead(1000000)
# Then r1 and r2 will use guaranteed-disjoint segments of the full
# period.
# """
if not n >= 0:
raise ValueError("n must be >= 0")
x, y, z = self._seed
x = int(x * pow(171, n, 30269)) % 30269
y = int(y * pow(172, n, 30307)) % 30307
z = int(z * pow(170, n, 30323)) % 30323
self._seed = x, y, z
def __whseed(self, x=0, y=0, z=0):
# """Set the Wichmann-Hill seed from (x, y, z).
# These must be integers in the range [0, 256).
# """
if not type(x) == type(y) == type(z) == int:
raise TypeError('seeds must be integers')
if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):
raise ValueError('seeds must be in range(0, 256)')
if 0 == x == y == z:
# Initialize from current time
import time
t = long(time.time() * 256)
t = int((t&0xffffff) ^ (t>>24))
t, x = divmod(t, 256)
t, y = divmod(t, 256)
t, z = divmod(t, 256)
# Zero is a poor seed, so substitute 1
self._seed = (x or 1, y or 1, z or 1)
self.gauss_next = None
def whseed(self, a=None):
# """Seed from hashable object's hash code.
# None or no argument seeds from current time. It is not guaranteed
# that objects with distinct hash codes lead to distinct internal
# states.
# This is obsolete, provided for compatibility with the seed routine
# used prior to Python 2.1. Use the .seed() method instead.
# """
if a is None:
self.__whseed()
return
a = hash(a)
a, x = divmod(a, 256)
a, y = divmod(a, 256)
a, z = divmod(a, 256)
x = (x + a) % 256 or 1
y = (y + a) % 256 or 1
z = (z + a) % 256 or 1
self.__whseed(x, y, z)
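# A runnable version of the recipe in the jumpahead() docstring above:
# copy the state, jump the second generator ahead, and the two streams are
# guaranteed disjoint for the first million draws.
def _jumpahead_demo():
    r1 = WichmannHill(42)
    r2 = WichmannHill()
    r2.setstate(r1.getstate())
    r2.jumpahead(1000000)
    assert r1.getstate() != r2.getstate()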
## --------------- Operating System Random Source ------------------
class SystemRandom(Random):
# """Alternate random number generator using sources provided
# by the operating system (such as /dev/urandom on Unix or
# CryptGenRandom on Windows).
#
# Not available on all systems (see os.urandom() for details).
# """
def random(self):
#"""Get the next random number in the range [0.0, 1.0)."""
return (long(_hexlify(_urandom(7)), 16) >> 3) * RECIP_BPF
def getrandbits(self, k):
#"""getrandbits(k) -> x. Generates a long int with k random bits."""
if k <= 0:
raise ValueError('number of bits must be greater than zero')
if k != int(k):
raise TypeError('number of bits should be an integer')
bytes = (k + 7) // 8 # bits / 8 and rounded up
x = long(_hexlify(_urandom(bytes)), 16)
return x >> (bytes * 8 - k) # trim excess bits
def _stub(self, *args, **kwds):
#"Stub method. Not used for a system random number generator."
return None
seed = jumpahead = _stub
def _notimplemented(self, *args, **kwds):
#"Method should not be called for a system random number generator."
raise NotImplementedError('System entropy source does not have state.')
getstate = setstate = _notimplemented
## -------------------- test program --------------------
def _test_generator(n, func, args):
import time
print n, 'times', func.__name__
total = 0.0
sqsum = 0.0
smallest = 1e10
largest = -1e10
t0 = time.time()
for i in range(n):
x = func(*args)
total += x
sqsum = sqsum + x*x
smallest = min(x, smallest)
largest = max(x, largest)
t1 = time.time()
print round(t1-t0, 3), 'sec,',
avg = total/n
stddev = _sqrt(sqsum/n - avg*avg)
print 'avg %g, stddev %g, min %g, max %g' % \
(avg, stddev, smallest, largest)
def _test(N=2000):
_test_generator(N, random, ())
_test_generator(N, normalvariate, (0.0, 1.0))
_test_generator(N, lognormvariate, (0.0, 1.0))
_test_generator(N, vonmisesvariate, (0.0, 1.0))
_test_generator(N, gammavariate, (0.01, 1.0))
_test_generator(N, gammavariate, (0.1, 1.0))
_test_generator(N, gammavariate, (0.1, 2.0))
_test_generator(N, gammavariate, (0.5, 1.0))
_test_generator(N, gammavariate, (0.9, 1.0))
_test_generator(N, gammavariate, (1.0, 1.0))
_test_generator(N, gammavariate, (2.0, 1.0))
_test_generator(N, gammavariate, (20.0, 1.0))
_test_generator(N, gammavariate, (200.0, 1.0))
_test_generator(N, gauss, (0.0, 1.0))
_test_generator(N, betavariate, (3.0, 3.0))
_test_generator(N, triangular, (0.0, 1.0, 1.0/3.0))
# Create one instance, seeded from current time, and export its methods
# as module-level functions. The functions share state across all uses
#(both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
_inst = Random()
seed = getattr(_inst, 'seed')
random = getattr(_inst, 'random')
uniform = getattr(_inst, 'uniform')
triangular = getattr(_inst, 'triangular')
randint = getattr(_inst, 'randint')
choice = getattr(_inst, 'choice')
randrange = getattr(_inst, 'randrange')
sample = getattr(_inst, 'sample')
shuffle = getattr(_inst, 'shuffle')
normalvariate = getattr(_inst, 'normalvariate')
lognormvariate = getattr(_inst, 'lognormvariate')
expovariate = getattr(_inst, 'expovariate')
vonmisesvariate = getattr(_inst, 'vonmisesvariate')
gammavariate = getattr(_inst, 'gammavariate')
gauss = getattr(_inst, 'gauss')
betavariate = getattr(_inst, 'betavariate')
paretovariate = getattr(_inst, 'paretovariate')
weibullvariate = getattr(_inst, 'weibullvariate')
getstate = getattr(_inst, 'getstate')
setstate = getattr(_inst, 'setstate')
jumpahead = getattr(_inst, 'jumpahead')
getrandbits = getattr(_inst, 'getrandbits')
if __name__ == '__main__':
_test()
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Blog.has_artists'
db.add_column('blogs_blog', 'has_artists',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Blog.has_artists'
db.delete_column('blogs_blog', 'has_artists')
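    # With South, this migration is applied with:
    #   ./manage.py migrate blogs
    # and rolled back by migrating to the previous migration in the series:
    #   ./manage.py migrate blogs 000X_previous   (hypothetical name)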
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'blogs.blog': {
'Meta': {'object_name': 'Blog'},
'block_css': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_footer': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_header': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_left': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_middle': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_navbar': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_other_1': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_other_2': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_other_3': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_right': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_right_bottom': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_right_middle_1': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_right_middle_2': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_right_top': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_single_left': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_subscribe_button': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_subscribe_text': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_title': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'custom_domain': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'has_artists': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_bootblog': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_online': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'main_color': ('django.db.models.fields.TextField', [], {'default': "'#ff7f00'", 'max_length': '10'}),
'main_image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'moderator_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'short_description': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '30'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True', 'blank': 'True'})
},
'blogs.category': {
'Meta': {'object_name': 'Category'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '140'}),
'top_level_cat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'})
},
'blogs.comment': {
'Meta': {'object_name': 'Comment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'Comment_author'", 'null': 'True', 'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'comment_status': ('django.db.models.fields.CharField', [], {'default': "'pe'", 'max_length': '2'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'notify_me': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Post']", 'null': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'})
},
'blogs.info_email': {
'Meta': {'object_name': 'Info_email'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'We'", 'max_length': '2', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'subject': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'}),
'subscribers': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '2', 'null': 'True'})
},
'blogs.language': {
'Meta': {'object_name': 'Language'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'language_name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'blogs.page': {
'Meta': {'object_name': 'Page'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'})
},
'blogs.post': {
'Meta': {'object_name': 'Post'},
'artist': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'base62id': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'category': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_0': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_01': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_1': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_2': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_3': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_4': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_5': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_6': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_video': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_ready': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_top': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'karma': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'layout_type': ('django.db.models.fields.CharField', [], {'default': "'s'", 'max_length': '1'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'pic': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_0': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_04': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_1': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_10': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_11': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_12': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_13': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_14': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_15': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_16': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_17': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_18': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_19': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_2': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_20': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_21': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_22': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_23': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_24': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_3': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_4': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_5': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_6': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_7': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_8': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_9': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'publish_on_facebook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '2', 'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'translated_content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'translated_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'youtube_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'youtube_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'})
},
'blogs.subscription': {
'Meta': {'object_name': 'Subscription'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_new': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'blogs.tag': {
'Meta': {'object_name': 'Tag'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '140'})
},
'blogs.translation': {
'Meta': {'object_name': 'Translation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'origin_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_origin_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'translated_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_translated_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['blogs']
|
|
#!/usr/bin/python3
# Copyright 2019 by Jeff Woods
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import sys
# ### Entity Generator ###
# The EntityGenerator class serves as a container for Elements. When the
# create() method is called on this object, it walks through its list of
# children calling create() on each of them in turn.
# ### Elements ###
# There are three main types of elements. These elements are all derived
# from the EntityElement class, and each may be added as a child to the
# EntityGenerator object (above). These Element types include:
#
# - A SimpleElement. This is an Element which creates nothing more than
# a simple value, such as a string or integer.
# - An ArrayElement. This Element is implemented as a list of homogeneous
# items. The items INSIDE this container, which are created by the
# generator passed in the object initialization, may be of any supported
# Element type (Simple, Array, or Dict). The number of items generated
# inside the array will be repeated as described by the count_fn param.
# - A DictElement. This Element returns a dict populated with key/value
# pairs. This is the most sophisticated Element, supporting the nesting
# of child Elements (see below).
#
# Each type of Element supports one or more generators. A sample of
# generators might include gender (simple), dob (simple), name (dict),
# address (dict), and ssn (simple). An entity would almost certainly have
# more than one address, so we could employ an ArrayElement to enclose the
# address dict.
# ### Child Elements ###
# Outside of the EntityGenerator class, only Elements based on the DictElement
# class may have children. This makes sense because:
# - the entity to which the child is added must be a container of some
# sort. Of the three Element types, only Dict and Array qualify.
# - An ArrayElement gets filled with a consistent type of element (names,
# addresses, trades, etc).
class EntityGenerator(object):
'''
EntityGenerator maintains the structured relationships between initialized
EntityElements. It is responsible for initializing the root data element
and walking the tree of generator Entities. EntityElements are added to
the EntityGenerator much like nodes might be added to an XML document when
working with DOM.
'''
def __init__(self):
self.data = None # the current data object being built
# the fact that children is an array is VERY IMPORTANT. The order
# in which the elements are created must be guaranteed so that
# generators which reference other elements can be guaranteed that
# referenced values exists. As an example, assume that we have name
# and gender elements, with the generation of the name depending on
# the value selected for gender. It wouldn't do much good to generate
# the name before the gender.
self.children = [] # a list of child generators (populate data)
return
def addElement(self, elem, label = None):
if label is None:
label = elem.name
elem.setRoot(self)
x = (label, elem)
self.children.append(x)
return
def create(self, **kwargs):
self.data = {}
for elem in self.children:
e_nam = elem[0]
e_val = elem[1].create(**kwargs)
self.data[e_nam] = e_val
return self.data
def getValueByPath(self, path):
'''
Traverse the data element being generated and return the value
matching the given path. If the path cannot be parsed, we will assume
that the given value was intended to be a literal value. If we
encounter an array during our traversal, we will always select or
navigate through the last element in the list (assuming it is the
most recently generated).
'''
parts = path.split('/')
if len(parts[0]) == 0:
element = self.data
for i in parts[1:]: # skip the first (empty) part
if type(element) is list:
element = element[-1]
element = element[i]
return element
return path # can't parse? return the whole dang thing as literal
@staticmethod
def defaultDataPath():
p = os.path.dirname(__file__)
datapath = os.path.join(p, 'data')
return datapath
class EntityElement(object):
'''
Base class for all Entity Elements. An EntityElement may be added to
either an Entity (as an element on the root of the element) or
as a child of another EntityElement.
Constructor accepts the arguments:
name - the name of the element in generated output (dict key)
count - the number of times this element will be repeated. This
may be a callable (function) or an integer value.
generator - the generator class used to create data
params - parameters to be passed to each create() call. The list
of valid parameters is relative to the generator being used
root - a reference to EntityGenerator object used to create this
data entity.
'''
def __init__(self, name = None,
generator = None,
params = None,
root = None):
self.name = name
self.root = root
self.params = params
        self.generator = generator
self.mods = None
return
def setRoot(self, root):
self.root = root
@staticmethod
def count_const_fn(x):
return lambda: x
@staticmethod
def count_rand_fn(max, min=0):
if max <= min:
raise ValueError('min must be less than max in count_rand_fn()')
return lambda: int(((max - min) * random.random()) + min)
@staticmethod
def count_norm_fn(mean=0.0, stdev=1.0, integer=False):
if integer is False:
return lambda: random.normalvariate(mu=mean, sigma=stdev)
return lambda: int(random.normalvariate(mu=mean, sigma=stdev))
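# The count_* helpers above all return zero-argument callables, so an
# element can re-draw its repeat count on every create(). A quick
# illustrative check:
def _count_fn_demo():
    assert EntityElement.count_const_fn(3)() == 3
    n = EntityElement.count_rand_fn(5, 1)()   # uniform int in [1, 5)
    assert 1 <= n < 5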
class ArrayElement(EntityElement):
    '''
    An ArrayElement produces a list of homogeneous items. Each item is
    created by the configured generator, and the number of items is drawn
    from count_fn (or a constant count) on every create() call.
    '''
def __init__(self, generator = None,
count_fn = None,
count = None,
**kwargs):
EntityElement.__init__(self, **kwargs)
# TODO - an ArrayElement may not have children
# count must be a callable function, but we can also accept an integer.
# If given an integer, we'll convert it using a lambda function which
# returns the appropriate value.
if not callable(count_fn):
if type(count) is int:
count_fn = lambda: count
else:
raise ValueError('Invalid type for element count')
        self.count_fn = count_fn
self.generator = generator
return
def create(self, **kwargs):
data = []
if self.count_fn is None: return data
c = self.count_fn()
while c > 0:
e = self.generator.create(root = data)
data.append(e)
c -= 1
return data
class DictElement(EntityElement):
'''
A DictElement may have children of any type.
'''
def __init__(self, **kwargs):
EntityElement.__init__(self, **kwargs)
self.children = None
return
def addElement(self, elem, label = None):
if not isinstance(elem, EntityElement):
raise ValueError('element not EntityElement type in addElement')
if label is None:
label = elem.name
        # the fact that children is an array is VERY IMPORTANT. The order
        # in which the elements are created must be guaranteed so that
        # parameters which reference other elements can be guaranteed that
        # referenced values exist.
        if self.children is None:
            self.children = []
elem.setRoot(self.root)
x = (label, elem)
self.children.append(x)
return
def addChildren(self, data, **kwargs):
if self.children is None: return None
for child in self.children:
enam = child[0]
egen = child[1]
data[enam] = egen.create()
return
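    def create(self, **kwargs):
        # No create() is defined for DictElement in this file; this is a
        # minimal sketch assuming the behaviour described in the class
        # comments: build a dict (from the generator, if one is set) and
        # let each child element fill in its own key.
        if self.generator is not None:
            data = self.generator.create(**kwargs)
        else:
            data = {}
        self.addChildren(data, **kwargs)
        return data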
class SimpleElement(EntityElement):
'''
A SimpleElement cannot have children.
'''
def __init__(self, **kwargs):
EntityElement.__init__(self, **kwargs)
# TODO - a SimpleElement may not have children
return
    def create(self, **kwargs):
        # Return the plain value produced by the configured generator.
        return self.generator.create(**kwargs) if self.generator else None
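# A minimal end-to-end sketch of the classes above. _ConstGenerator is a
# made-up stand-in for a real value generator (name/address/gender/etc.);
# any object exposing a create() method will do.
class _ConstGenerator(object):
    def __init__(self, value):
        self.value = value
    def create(self, **kwargs):
        return self.value
def _entity_demo():
    gen = EntityGenerator()
    gen.addElement(SimpleElement(name='gender',
                                 generator=_ConstGenerator('F')))
    gen.addElement(ArrayElement(name='addresses', count=2,
                                generator=_ConstGenerator({'city': 'Springfield'})))
    data = gen.create()
    assert data['gender'] == 'F'
    assert len(data['addresses']) == 2
    # getValueByPath() walks into the *last* element of any list it meets.
    assert gen.getValueByPath('/addresses/city') == 'Springfield'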
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common utilities helpful for processing US census data.
"""
import csv
import io
import json
import os
import zipfile
from absl import app
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('zip_path', None,
'Path to zip file downloaded from US Census')
flags.DEFINE_string('csv_path', None,
'Path to csv file downloaded from US Census')
flags.DEFINE_list('csv_list', None,
'List of paths to csv files downloaded from US Census')
flags.DEFINE_string('spec', None, 'Path to config spec JSON file')
flags.DEFINE_boolean('get_tokens', False,
'Produce a list of tokens from the input file/s')
flags.DEFINE_boolean('get_columns', False,
'Produce a list of columns from the input file/s')
flags.DEFINE_boolean(
'get_ignored_columns', False,
'Produce a list of columns ignored from the input file/s according to spec')
flags.DEFINE_boolean('ignore_columns', False,
'Account for columns to be ignored according to the spec')
flags.DEFINE_boolean('is_metadata', False,
'Parses the file assuming it is _metadata_ type file')
flags.DEFINE_string('delimiter', '!!',
'The delimiter to extract tokens from column name')
def get_tokens_list_from_zip(zip_file_path: str,
check_metadata: bool = False,
print_details: bool = False,
delimiter: str = '!!') -> list:
"""Function to get list of all tokens given a zip file downloaded from the data.census.gov site.
Args:
zip_file_path: Path of the zip file downloaded from the data.census.gov site
check_metadata: zip file contains 2 types of files:
- metadata
- data_overlays
User can select which type of file to use depending on the size of data.
print_details: If set, prints the file name of each file and new tokens found within it to stdout.
        delimiter: delimiter separating tokens within a single column name string.
Returns:
List of tokens present in the csv files within the zip file.
"""
zip_file_path = os.path.expanduser(zip_file_path)
tokens = []
with zipfile.ZipFile(zip_file_path) as zf:
for filename in zf.namelist():
temp_flag = False
if check_metadata:
if '_metadata_' in filename:
temp_flag = True
elif '_data_' in filename:
temp_flag = True
if temp_flag:
if print_details:
print(
'----------------------------------------------------')
print(filename)
print(
'----------------------------------------------------')
with zf.open(filename, 'r') as data_f:
csv_reader = csv.reader(io.TextIOWrapper(data_f, 'utf-8'))
for row in csv_reader:
if check_metadata:
# metadata file has variable ID as the first column and
# the column name corresponding to it as the second column.
for tok in row[1].split(delimiter):
if tok not in tokens:
tokens.append(tok)
if print_details:
print(tok)
else:
                            # The entire second row holds the column names.
if csv_reader.line_num == 2:
for column_name in row:
for tok in column_name.split(delimiter):
if tok not in tokens:
tokens.append(tok)
if print_details:
print(tok)
return tokens
def token_in_list_ignore_case(token: str, list_check: list) -> bool:
"""Function that checks if the given token is in the list of tokens ignoring the case.
Args:
token: Token to be searched in the list.
list_check: List of tokens within which to search.
Returns:
Boolean value:
True if token is present in the list.
False if token is not present in the list.
"""
cmp_token = token.lower()
for tok in list_check:
if tok.lower() == cmp_token:
return True
return False
def column_to_be_ignored(column_name: str,
spec_dict: dict,
delimiter: str = '!!') -> bool:
"""Function that checks if the given column is to be ignored according to the spec.
    Column is considered to be ignored if there is a full match, or if `ignoreColumns`
    contains a token which is present within the column name.
Args:
column_name: The column name string.
spec_dict: Dict obj containing configurations for the import.
        delimiter: delimiter separating tokens within a single column name string.
Returns:
Boolean value:
            True if the column is to be ignored according to the spec.
            False if the column is not to be ignored according to the spec.
"""
ret_value = False
if 'ignoreColumns' in spec_dict:
for ignore_token in spec_dict['ignoreColumns']:
if delimiter in ignore_token and ignore_token.lower(
) == column_name.lower():
ret_value = True
elif token_in_list_ignore_case(ignore_token,
column_name.split(delimiter)):
ret_value = True
return ret_value
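# A small illustrative check of the two match modes above, using a
# hypothetical spec (not taken from a real import config): a bare token
# matches any column containing it, while an entry that itself contains the
# delimiter must match the full column name (case-insensitively).
def _column_ignore_demo():
    spec = {'ignoreColumns': ['Margin of Error', 'Estimate!!SEX AND AGE']}
    assert column_to_be_ignored('Margin of Error!!Total population', spec)
    assert column_to_be_ignored('estimate!!sex and age', spec)
    assert not column_to_be_ignored('Estimate!!Total population', spec)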
def remove_columns_to_be_ignored(column_name_list: list,
spec_dict: dict,
delimiter: str = '!!') -> list:
"""Function that removes columns to be ignored from a given list of columns.
Args:
column_name_list: The list of column name strings.
spec_dict: Dict obj containing configurations for the import.
        delimiter: delimiter separating tokens within a single column name string.
Returns:
A list of filtered column names, with the column names to be ignored
removed from the input list.
"""
ret_list = []
for column_name in column_name_list:
if not column_to_be_ignored(column_name, spec_dict, delimiter):
ret_list.append(column_name)
return ret_list
def ignored_columns(column_name_list: list,
spec_dict: dict,
delimiter: str = '!!') -> list:
"""Function that returns list of columns to be ignored from a given list of columns.
Args:
column_name_list: The list of column name strings.
spec_dict: Dict obj containing configurations for the import.
        delimiter: delimiter separating tokens within a single column name string.
Returns:
A list of column names that will be ignored according to the spec_dict.
"""
ret_list = []
for column_name in column_name_list:
if column_to_be_ignored(column_name, spec_dict, delimiter):
ret_list.append(column_name)
return ret_list
def get_tokens_list_from_column_list(column_name_list: list,
delimiter: str = '!!') -> list:
"""Function that returns list of tokens present in the list of column names.
Args:
column_name_list: The list of column name strings.
        delimiter: delimiter separating tokens within a single column name string.
Returns:
A list of tokens present in the list of column names.
"""
tokens = []
for column_name in column_name_list:
for tok in column_name.split(delimiter):
if tok not in tokens:
tokens.append(tok)
return tokens
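# For example, tokens are split on the delimiter and kept in first-seen
# order without duplicates (the column names here are made up):
def _token_list_demo():
    cols = ['Estimate!!Total', 'Margin of Error!!Total']
    assert get_tokens_list_from_column_list(cols) == [
        'Estimate', 'Total', 'Margin of Error'
    ]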
def get_spec_token_list(spec_dict: dict, delimiter: str = '!!') -> dict:
    """Function that collects the tokens present in the import configuration spec.
    Args:
        spec_dict: Dict obj containing configurations for the import.
        delimiter: delimiter separating tokens within a single column name string.
    Returns:
        A dict containing 2 key values:
            token_list: list of tokens present in the spec_dict.
            repeated_list: list of tokens that appear multiple times within the spec.
    """
ret_list = []
repeated_list = []
    # collect the tokens that appear in any of the pvs
for prop in spec_dict['pvs'].keys():
for token in spec_dict['pvs'][prop]:
if token in ret_list and not token.startswith('_'):
repeated_list.append(token)
elif not token.startswith('_'):
ret_list.append(token)
    # collect the tokens that appear in any of the populationType keys
if 'populationType' in spec_dict:
for token in spec_dict['populationType'].keys():
if token in ret_list and not token.startswith('_'):
repeated_list.append(token)
elif not token.startswith('_'):
ret_list.append(token)
    # collect the tokens that appear in measurement
if 'measurement' in spec_dict:
for token in spec_dict['measurement'].keys():
if token in ret_list and not token.startswith('_'):
repeated_list.append(token)
elif not token.startswith('_'):
ret_list.append(token)
    # collect the tokens to be ignored
if 'ignoreTokens' in spec_dict:
for token in spec_dict['ignoreTokens']:
if token in ret_list and not token.startswith('_'):
repeated_list.append(token)
elif not token.startswith('_'):
ret_list.append(token)
    # collect ignoreColumns entries, which can be full column names or single tokens
if 'ignoreColumns' in spec_dict:
for token in spec_dict['ignoreColumns']:
if token in ret_list and not token.startswith('_'):
repeated_list.append(token)
elif not token.startswith('_'):
ret_list.append(token)
    # collect the tokens that appear on either side of an enumSpecializations mapping
if 'enumSpecializations' in spec_dict:
for token in spec_dict['enumSpecializations'].keys():
ret_list.append(token)
ret_list.append(spec_dict['enumSpecializations'][token])
    # collect the denominator column names and the tokens on the right-hand side of each denominator
if 'denominators' in spec_dict:
for column in spec_dict['denominators']:
ret_list.append(column)
for token in spec_dict['denominators'][column]:
ret_list.append(token)
return {
'token_list': list(set(ret_list)),
'repeated_list': list(set(repeated_list))
}
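
# A small sketch of the returned shape, using a hypothetical spec in which the
# token 'Total' appears both under 'pvs' and under 'measurement'.
def _example_get_spec_token_list():
    spec = {
        'pvs': {'gender': ['Male', 'Female'], 'age': ['Total']},
        'measurement': {'Total': {}},
    }
    result = get_spec_token_list(spec)
    assert sorted(result['token_list']) == ['Female', 'Male', 'Total']
    assert result['repeated_list'] == ['Total']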
def find_missing_tokens(token_list: list,
spec_dict: dict,
delimiter: str = '!!') -> list:
"""Find tokens missing in the import configuration spec given a list of tokens.
Args:
token_list: List of tokens expected to appear in the spec.
            This can be compiled from the list of columns after discarding the columns to be ignored.
        spec_dict: Dict obj containing configurations for the import.
        delimiter: delimiter separating tokens within a single column name string.
Returns:
List of tokens that are missing in the spec.
"""
spec_tokens = get_spec_token_list(spec_dict, delimiter)['token_list']
tokens_copy = token_list.copy()
for token in token_list:
if token_in_list_ignore_case(token, spec_tokens):
tokens_copy.remove(token)
return tokens_copy
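
# A minimal end-to-end sketch of the validation flow, with hypothetical
# columns and spec: drop ignored columns, tokenize the rest, then report the
# tokens the spec does not cover yet.
def _example_find_missing_tokens():
    columns = ['Estimate!!Total!!Male', 'Margin of Error!!Total']
    spec = {
        'pvs': {'gender': ['Male']},
        'ignoreColumns': ['Margin of Error'],
    }
    kept = remove_columns_to_be_ignored(columns, spec)
    tokens = get_tokens_list_from_column_list(kept)
    # 'Estimate' and 'Total' appear nowhere in the spec, so they are missing.
    assert sorted(find_missing_tokens(tokens, spec)) == ['Estimate', 'Total']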
# assumes metadata file or data with overlays file
def columns_from_CSVreader(csv_reader, is_metadata_file: bool = False) -> list:
"""Function to get list of all columns given a csv reader object.
Args:
csv_reader: csv reader object of the file to extract data from.
            NOTE: It is assumed that the reader is at the start position.
is_metadata_file: csv file can be of 2 types:
- metadata
- data_overlays
User can select which type of file to use depending on the size of data.
Returns:
List of columns present in the csv reader object.
"""
column_name_list = []
for row in csv_reader:
if is_metadata_file:
if len(row) > 1:
column_name_list.append(row[1])
else:
if csv_reader.line_num == 2:
column_name_list = row.copy()
return column_name_list
# assumes metadata file or data with overlays file
def columns_from_CSVfile(csv_path: str, is_metadata_file: bool = False) -> list:
"""Function to get list of all columns given a csv file downloaded from the data.census.gov site.
Args:
        csv_path: Path of the csv file downloaded from the data.census.gov site.
is_metadata_file: csv file can be of 2 types:
- metadata
- data_overlays
User can select which type of file to use depending on the size of data.
Returns:
List of columns present in the csv file.
"""
csv_path = os.path.expanduser(csv_path)
    with open(csv_path, 'r') as csv_file:
        csv_reader = csv.reader(csv_file)
        all_columns = columns_from_CSVreader(csv_reader, is_metadata_file)
    return all_columns
# assumes metadata file or data with overlays file
def columns_from_CSVfile_list(
        csv_path_list: list, is_metadata: list = (False,)) -> list:
"""Function to get list of all columns given a list of csv files downloaded from the data.census.gov site.
Args:
        csv_path_list: List of paths of the csv files downloaded from the data.census.gov site.
is_metadata: csv file can be of 2 types:
- metadata
- data_overlays
User can pass the type of file for each file entry.
Returns:
List of columns present in the list of csv files.
"""
all_columns = []
    if not isinstance(is_metadata, list):
        is_metadata = list(is_metadata)
if len(is_metadata) < len(csv_path_list):
for i in range(0, (len(csv_path_list) - len(is_metadata))):
is_metadata.append(False)
for i, cur_file in enumerate(csv_path_list):
# create csv reader
cur_file = os.path.expanduser(cur_file)
        with open(cur_file, 'r') as csv_file:
            csv_reader = csv.reader(csv_file)
            cur_columns = columns_from_CSVreader(csv_reader, is_metadata[i])
        all_columns.extend(cur_columns)
all_columns = list(set(all_columns))
return all_columns
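
# Usage sketch (hypothetical paths): per-file metadata flags are padded with
# False when the flag list is shorter than the path list, so here the first
# file is parsed as metadata and the second as data-with-overlays:
#   columns_from_CSVfile_list(
#       ['~/acs_metadata.csv', '~/acs_data_overlays.csv'], [True])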
# assumes metadata file or data with overlays file
def columns_from_zip_file(zip_path: str, check_metadata: bool = False) -> list:
"""Function to get list of all columns given a zip file downloaded from the data.census.gov site.
Args:
zip_path: Path of the zip file downloaded from the data.census.gov site
check_metadata: zip file contains 2 types of files:
- metadata
- data_overlays
User can select which type of file to use depending on the size of data.
Returns:
List of columns present in the csv files within the zip file.
"""
zip_path = os.path.expanduser(zip_path)
all_columns = []
with zipfile.ZipFile(zip_path) as zf:
for filename in zf.namelist():
temp_flag = False
if check_metadata:
if '_metadata_' in filename:
temp_flag = True
elif '_data_' in filename:
temp_flag = True
if temp_flag:
with zf.open(filename, 'r') as data_f:
csv_reader = csv.reader(io.TextIOWrapper(data_f, 'utf-8'))
cur_columns = columns_from_CSVreader(
csv_reader, check_metadata)
all_columns.extend(cur_columns)
all_columns = list(set(all_columns))
return all_columns
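
# Usage sketch (hypothetical archive name): with check_metadata=True only the
# '_metadata_' members of the zip are scanned, otherwise the '_data_' members:
#   columns_from_zip_file('~/ACSST5Y2019.S0101.zip', check_metadata=True)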
def get_spec_dict_from_path(spec_path: str) -> dict:
"""Read .json file containing the import configuration
Args:
spec_path: Path to the JSON file containing the configuration.
Returns:
dict obj of the configuration spec.
"""
spec_path = os.path.expanduser(spec_path)
with open(spec_path, 'r') as fp:
spec_dict = json.load(fp)
return spec_dict
def main(argv):
    spec_dict = None
    if not FLAGS.spec:
        if FLAGS.ignore_columns:
            print('ERROR: Path to spec JSON required to ignore columns')
            return
    else:
        spec_dict = get_spec_dict_from_path(FLAGS.spec)
all_columns = []
print_columns = []
if FLAGS.zip_path:
all_columns = columns_from_zip_file(FLAGS.zip_path, FLAGS.is_metadata)
if FLAGS.ignore_columns:
print_columns = remove_columns_to_be_ignored(
all_columns, spec_dict, FLAGS.delimiter)
else:
print_columns = all_columns
elif FLAGS.csv_path:
all_columns = columns_from_CSVfile(FLAGS.csv_path, FLAGS.is_metadata)
if FLAGS.ignore_columns:
print_columns = remove_columns_to_be_ignored(
all_columns, spec_dict, FLAGS.delimiter)
else:
print_columns = all_columns
elif FLAGS.csv_list:
all_columns = columns_from_CSVfile_list(FLAGS.csv_list,
[FLAGS.is_metadata])
if FLAGS.ignore_columns:
print_columns = remove_columns_to_be_ignored(
all_columns, spec_dict, FLAGS.delimiter)
else:
print_columns = all_columns
if FLAGS.get_tokens:
print(
json.dumps(get_tokens_list_from_column_list(print_columns,
FLAGS.delimiter),
indent=2))
if FLAGS.get_columns:
print(json.dumps(print_columns, indent=2))
    if FLAGS.get_ignored_columns and spec_dict:
print(
json.dumps(list(
set(ignored_columns(all_columns, spec_dict, FLAGS.delimiter))),
indent=2))
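
# Example invocation (hypothetical file paths; flag names as used above):
#   python <this_script>.py --zip_path=~/ACSST5Y2019.S0101.zip \
#       --spec=~/spec.json --ignore_columns --get_tokens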
if __name__ == '__main__':
flags.mark_flags_as_mutual_exclusive(['zip_path', 'csv_path', 'csv_list'],
required=True)
app.run(main)
|
|
from __future__ import absolute_import, unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from wagtail.tests.testapp.models import EventPage, SimplePage, SingleEventPage
from wagtail.wagtailcore.models import Page, PageViewRestriction, Site
from wagtail.wagtailcore.signals import page_unpublished
class TestPageQuerySet(TestCase):
fixtures = ['test.json']
def test_live(self):
pages = Page.objects.live()
# All pages must be live
for page in pages:
self.assertTrue(page.live)
# Check that the homepage is in the results
homepage = Page.objects.get(url_path='/home/')
self.assertTrue(pages.filter(id=homepage.id).exists())
def test_not_live(self):
pages = Page.objects.not_live()
# All pages must not be live
for page in pages:
self.assertFalse(page.live)
# Check that "someone elses event" is in the results
event = Page.objects.get(url_path='/home/events/someone-elses-event/')
self.assertTrue(pages.filter(id=event.id).exists())
def test_in_menu(self):
pages = Page.objects.in_menu()
        # All pages must be in the menus
for page in pages:
self.assertTrue(page.show_in_menus)
# Check that the events index is in the results
events_index = Page.objects.get(url_path='/home/events/')
self.assertTrue(pages.filter(id=events_index.id).exists())
def test_not_in_menu(self):
pages = Page.objects.not_in_menu()
# All pages must not be in menus
for page in pages:
self.assertFalse(page.show_in_menus)
# Check that the root page is in the results
self.assertTrue(pages.filter(id=1).exists())
def test_page(self):
homepage = Page.objects.get(url_path='/home/')
pages = Page.objects.page(homepage)
# Should only select the homepage
self.assertEqual(pages.count(), 1)
self.assertEqual(pages.first(), homepage)
def test_not_page(self):
homepage = Page.objects.get(url_path='/home/')
pages = Page.objects.not_page(homepage)
# Should select everything except for the homepage
self.assertEqual(pages.count(), Page.objects.all().count() - 1)
for page in pages:
self.assertNotEqual(page, homepage)
def test_descendant_of(self):
events_index = Page.objects.get(url_path='/home/events/')
pages = Page.objects.descendant_of(events_index)
# Check that all pages descend from events index
for page in pages:
self.assertTrue(page.get_ancestors().filter(id=events_index.id).exists())
def test_descendant_of_inclusive(self):
events_index = Page.objects.get(url_path='/home/events/')
pages = Page.objects.descendant_of(events_index, inclusive=True)
        # Check that all pages descend from the events index, including the events index itself
for page in pages:
self.assertTrue(page == events_index or page.get_ancestors().filter(id=events_index.id).exists())
# Check that event index was included
self.assertTrue(pages.filter(id=events_index.id).exists())
def test_not_descendant_of(self):
events_index = Page.objects.get(url_path='/home/events/')
pages = Page.objects.not_descendant_of(events_index)
# Check that no pages descend from events_index
for page in pages:
self.assertFalse(page.get_ancestors().filter(id=events_index.id).exists())
# As this is not inclusive, events index should be in the results
self.assertTrue(pages.filter(id=events_index.id).exists())
def test_not_descendant_of_inclusive(self):
events_index = Page.objects.get(url_path='/home/events/')
pages = Page.objects.not_descendant_of(events_index, inclusive=True)
        # Check that no pages descend from the events index
for page in pages:
self.assertFalse(page.get_ancestors().filter(id=events_index.id).exists())
# As this is inclusive, events index should not be in the results
self.assertFalse(pages.filter(id=events_index.id).exists())
def test_child_of(self):
homepage = Page.objects.get(url_path='/home/')
pages = Page.objects.child_of(homepage)
# Check that all pages are children of homepage
for page in pages:
self.assertEqual(page.get_parent(), homepage)
def test_not_child_of(self):
events_index = Page.objects.get(url_path='/home/events/')
pages = Page.objects.not_child_of(events_index)
# Check that all pages are not children of events_index
for page in pages:
self.assertNotEqual(page.get_parent(), events_index)
def test_ancestor_of(self):
root_page = Page.objects.get(id=1)
homepage = Page.objects.get(url_path='/home/')
events_index = Page.objects.get(url_path='/home/events/')
pages = Page.objects.ancestor_of(events_index)
self.assertEqual(pages.count(), 2)
self.assertEqual(pages[0], root_page)
self.assertEqual(pages[1], homepage)
def test_ancestor_of_inclusive(self):
root_page = Page.objects.get(id=1)
homepage = Page.objects.get(url_path='/home/')
events_index = Page.objects.get(url_path='/home/events/')
pages = Page.objects.ancestor_of(events_index, inclusive=True)
self.assertEqual(pages.count(), 3)
self.assertEqual(pages[0], root_page)
self.assertEqual(pages[1], homepage)
self.assertEqual(pages[2], events_index)
def test_not_ancestor_of(self):
root_page = Page.objects.get(id=1)
homepage = Page.objects.get(url_path='/home/')
events_index = Page.objects.get(url_path='/home/events/')
pages = Page.objects.not_ancestor_of(events_index)
# Test that none of the ancestors are in pages
for page in pages:
self.assertNotEqual(page, root_page)
self.assertNotEqual(page, homepage)
# Test that events index is in pages
self.assertTrue(pages.filter(id=events_index.id).exists())
def test_not_ancestor_of_inclusive(self):
root_page = Page.objects.get(id=1)
homepage = Page.objects.get(url_path='/home/')
events_index = Page.objects.get(url_path='/home/events/')
pages = Page.objects.not_ancestor_of(events_index, inclusive=True)
# Test that none of the ancestors or the events_index are in pages
for page in pages:
self.assertNotEqual(page, root_page)
self.assertNotEqual(page, homepage)
self.assertNotEqual(page, events_index)
def test_parent_of(self):
homepage = Page.objects.get(url_path='/home/')
events_index = Page.objects.get(url_path='/home/events/')
pages = Page.objects.parent_of(events_index)
# Pages must only contain homepage
self.assertEqual(pages.count(), 1)
self.assertEqual(pages[0], homepage)
def test_not_parent_of(self):
homepage = Page.objects.get(url_path='/home/')
events_index = Page.objects.get(url_path='/home/events/')
pages = Page.objects.not_parent_of(events_index)
# Pages must not contain homepage
for page in pages:
self.assertNotEqual(page, homepage)
# Test that events index is in pages
self.assertTrue(pages.filter(id=events_index.id).exists())
def test_sibling_of_default(self):
"""
sibling_of should default to an inclusive definition of sibling
if 'inclusive' flag not passed
"""
events_index = Page.objects.get(url_path='/home/events/')
event = Page.objects.get(url_path='/home/events/christmas/')
pages = Page.objects.sibling_of(event)
# Check that all pages are children of events_index
for page in pages:
self.assertEqual(page.get_parent(), events_index)
# Check that the event is included
self.assertTrue(pages.filter(id=event.id).exists())
def test_sibling_of_exclusive(self):
events_index = Page.objects.get(url_path='/home/events/')
event = Page.objects.get(url_path='/home/events/christmas/')
pages = Page.objects.sibling_of(event, inclusive=False)
# Check that all pages are children of events_index
for page in pages:
self.assertEqual(page.get_parent(), events_index)
# Check that the event is not included
self.assertFalse(pages.filter(id=event.id).exists())
def test_sibling_of_inclusive(self):
events_index = Page.objects.get(url_path='/home/events/')
event = Page.objects.get(url_path='/home/events/christmas/')
pages = Page.objects.sibling_of(event, inclusive=True)
# Check that all pages are children of events_index
for page in pages:
self.assertEqual(page.get_parent(), events_index)
# Check that the event is included
self.assertTrue(pages.filter(id=event.id).exists())
def test_not_sibling_of_default(self):
"""
not_sibling_of should default to an inclusive definition of sibling -
i.e. eliminate self from the results as well -
if 'inclusive' flag not passed
"""
events_index = Page.objects.get(url_path='/home/events/')
event = Page.objects.get(url_path='/home/events/christmas/')
pages = Page.objects.not_sibling_of(event)
# Check that all pages are not children of events_index
for page in pages:
self.assertNotEqual(page.get_parent(), events_index)
# Check that the event is not included
self.assertFalse(pages.filter(id=event.id).exists())
# Test that events index is in pages
self.assertTrue(pages.filter(id=events_index.id).exists())
def test_not_sibling_of_exclusive(self):
events_index = Page.objects.get(url_path='/home/events/')
event = Page.objects.get(url_path='/home/events/christmas/')
pages = Page.objects.not_sibling_of(event, inclusive=False)
# Check that all pages are not children of events_index
for page in pages:
if page != event:
self.assertNotEqual(page.get_parent(), events_index)
# Check that the event is included
self.assertTrue(pages.filter(id=event.id).exists())
# Test that events index is in pages
self.assertTrue(pages.filter(id=events_index.id).exists())
def test_not_sibling_of_inclusive(self):
events_index = Page.objects.get(url_path='/home/events/')
event = Page.objects.get(url_path='/home/events/christmas/')
pages = Page.objects.not_sibling_of(event, inclusive=True)
# Check that all pages are not children of events_index
for page in pages:
self.assertNotEqual(page.get_parent(), events_index)
# Check that the event is not included
self.assertFalse(pages.filter(id=event.id).exists())
# Test that events index is in pages
self.assertTrue(pages.filter(id=events_index.id).exists())
def test_type(self):
pages = Page.objects.type(EventPage)
# Check that all objects are EventPages
for page in pages:
self.assertIsInstance(page.specific, EventPage)
# Check that "someone elses event" is in the results
event = Page.objects.get(url_path='/home/events/someone-elses-event/')
self.assertTrue(pages.filter(id=event.id).exists())
# Check that "Saint Patrick" (an instance of SingleEventPage, a subclass of EventPage)
# is in the results
event = Page.objects.get(url_path='/home/events/saint-patrick/')
self.assertTrue(pages.filter(id=event.id).exists())
def test_type_includes_subclasses(self):
from wagtail.wagtailforms.models import AbstractEmailForm
pages = Page.objects.type(AbstractEmailForm)
# Check that all objects are instances of AbstractEmailForm
for page in pages:
self.assertIsInstance(page.specific, AbstractEmailForm)
# Check that the contact form page is in the results
contact_us = Page.objects.get(url_path='/home/contact-us/')
self.assertTrue(pages.filter(id=contact_us.id).exists())
def test_not_type(self):
pages = Page.objects.not_type(EventPage)
# Check that no objects are EventPages
for page in pages:
self.assertNotIsInstance(page.specific, EventPage)
# Check that the homepage is in the results
homepage = Page.objects.get(url_path='/home/')
self.assertTrue(pages.filter(id=homepage.id).exists())
def test_exact_type(self):
pages = Page.objects.exact_type(EventPage)
# Check that all objects are EventPages (and not a subclass)
for page in pages:
self.assertEqual(type(page.specific), EventPage)
# Check that "someone elses event" is in the results
event = Page.objects.get(url_path='/home/events/someone-elses-event/')
self.assertTrue(pages.filter(id=event.id).exists())
# Check that "Saint Patrick" (an instance of SingleEventPage, a subclass of EventPage)
# is NOT in the results
event = Page.objects.get(url_path='/home/events/saint-patrick/')
self.assertFalse(pages.filter(id=event.id).exists())
def test_not_exact_type(self):
pages = Page.objects.not_exact_type(EventPage)
# Check that no objects are EventPages
for page in pages:
self.assertNotEqual(type(page.specific), EventPage)
# Check that the homepage is in the results
homepage = Page.objects.get(url_path='/home/')
self.assertTrue(pages.filter(id=homepage.id).exists())
# Check that "Saint Patrick" (an instance of SingleEventPage, a subclass of EventPage)
# is in the results
event = Page.objects.get(url_path='/home/events/saint-patrick/')
self.assertTrue(pages.filter(id=event.id).exists())
def test_public(self):
events_index = Page.objects.get(url_path='/home/events/')
event = Page.objects.get(url_path='/home/events/christmas/')
homepage = Page.objects.get(url_path='/home/')
# Add PageViewRestriction to events_index
PageViewRestriction.objects.create(page=events_index, password='hello')
# Get public pages
pages = Page.objects.public()
# Check that the homepage is in the results
self.assertTrue(pages.filter(id=homepage.id).exists())
# Check that the events index is not in the results
self.assertFalse(pages.filter(id=events_index.id).exists())
# Check that the event is not in the results
self.assertFalse(pages.filter(id=event.id).exists())
def test_not_public(self):
events_index = Page.objects.get(url_path='/home/events/')
event = Page.objects.get(url_path='/home/events/christmas/')
homepage = Page.objects.get(url_path='/home/')
# Add PageViewRestriction to events_index
PageViewRestriction.objects.create(page=events_index, password='hello')
# Get public pages
pages = Page.objects.not_public()
# Check that the homepage is not in the results
self.assertFalse(pages.filter(id=homepage.id).exists())
# Check that the events index is in the results
self.assertTrue(pages.filter(id=events_index.id).exists())
# Check that the event is in the results
self.assertTrue(pages.filter(id=event.id).exists())
class TestPageQueryInSite(TestCase):
fixtures = ['test.json']
def setUp(self):
self.site_2_page = SimplePage(
title="Site 2 page",
slug="site_2_page",
content="Hello",
)
Page.get_first_root_node().add_child(instance=self.site_2_page)
self.site_2_subpage = SimplePage(
title="Site 2 subpage",
slug="site_2_subpage",
content="Hello again",
)
self.site_2_page.add_child(instance=self.site_2_subpage)
self.site_2 = Site.objects.create(
hostname='example.com',
port=8080,
root_page=Page.objects.get(pk=self.site_2_page.pk),
is_default_site=False
)
self.about_us_page = SimplePage.objects.get(url_path='/home/about-us/')
def test_in_site(self):
site_2_pages = SimplePage.objects.in_site(self.site_2)
self.assertIn(self.site_2_page, site_2_pages)
self.assertIn(self.site_2_subpage, site_2_pages)
self.assertNotIn(self.about_us_page, site_2_pages)
class TestPageQuerySetSearch(TestCase):
fixtures = ['test.json']
def test_search(self):
pages = EventPage.objects.search('moon', fields=['location'])
self.assertEqual(pages.count(), 2)
self.assertIn(Page.objects.get(url_path='/home/events/tentative-unpublished-event/').specific, pages)
self.assertIn(Page.objects.get(url_path='/home/events/someone-elses-event/').specific, pages)
def test_operators(self):
results = EventPage.objects.search("moon ponies", operator='and')
self.assertEqual(list(results), [
Page.objects.get(url_path='/home/events/tentative-unpublished-event/').specific
])
results = EventPage.objects.search("moon ponies", operator='or')
sorted_results = sorted(results, key=lambda page: page.url_path)
self.assertEqual(sorted_results, [
Page.objects.get(url_path='/home/events/someone-elses-event/').specific,
Page.objects.get(url_path='/home/events/tentative-unpublished-event/').specific,
])
def test_custom_order(self):
pages = EventPage.objects.order_by('url_path').search('moon', fields=['location'], order_by_relevance=False)
self.assertEqual(list(pages), [
Page.objects.get(url_path='/home/events/someone-elses-event/').specific,
Page.objects.get(url_path='/home/events/tentative-unpublished-event/').specific,
])
pages = EventPage.objects.order_by('-url_path').search('moon', fields=['location'], order_by_relevance=False)
self.assertEqual(list(pages), [
Page.objects.get(url_path='/home/events/tentative-unpublished-event/').specific,
Page.objects.get(url_path='/home/events/someone-elses-event/').specific,
])
def test_unpublish(self):
# set up a listener for the unpublish signal
unpublish_signals_fired = []
def page_unpublished_handler(sender, instance, **kwargs):
unpublish_signals_fired.append((sender, instance))
page_unpublished.connect(page_unpublished_handler)
events_index = Page.objects.get(url_path='/home/events/')
events_index.get_children().unpublish()
# Previously-live children of event index should now be non-live
christmas = EventPage.objects.get(url_path='/home/events/christmas/')
saint_patrick = SingleEventPage.objects.get(url_path='/home/events/saint-patrick/')
unpublished_event = EventPage.objects.get(url_path='/home/events/tentative-unpublished-event/')
self.assertFalse(christmas.live)
self.assertFalse(saint_patrick.live)
# Check that a signal was fired for each unpublished page
self.assertIn((EventPage, christmas), unpublish_signals_fired)
self.assertIn((SingleEventPage, saint_patrick), unpublish_signals_fired)
# a signal should not be fired for pages that were in the queryset
# but already unpublished
self.assertNotIn((EventPage, unpublished_event), unpublish_signals_fired)
class TestSpecificQuery(TestCase):
"""
Test the .specific() queryset method. This is isolated in its own test case
because it is sensitive to database changes that might happen for other
tests.
The fixture sets up a page structure like:
=========== =========================================
Type Path
=========== =========================================
Page /
Page /home/
SimplePage /home/about-us/
EventIndex /home/events/
EventPage /home/events/christmas/
EventPage /home/events/someone-elses-event/
EventPage /home/events/tentative-unpublished-event/
SimplePage /home/other/
EventPage /home/other/special-event/
=========== =========================================
"""
fixtures = ['test_specific.json']
def test_specific(self):
root = Page.objects.get(url_path='/home/')
with self.assertNumQueries(0):
# The query should be lazy.
qs = root.get_descendants().specific()
with self.assertNumQueries(4):
# One query to get page type and ID, one query per page type:
# EventIndex, EventPage, SimplePage
pages = list(qs)
self.assertIsInstance(pages, list)
self.assertEqual(len(pages), 7)
for page in pages:
# An instance of the specific page type should be returned,
# not wagtailcore.Page.
content_type = page.content_type
model = content_type.model_class()
self.assertIsInstance(page, model)
# The page should already be the specific type, so this should not
# need another database query.
with self.assertNumQueries(0):
self.assertIs(page, page.specific)
def test_filtering_before_specific(self):
# This will get the other events, and then christmas
# 'someone-elses-event' and the tentative event are unpublished.
with self.assertNumQueries(0):
qs = Page.objects.live().order_by('-url_path')[:3].specific()
with self.assertNumQueries(3):
# Metadata, EventIndex and EventPage
pages = list(qs)
self.assertEqual(len(pages), 3)
self.assertEqual(pages, [
Page.objects.get(url_path='/home/other/special-event/').specific,
Page.objects.get(url_path='/home/other/').specific,
Page.objects.get(url_path='/home/events/christmas/').specific])
def test_filtering_after_specific(self):
# This will get the other events, and then christmas
# 'someone-elses-event' and the tentative event are unpublished.
with self.assertNumQueries(0):
qs = Page.objects.specific().live().in_menu().order_by('-url_path')[:4]
with self.assertNumQueries(4):
# Metadata, EventIndex, EventPage, SimplePage.
pages = list(qs)
self.assertEqual(len(pages), 4)
self.assertEqual(pages, [
Page.objects.get(url_path='/home/other/').specific,
Page.objects.get(url_path='/home/events/christmas/').specific,
Page.objects.get(url_path='/home/events/').specific,
Page.objects.get(url_path='/home/about-us/').specific])
def test_specific_query_with_search(self):
# 1276 - The database search backend didn't return results with the
# specific type when searching a specific queryset.
pages = list(Page.objects.specific().live().in_menu().search(None, backend='wagtail.wagtailsearch.backends.db'))
# Check that each page is in the queryset with the correct type.
# We don't care about order here
self.assertEqual(len(pages), 4)
self.assertIn(Page.objects.get(url_path='/home/other/').specific, pages)
self.assertIn(Page.objects.get(url_path='/home/events/christmas/').specific, pages)
self.assertIn(Page.objects.get(url_path='/home/events/').specific, pages)
self.assertIn(Page.objects.get(url_path='/home/about-us/').specific, pages)
def test_specific_gracefully_handles_missing_models(self):
# 3567 - PageQuerySet.specific should gracefully handle pages whose class definition
# is missing, by keeping them as basic Page instances.
# Create a ContentType that doesn't correspond to a real model
missing_page_content_type = ContentType.objects.create(app_label='tests', model='missingpage')
# Turn /home/events/ into this content type
Page.objects.filter(url_path='/home/events/').update(content_type=missing_page_content_type)
pages = list(Page.objects.get(url_path='/home/').get_children().specific())
self.assertEqual(pages, [
Page.objects.get(url_path='/home/events/'),
Page.objects.get(url_path='/home/about-us/').specific,
Page.objects.get(url_path='/home/other/').specific,
])
class TestFirstCommonAncestor(TestCase):
"""
Uses the same fixture as TestSpecificQuery. See that class for the layout
of pages.
"""
fixtures = ['test_specific.json']
def setUp(self):
self.all_events = Page.objects.type(EventPage)
self.regular_events = Page.objects.type(EventPage)\
.exclude(url_path__contains='/other/')
def test_bookkeeping(self):
self.assertEqual(self.all_events.count(), 4)
self.assertEqual(self.regular_events.count(), 3)
def test_event_pages(self):
"""Common ancestor for EventPages"""
# As there are event pages in multiple trees under /home/, the home
# page is the common ancestor
self.assertEqual(
Page.objects.get(slug='home'),
self.all_events.first_common_ancestor())
def test_normal_event_pages(self):
"""Common ancestor for EventPages, excluding /other/ events"""
self.assertEqual(
Page.objects.get(slug='events'),
self.regular_events.first_common_ancestor())
def test_normal_event_pages_include_self(self):
"""
Common ancestor for EventPages, excluding /other/ events, with
include_self=True
"""
self.assertEqual(
Page.objects.get(slug='events'),
self.regular_events.first_common_ancestor(include_self=True))
def test_single_page_no_include_self(self):
"""Test getting a single page, with include_self=False."""
self.assertEqual(
Page.objects.get(slug='events'),
Page.objects.filter(title='Christmas').first_common_ancestor())
def test_single_page_include_self(self):
"""Test getting a single page, with include_self=True."""
self.assertEqual(
Page.objects.get(title='Christmas'),
Page.objects.filter(title='Christmas').first_common_ancestor(include_self=True))
def test_all_pages(self):
self.assertEqual(
Page.get_first_root_node(),
Page.objects.first_common_ancestor())
def test_all_pages_strict(self):
with self.assertRaises(Page.DoesNotExist):
Page.objects.first_common_ancestor(strict=True)
def test_all_pages_include_self_strict(self):
self.assertEqual(
Page.get_first_root_node(),
Page.objects.first_common_ancestor(include_self=True, strict=True))
def test_empty_queryset(self):
self.assertEqual(
Page.get_first_root_node(),
Page.objects.none().first_common_ancestor())
def test_empty_queryset_strict(self):
with self.assertRaises(Page.DoesNotExist):
Page.objects.none().first_common_ancestor(strict=True)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import oslo_messaging as messaging
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import vm_states
from nova.conductor.tasks import live_migrate
from nova import exception
from nova import objects
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import servicegroup
from nova import test
from nova.tests.unit import fake_instance
from nova.tests import uuidsentinel as uuids
from nova import utils
class LiveMigrationTaskTestCase(test.NoDBTestCase):
def setUp(self):
super(LiveMigrationTaskTestCase, self).setUp()
self.context = "context"
self.instance_host = "host"
self.instance_uuid = uuids.instance
self.instance_image = "image_ref"
db_instance = fake_instance.fake_db_instance(
host=self.instance_host,
uuid=self.instance_uuid,
power_state=power_state.RUNNING,
            vm_state=vm_states.ACTIVE,
memory_mb=512,
image_ref=self.instance_image)
self.instance = objects.Instance._from_db_object(
self.context, objects.Instance(), db_instance)
self.instance.system_metadata = {'image_hw_disk_bus': 'scsi'}
self.destination = "destination"
self.block_migration = "bm"
self.disk_over_commit = "doc"
self.migration = objects.Migration()
self.fake_spec = objects.RequestSpec()
self._generate_task()
def _generate_task(self):
self.task = live_migrate.LiveMigrationTask(self.context,
self.instance, self.destination, self.block_migration,
self.disk_over_commit, self.migration, compute_rpcapi.ComputeAPI(),
servicegroup.API(), scheduler_client.SchedulerClient(),
self.fake_spec)
def test_execute_with_destination(self):
with test.nested(
mock.patch.object(self.task, '_check_host_is_up'),
mock.patch.object(self.task, '_check_requested_destination'),
mock.patch.object(self.task.compute_rpcapi, 'live_migration'),
) as (mock_check_up, mock_check_dest, mock_mig):
mock_mig.return_value = "bob"
self.assertEqual("bob", self.task.execute())
mock_check_up.assert_called_once_with(self.instance_host)
mock_check_dest.assert_called_once_with()
mock_mig.assert_called_once_with(
self.context,
host=self.instance_host,
instance=self.instance,
dest=self.destination,
block_migration=self.block_migration,
migration=self.migration,
migrate_data=None)
def test_execute_without_destination(self):
self.destination = None
self._generate_task()
self.assertIsNone(self.task.destination)
with test.nested(
mock.patch.object(self.task, '_check_host_is_up'),
mock.patch.object(self.task, '_find_destination'),
mock.patch.object(self.task.compute_rpcapi, 'live_migration'),
mock.patch.object(self.migration, 'save')
) as (mock_check, mock_find, mock_mig, mock_save):
mock_find.return_value = "found_host"
mock_mig.return_value = "bob"
self.assertEqual("bob", self.task.execute())
mock_check.assert_called_once_with(self.instance_host)
mock_find.assert_called_once_with()
mock_mig.assert_called_once_with(self.context,
host=self.instance_host,
instance=self.instance,
dest="found_host",
block_migration=self.block_migration,
migration=self.migration,
migrate_data=None)
self.assertTrue(mock_save.called)
self.assertEqual('found_host', self.migration.dest_compute)
def test_check_instance_is_active_passes_when_paused(self):
self.task.instance['power_state'] = power_state.PAUSED
self.task._check_instance_is_active()
def test_check_instance_is_active_fails_when_shutdown(self):
self.task.instance['power_state'] = power_state.SHUTDOWN
self.assertRaises(exception.InstanceInvalidState,
self.task._check_instance_is_active)
@mock.patch.object(objects.Service, 'get_by_compute_host')
@mock.patch.object(servicegroup.API, 'service_is_up')
def test_check_instance_host_is_up(self, mock_is_up, mock_get):
mock_get.return_value = "service"
mock_is_up.return_value = True
self.task._check_host_is_up("host")
mock_get.assert_called_once_with(self.context, "host")
mock_is_up.assert_called_once_with("service")
@mock.patch.object(objects.Service, 'get_by_compute_host')
@mock.patch.object(servicegroup.API, 'service_is_up')
def test_check_instance_host_is_up_fails_if_not_up(self, mock_is_up,
mock_get):
mock_get.return_value = "service"
mock_is_up.return_value = False
self.assertRaises(exception.ComputeServiceUnavailable,
self.task._check_host_is_up, "host")
mock_get.assert_called_once_with(self.context, "host")
mock_is_up.assert_called_once_with("service")
@mock.patch.object(objects.Service, 'get_by_compute_host',
side_effect=exception.ComputeHostNotFound(host='host'))
def test_check_instance_host_is_up_fails_if_not_found(self, mock):
self.assertRaises(exception.ComputeHostNotFound,
self.task._check_host_is_up, "host")
@mock.patch.object(objects.Service, 'get_by_compute_host')
@mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
@mock.patch.object(servicegroup.API, 'service_is_up')
@mock.patch.object(compute_rpcapi.ComputeAPI,
'check_can_live_migrate_destination')
def test_check_requested_destination(self, mock_check, mock_is_up,
mock_get_info, mock_get_host):
mock_get_host.return_value = "service"
mock_is_up.return_value = True
hypervisor_details = objects.ComputeNode(
hypervisor_type="a",
hypervisor_version=6.1,
free_ram_mb=513,
memory_mb=512,
ram_allocation_ratio=1.0)
mock_get_info.return_value = hypervisor_details
mock_check.return_value = "migrate_data"
self.task._check_requested_destination()
self.assertEqual("migrate_data", self.task.migrate_data)
mock_get_host.assert_called_once_with(self.context, self.destination)
mock_is_up.assert_called_once_with("service")
self.assertEqual([mock.call(self.destination),
mock.call(self.instance_host),
mock.call(self.destination)],
mock_get_info.call_args_list)
mock_check.assert_called_once_with(self.context, self.instance,
self.destination, self.block_migration, self.disk_over_commit)
def test_check_requested_destination_fails_with_same_dest(self):
self.task.destination = "same"
self.task.source = "same"
self.assertRaises(exception.UnableToMigrateToSelf,
self.task._check_requested_destination)
@mock.patch.object(objects.Service, 'get_by_compute_host',
side_effect=exception.ComputeHostNotFound(host='host'))
    def test_check_requested_destination_fails_when_destination_not_found(
            self, mock):
self.assertRaises(exception.ComputeHostNotFound,
self.task._check_requested_destination)
@mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up')
@mock.patch.object(objects.ComputeNode,
'get_first_node_by_host_for_old_compat')
def test_check_requested_destination_fails_with_not_enough_memory(
self, mock_get_first, mock_is_up):
mock_get_first.return_value = (
objects.ComputeNode(free_ram_mb=513,
memory_mb=1024,
ram_allocation_ratio=0.9,))
# free_ram is bigger than instance.ram (512) but the allocation
# ratio reduces the total available RAM to 410MB
# (1024 * 0.9 - (1024 - 513))
self.assertRaises(exception.MigrationPreCheckError,
self.task._check_requested_destination)
mock_is_up.assert_called_once_with(self.destination)
mock_get_first.assert_called_once_with(self.context, self.destination)
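
    # Sketch of the check exercised above, inferred from the fixture values
    # (not a quote of the production code):
    #   avail = memory_mb * ram_allocation_ratio - (memory_mb - free_ram_mb)
    #         = 1024 * 0.9 - (1024 - 513) = 410.6 MB, which is < 512 MB.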
@mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up')
@mock.patch.object(live_migrate.LiveMigrationTask,
'_check_destination_has_enough_memory')
@mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
def test_check_requested_destination_fails_with_hypervisor_diff(
self, mock_get_info, mock_check, mock_is_up):
mock_get_info.side_effect = [
objects.ComputeNode(hypervisor_type='b'),
objects.ComputeNode(hypervisor_type='a')]
self.assertRaises(exception.InvalidHypervisorType,
self.task._check_requested_destination)
mock_is_up.assert_called_once_with(self.destination)
mock_check.assert_called_once_with()
self.assertEqual([mock.call(self.instance_host),
mock.call(self.destination)],
mock_get_info.call_args_list)
@mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up')
@mock.patch.object(live_migrate.LiveMigrationTask,
'_check_destination_has_enough_memory')
@mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
def test_check_requested_destination_fails_with_hypervisor_too_old(
self, mock_get_info, mock_check, mock_is_up):
host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7}
host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6}
mock_get_info.side_effect = [objects.ComputeNode(**host1),
objects.ComputeNode(**host2)]
self.assertRaises(exception.DestinationHypervisorTooOld,
self.task._check_requested_destination)
mock_is_up.assert_called_once_with(self.destination)
mock_check.assert_called_once_with()
self.assertEqual([mock.call(self.instance_host),
mock.call(self.destination)],
mock_get_info.call_args_list)
def test_find_destination_works(self):
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(objects.RequestSpec,
'reset_forced_destinations')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
self.fake_spec.reset_forced_destinations()
self.task.scheduler_client.select_destinations(
self.context, self.fake_spec).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")
self.mox.ReplayAll()
self.assertEqual("host1", self.task._find_destination())
def test_find_destination_works_with_no_request_spec(self):
task = live_migrate.LiveMigrationTask(
self.context, self.instance, self.destination,
self.block_migration, self.disk_over_commit, self.migration,
compute_rpcapi.ComputeAPI(), servicegroup.API(),
scheduler_client.SchedulerClient(), request_spec=None)
another_spec = objects.RequestSpec()
self.instance.flavor = objects.Flavor()
self.instance.numa_topology = None
self.instance.pci_requests = None
@mock.patch.object(task, '_call_livem_checks_on_host')
@mock.patch.object(task, '_check_compatible_with_source_hypervisor')
@mock.patch.object(task.scheduler_client, 'select_destinations')
@mock.patch.object(objects.RequestSpec, 'from_components')
@mock.patch.object(scheduler_utils, 'setup_instance_group')
@mock.patch.object(utils, 'get_image_from_system_metadata')
def do_test(get_image, setup_ig, from_components, select_dest,
check_compat, call_livem_checks):
get_image.return_value = "image"
from_components.return_value = another_spec
select_dest.return_value = [{'host': 'host1'}]
self.assertEqual("host1", task._find_destination())
get_image.assert_called_once_with(self.instance.system_metadata)
fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
setup_ig.assert_called_once_with(
self.context, fake_props,
{'ignore_hosts': [self.instance_host]}
)
select_dest.assert_called_once_with(self.context, another_spec)
check_compat.assert_called_once_with("host1")
call_livem_checks.assert_called_once_with("host1")
do_test()
def test_find_destination_no_image_works(self):
self.instance['image_ref'] = ''
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")
self.mox.ReplayAll()
self.assertEqual("host1", self.task._find_destination())
def _test_find_destination_retry_hypervisor_raises(self, error):
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")\
.AndRaise(error)
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec).AndReturn(
[{'host': 'host2'}])
self.task._check_compatible_with_source_hypervisor("host2")
self.task._call_livem_checks_on_host("host2")
self.mox.ReplayAll()
self.assertEqual("host2", self.task._find_destination())
def test_find_destination_retry_with_old_hypervisor(self):
self._test_find_destination_retry_hypervisor_raises(
exception.DestinationHypervisorTooOld)
def test_find_destination_retry_with_invalid_hypervisor_type(self):
self._test_find_destination_retry_hypervisor_raises(
exception.InvalidHypervisorType)
def test_find_destination_retry_with_invalid_livem_checks(self):
self.flags(migrate_max_retries=1)
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")\
.AndRaise(exception.Invalid)
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec).AndReturn(
[{'host': 'host2'}])
self.task._check_compatible_with_source_hypervisor("host2")
self.task._call_livem_checks_on_host("host2")
self.mox.ReplayAll()
self.assertEqual("host2", self.task._find_destination())
def test_find_destination_retry_with_failed_migration_pre_checks(self):
self.flags(migrate_max_retries=1)
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")\
.AndRaise(exception.MigrationPreCheckError("reason"))
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec).AndReturn(
[{'host': 'host2'}])
self.task._check_compatible_with_source_hypervisor("host2")
self.task._call_livem_checks_on_host("host2")
self.mox.ReplayAll()
self.assertEqual("host2", self.task._find_destination())
def test_find_destination_retry_exceeds_max(self):
self.flags(migrate_max_retries=0)
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")\
.AndRaise(exception.DestinationHypervisorTooOld)
self.mox.ReplayAll()
with mock.patch.object(self.task.migration, 'save') as save_mock:
self.assertRaises(exception.MaxRetriesExceeded,
self.task._find_destination)
self.assertEqual('failed', self.task.migration.status)
save_mock.assert_called_once_with()
def test_find_destination_when_runs_out_of_hosts(self):
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
self.context, fake_props, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec).AndRaise(
exception.NoValidHost(reason=""))
self.mox.ReplayAll()
self.assertRaises(exception.NoValidHost, self.task._find_destination)
@mock.patch("nova.utils.get_image_from_system_metadata")
@mock.patch("nova.scheduler.utils.build_request_spec")
@mock.patch("nova.scheduler.utils.setup_instance_group")
@mock.patch("nova.objects.RequestSpec.from_primitives")
    def test_find_destination_with_remote_error(
            self, m_from_primitives, m_setup_instance_group,
            m_build_request_spec, m_get_image_from_system_metadata):
m_get_image_from_system_metadata.return_value = {'properties': {}}
m_build_request_spec.return_value = {}
fake_spec = objects.RequestSpec()
m_from_primitives.return_value = fake_spec
with mock.patch.object(self.task.scheduler_client,
'select_destinations') as m_select_destinations:
error = messaging.RemoteError()
m_select_destinations.side_effect = error
self.assertRaises(exception.MigrationSchedulerRPCError,
self.task._find_destination)
def test_call_livem_checks_on_host(self):
with mock.patch.object(self.task.compute_rpcapi,
'check_can_live_migrate_destination',
side_effect=messaging.MessagingTimeout):
self.assertRaises(exception.MigrationPreCheckError,
self.task._call_livem_checks_on_host, {})
|
|
# -*- coding: utf-8 -*-
'''
Created on 02.10.2015
@author: mEDI
'''
from PySide import QtCore, QtGui
import PySide
import gui.guitools as guitools
from sqlite3_functions import calcDistance
__toolname__ = "Bookmarks"
__internalName__ = "Bo"
__statusTip__ = "Open A %s Window" % __toolname__
class tool(QtGui.QWidget):
main = None
mydb = None
route = None
def __init__(self, main):
super(tool, self).__init__(main)
self.main = main
self.mydb = main.mydb
self.guitools = guitools.guitools(self)
self.createActions()
def getWideget(self):
locationButton = QtGui.QToolButton()
locationButton.setIcon(self.guitools.getIconFromsvg("img/location.svg"))
        locationButton.clicked.connect(self.setCurrentLocation)
locationButton.setToolTip("Current Location")
locationLabel = QtGui.QLabel("Location:")
self.locationlineEdit = guitools.LineEdit()
self.locationlineEdit.setText(self.main.location.getLocation())
self.locationlineEdit.textChanged.connect(self.showBookmarks)
self.searchbutton = QtGui.QPushButton("Search")
self.searchbutton.clicked.connect(self.showBookmarks)
layout = QtGui.QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(locationLabel)
layout.addWidget(locationButton)
layout.addWidget(self.locationlineEdit)
layout.addWidget(self.searchbutton)
locationGroupBox = QtGui.QGroupBox()
locationGroupBox.setFlat(True)
        locationGroupBox.setStyleSheet("""QGroupBox {border:0;margin:0;padding:0;}""")
locationGroupBox.setLayout(layout)
self.listView = QtGui.QTreeView()
self.listView.setAlternatingRowColors(True)
self.listView.setSortingEnabled(False)
self.listView.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.listView.setSelectionBehavior(QtGui.QAbstractItemView.SelectItems)
self.listView.setRootIsDecorated(True)
self.listView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.listView.customContextMenuRequested.connect(self.myContextMenuEvent)
vGroupBox = QtGui.QGroupBox()
vGroupBox.setFlat(True)
layout = QtGui.QVBoxLayout()
layout.setContentsMargins(6, 6, 6, 6)
layout.addWidget(locationGroupBox)
layout.addWidget(self.listView)
vGroupBox.setLayout(layout)
self.guitools.setSystemComplete("", self.locationlineEdit)
self.showBookmarks()
return vGroupBox
def myContextMenuEvent(self, event):
menu = QtGui.QMenu(self)
menu.addAction(self.copyAct)
indexes = self.listView.selectionModel().selectedIndexes()
if indexes and isinstance(indexes[0].internalPointer(), BookmarkTreeItem):
menu.addAction(self.deleteBookmarkAct)
menu.addAction(self.reloadAct)
menu.exec_(self.listView.viewport().mapToGlobal(event))
    def setCurrentLocation(self):
        self.locationlineEdit.setText(self.main.location.getLocation())
def createActions(self):
self.copyAct = QtGui.QAction("Copy", self, triggered=self.guitools.copyToClipboard, shortcut=QtGui.QKeySequence.Copy)
self.deleteBookmarkAct = QtGui.QAction("Delete Bookmark", self, triggered=self.deleteBookmark)
self.reloadAct = QtGui.QAction("Reload Bookmarks", self, triggered=self.showBookmarks)
    def deleteBookmark(self):
        indexes = self.listView.selectionModel().selectedIndexes()
        if indexes and isinstance(indexes[0].internalPointer(), BookmarkTreeItem):
            treeItem = indexes[0].internalPointer()
            bookmarkID = int(treeItem.data(0))
msg = "Are you sure you want to delete the bookmark?"
msgBox = QtGui.QMessageBox(QtGui.QMessageBox.Information,
"Delete Bookmark", msg,
QtGui.QMessageBox.NoButton, self)
msgBox.addButton("Delete", QtGui.QMessageBox.AcceptRole)
msgBox.addButton("Cancel", QtGui.QMessageBox.RejectRole)
if msgBox.exec_() == QtGui.QMessageBox.AcceptRole:
                self.mydb.deleteBookmark(bookmarkID)
self.showBookmarks()
def showBookmarks(self):
firstrun = False
if not self.listView.header().count():
firstrun = True
location = self.locationlineEdit.text()
systemID = self.mydb.getSystemIDbyName(location)
currentSystem = None
if systemID:
currentSystem = self.mydb.getSystemData(systemID)
bookmarks = self.mydb.getBookmarks()
self.bookmarkModel = BookmarkTreeModel(bookmarks, currentSystem)
self.listView.setModel(self.bookmarkModel)
self.bookmarkModel.dataChanged.connect(self.saveItemEdit)
self.setCurrentProfit()
if firstrun:
for i in range(0, self.listView.header().count()):
self.listView.resizeColumnToContents(i)
    def saveItemEdit(self, item):
        changesSaved = None
        if isinstance(item.internalPointer(), BookmarkTreeItem) and item.column() == 1:
            bookmarkID = self.listView.model().index(item.row(), 0).data()
            changesSaved = self.mydb.updateBookmarkName(bookmarkID, item.data(0))
if changesSaved:
self.main.setStatusBar("changes saved")
def setCurrentProfit(self):
if self.listView.model():
for rid in range(0, self.listView.model().rowCount(QtCore.QModelIndex())):
routeRoot = self.listView.model().index(rid, 0).internalPointer()
if routeRoot.childCount() > 0:
routeID = routeRoot.data(0)
itemData = self.mydb.getChildsFromBookarkRoute(routeID)
if not itemData:
continue
                    totalProfit = 0
                    for cid in range(0, routeRoot.childCount()):
                        child = routeRoot.child(cid)
                        profit = 0
                        if cid + 1 < routeRoot.childCount():
                            destStation = self.mydb.getPriceOnStation(
                                itemData[cid + 1]['StationID'], itemData[cid]['ItemID'])
                        else:  # back deal: sell the last leg at the route's first station
                            destStation = self.mydb.getPriceOnStation(
                                itemData[0]['StationID'], itemData[cid]['ItemID'])
                        if destStation:
                            profit = destStation['StationBuy'] - itemData[cid]['StationSell']
                            totalProfit += profit
                        child.setData(6, profit)
routeRoot.setData(6, totalProfit)
self.listView.dataChanged(self.listView.model().index(0, 0), self.listView.model().index(self.listView.model().rowCount(QtCore.QModelIndex()), 0))
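
    # Profit sketch for a bookmarked round trip A -> B -> A (hypothetical
    # prices): each leg earns destStation['StationBuy'] minus the source
    # station's 'StationSell'; the last leg ("back deal") sells at the
    # route's first station again.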
'''
Bookmark Tree Item Model
'''
class BookmarkRootTreeItem(object):
def __init__(self, data, parent=None):
self.parentItem = parent
self.itemData = data
self.childItems = []
def appendChild(self, item):
self.childItems.append(item)
def child(self, row):
return self.childItems[row]
def childCount(self):
return len(self.childItems)
def columnCount(self):
return len(self.itemData)
def data(self, column):
try:
return self.itemData[column]
except IndexError:
return None
def parent(self):
return self.parentItem
def row(self):
if self.parentItem:
return self.parentItem.childItems.index(self)
return 0
class BookmarkTreeItem(object):
def __init__(self, data, parent=None):
self.parentItem = parent
self.itemData = data
self.childItems = []
def appendChild(self, item):
self.childItems.append(item)
def child(self, row):
return self.childItems[row]
def childCount(self):
return len(self.childItems)
def columnCount(self):
return len(self.itemData)
def data(self, column):
try:
return self.itemData[column]
except IndexError:
return None
def setData(self, column, value):
if column < 0 or column >= len(self.itemData):
return False
self.itemData[column] = value
return True
def parent(self):
return self.parentItem
def row(self):
if self.parentItem:
return self.parentItem.childItems.index(self)
return 0
class BookmarkChildTreeItem(object):
def __init__(self, data, parent=None):
self.parentItem = parent
self.itemData = data
self.childItems = []
def appendChild(self, item):
self.childItems.append(item)
def child(self, row):
return self.childItems[row]
def childCount(self):
return len(self.childItems)
def columnCount(self):
return len(self.itemData)
def data(self, column):
try:
return self.itemData[column]
except IndexError:
return None
def setData(self, column, value):
try:
self.itemData[column] = value
return True
except IndexError:
return None
def parent(self):
return self.parentItem
def row(self):
if self.parentItem:
return self.parentItem.childItems.index(self)
return 0
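# A hedged sketch (hypothetical helper, not used by the model below) of the
# protocol the three item classes above share: each node keeps its row data,
# its parent and its children, and row() recovers the node's position from
# the parent's child list.
def _demo_tree_item_protocol():
    root = BookmarkRootTreeItem(("Id.", "Name"))
    route = BookmarkTreeItem([1, "route"], root)
    root.appendChild(route)
    hop = BookmarkChildTreeItem(["", "hop"], route)
    route.appendChild(hop)
    assert hop.parent() is route and route.row() == 0
    assert root.child(0).data(1) == "route"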
class BookmarkTreeModel(QtCore.QAbstractItemModel):
def __init__(self, data, currentSystem, parent=None):
super(BookmarkTreeModel, self).__init__(parent)
self.currentSystem = currentSystem
self.rootItem = BookmarkRootTreeItem(("Id.", "Name", "System", "Distance", "Station", "Item", "Profit", ""))
self.setupModelData(data, self.rootItem)
def columnCount(self, parent):
if parent.isValid():
return parent.internalPointer().columnCount()
else:
return self.rootItem.columnCount()
def data(self, index, role):
if not index.isValid():
return None
if role == QtCore.Qt.TextAlignmentRole:
if index.column() == 3: # dist
return QtCore.Qt.AlignRight
if role != QtCore.Qt.DisplayRole and role != QtCore.Qt.EditRole:
return None
item = index.internalPointer()
return item.data(index.column())
def setData(self, index, value, role=QtCore.Qt.EditRole):
if role != QtCore.Qt.EditRole:
return False
item = self.getItem(index)
result = item.setData(index.column(), value)
if result:
self.dataChanged.emit(index, index)
return result
def flags(self, index):
if not index.isValid():
return QtCore.Qt.NoItemFlags
if index.column() == 1 and isinstance(index.internalPointer(), BookmarkTreeItem): # Edit Name/ Comment
return QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
def getItem(self, index):
if index.isValid():
item = index.internalPointer()
if item:
return item
return self.rootItem
def headerData(self, section, orientation, role):
if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
return self.rootItem.data(section)
return None
def index(self, row, column, parent=QtCore.QModelIndex()):
if not self.hasIndex(row, column, parent):
return QtCore.QModelIndex()
if not parent.isValid():
parentItem = self.rootItem
else:
parentItem = parent.internalPointer()
childItem = parentItem.child(row)
if childItem:
return self.createIndex(row, column, childItem)
else:
return QtCore.QModelIndex()
def parent(self, index):
if not index.isValid():
return QtCore.QModelIndex()
childItem = index.internalPointer()
parentItem = childItem.parent()
if parentItem == self.rootItem:
return QtCore.QModelIndex()
return self.createIndex(parentItem.row(), 0, parentItem)
def rowCount(self, parent):
if parent.column() > 0:
return 0
if not parent.isValid():
parentItem = self.rootItem
else:
parentItem = parent.internalPointer()
return parentItem.childCount()
def setupModelData(self, bookmarks, parent):
parents = [parent]
for bookmark in bookmarks:
            if bookmark['childs']:
distance = None
if self.currentSystem and self.currentSystem['posX'] and bookmark['childs'][0]['posX']:
distance = calcDistance(self.currentSystem["posX"], self.currentSystem["posY"], self.currentSystem["posZ"], bookmark['childs'][0]["posX"], bookmark['childs'][0]["posY"], bookmark['childs'][0]["posZ"])
system = None
if bookmark['Type'] == 1:
system = bookmark['childs'][0]['System']
data = [bookmark['id'], bookmark['Name'], system, distance, "", "", ""]
parents[-1].appendChild(BookmarkTreeItem(data, parents[-1]))
if bookmark['Type'] != 1:
                    # the rows that follow are children of this route
parents.append(parents[-1].child(parents[-1].childCount() - 1))
for i, child in enumerate(bookmark['childs']):
distance = None
if i + 1 < len(bookmark['childs']):
if bookmark['childs'][i + 1] and child['posX']:
distance = calcDistance(bookmark['childs'][i + 1]["posX"], bookmark['childs'][i + 1]["posY"], bookmark['childs'][i + 1]["posZ"], child["posX"], child["posY"], child["posZ"])
else: # back hop
if bookmark['childs'][0] and child['posX']:
distance = calcDistance(bookmark['childs'][0]["posX"], bookmark['childs'][0]["posY"], bookmark['childs'][0]["posZ"], child["posX"], child["posY"], child["posZ"])
data = ["", "", child['System'], distance, child['Station'], child['name'], ""]
parents[-1].appendChild(BookmarkChildTreeItem(data, parents[-1]))
parents.pop()
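# A minimal usage sketch (hypothetical in-memory rows; assumes the Qt
# imports this module already uses): feed BookmarkTreeModel the dict layout
# produced by mydb.getBookmarks() and walk the two-level tree, mirroring
# what showBookmarks() does with real database rows.
def _demo_bookmark_model():
    rows = [{'id': 1, 'Name': 'Ore run', 'Type': 2, 'childs': [
        {'System': 'Sol', 'Station': 'Abraham Lincoln', 'name': 'Gold',
         'posX': 0.0, 'posY': 0.0, 'posZ': 0.0},
    ]}]
    model = BookmarkTreeModel(rows, currentSystem=None)
    top = model.index(0, 1)
    assert top.data(QtCore.Qt.DisplayRole) == 'Ore run'
    assert model.rowCount(model.index(0, 0)) == 1
    child = model.index(0, 2, model.index(0, 0))
    assert child.data(QtCore.Qt.DisplayRole) == 'Sol'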
|
|
####### dev hack flags ###############
verify_stack_after_op = False
# ######################################
import sys
sys.setrecursionlimit(10000)
import copy
from rlp.utils import encode_hex, ascii_chr
from ethereum import utils
from ethereum.abi import is_numeric
from ethereum import opcodes
from ethereum.slogging import get_logger
from ethereum.utils import to_string, encode_int, zpad, bytearray_to_bytestr, safe_ord
if sys.version_info.major == 2:
from repoze.lru import lru_cache
else:
from functools import lru_cache
log_log = get_logger('eth.vm.log')
log_msg = get_logger('eth.pb.msg')
log_vm_exit = get_logger('eth.vm.exit')
log_vm_op = get_logger('eth.vm.op')
log_vm_op_stack = get_logger('eth.vm.op.stack')
log_vm_op_memory = get_logger('eth.vm.op.memory')
log_vm_op_storage = get_logger('eth.vm.op.storage')
TT256 = 2 ** 256
TT256M1 = 2 ** 256 - 1
TT255 = 2 ** 255
MAX_DEPTH = 1024
# Wrapper to store call data. This is needed because it is possible to
# call a contract N times with N bytes of data with a gas cost of O(N);
# if implemented naively this would require O(N**2) bytes of data
# copying. Instead we just copy the reference to the parent memory
# slice plus the start and end of the slice
class CallData(object):
def __init__(self, parent_memory, offset=0, size=None):
self.data = parent_memory
self.offset = offset
self.size = len(self.data) if size is None else size
self.rlimit = self.offset + self.size
# Convert calldata to bytes
def extract_all(self):
d = self.data[self.offset: self.offset + self.size]
d.extend(bytearray(self.size - len(d)))
return bytes(bytearray(d))
# Extract 32 bytes as integer
def extract32(self, i):
if i >= self.size:
return 0
o = self.data[self.offset + i: min(self.offset + i + 32, self.rlimit)]
o.extend(bytearray(32 - len(o)))
return utils.bytearray_to_int(o)
# Extract a slice and copy it to memory
def extract_copy(self, mem, memstart, datastart, size):
for i in range(size):
if datastart + i < self.size:
mem[memstart + i] = self.data[self.offset + datastart + i]
else:
mem[memstart + i] = 0
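# A short sketch (hypothetical byte values, not part of the original module)
# of the zero-copy behaviour described above: nested CallData views share one
# parent list, so each view costs O(1) extra space instead of an O(N) copy.
def _demo_calldata():
    parent = list(map(utils.safe_ord, b'\x01\x02\x03\x04\x05\x06\x07\x08'))
    cd = CallData(parent, offset=2, size=4)    # view over bytes 2..5
    sub = CallData(cd.data, cd.offset + 1, 2)  # narrower view, still no copy
    assert cd.extract_all() == b'\x03\x04\x05\x06'
    assert sub.extract_all() == b'\x04\x05'
    assert cd.extract32(0) != 0                # 32-byte read is zero-padded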
# Stores a message object, including context data like sender,
# destination, gas, whether or not it is a STATICCALL, etc
class Message(object):
def __init__(self, sender, to, value=0, gas=1000000, data='', depth=0,
code_address=None, is_create=False, transfers_value=True, static=False):
self.sender = sender
self.to = to
self.value = value
self.gas = gas
self.data = CallData(list(map(utils.safe_ord, data))) if isinstance(
data, (str, bytes)) else data
self.depth = depth
self.logs = []
self.code_address = to if code_address is None else code_address
self.is_create = is_create
self.transfers_value = transfers_value
self.static = static
def __repr__(self):
return '<Message(to:%s...)>' % self.to[:8]
# Virtual machine state of the current EVM instance
class Compustate():
def __init__(self, **kwargs):
self.memory = bytearray()
self.stack = []
self.steps = 0
self.pc = 0
self.gas = 0
self.prev_memory = bytearray()
self.prev_stack = []
self.prev_pc = 0
self.prev_gas = 0
self.prev_prev_op = None
self.last_returned = bytearray()
for kw in kwargs:
setattr(self, kw, kwargs[kw])
def reset_prev(self):
self.prev_memory = copy.copy(self.memory)
self.prev_stack = copy.copy(self.stack)
self.prev_pc = self.pc
self.prev_gas = self.gas
# Preprocesses code, and determines which locations are in the middle
# of pushdata and thus invalid
@lru_cache(128)
def preprocess_code(code):
o = 0
i = 0
pushcache = {}
code = code + b'\x00' * 32
while i < len(code) - 32:
codebyte = safe_ord(code[i])
if codebyte == 0x5b:
o |= 1 << i
if 0x60 <= codebyte <= 0x7f:
pushcache[i] = utils.big_endian_to_int(
code[i + 1: i + codebyte - 0x5e])
i += codebyte - 0x5e
else:
i += 1
return o, pushcache
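# A hedged illustration (hypothetical bytecode): bit i of the returned mask
# is set iff code[i] is a JUMPDEST that is not inside pushdata, and
# pushcache maps each PUSH's pc to its decoded operand.
def _demo_preprocess():
    code = b'\x60\x5b\x5b'            # PUSH1 0x5b; JUMPDEST
    mask, pushcache = preprocess_code(code)
    assert not (mask >> 1) & 1        # offset 1 is pushdata, not a dest
    assert (mask >> 2) & 1            # offset 2 is a real JUMPDEST
    assert pushcache[0] == 0x5b       # operand of the PUSH1 at offset 0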
# Extends memory, and pays gas for it
def mem_extend(mem, compustate, op, start, sz):
if sz and start + sz > len(mem):
oldsize = len(mem) // 32
old_totalfee = oldsize * opcodes.GMEMORY + \
oldsize ** 2 // opcodes.GQUADRATICMEMDENOM
newsize = utils.ceil32(start + sz) // 32
new_totalfee = newsize * opcodes.GMEMORY + \
newsize**2 // opcodes.GQUADRATICMEMDENOM
memfee = new_totalfee - old_totalfee
if compustate.gas < memfee:
compustate.gas = 0
return False
compustate.gas -= memfee
m_extend = (newsize - oldsize) * 32
mem.extend(bytearray(m_extend))
return True
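# A small worked example of the quadratic fee schedule above (written
# symbolically in the opcodes constants, so no magic numbers are assumed):
# growing memory from 0 to 64 bytes pays for 2 words plus the quadratic term.
def _demo_memfee():
    c = Compustate(gas=10000)
    mem = bytearray()
    assert mem_extend(mem, c, 'MLOAD', 32, 32)   # touch bytes 32..63
    assert len(mem) == 64
    fee = 2 * opcodes.GMEMORY + 2 ** 2 // opcodes.GQUADRATICMEMDENOM
    assert c.gas == 10000 - fee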
# Pays gas for copying data
def data_copy(compustate, size):
return eat_gas(compustate, opcodes.GCOPY * utils.ceil32(size) // 32)
# Consumes a given amount of gas
def eat_gas(compustate, amount):
if compustate.gas < amount:
compustate.gas = 0
return False
else:
compustate.gas -= amount
return True
# Used to compute maximum amount of gas for child calls
def all_but_1n(x, n):
return x - x // n
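# The EIP-150 "all but one 64th" rule in isolation: a child call may receive
# at most gas - gas // 64 of the remaining gas (pure arithmetic, using the
# same denominator constant as the call sites below).
def _demo_all_but_1n():
    assert all_but_1n(6400, opcodes.CALL_CHILD_LIMIT_DENOM) == 6300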
# Throws a VM exception
def vm_exception(error, **kargs):
log_vm_exit.trace('EXCEPTION', cause=error, **kargs)
return 0, 0, []
# Peacefully exits the VM
def peaceful_exit(cause, gas, data, **kargs):
log_vm_exit.trace('EXIT', cause=cause, **kargs)
return 1, gas, data
# Exits with the REVERT opcode
def revert(gas, data, **kargs):
log_vm_exit.trace('REVERT', **kargs)
return 0, gas, data
def vm_trace(ext, msg, compustate, opcode, pushcache, tracer=log_vm_op):
"""
This diverges from normal logging, as we use the logging namespace
only to decide which features get logged in 'eth.vm.op'
i.e. tracing can not be activated by activating a sub
like 'eth.vm.op.stack'
"""
op, in_args, out_args, fee = opcodes.opcodes[opcode]
trace_data = {}
trace_data['stack'] = list(map(to_string, list(compustate.prev_stack)))
if compustate.prev_prev_op in ('MLOAD', 'MSTORE', 'MSTORE8', 'SHA3', 'CALL',
'CALLCODE', 'CREATE', 'CALLDATACOPY', 'CODECOPY',
'EXTCODECOPY'):
if len(compustate.prev_memory) < 4096:
trace_data['memory'] = \
''.join([encode_hex(ascii_chr(x)) for x
in compustate.prev_memory])
else:
trace_data['sha3memory'] = \
encode_hex(utils.sha3(b''.join([ascii_chr(x) for
x in compustate.prev_memory])))
if compustate.prev_prev_op in ('SSTORE',) or compustate.steps == 0:
trace_data['storage'] = ext.log_storage(msg.to)
trace_data['gas'] = to_string(compustate.prev_gas)
trace_data['gas_cost'] = to_string(compustate.prev_gas - compustate.gas)
trace_data['fee'] = fee
trace_data['inst'] = opcode
trace_data['pc'] = to_string(compustate.prev_pc)
if compustate.steps == 0:
trace_data['depth'] = msg.depth
trace_data['address'] = msg.to
trace_data['steps'] = compustate.steps
trace_data['depth'] = msg.depth
    if op[:4] == 'PUSH':
        trace_data['pushvalue'] = pushcache[compustate.prev_pc]
tracer.trace('vm', op=op, **trace_data)
compustate.steps += 1
compustate.prev_prev_op = op
# Main function
def vm_execute(ext, msg, code):
# precompute trace flag
# if we trace vm, we're in slow mode anyway
trace_vm = log_vm_op.is_active('trace')
# Initialize stack, memory, program counter, etc
compustate = Compustate(gas=msg.gas)
stk = compustate.stack
mem = compustate.memory
# Compute
jumpdest_mask, pushcache = preprocess_code(code)
codelen = len(code)
# For tracing purposes
op = None
_prevop = None
steps = 0
while compustate.pc < codelen:
opcode = safe_ord(code[compustate.pc])
# Invalid operation
if opcode not in opcodes.opcodes:
return vm_exception('INVALID OP', opcode=opcode)
if opcode in opcodes.opcodesMetropolis and not ext.post_metropolis_hardfork():
return vm_exception('INVALID OP (not yet enabled)', opcode=opcode)
op, in_args, out_args, fee = opcodes.opcodes[opcode]
# Apply operation
if trace_vm:
compustate.reset_prev()
compustate.gas -= fee
compustate.pc += 1
# Tracing
if trace_vm:
"""
This diverges from normal logging, as we use the logging namespace
only to decide which features get logged in 'eth.vm.op'
i.e. tracing can not be activated by activating a sub
like 'eth.vm.op.stack'
"""
trace_data = {}
trace_data['stack'] = list(map(to_string, list(compustate.stack)))
if _prevop in ('MLOAD', 'MSTORE', 'MSTORE8', 'SHA3', 'CALL',
'CALLCODE', 'CREATE', 'CALLDATACOPY', 'CODECOPY',
'EXTCODECOPY'):
if len(compustate.memory) < 4096:
trace_data['memory'] = \
''.join([encode_hex(ascii_chr(x)) for x
in compustate.memory])
else:
trace_data['sha3memory'] = \
encode_hex(utils.sha3(b''.join([ascii_chr(x) for
x in compustate.memory])))
if _prevop in ('SSTORE',) or steps == 0:
trace_data['storage'] = ext.log_storage(msg.to)
trace_data['gas'] = to_string(compustate.gas + fee)
trace_data['inst'] = opcode
trace_data['pc'] = to_string(compustate.pc - 1)
if steps == 0:
trace_data['depth'] = msg.depth
trace_data['address'] = msg.to
trace_data['steps'] = steps
trace_data['depth'] = msg.depth
if op[:4] == 'PUSH':
trace_data['pushvalue'] = pushcache[compustate.pc - 1]
log_vm_op.trace('vm', op=op, **trace_data)
steps += 1
_prevop = op
# out of gas error
if compustate.gas < 0:
return vm_exception('OUT OF GAS')
# empty stack error
if in_args > len(compustate.stack):
return vm_exception('INSUFFICIENT STACK',
op=op, needed=to_string(in_args),
available=to_string(len(compustate.stack)))
# overfull stack error
if len(compustate.stack) - in_args + out_args > 1024:
return vm_exception('STACK SIZE LIMIT EXCEEDED',
op=op,
pre_height=to_string(len(compustate.stack)))
# Valid operations
# Pushes first because they are very frequent
if 0x60 <= opcode <= 0x7f:
stk.append(pushcache[compustate.pc - 1])
# Move 1 byte forward for 0x60, up to 32 bytes for 0x7f
compustate.pc += opcode - 0x5f
# Arithmetic
elif opcode < 0x10:
if op == 'STOP':
return peaceful_exit('STOP', compustate.gas, [])
elif op == 'ADD':
stk.append((stk.pop() + stk.pop()) & TT256M1)
elif op == 'SUB':
stk.append((stk.pop() - stk.pop()) & TT256M1)
elif op == 'MUL':
stk.append((stk.pop() * stk.pop()) & TT256M1)
elif op == 'DIV':
s0, s1 = stk.pop(), stk.pop()
stk.append(0 if s1 == 0 else s0 // s1)
elif op == 'MOD':
s0, s1 = stk.pop(), stk.pop()
stk.append(0 if s1 == 0 else s0 % s1)
elif op == 'SDIV':
s0, s1 = utils.to_signed(stk.pop()), utils.to_signed(stk.pop())
stk.append(0 if s1 == 0 else (abs(s0) // abs(s1) *
(-1 if s0 * s1 < 0 else 1)) & TT256M1)
elif op == 'SMOD':
s0, s1 = utils.to_signed(stk.pop()), utils.to_signed(stk.pop())
stk.append(0 if s1 == 0 else (abs(s0) % abs(s1) *
(-1 if s0 < 0 else 1)) & TT256M1)
elif op == 'ADDMOD':
s0, s1, s2 = stk.pop(), stk.pop(), stk.pop()
stk.append((s0 + s1) % s2 if s2 else 0)
elif op == 'MULMOD':
s0, s1, s2 = stk.pop(), stk.pop(), stk.pop()
stk.append((s0 * s1) % s2 if s2 else 0)
elif op == 'EXP':
base, exponent = stk.pop(), stk.pop()
# fee for exponent is dependent on its bytes
# calc n bytes to represent exponent
nbytes = len(utils.encode_int(exponent))
expfee = nbytes * opcodes.GEXPONENTBYTE
if ext.post_spurious_dragon_hardfork():
expfee += opcodes.EXP_SUPPLEMENTAL_GAS * nbytes
if compustate.gas < expfee:
compustate.gas = 0
return vm_exception('OOG EXPONENT')
compustate.gas -= expfee
stk.append(pow(base, exponent, TT256))
elif op == 'SIGNEXTEND':
s0, s1 = stk.pop(), stk.pop()
if s0 <= 31:
testbit = s0 * 8 + 7
if s1 & (1 << testbit):
stk.append(s1 | (TT256 - (1 << testbit)))
else:
stk.append(s1 & ((1 << testbit) - 1))
else:
stk.append(s1)
# Comparisons
elif opcode < 0x20:
if op == 'LT':
stk.append(1 if stk.pop() < stk.pop() else 0)
elif op == 'GT':
stk.append(1 if stk.pop() > stk.pop() else 0)
elif op == 'SLT':
s0, s1 = utils.to_signed(stk.pop()), utils.to_signed(stk.pop())
stk.append(1 if s0 < s1 else 0)
elif op == 'SGT':
s0, s1 = utils.to_signed(stk.pop()), utils.to_signed(stk.pop())
stk.append(1 if s0 > s1 else 0)
elif op == 'EQ':
stk.append(1 if stk.pop() == stk.pop() else 0)
elif op == 'ISZERO':
stk.append(0 if stk.pop() else 1)
elif op == 'AND':
stk.append(stk.pop() & stk.pop())
elif op == 'OR':
stk.append(stk.pop() | stk.pop())
elif op == 'XOR':
stk.append(stk.pop() ^ stk.pop())
elif op == 'NOT':
stk.append(TT256M1 - stk.pop())
elif op == 'BYTE':
s0, s1 = stk.pop(), stk.pop()
if s0 >= 32:
stk.append(0)
else:
stk.append((s1 // 256 ** (31 - s0)) % 256)
# SHA3 and environment info
elif opcode < 0x40:
if op == 'SHA3':
s0, s1 = stk.pop(), stk.pop()
compustate.gas -= opcodes.GSHA3WORD * (utils.ceil32(s1) // 32)
if compustate.gas < 0:
return vm_exception('OOG PAYING FOR SHA3')
if not mem_extend(mem, compustate, op, s0, s1):
return vm_exception('OOG EXTENDING MEMORY')
data = bytearray_to_bytestr(mem[s0: s0 + s1])
stk.append(utils.big_endian_to_int(utils.sha3(data)))
elif op == 'ADDRESS':
stk.append(utils.coerce_to_int(msg.to))
elif op == 'BALANCE':
if ext.post_anti_dos_hardfork():
if not eat_gas(compustate,
opcodes.BALANCE_SUPPLEMENTAL_GAS):
return vm_exception("OUT OF GAS")
addr = utils.coerce_addr_to_hex(stk.pop() % 2**160)
stk.append(ext.get_balance(addr))
elif op == 'ORIGIN':
stk.append(utils.coerce_to_int(ext.tx_origin))
elif op == 'CALLER':
stk.append(utils.coerce_to_int(msg.sender))
elif op == 'CALLVALUE':
stk.append(msg.value)
elif op == 'CALLDATALOAD':
stk.append(msg.data.extract32(stk.pop()))
elif op == 'CALLDATASIZE':
stk.append(msg.data.size)
elif op == 'CALLDATACOPY':
mstart, dstart, size = stk.pop(), stk.pop(), stk.pop()
if not mem_extend(mem, compustate, op, mstart, size):
return vm_exception('OOG EXTENDING MEMORY')
if not data_copy(compustate, size):
return vm_exception('OOG COPY DATA')
msg.data.extract_copy(mem, mstart, dstart, size)
elif op == 'CODESIZE':
stk.append(codelen)
elif op == 'CODECOPY':
mstart, dstart, size = stk.pop(), stk.pop(), stk.pop()
if not mem_extend(mem, compustate, op, mstart, size):
return vm_exception('OOG EXTENDING MEMORY')
if not data_copy(compustate, size):
return vm_exception('OOG COPY DATA')
for i in range(size):
if dstart + i < codelen:
mem[mstart + i] = safe_ord(code[dstart + i])
else:
mem[mstart + i] = 0
elif op == 'RETURNDATACOPY':
mstart, dstart, size = stk.pop(), stk.pop(), stk.pop()
if not mem_extend(mem, compustate, op, mstart, size):
return vm_exception('OOG EXTENDING MEMORY')
if not data_copy(compustate, size):
return vm_exception('OOG COPY DATA')
if dstart + size > len(compustate.last_returned):
return vm_exception('RETURNDATACOPY out of range')
mem[mstart: mstart + size] = compustate.last_returned[dstart: dstart + size]
elif op == 'RETURNDATASIZE':
stk.append(len(compustate.last_returned))
elif op == 'GASPRICE':
stk.append(ext.tx_gasprice)
elif op == 'EXTCODESIZE':
if ext.post_anti_dos_hardfork():
if not eat_gas(compustate,
opcodes.EXTCODELOAD_SUPPLEMENTAL_GAS):
return vm_exception("OUT OF GAS")
addr = utils.coerce_addr_to_hex(stk.pop() % 2**160)
stk.append(len(ext.get_code(addr) or b''))
elif op == 'EXTCODECOPY':
if ext.post_anti_dos_hardfork():
if not eat_gas(compustate,
opcodes.EXTCODELOAD_SUPPLEMENTAL_GAS):
return vm_exception("OUT OF GAS")
addr = utils.coerce_addr_to_hex(stk.pop() % 2**160)
start, s2, size = stk.pop(), stk.pop(), stk.pop()
extcode = ext.get_code(addr) or b''
assert utils.is_string(extcode)
if not mem_extend(mem, compustate, op, start, size):
return vm_exception('OOG EXTENDING MEMORY')
if not data_copy(compustate, size):
return vm_exception('OOG COPY DATA')
for i in range(size):
if s2 + i < len(extcode):
mem[start + i] = safe_ord(extcode[s2 + i])
else:
mem[start + i] = 0
# Block info
elif opcode < 0x50:
if op == 'BLOCKHASH':
                if ext.post_constantinople_hardfork() and False:  # 'and False': path intentionally disabled
bh_addr = ext.blockhash_store
stk.append(ext.get_storage_data(bh_addr, stk.pop()))
else:
stk.append(
utils.big_endian_to_int(
ext.block_hash(
stk.pop())))
elif op == 'COINBASE':
stk.append(utils.big_endian_to_int(ext.block_coinbase))
elif op == 'TIMESTAMP':
stk.append(ext.block_timestamp)
elif op == 'NUMBER':
stk.append(ext.block_number)
elif op == 'DIFFICULTY':
stk.append(ext.block_difficulty)
elif op == 'GASLIMIT':
stk.append(ext.block_gas_limit)
# VM state manipulations
elif opcode < 0x60:
if op == 'POP':
stk.pop()
elif op == 'MLOAD':
s0 = stk.pop()
if not mem_extend(mem, compustate, op, s0, 32):
return vm_exception('OOG EXTENDING MEMORY')
stk.append(utils.bytes_to_int(mem[s0: s0 + 32]))
elif op == 'MSTORE':
s0, s1 = stk.pop(), stk.pop()
if not mem_extend(mem, compustate, op, s0, 32):
return vm_exception('OOG EXTENDING MEMORY')
mem[s0: s0 + 32] = utils.encode_int32(s1)
elif op == 'MSTORE8':
s0, s1 = stk.pop(), stk.pop()
if not mem_extend(mem, compustate, op, s0, 1):
return vm_exception('OOG EXTENDING MEMORY')
mem[s0] = s1 % 256
elif op == 'SLOAD':
if ext.post_anti_dos_hardfork():
if not eat_gas(compustate, opcodes.SLOAD_SUPPLEMENTAL_GAS):
return vm_exception("OUT OF GAS")
stk.append(ext.get_storage_data(msg.to, stk.pop()))
elif op == 'SSTORE':
s0, s1 = stk.pop(), stk.pop()
if msg.static:
return vm_exception(
'Cannot SSTORE inside a static context')
if ext.get_storage_data(msg.to, s0):
gascost = opcodes.GSTORAGEMOD if s1 else opcodes.GSTORAGEKILL
refund = 0 if s1 else opcodes.GSTORAGEREFUND
else:
gascost = opcodes.GSTORAGEADD if s1 else opcodes.GSTORAGEMOD
refund = 0
if compustate.gas < gascost:
return vm_exception('OUT OF GAS')
compustate.gas -= gascost
# adds neg gascost as a refund if below zero
ext.add_refund(refund)
ext.set_storage_data(msg.to, s0, s1)
elif op == 'JUMP':
compustate.pc = stk.pop()
if compustate.pc >= codelen or not (
(1 << compustate.pc) & jumpdest_mask):
return vm_exception('BAD JUMPDEST')
elif op == 'JUMPI':
s0, s1 = stk.pop(), stk.pop()
if s1:
compustate.pc = s0
if compustate.pc >= codelen or not (
(1 << compustate.pc) & jumpdest_mask):
return vm_exception('BAD JUMPDEST')
elif op == 'PC':
stk.append(compustate.pc - 1)
elif op == 'MSIZE':
stk.append(len(mem))
elif op == 'GAS':
stk.append(compustate.gas) # AFTER subtracting cost 1
# DUPn (eg. DUP1: a b c -> a b c c, DUP3: a b c -> a b c a)
elif op[:3] == 'DUP':
# 0x7f - opcode is a negative number, -1 for 0x80 ... -16 for 0x8f
stk.append(stk[0x7f - opcode])
# SWAPn (eg. SWAP1: a b c d -> a b d c, SWAP3: a b c d -> d b c a)
elif op[:4] == 'SWAP':
# 0x8e - opcode is a negative number, -2 for 0x90 ... -17 for 0x9f
temp = stk[0x8e - opcode]
stk[0x8e - opcode] = stk[-1]
stk[-1] = temp
# Logs (aka "events")
elif op[:3] == 'LOG':
"""
0xa0 ... 0xa4, 32/64/96/128/160 + len(data) gas
a. Opcodes LOG0...LOG4 are added, takes 2-6 stack arguments
MEMSTART MEMSZ (TOPIC1) (TOPIC2) (TOPIC3) (TOPIC4)
b. Logs are kept track of during tx execution exactly the same way as suicides
(except as an ordered list, not a set).
Each log is in the form [address, [topic1, ... ], data] where:
* address is what the ADDRESS opcode would output
* data is mem[MEMSTART: MEMSTART + MEMSZ]
* topics are as provided by the opcode
c. The ordered list of logs in the transaction are expressed as [log0, log1, ..., logN].
"""
depth = int(op[3:])
mstart, msz = stk.pop(), stk.pop()
topics = [stk.pop() for x in range(depth)]
compustate.gas -= msz * opcodes.GLOGBYTE
if msg.static:
return vm_exception('Cannot LOG inside a static context')
if not mem_extend(mem, compustate, op, mstart, msz):
return vm_exception('OOG EXTENDING MEMORY')
data = bytearray_to_bytestr(mem[mstart: mstart + msz])
ext.log(msg.to, topics, data)
log_log.trace('LOG', to=msg.to, topics=topics,
data=list(map(utils.safe_ord, data)))
# print('LOG', msg.to, topics, list(map(ord, data)))
# Create a new contract
elif op == 'CREATE':
value, mstart, msz = stk.pop(), stk.pop(), stk.pop()
if not mem_extend(mem, compustate, op, mstart, msz):
return vm_exception('OOG EXTENDING MEMORY')
if msg.static:
return vm_exception('Cannot CREATE inside a static context')
if ext.get_balance(msg.to) >= value and msg.depth < MAX_DEPTH:
cd = CallData(mem, mstart, msz)
ingas = compustate.gas
if ext.post_anti_dos_hardfork():
ingas = all_but_1n(ingas, opcodes.CALL_CHILD_LIMIT_DENOM)
create_msg = Message(msg.to, b'', value, ingas, cd, msg.depth + 1)
o, gas, data = ext.create(create_msg)
if o:
stk.append(utils.coerce_to_int(data))
compustate.last_returned = bytearray(b'')
else:
stk.append(0)
compustate.last_returned = bytearray(data)
compustate.gas = compustate.gas - ingas + gas
else:
stk.append(0)
compustate.last_returned = bytearray(b'')
# Calls
elif op in ('CALL', 'CALLCODE', 'DELEGATECALL', 'STATICCALL'):
# Pull arguments from the stack
if op in ('CALL', 'CALLCODE'):
gas, to, value, meminstart, meminsz, memoutstart, memoutsz = \
stk.pop(), stk.pop(), stk.pop(), stk.pop(), stk.pop(), stk.pop(), stk.pop()
else:
gas, to, meminstart, meminsz, memoutstart, memoutsz = \
stk.pop(), stk.pop(), stk.pop(), stk.pop(), stk.pop(), stk.pop()
value = 0
# Static context prohibition
if msg.static and value > 0 and op == 'CALL':
return vm_exception(
'Cannot make a non-zero-value call inside a static context')
# Expand memory
if not mem_extend(mem, compustate, op, meminstart, meminsz) or \
not mem_extend(mem, compustate, op, memoutstart, memoutsz):
return vm_exception('OOG EXTENDING MEMORY')
to = utils.int_to_addr(to)
# Extra gas costs based on various factors
extra_gas = 0
# Creating a new account
if op == 'CALL' and not ext.account_exists(to) and (
value > 0 or not ext.post_spurious_dragon_hardfork()):
extra_gas += opcodes.GCALLNEWACCOUNT
# Value transfer
if value > 0:
extra_gas += opcodes.GCALLVALUETRANSFER
# Cost increased from 40 to 700 in Tangerine Whistle
if ext.post_anti_dos_hardfork():
extra_gas += opcodes.CALL_SUPPLEMENTAL_GAS
# Compute child gas limit
if ext.post_anti_dos_hardfork():
if compustate.gas < extra_gas:
return vm_exception('OUT OF GAS', needed=extra_gas)
gas = min(
gas,
all_but_1n(
compustate.gas -
extra_gas,
opcodes.CALL_CHILD_LIMIT_DENOM))
else:
if compustate.gas < gas + extra_gas:
return vm_exception('OUT OF GAS', needed=gas + extra_gas)
submsg_gas = gas + opcodes.GSTIPEND * (value > 0)
# Verify that there is sufficient balance and depth
if ext.get_balance(msg.to) < value or msg.depth >= MAX_DEPTH:
compustate.gas -= (gas + extra_gas - submsg_gas)
stk.append(0)
compustate.last_returned = bytearray(b'')
else:
# Subtract gas from parent
compustate.gas -= (gas + extra_gas)
assert compustate.gas >= 0
cd = CallData(mem, meminstart, meminsz)
# Generate the message
if op == 'CALL':
call_msg = Message(msg.to, to, value, submsg_gas, cd,
msg.depth + 1, code_address=to, static=msg.static)
elif ext.post_homestead_hardfork() and op == 'DELEGATECALL':
call_msg = Message(msg.sender, msg.to, msg.value, submsg_gas, cd,
msg.depth + 1, code_address=to, transfers_value=False, static=msg.static)
elif ext.post_metropolis_hardfork() and op == 'STATICCALL':
call_msg = Message(msg.to, to, value, submsg_gas, cd,
msg.depth + 1, code_address=to, static=True)
elif op in ('DELEGATECALL', 'STATICCALL'):
return vm_exception('OPCODE %s INACTIVE' % op)
elif op == 'CALLCODE':
call_msg = Message(msg.to, msg.to, value, submsg_gas, cd,
msg.depth + 1, code_address=to, static=msg.static)
else:
raise Exception("Lolwut")
# Get result
result, gas, data = ext.msg(call_msg)
if result == 0:
stk.append(0)
else:
stk.append(1)
# Set output memory
for i in range(min(len(data), memoutsz)):
mem[memoutstart + i] = data[i]
compustate.gas += gas
compustate.last_returned = bytearray(data)
# Return opcode
elif op == 'RETURN':
s0, s1 = stk.pop(), stk.pop()
if not mem_extend(mem, compustate, op, s0, s1):
return vm_exception('OOG EXTENDING MEMORY')
return peaceful_exit('RETURN', compustate.gas, mem[s0: s0 + s1])
# Revert opcode (Metropolis)
elif op == 'REVERT':
if not ext.post_metropolis_hardfork():
return vm_exception('Opcode not yet enabled')
s0, s1 = stk.pop(), stk.pop()
if not mem_extend(mem, compustate, op, s0, s1):
return vm_exception('OOG EXTENDING MEMORY')
return revert(compustate.gas, mem[s0: s0 + s1])
# SUICIDE opcode (also called SELFDESTRUCT)
elif op == 'SUICIDE':
if msg.static:
return vm_exception('Cannot SUICIDE inside a static context')
to = utils.encode_int(stk.pop())
to = ((b'\x00' * (32 - len(to))) + to)[12:]
xfer = ext.get_balance(msg.to)
if ext.post_anti_dos_hardfork():
extra_gas = opcodes.SUICIDE_SUPPLEMENTAL_GAS + \
(not ext.account_exists(to)) * (xfer >
0 or not ext.post_spurious_dragon_hardfork()) * opcodes.GCALLNEWACCOUNT
if not eat_gas(compustate, extra_gas):
return vm_exception("OUT OF GAS")
ext.set_balance(to, ext.get_balance(to) + xfer)
ext.set_balance(msg.to, 0)
ext.add_suicide(msg.to)
log_msg.debug(
'SUICIDING',
addr=utils.checksum_encode(
msg.to),
to=utils.checksum_encode(to),
xferring=xfer)
return peaceful_exit('SUICIDED', compustate.gas, [])
if trace_vm:
vm_trace(ext, msg, compustate, opcode, pushcache)
if trace_vm:
compustate.reset_prev()
vm_trace(ext, msg, compustate, 0, None)
return peaceful_exit('CODE OUT OF RANGE', compustate.gas, [])
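# A hedged sketch of the DUP/SWAP index arithmetic used in vm_execute above,
# in isolation: negative list indices address the stack from the top, so
# 0x7f - opcode and 0x8e - opcode pick out the right elements.
def _demo_dup_swap_indexing():
    stk = [10, 20, 30]
    opcode = 0x81                     # DUP2: copy second-from-top
    stk.append(stk[0x7f - opcode])    # 0x7f - 0x81 == -2
    assert stk == [10, 20, 30, 20]
    opcode = 0x90                     # SWAP1: exchange the top two items
    stk[0x8e - opcode], stk[-1] = stk[-1], stk[0x8e - opcode]
    assert stk == [10, 20, 20, 30]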
# A stub that's mainly here to show what you would need to implement to
# hook into the EVM
class VmExtBase():
def __init__(self):
self.get_code = lambda addr: b''
self.get_balance = lambda addr: 0
self.set_balance = lambda addr, balance: 0
self.set_storage_data = lambda addr, key, value: 0
self.get_storage_data = lambda addr, key: 0
self.log_storage = lambda addr: 0
self.add_suicide = lambda addr: 0
self.add_refund = lambda x: 0
self.block_prevhash = 0
self.block_coinbase = 0
self.block_timestamp = 0
self.block_number = 0
self.block_difficulty = 0
self.block_gas_limit = 0
self.log = lambda addr, topics, data: 0
self.tx_origin = b'0' * 40
self.tx_gasprice = 0
        self.create = lambda msg: (0, 0, 0)
        self.call = lambda msg: (0, 0, 0)
        self.sendmsg = lambda msg: (0, 0, 0)
|
|
import os
import sys
from functools import wraps
from getpass import getpass, getuser
from glob import glob
from contextlib import contextmanager
from fabric.api import env, cd, prefix, sudo as _sudo, run as _run, hide, task
from fabric.contrib.files import exists, upload_template
from fabric.colors import yellow, green, blue, red
################
# Config setup #
################
conf = {}
if sys.argv[0].split(os.sep)[-1] == "fab":
# Ensure we import settings from the current dir
try:
conf = __import__("settings", globals(), locals(), [], 0).FABRIC
try:
conf["HOSTS"][0]
except (KeyError, ValueError):
raise ImportError
except (ImportError, AttributeError):
print "Aborting, no hosts defined."
exit()
env.db_pass = conf.get("DB_PASS", None)
env.admin_pass = conf.get("ADMIN_PASS", None)
env.user = conf.get("SSH_USER", getuser())
env.password = conf.get("SSH_PASS", None)
env.key_filename = conf.get("SSH_KEY_PATH", None)
env.hosts = conf.get("HOSTS", [])
env.proj_name = conf.get("PROJECT_NAME", os.getcwd().split(os.sep)[-1])
env.venv_home = conf.get("VIRTUALENV_HOME", "/home/%s" % env.user)
env.venv_path = "%s/%s" % (env.venv_home, env.proj_name)
env.proj_dirname = "project"
env.proj_path = "%s/%s" % (env.venv_path, env.proj_dirname)
env.manage = "%s/bin/python %s/project/manage.py" % (env.venv_path,
env.venv_path)
env.live_host = conf.get("LIVE_HOSTNAME", env.hosts[0] if env.hosts else None)
env.repo_url = conf.get("REPO_URL", None)
env.reqs_path = conf.get("REQUIREMENTS_PATH", None)
env.gunicorn_port = conf.get("GUNICORN_PORT", 8000)
env.locale = conf.get("LOCALE", "en_US.UTF-8")
##################
# Template setup #
##################
# Each template gets uploaded at deploy time, but only if its
# contents have changed, in which case the reload command is
# also run.
templates = {
"nginx": {
"local_path": "deploy/nginx.conf",
"remote_path": "/etc/nginx/sites-enabled/%(proj_name)s.conf",
"reload_command": "service nginx restart",
},
"supervisor": {
"local_path": "deploy/supervisor.conf",
"remote_path": "/etc/supervisor/conf.d/%(proj_name)s.conf",
"reload_command": "supervisorctl reload",
},
"cron": {
"local_path": "deploy/crontab",
"remote_path": "/etc/cron.d/%(proj_name)s",
"owner": "root",
"mode": "600",
},
"gunicorn": {
"local_path": "deploy/gunicorn.conf.py",
"remote_path": "%(proj_path)s/gunicorn.conf.py",
},
"settings": {
"local_path": "deploy/live_settings.py",
"remote_path": "%(proj_path)s/local_settings.py",
},
}
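# A minimal sketch (hypothetical values, no remote host needed) of how the
# "%(name)s" placeholders above are interpolated from the fabric env dict,
# which is exactly what get_templates() below does for real.
def _demo_template_interpolation():
    fake_env = {"proj_name": "myproj", "proj_path": "/srv/myproj/project"}
    remote = templates["nginx"]["remote_path"] % fake_env
    assert remote == "/etc/nginx/sites-enabled/myproj.conf"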
######################################
# Context for virtualenv and project #
######################################
@contextmanager
def virtualenv():
"""
Runs commands within the project's virtualenv.
"""
with cd(env.venv_path):
with prefix("source %s/bin/activate" % env.venv_path):
yield
@contextmanager
def project():
"""
Runs commands within the project's directory.
"""
with virtualenv():
with cd(env.proj_dirname):
yield
@contextmanager
def update_changed_requirements():
"""
Checks for changes in the requirements file across an update,
and gets new requirements if changes have occurred.
"""
reqs_path = os.path.join(env.proj_path, env.reqs_path)
get_reqs = lambda: run("cat %s" % reqs_path, show=False)
old_reqs = get_reqs() if env.reqs_path else ""
yield
if old_reqs:
new_reqs = get_reqs()
if old_reqs == new_reqs:
# Unpinned requirements should always be checked.
for req in new_reqs.split("\n"):
if req.startswith("-e"):
if "@" not in req:
# Editable requirement without pinned commit.
break
elif req.strip() and not req.startswith("#"):
if not set(">=<") & set(req):
# PyPI requirement without version.
break
else:
# All requirements are pinned.
return
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
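# The pin-detection walk above, extracted as a pure function (hypothetical
# helper, kept for illustration only): an editable requirement without "@"
# or a bare PyPI name without any of >=< means something is unpinned, so
# pip must run even when the requirements file is unchanged.
def _demo_has_unpinned(reqs):
    for req in reqs.split("\n"):
        if req.startswith("-e"):
            if "@" not in req:
                return True           # editable, no pinned commit
        elif req.strip() and not req.startswith("#"):
            if not set(">=<") & set(req):
                return True           # bare name, no version pin
    return False
# e.g. _demo_has_unpinned("Django==1.6\nsouth") returns True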
###########################################
# Utils and wrappers for various commands #
###########################################
def _print(output):
print
print output
print
def print_command(command):
_print(blue("$ ", bold=True) +
yellow(command, bold=True) +
red(" ->", bold=True))
@task
def run(command, show=True):
"""
    Runs a shell command on the remote server.
"""
if show:
print_command(command)
with hide("running"):
return _run(command)
@task
def sudo(command, show=True):
"""
Runs a command as sudo.
"""
if show:
print_command(command)
with hide("running"):
return _sudo(command)
def log_call(func):
@wraps(func)
    def logged(*args, **kwargs):
header = "-" * len(func.__name__)
_print(green("\n".join([header, func.__name__, header]), bold=True))
        return func(*args, **kwargs)
return logged
def get_templates():
"""
Returns each of the templates with env vars injected.
"""
injected = {}
for name, data in templates.items():
injected[name] = dict([(k, v % env) for k, v in data.items()])
return injected
def upload_template_and_reload(name):
"""
    Uploads a template only if it has changed, and if so, reloads
    the related service.
"""
template = get_templates()[name]
local_path = template["local_path"]
remote_path = template["remote_path"]
reload_command = template.get("reload_command")
owner = template.get("owner")
mode = template.get("mode")
remote_data = ""
if exists(remote_path):
with hide("stdout"):
remote_data = sudo("cat %s" % remote_path, show=False)
with open(local_path, "r") as f:
local_data = f.read()
if "%(db_pass)s" in local_data:
env.db_pass = db_pass()
local_data %= env
clean = lambda s: s.replace("\n", "").replace("\r", "").strip()
if clean(remote_data) == clean(local_data):
return
upload_template(local_path, remote_path, env, use_sudo=True, backup=False)
if owner:
sudo("chown %s %s" % (owner, remote_path))
if mode:
sudo("chmod %s %s" % (mode, remote_path))
if reload_command:
sudo(reload_command)
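# The change-detection trick above in isolation: newline and surrounding
# whitespace differences are stripped before comparing, so EOL conversions
# during transfer do not force a needless re-upload (pure-Python sketch).
def _demo_clean_compare():
    clean = lambda s: s.replace("\n", "").replace("\r", "").strip()
    assert clean("a\r\nb\n") == clean("ab")
    assert clean("  ab  ") == "ab"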
def db_pass():
"""
Prompts for the database password if unknown.
"""
if not env.db_pass:
env.db_pass = getpass("Enter the database password: ")
return env.db_pass
@task
def apt(packages):
"""
Installs one or more system packages via apt.
"""
return sudo("apt-get install -y -q " + packages)
@task
def pip(packages):
"""
Installs one or more Python packages within the virtual environment.
"""
with virtualenv():
return sudo("pip install %s" % packages)
def postgres(command):
"""
Runs the given command as the postgres user.
"""
show = not command.startswith("psql")
return run("sudo -u root sudo -u postgres %s" % command, show=show)
@task
def psql(sql, show=True):
"""
Runs SQL against the project's database.
"""
out = postgres('psql -c "%s"' % sql)
if show:
print_command(sql)
return out
@task
def backup(filename):
"""
Backs up the database.
"""
return postgres("pg_dump -Fc %s > %s" % (env.proj_name, filename))
@task
def restore(filename):
"""
Restores the database.
"""
return postgres("pg_restore -c -d %s %s" % (env.proj_name, filename))
@task
def python(code, show=True):
"""
Runs Python code in the project's virtual environment, with Django loaded.
"""
setup = "import os; os.environ[\'DJANGO_SETTINGS_MODULE\']=\'settings\';"
full_code = 'python -c "%s%s"' % (setup, code.replace("`", "\\\`"))
with project():
result = run(full_code, show=False)
if show:
print_command(code)
return result
def static():
"""
Returns the live STATIC_ROOT directory.
"""
return python("from django.conf import settings;"
"print settings.STATIC_ROOT").split("\n")[-1]
@task
def manage(command):
"""
Runs a Django management command.
"""
return run("%s %s" % (env.manage, command))
#########################
# Install and configure #
#########################
@task
@log_call
def install():
"""
Installs the base system and Python requirements for the entire server.
"""
locale = "LC_ALL=%s" % env.locale
with hide("stdout"):
if locale not in sudo("cat /etc/default/locale"):
sudo("update-locale %s" % locale)
run("exit")
sudo("apt-get update -y -q")
apt("nginx libjpeg-dev python-dev python-setuptools git-core "
"postgresql libpq-dev memcached supervisor")
sudo("easy_install pip")
sudo("pip install virtualenv mercurial")
@task
@log_call
def create():
"""
Create a new virtual environment for a project.
Pulls the project's repo from version control, adds system-level
configs for the project, and initialises the database with the
live host.
"""
# Create virtualenv
with cd(env.venv_home):
if exists(env.proj_name):
prompt = raw_input("\nVirtualenv exists: %s\nWould you like "
"to replace it? (yes/no) " % env.proj_name)
if prompt.lower() != "yes":
print "\nAborting!"
return False
remove()
run("virtualenv %s --distribute" % env.proj_name)
vcs = "git" if env.repo_url.startswith("git") else "hg"
run("%s clone %s %s" % (vcs, env.repo_url, env.proj_path))
# Create DB and DB user.
pw = db_pass()
    user_sql_args = (env.proj_name, pw.replace("'", "''"))  # double quotes to escape for SQL
user_sql = "CREATE USER %s WITH ENCRYPTED PASSWORD '%s';" % user_sql_args
psql(user_sql, show=False)
shadowed = "*" * len(pw)
print_command(user_sql.replace("'%s'" % pw, "'%s'" % shadowed))
psql("CREATE DATABASE %s WITH OWNER %s ENCODING = 'UTF8' "
"LC_CTYPE = '%s' LC_COLLATE = '%s' TEMPLATE template0;" %
(env.proj_name, env.proj_name, env.locale, env.locale))
# Set up SSL certificate.
conf_path = "/etc/nginx/conf"
if not exists(conf_path):
sudo("mkdir %s" % conf_path)
with cd(conf_path):
crt_file = env.proj_name + ".crt"
key_file = env.proj_name + ".key"
if not exists(crt_file) and not exists(key_file):
try:
crt_local, = glob(os.path.join("deploy", "*.crt"))
key_local, = glob(os.path.join("deploy", "*.key"))
except ValueError:
parts = (crt_file, key_file, env.live_host)
sudo("openssl req -new -x509 -nodes -out %s -keyout %s "
"-subj '/CN=%s' -days 3650" % parts)
else:
                upload_template(crt_local, crt_file, use_sudo=True)
                upload_template(key_local, key_file, use_sudo=True)
# Set up project.
upload_template_and_reload("settings")
with project():
if env.reqs_path:
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
pip("gunicorn setproctitle south psycopg2 "
"django-compressor python-memcached")
manage("createdb --noinput --nodata")
python("from django.conf import settings;"
"from django.contrib.sites.models import Site;"
"site, _ = Site.objects.get_or_create(id=settings.SITE_ID);"
"site.domain = '" + env.live_host + "';"
"site.save();")
if env.admin_pass:
pw = env.admin_pass
user_py = ("from django.contrib.auth.models import User;"
"u, _ = User.objects.get_or_create(username='admin');"
"u.is_staff = u.is_superuser = True;"
"u.set_password('%s');"
"u.save();" % pw)
python(user_py, show=False)
shadowed = "*" * len(pw)
print_command(user_py.replace("'%s'" % pw, "'%s'" % shadowed))
return True
@task
@log_call
def remove():
"""
Blow away the current project.
"""
if exists(env.venv_path):
sudo("rm -rf %s" % env.venv_path)
for template in get_templates().values():
remote_path = template["remote_path"]
if exists(remote_path):
sudo("rm %s" % remote_path)
psql("DROP DATABASE %s;" % env.proj_name)
psql("DROP USER %s;" % env.proj_name)
##############
# Deployment #
##############
@task
@log_call
def restart():
"""
Restart gunicorn worker processes for the project.
"""
pid_path = "%s/gunicorn.pid" % env.proj_path
if exists(pid_path):
sudo("kill -HUP `cat %s`" % pid_path)
else:
start_args = (env.proj_name, env.proj_name)
sudo("supervisorctl start %s:gunicorn_%s" % start_args)
@task
@log_call
def deploy():
"""
Deploy latest version of the project.
Check out the latest version of the project from version
control, install new requirements, sync and migrate the database,
collect any new static assets, and restart gunicorn's work
processes for the project.
"""
if not exists(env.venv_path):
prompt = raw_input("\nVirtualenv doesn't exist: %s\nWould you like "
"to create it? (yes/no) " % env.proj_name)
if prompt.lower() != "yes":
print "\nAborting!"
return False
create()
for name in get_templates():
upload_template_and_reload(name)
with project():
backup("last.db")
run("tar -cf last.tar %s" % static())
git = env.repo_url.startswith("git")
        run("%s > last.commit" % ("git rev-parse HEAD" if git else "hg id -i"))
with update_changed_requirements():
run("git pull origin master -f" if git else "hg pull && hg up -C")
manage("collectstatic -v 0 --noinput")
manage("syncdb --noinput")
manage("migrate --noinput")
restart()
return True
@task
@log_call
def rollback():
"""
Reverts project state to the last deploy.
When a deploy is performed, the current state of the project is
backed up. This includes the last commit checked out, the database,
and all static files. Calling rollback will revert all of these to
their state prior to the last deploy.
"""
with project():
with update_changed_requirements():
git = env.repo_url.startswith("git")
update = "git checkout" if git else "hg up -C"
run("%s `cat last.commit`" % update)
with cd(os.path.join(static(), "..")):
run("tar -xf %s" % os.path.join(env.proj_path, "last.tar"))
restore("last.db")
restart()
@task
@log_call
def all():
"""
Installs everything required on a new system and deploy.
From the base software, up to the deployed project.
"""
install()
if create():
deploy()
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2018 by Brendt Wohlberg <brendt@ieee.org>
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""ADMM algorithms for the Convolutional Constrained MOD problem with
Mask Decoupling"""
from __future__ import division, absolute_import
import copy
import numpy as np
from sporco.admm import admm
from sporco.admm import ccmod
import sporco.cnvrep as cr
import sporco.linalg as sl
from sporco.common import _fix_dynamic_class_lookup
from sporco.fft import rfftn, irfftn, empty_aligned, rfftn_empty_aligned
__author__ = """Brendt Wohlberg <brendt@ieee.org>"""
class ConvCnstrMODMaskDcplBase(admm.ADMMTwoBlockCnstrnt):
r"""
Base class for ADMM algorithms for Convolutional Constrained MOD
with Mask Decoupling :cite:`heide-2015-fast`.
|
.. inheritance-diagram:: ConvCnstrMODMaskDcplBase
:parts: 2
|
Solve the optimisation problem
.. math::
\mathrm{argmin}_\mathbf{d} \;
(1/2) \left\| W \left(\sum_m \mathbf{d}_m * \mathbf{x}_m -
\mathbf{s}\right) \right\|_2^2 \quad \text{such that} \quad
\mathbf{d}_m \in C \;\; \forall m
where :math:`C` is the feasible set consisting of filters with unit
norm and constrained support, and :math:`W` is a mask array, via the
ADMM problem
.. math::
\mathrm{argmin}_{\mathbf{d},\mathbf{g}_0,\mathbf{g}_1} \;
(1/2) \| W \mathbf{g}_0 \|_2^2 + \iota_C(\mathbf{g}_1)
\;\text{such that}\;
\left( \begin{array}{c} X \\ I \end{array} \right) \mathbf{d}
- \left( \begin{array}{c} \mathbf{g}_0 \\ \mathbf{g}_1 \end{array}
\right) = \left( \begin{array}{c} \mathbf{s} \\
\mathbf{0} \end{array} \right) \;\;,
where :math:`\iota_C(\cdot)` is the indicator function of feasible
set :math:`C`, and :math:`X \mathbf{d} = \sum_m \mathbf{x}_m *
\mathbf{d}_m`.
|
The implementation of this class is substantially complicated by the
support of multi-channel signals. In the following, the number of
channels in the signal and dictionary are denoted by ``C`` and ``Cd``
respectively, the number of signals and the number of filters are
denoted by ``K`` and ``M`` respectively, ``X``, ``Z``, and ``S`` denote
the dictionary, coefficient map, and signal arrays respectively, and
``Y0`` and ``Y1`` denote blocks 0 and 1 of the auxiliary (split)
variable of the ADMM problem. We need to consider three different cases:
1. Single channel signal and dictionary (``C`` = ``Cd`` = 1)
2. Multi-channel signal, single channel dictionary (``C`` > 1,
``Cd`` = 1)
3. Multi-channel signal and dictionary (``C`` = ``Cd`` > 1)
The final three (non-spatial) dimensions of the main variables in each
of these cases are as in the following table:
====== ================== ===================== ==================
Var. ``C`` = ``Cd`` = 1 ``C`` > 1, ``Cd`` = 1 ``C`` = ``Cd`` > 1
====== ================== ===================== ==================
``X`` 1 x 1 x ``M`` 1 x 1 x ``M`` ``Cd`` x 1 x ``M``
``Z`` 1 x ``K`` x ``M`` ``C`` x ``K`` x ``M`` 1 x ``K`` x ``M``
``S`` 1 x ``K`` x 1 ``C`` x ``K`` x 1 ``C`` x ``K`` x 1
``Y0`` 1 x ``K`` x 1 ``C`` x ``K`` x 1 ``C`` x ``K`` x 1
``Y1`` 1 x 1 x ``M`` 1 x 1 x ``M`` ``C`` x 1 x ``M``
====== ================== ===================== ==================
In order to combine the block components ``Y0`` and ``Y1`` of
variable ``Y`` into a single array, we need to be able to
concatenate the two component arrays on one of the axes, but the shapes
``Y0`` and ``Y1`` are not compatible for concatenation. The solution for
    cases 1. and 3. is to swap the ``K`` and ``M`` axes of ``Y0`` before
concatenating, as well as after extracting the ``Y0`` component from the
concatenated ``Y`` variable. In case 2., since the ``C`` and ``K``
indices have the same behaviour in the dictionary update equation, we
combine these axes in :meth:`.__init__`, so that the case 2. array
shapes become
====== =====================
Var. ``C`` > 1, ``Cd`` = 1
====== =====================
``X`` 1 x 1 x ``M``
``Z`` 1 x ``C`` ``K`` x ``M``
``S`` 1 x ``C`` ``K`` x 1
``Y0`` 1 x ``C`` ``K`` x 1
``Y1`` 1 x 1 x ``M``
====== =====================
making it possible to concatenate ``Y0`` and ``Y1`` using the same
axis swapping strategy as in the other cases. See :meth:`.block_sep0`
and :meth:`block_cat` for additional details.
|
After termination of the :meth:`solve` method, attribute :attr:`itstat`
is a list of tuples representing statistics of each iteration. The
fields of the named tuple ``IterationStats`` are:
``Iter`` : Iteration number
``DFid`` : Value of data fidelity term :math:`(1/2) \sum_k \|
W (\sum_m \mathbf{d}_m * \mathbf{x}_{k,m} - \mathbf{s}_k) \|_2^2`
``Cnstr`` : Constraint violation measure
``PrimalRsdl`` : Norm of primal residual
``DualRsdl`` : Norm of dual residual
``EpsPrimal`` : Primal residual stopping tolerance
:math:`\epsilon_{\mathrm{pri}}`
``EpsDual`` : Dual residual stopping tolerance
:math:`\epsilon_{\mathrm{dua}}`
``Rho`` : Penalty parameter
``XSlvRelRes`` : Relative residual of X step solver
``Time`` : Cumulative run time
"""
class Options(admm.ADMMTwoBlockCnstrnt.Options):
r"""ConvCnstrMODMaskDcplBase algorithm options
Options include all of those defined in
:class:`.ADMMTwoBlockCnstrnt.Options`, together with
additional options:
``LinSolveCheck`` : Flag indicating whether to compute
relative residual of X step solver.
``ZeroMean`` : Flag indicating whether the solution
dictionary :math:`\{\mathbf{d}_m\}` should have zero-mean
components.
"""
defaults = copy.deepcopy(admm.ADMMEqual.Options.defaults)
defaults.update({'AuxVarObj': False, 'fEvalX': True,
'gEvalY': False, 'LinSolveCheck': False,
'ZeroMean': False, 'RelaxParam': 1.8,
'rho': 1.0, 'ReturnVar': 'Y1'})
def __init__(self, opt=None):
"""
Parameters
----------
opt : dict or None, optional (default None)
ConvCnstrMODMaskDcpl algorithm options
"""
if opt is None:
opt = {}
admm.ADMMTwoBlockCnstrnt.Options.__init__(self, opt)
def __setitem__(self, key, value):
"""Set options 'fEvalX' and 'gEvalY' appropriately when option
'AuxVarObj' is set.
"""
admm.ADMMTwoBlockCnstrnt.Options.__setitem__(self, key, value)
if key == 'AuxVarObj':
if value is True:
self['fEvalX'] = False
self['gEvalY'] = True
else:
self['fEvalX'] = True
self['gEvalY'] = False
itstat_fields_objfn = ('DFid', 'Cnstr')
itstat_fields_extra = ('XSlvRelRes',)
hdrtxt_objfn = ('DFid', 'Cnstr')
hdrval_objfun = {'DFid': 'DFid', 'Cnstr': 'Cnstr'}
def __init__(self, Z, S, W, dsz, opt=None, dimK=None, dimN=2):
"""
Parameters
----------
Z : array_like
Coefficient map array
S : array_like
Signal array
W : array_like
Mask array. The array shape must be such that the array is
compatible for multiplication with the *internal* shape of
input array S (see :class:`.cnvrep.CDU_ConvRepIndexing` for a
discussion of the distinction between *external* and *internal*
data layouts) after reshaping to the shape determined by
:func:`.cnvrep.mskWshape`.
dsz : tuple
Filter support size(s)
opt : :class:`ConvCnstrMODMaskDcplBase.Options` object
Algorithm options
dimK : 0, 1, or None, optional (default None)
Number of dimensions in input signal corresponding to multiple
independent signals
dimN : int, optional (default 2)
Number of spatial dimensions
"""
# Set default options if none specified
if opt is None:
opt = ConvCnstrMODMaskDcplBase.Options()
# Infer problem dimensions and set relevant attributes of self
self.cri = cr.CDU_ConvRepIndexing(dsz, S, dimK=dimK, dimN=dimN)
# Convert W to internal shape
W = np.asarray(W.reshape(cr.mskWshape(W, self.cri)),
dtype=S.dtype)
# Reshape W if necessary (see discussion of reshape of S below)
if self.cri.Cd == 1 and self.cri.C > 1:
# In most cases broadcasting rules make it possible for W
# to have a singleton dimension corresponding to a non-singleton
# dimension in S. However, when S is reshaped to interleave axisC
# and axisK on the same axis, broadcasting is no longer sufficient
# unless axisC and axisK of W are either both singleton or both
# of the same size as the corresponding axes of S. If neither of
# these cases holds, it is necessary to replicate the axis of W
# (axisC or axisK) that does not have the same size as the
# corresponding axis of S.
shpw = list(W.shape)
swck = shpw[self.cri.axisC] * shpw[self.cri.axisK]
if swck > 1 and swck < self.cri.C * self.cri.K:
if W.shape[self.cri.axisK] == 1 and self.cri.K > 1:
shpw[self.cri.axisK] = self.cri.K
else:
shpw[self.cri.axisC] = self.cri.C
W = np.broadcast_to(W, shpw)
self.W = W.reshape(
W.shape[0:self.cri.dimN] +
(1, W.shape[self.cri.axisC] * W.shape[self.cri.axisK], 1))
else:
self.W = W
# Call parent class __init__
Nx = self.cri.N * self.cri.Cd * self.cri.M
CK = (self.cri.C if self.cri.Cd == 1 else 1) * self.cri.K
shpY = list(self.cri.shpX)
shpY[self.cri.axisC] = self.cri.Cd
shpY[self.cri.axisK] = 1
shpY[self.cri.axisM] += CK
super(ConvCnstrMODMaskDcplBase, self).__init__(
Nx, shpY, self.cri.axisM, CK, S.dtype, opt)
# Reshape S to standard layout (Z, i.e. X in cbpdn, is assumed
# to be taken from cbpdn, and therefore already in standard
# form). If the dictionary has a single channel but the input
# (and therefore also the coefficient map array) has multiple
# channels, the channel index and multiple image index have
# the same behaviour in the dictionary update equation: the
# simplest way to handle this is to just reshape so that the
# channels also appear on the multiple image index.
if self.cri.Cd == 1 and self.cri.C > 1:
self.S = S.reshape(self.cri.Nv + (1, self.cri.C*self.cri.K, 1))
else:
self.S = S.reshape(self.cri.shpS)
self.S = np.asarray(self.S, dtype=self.dtype)
# Create constraint set projection function
self.Pcn = cr.getPcn(dsz, self.cri.Nv, self.cri.dimN, self.cri.dimCd,
zm=opt['ZeroMean'])
# Initialise byte-aligned arrays for pyfftw
self.YU = empty_aligned(self.Y.shape, dtype=self.dtype)
xfshp = list(self.cri.Nv + (self.cri.Cd, 1, self.cri.M))
self.Xf = rfftn_empty_aligned(xfshp, self.cri.axisN,
self.dtype)
if Z is not None:
self.setcoef(Z)
def uinit(self, ushape):
"""Return initialiser for working variable U."""
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
Ub0 = (self.W**2) * self.block_sep0(self.Y) / self.rho
Ub1 = self.block_sep1(self.Y)
return self.block_cat(Ub0, Ub1)
def setcoef(self, Z):
"""Set coefficient array."""
# If the dictionary has a single channel but the input (and
# therefore also the coefficient map array) has multiple
# channels, the channel index and multiple image index have
# the same behaviour in the dictionary update equation: the
# simplest way to handle this is to just reshape so that the
# channels also appear on the multiple image index.
if self.cri.Cd == 1 and self.cri.C > 1:
Z = Z.reshape(self.cri.Nv + (1, self.cri.Cx*self.cri.K,
self.cri.M,))
self.Z = np.asarray(Z, dtype=self.dtype)
self.Zf = rfftn(self.Z, self.cri.Nv, self.cri.axisN)
def getdict(self, crop=True):
"""Get final dictionary. If ``crop`` is ``True``, apply
:func:`.cnvrep.bcrop` to returned array.
"""
D = self.block_sep1(self.Y)
if crop:
D = cr.bcrop(D, self.cri.dsz, self.cri.dimN)
return D
def xstep_check(self, b):
r"""Check the minimisation of the Augmented Lagrangian with
respect to :math:`\mathbf{x}` by method `xstep` defined in
derived classes. This method should be called at the end of any
`xstep` method.
"""
if self.opt['LinSolveCheck']:
Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM)
ZHop = lambda x: sl.inner(np.conj(self.Zf), x,
axis=self.cri.axisK)
ax = ZHop(Zop(self.Xf)) + self.Xf
self.xrrs = sl.rrs(ax, b)
else:
self.xrrs = None
def ystep(self):
r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`.
"""
AXU = self.AX + self.U
Y0 = (self.rho*(self.block_sep0(AXU) - self.S)) / (self.W**2 +
self.rho)
Y1 = self.Pcn(self.block_sep1(AXU))
self.Y = self.block_cat(Y0, Y1)
def relax_AX(self):
"""Implement relaxation if option ``RelaxParam`` != 1.0."""
self.AXnr = self.cnst_A(self.X, self.Xf)
if self.rlx == 1.0:
self.AX = self.AXnr
else:
alpha = self.rlx
self.AX = alpha*self.AXnr + (1-alpha)*self.block_cat(
self.var_y0() + self.S, self.var_y1())
def block_sep0(self, Y):
r"""Separate variable into component corresponding to
:math:`\mathbf{y}_0` in :math:`\mathbf{y}\;\;`. The method from
parent class :class:`.ADMMTwoBlockCnstrnt` is overridden here to
allow swapping of K (multi-image) and M (filter) axes in block 0
so that it can be concatenated on axis M with block 1. This is
necessary because block 0 has the dimensions of S while block 1
has the dimensions of D. Handling of multi-channel signals
        substantially complicates this issue. There are two multi-channel
cases: multi-channel dictionary and signal (Cd = C > 1), and
single-channel dictionary with multi-channel signal (Cd = 1, C >
1). In the former case, S and D shapes are (N x C x K x 1) and
(N x C x 1 x M) respectively. In the latter case,
:meth:`.__init__` has already taken care of combining C
(multi-channel) and K (multi-image) axes in S, so the S and D
shapes are (N x 1 x C K x 1) and (N x 1 x 1 x M) respectively.
"""
return np.swapaxes(
Y[(slice(None),)*self.blkaxis + (slice(0, self.blkidx),)],
self.cri.axisK, self.cri.axisM)
def block_cat(self, Y0, Y1):
r"""Concatenate components corresponding to :math:`\mathbf{y}_0`
and :math:`\mathbf{y}_1` to form :math:`\mathbf{y}\;\;`. The
method from parent class :class:`.ADMMTwoBlockCnstrnt` is
overridden here to allow swapping of K (multi-image) and M
(filter) axes in block 0 so that it can be concatenated on axis
M with block 1. This is necessary because block 0 has the
dimensions of S while block 1 has the dimensions of D. Handling
    of multi-channel signals substantially complicates this
issue. There are two multi-channel cases: multi-channel
dictionary and signal (Cd = C > 1), and single-channel
dictionary with multi-channel signal (Cd = 1, C > 1). In the
former case, S and D shapes are (N x C x K x 1) and (N x C x 1 x
M) respectively. In the latter case, :meth:`.__init__` has
already taken care of combining C (multi-channel) and K
(multi-image) axes in S, so the S and D shapes are (N x 1 x C K
x 1) and (N x 1 x 1 x M) respectively.
"""
return np.concatenate((np.swapaxes(Y0, self.cri.axisK,
self.cri.axisM), Y1),
axis=self.blkaxis)
def cnst_A(self, X, Xf=None):
r"""Compute :math:`A \mathbf{x}` component of ADMM problem
constraint.
"""
return self.block_cat(self.cnst_A0(X, Xf), self.cnst_A1(X))
def obfn_g0var(self):
"""Variable to be evaluated in computing
:meth:`.ADMMTwoBlockCnstrnt.obfn_g0`, depending on the ``AuxVarObj``
option value.
"""
return self.var_y0() if self.opt['AuxVarObj'] else \
self.cnst_A0(None, self.Xf) - self.cnst_c0()
def cnst_A0(self, X, Xf=None):
r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
constraint.
"""
# This calculation involves non-negligible computational cost
# when Xf is None (i.e. the function is not being applied to
# self.X).
if Xf is None:
Xf = rfftn(X, None, self.cri.axisN)
return irfftn(sl.inner(self.Zf, Xf, axis=self.cri.axisM),
self.cri.Nv, self.cri.axisN)
def cnst_A0T(self, Y0):
r"""Compute :math:`A_0^T \mathbf{y}_0` component of
:math:`A^T \mathbf{y}` (see :meth:`.ADMMTwoBlockCnstrnt.cnst_AT`).
"""
# This calculation involves non-negligible computational cost. It
# should be possible to disable relevant diagnostic information
# (dual residual) to avoid this cost.
Y0f = rfftn(Y0, None, self.cri.axisN)
return irfftn(sl.inner(np.conj(self.Zf), Y0f,
axis=self.cri.axisK), self.cri.Nv,
self.cri.axisN)
def cnst_c0(self):
r"""Compute constant component :math:`\mathbf{c}_0` of
:math:`\mathbf{c}` in the ADMM problem constraint.
"""
return self.S
def eval_objfn(self):
"""Compute components of regularisation function as well as total
contribution to objective function.
"""
dfd = self.obfn_g0(self.obfn_g0var())
cns = self.obfn_g1(self.obfn_g1var())
return (dfd, cns)
def obfn_g0(self, Y0):
r"""Compute :math:`g_0(\mathbf{y}_0)` component of ADMM objective
function.
"""
return (np.linalg.norm(self.W * Y0)**2) / 2.0
def obfn_g1(self, Y1):
r"""Compute :math:`g_1(\mathbf{y_1})` component of ADMM objective
function.
"""
return np.linalg.norm((self.Pcn(Y1) - Y1))
def itstat_extra(self):
"""Non-standard entries for the iteration stats record tuple."""
return (self.xrrs,)
def reconstruct(self, D=None):
"""Reconstruct representation."""
if D is None:
Df = self.Xf
else:
Df = rfftn(D, None, self.cri.axisN)
Sf = np.sum(self.Zf * Df, axis=self.cri.axisM)
return irfftn(Sf, self.cri.Nv, self.cri.axisN)
def rsdl_s(self, Yprev, Y):
"""Compute dual residual vector."""
return self.rho*np.linalg.norm(self.cnst_AT(self.U))
def rsdl_sn(self, U):
"""Compute dual residual normalisation term."""
return self.rho*np.linalg.norm(U)
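# A small self-contained numpy sketch (assumed shapes and axis indices, not
# part of the solver classes above) of the axis-swap concatenation performed
# by block_sep0/block_cat: block 0 is shaped like S and block 1 like D, so
# block 0 has its K and M axes swapped before both are joined on axis M.
def _example_block_cat():
    import numpy as np
    Nv, C, K, M = (8, 8), 1, 4, 16
    axisK, axisM = 3, 4                  # hypothetical axis indices (dimN=2)
    Y0 = np.zeros(Nv + (C, K, 1))        # S-shaped block: N x C x K x 1
    Y1 = np.zeros(Nv + (C, 1, M))        # D-shaped block: N x C x 1 x M
    Y = np.concatenate((np.swapaxes(Y0, axisK, axisM), Y1), axis=axisM)
    return Y.shape                       # (8, 8, 1, 1, K + M)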
class ConvCnstrMODMaskDcpl_IterSM(ConvCnstrMODMaskDcplBase):
r"""
ADMM algorithm for Convolutional Constrained MOD with Mask Decoupling
:cite:`heide-2015-fast` with the :math:`\mathbf{x}` step solved via
iterated application of the Sherman-Morrison equation
:cite:`wohlberg-2016-efficient`.
|
.. inheritance-diagram:: ConvCnstrMODMaskDcpl_IterSM
:parts: 2
|
Multi-channel signals/images are supported
:cite:`wohlberg-2016-convolutional`. See
:class:`.ConvCnstrMODMaskDcplBase` for interface details.
"""
class Options(ConvCnstrMODMaskDcplBase.Options):
"""ConvCnstrMODMaskDcpl_IterSM algorithm options
Options are the same as those defined in
:class:`.ConvCnstrMODMaskDcplBase.Options`.
"""
defaults = copy.deepcopy(ConvCnstrMODMaskDcplBase.Options.defaults)
def __init__(self, opt=None):
"""
Parameters
----------
opt : dict or None, optional (default None)
ConvCnstrMODMaskDcpl_IterSM algorithm options
"""
if opt is None:
opt = {}
ConvCnstrMODMaskDcplBase.Options.__init__(self, opt)
def __init__(self, Z, S, W, dsz, opt=None, dimK=1, dimN=2):
"""
|
**Call graph**
.. image:: ../_static/jonga/ccmodmdism_init.svg
:width: 20%
:target: ../_static/jonga/ccmodmdism_init.svg
"""
# Set default options if none specified
if opt is None:
opt = ConvCnstrMODMaskDcpl_IterSM.Options()
super(ConvCnstrMODMaskDcpl_IterSM, self).__init__(Z, S, W, dsz,
opt, dimK, dimN)
def xstep(self):
r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`.
"""
self.YU[:] = self.Y - self.U
self.block_sep0(self.YU)[:] += self.S
YUf = rfftn(self.YU, None, self.cri.axisN)
b = sl.inner(np.conj(self.Zf), self.block_sep0(YUf),
axis=self.cri.axisK) + self.block_sep1(YUf)
self.Xf[:] = sl.solvemdbi_ism(self.Zf, 1.0, b, self.cri.axisM,
self.cri.axisK)
self.X = irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
self.xstep_check(b)
class ConvCnstrMODMaskDcpl_CG(ConvCnstrMODMaskDcplBase):
r"""
ADMM algorithm for Convolutional Constrained MOD with Mask Decoupling
:cite:`heide-2015-fast` with the :math:`\mathbf{x}` step solved via
Conjugate Gradient (CG) :cite:`wohlberg-2016-efficient`.
|
.. inheritance-diagram:: ConvCnstrMODMaskDcpl_CG
:parts: 2
|
Multi-channel signals/images are supported
:cite:`wohlberg-2016-convolutional`. See
:class:`.ConvCnstrMODMaskDcplBase` for interface details.
"""
class Options(ConvCnstrMODMaskDcplBase.Options):
"""ConvCnstrMODMaskDcpl_CG algorithm options
Options include all of those defined in
:class:`.ConvCnstrMODMaskDcplBase.Options`, together with
additional options:
``CG`` : CG solver options
``MaxIter`` : Maximum CG iterations.
``StopTol`` : CG stopping tolerance.
"""
defaults = copy.deepcopy(ConvCnstrMODMaskDcplBase.Options.defaults)
defaults.update({'CG': {'MaxIter': 1000, 'StopTol': 1e-3}})
def __init__(self, opt=None):
"""
Parameters
----------
opt : dict or None, optional (default None)
ConvCnstrMODMaskDcpl_CG algorithm options
"""
if opt is None:
opt = {}
ConvCnstrMODMaskDcplBase.Options.__init__(self, opt)
itstat_fields_extra = ('XSlvRelRes', 'XSlvCGIt')
def __init__(self, Z, S, W, dsz, opt=None, dimK=1, dimN=2):
"""
|
**Call graph**
.. image:: ../_static/jonga/ccmodmdcg_init.svg
:width: 20%
:target: ../_static/jonga/ccmodmdcg_init.svg
"""
# Set default options if none specified
if opt is None:
opt = ConvCnstrMODMaskDcpl_CG.Options()
super(ConvCnstrMODMaskDcpl_CG, self).__init__(Z, S, W, dsz, opt,
dimK, dimN)
self.Xf[:] = 0.0
def xstep(self):
r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`.
"""
self.cgit = None
self.YU[:] = self.Y - self.U
self.block_sep0(self.YU)[:] += self.S
YUf = rfftn(self.YU, None, self.cri.axisN)
b = sl.inner(np.conj(self.Zf), self.block_sep0(YUf),
axis=self.cri.axisK) + self.block_sep1(YUf)
self.Xf[:], cgit = sl.solvemdbi_cg(
self.Zf, 1.0, b, self.cri.axisM, self.cri.axisK,
self.opt['CG', 'StopTol'], self.opt['CG', 'MaxIter'], self.Xf)
self.cgit = cgit
self.X = irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
self.xstep_check(b)
def itstat_extra(self):
"""Non-standard entries for the iteration stats record tuple."""
return (self.xrrs, self.cgit)
class ConvCnstrMODMaskDcpl_Consensus(ccmod.ConvCnstrMOD_Consensus):
r"""
Hybrid ADMM Consensus algorithm for Convolutional Constrained MOD with
Mask Decoupling :cite:`garcia-2018-convolutional1`.
|
.. inheritance-diagram:: ConvCnstrMODMaskDcpl_Consensus
:parts: 2
|
Solve the optimisation problem
.. math::
\mathrm{argmin}_\mathbf{d} \;
(1/2) \left\| W \left(\sum_m \mathbf{d}_m * \mathbf{x}_m -
\mathbf{s} \right) \right\|_2^2 \quad \text{such that} \quad
\mathbf{d}_m \in C \;\; \forall m
where :math:`C` is the feasible set consisting of filters with unit
norm and constrained support, and :math:`W` is a mask array, via a
hybrid ADMM Consensus problem.
See the documentation of :class:`.ConvCnstrMODMaskDcplBase` for a
detailed discussion of the implementational complications resulting
from the support of multi-channel signals.
"""
def __init__(self, Z, S, W, dsz, opt=None, dimK=None, dimN=2):
"""
|
**Call graph**
.. image:: ../_static/jonga/ccmodmdcnsns_init.svg
:width: 20%
:target: ../_static/jonga/ccmodmdcnsns_init.svg
|
Parameters
----------
Z : array_like
Coefficient map array
S : array_like
Signal array
W : array_like
Mask array. The array shape must be such that the array is
compatible for multiplication with input array S (see
:func:`.cnvrep.mskWshape` for more details).
dsz : tuple
Filter support size(s)
opt : :class:`.ConvCnstrMOD_Consensus.Options` object
Algorithm options
dimK : 0, 1, or None, optional (default None)
Number of dimensions in input signal corresponding to multiple
independent signals
dimN : int, optional (default 2)
Number of spatial dimensions
"""
# Set default options if none specified
if opt is None:
opt = ccmod.ConvCnstrMOD_Consensus.Options()
super(ConvCnstrMODMaskDcpl_Consensus, self).__init__(
Z, S, dsz, opt=opt, dimK=dimK, dimN=dimN)
# Convert W to internal shape
if W is None:
W = np.array([1.0], dtype=self.dtype)
W = np.asarray(W.reshape(cr.mskWshape(W, self.cri)),
dtype=S.dtype)
# Reshape W if necessary (see discussion of reshape of S in
# ccmod.ConvCnstrMOD_Consensus.__init__)
if self.cri.Cd == 1 and self.cri.C > 1:
# In most cases broadcasting rules make it possible for W
# to have a singleton dimension corresponding to a non-singleton
# dimension in S. However, when S is reshaped to interleave axisC
# and axisK on the same axis, broadcasting is no longer sufficient
# unless axisC and axisK of W are either both singleton or both
# of the same size as the corresponding axes of S. If neither of
# these cases holds, it is necessary to replicate the axis of W
# (axisC or axisK) that does not have the same size as the
# corresponding axis of S.
shpw = list(W.shape)
swck = shpw[self.cri.axisC] * shpw[self.cri.axisK]
if swck > 1 and swck < self.cri.C * self.cri.K:
if W.shape[self.cri.axisK] == 1 and self.cri.K > 1:
shpw[self.cri.axisK] = self.cri.K
else:
shpw[self.cri.axisC] = self.cri.C
W = np.broadcast_to(W, shpw)
self.W = W.reshape(
W.shape[0:self.cri.dimN] +
(1, W.shape[self.cri.axisC] * W.shape[self.cri.axisK], 1))
else:
self.W = W
# Initialise additional variables required for the different
# splitting used in combining the consensus solution with mask
# decoupling
self.Y1 = np.zeros(self.S.shape, dtype=self.dtype)
self.U1 = np.zeros(self.S.shape, dtype=self.dtype)
self.YU1 = empty_aligned(self.S.shape, dtype=self.dtype)
def setcoef(self, Z):
"""Set coefficient array."""
# This method largely replicates the method from parent class
# ConvCnstrMOD_Consensus that it overrides. The inherited
# method is overridden to avoid the superfluous computation of
# self.ZSf in that method, which is not required for the
# modified algorithm with mask decoupling
if self.cri.Cd == 1 and self.cri.C > 1:
Z = Z.reshape(self.cri.Nv + (1,) + (self.cri.Cx*self.cri.K,) +
(self.cri.M,))
self.Z = np.asarray(Z, dtype=self.dtype)
self.Zf = rfftn(self.Z, self.cri.Nv, self.cri.axisN)
def var_y1(self):
"""Get the auxiliary variable that is constrained to be equal to
the dictionary. The method is named for compatibility with the
method of the same name in :class:`.ConvCnstrMODMaskDcpl_IterSM`
and :class:`.ConvCnstrMODMaskDcpl_CG` (it is *not* variable `Y1`
in this class).
"""
return self.Y
def relax_AX(self):
"""The parent class method that this method overrides only
implements the relaxation step for the variables of the baseline
consensus algorithm. This method calls the overridden method and
then implements the relaxation step for the additional variables
required for the mask decoupling modification to the baseline
algorithm.
"""
super(ConvCnstrMODMaskDcpl_Consensus, self).relax_AX()
self.AX1nr = irfftn(sl.inner(self.Zf, self.swapaxes(self.Xf),
axis=self.cri.axisM),
self.cri.Nv, self.cri.axisN)
if self.rlx == 1.0:
self.AX1 = self.AX1nr
else:
alpha = self.rlx
self.AX1 = alpha*self.AX1nr + (1-alpha)*(self.Y1 + self.S)
def xstep(self):
"""The xstep of the baseline consensus class from which this
class is derived is re-used to implement the xstep of the
modified algorithm by replacing ``self.ZSf``, which is constant
in the baseline algorithm, with a quantity derived from the
additional variables ``self.Y1`` and ``self.U1``. It is also
necessary to set the penalty parameter to unity for the duration
of the x step.
"""
self.YU1[:] = self.Y1 - self.U1
self.ZSf = np.conj(self.Zf) * (self.Sf + rfftn(
self.YU1, None, self.cri.axisN))
rho = self.rho
self.rho = 1.0
super(ConvCnstrMODMaskDcpl_Consensus, self).xstep()
self.rho = rho
def ystep(self):
"""The parent class ystep method is overridden to allow also
performing the ystep for the additional variables introduced in
the modification to the baseline algorithm.
"""
super(ConvCnstrMODMaskDcpl_Consensus, self).ystep()
AXU1 = self.AX1 + self.U1
self.Y1 = self.rho*(AXU1 - self.S) / (self.W**2 + self.rho)
def ustep(self):
"""The parent class ystep method is overridden to allow also
performing the ystep for the additional variables introduced in
the modification to the baseline algorithm.
"""
super(ConvCnstrMODMaskDcpl_Consensus, self).ustep()
self.U1 += self.AX1 - self.Y1 - self.S
def obfn_dfd(self):
r"""Compute data fidelity term :math:`(1/2) \| W \left( \sum_m
\mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \right) \|_2^2`.
"""
Ef = sl.inner(self.Zf, self.obfn_fvarf(), axis=self.cri.axisM) \
- self.Sf
return (np.linalg.norm(self.W * irfftn(Ef, self.cri.Nv,
self.cri.axisN))**2) / 2.0
def compute_residuals(self):
"""Compute residuals and stopping thresholds. The parent class
method is overridden to ensure that the residual calculations
include the additional variables introduced in the modification
to the baseline algorithm.
"""
        # The full primal residual is straightforward to compute from
        # the primal residuals for the baseline algorithm and for the
        # additional variables
r0 = self.rsdl_r(self.AXnr, self.Y)
r1 = self.AX1nr - self.Y1 - self.S
r = np.sqrt(np.sum(r0**2) + np.sum(r1**2))
        # The full dual residual is more complicated to compute than the
        # full primal residual
ATU = self.swapaxes(self.U) + irfftn(
np.conj(self.Zf) * rfftn(self.U1, self.cri.Nv, self.cri.axisN),
self.cri.Nv, self.cri.axisN)
s = self.rho * np.linalg.norm(ATU)
# The normalisation factor for the full primal residual is also not
# straightforward
nAX = np.sqrt(np.linalg.norm(self.AXnr)**2 +
np.linalg.norm(self.AX1nr)**2)
nY = np.sqrt(np.linalg.norm(self.Y)**2 +
np.linalg.norm(self.Y1)**2)
rn = max(nAX, nY, np.linalg.norm(self.S))
# The normalisation factor for the full dual residual is
# straightforward to compute
sn = self.rho * np.sqrt(np.linalg.norm(self.U)**2 +
np.linalg.norm(self.U1)**2)
# Final residual values and stopping tolerances depend on
# whether standard or normalised residuals are specified via the
# options object
if self.opt['AutoRho', 'StdResiduals']:
epri = np.sqrt(self.Nc)*self.opt['AbsStopTol'] + \
rn*self.opt['RelStopTol']
edua = np.sqrt(self.Nx)*self.opt['AbsStopTol'] + \
sn*self.opt['RelStopTol']
else:
if rn == 0.0:
rn = 1.0
if sn == 0.0:
sn = 1.0
r /= rn
s /= sn
epri = np.sqrt(self.Nc)*self.opt['AbsStopTol']/rn + \
self.opt['RelStopTol']
edua = np.sqrt(self.Nx)*self.opt['AbsStopTol']/sn + \
self.opt['RelStopTol']
return r, s, epri, edua
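# A small self-contained sketch (assumed shapes, for illustration only) of
# the mask replication logic in ConvCnstrMODMaskDcpl_Consensus.__init__:
# once the C and K axes of S are interleaved on a single axis, a mask W
# with a singleton on one of those axes must be explicitly replicated
# before it can be reshaped to the interleaved layout.
def _example_mask_replication():
    import numpy as np
    C, K = 3, 4                          # hypothetical channel/image counts
    W = np.ones((8, 8, C, 1, 1))         # mask with a singleton K axis
    shpw = list(W.shape)
    shpw[3] = K                          # replicate along the K axis
    Wr = np.broadcast_to(W, shpw)
    # interleave C and K on one axis, matching the reshaped layout of S
    return Wr.reshape(W.shape[0:2] + (1, C * K, 1))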
def ConvCnstrMODMaskDcpl(*args, **kwargs):
"""A wrapper function that dynamically defines a class derived from
one of the implementations of the Convolutional Constrained MOD
with Mask Decoupling problems, and returns an object instantiated
    with the provided parameters. The wrapper is designed to allow the
appropriate object to be created by calling this function using the
same syntax as would be used if it were a class. The specific
implementation is selected by use of an additional keyword
argument 'method'. Valid values are:
- ``'ism'`` :
Use the implementation defined in :class:`.ConvCnstrMODMaskDcpl_IterSM`.
This method works well for a small number of training images, but is
very slow for larger training sets.
- ``'cg'`` :
Use the implementation defined in :class:`.ConvCnstrMODMaskDcpl_CG`.
This method is slower than ``'ism'`` for small training sets, but has
better run time scaling as the training set grows.
- ``'cns'`` :
Use the implementation defined in
:class:`.ConvCnstrMODMaskDcpl_Consensus`. This method is the best choice
for large training sets.
The default value is ``'cns'``.
"""
# Extract method selection argument or set default
if 'method' in kwargs:
method = kwargs['method']
del kwargs['method']
else:
method = 'cns'
# Assign base class depending on method selection argument
if method == 'ism':
base = ConvCnstrMODMaskDcpl_IterSM
elif method == 'cg':
base = ConvCnstrMODMaskDcpl_CG
elif method == 'cns':
base = ConvCnstrMODMaskDcpl_Consensus
else:
raise ValueError('Unknown ConvCnstrMODMaskDcpl solver method %s'
% method)
# Nested class with dynamically determined inheritance
class ConvCnstrMODMaskDcpl(base):
def __init__(self, *args, **kwargs):
super(ConvCnstrMODMaskDcpl, self).__init__(*args, **kwargs)
# Allow pickling of objects of type ConvCnstrMODMaskDcpl
_fix_dynamic_class_lookup(ConvCnstrMODMaskDcpl, method)
# Return object of the nested class type
return ConvCnstrMODMaskDcpl(*args, **kwargs)
def ConvCnstrMODMaskDcplOptions(opt=None, method='cns'):
"""A wrapper function that dynamically defines a class derived from
the Options class associated with one of the implementations of
the Convolutional Constrained MOD with Mask Decoupling problem,
and returns an object instantiated with the provided parameters.
The wrapper is designed to allow the appropriate object to be
created by calling this function using the same syntax as would be
used if it were a class. The specific implementation is selected
by use of an additional keyword argument 'method'. Valid values are
as specified in the documentation for :func:`ConvCnstrMODMaskDcpl`.
"""
# Assign base class depending on method selection argument
if method == 'ism':
base = ConvCnstrMODMaskDcpl_IterSM.Options
elif method == 'cg':
base = ConvCnstrMODMaskDcpl_CG.Options
elif method == 'cns':
base = ConvCnstrMODMaskDcpl_Consensus.Options
else:
raise ValueError('Unknown ConvCnstrMODMaskDcpl solver method %s'
% method)
# Nested class with dynamically determined inheritance
class ConvCnstrMODMaskDcplOptions(base):
def __init__(self, opt):
super(ConvCnstrMODMaskDcplOptions, self).__init__(opt)
# Allow pickling of objects of type ConvCnstrMODMaskDcplOptions
_fix_dynamic_class_lookup(ConvCnstrMODMaskDcplOptions, method)
# Return object of the nested class type
return ConvCnstrMODMaskDcplOptions(opt)
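# A minimal usage sketch (not executed on import) of the two wrapper
# functions above; the array shapes, option value and filter size are
# assumptions for illustration only.
def _example_ccmodmd_wrapper_usage():
    import numpy as np
    Nv, C, K, M = (32, 32), 1, 4, 16
    Z = np.random.randn(*(Nv + (C, K, M)))    # hypothetical coefficient maps
    S = np.random.randn(*(Nv + (K,)))         # hypothetical training images
    W = np.ones(S.shape)                      # trivial mask
    opt = ConvCnstrMODMaskDcplOptions({'MaxMainIter': 20}, method='cns')
    d = ConvCnstrMODMaskDcpl(Z, S, W, (8, 8, M), opt=opt, method='cns')
    return d.solve()                          # updated dictionary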
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Invoke tasks. To run a task, run ``$ invoke <COMMAND>``. To see a list of
commands, run ``$ invoke --list``.
"""
import os
import sys
import json
import platform
import subprocess
import logging
import invoke
from invoke import Collection
from website import settings
from .utils import pip_install, bin_prefix
logging.getLogger('invoke').setLevel(logging.CRITICAL)
# gets the root path for all the scripts that rely on it
HERE = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
CONSTRAINTS_PATH = os.path.join(HERE, 'requirements', 'constraints.txt')
ns = Collection()
try:
from admin import tasks as admin_tasks
ns.add_collection(Collection.from_module(admin_tasks), name='admin')
except ImportError:
pass
def task(*args, **kwargs):
"""Behaves the same way as invoke.task. Adds the task
to the root namespace.
"""
if len(args) == 1 and callable(args[0]):
new_task = invoke.task(args[0])
ns.add_task(new_task)
return new_task
def decorator(f):
new_task = invoke.task(f, *args, **kwargs)
ns.add_task(new_task)
return new_task
return decorator
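# Illustrative sketch of the keyword-argument call form supported by the
# task factory above; the 'hello_example' task is hypothetical and exists
# only to demonstrate registration in the root namespace.
@task(aliases=['hello'])
def hello_example(ctx):
    """Echo a greeting (hypothetical example task)."""
    ctx.run('echo hello')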
@task
def server(ctx, host=None, port=5000, debug=True, gitlogs=False):
"""Run the app server."""
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true' or not debug:
if os.environ.get('WEB_REMOTE_DEBUG', None):
import pydevd
# e.g. '127.0.0.1:5678'
remote_parts = os.environ.get('WEB_REMOTE_DEBUG').split(':')
pydevd.settrace(remote_parts[0], port=int(remote_parts[1]), suspend=False, stdoutToServer=True, stderrToServer=True)
if gitlogs:
git_logs(ctx)
from website.app import init_app
os.environ['DJANGO_SETTINGS_MODULE'] = 'api.base.settings'
app = init_app(set_backends=True, routes=True)
settings.API_SERVER_PORT = port
else:
from framework.flask import app
context = None
if settings.SECURE_MODE:
context = (settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY)
app.run(host=host, port=port, debug=debug, threaded=debug, extra_files=[settings.ASSET_HASH_PATH], ssl_context=context)
@task
def git_logs(ctx, branch=None):
from scripts.meta import gatherer
gatherer.main(branch=branch)
@task
def apiserver(ctx, port=8000, wait=True, autoreload=True, host='127.0.0.1', pty=True):
"""Run the API server."""
env = os.environ.copy()
cmd = 'DJANGO_SETTINGS_MODULE=api.base.settings {} manage.py runserver {}:{} --nothreading'\
.format(sys.executable, host, port)
if not autoreload:
cmd += ' --noreload'
if settings.SECURE_MODE:
cmd = cmd.replace('runserver', 'runsslserver')
cmd += ' --certificate {} --key {}'.format(settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY)
if wait:
return ctx.run(cmd, echo=True, pty=pty)
from subprocess import Popen
return Popen(cmd, shell=True, env=env)
@task
def adminserver(ctx, port=8001, host='127.0.0.1', pty=True):
"""Run the Admin server."""
env = 'DJANGO_SETTINGS_MODULE="admin.base.settings"'
cmd = '{} python manage.py runserver {}:{} --nothreading'.format(env, host, port)
if settings.SECURE_MODE:
cmd = cmd.replace('runserver', 'runsslserver')
cmd += ' --certificate {} --key {}'.format(settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY)
ctx.run(cmd, echo=True, pty=pty)
@task
def shell(ctx, transaction=True, print_sql=False, notebook=False):
cmd = 'DJANGO_SETTINGS_MODULE="api.base.settings" python manage.py osf_shell'
if print_sql:
cmd += ' --print-sql'
if notebook:
cmd += ' --notebook'
if not transaction:
cmd += ' --no-transaction'
return ctx.run(cmd, pty=True, echo=True)
@task
def sharejs(ctx, host=None, port=None, db_url=None, cors_allow_origin=None):
"""Start a local ShareJS server."""
if host:
os.environ['SHAREJS_SERVER_HOST'] = host
if port:
os.environ['SHAREJS_SERVER_PORT'] = port
if db_url:
os.environ['SHAREJS_DB_URL'] = db_url
if cors_allow_origin:
os.environ['SHAREJS_CORS_ALLOW_ORIGIN'] = cors_allow_origin
if settings.SENTRY_DSN:
os.environ['SHAREJS_SENTRY_DSN'] = settings.SENTRY_DSN
share_server = os.path.join(settings.ADDON_PATH, 'wiki', 'shareServer.js')
ctx.run('node {0}'.format(share_server))
@task(aliases=['celery'])
def celery_worker(ctx, level='debug', hostname=None, beat=False, queues=None, concurrency=None, max_tasks_per_child=None):
"""Run the Celery process."""
os.environ['DJANGO_SETTINGS_MODULE'] = 'api.base.settings'
cmd = 'celery worker -A framework.celery_tasks -Ofair -l {0}'.format(level)
if hostname:
cmd = cmd + ' --hostname={}'.format(hostname)
# beat sets up a cron like scheduler, refer to website/settings
if beat:
cmd = cmd + ' --beat'
if queues:
cmd = cmd + ' --queues={}'.format(queues)
if concurrency:
cmd = cmd + ' --concurrency={}'.format(concurrency)
if max_tasks_per_child:
cmd = cmd + ' --maxtasksperchild={}'.format(max_tasks_per_child)
ctx.run(bin_prefix(cmd), pty=True)
@task(aliases=['beat'])
def celery_beat(ctx, level='debug', schedule=None):
"""Run the Celery process."""
os.environ['DJANGO_SETTINGS_MODULE'] = 'api.base.settings'
# beat sets up a cron like scheduler, refer to website/settings
cmd = 'celery beat -A framework.celery_tasks -l {0} --pidfile='.format(level)
if schedule:
cmd = cmd + ' --schedule={}'.format(schedule)
ctx.run(bin_prefix(cmd), pty=True)
@task
def migrate_search(ctx, delete=True, remove=False, index=settings.ELASTIC_INDEX):
"""Migrate the search-enabled models."""
from website.app import init_app
init_app(routes=False, set_backends=False)
from website.search_migration.migrate import migrate
# NOTE: Silence the warning:
# "InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised."
SILENT_LOGGERS = ['py.warnings']
for logger in SILENT_LOGGERS:
logging.getLogger(logger).setLevel(logging.ERROR)
migrate(delete, remove=remove, index=index)
@task
def rebuild_search(ctx):
"""Delete and recreate the index for elasticsearch"""
from website.app import init_app
import requests
from website import settings
init_app(routes=False, set_backends=True)
if not settings.ELASTIC_URI.startswith('http'):
protocol = 'http://' if settings.DEBUG_MODE else 'https://'
else:
protocol = ''
url = '{protocol}{uri}/{index}'.format(
protocol=protocol,
uri=settings.ELASTIC_URI.rstrip('/'),
index=settings.ELASTIC_INDEX,
)
print('Deleting index {}'.format(settings.ELASTIC_INDEX))
print('----- DELETE {}*'.format(url))
requests.delete(url + '*')
print('Creating index {}'.format(settings.ELASTIC_INDEX))
print('----- PUT {}'.format(url))
requests.put(url)
migrate_search(ctx, delete=False)
@task
def mailserver(ctx, port=1025):
"""Run a SMTP test server."""
cmd = 'python -m smtpd -n -c DebuggingServer localhost:{port}'.format(port=port)
ctx.run(bin_prefix(cmd), pty=True)
@task
def syntax(ctx):
"""Use pre-commit to run formatters and linters."""
ctx.run('pre-commit run --all-files --show-diff-on-failure', echo=True)
@task(aliases=['req'])
def requirements(ctx, base=False, addons=False, release=False, dev=False, all=False):
"""Install python dependencies.
Examples:
inv requirements
inv requirements --all
    You should use --all for updating your development environment.
    --all will install (in order): addons, dev and the base requirements.
    By default, the base requirements are installed. However, if any of addons, release, or dev is
    chosen, base must be specified explicitly in order to be installed. This preserves compatibility
    with previous usage. Release requirements prevent the dev and base requirements from being installed.
"""
if all:
base = True
addons = True
dev = True
    if not (addons or dev):
base = True
if release or addons:
addon_requirements(ctx)
# "release" takes precedence
if release:
req_file = os.path.join(HERE, 'requirements', 'release.txt')
ctx.run(
pip_install(req_file, constraints_file=CONSTRAINTS_PATH),
echo=True
)
else:
if dev: # then dev requirements
req_file = os.path.join(HERE, 'requirements', 'dev.txt')
ctx.run(
pip_install(req_file, constraints_file=CONSTRAINTS_PATH),
echo=True
)
if base: # then base requirements
req_file = os.path.join(HERE, 'requirements.txt')
ctx.run(
pip_install(req_file, constraints_file=CONSTRAINTS_PATH),
echo=True
)
# fix URITemplate name conflict h/t @github
ctx.run('pip uninstall uritemplate.py --yes || true')
ctx.run('pip install --no-cache-dir uritemplate.py==0.3.0')
@task
def test_module(ctx, module=None, numprocesses=None, nocapture=False, params=None, coverage=False):
"""Helper for running tests.
"""
os.environ['DJANGO_SETTINGS_MODULE'] = 'osf_tests.settings'
import pytest
if not numprocesses:
from multiprocessing import cpu_count
numprocesses = cpu_count()
numprocesses = int(numprocesses)
# NOTE: Subprocess to compensate for lack of thread safety in the httpretty module.
# https://github.com/gabrielfalcao/HTTPretty/issues/209#issue-54090252
args = []
if coverage:
args.extend([
'--cov-report', 'term-missing',
'--cov', 'admin',
'--cov', 'addons',
'--cov', 'api',
'--cov', 'framework',
'--cov', 'osf',
'--cov', 'website',
])
if not nocapture:
args += ['-s']
if numprocesses > 1:
args += ['-n {}'.format(numprocesses), '--max-slave-restart=0']
modules = [module] if isinstance(module, basestring) else module
args.extend(modules)
if params:
params = [params] if isinstance(params, basestring) else params
args.extend(params)
retcode = pytest.main(args)
sys.exit(retcode)
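# Illustrative sketch (hypothetical values) of the pytest argument list that
# test_module() assembles: capture flag, xdist parallelism options, then the
# module paths followed by any extra params.
def _example_test_module_args():
    numprocesses, modules, params = 4, ['osf_tests'], ['-k', 'test_login']
    args = ['-s', '-n {}'.format(numprocesses), '--max-slave-restart=0']
    args.extend(modules)
    args.extend(params)
    return args  # ['-s', '-n 4', '--max-slave-restart=0', 'osf_tests', '-k', 'test_login']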
OSF_TESTS = [
'osf_tests',
]
WEBSITE_TESTS = [
'tests',
]
API_TESTS1 = [
'api_tests/identifiers',
'api_tests/institutions',
'api_tests/licenses',
'api_tests/logs',
'api_tests/schemas',
'api_tests/providers',
'api_tests/preprints',
'api_tests/registrations',
'api_tests/users',
]
API_TESTS2 = [
'api_tests/actions',
'api_tests/nodes',
'api_tests/requests',
'api_tests/subscriptions',
# 'api_tests/waffle',
# 'api_tests/wb',
]
API_TESTS3 = [
'api_tests/addons_tests',
'api_tests/alerts',
'api_tests/applications',
'api_tests/banners',
'api_tests/base',
'api_tests/collections',
'api_tests/comments',
'api_tests/crossref',
'api_tests/files',
'api_tests/guids',
'api_tests/reviews',
'api_tests/regions',
'api_tests/search',
'api_tests/scopes',
'api_tests/taxonomies',
'api_tests/test',
'api_tests/tokens',
'api_tests/view_only_links',
'api_tests/wikis',
]
ADDON_TESTS = [
'addons',
]
ADMIN_TESTS = [
'admin_tests',
]
@task
def test_osf(ctx, numprocesses=None, coverage=False):
"""Run the OSF test suite."""
print('Testing modules "{}"'.format(OSF_TESTS))
test_module(ctx, module=OSF_TESTS, numprocesses=numprocesses, coverage=coverage)
@task
def test_website(ctx, numprocesses=None, coverage=False):
"""Run the old test suite."""
print('Testing modules "{}"'.format(WEBSITE_TESTS))
test_module(ctx, module=WEBSITE_TESTS, numprocesses=numprocesses, coverage=coverage)
@task
def test_api1(ctx, numprocesses=None, coverage=False):
"""Run the API test suite."""
print('Testing modules "{}"'.format(API_TESTS1 + ADMIN_TESTS))
test_module(ctx, module=API_TESTS1 + ADMIN_TESTS, numprocesses=numprocesses, coverage=coverage)
@task
def test_api2(ctx, numprocesses=None, coverage=False):
"""Run the API test suite."""
print('Testing modules "{}"'.format(API_TESTS2))
test_module(ctx, module=API_TESTS2, numprocesses=numprocesses, coverage=coverage)
@task
def test_api3(ctx, numprocesses=None, coverage=False):
"""Run the API test suite."""
print('Testing modules "{}"'.format(API_TESTS3 + OSF_TESTS))
# NOTE: There may be some concurrency issues with ES
test_module(ctx, module=API_TESTS3 + OSF_TESTS, numprocesses=numprocesses, coverage=coverage)
@task
def test_admin(ctx, numprocesses=None, coverage=False):
"""Run the Admin test suite."""
print('Testing module "admin_tests"')
test_module(ctx, module=ADMIN_TESTS, numprocesses=numprocesses, coverage=coverage)
@task
def test_addons(ctx, numprocesses=None, coverage=False):
"""Run all the tests in the addons directory.
"""
print('Testing modules "{}"'.format(ADDON_TESTS))
test_module(ctx, module=ADDON_TESTS, numprocesses=numprocesses, coverage=coverage)
@task
def test(ctx, all=False, lint=False):
"""
Run unit tests: OSF (always), plus addons and syntax checks (optional)
"""
if lint:
syntax(ctx)
test_website(ctx) # /tests
test_api1(ctx)
test_api2(ctx)
test_api3(ctx) # also /osf_tests
if all:
test_addons(ctx)
# TODO: Enable admin tests
test_admin(ctx)
karma(ctx)
@task
def travis_setup(ctx):
ctx.run('npm install -g bower', echo=True)
with open('package.json', 'r') as fobj:
package_json = json.load(fobj)
ctx.run('npm install @centerforopenscience/list-of-licenses@{}'.format(package_json['dependencies']['@centerforopenscience/list-of-licenses']), echo=True)
with open('bower.json', 'r') as fobj:
bower_json = json.load(fobj)
ctx.run('bower install {}'.format(bower_json['dependencies']['styles']), echo=True)
@task
def test_travis_addons(ctx, numprocesses=None, coverage=False):
"""
Run half of the tests to help travis go faster.
"""
travis_setup(ctx)
syntax(ctx)
test_addons(ctx, numprocesses=numprocesses, coverage=coverage)
@task
def test_travis_website(ctx, numprocesses=None, coverage=False):
"""
Run other half of the tests to help travis go faster.
"""
travis_setup(ctx)
test_website(ctx, numprocesses=numprocesses, coverage=coverage)
@task
def test_travis_api1_and_js(ctx, numprocesses=None, coverage=False):
# TODO: Uncomment when https://github.com/travis-ci/travis-ci/issues/8836 is resolved
# karma(ctx)
travis_setup(ctx)
test_api1(ctx, numprocesses=numprocesses, coverage=coverage)
@task
def test_travis_api2(ctx, numprocesses=None, coverage=False):
travis_setup(ctx)
test_api2(ctx, numprocesses=numprocesses, coverage=coverage)
@task
def test_travis_api3_and_osf(ctx, numprocesses=None, coverage=False):
travis_setup(ctx)
test_api3(ctx, numprocesses=numprocesses, coverage=coverage)
@task
def karma(ctx, travis=False):
"""Run JS tests with Karma. Requires Chrome to be installed."""
if travis:
return ctx.run('yarn test-travis', echo=True)
ctx.run('yarn test', echo=True)
@task
def wheelhouse(ctx, addons=False, release=False, dev=False, pty=True):
"""Build wheels for python dependencies.
Examples:
inv wheelhouse --dev
inv wheelhouse --addons
inv wheelhouse --release
"""
if release or addons:
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory)
if os.path.isdir(path):
req_file = os.path.join(path, 'requirements.txt')
if os.path.exists(req_file):
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={} -c {}'.format(
WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH, CONSTRAINTS_PATH,
)
ctx.run(cmd, pty=pty)
if release:
req_file = os.path.join(HERE, 'requirements', 'release.txt')
elif dev:
req_file = os.path.join(HERE, 'requirements', 'dev.txt')
else:
req_file = os.path.join(HERE, 'requirements.txt')
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={} -c {}'.format(
WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH, CONSTRAINTS_PATH,
)
ctx.run(cmd, pty=pty)
@task
def addon_requirements(ctx):
"""Install all addon requirements."""
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory)
requirements_file = os.path.join(path, 'requirements.txt')
if os.path.isdir(path) and os.path.isfile(requirements_file):
print('Installing requirements for {0}'.format(directory))
ctx.run(
pip_install(requirements_file, constraints_file=CONSTRAINTS_PATH),
echo=True
)
print('Finished installing addon requirements')
@task
def travis_addon_settings(ctx):
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory, 'settings')
if os.path.isdir(path):
            # Check for the settings template explicitly rather than opening
            # it, which leaked a file handle when the file existed
            if os.path.isfile(os.path.join(path, 'local-travis.py')):
                ctx.run('cp {path}/local-travis.py {path}/local.py'.format(path=path))
@task
def copy_addon_settings(ctx):
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory, 'settings')
if os.path.isdir(path) and not os.path.isfile(os.path.join(path, 'local.py')):
            # Check for the settings template explicitly rather than opening
            # it, which leaked a file handle when the file existed
            if os.path.isfile(os.path.join(path, 'local-dist.py')):
                ctx.run('cp {path}/local-dist.py {path}/local.py'.format(path=path))
@task
def copy_settings(ctx, addons=False):
# Website settings
if not os.path.isfile('website/settings/local.py'):
print('Creating local.py file')
ctx.run('cp website/settings/local-dist.py website/settings/local.py')
# Addon settings
if addons:
copy_addon_settings(ctx)
@task(aliases=['bower'])
def bower_install(ctx):
print('Installing bower-managed packages')
bower_bin = os.path.join(HERE, 'node_modules', '.bin', 'bower')
ctx.run('{} prune --allow-root'.format(bower_bin), echo=True)
ctx.run('{} install --allow-root'.format(bower_bin), echo=True)
@task
def docker_init(ctx):
"""Initial docker setup"""
print('You will be asked for your sudo password to continue...')
if platform.system() == 'Darwin': # Mac OSX
ctx.run('sudo ifconfig lo0 alias 192.168.168.167')
else:
print('Your system is not recognized, you will have to setup docker manually')
def ensure_docker_env_setup(ctx):
    # os.environ is a mapping, so the flag must be checked with get();
    # hasattr() tests attributes and is always False for variable names
    if os.environ.get('DOCKER_ENV_SETUP') == '1':
        pass
else:
os.environ['WEB_REMOTE_DEBUG'] = '192.168.168.167:11000'
os.environ['API_REMOTE_DEBUG'] = '192.168.168.167:12000'
os.environ['WORKER_REMOTE_DEBUG'] = '192.168.168.167:13000'
os.environ['DOCKER_ENV_SETUP'] = '1'
docker_init(ctx)
@task
def docker_requirements(ctx):
ensure_docker_env_setup(ctx)
ctx.run('docker-compose up requirements requirements_mfr requirements_wb')
@task
def docker_appservices(ctx):
ensure_docker_env_setup(ctx)
ctx.run('docker-compose up assets fakecas elasticsearch tokumx postgres')
@task
def docker_osf(ctx):
ensure_docker_env_setup(ctx)
ctx.run('docker-compose up mfr wb web api')
@task
def clear_sessions(ctx, months=1, dry_run=False):
from website.app import init_app
init_app(routes=False, set_backends=True)
from scripts import clear_sessions
clear_sessions.clear_sessions_relative(months=months, dry_run=dry_run)
# Release tasks
@task
def hotfix(ctx, name, finish=False, push=False):
"""Rename hotfix branch to hotfix/<next-patch-version> and optionally
finish hotfix.
"""
    print('Checking out master to calculate current version')
ctx.run('git checkout master')
latest_version = latest_tag_info()['current_version']
print('Current version is: {}'.format(latest_version))
major, minor, patch = latest_version.split('.')
next_patch_version = '.'.join([major, minor, str(int(patch) + 1)])
print('Bumping to next patch version: {}'.format(next_patch_version))
print('Renaming branch...')
new_branch_name = 'hotfix/{}'.format(next_patch_version)
ctx.run('git checkout {}'.format(name), echo=True)
ctx.run('git branch -m {}'.format(new_branch_name), echo=True)
if finish:
ctx.run('git flow hotfix finish {}'.format(next_patch_version), echo=True, pty=True)
if push:
ctx.run('git push --follow-tags origin master', echo=True)
ctx.run('git push origin develop', echo=True)
@task
def feature(ctx, name, finish=False, push=False):
"""Rename the current branch to a feature branch and optionally finish it."""
print('Renaming branch...')
ctx.run('git branch -m feature/{}'.format(name), echo=True)
if finish:
ctx.run('git flow feature finish {}'.format(name), echo=True)
if push:
ctx.run('git push origin develop', echo=True)
# Adapted from bumpversion
def latest_tag_info():
try:
# git-describe doesn't update the git-index, so we do that
# subprocess.check_output(["git", "update-index", "--refresh"])
# get info about the latest tag in git
describe_out = subprocess.check_output([
'git',
'describe',
'--dirty',
'--tags',
'--long',
'--abbrev=40'
], stderr=subprocess.STDOUT
).decode().split('-')
    except subprocess.CalledProcessError as err:
        # logger.warn("Error when running git describe")
        raise err
info = {}
if describe_out[-1].strip() == 'dirty':
info['dirty'] = True
describe_out.pop()
info['commit_sha'] = describe_out.pop().lstrip('g')
info['distance_to_latest_tag'] = int(describe_out.pop())
info['current_version'] = describe_out.pop().lstrip('v')
# assert type(info["current_version"]) == str
assert 0 == len(describe_out)
return info
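# An illustrative check (assumed tag format) of the describe-string parsing
# in latest_tag_info(): output such as 'v0.42.3-5-g<sha>-dirty' splits on
# '-' into version, distance to the tag, 'g'-prefixed sha and an optional
# dirty flag, consumed here in reverse order.
def _example_parse_describe():
    describe_out = 'v0.42.3-5-gdeadbeef-dirty'.split('-')
    info = {}
    if describe_out[-1].strip() == 'dirty':
        info['dirty'] = True
        describe_out.pop()
    info['commit_sha'] = describe_out.pop().lstrip('g')
    info['distance_to_latest_tag'] = int(describe_out.pop())
    info['current_version'] = describe_out.pop().lstrip('v')
    return info  # {'dirty': True, 'commit_sha': 'deadbeef', ...}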
# Tasks for generating and bundling SSL certificates
# See http://cosdev.readthedocs.org/en/latest/osf/ops.html for details
@task
def generate_key(ctx, domain, bits=2048):
cmd = 'openssl genrsa -des3 -out {0}.key {1}'.format(domain, bits)
ctx.run(cmd)
@task
def generate_key_nopass(ctx, domain):
cmd = 'openssl rsa -in {domain}.key -out {domain}.key.nopass'.format(
domain=domain
)
ctx.run(cmd)
@task
def generate_csr(ctx, domain):
cmd = 'openssl req -new -key {domain}.key.nopass -out {domain}.csr'.format(
domain=domain
)
ctx.run(cmd)
@task
def request_ssl_cert(ctx, domain):
"""Generate a key, a key with password removed, and a signing request for
the specified domain.
Usage:
> invoke request_ssl_cert pizza.osf.io
"""
generate_key(ctx, domain)
generate_key_nopass(ctx, domain)
generate_csr(ctx, domain)
@task
def bundle_certs(ctx, domain, cert_path):
"""Concatenate certificates from NameCheap in the correct order. Certificate
files must be in the same directory.
"""
cert_files = [
'{0}.crt'.format(domain),
'COMODORSADomainValidationSecureServerCA.crt',
'COMODORSAAddTrustCA.crt',
'AddTrustExternalCARoot.crt',
]
certs = ' '.join(
os.path.join(cert_path, cert_file)
for cert_file in cert_files
)
cmd = 'cat {certs} > {domain}.bundle.crt'.format(
certs=certs,
domain=domain,
)
ctx.run(cmd)
@task
def clean_assets(ctx):
"""Remove built JS files."""
public_path = os.path.join(HERE, 'website', 'static', 'public')
js_path = os.path.join(public_path, 'js')
ctx.run('rm -rf {0}'.format(js_path), echo=True)
@task(aliases=['pack'])
def webpack(ctx, clean=False, watch=False, dev=False, colors=False):
"""Build static assets with webpack."""
if clean:
clean_assets(ctx)
args = ['yarn run webpack-{}'.format('dev' if dev else 'prod')]
args += ['--progress']
if watch:
args += ['--watch']
if colors:
args += ['--colors']
command = ' '.join(args)
ctx.run(command, echo=True)
@task()
def build_js_config_files(ctx):
from website import settings
print('Building JS config files...')
with open(os.path.join(settings.STATIC_FOLDER, 'built', 'nodeCategories.json'), 'wb') as fp:
json.dump(settings.NODE_CATEGORY_MAP, fp)
print('...Done.')
@task()
def assets(ctx, dev=False, watch=False, colors=False):
"""Install and build static assets."""
command = 'yarn install --frozen-lockfile'
if not dev:
command += ' --production'
ctx.run(command, echo=True)
bower_install(ctx)
build_js_config_files(ctx)
# Always set clean=False to prevent possible mistakes
# on prod
webpack(ctx, clean=False, watch=watch, dev=dev, colors=colors)
@task
def generate_self_signed(ctx, domain):
"""Generate self-signed SSL key and certificate.
"""
cmd = (
'openssl req -x509 -nodes -days 365 -newkey rsa:2048'
' -keyout {0}.key -out {0}.crt'
).format(domain)
ctx.run(cmd)
@task
def update_citation_styles(ctx):
from scripts import parse_citation_styles
total = parse_citation_styles.main()
print('Parsed {} styles'.format(total))
@task
def clean(ctx, verbose=False):
ctx.run('find . -name "*.pyc" -delete', echo=True)
@task(default=True)
def usage(ctx):
ctx.run('invoke --list')
### Maintenance Tasks ###
@task
def set_maintenance(ctx, message='', level=1, start=None, end=None):
    """Display maintenance notice across OSF applications (incl. preprints, registries, etc.)
    start - Start time for the maintenance period
    end - End time for the maintenance period
    NOTE: If no start or end values are provided, default to starting now
    and ending 24 hours from now.
    message - Message to display. If omitted, will be:
    "The site will undergo maintenance between <localized start time> and <localized end time>. Thank you
    for your patience."
    level - Severity level. Modifies the color of the displayed notice. Must be one of 1 (info), 2 (warning), 3 (danger).
    Examples:
    invoke set_maintenance --start 2016-03-16T15:41:00-04:00 --end 2016-03-16T15:42:00-04:00
    invoke set_maintenance --message 'The OSF is experiencing issues connecting to a 3rd party service' --level 2 --start 2016-03-16T15:41:00-04:00 --end 2016-03-16T15:42:00-04:00
    """
    from website.app import setup_django
    setup_django()
    from website.maintenance import set_maintenance
state = set_maintenance(message, level, start, end)
print('Maintenance notice up {} to {}.'.format(state['start'], state['end']))
@task
def unset_maintenance(ctx):
from website.app import setup_django
setup_django()
from website.maintenance import unset_maintenance
print('Taking down maintenance notice...')
unset_maintenance()
print('...Done.')
|
|
from __future__ import absolute_import, unicode_literals
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for convenient
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
ADMIN_MENU_ORDER = (
("Content", ("pages.Page", "blog.BlogPost",
"generic.ThreadedComment", ("Media Library", "fb_browse"),)),
("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
("Users", ("auth.User", "auth.Group",)),
("Feedback", ("feedback.Feedback",)),
)
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, "Top navigation bar", "pages/menus/dropdown.html"),
# (2, "Left-hand tree", "pages/menus/tree.html"),
# (3, "Footer", "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.models.db.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# ("Image",),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# ("Another name",),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the south application will be automatically added to the
# INSTALLED_APPS setting.
USE_SOUTH = True
########################
# MAIN DJANGO SETTINGS #
########################
# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'),
# ('Full Name', 'anotheremail@example.com'))
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "Europe/London"
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
_ = lambda s: s
LANGUAGES = (
('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = True
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ("127.0.0.1",)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
)
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
FILEBROWSER_EXTENSIONS = {
'Folder': [''],
'Image': ['.jpg','.jpeg','.gif','.png','.tif','.tiff'],
'Video': ['.mp4', '.mov','.wmv','.mpeg','.mpg','.avi','.rm'],
'Document': ['.pdf','.doc','.rtf','.txt','.xls','.csv'],
'Audio': ['.mp3','.wav','.aiff','.midi','.m4p', '.ogg'],
'Code': ['.html','.py','.js','.css']
}
#############
# DATABASES #
#############
DATABASES = {
"default": {
# Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.sqlite3",
# DB name or path to database file if using sqlite3.
"NAME": "dev.db",
# Not used with sqlite3.
"USER": "",
# Not used with sqlite3.
"PASSWORD": "",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
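# Example (commented out, placeholder credentials) of the same keys used
# with a PostgreSQL backend:
#
# DATABASES = {
#     "default": {
#         "ENGINE": "django.db.backends.postgresql_psycopg2",
#         "NAME": "mysite",
#         "USER": "mysite_user",
#         "PASSWORD": "changeme",
#         "HOST": "localhost",
#         "PORT": "5432",
#     }
# }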
#########
# PATHS #
#########
import os
# Full filesystem path to the project.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Name of the directory for the project.
PROJECT_DIRNAME = PROJECT_ROOT.split(os.sep)[-1]
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_DIRNAME
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'assets'),
)
#STATIC_ROOT = ''
#STATICFILES_DIRS = (os.path.join('static'),)
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_DIRNAME
# Put strings here, like "/home/html/django_templates"
# or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, "templates"),)
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.blog",
"mezzanine.forms",
"mezzanine.pages",
"mezzanine.galleries",
"mezzanine.twitter",
"mezzanine.accounts",
"flexipage", # Add the flexipage app
#"mezzanine.mobile",
#Our apps:
"feedback"
)
FLEXI_TEMPLATES = (
('contact.html','contact'),
('index.html','Home'),
)
FLEXI_FORMS = (
'feedback.forms.FeedbackForm',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
"mezzanine.pages.context_processors.page",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.static",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.core.context_processors.tz",
"mezzanine.conf.context_processors.settings",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.TemplateForDeviceMiddleware",
"mezzanine.core.middleware.TemplateForHostMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
# Uncomment the following if using any of the SSL settings:
# "mezzanine.core.middleware.SSLRedirectMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
)
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
DEBUG_TOOLBAR_CONFIG = {"INTERCEPT_REDIRECTS": False}
###################
# DEPLOY SETTINGS #
###################
# These settings are used by the default fabfile.py provided.
# Check fabfile.py for defaults.
# FABRIC = {
# "SSH_USER": "", # SSH username
# "SSH_PASS": "", # SSH password (consider key-based authentication)
# "SSH_KEY_PATH": "", # Local path to SSH key file, for key-based auth
# "HOSTS": [], # List of hosts to deploy to
# "VIRTUALENV_HOME": "", # Absolute remote path for virtualenvs
# "PROJECT_NAME": "", # Unique identifier for project
# "REQUIREMENTS_PATH": "", # Path to pip requirements, relative to project
# "GUNICORN_PORT": 8000, # Port gunicorn will listen on
# "LOCALE": "en_US.UTF-8", # Should end with ".UTF-8"
# "LIVE_HOSTNAME": "www.example.com", # Host for public site.
# "REPO_URL": "", # Git or Mercurial remote repo URL for the project
# "DB_PASS": "", # Live database password
# "ADMIN_PASS": "", # Live admin user password
# "SECRET_KEY": SECRET_KEY,
# "NEVERCACHE_KEY": NEVERCACHE_KEY,
# }
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
try:
from local_settings import *
except ImportError:
pass
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
|
|
from JumpScale import j
import JumpScale as jumpscale
try:
    from configparser import ConfigParser  # Python 3
except ImportError:
    from ConfigParser import ConfigParser  # Python 2
# TODO: UGLY, validation should not happen on the object (file) you read
# from, but on the file where you populate values (kds)
class InifileTool:
def __init__(self):
self.__jslocation__ = "j.tools.inifile"
@staticmethod
def open(filename, createIfNonExisting=True):
'''Open an existing INI file
@param filename: Filename of INI file
@type filename: string
@raises RuntimeError: When the provided filename doesn't exist
@returns: Opened INI file object
@rtype: jumpscale.inifile.IniFile.IniFile
'''
if isinstance(filename, str) and not j.sal.fs.exists(filename):
if createIfNonExisting:
return InifileTool.new(filename)
else:
raise j.exceptions.RuntimeError('Attempt to open non-existing INI file %s' % filename)
return IniFile(filename, create=False)
@staticmethod
def new(filename):
'''Create a new INI file
@param filename: Filename of INI file
@type filename: string
@raises RuntimeError: When the provided filename exists
@returns: New INI file object
@rtype: jumpscale.inifile.IniFile.IniFile
'''
if isinstance(filename, str) and j.sal.fs.exists(filename):
raise j.exceptions.RuntimeError('Attempt to create existing INI file %s as a new file' % filename)
return IniFile(filename, create=True)
class IniFile:
"""
Use with care:
- addParam and setParam are 'auto-write'
- addSection isn't
- removeSection isn't
- removeParam isn't
"""
__configParser = None # ConfigParser
__inifilepath = None # string
__file = None # File-like object
__removeWhenDereferenced = False # bool
def __init__(self, iniFile, create=False, removeWhenDereferenced=False):
""" Initialize IniFile. If the file already exists, read it and parse the structure.
If the file did not yet exist. Don't do anything yet.
@param iniFile: The file to write to. This can be either a string representing a file-path or a file-like object
@type iniFile: string or file-like object
@param create: Whether or not to create a new file (Ignored if iniFile is a file-like object)
@type create: bool
@param removeWhenDereferenced: Whether or not to remove the file when this object is dereferenced
@type removeWhenDereferenced: bool
"""
self.logger = j.logger.get("j.tools.inifile")
self.__configParser = ConfigParser()
self.__removeWhenDereferenced = removeWhenDereferenced
if isinstance(iniFile, str): # iniFile is a filepath
self.__inifilepath = iniFile
if create:
j.sal.fs.createDir(j.sal.fs.getDirName(iniFile))
self.logger.info("Create config file: " + iniFile)
j.sal.fs.writeFile(iniFile, '')
if not j.sal.fs.isFile(iniFile):
raise j.exceptions.RuntimeError("Inifile could not be found on location %s" % iniFile)
else: # iniFile is a file-like object
self.__file = iniFile
self.__readFile()
def __str__(self):
"""Returns string representation of the IniFile"""
return '<IniFile> filepath: %s ' % self.__inifilepath
__repr__ = __str__
def __del__(self):
if self.__inifilepath and self.__removeWhenDereferenced:
j.sal.fs.remove(self.__inifilepath)
    def __readFile(self):
        fp = None
        try:
            if self.__inifilepath:
                fp = open(self.__inifilepath, "r")
            else:
                fp = self.__file
            return self.__configParser.read_file(fp)
        except Exception as err:
            raise j.exceptions.RuntimeError("Failed to read the inifile \nERROR: %s" % (str(err)))
        finally:
            # Only close the handle if we opened it here; a caller-supplied
            # file-like object must stay open for later writes.
            if self.__inifilepath and fp is not None and not fp.closed:
                fp.close()
def getSections(self):
""" Return list of sections from this IniFile"""
try:
return self.__configParser.sections()
except Exception as err:
raise LookupError("Failed to get sections \nERROR: %s" % str(err))
def getParams(self, sectionName):
""" Return list of params in a certain section of this IniFile
@param sectionName: Name of the section for which you wish the param"""
if not self.checkSection(sectionName):
return
try:
return self.__configParser.options(sectionName)
except Exception as err:
raise LookupError("Failed to get parameters under the specified section: %s \nERROR: %s" %
(sectionName, str(err)))
def checkSection(self, sectionName):
""" Boolean indicating whether section exists in this IniFile
@param sectionName: name of the section"""
try:
return self.__configParser.has_section(sectionName)
except Exception as err:
raise ValueError('Failed to check if the specified section: %s exists \nERROR: %s' %
(sectionName, str(err)))
def checkParam(self, sectionName, paramName):
"""Boolean indicating whether parameter exists under this section in the IniFile
@param sectionName: name of the section where the param should be located
@param paramName: name of the parameter you wish to check"""
try:
return self.__configParser.has_option(sectionName, paramName)
except Exception as e:
raise ValueError('Failed to check if the parameter: %s under section: %s exists \nERROR: %s' %
(paramName, sectionName, str(e)))
def getValue(self, sectionName, paramName, raw=False, default=None):
""" Get value of the parameter from this IniFile
@param sectionName: name of the section
@param paramName: name of the parameter
@param raw: boolean specifying whether you wish the value to be returned raw
@param default: if given and the value does not exist the default value will be given
@return: The value"""
if default is not None and not self.checkParam(sectionName, paramName):
return default
try:
result = self.__configParser.get(sectionName, paramName, raw=raw)
self.logger.info("Inifile: get %s:%s from %s, result:%s" %
(sectionName, paramName, self.__inifilepath, result))
return result
except Exception as err:
raise LookupError('Failed to get value of the parameter: %s under section: %s in file %s.\nERROR: %s' % (
paramName, sectionName, self.__inifilepath, str(err)))
def getBooleanValue(self, sectionName, paramName):
"""Get boolean value of the specified parameter
@param sectionName: name of the section
@param paramName: name of the parameter"""
try:
result = self.__configParser.getboolean(sectionName, paramName)
self.logger.info("Inifile: get boolean %s:%s from %s, result:%s" %
(sectionName, paramName, self.__inifilepath, result))
return result
except Exception as e:
raise LookupError('Inifile: Failed to get boolean value of parameter:%s under section:%s \nERROR: %s' % (
paramName, sectionName, e))
def getIntValue(self, sectionName, paramName):
"""Get an integer value of the specified parameter
@param sectionName: name of the section
@param paramName: name of the parameter"""
try:
result = self.__configParser.getint(sectionName, paramName)
self.logger.info("Inifile: get integer %s:%s from %s, result:%s" %
(sectionName, paramName, self.__inifilepath, result))
return result
except Exception as e:
raise LookupError('Failed to get integer value of parameter: %s under section: %s\nERROR: %s' %
(paramName, sectionName, e))
def getFloatValue(self, sectionName, paramName):
"""Get float value of the specified parameter
@param sectionName: name of the section
@param paramName: name of the parameter"""
try:
result = self.__configParser.getfloat(sectionName, paramName)
self.logger.info("Inifile: get integer %s:%s from %s, result:%s" %
(sectionName, paramName, self.__inifilepath, result))
return result
except Exception as e:
            raise LookupError('Failed to get float value of parameter:%s under section:%s \nERROR: %s' %
                              (paramName, sectionName, e))
def addSection(self, sectionName):
""" Add a new section to this Inifile. If it already existed, silently pass
@param sectionName: name of the section"""
try:
            if self.checkSection(sectionName):
return
self.logger.info("Inifile: add section %s to %s" % (sectionName, self.__inifilepath))
self.__configParser.add_section(sectionName)
if self.checkSection(sectionName):
return True
except Exception as err:
raise j.exceptions.RuntimeError(
'Failed to add section with sectionName: %s \nERROR: %s' % (sectionName, str(err)))
def addParam(self, sectionName, paramName, newvalue):
""" Add name-value pair to section of IniFile
@param sectionName: name of the section
@param paramName: name of the parameter
        @param newvalue: value you wish to assign to the parameter"""
        try:
            if str(newvalue) == "none":
                newvalue = "*NONE*"
            self.__configParser.set(sectionName, paramName, str(newvalue))
            self.logger.info("Inifile: set %s:%s=%s on %s" %
                             (sectionName, paramName, str(newvalue), self.__inifilepath))
            self.write()
            return True
except Exception as err:
raise j.exceptions.RuntimeError('Failed to add parameter with sectionName: %s, parameterName: %s, value: %s \nERROR: %s' % (
sectionName, paramName, newvalue, str(err)))
def setParam(self, sectionName, paramName, newvalue):
""" Add name-value pair to section of IniFile
@param sectionName: name of the section
@param paramName: name of the parameter
        @param newvalue: value you wish to assign to the parameter"""
        return self.addParam(sectionName, paramName, newvalue)
def removeSection(self, sectionName):
""" Remove a section from this IniFile
@param sectionName: name of the section"""
if not self.checkSection(sectionName):
return False
try:
self.__configParser.remove_section(sectionName)
self.logger.info("inifile: remove section %s on %s" % (sectionName, self.__inifilepath))
if self.checkSection(sectionName):
return False
return True
except Exception as err:
            raise j.exceptions.RuntimeError('Failed to remove section %s\nERROR: %s' % (sectionName, str(err)))
def removeParam(self, sectionName, paramName):
""" Remove a param from this IniFile
@param sectionName: name of the section
@param paramName: name of the parameter"""
if not self.checkParam(sectionName, paramName):
return False
try:
self.__configParser.remove_option(sectionName, paramName)
self.logger.info("Inifile:remove %s:%s from %s" % (sectionName, paramName, self.__inifilepath))
return True
except Exception as err:
raise j.exceptions.RuntimeError(
'Failed to remove parameter: %s under section: %s \nERROR: %s' % (paramName, sectionName, str(err)))
def write(self, filePath=None):
""" Write the IniFile content to disk
This completely overwrites the file
@param filePath: location where the file will be written
"""
closeFileHandler = True
fp = None
self.logger.info("Inifile: Write configfile %s to disk" % (self.__inifilepath))
if not filePath:
if self.__inifilepath: # Use the inifilepath that was set in the constructor
filePath = self.__inifilepath
elif self.__file: # write to the file-like object that was set in the constructor
closeFileHandler = False # We don't want to close this object
fp = self.__file
fp.seek(0)
fp.truncate() # Clear the file-like object before writing to it
else: # Nothing to write to
raise Exception("No filepath to write to")
        try:
            if not fp:
                j.sal.fs.lock(filePath)
                fp = open(filePath, 'w')  # Completely overwrite the file.
            self.__configParser.write(fp)
            fp.flush()
            if closeFileHandler:
                fp.close()
            if filePath:
                j.sal.fs.unlock_(filePath)
        except Exception as err:
            if fp and closeFileHandler and not fp.closed:
                fp.close()
            if filePath:  # no lock was taken when writing to a caller-supplied file object
                j.sal.fs.unlock_(filePath)
            raise j.exceptions.RuntimeError("Failed to update the inifile at '%s'\nERROR: %s\n" % (filePath, str(err)))
def getContent(self):
""" Get the Inifile content to a string
"""
# TODO: jumpscale primitives should be used (no fp...)
fp = None
if self.__file and not self.__file.closed:
fp = self.__file
fp.seek(0)
fp.truncate()
else:
            try:
                from io import StringIO  # Python 3
            except ImportError:
                from StringIO import StringIO  # Python 2 fallback
fp = StringIO()
self.__configParser.write(fp)
fp.seek(0)
return fp.read()
def getSectionAsDict(self, section):
retval = {}
for key in self.getParams(section):
retval[key] = self.getValue(section, key)
return retval
def getFileAsDict(self):
retval = {}
for section in self.getSections():
retval[section] = self.getSectionAsDict(section)
return retval
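# Usage sketch (illustrative, not part of the original module). Assumes a
# JumpScale sandbox is initialized so that j.tools.inifile is registered;
# the path, section and parameter names are placeholders.
#
#     ini = j.tools.inifile.open('/tmp/example.ini')   # created if missing
#     ini.addSection('db')
#     ini.addParam('db', 'host', 'localhost')          # addParam auto-writes
#     host = ini.getValue('db', 'host')
#     data = ini.getFileAsDict()                       # {'db': {'host': 'localhost'}}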
|
|
import json
import logging
import string
from sqlalchemy.sql import text
from marshmallow import validate
from rdr_service.code_constants import PPI_SYSTEM
from rdr_service.dao.resource_dao import ResourceDataDao
from rdr_service.resource import fields
# TODO: Rework these from BigQuery schemas to resource schemas...
class _QuestionnaireSchema:
"""
Helper for dynamically generating a Schema for a specific questionnaire
"""
_module = ''
_excluded_fields = ()
    _errors = ()
    def __init__(self, module_name, excluded_fields=None, *args, **kwargs):
        """
        :param module_name: Name of questionnaire module.
        :param excluded_fields: A list of excluded fields.
        """
        self._module = module_name
        # Use a per-instance list so error messages are not shared across
        # instances through a mutable class attribute.
        self._errors = []
        if excluded_fields:
            self._excluded_fields = excluded_fields
        super().__init__(*args, **kwargs)
def get_module_name(self):
""" Return the questionnaire module name """
return self._module
@staticmethod
def field_name_is_valid(name):
"""
Check that the field name meets naming requirements.
:param name: field name to check
:return: True if valid otherwise False, error message.
"""
        # Check and make sure there are no other characters that are not allowed.
        # Fields must contain only letters, numbers, and underscores, start with a
        # letter, digit or underscore, and be at most 64 characters long.
        allowed_chars = string.ascii_letters + string.digits + '_'
        if not all(c in allowed_chars for c in name):
            return False, f'Field {name} contains invalid characters, skipping.'
        if len(name) > 64:
            return False, f'Field {name} must be at most 64 characters, skipping.'
        if name[:1] not in string.ascii_letters and name[:1] not in string.digits and name[:1] != '_':
            return False, f'Field {name} must start with a letter, digit or underscore, skipping.'
        return True, ''
return True, ''
def module_fields(self):
"""
Look up questionnaire concept to get fields.
:return: dict of fields
"""
# Standard fields that must be in every questionnaire schema.
_schema = {
'id': fields.Int64(required=True),
'created': fields.DateTime(required=True),
'modified': fields.DateTime(required=True),
'authored': fields.DateTime(),
'language': fields.String(validate=validate.Length(max=2)),
'participant_id': fields.String(validate=validate.Length(max=10), required=True),
'questionnaire_response_id': fields.Int32(required=True),
'questionnaire_id': fields.Int32(required=True),
'external_id': fields.String(validate=validate.Length(max=100)),
'status': fields.String(validate=validate.Length(max=50)),
'status_id': fields.Int64(),
'test_participant': fields.Int64()
}
dao = ResourceDataDao(backup=True)
# DEPRECATED after RDR 1.85.2: Load module field data from the code table if available, using stored proc
# results = dao.call_proc('sp_get_code_module_items', args=[self._module])
# This query replaces the sp_get_code_module_items stored procedure, which does not support the
# DRC-managed codebooks where codes may be shared between modules. Columns are returned in the
# same order as the stored procedure returned them (as a debug aid for comparing results).
_question_codes_sql = """
select c.code_id,
c.parent_id,
c.topic,
c.code_type,
c.value,
c.display,
c.system,
c.mapped,
c.created,
c.code_book_id,
c.short_value
from code c
inner join (
select distinct qq.code_id
from questionnaire_question qq where qq.questionnaire_id in (
select qc.questionnaire_id from questionnaire_concept qc
where qc.code_id = (
select code_id from code c2 where c2.value = :module_id and system = :system
)
)
) qq2 on qq2.code_id = c.code_id
where c.system = :system
order by c.code_id;
"""
with dao.session() as session:
results = session.execute(_question_codes_sql, {'module_id': self._module, 'system': PPI_SYSTEM})
if results:
for row in results:
# Verify field name meets BigQuery requirements.
name = row['value']
is_valid, msg = self.field_name_is_valid(name)
if not is_valid:
self._errors.append(msg)
continue
_schema[name] = fields.Text()
# This query makes better use of the indexes.
_sql_term = text("""
select convert(qh.resource using utf8) as resource
from questionnaire_history qh
where qh.questionnaire_id = (
select max(questionnaire_id) as questionnaire_id
from questionnaire_concept qc
inner join code c on qc.code_id = c.code_id
where qc.code_id in (select c1.code_id from code c1 where c1.value = :mod and system = :system)
);
""")
result = session.execute(_sql_term, {'mod': self._module, 'system': PPI_SYSTEM}).first()
if not result:
return _schema
qn_mod = json.loads(result[0])
if 'resourceType' not in qn_mod or 'group' not in qn_mod:
return _schema
for qn in qn_mod['group']['question']:
                # To support
                #  1) the user-supplied answer,
                #  2) a skipped question, or
                #  3) a question the user was never shown,
                # we have to store all question responses as Strings in BigQuery.
if not qn['concept'][0].get('code', None):
continue
name = qn['concept'][0]['code']
if name in self._excluded_fields:
continue
# Verify field name meets BigQuery requirements.
is_valid, msg = self.field_name_is_valid(name)
if not is_valid:
logging.warning(msg)
continue
# flag duplicate fields.
found = False
for fld in _schema:
                    if fld.lower() == name.lower():  # _schema keys are plain field-name strings
found = True
break
if not found:
_schema[name] = fields.Text()
# There seems to be duplicate column definitions we need to remove in some of the modules.
# tmpflds = [i for n, i in enumerate(fields) if i not in fields[n + 1:]]
# return tmpflds
return _schema
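# Usage sketch (illustrative, not part of the original module):
# field_name_is_valid() is a plain static method, so it can be exercised
# without a database session; the field names below are made up.
#
#     ok, msg = _QuestionnaireSchema.field_name_is_valid('ConsentPII_EmailAddress')
#     # ok is True, msg is ''
#     ok, msg = _QuestionnaireSchema.field_name_is_valid('bad-name!')
#     # ok is False, msg explains that the field contains invalid characters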
#
# ConsentPII
#
class BQPDRConsentPIISchema(_QuestionnaireSchema):
""" ConsentPII Module """
_module = 'ConsentPII'
_excluded_fields = (
'ConsentPII_PIIName',
'PIIName_First',
'PIIName_Middle',
'PIIName_Last',
'ConsentPII_PIIAddress',
'PIIAddress_StreetAddress',
'PIIAddress_StreetAddress2',
'StreetAddress_PIICity',
'PIIContactInformation_Phone',
'ConsentPII_EmailAddress',
'EHRConsentPII_Signature',
'ExtraConsent_CABoRSignature',
'ExtraConsent_Signature',
'ConsentPII_HelpWithConsentSignature',
'PIIContactInformation_VerifiedPrimaryPhoneNumber',
'PIIContactInformation_Email',
'PIIBirthInformation_BirthDate',
'ConsentPII_VerifiedPrimaryPhoneNumber'
)
#
# TheBasics
#
class TheBasicsSchema(_QuestionnaireSchema):
""" TheBasics Module """
_module = 'TheBasics'
_excluded_fields = (
'TheBasics_CountryBornTextBox',
'RaceEthnicityNoneOfThese_RaceEthnicityFreeTextBox',
'WhatTribeAffiliation_FreeText',
'AIANNoneOfTheseDescribeMe_AIANFreeText',
'NoneOfTheseDescribeMe_AsianFreeText',
'BlackNoneOfTheseDescribeMe_BlackFreeText',
'MENANoneOfTheseDescribeMe_MENAFreeText',
'NHPINoneOfTheseDescribeMe_NHPIFreeText',
'WhiteNoneOfTheseDescribeMe_WhiteFreeText',
'HispanicNoneOfTheseDescribeMe_HispanicFreeText',
'SpecifiedGender_SpecifiedGenderTextBox',
'SomethingElse_SexualitySomethingElseTextBox',
'SexAtBirthNoneOfThese_SexAtBirthTextBox',
'LivingSituation_LivingSituationFreeText',
'SocialSecurity_SocialSecurityNumber',
'SecondaryContactInfo_FirstContactsInfo',
'SecondaryContactInfo_PersonOneFirstName',
'SecondaryContactInfo_PersonOneMiddleInitial',
'SecondaryContactInfo_PersonOneLastName',
'SecondaryContactInfo_PersonOneAddressOne',
'SecondaryContactInfo_PersonOneAddressTwo',
'SecondaryContactInfo_PersonOneEmail',
'SecondaryContactInfo_PersonOneTelephone',
'PersonOneAddress_PersonOneAddressCity',
'SecondaryContactInfo_SecondContactsFirstName',
'SecondaryContactInfo_SecondContactsMiddleInitial',
'SecondaryContactInfo_SecondContactsLastName',
'SecondaryContactInfo_SecondContactsAddressOne',
'SecondaryContactInfo_SecondContactsAddressTwo',
'SecondContactsAddress_SecondContactCity',
'SecondaryContactInfo_SecondContactsEmail',
'SecondaryContactInfo_SecondContactsNumber',
'EmploymentWorkAddress_AddressLineOne',
'EmploymentWorkAddress_AddressLineTwo',
'EmploymentWorkAddress_City',
'EmploymentWorkAddress_Country',
        'PersonOneAddress_PersonOneAddressZipCode',
        'SecondContactsAddress_SecondContactZipCode',
'OtherHealthPlan_FreeText'
)
#
# Lifestyle
#
class BQPDRLifestyleSchema(_QuestionnaireSchema):
""" Lifestyle Module """
_module = 'Lifestyle'
_excluded_fields = (
        'OtherSpecify_OtherDrugsTextBox',  # trailing comma keeps this a tuple rather than a str
)
#
# OverallHealthSchema
#
class BQPDROverallHealthSchema(_QuestionnaireSchema):
""" OverallHealth Module """
_module = 'OverallHealth'
_excluded_fields = (
'OrganTransplantDescription_OtherOrgan',
'OrganTransplantDescription_OtherTissue',
'OutsideTravel6Month_OutsideTravel6MonthWhereTraveled',
)
#
# EHRConsentPII
#
class BQPDREHRConsentPIISchema(_QuestionnaireSchema):
""" EHRConsentPII Module """
_module = 'EHRConsentPII'
_excluded_fields = (
'EHRConsentPII_Signature',
'EHRConsentPII_ILHIPPAWitnessSignature',
'EHRConsentPII_HelpWithConsentSignature',
'12MoEHRConsentPII_EmailCopy',
'30MoEHRConsentPII_EmailCopy'
)
#
# DVEHRSharing
#
class BQPDRDVEHRSharingSchema(_QuestionnaireSchema):
""" DVEHRSharing Module """
_module = 'DVEHRSharing'
_excluded_fields = (
'EHRConsentPII_Signature',
)
#
# FamilyHistory
#
class BQPDRFamilyHistorySchema(_QuestionnaireSchema):
""" FamilyHistory Module """
_module = 'FamilyHistory'
_excluded_fields = (
'DaughterDiagnosisHistory_WhichConditions',
'OtherCancer_DaughterFreeTextBox',
'OtherCancer_SonFreeTextBox',
'OtherCondition_DaughterFreeTextBox',
'OtherCondition_SonFreeTextBox',
'SonDiagnosisHistory_WhichConditions',
'OtherCancer_GrandparentFreeTextBox',
'OtherCondition_GrandparentFreeTextBox',
'FatherDiagnosisHistory_WhichConditions',
'MotherDiagnosisHistory_WhichConditions',
'OtherCancer_FatherFreeTextBox',
'OtherCancer_MotherFreeTextBox',
'OtherCondition_FatherFreeTextBox',
'OtherCondition_MotherFreeTextBox',
'OtherCancer_SiblingFreeTextBox',
'OtherCondition_SiblingFreeTextBox',
'SiblingDiagnosisHistory_WhichConditions',
)
#
# HealthcareAccess
#
class BQPDRHealthcareAccessSchema(_QuestionnaireSchema):
""" HealthcareAccess Module """
_module = 'HealthcareAccess'
_excluded_fields = (
'OtherDelayedMedicalCare_FreeText',
'OtherInsuranceType_FreeText',
)
#
# PersonalMedicalHistory
#
class BQPDRPersonalMedicalHistorySchema(_QuestionnaireSchema):
""" PersonalMedicalHistory Module """
_module = 'PersonalMedicalHistory'
_excluded_fields = (
'OtherHeartorBloodCondition_FreeTextBox',
'OtherRespiratory_FreeTextBox',
'OtherCancer_FreeTextBox',
'OtherDigestiveCondition_FreeTextBox',
'OtherDiabetes_FreeTextBox',
'OtherHormoneEndocrine_FreeTextBox',
'OtherThyroid_FreeTextBox',
'OtherKidneyCondition_FreeTextBox',
'OtherBoneJointMuscle_FreeTextBox',
'OtherArthritis_FreeTextBox',
'OtherHearingEye_FreeTextBox',
'OtherInfectiousDisease_FreeTextBox',
'OtherBrainNervousSystem_FreeTextBox',
'OtherMentalHealthSubstanceUse_FreeTextBox',
'OtherDiagnosis_FreeTextBox',
)
#
# COPE May Survey
#
class BQPDRCOPEMaySchema(_QuestionnaireSchema):
""" COPE Module """
_module = 'COPE'
_excluded_fields = ()
#
# COPE Nov Survey
#
class BQPDRCOPENovSchema(_QuestionnaireSchema):
""" COPE Module """
_module = 'cope_nov'
_excluded_fields = ()
#
# COPE Dec Survey
#
class BQPDRCOPEDecSchema(_QuestionnaireSchema):
""" COPE Module """
_module = 'cope_dec'
_excluded_fields = ()
#
# COPE Feb Survey
#
class BQPDRCOPEFebSchema(_QuestionnaireSchema):
""" COPE Module """
_module = 'cope_feb'
_excluded_fields = ()
|
|
import json
import requests
import sys
import siftpartner
from . import version
from . import response
API_URL = "https://partner.siftscience.com/v%s" % version.API_VERSION
API_TIMEOUT = 2
class Client(object):
def __init__(self, api_key=None, partner_id=None):
""" Initialize the client
:param api_key: Your Sift Science Partner API key associated with your
partner account. This can be found at
            https://siftscience.com/console/api-keys
:param partner_id: Your partner account id, which can be found at
https://siftscience.com/console/settings
"""
if sys.version_info.major < 3:
self.UNICODE_STRING = basestring
else:
self.UNICODE_STRING = str
# set api key to module scoped key if not specified
if api_key is None:
api_key = siftpartner.api_key
# set partner id to module scoped key if not specified
if partner_id is None:
partner_id = siftpartner.partner_id
self.validate_argument(api_key, 'API key', self.UNICODE_STRING)
self.validate_argument(partner_id, 'Partner ID', self.UNICODE_STRING)
self.api_key = api_key
self.partner_id = partner_id
@staticmethod
def user_agent():
return 'SiftScience/v%s sift-partner-python/%s' % (version.API_VERSION,
version.VERSION)
def accounts_url(self):
return API_URL + "/partners/%s/accounts" % self.partner_id
def notifications_config_url(self):
return API_URL + "/accounts/%s/config" % self.partner_id
def validate_argument(self, argument, name, arg_type):
if not isinstance(argument, arg_type) or (
isinstance(argument, self.UNICODE_STRING)
and len(argument.strip()) == 0
):
raise RuntimeError(name + " must be a " + str(arg_type))
def new_account(self, site_url, site_email, analyst_email, password):
""" Creates a new merchant account
:param site_url: the url of the merchant site
:param site_email: an email address for the merchant
:param analyst_email: an email address which will be used to log in
at the Sift Console
:param password: password (at least 10 chars) to be used to sign into
the Console
        :return: When successful, returns a dict including the new account id
                 and credentials.
                 When an error occurs, an exception is raised.
"""
self.validate_argument(site_url, 'Site url', self.UNICODE_STRING)
self.validate_argument(site_email, 'Site email', self.UNICODE_STRING)
self.validate_argument(analyst_email, 'Analyst email',
self.UNICODE_STRING)
self.validate_argument(password, 'Password', self.UNICODE_STRING)
properties = {'site_url': site_url,
'site_email': site_email,
'analyst_email': analyst_email,
'password': password
}
headers = {'Content-Type': 'application/json',
'Authorization': 'Basic ' + self.api_key,
'User-Agent': self.user_agent()
}
params = {}
try:
res = requests.post(self.accounts_url(),
data=json.dumps(properties),
headers=headers,
timeout=API_TIMEOUT,
params=params
)
return response.Response(res)
except requests.exceptions.RequestException as e:
raise e
    def get_accounts(self, next_ref=None):
        """Gets a listing of the ids and keys for merchant accounts that
           have been created by this partner. Results limited to 100 accounts
           per request.
           :param next_ref: pagination reference returned by a previous call,
               or None to fetch the first page.
           :return: When successful, returns a dict including the key data, which
is an array of account descriptions. (Each element has the
same structure as a single response from new_account). If
has_more is true, pass the value of next_ref back into this
function to get the next set of results.
When an error occurs, an exception is raised.
"""
headers = {'Authorization': 'Basic ' + self.api_key,
'User-Agent': self.user_agent()
}
try:
res = requests.get(self.accounts_url() if not next_ref else next_ref,
headers=headers,
timeout=API_TIMEOUT
)
return response.Response(res)
except requests.exceptions.RequestException as e:
raise e
def update_notification_config(
self,
            notification_url=None,
            notification_threshold=None
):
""" Updates the configuration which controls http notifications for
all merchant accounts under this partner.
        :param notification_url: A String which determines the url to which
            the POST notifications go, containing the string '%s' exactly
            once. This allows the url to be used as a template, into which a
            merchant account id can be substituted.
:param notification_threshold: A floating point number between 0.0 and
1.0, determining the score threshold at which to push
notifications. It represents the Sift Score/100
:return: When successful, a dict is returned containing the new
notification configuration.
When an error occurs, an exception is raised.
DEPRECATED USE:
          notification_url may also be a dict, with keys
http_notification_url and http_notification_threshold.
The value of the notification_url will be a url containing the
string '%s' exactly once. This allows the url to be used as a
template, into which a merchant account id can be substituted.
The notification threshold should be a floating point number
between 0.0 and 1.0
"""
properties = {}
# This is for backwards compatibility....DEPRECATED
if isinstance(notification_url, dict):
properties = notification_url
# This is for support of the new way of doing things
else:
# for each of the parameters, only set them if they are not None
if notification_url is not None:
self.validate_argument(notification_url,
'Notification url',
self.UNICODE_STRING
)
properties['http_notification_url'] = notification_url
if notification_threshold is not None:
self.validate_argument(notification_threshold,
'Notification threshold',
float
)
properties['http_notification_threshold'] = notification_threshold
headers = {'Content-Type': 'application/json',
'Authorization': 'Basic ' + self.api_key,
'User-Agent': self.user_agent()
}
try:
res = requests.put(self.notifications_config_url(),
data=json.dumps(properties),
headers=headers,
timeout=API_TIMEOUT
)
return response.Response(res)
except requests.exceptions.RequestException as e:
raise e
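# Usage sketch (illustrative, not part of the original module). The key,
# partner id and account details below are placeholders; real network access
# to the partner API is required.
#
#     client = Client(api_key='YOUR_PARTNER_API_KEY', partner_id='YOUR_PARTNER_ID')
#     resp = client.new_account('https://merchant.example.com',
#                               'site@merchant.example.com',
#                               'analyst@merchant.example.com',
#                               'a-password-of-10-or-more-chars')
#     accounts = client.get_accounts()            # first page of up to 100
#     cfg = client.update_notification_config(
#         notification_url='https://partner.example.com/notify/%s',
#         notification_threshold=0.8)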
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request(
resource_group_name: str,
workspace_name: str,
storage_insight_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/storageInsightConfigs/{storageInsightName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
"storageInsightName": _SERIALIZER.url("storage_insight_name", storage_insight_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_request(
resource_group_name: str,
workspace_name: str,
storage_insight_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/storageInsightConfigs/{storageInsightName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
"storageInsightName": _SERIALIZER.url("storage_insight_name", storage_insight_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request(
resource_group_name: str,
workspace_name: str,
storage_insight_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-08-01"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/storageInsightConfigs/{storageInsightName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
"storageInsightName": _SERIALIZER.url("storage_insight_name", storage_insight_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
**kwargs
)
def build_list_by_workspace_request(
resource_group_name: str,
workspace_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/storageInsightConfigs')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class StorageInsightConfigsOperations(object):
"""StorageInsightConfigsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.loganalytics.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def create_or_update(
self,
resource_group_name: str,
workspace_name: str,
storage_insight_name: str,
parameters: "_models.StorageInsight",
**kwargs: Any
) -> "_models.StorageInsight":
"""Create or update a storage insight.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param storage_insight_name: Name of the storageInsightsConfigs resource.
:type storage_insight_name: str
:param parameters: The parameters required to create or update a storage insight.
:type parameters: ~azure.mgmt.loganalytics.models.StorageInsight
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageInsight, or the result of cls(response)
:rtype: ~azure.mgmt.loganalytics.models.StorageInsight
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageInsight"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'StorageInsight')
request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
storage_insight_name=storage_insight_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('StorageInsight', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('StorageInsight', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/storageInsightConfigs/{storageInsightName}'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
workspace_name: str,
storage_insight_name: str,
**kwargs: Any
) -> "_models.StorageInsight":
"""Gets a storage insight instance.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param storage_insight_name: Name of the storageInsightsConfigs resource.
:type storage_insight_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageInsight, or the result of cls(response)
:rtype: ~azure.mgmt.loganalytics.models.StorageInsight
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageInsight"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
storage_insight_name=storage_insight_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageInsight', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/storageInsightConfigs/{storageInsightName}'} # type: ignore
@distributed_trace
def delete(
self,
resource_group_name: str,
workspace_name: str,
storage_insight_name: str,
**kwargs: Any
) -> None:
"""Deletes a storageInsightsConfigs resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param storage_insight_name: Name of the storageInsightsConfigs resource.
:type storage_insight_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
storage_insight_name=storage_insight_name,
subscription_id=self._config.subscription_id,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/storageInsightConfigs/{storageInsightName}'} # type: ignore
@distributed_trace
def list_by_workspace(
self,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> Iterable["_models.StorageInsightListResult"]:
"""Lists the storage insight instances within a workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either StorageInsightListResult or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.loganalytics.models.StorageInsightListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageInsightListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_workspace_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
template_url=self.list_by_workspace.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_workspace_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("StorageInsightListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_workspace.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/storageInsightConfigs'} # type: ignore
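# Usage sketch (illustrative, not part of the generated module). These
# operations are normally reached through the generated management client
# rather than instantiated directly; assuming azure-identity is installed,
# something along these lines should work (resource names are placeholders):
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.loganalytics import LogAnalyticsManagementClient
#
#     client = LogAnalyticsManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     for insight in client.storage_insight_configs.list_by_workspace("my-rg", "my-workspace"):
#         print(insight.name)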
|
|
#!/usr/bin/env python3
import datetime
import os
import time
from collections import namedtuple
from typing import Dict, Optional, Tuple
import psutil
from smbus2 import SMBus
import cereal.messaging as messaging
from cereal import log
from common.filter_simple import FirstOrderFilter
from common.hardware import EON, HARDWARE, TICI
from common.numpy_fast import clip, interp
from common.params import Params
from common.realtime import DT_TRML, sec_since_boot
from selfdrive.controls.lib.alertmanager import set_offroad_alert
from selfdrive.loggerd.config import get_available_percent
from selfdrive.pandad import get_expected_signature
from selfdrive.swaglog import cloudlog
from selfdrive.thermald.power_monitoring import (PowerMonitoring,
get_battery_capacity,
get_battery_current,
get_battery_status,
get_battery_voltage,
get_usb_present)
from selfdrive.version import get_git_branch, terms_version, training_version
ThermalConfig = namedtuple('ThermalConfig', ['cpu', 'gpu', 'mem', 'bat', 'ambient'])
FW_SIGNATURE = get_expected_signature()
ThermalStatus = log.ThermalData.ThermalStatus
NetworkType = log.ThermalData.NetworkType
NetworkStrength = log.ThermalData.NetworkStrength
CURRENT_TAU = 15. # 15s time constant
CPU_TEMP_TAU = 5. # 5s time constant
DAYS_NO_CONNECTIVITY_MAX = 7  # do not allow engagement after a week without internet
DAYS_NO_CONNECTIVITY_PROMPT = 4 # send an offroad prompt after 4 days with no internet
DISCONNECT_TIMEOUT = 5. # wait 5 seconds before going offroad after disconnect so you get an alert
prev_offroad_states: Dict[str, Tuple[bool, Optional[str]]] = {}
LEON = False
last_eon_fan_val = None
def get_thermal_config():
# (tz, scale)
if EON:
return ThermalConfig(cpu=((5, 7, 10, 12), 10), gpu=((16,), 10), mem=(2, 10), bat=(29, 1000), ambient=(25, 1))
elif TICI:
return ThermalConfig(cpu=((1, 2, 3, 4, 5, 6, 7, 8), 1000), gpu=((48,49), 1000), mem=(15, 1000), bat=(None, 1), ambient=(70, 1000))
else:
return ThermalConfig(cpu=((None,), 1), gpu=((None,), 1), mem=(None, 1), bat=(None, 1), ambient=(None, 1))
def read_tz(x):
if x is None:
return 0
try:
with open("/sys/devices/virtual/thermal/thermal_zone%d/temp" % x) as f:
return int(f.read())
except FileNotFoundError:
return 0
def read_thermal(thermal_config):
dat = messaging.new_message('thermal')
dat.thermal.cpu = [read_tz(z) / thermal_config.cpu[1] for z in thermal_config.cpu[0]]
dat.thermal.gpu = [read_tz(z) / thermal_config.gpu[1] for z in thermal_config.gpu[0]]
dat.thermal.mem = read_tz(thermal_config.mem[0]) / thermal_config.mem[1]
dat.thermal.ambient = read_tz(thermal_config.ambient[0]) / thermal_config.ambient[1]
dat.thermal.bat = read_tz(thermal_config.bat[0]) / thermal_config.bat[1]
return dat
def setup_eon_fan():
global LEON
os.system("echo 2 > /sys/module/dwc3_msm/parameters/otg_switch")
bus = SMBus(7, force=True)
try:
bus.write_byte_data(0x21, 0x10, 0xf) # mask all interrupts
bus.write_byte_data(0x21, 0x03, 0x1) # set drive current and global interrupt disable
bus.write_byte_data(0x21, 0x02, 0x2) # needed?
bus.write_byte_data(0x21, 0x04, 0x4) # manual override source
except IOError:
print("LEON detected")
LEON = True
bus.close()
def set_eon_fan(val):
global LEON, last_eon_fan_val
if last_eon_fan_val is None or last_eon_fan_val != val:
bus = SMBus(7, force=True)
if LEON:
try:
i = [0x1, 0x3 | 0, 0x3 | 0x08, 0x3 | 0x10][val]
bus.write_i2c_block_data(0x3d, 0, [i])
except IOError:
# tusb320
if val == 0:
bus.write_i2c_block_data(0x67, 0xa, [0])
#bus.write_i2c_block_data(0x67, 0x45, [1<<2])
else:
#bus.write_i2c_block_data(0x67, 0x45, [0])
bus.write_i2c_block_data(0x67, 0xa, [0x20])
bus.write_i2c_block_data(0x67, 0x8, [(val - 1) << 6])
else:
bus.write_byte_data(0x21, 0x04, 0x2)
bus.write_byte_data(0x21, 0x03, (val*2)+1)
bus.write_byte_data(0x21, 0x04, 0x4)
bus.close()
last_eon_fan_val = val
# temp thresholds to control fan speed - high hysteresis
_TEMP_THRS_H = [50., 65., 80., 10000]
# temp thresholds to control fan speed - low hysteresis
_TEMP_THRS_L = [42.5, 57.5, 72.5, 10000]
# fan speed options
_FAN_SPEEDS = [0, 16384, 32768, 65535]
# max fan speed only allowed if battery is hot
_BAT_TEMP_THRESHOLD = 45.
def handle_fan_eon(max_cpu_temp, bat_temp, fan_speed, ignition):
new_speed_h = next(speed for speed, temp_h in zip(_FAN_SPEEDS, _TEMP_THRS_H) if temp_h > max_cpu_temp)
new_speed_l = next(speed for speed, temp_l in zip(_FAN_SPEEDS, _TEMP_THRS_L) if temp_l > max_cpu_temp)
if new_speed_h > fan_speed:
# update speed if using the high thresholds results in fan speed increment
fan_speed = new_speed_h
elif new_speed_l < fan_speed:
# update speed if using the low thresholds results in fan speed decrement
fan_speed = new_speed_l
if bat_temp < _BAT_TEMP_THRESHOLD:
# no max fan speed unless battery is hot
fan_speed = min(fan_speed, _FAN_SPEEDS[-2])
set_eon_fan(fan_speed // 16384)
return fan_speed
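# Worked example of the hysteresis above, using the tables as given: at
# max_cpu_temp=60.0 the high-hysteresis table selects 16384 (first threshold
# above 60 is 65) and the low-hysteresis table selects 32768 (first threshold
# above 60 is 72.5). A fan currently at 0 therefore steps up to 16384, while
# a fan already at 32768 stays at 32768 until the temperature drops below
# 57.5 (step down) or climbs above 80 (step up), preventing rapid oscillation.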
def handle_fan_uno(max_cpu_temp, bat_temp, fan_speed, ignition):
new_speed = int(interp(max_cpu_temp, [40.0, 80.0], [0, 80]))
if not ignition:
new_speed = min(30, new_speed)
return new_speed
def set_offroad_alert_if_changed(offroad_alert: str, show_alert: bool, extra_text: Optional[str]=None):
if prev_offroad_states.get(offroad_alert, None) == (show_alert, extra_text):
return
prev_offroad_states[offroad_alert] = (show_alert, extra_text)
set_offroad_alert(offroad_alert, show_alert, extra_text)
def thermald_thread():
health_timeout = int(1000 * 2.5 * DT_TRML) # 2.5x the expected health frequency
# now loop
thermal_sock = messaging.pub_sock('thermal')
health_sock = messaging.sub_sock('health', timeout=health_timeout)
location_sock = messaging.sub_sock('gpsLocation')
fan_speed = 0
count = 0
startup_conditions = {
"ignition": False,
}
startup_conditions_prev = startup_conditions.copy()
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
usb_power = True
current_branch = get_git_branch()
network_type = NetworkType.none
network_strength = NetworkStrength.unknown
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
cpu_temp_filter = FirstOrderFilter(0., CPU_TEMP_TAU, DT_TRML)
health_prev = None
should_start_prev = False
handle_fan = None
is_uno = False
has_relay = False
params = Params()
pm = PowerMonitoring()
no_panda_cnt = 0
thermal_config = get_thermal_config()
while 1:
health = messaging.recv_sock(health_sock, wait=True)
location = messaging.recv_sock(location_sock)
location = location.gpsLocation if location else None
msg = read_thermal(thermal_config)
if health is not None:
usb_power = health.health.usbPowerMode != log.HealthData.UsbPowerMode.client
# If we lose connection to the panda, wait 5 seconds before going offroad
if health.health.hwType == log.HealthData.HwType.unknown:
no_panda_cnt += 1
if no_panda_cnt > DISCONNECT_TIMEOUT / DT_TRML:
if startup_conditions["ignition"]:
cloudlog.error("Lost panda connection while onroad")
startup_conditions["ignition"] = False
else:
no_panda_cnt = 0
startup_conditions["ignition"] = health.health.ignitionLine or health.health.ignitionCan
# Setup fan handler on first connect to panda
if handle_fan is None and health.health.hwType != log.HealthData.HwType.unknown:
is_uno = health.health.hwType == log.HealthData.HwType.uno
has_relay = health.health.hwType in [log.HealthData.HwType.blackPanda, log.HealthData.HwType.uno, log.HealthData.HwType.dos]
if (not EON) or is_uno:
cloudlog.info("Setting up UNO fan handler")
handle_fan = handle_fan_uno
else:
cloudlog.info("Setting up EON fan handler")
setup_eon_fan()
handle_fan = handle_fan_eon
# Handle disconnect
if health_prev is not None:
if health.health.hwType == log.HealthData.HwType.unknown and \
health_prev.health.hwType != log.HealthData.HwType.unknown:
params.panda_disconnect()
health_prev = health
# get_network_type is an expensive call. update every 10s
if (count % int(10. / DT_TRML)) == 0:
try:
network_type = HARDWARE.get_network_type()
network_strength = HARDWARE.get_network_strength(network_type)
except Exception:
cloudlog.exception("Error getting network status")
msg.thermal.freeSpace = get_available_percent(default=100.0) / 100.0
msg.thermal.memUsedPercent = int(round(psutil.virtual_memory().percent))
msg.thermal.cpuPerc = int(round(psutil.cpu_percent()))
msg.thermal.networkType = network_type
msg.thermal.networkStrength = network_strength
msg.thermal.batteryPercent = get_battery_capacity()
msg.thermal.batteryStatus = get_battery_status()
msg.thermal.batteryCurrent = get_battery_current()
msg.thermal.batteryVoltage = get_battery_voltage()
msg.thermal.usbOnline = get_usb_present()
# Fake battery levels on uno for frame
if (not EON) or is_uno:
msg.thermal.batteryPercent = 100
msg.thermal.batteryStatus = "Charging"
msg.thermal.bat = 0
current_filter.update(msg.thermal.batteryCurrent / 1e6)
# TODO: add car battery voltage check
max_cpu_temp = cpu_temp_filter.update(max(msg.thermal.cpu))
max_comp_temp = max(max_cpu_temp, msg.thermal.mem, max(msg.thermal.gpu))
bat_temp = msg.thermal.bat
if handle_fan is not None:
fan_speed = handle_fan(max_cpu_temp, bat_temp, fan_speed, startup_conditions["ignition"])
msg.thermal.fanSpeed = fan_speed
# If device is offroad we want to cool down before going onroad
# since going onroad increases load and can make temps go over 107
# We only do this if there is a relay that prevents the car from faulting
is_offroad_for_5_min = (started_ts is None) and ((not started_seen) or (off_ts is None) or (sec_since_boot() - off_ts > 60 * 5))
if max_cpu_temp > 107. or bat_temp >= 63. or (has_relay and is_offroad_for_5_min and max_cpu_temp > 70.0):
# onroad not allowed
thermal_status = ThermalStatus.danger
elif max_comp_temp > 96.0 or bat_temp > 60.:
# hysteresis between onroad not allowed and engage not allowed
thermal_status = clip(thermal_status, ThermalStatus.red, ThermalStatus.danger)
elif max_cpu_temp > 94.0:
# hysteresis between engage not allowed and uploader not allowed
thermal_status = clip(thermal_status, ThermalStatus.yellow, ThermalStatus.red)
elif max_cpu_temp > 80.0:
# uploader not allowed
thermal_status = ThermalStatus.yellow
elif max_cpu_temp > 75.0:
# hysteresis between uploader not allowed and all good
thermal_status = clip(thermal_status, ThermalStatus.green, ThermalStatus.yellow)
else:
# all good
thermal_status = ThermalStatus.green
# **** starting logic ****
# Check for last update time and display alerts if needed
now = datetime.datetime.utcnow()
# show invalid date/time alert
startup_conditions["time_valid"] = now.year >= 2019
set_offroad_alert_if_changed("Offroad_InvalidTime", (not startup_conditions["time_valid"]))
# Show update prompt
try:
last_update = datetime.datetime.fromisoformat(params.get("LastUpdateTime", encoding='utf8'))
except (TypeError, ValueError):
last_update = now
dt = now - last_update
update_failed_count = params.get("UpdateFailedCount")
update_failed_count = 0 if update_failed_count is None else int(update_failed_count)
last_update_exception = params.get("LastUpdateException", encoding='utf8')
if update_failed_count > 15 and last_update_exception is not None:
if current_branch in ["release2", "dashcam"]:
extra_text = "Ensure the software is correctly installed"
else:
extra_text = last_update_exception
set_offroad_alert_if_changed("Offroad_ConnectivityNeeded", False)
set_offroad_alert_if_changed("Offroad_ConnectivityNeededPrompt", False)
set_offroad_alert_if_changed("Offroad_UpdateFailed", True, extra_text=extra_text)
elif dt.days > DAYS_NO_CONNECTIVITY_MAX and update_failed_count > 1:
set_offroad_alert_if_changed("Offroad_UpdateFailed", False)
set_offroad_alert_if_changed("Offroad_ConnectivityNeededPrompt", False)
set_offroad_alert_if_changed("Offroad_ConnectivityNeeded", True)
elif dt.days > DAYS_NO_CONNECTIVITY_PROMPT:
remaining_time = str(max(DAYS_NO_CONNECTIVITY_MAX - dt.days, 0))
set_offroad_alert_if_changed("Offroad_UpdateFailed", False)
set_offroad_alert_if_changed("Offroad_ConnectivityNeeded", False)
set_offroad_alert_if_changed("Offroad_ConnectivityNeededPrompt", True, extra_text=f"{remaining_time} days.")
else:
set_offroad_alert_if_changed("Offroad_UpdateFailed", False)
set_offroad_alert_if_changed("Offroad_ConnectivityNeeded", False)
set_offroad_alert_if_changed("Offroad_ConnectivityNeededPrompt", False)
startup_conditions["not_uninstalling"] = not params.get("DoUninstall") == b"1"
startup_conditions["accepted_terms"] = params.get("HasAcceptedTerms") == terms_version
completed_training = params.get("CompletedTrainingVersion") == training_version
panda_signature = params.get("PandaFirmware")
startup_conditions["fw_version_match"] = (panda_signature is None) or (panda_signature == FW_SIGNATURE) # don't show alert is no panda is connected (None)
set_offroad_alert_if_changed("Offroad_PandaFirmwareMismatch", (not startup_conditions["fw_version_match"]))
# with 2% left, we killall, otherwise the phone will take a long time to boot
startup_conditions["free_space"] = msg.thermal.freeSpace > 0.02
startup_conditions["completed_training"] = completed_training or (current_branch in ['dashcam', 'dashcam-staging'])
startup_conditions["not_driver_view"] = not params.get("IsDriverViewEnabled") == b"1"
startup_conditions["not_taking_snapshot"] = not params.get("IsTakingSnapshot") == b"1"
# if any CPU gets above 107 or the battery gets above 63, kill all processes
# controls will warn with CPU above 95 or battery above 60
startup_conditions["device_temp_good"] = thermal_status < ThermalStatus.danger
set_offroad_alert_if_changed("Offroad_TemperatureTooHigh", (not startup_conditions["device_temp_good"]))
should_start = all(startup_conditions.values())
if should_start:
if not should_start_prev:
params.delete("IsOffroad")
off_ts = None
if started_ts is None:
started_ts = sec_since_boot()
started_seen = True
os.system('echo performance > /sys/class/devfreq/soc:qcom,cpubw/governor')
else:
if startup_conditions["ignition"] and (startup_conditions != startup_conditions_prev):
cloudlog.event("Startup blocked", startup_conditions=startup_conditions)
if should_start_prev or (count == 0):
params.put("IsOffroad", "1")
started_ts = None
if off_ts is None:
off_ts = sec_since_boot()
os.system('echo powersave > /sys/class/devfreq/soc:qcom,cpubw/governor')
# Offroad power monitoring
pm.calculate(health)
msg.thermal.offroadPowerUsage = pm.get_power_used()
msg.thermal.carBatteryCapacity = max(0, pm.get_car_battery_capacity())
# Check if we need to disable charging (handled by boardd)
msg.thermal.chargingDisabled = pm.should_disable_charging(health, off_ts)
# Check if we need to shut down
if pm.should_shutdown(health, off_ts, started_seen, LEON):
cloudlog.info(f"shutting device down, offroad since {off_ts}")
# TODO: add function for blocking cloudlog instead of sleep
time.sleep(10)
os.system('LD_LIBRARY_PATH="" svc power shutdown')
msg.thermal.chargingError = current_filter.x > 0. and msg.thermal.batteryPercent < 90 # if current is positive, then battery is being discharged
msg.thermal.started = started_ts is not None
msg.thermal.startedTs = int(1e9*(started_ts or 0))
msg.thermal.thermalStatus = thermal_status
thermal_sock.send(msg.to_bytes())
set_offroad_alert_if_changed("Offroad_ChargeDisabled", (not usb_power))
should_start_prev = should_start
startup_conditions_prev = startup_conditions.copy()
# report to server once per minute
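# With a loop period of DT_TRML seconds, (count % int(60. / DT_TRML)) == 0
# holds exactly once per minute.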
if (count % int(60. / DT_TRML)) == 0:
cloudlog.event("STATUS_PACKET",
count=count,
health=(health.to_dict() if health else None),
location=(location.to_dict() if location else None),
thermal=msg.to_dict())
count += 1
def main():
thermald_thread()
if __name__ == "__main__":
main()
|
|
__author__ = 'leif'
from django.shortcuts import render
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
from treconomics.experiment_functions import get_experiment_context
from treconomics.experiment_functions import log_event
from survey.views import handle_survey
from models import AnitaPreTaskSurveyForm, AnitaPostTask0SurveyForm, AnitaPostTask1SurveyForm, \
AnitaPostTask2SurveyForm, AnitaPostTask3SurveyForm, SnippetPostTaskSurveyForm, SystemSnippetPostTaskSurveyForm, SnippetPreTaskTopicKnowledgeSurveyForm
from models import AnitaDemographicsSurveyForm, AnitaExit1SurveyForm, AnitaExit2SurveyForm, \
AnitaExit3SurveyForm
from models import AnitaConsentForm
from models import MickeyPostTaskSurveyForm, SnippetDemographicsSurveyForm, SnippetExitSurveyForm
from models import BehaveDiversityPostTaskSurveyForm, SystemDiversityPostTaskSurveyForm, DiversityExitSurveyForm
from treconomics.models import TaskDescription, DocumentsExamined
from django.http import JsonResponse
def diversity_end_stats(request, taskid):
    """Return end-of-task statistics for the diversity experiment as JSON:
    the number of documents the user marked relevant for this task and,
    if present in the experiment context, the 'target' value.
    """
ec = get_experiment_context(request)
uname = ec['username']
resp = {}
if 'target' in ec:
resp['target'] = ec['target']
u = User.objects.get(username=uname)
docs = DocumentsExamined.objects.filter(user=u).filter(task=taskid).filter(judgement=1)
resp['marked'] = len(docs)
return JsonResponse(resp)
def handle_task_and_questions_survey(request, taskid, SurveyForm, survey_name, action, template, show_topic=True):
request.session['taskid'] = taskid
ec = get_experiment_context(request)
condition = ec["condition"]
topicnum = ec["topicnum"]
interface = ec["interface"]
diversity = ec["diversity"]
t = TaskDescription.objects.get(topic_num=topicnum)
errors = ""
uname = request.user.username
u = User.objects.get(username=uname)
request.session['diversity'] = diversity
# Handle POST within this view: save the responses to the survey table.
if request.method == 'POST':
form = SurveyForm(request.POST)
if form.is_valid():
obj = form.save(commit=False)
obj.user = u
obj.task_id = taskid
obj.topic_num = topicnum
obj.condition = condition
obj.interface = interface
obj.diversity = diversity
obj.save()
log_event(event=survey_name.upper() + "_SURVEY_COMPLETED", request=request)
return redirect('next')
else:
print form.errors
errors = form.errors
survey = SurveyForm(request.POST)
else:
log_event(event=survey_name.upper() + "_SURVEY_STARTED", request=request)
survey = SurveyForm()
# if we had survey questions we could ask them here
# else we can provide a link to a hosted questionnaire
action_url = action + taskid + '/'
# provide link to search interface / next system
context_dict = {'participant': uname,
'condition': condition,
'interface': interface,
'diversity': diversity,
'task': taskid,
'topic': t.topic_num,
'tasktitle': t.title,
'taskdescription': t.description,
'diversify': t.diversify,
'formset': survey,
'action': action_url,
'errors': errors,
'show_topic': show_topic}
return render(request, template, context_dict)
@login_required
def view_alt_pretask_survey(request, taskid):
return handle_task_and_questions_survey(request, taskid, AnitaPreTaskSurveyForm, 'ANITA_PRETASK',
'/treconomics/anitapretasksurvey/', 'survey/anita_pretask_survey.html')
@login_required
def view_alt_posttask0_survey(request, taskid):
return handle_task_and_questions_survey(request, taskid, AnitaPostTask0SurveyForm, 'ANITA_POSTTASK0',
'/treconomics/anitaposttask0survey/', 'survey/anita_posttask_survey.html')
@login_required
def view_alt_posttask1_survey(request, taskid):
return handle_task_and_questions_survey(request, taskid, AnitaPostTask1SurveyForm, 'ANITA_POSTTASK1',
'/treconomics/anitaposttask1survey/', 'survey/anita_posttask_survey.html')
@login_required
def view_anita_posttask2_survey(request, taskid):
return handle_task_and_questions_survey(request, taskid, AnitaPostTask2SurveyForm, 'ANITA_POSTTASK2',
'/treconomics/anitaposttask2survey/', 'survey/anita_posttask_survey.html')
@login_required
def view_alt_posttask3_survey(request, taskid):
return handle_task_and_questions_survey(request, taskid, AnitaPostTask3SurveyForm, 'ANITA_POSTTASK3',
'/treconomics/anitaposttask3survey/', 'survey/anita_posttask_survey.html')
@login_required
def view_alt_demographic_survey(request):
name = 'demographics'
return handle_survey(request, SnippetDemographicsSurveyForm, name, reverse(name),
'survey/demographics_survey.html')
@login_required
def view_snippet_exit_survey(request):
return handle_survey(request, SnippetExitSurveyForm, 'SNIPPET_EXIT', '/treconomics/snippetexitsurvey/',
'survey/anita_exit1_survey.html')
@login_required
def view_alt_exit1_survey(request):
return handle_survey(request, AnitaExit1SurveyForm, 'EXIT1', '/treconomics/anitaexit1survey/',
'survey/anita_exit1_survey.html')
@login_required
def view_alt_exit2_survey(request):
return handle_survey(request, AnitaExit2SurveyForm, 'EXIT2', '/treconomics/anitaexit2survey/',
'survey/anita_exit2_survey.html')
@login_required
def view_alt_exit3_survey(request):
return handle_survey(request, AnitaExit3SurveyForm, 'EXIT3', '/treconomics/anitaexit3survey/',
'survey/anita_exit3_survey.html')
@login_required
def view_consent(request):
ec = get_experiment_context(request)
uname = ec["username"]
condition = ec["condition"]
errors = ""
uname = request.user.username
u = User.objects.get(username=uname)
# Handle POST within this view: save the responses to the survey table.
if request.method == 'POST':
form = AnitaConsentForm(request.POST)
if form.is_valid():
obj = form.save(commit=False)
obj.user = u
obj.save()
log_event("CONSENT_COMPLETED", request=request)
return redirect('next')
else:
print form.errors
errors = form.errors
survey = AnitaConsentForm(request.POST)
else:
log_event("CONSENT_STARTED", request=request)
survey = AnitaConsentForm()
# provide link to search interface / next system
context_dict = {'participant': uname,
'condition': condition,
'formset': survey,
'action': '/treconomics/consent/',
'errors': errors}
return render(request, 'survey/anita_consent_form.html', context_dict)
#@login_required
#def view_snippet_posttask(request, taskid):
# return handle_task_and_questions_survey(request, taskid, MickeyPostTaskSurveyForm, 'MICKEY_POSTTASK',
# '/treconomics/mickeyposttask/', 'survey/mickey_posttask_survey.html')
@login_required
def view_snippet_posttask(request, taskid):
return handle_task_and_questions_survey(request, taskid, SnippetPostTaskSurveyForm, 'SNIPPET_POSTTASK',
'/treconomics/snippetposttask/', 'survey/snippet_posttask_survey.html')
@login_required
def view_system_snippet_posttask(request, taskid):
return handle_task_and_questions_survey(request, taskid, SystemSnippetPostTaskSurveyForm, 'SYSTEM_SNIPPET_POSTTASK',
'/treconomics/systemsnippetposttask/', 'survey/system_snippet_posttask_survey.html')
@login_required
def view_snippet_pretask(request, taskid):
return handle_task_and_questions_survey(request, taskid, SnippetPreTaskTopicKnowledgeSurveyForm, 'SNIPPET_PRETASK',
'/treconomics/snippetpretask/', 'base/pre_task_with_questions.html')
#### NEW SURVEY VIEWS FOR DIVERSITY
@login_required
def view_diversity_posttask(request, taskid):
# This view is called after the task has been completed.
log_event(request=request, event="TASK_ENDED")
return handle_task_and_questions_survey(request, taskid, BehaveDiversityPostTaskSurveyForm, 'DIVERSITY_POSTTASK',
'/treconomics/diversityposttask/', 'survey/diversity_posttask_survey.html')
@login_required
def view_system_diversity_posttask(request, taskid):
return handle_task_and_questions_survey(request, taskid, SystemDiversityPostTaskSurveyForm, 'SYSTEM_DIVERSITY_POSTTASK',
'/treconomics/systemdiversityposttask/', 'survey/system_diversity_posttask_survey.html')
@login_required
def view_diversity_exit_survey(request):
return handle_survey(request, DiversityExitSurveyForm, 'DIVERSITY_EXIT', '/treconomics/diversityexitsurvey/',
'survey/anita_exit1_survey.html')
|
|
"""Host Reservation DHCPv6"""
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_control
import srv_msg
import misc
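# Each test below drives the standard DHCPv6 four-message exchange
# (SOLICIT -> ADVERTISE, then REQUEST -> REPLY) against a configured Kea
# server, and then checks whether the reserved address (IA_NA, options 3/5),
# delegated prefix (IA_PD, options 25/26) and FQDN (option 39) appear,
# depending on whether the client's identifier matches the reservation.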
@pytest.mark.v6
@pytest.mark.host_reservation
@pytest.mark.kea_only
def test_v6_host_reservation_all_values_mac():
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
srv_control.host_reservation_in_subnet('hostname',
'reserved-hostname',
0,
'hw-address',
'f6:f5:f4:f3:f2:01')
srv_control.host_reservation_in_subnet_add_value(0, 0, 'ip-address', '3000::100')
srv_control.host_reservation_in_subnet_add_value(0, 0, 'prefixes', '3001::/40')
srv_control.add_ddns_server('127.0.0.1', '53001')
srv_control.add_ddns_server_options('enable-updates', True)
srv_control.add_ddns_server_options('qualifying-suffix', 'my.domain.com')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'IA-PD')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
misc.test_procedure()
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('IA_PD')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_sets_value('Client', 'FQDN_domain_name', 'some-different-name')
srv_msg.client_sets_value('Client', 'FQDN_flags', 'S')
srv_msg.client_does_include('Client', 'fqdn')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::100')
srv_msg.response_check_include_option(25)
srv_msg.response_check_option_content(25, 'sub-option', 26)
srv_msg.response_check_suboption_content(26, 25, 'prefix', '3001::')
srv_msg.response_check_include_option(39)
srv_msg.response_check_option_content(39, 'fqdn', 'reserved-hostname.my.domain.com.')
@pytest.mark.v6
@pytest.mark.host_reservation
@pytest.mark.kea_only
def test_v6_host_reservation_all_values_duid():
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
srv_control.host_reservation_in_subnet('hostname',
'reserved-hostname',
0,
'duid',
'00:03:00:01:f6:f5:f4:f3:f2:01')
srv_control.host_reservation_in_subnet_add_value(0, 0, 'ip-address', '3000::100')
srv_control.host_reservation_in_subnet_add_value(0, 0, 'prefixes', '3001::/40')
srv_control.add_ddns_server('127.0.0.1', '53001')
srv_control.add_ddns_server_options('enable-updates', True)
srv_control.add_ddns_server_options('qualifying-suffix', 'my.domain.com')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'IA-PD')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
misc.test_procedure()
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('IA_PD')
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_sets_value('Client', 'FQDN_domain_name', 'some-different-name')
srv_msg.client_sets_value('Client', 'FQDN_flags', 'S')
srv_msg.client_does_include('Client', 'fqdn')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::100')
srv_msg.response_check_include_option(25)
srv_msg.response_check_option_content(25, 'sub-option', 26)
srv_msg.response_check_suboption_content(26, 25, 'prefix', '3001::')
srv_msg.response_check_include_option(39)
srv_msg.response_check_option_content(39, 'fqdn', 'reserved-hostname.my.domain.com.')
@pytest.mark.v6
@pytest.mark.host_reservation
@pytest.mark.kea_only
def test_v6_host_reservation_all_values_duid_2():
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
srv_control.config_srv_prefix('2001:db8:1::', 0, 32, 33)
srv_control.host_reservation_in_subnet('hostname',
'reserved-hostname',
0,
'duid',
'00:03:00:01:f6:f5:f4:f3:f2:01')
srv_control.host_reservation_in_subnet_add_value(0, 0, 'ip-address', '3000::100')
srv_control.host_reservation_in_subnet_add_value(0, 0, 'prefixes', '2001:db8:1::/40')
srv_control.add_ddns_server('127.0.0.1', '53001')
srv_control.add_ddns_server_options('enable-updates', True)
srv_control.add_ddns_server_options('qualifying-suffix', 'my.domain.com')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
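    # This client's DUID does not match the reservation above, so the
    # reserved address, prefix and FQDN must be absent from the REPLY
    # (hence expect_include=False in the checks below).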
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:01:00:01:52:7b:a8:f0:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'IA-PD')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
misc.test_procedure()
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('IA_PD')
srv_msg.client_sets_value('Client', 'DUID', '00:01:00:01:52:7b:a8:f0:f6:f5:f4:f3:f2:01')
srv_msg.client_sets_value('Client', 'FQDN_domain_name', 'some-different-name')
srv_msg.client_sets_value('Client', 'FQDN_flags', 'S')
srv_msg.client_does_include('Client', 'fqdn')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::100', expect_include=False)
srv_msg.response_check_include_option(25)
srv_msg.response_check_option_content(25, 'sub-option', 26)
srv_msg.response_check_suboption_content(26, 25, 'prefix', '2001:db8:1::', expect_include=False)
srv_msg.response_check_include_option(39)
srv_msg.response_check_option_content(39, 'fqdn', 'reserved-hostname.my.domain.com.', expect_include=False)
@pytest.mark.v6
@pytest.mark.host_reservation
@pytest.mark.kea_only
def test_v6_host_reservation_classes_1():
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::1')
srv_control.add_line_to_subnet(0, {"reservations": [{"duid": "00:03:00:01:f6:f5:f4:f3:f2:22",
"client-classes": ["reserved-class1"]}]})
srv_control.create_new_class('reserved-class1')
srv_control.add_option_to_defined_class(1, 'sip-server-addr', '2001:db8::1,2001:db8::2')
srv_control.add_option_to_defined_class(1, 'preference', '123')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_requests_option(7)
srv_msg.client_requests_option(22)
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(22, expect_include=False)
srv_msg.response_check_include_option(7, expect_include=False)
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
srv_msg.client_requests_option(7)
srv_msg.client_requests_option(22)
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(22)
srv_msg.response_check_option_content(22, 'addresses', '2001:db8::1,2001:db8::2')
srv_msg.response_check_include_option(7)
srv_msg.response_check_option_content(7, 'prefval', 123)
@pytest.mark.v6
@pytest.mark.host_reservation
@pytest.mark.kea_only
def test_v6_host_reservation_classes_2():
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::1')
srv_control.add_line_to_subnet(0, {"reservations": [{"duid": "00:03:00:01:f6:f5:f4:f3:f2:22",
"client-classes": ["reserved-class1", "reserved-class2"]}]})
srv_control.create_new_class('reserved-class1')
srv_control.add_option_to_defined_class(1, 'sip-server-addr', '2001:db8::1,2001:db8::2')
srv_control.create_new_class('reserved-class2')
srv_control.add_option_to_defined_class(2, 'preference', '123')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_requests_option(7)
srv_msg.client_requests_option(22)
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(22, expect_include=False)
srv_msg.response_check_include_option(7, expect_include=False)
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
srv_msg.client_requests_option(7)
srv_msg.client_requests_option(22)
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(22)
srv_msg.response_check_option_content(22, 'addresses', '2001:db8::1,2001:db8::2')
srv_msg.response_check_include_option(7)
srv_msg.response_check_option_content(7, 'prefval', 123)
|
|
"""
Mini commands - Provides a template for writing quick
command classes in Python using the subprocess module.
Author: Anand B Pillai <abpillai@gmail.com>
"""
import os
from subprocess import Popen, PIPE
class CmdProcessor(object):
""" Class providing useful functions to execute system
commands using subprocess module """
def execute_command_in_shell(self, command, args=[]):
""" Execute a shell command
Parameters:
command - The command to execute
args - Command arguments, as a list
"""
execfn = ' '.join([command] + list(args))
try:
p = Popen(execfn, env=os.environ, shell=True)
p.wait()
return p.returncode
except Exception,e:
print e
return -1
def execute_command(self, command, args=[]):
""" Execute a command
Parameters:
command - The command to execute
args - Command arguments, as a list
"""
execfn = [command] + list(args)
try:
p = Popen(execfn, env=os.environ)
p.wait()
return p.returncode
except Exception,e:
print e
return -1
def execute_command_in_pipe(self, command, args=[], estdin=None, estdout=None):
""" Execute a command by reading/writing input/output from/to optional
streams like a pipe. After completion, return status """
execfn = [command] + list(args)
try:
in_stream = False
out_stream = False
# Check if this is a stream
if hasattr(estdin, 'read'):
fpin = estdin
in_stream = True
elif type(estdin) in (str, unicode):
fpin = open(estdin, 'r')
in_stream = True
if hasattr(estdout, 'write'):
fpout = estdout
out_stream = True
elif type(estdout) in (str, unicode):
fpout = open(estdout, 'w')
out_stream = True
if in_stream and out_stream:
p = Popen(execfn, stdin=fpin, stdout=fpout, stderr=PIPE)
elif in_stream and not out_stream:
p = Popen(execfn, stdin=fpin, stdout=PIPE, stderr=PIPE)
elif not in_stream and out_stream:
p = Popen(execfn, stdin=PIPE, stdout=fpout, stderr=PIPE)
elif not in_stream and not out_stream:
p = Popen(execfn, stdin=PIPE, stdout=PIPE, stderr=PIPE)
return p.wait()
except Exception,e:
print str(e)
return -1
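# Usage sketch for CmdProcessor (hypothetical file names):
#   proc = CmdProcessor()
#   proc.execute_command('ls', ['-l'])             # argv-style, no shell
#   proc.execute_command_in_shell('ls', ['-l'])    # through the shell
#   proc.execute_command_in_pipe('sort', estdin='in.txt', estdout='out.txt')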
class MiniCommand(object):
""" Base class for mini-commands """
# This is the original command executed by the class
command = None
# Any prefix arguments which will be used by all
# sub-classes of this class
prefix_args = []
# A command template string which can be used
# to define the skeleton of a command.
template = ''
# The base function which can be overridden
func = 'execute_cmd'
cmdproc = CmdProcessor()
def __init__(self, command=None, prefix_args=[], template=''):
if command:
self.command = command
if prefix_args:
self.prefix_args = prefix_args
if template:
self.template = template
self.call_func = getattr(self, self.func)
def __call__(self, *args, **kwargs):
    args = self.prefix_args + list(args)
    if self.template:
        # Interpolating the template yields a single string; split it back
        # into an argument list so the loop below sees whole arguments
        # rather than individual characters.
        args = (self.template % tuple(args)).split()
    print 'ARGS=>', args
    # Route key=value items into kwargs. Iterate over a copy, since
    # removing items from a list while iterating over it skips elements.
    for item in list(args):
        if '=' in item:
            args.remove(item)
            name, value = item.split('=', 1)
            kwargs[name] = value
    return self.call_func(*args, **kwargs)
def execute_cmd(cls, *args, **kwargs):
return cls.cmdproc.execute_command(cls.command, args, **kwargs)
def execute_shell_cmd(cls, *args, **kwargs):
return cls.cmdproc.execute_command_in_shell(cls.command, args, **kwargs)
def execute_cmd_in_pipe(cls, *args, **kwargs):
return cls.cmdproc.execute_command_in_pipe(cls.command, args, **kwargs)
execute_cmd = classmethod(execute_cmd)
execute_shell_cmd = classmethod(execute_shell_cmd)
execute_cmd_in_pipe = classmethod(execute_cmd_in_pipe)
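# classmethod() is applied after the definitions above -- the pre-decorator
# idiom equivalent to stacking @classmethod on each method.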
# Simple example : ls command
class ListDirCmd(MiniCommand):
""" This is a sample command added to display functionality """
if os.name == 'posix':
command = 'ls'
elif os.name == 'nt':
command = 'dir'
func = 'execute_shell_cmd'
class DirTreeCmd(MiniCommand):
if os.name == 'nt':
command = 'tree.com'
class DeltreeCmd(MiniCommand):
""" Command to remove a directory tree """
if os.name == 'posix':
command = 'rm'
prefix_args = ['-rf']
elif os.name == 'nt':
command = 'rmdir'
prefix_args = ['/S','/Q']
func = 'execute_shell_cmd'
class IPConfigCmd(MiniCommand):
command = "ipconfig"
class PythonCmd(MiniCommand):
command = 'python'
# Java key-tool command
class JavaKeytoolCommand(MiniCommand):
""" Class encapsulating java key-tool command """
command = 'keytool'
class SampleKeystoreGenCmd(JavaKeytoolCommand):
""" Generate sample key store using key-tool """
func = 'execute_cmd_in_pipe'
template = '-genkey -keystore %s -keyalg RSA -alias %s -trustcacerts estdin=%s'
if __name__ == '__main__':
# example: ls command
lsinst = ListDirCmd()
lsinst()
lsinst('-al')
cmd = IPConfigCmd()
cmd("/all")
cmd = PythonCmd()
cmd()
try:
os.makedirs("/tmp/abcd")
os.makedirs("/tmp/abcd2")
except os.error, e:
pass
cmd = DeltreeCmd()
if os.path.isdir('/tmp/abcd'):
print cmd('/tmp/abcd')
if os.path.isdir('/tmp/abcd2'):
print cmd('/tmp/abcd2')
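    # A further sketch (hypothetical invocation): positional arguments are
    # passed through unchanged, while key=value arguments are routed into
    # kwargs by __call__.
    cmd = PythonCmd()
    print cmd('-c', 'print "hello from PythonCmd"')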
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FisherBlock definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.kfac.python.ops import fisher_factors
from tensorflow.contrib.kfac.python.ops import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# For blocks corresponding to convolutional layers, or any type of block where
# the parameters can be thought of as being replicated in time or space,
# we want to adjust the scale of the damping by
# damping /= num_replications ** NORMALIZE_DAMPING_POWER
NORMALIZE_DAMPING_POWER = 1.0
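# E.g. a conv filter applied at num_replications = 64 spatial locations with
# NORMALIZE_DAMPING_POWER = 1.0 uses damping / 64.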
@six.add_metaclass(abc.ABCMeta)
class FisherBlock(object):
"""Abstract base class for objects modeling approximate Fisher matrix blocks.
Subclasses must implement multiply_inverse(), instantiate_factors(), and
tensors_to_compute_grads() methods.
"""
def __init__(self, layer_collection):
self._layer_collection = layer_collection
@abc.abstractmethod
def instantiate_factors(self, grads_list, damping):
"""Creates and registers the component factors of this Fisher block.
Args:
grads_list: A list of gradients (each a Tensor or tuple of Tensors) with
respect to the tensors returned by tensors_to_compute_grads() that
are to be used to estimate the block.
damping: The damping factor (float or Tensor).
"""
pass
@abc.abstractmethod
def multiply_inverse(self, vector):
"""Multiplies the vector by the (damped) inverse of the block.
Args:
vector: The vector (a Tensor or tuple of Tensors) to be multiplied.
Returns:
The vector left-multiplied by the (damped) inverse of the block.
"""
pass
@abc.abstractmethod
def multiply(self, vector):
"""Multiplies the vector by the (damped) block.
Args:
vector: The vector (a Tensor or tuple of Tensors) to be multiplied.
Returns:
The vector left-multiplied by the (damped) block.
"""
pass
@abc.abstractmethod
def tensors_to_compute_grads(self):
"""Returns the Tensor(s) with respect to which this FisherBlock needs grads.
"""
pass
class FullFB(FisherBlock):
"""FisherBlock using a full matrix estimate (no approximations).
FullFB uses a full matrix estimate (no approximations), and should only ever
be used for very low dimensional parameters.
Note that this uses the naive "square the sum" estimator, and so is applicable
to any type of parameter in principle, but has very high variance.
"""
def __init__(self, layer_collection, params, batch_size):
"""Creates a FullFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
params: The parameters of this layer (Tensor or tuple of Tensors).
batch_size: The batch size, used in the covariance estimator.
"""
self._batch_size = batch_size
self._params = params
super(FullFB, self).__init__(layer_collection)
def instantiate_factors(self, grads_list, damping):
self._damping = damping
self._factor = self._layer_collection.make_or_get_factor(
fisher_factors.FullFactor, (grads_list, self._batch_size))
self._factor.register_damped_inverse(damping)
def multiply_inverse(self, vector):
inverse = self._factor.get_inverse(self._damping)
out_flat = math_ops.matmul(inverse, utils.tensors_to_column(vector))
return utils.column_to_tensors(vector, out_flat)
def multiply(self, vector):
vector_flat = utils.tensors_to_column(vector)
out_flat = (math_ops.matmul(self._factor.get_cov(), vector_flat) +
self._damping * vector_flat)
return utils.column_to_tensors(vector, out_flat)
def full_fisher_block(self):
"""Explicitly constructs the full Fisher block."""
return self._factor.get_cov()
def tensors_to_compute_grads(self):
return self._params
class NaiveDiagonalFB(FisherBlock):
"""FisherBlock using a diagonal matrix approximation.
This type of approximation is generically applicable but quite primitive.
Note that this uses the naive "square the sum" estimator, and so is applicable
to any type of parameter in principle, but has very high variance.
"""
def __init__(self, layer_collection, params, batch_size):
"""Creates a NaiveDiagonalFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
params: The parameters of this layer (Tensor or tuple of Tensors).
batch_size: The batch size, used in the covariance estimator.
"""
self._params = params
self._batch_size = batch_size
super(NaiveDiagonalFB, self).__init__(layer_collection)
def instantiate_factors(self, grads_list, damping):
self._damping = damping
self._factor = self._layer_collection.make_or_get_factor(
fisher_factors.NaiveDiagonalFactor, (grads_list, self._batch_size))
def multiply_inverse(self, vector):
vector_flat = utils.tensors_to_column(vector)
out_flat = vector_flat / (self._factor.get_cov() + self._damping)
return utils.column_to_tensors(vector, out_flat)
def multiply(self, vector):
vector_flat = utils.tensors_to_column(vector)
out_flat = vector_flat * (self._factor.get_cov() + self._damping)
return utils.column_to_tensors(vector, out_flat)
def full_fisher_block(self):
return array_ops.diag(array_ops.reshape(self._factor.get_cov(), (-1,)))
def tensors_to_compute_grads(self):
return self._params
class FullyConnectedDiagonalFB(FisherBlock):
"""FisherBlock for fully-connected (dense) layers using a diagonal approx.
Unlike NaiveDiagonalFB this uses the low-variance "sum of squares" estimator,
which accumulates squared per-example gradients rather than squaring the
summed gradient.
"""
# TODO(jamesmartens): add unit tests for this class
def __init__(self, layer_collection, inputs, outputs, has_bias=False):
"""Creates a FullyConnectedDiagonalFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
inputs: The Tensor of input activations to this layer.
outputs: The Tensor of output pre-activations from this layer.
has_bias: Whether the component Kronecker factors have an additive bias.
(Default: False)
"""
self._inputs = inputs
self._outputs = outputs
self._has_bias = has_bias
super(FullyConnectedDiagonalFB, self).__init__(layer_collection)
def instantiate_factors(self, grads_list, damping):
self._damping = damping
self._factor = self._layer_collection.make_or_get_factor(
fisher_factors.FullyConnectedDiagonalFactor, (self._inputs, grads_list,
self._has_bias))
def multiply_inverse(self, vector):
reshaped_vect = utils.layer_params_to_mat2d(vector)
reshaped_out = reshaped_vect / (self._factor.get_cov() + self._damping)
return utils.mat2d_to_layer_params(vector, reshaped_out)
def multiply(self, vector):
reshaped_vect = utils.layer_params_to_mat2d(vector)
reshaped_out = reshaped_vect * (self._factor.get_cov() + self._damping)
return utils.mat2d_to_layer_params(vector, reshaped_out)
def tensors_to_compute_grads(self):
return self._outputs
class ConvDiagonalFB(FisherBlock):
"""FisherBlock for convolutional layers using a diagonal approx.
Unlike NaiveDiagonalFB this uses the low-variance "sum of squares" estimator.
"""
# TODO(jamesmartens): add unit tests for this class
def __init__(self, layer_collection, params, inputs, outputs, strides,
padding):
"""Creates a ConvDiagonalFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
params: The parameters (Tensor or tuple of Tensors) of this layer. If
kernel alone, a Tensor of shape [kernel_height, kernel_width,
in_channels, out_channels]. If kernel and bias, a tuple of 2 elements
containing the previous and a Tensor of shape [out_channels].
inputs: A Tensor of shape [batch_size, height, width, in_channels].
Input activations to this layer.
outputs: A Tensor of shape [batch_size, height, width, out_channels].
Output pre-activations from this layer.
strides: The stride size in this layer (1-D Tensor of length 4).
padding: The padding in this layer (1-D Tensor of length 4).
"""
self._inputs = inputs
self._outputs = outputs
self._strides = strides
self._padding = padding
self._has_bias = isinstance(params, (tuple, list))
fltr = params[0] if self._has_bias else params
self._filter_shape = tuple(fltr.shape.as_list())
input_shape = tuple(inputs.shape.as_list())
self._num_locations = (input_shape[1] * input_shape[2]
// (strides[1] * strides[2]))
super(ConvDiagonalFB, self).__init__(layer_collection)
def instantiate_factors(self, grads_list, damping):
if NORMALIZE_DAMPING_POWER:
damping /= self._num_locations ** NORMALIZE_DAMPING_POWER
self._damping = damping
self._factor = self._layer_collection.make_or_get_factor(
fisher_factors.ConvDiagonalFactor,
(self._inputs, grads_list, self._filter_shape, self._strides,
self._padding, self._has_bias))
def multiply_inverse(self, vector):
reshaped_vect = utils.layer_params_to_mat2d(vector)
reshaped_out = reshaped_vect / (self._factor.get_cov() + self._damping)
return utils.mat2d_to_layer_params(vector, reshaped_out)
def multiply(self, vector):
reshaped_vect = utils.layer_params_to_mat2d(vector)
reshaped_out = reshaped_vect * (self._factor.get_cov() + self._damping)
return utils.mat2d_to_layer_params(vector, reshaped_out)
def tensors_to_compute_grads(self):
return self._outputs
class KroneckerProductFB(FisherBlock):
"""A base class for FisherBlocks with separate input and output factors.
The Fisher block is approximated as a Kronecker product of the input and
output factors.
"""
def _register_damped_input_and_output_inverses(self, damping):
"""Registers damped inverses for both the input and output factors.
Sets the instance members _input_damping and _output_damping. Requires the
instance members _input_factor and _output_factor.
Args:
damping: The base damping factor (float or Tensor) for the damped inverse.
"""
pi = utils.compute_pi(self._input_factor.get_cov(),
self._output_factor.get_cov())
self._input_damping = math_ops.sqrt(damping) * pi
self._output_damping = math_ops.sqrt(damping) / pi
self._input_factor.register_damped_inverse(self._input_damping)
self._output_factor.register_damped_inverse(self._output_damping)
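  # Note: with a = sqrt(damping) * pi and b = sqrt(damping) / pi,
  #   (A + a*I) kron (B + b*I) = A kron B + b*(A kron I) + a*(I kron B) + damping*I,
  # so the two per-factor dampings multiply back to the overall damping, and
  # pi (from compute_pi) balances the two cross terms. This is the factored
  # Tikhonov damping described in the K-FAC paper.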
@property
def _renorm_coeff(self):
return 1.0
def multiply_inverse(self, vector):
left_factor_inv = self._input_factor.get_inverse(self._input_damping)
right_factor_inv = self._output_factor.get_inverse(self._output_damping)
reshaped_vector = utils.layer_params_to_mat2d(vector)
reshaped_out = math_ops.matmul(left_factor_inv,
math_ops.matmul(reshaped_vector,
right_factor_inv))
if self._renorm_coeff != 1.0:
reshaped_out /= math_ops.cast(
self._renorm_coeff, dtype=reshaped_out.dtype)
return utils.mat2d_to_layer_params(vector, reshaped_out)
def multiply(self, vector):
left_factor = self._input_factor.get_cov()
right_factor = self._output_factor.get_cov()
reshaped_vector = utils.layer_params_to_mat2d(vector)
reshaped_out = (math_ops.matmul(reshaped_vector, right_factor) +
self._output_damping * reshaped_vector)
reshaped_out = (math_ops.matmul(left_factor, reshaped_out) +
self._input_damping * reshaped_out)
if self._renorm_coeff != 1.0:
reshaped_out *= math_ops.cast(
self._renorm_coeff, dtype=reshaped_out.dtype)
return utils.mat2d_to_layer_params(vector, reshaped_out)
def full_fisher_block(self):
"""Explicitly constructs the full Fisher block.
Used for testing purposes. (In general, the result may be very large.)
Returns:
The full Fisher block.
"""
left_factor = self._input_factor.get_cov()
right_factor = self._output_factor.get_cov()
return self._renorm_coeff * utils.kronecker_product(left_factor,
right_factor)
class FullyConnectedKFACBasicFB(KroneckerProductFB):
"""K-FAC FisherBlock for fully-connected (dense) layers.
This uses the Kronecker-factorized approximation from the original
K-FAC paper (https://arxiv.org/abs/1503.05671)
"""
def __init__(self, layer_collection, inputs, outputs, has_bias=False):
"""Creates a FullyConnectedKFACBasicFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
inputs: The Tensor of input activations to this layer.
outputs: The Tensor of output pre-activations from this layer.
has_bias: Whether the component Kronecker factors have an additive bias.
(Default: False)
"""
self._inputs = inputs
self._outputs = outputs
self._has_bias = has_bias
super(FullyConnectedKFACBasicFB, self).__init__(layer_collection)
def instantiate_factors(self, grads_list, damping):
self._input_factor = self._layer_collection.make_or_get_factor(
fisher_factors.FullyConnectedKroneckerFactor, ((self._inputs,),
self._has_bias))
self._output_factor = self._layer_collection.make_or_get_factor(
fisher_factors.FullyConnectedKroneckerFactor, (grads_list,))
self._register_damped_input_and_output_inverses(damping)
def tensors_to_compute_grads(self):
return self._outputs
class ConvKFCBasicFB(KroneckerProductFB):
"""FisherBlock for 2D convolutional layers using the basic KFC approx.
See https://arxiv.org/abs/1602.01407 for details.
"""
def __init__(self, layer_collection, params, inputs, outputs, strides,
padding):
"""Creates a ConvKFCBasicFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
params: The parameters (Tensor or tuple of Tensors) of this layer. If
kernel alone, a Tensor of shape [kernel_height, kernel_width,
in_channels, out_channels]. If kernel and bias, a tuple of 2 elements
containing the previous and a Tensor of shape [out_channels].
inputs: A Tensor of shape [batch_size, height, width, in_channels].
Input activations to this layer.
outputs: A Tensor of shape [batch_size, height, width, out_channels].
Output pre-activations from this layer.
strides: The stride size in this layer (1-D Tensor of length 4).
padding: The padding in this layer (1-D Tensor of length 4).
"""
self._inputs = inputs
self._outputs = outputs
self._strides = strides
self._padding = padding
self._has_bias = isinstance(params, (tuple, list))
fltr = params[0] if self._has_bias else params
self._filter_shape = tuple(fltr.shape.as_list())
input_shape = tuple(inputs.shape.as_list())
self._num_locations = (input_shape[1] * input_shape[2] //
(strides[1] * strides[2]))
super(ConvKFCBasicFB, self).__init__(layer_collection)
def instantiate_factors(self, grads_list, damping):
self._input_factor = self._layer_collection.make_or_get_factor(
fisher_factors.ConvInputKroneckerFactor,
(self._inputs, self._filter_shape, self._strides, self._padding,
self._has_bias))
self._output_factor = self._layer_collection.make_or_get_factor(
fisher_factors.ConvOutputKroneckerFactor, (grads_list,))
if NORMALIZE_DAMPING_POWER:
damping /= self._num_locations**NORMALIZE_DAMPING_POWER
self._register_damped_input_and_output_inverses(damping)
@property
def _renorm_coeff(self):
return self._num_locations
def tensors_to_compute_grads(self):
return self._outputs
|
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IgnoreArg # noqa
from mox3.mox import IsA # noqa
import six
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.routers.extensions.routerrules\
import rulemanager
from openstack_dashboard.dashboards.project.routers import tables
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
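# The tests below follow the mox record/replay pattern: every stubbed API
# call is recorded first, self.mox.ReplayAll() switches to replay mode, and
# the view under test is then exercised; any unexpected or missing call
# fails the test during verification.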
class RouterMixin(object):
@test.create_stubs({
api.neutron: ('router_get', 'port_list',
'network_get', 'is_extension_supported'),
})
def _get_detail(self, router, extraroute=True):
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'extraroute')\
.MultipleTimes().AndReturn(extraroute)
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
api.neutron.port_list(IsA(http.HttpRequest),
device_id=router.id)\
.AndReturn([self.ports.first()])
self._mock_external_network_get(router)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:%s'
':routers:detail' % self.DASHBOARD,
args=[router.id]))
return res
def _mock_external_network_list(self, alter_ids=False):
search_opts = {'router:external': True}
ext_nets = [n for n in self.networks.list() if n['router:external']]
if alter_ids:
for ext_net in ext_nets:
ext_net.id += 'some extra garbage'
api.neutron.network_list(
IsA(http.HttpRequest),
**search_opts).AndReturn(ext_nets)
def _mock_external_network_get(self, router):
ext_net_id = router.external_gateway_info['network_id']
ext_net = self.networks.list()[2]
api.neutron.network_get(IsA(http.HttpRequest), ext_net_id,
expand_subnet=False).AndReturn(ext_net)
def _mock_network_list(self, tenant_id):
api.neutron.network_list(
IsA(http.HttpRequest),
shared=False,
tenant_id=tenant_id).AndReturn(self.networks.list())
api.neutron.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([])
class RouterTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:routers:detail' % DASHBOARD
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_index(self):
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, '%s/routers/index.html' % self.DASHBOARD)
routers = res.context['table'].data
self.assertItemsEqual(routers, self.routers.list())
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_index_router_list_exception(self):
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).MultipleTimes().AndRaise(self.exceptions.neutron)
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, '%s/routers/index.html' % self.DASHBOARD)
self.assertEqual(len(res.context['table'].data), 0)
self.assertMessageCount(res, error=1)
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_set_external_network_empty(self):
router = self.routers.first()
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).MultipleTimes().AndReturn([router])
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list(alter_ids=True)
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
table_data = res.context['table'].data
self.assertEqual(len(table_data), 1)
self.assertIn('(Not Found)',
table_data[0]['external_gateway_info']['network'])
self.assertTemplateUsed(res, '%s/routers/index.html' % self.DASHBOARD)
self.assertMessageCount(res, error=1)
def test_router_detail(self):
router = self.routers.first()
res = self._get_detail(router)
self.assertTemplateUsed(res, '%s/routers/detail.html' % self.DASHBOARD)
ports = res.context['interfaces_table'].data
self.assertItemsEqual(ports, [self.ports.first()])
@test.create_stubs({api.neutron: ('router_get',)})
def test_router_detail_exception(self):
router = self.routers.first()
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:%s'
':routers:detail' % self.DASHBOARD,
args=[router.id]))
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_list', 'network_list',
'port_list', 'router_delete',),
quotas: ('tenant_quota_usages',)})
def test_router_delete(self):
router = self.routers.first()
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.port_list(IsA(http.HttpRequest),
device_id=router.id, device_owner=IgnoreArg())\
.AndReturn([])
api.neutron.router_delete(IsA(http.HttpRequest), router.id)
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
formData = {'action': 'Routers__delete__' + router.id}
res = self.client.post(self.INDEX_URL, formData, follow=True)
self.assertNoFormErrors(res)
self.assertMessageCount(response=res, success=1)
self.assertIn('Deleted Router: ' + router.name, res.content)
@test.create_stubs({api.neutron: ('router_list', 'network_list',
'port_list', 'router_remove_interface',
'router_delete',),
quotas: ('tenant_quota_usages',)})
def test_router_with_interface_delete(self):
router = self.routers.first()
ports = self.ports.list()
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.port_list(IsA(http.HttpRequest),
device_id=router.id, device_owner=IgnoreArg())\
.AndReturn(ports)
for port in ports:
api.neutron.router_remove_interface(IsA(http.HttpRequest),
router.id, port_id=port.id)
api.neutron.router_delete(IsA(http.HttpRequest), router.id)
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
formData = {'action': 'Routers__delete__' + router.id}
res = self.client.post(self.INDEX_URL, formData, follow=True)
self.assertNoFormErrors(res)
self.assertMessageCount(response=res, success=1)
self.assertIn('Deleted Router: ' + router.name, res.content)
class RouterActionTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:routers:detail' % DASHBOARD
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',)})
def test_router_create_post(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.AndReturn(False)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.AndReturn(False)
params = {'name': router.name,
'admin_state_up': str(router.admin_state_up)}
api.neutron.router_create(IsA(http.HttpRequest), **params)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'name': router.name,
'admin_state_up': router.admin_state_up}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',)})
def test_router_create_post_mode_server_default(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.AndReturn(True)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.AndReturn(True)
params = {'name': router.name,
'admin_state_up': str(router.admin_state_up)}
api.neutron.router_create(IsA(http.HttpRequest), **params)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'name': router.name,
'mode': 'server_default',
'ha': 'server_default',
'admin_state_up': router.admin_state_up}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',)})
def test_dvr_ha_router_create_post(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.MultipleTimes().AndReturn(True)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.MultipleTimes().AndReturn(True)
param = {'name': router.name,
'distributed': True,
'ha': True,
'admin_state_up': str(router.admin_state_up)}
api.neutron.router_create(IsA(http.HttpRequest), **param)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'name': router.name,
'mode': 'distributed',
'ha': 'enabled',
'admin_state_up': router.admin_state_up}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',)})
def test_router_create_post_exception_error_case_409(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.MultipleTimes().AndReturn(False)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.AndReturn(False)
self.exceptions.neutron.status_code = 409
params = {'name': router.name,
'admin_state_up': str(router.admin_state_up)}
api.neutron.router_create(IsA(http.HttpRequest), **params)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'name': router.name,
'admin_state_up': router.admin_state_up}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',)})
def test_router_create_post_exception_error_case_non_409(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.MultipleTimes().AndReturn(False)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.MultipleTimes().AndReturn(False)
self.exceptions.neutron.status_code = 999
params = {'name': router.name,
'admin_state_up': str(router.admin_state_up)}
api.neutron.router_create(IsA(http.HttpRequest), **params)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'name': router.name,
'admin_state_up': router.admin_state_up}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_get',
'get_feature_permission')})
def _test_router_update_get(self, dvr_enabled=False,
current_dvr=False,
ha_enabled=False):
router = [r for r in self.routers.list()
if r.distributed == current_dvr][0]
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "update")\
.AndReturn(dvr_enabled)
# TODO(amotoki): Due to Neutron Bug 1378525, Neutron disables
# PUT operation. It will be fixed in Kilo cycle.
# api.neutron.get_feature_permission(IsA(http.HttpRequest),
# "l3-ha", "update")\
# .AndReturn(ha_enabled)
self.mox.ReplayAll()
url = reverse('horizon:%s:routers:update' % self.DASHBOARD,
args=[router.id])
return self.client.get(url)
def test_router_update_get_dvr_disabled(self):
res = self._test_router_update_get(dvr_enabled=False)
self.assertTemplateUsed(res, 'project/routers/update.html')
self.assertNotContains(res, 'Router Type')
self.assertNotContains(res, 'id="id_mode"')
def test_router_update_get_dvr_enabled_mode_centralized(self):
res = self._test_router_update_get(dvr_enabled=True, current_dvr=False)
self.assertTemplateUsed(res, 'project/routers/update.html')
self.assertContains(res, 'Router Type')
# Check both menu are displayed.
self.assertContains(
res,
'<option value="centralized" selected="selected">'
'Centralized</option>',
html=True)
self.assertContains(
res,
'<option value="distributed">Distributed</option>',
html=True)
def test_router_update_get_dvr_enabled_mode_distributed(self):
res = self._test_router_update_get(dvr_enabled=True, current_dvr=True)
self.assertTemplateUsed(res, 'project/routers/update.html')
self.assertContains(res, 'Router Type')
self.assertContains(
res,
'<input class="form-control" id="id_mode" name="mode" '
'readonly="readonly" type="text" value="distributed" />',
html=True)
self.assertNotContains(res, 'centralized')
@test.create_stubs({api.neutron: ('router_get',
'router_update',
'get_feature_permission')})
def test_router_update_post_dvr_ha_disabled(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "update")\
.AndReturn(False)
# TODO(amotoki): Due to Neutron Bug 1378525, Neutron disables
# PUT operation. It will be fixed in Kilo cycle.
# api.neutron.get_feature_permission(IsA(http.HttpRequest),
# "l3-ha", "update")\
# .AndReturn(False)
api.neutron.router_update(IsA(http.HttpRequest), router.id,
name=router.name,
admin_state_up=router.admin_state_up)\
.AndReturn(router)
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'name': router.name,
'admin_state': router.admin_state_up}
url = reverse('horizon:%s:routers:update' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_get',
'router_update',
'get_feature_permission')})
def test_router_update_post_dvr_ha_enabled(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "update")\
.AndReturn(True)
# TODO(amotoki): Due to Neutron Bug 1378525, Neutron disables
# PUT operation. It will be fixed in Kilo cycle.
# api.neutron.get_feature_permission(IsA(http.HttpRequest),
# "l3-ha", "update")\
# .AndReturn(True)
api.neutron.router_update(IsA(http.HttpRequest), router.id,
name=router.name,
admin_state_up=router.admin_state_up,
# ha=True,
distributed=True).AndReturn(router)
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'name': router.name,
'admin_state': router.admin_state_up,
'mode': 'distributed',
'ha': True}
url = reverse('horizon:%s:routers:update' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
def _test_router_addinterface(self, raise_error=False):
router = self.routers.first()
subnet = self.subnets.first()
port = self.ports.first()
add_interface = api.neutron.router_add_interface(
IsA(http.HttpRequest), router.id, subnet_id=subnet.id)
if raise_error:
add_interface.AndRaise(self.exceptions.neutron)
else:
add_interface.AndReturn({'subnet_id': subnet.id,
'port_id': port.id})
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(port)
self._check_router_addinterface(router, subnet)
def _check_router_addinterface(self, router, subnet, ip_address=''):
# mock APIs used to show router detail
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
api.neutron.port_list(IsA(http.HttpRequest), device_id=router.id)\
.AndReturn([])
self._mock_network_list(router['tenant_id'])
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'router_name': router.name,
'subnet_id': subnet.id,
'ip_address': ip_address}
url = reverse('horizon:%s:routers:addinterface' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = reverse(self.DETAIL_PATH, args=[router.id])
self.assertRedirectsNoFollow(res, detail_url)
@test.create_stubs({api.neutron: ('router_get',
'router_add_interface',
'port_get',
'network_list',
'port_list')})
def test_router_addinterface(self):
self._test_router_addinterface()
@test.create_stubs({api.neutron: ('router_get',
'router_add_interface',
'network_list',
'port_list')})
def test_router_addinterface_exception(self):
self._test_router_addinterface(raise_error=True)
def _test_router_addinterface_ip_addr(self, errors=[]):
router = self.routers.first()
subnet = self.subnets.first()
port = self.ports.first()
ip_addr = port['fixed_ips'][0]['ip_address']
self._setup_mock_addinterface_ip_addr(router, subnet, port,
ip_addr, errors)
self._check_router_addinterface(router, subnet, ip_addr)
def _setup_mock_addinterface_ip_addr(self, router, subnet, port,
ip_addr, errors=[]):
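        # Record the neutron calls in order. `errors` lists which call should
        # raise; recording stops once the failing call has been set up, and an
        # 'add_interface' failure additionally records the port_delete cleanup.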
subnet_get = api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)
if 'subnet_get' in errors:
subnet_get.AndRaise(self.exceptions.neutron)
return
subnet_get.AndReturn(subnet)
params = {'network_id': subnet.network_id,
'fixed_ips': [{'subnet_id': subnet.id,
'ip_address': ip_addr}]}
port_create = api.neutron.port_create(IsA(http.HttpRequest), **params)
if 'port_create' in errors:
port_create.AndRaise(self.exceptions.neutron)
return
port_create.AndReturn(port)
add_inf = api.neutron.router_add_interface(
IsA(http.HttpRequest), router.id, port_id=port.id)
if 'add_interface' not in errors:
return
add_inf.AndRaise(self.exceptions.neutron)
port_delete = api.neutron.port_delete(IsA(http.HttpRequest), port.id)
if 'port_delete' in errors:
port_delete.AndRaise(self.exceptions.neutron)
@test.create_stubs({api.neutron: ('router_add_interface', 'subnet_get',
'port_create',
'router_get', 'network_list',
'port_list')})
def test_router_addinterface_ip_addr(self):
self._test_router_addinterface_ip_addr()
@test.create_stubs({api.neutron: ('subnet_get', 'router_get',
'network_list', 'port_list')})
def test_router_addinterface_ip_addr_exception_subnet_get(self):
self._test_router_addinterface_ip_addr(errors=['subnet_get'])
@test.create_stubs({api.neutron: ('subnet_get', 'port_create',
'router_get', 'network_list',
'port_list')})
def test_router_addinterface_ip_addr_exception_port_create(self):
self._test_router_addinterface_ip_addr(errors=['port_create'])
@test.create_stubs({api.neutron: ('router_add_interface', 'subnet_get',
'port_create', 'port_delete',
'router_get', 'network_list',
'port_list')})
def test_router_addinterface_ip_addr_exception_add_interface(self):
self._test_router_addinterface_ip_addr(errors=['add_interface'])
@test.create_stubs({api.neutron: ('router_add_interface', 'subnet_get',
'port_create', 'port_delete',
'router_get', 'network_list',
'port_list')})
def test_router_addinterface_ip_addr_exception_port_delete(self):
self._test_router_addinterface_ip_addr(errors=['add_interface',
'port_delete'])
@test.create_stubs({api.neutron: ('router_get',
'router_add_gateway',
'network_list')})
def test_router_add_gateway(self):
router = self.routers.first()
network = self.networks.first()
api.neutron.router_add_gateway(
IsA(http.HttpRequest),
router.id,
network.id).AndReturn(None)
api.neutron.router_get(
IsA(http.HttpRequest), router.id).AndReturn(router)
search_opts = {'router:external': True}
api.neutron.network_list(
IsA(http.HttpRequest), **search_opts).AndReturn([network])
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'router_name': router.name,
'network_id': network.id}
url = reverse('horizon:%s:routers:setgateway' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = self.INDEX_URL
self.assertRedirectsNoFollow(res, detail_url)
@test.create_stubs({api.neutron: ('router_get',
'router_add_gateway',
'network_list')})
def test_router_add_gateway_exception(self):
router = self.routers.first()
network = self.networks.first()
api.neutron.router_add_gateway(
IsA(http.HttpRequest),
router.id,
network.id).AndRaise(self.exceptions.neutron)
api.neutron.router_get(
IsA(http.HttpRequest), router.id).AndReturn(router)
search_opts = {'router:external': True}
api.neutron.network_list(
IsA(http.HttpRequest), **search_opts).AndReturn([network])
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'router_name': router.name,
'network_id': network.id}
url = reverse('horizon:%s:routers:setgateway' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = self.INDEX_URL
self.assertRedirectsNoFollow(res, detail_url)
class RouterRuleTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:routers:detail' % DASHBOARD
def test_extension_hides_without_rules(self):
router = self.routers.first()
res = self._get_detail(router)
self.assertTemplateUsed(res, '%s/routers/detail.html' % self.DASHBOARD)
self.assertTemplateNotUsed(
res,
'%s/routers/extensions/routerrules/grid.html' % self.DASHBOARD)
@test.create_stubs({api.neutron: ('network_list',)})
def test_routerrule_detail(self):
router = self.routers_with_rules.first()
if self.DASHBOARD == 'project':
api.neutron.network_list(
IsA(http.HttpRequest),
shared=False,
tenant_id=router['tenant_id']).AndReturn(self.networks.list())
api.neutron.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([])
res = self._get_detail(router)
self.assertTemplateUsed(res, '%s/routers/detail.html' % self.DASHBOARD)
if self.DASHBOARD == 'project':
self.assertTemplateUsed(
res,
'%s/routers/extensions/routerrules/grid.html' % self.DASHBOARD)
rules = res.context['routerrules_table'].data
self.assertItemsEqual(rules, router['router_rules'])
def _test_router_addrouterrule(self, raise_error=False):
pre_router = self.routers_with_rules.first()
post_router = copy.deepcopy(pre_router)
rule = {'source': '1.2.3.4/32', 'destination': '4.3.2.1/32', 'id': 99,
'action': 'permit', 'nexthops': ['1.1.1.1', '2.2.2.2']}
post_router['router_rules'].insert(0, rule)
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
params = {}
params['router_rules'] = rulemanager.format_for_api(
post_router['router_rules'])
router_update = api.neutron.router_update(IsA(http.HttpRequest),
pre_router.id, **params)
if raise_error:
router_update.AndRaise(self.exceptions.neutron)
else:
router_update.AndReturn({'router': post_router})
self.mox.ReplayAll()
form_data = {'router_id': pre_router.id,
'source': rule['source'],
'destination': rule['destination'],
'action': rule['action'],
'nexthops': ','.join(rule['nexthops'])}
url = reverse('horizon:%s:routers:addrouterrule' % self.DASHBOARD,
args=[pre_router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = reverse(self.DETAIL_PATH, args=[pre_router.id])
self.assertRedirectsNoFollow(res, detail_url)
@test.create_stubs({api.neutron: ('router_get',
'router_update')})
def test_router_addrouterrule(self):
self._test_router_addrouterrule()
@test.create_stubs({api.neutron: ('router_get',
'router_update')})
def test_router_addrouterrule_exception(self):
self._test_router_addrouterrule(raise_error=True)
@test.create_stubs({api.neutron: ('router_get', 'router_update',
'port_list', 'network_get',
'is_extension_supported')})
def test_router_removerouterrule(self):
pre_router = self.routers_with_rules.first()
post_router = copy.deepcopy(pre_router)
rule = post_router['router_rules'].pop()
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'extraroute')\
.AndReturn(False)
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
params = {}
params['router_rules'] = rulemanager.format_for_api(
post_router['router_rules'])
router_update = api.neutron.router_update(IsA(http.HttpRequest),
pre_router.id, **params)
router_update.AndReturn({'router': post_router})
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
api.neutron.port_list(IsA(http.HttpRequest),
device_id=pre_router.id)\
.AndReturn([self.ports.first()])
self._mock_external_network_get(pre_router)
self.mox.ReplayAll()
form_rule_id = rule['source'] + rule['destination']
form_data = {'router_id': pre_router.id,
'action': 'routerrules__delete__%s' % form_rule_id}
url = reverse(self.DETAIL_PATH, args=[pre_router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
@test.create_stubs({api.neutron: ('router_get', 'router_update',
'network_list', 'port_list',
'network_get',
'is_extension_supported')})
def test_router_resetrouterrules(self):
pre_router = self.routers_with_rules.first()
post_router = copy.deepcopy(pre_router)
default_rules = [{'source': 'any', 'destination': 'any',
'action': 'permit', 'nexthops': [], 'id': '2'}]
del post_router['router_rules'][:]
post_router['router_rules'].extend(default_rules)
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'extraroute')\
.AndReturn(False)
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(post_router)
params = {}
params['router_rules'] = rulemanager.format_for_api(
post_router['router_rules'])
router_update = api.neutron.router_update(IsA(http.HttpRequest),
pre_router.id, **params)
router_update.AndReturn({'router': post_router})
api.neutron.port_list(IsA(http.HttpRequest),
device_id=pre_router.id)\
.AndReturn([self.ports.first()])
self._mock_external_network_get(pre_router)
self._mock_network_list(pre_router['tenant_id'])
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(post_router)
self.mox.ReplayAll()
form_data = {'router_id': pre_router.id,
'action': 'routerrules__resetrules'}
url = reverse(self.DETAIL_PATH, args=[pre_router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
class RouterRouteTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:routers:detail' % DASHBOARD
def test_extension_hides_without_routes(self):
router = self.routers_with_routes.first()
res = self._get_detail(router, extraroute=False)
self.assertTemplateUsed(res, '%s/routers/detail.html' % self.DASHBOARD)
self.assertNotIn('extra_routes_table', res.context)
def test_routerroute_detail(self):
router = self.routers_with_routes.first()
res = self._get_detail(router, extraroute=True)
self.assertTemplateUsed(res, '%s/routers/detail.html' % self.DASHBOARD)
routes = res.context['extra_routes_table'].data
routes_dict = [r._apidict for r in routes]
self.assertItemsEqual(routes_dict, router['routes'])
@test.create_stubs({api.neutron: ('router_get', 'router_update')})
def _test_router_addrouterroute(self, raise_error=False):
pre_router = self.routers_with_routes.first()
post_router = copy.deepcopy(pre_router)
route = {'nexthop': '10.0.0.5', 'destination': '40.0.1.0/24'}
post_router['routes'].insert(0, route)
api.neutron.router_get(IsA(http.HttpRequest), pre_router.id)\
.MultipleTimes().AndReturn(pre_router)
params = {}
params['routes'] = post_router['routes']
router_update = api.neutron.router_update(IsA(http.HttpRequest),
pre_router.id, **params)
if raise_error:
router_update.AndRaise(self.exceptions.neutron)
else:
router_update.AndReturn({'router': post_router})
self.mox.ReplayAll()
form_data = copy.deepcopy(route)
form_data['router_id'] = pre_router.id
url = reverse('horizon:%s:routers:addrouterroute' % self.DASHBOARD,
args=[pre_router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = reverse(self.DETAIL_PATH, args=[pre_router.id])
self.assertRedirectsNoFollow(res, detail_url)
def test_router_addrouterroute(self):
if self.DASHBOARD == 'project':
self._test_router_addrouterroute()
self.assertMessageCount(success=1)
def test_router_addrouterroute_exception(self):
if self.DASHBOARD == 'project':
self._test_router_addrouterroute(raise_error=True)
self.assertMessageCount(error=1)
@test.create_stubs({api.neutron: ('router_get', 'router_update',
'network_get', 'port_list',
'is_extension_supported')})
def test_router_removeroute(self):
if self.DASHBOARD == 'admin':
return
pre_router = self.routers_with_routes.first()
post_router = copy.deepcopy(pre_router)
route = post_router['routes'].pop()
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'extraroute')\
.MultipleTimes().AndReturn(True)
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
params = {}
params['routes'] = post_router['routes']
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
router_update = api.neutron.router_update(IsA(http.HttpRequest),
pre_router.id, **params)
router_update.AndReturn({'router': post_router})
api.neutron.port_list(IsA(http.HttpRequest),
device_id=pre_router.id)\
.AndReturn([self.ports.first()])
self._mock_external_network_get(pre_router)
self.mox.ReplayAll()
form_route_id = route['nexthop'] + ":" + route['destination']
form_data = {'action': 'extra_routes__delete__%s' % form_route_id}
url = reverse(self.DETAIL_PATH, args=[pre_router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
class RouterViewTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_create_button_disabled_when_quota_exceeded(self):
quota_data = self.neutron_quota_usages.first()
quota_data['routers']['available'] = 0
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, 'project/routers/index.html')
routers = res.context['Routers_table'].data
self.assertItemsEqual(routers, self.routers.list())
create_link = tables.CreateRouter()
url = create_link.get_link_url()
classes = (list(create_link.get_default_classes())
+ list(create_link.classes))
link_name = "%s (%s)" % (six.text_type(create_link.verbose_name),
"Quota exceeded")
expected_string = "<a href='%s' title='%s' class='%s disabled' "\
"id='Routers__action_create'>" \
"<span class='fa fa-plus'></span>%s</a>" \
% (url, link_name, " ".join(classes), link_name)
self.assertContains(res, expected_string, html=True,
msg_prefix="The create button is not disabled")
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_create_button_shown_when_quota_disabled(self):
quota_data = self.neutron_quota_usages.first()
quota_data['routers'].pop('available')
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, 'project/routers/index.html')
routers = res.context['Routers_table'].data
self.assertItemsEqual(routers, self.routers.list())
create_link = tables.CreateRouter()
url = create_link.get_link_url()
classes = (list(create_link.get_default_classes())
+ list(create_link.classes))
link_name = "%s" % (six.text_type(create_link.verbose_name))
expected_string = "<a href='%s' title='%s' class='%s' "\
"id='Routers__action_create'>" \
"<span class='fa fa-plus'></span>%s</a>" \
% (url, link_name, " ".join(classes), link_name)
self.assertContains(res, expected_string, html=True,
msg_prefix="The create button is not displayed")
|
|
#############################################################################
# Documentation #
#############################################################################
# Author: Todd Whiteman
# Date: 16th March, 2009
# Version: 2.0.1
# License: Public Domain - free to do as you wish
# Homepage: http://twhiteman.netfirms.com/des.html
#
# This is a pure python implementation of the DES encryption algorithm.
# It's pure python to avoid portability issues, since most DES
# implementations are programmed in C (for performance reasons).
#
# Triple DES class is also implemented, utilising the DES base. Triple DES
# is either DES-EDE3 with a 24 byte key, or DES-EDE2 with a 16 byte key.
#
# See the README.txt that should come with this python module for the
# implementation methods used.
#
# Thanks to:
# * David Broadwell for ideas, comments and suggestions.
# * Mario Wolff for pointing out and debugging some triple des CBC errors.
# * Santiago Palladino for providing the PKCS5 padding technique.
# * Shaya for correcting the PAD_PKCS5 triple des CBC errors.
#
"""A pure python implementation of the DES and TRIPLE DES encryption algorithms.
Class initialization
--------------------
pyDes.des(key, [mode], [IV], [pad], [padmode])
pyDes.triple_des(key, [mode], [IV], [pad], [padmode])
key -> Bytes containing the encryption key. 8 bytes for DES, 16 or 24 bytes
for Triple DES
mode -> Optional argument for encryption type, can be either
pyDes.ECB (Electronic Code Book) or pyDes.CBC (Cypher Block Chaining)
IV -> Optional Initial Value bytes, must be supplied if using CBC mode.
Length must be 8 bytes.
pad -> Optional argument, set the pad character (PAD_NORMAL) to use during
        all encrypt/decrypt operations done with this instance.
padmode -> Optional argument, set the padding mode (PAD_NORMAL or PAD_PKCS5)
        to use during all encrypt/decrypt operations done with this instance.
I recommend using PAD_PKCS5 padding: you then never need to worry about
padding issues, since the padding can be removed unambiguously when decrypting
data that was encrypted with the PAD_PKCS5 padmode.
Common methods
--------------
encrypt(data, [pad], [padmode])
decrypt(data, [pad], [padmode])
data -> Bytes to be encrypted/decrypted
pad -> Optional argument. Only used with a padmode of PAD_NORMAL. For
        encryption, adds this character to the end of the data block when
        data is not a multiple of 8 bytes. For decryption, will remove the
        trailing characters that match this pad character from the last 8
        bytes of the unencrypted data block.
padmode -> Optional argument, set the padding mode, must be one of PAD_NORMAL
        or PAD_PKCS5. Defaults to PAD_NORMAL.
Example
-------
from pyDes import *
data = "Please encrypt my data"
k = des("DESCRYPT", CBC, "\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5)
# For Python3, you'll need to use bytes, i.e.:
# data = b"Please encrypt my data"
# k = des(b"DESCRYPT", CBC, b"\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5)
d = k.encrypt(data)
print "Encrypted: %r" % d
print "Decrypted: %r" % k.decrypt(d)
assert k.decrypt(d, padmode=PAD_PKCS5) == data
See the module source (pyDes.py) for more examples of use.
You can also run the pyDes.py file without any arguments to see a simple test.
Note: This code was not written for high-end systems needing a fast
implementation, but rather as a handy, portable solution for light usage.
"""
import sys
# _pythonMajorVersion is used to handle Python2 and Python3 differences.
_pythonMajorVersion = sys.version_info[0]
# Modes of crypting / cyphering
ECB = 0
CBC = 1
# Modes of padding
PAD_NORMAL = 1
PAD_PKCS5 = 2
# PAD_PKCS5: is a method that will unambiguously remove all padding
# characters after decryption, when originally encrypted with
# this padding mode.
# For a good description of the PKCS5 padding technique, see:
# http://www.faqs.org/rfcs/rfc1423.html
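# Example: with PAD_PKCS5 each pad byte equals the pad length, so b"HELLO"
# (5 bytes) is padded to b"HELLO\x03\x03\x03", and data that is already a
# multiple of 8 bytes gains a full block of eight b"\x08" bytes; the pad is
# therefore always removable without ambiguity.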
# The base class shared by des and triple des.
class _baseDes(object):
def __init__(self, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL):
if IV:
IV = self._guardAgainstUnicode(IV)
if pad:
pad = self._guardAgainstUnicode(pad)
self.block_size = 8
# Sanity checking of arguments.
if pad and padmode == PAD_PKCS5:
raise ValueError("Cannot use a pad character with PAD_PKCS5")
if IV and len(IV) != self.block_size:
raise ValueError("Invalid Initial Value (IV), must be a multiple of " + str(self.block_size) + " bytes")
# Set the passed in variables
self._mode = mode
self._iv = IV
self._padding = pad
self._padmode = padmode
def getKey(self):
"""getKey() -> bytes"""
return self.__key
def setKey(self, key):
"""Will set the crypting key for this object."""
key = self._guardAgainstUnicode(key)
self.__key = key
def getMode(self):
"""getMode() -> pyDes.ECB or pyDes.CBC"""
return self._mode
def setMode(self, mode):
"""Sets the type of crypting mode, pyDes.ECB or pyDes.CBC"""
self._mode = mode
def getPadding(self):
"""getPadding() -> bytes of length 1. Padding character."""
return self._padding
def setPadding(self, pad):
"""setPadding() -> bytes of length 1. Padding character."""
if pad is not None:
pad = self._guardAgainstUnicode(pad)
self._padding = pad
def getPadMode(self):
"""getPadMode() -> pyDes.PAD_NORMAL or pyDes.PAD_PKCS5"""
return self._padmode
def setPadMode(self, mode):
"""Sets the type of padding mode, pyDes.PAD_NORMAL or pyDes.PAD_PKCS5"""
self._padmode = mode
def getIV(self):
"""getIV() -> bytes"""
return self._iv
def setIV(self, IV):
"""Will set the Initial Value, used in conjunction with CBC mode"""
if not IV or len(IV) != self.block_size:
raise ValueError("Invalid Initial Value (IV), must be a multiple of " + str(self.block_size) + " bytes")
IV = self._guardAgainstUnicode(IV)
self._iv = IV
def _padData(self, data, pad, padmode):
# Pad data depending on the mode
if padmode is None:
# Get the default padding mode.
padmode = self.getPadMode()
if pad and padmode == PAD_PKCS5:
raise ValueError("Cannot use a pad character with PAD_PKCS5")
if padmode == PAD_NORMAL:
if len(data) % self.block_size == 0:
# No padding required.
return data
if not pad:
# Get the default padding.
pad = self.getPadding()
if not pad:
raise ValueError("Data must be a multiple of " + str(self.block_size) + " bytes in length. Use padmode=PAD_PKCS5 or set the pad character.")
data += (self.block_size - (len(data) % self.block_size)) * pad
elif padmode == PAD_PKCS5:
pad_len = 8 - (len(data) % self.block_size)
if _pythonMajorVersion < 3:
data += pad_len * chr(pad_len)
else:
data += bytes([pad_len] * pad_len)
return data
def _unpadData(self, data, pad, padmode):
# Unpad data depending on the mode.
if not data:
return data
if pad and padmode == PAD_PKCS5:
raise ValueError("Cannot use a pad character with PAD_PKCS5")
if padmode is None:
# Get the default padding mode.
padmode = self.getPadMode()
if padmode == PAD_NORMAL:
if not pad:
# Get the default padding.
pad = self.getPadding()
if pad:
data = data[:-self.block_size] + \
data[-self.block_size:].rstrip(pad)
elif padmode == PAD_PKCS5:
if _pythonMajorVersion < 3:
pad_len = ord(data[-1])
else:
pad_len = data[-1]
data = data[:-pad_len]
return data
def _guardAgainstUnicode(self, data):
# Only accept byte strings or ascii unicode values, otherwise
# there is no way to correctly decode the data into bytes.
if _pythonMajorVersion < 3:
if isinstance(data, unicode):
raise ValueError("pyDes can only work with bytes, not Unicode strings.")
else:
if isinstance(data, str):
# Only accept ascii unicode values.
try:
return data.encode('ascii')
except UnicodeEncodeError:
pass
raise ValueError("pyDes can only work with encoded strings, not Unicode.")
return data
#############################################################################
# DES #
#############################################################################
class des(_baseDes):
"""DES encryption/decrytpion class
Supports ECB (Electronic Code Book) and CBC (Cypher Block Chaining) modes.
pyDes.des(key,[mode], [IV])
key -> Bytes containing the encryption key, must be exactly 8 bytes
mode -> Optional argument for encryption type, can be either pyDes.ECB
(Electronic Code Book), pyDes.CBC (Cypher Block Chaining)
IV -> Optional Initial Value bytes, must be supplied if using CBC mode.
Must be 8 bytes in length.
pad -> Optional argument, set the pad character (PAD_NORMAL) to use
               during all encrypt/decrypt operations done with this instance.
padmode -> Optional argument, set the padding mode (PAD_NORMAL or
               PAD_PKCS5) to use during all encrypt/decrypt operations done
with this instance.
"""
# Permutation and translation tables for DES
__pc1 = [56, 48, 40, 32, 24, 16, 8,
0, 57, 49, 41, 33, 25, 17,
9, 1, 58, 50, 42, 34, 26,
18, 10, 2, 59, 51, 43, 35,
62, 54, 46, 38, 30, 22, 14,
6, 61, 53, 45, 37, 29, 21,
13, 5, 60, 52, 44, 36, 28,
20, 12, 4, 27, 19, 11, 3
]
# number left rotations of pc1
__left_rotations = [
1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1
]
# permuted choice key (table 2)
__pc2 = [
13, 16, 10, 23, 0, 4,
2, 27, 14, 5, 20, 9,
22, 18, 11, 3, 25, 7,
15, 6, 26, 19, 12, 1,
40, 51, 30, 36, 46, 54,
29, 39, 50, 44, 32, 47,
43, 48, 38, 55, 33, 52,
45, 41, 49, 35, 28, 31
]
# initial permutation IP
__ip = [57, 49, 41, 33, 25, 17, 9, 1,
59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5,
63, 55, 47, 39, 31, 23, 15, 7,
56, 48, 40, 32, 24, 16, 8, 0,
58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6
]
# Expansion table for turning 32 bit blocks into 48 bits
__expansion_table = [
31, 0, 1, 2, 3, 4,
3, 4, 5, 6, 7, 8,
7, 8, 9, 10, 11, 12,
11, 12, 13, 14, 15, 16,
15, 16, 17, 18, 19, 20,
19, 20, 21, 22, 23, 24,
23, 24, 25, 26, 27, 28,
27, 28, 29, 30, 31, 0
]
# The (in)famous S-boxes
__sbox = [
# S1
[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7,
0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8,
4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0,
15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13],
# S2
[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10,
3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5,
0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15,
13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9],
# S3
[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8,
13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1,
13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7,
1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12],
# S4
[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15,
13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9,
10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4,
3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14],
# S5
[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9,
14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6,
4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14,
11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3],
# S6
[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11,
10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8,
9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6,
4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13],
# S7
[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1,
13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6,
1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2,
6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12],
# S8
[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7,
1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2,
7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8,
2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11],
]
# 32-bit permutation function P used on the output of the S-boxes
__p = [
15, 6, 19, 20, 28, 11,
27, 16, 0, 14, 22, 25,
4, 17, 30, 9, 1, 7,
        23, 13, 31, 26, 2, 8,
18, 12, 29, 5, 21, 10,
3, 24
]
# final permutation IP^-1
__fp = [
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25,
32, 0, 40, 8, 48, 16, 56, 24
]
# Type of crypting being done
ENCRYPT = 0x00
DECRYPT = 0x01
# Initialisation
def __init__(self, key, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL):
# Sanity checking of arguments.
if len(key) != 8:
raise ValueError("Invalid DES key size. Key must be exactly 8 bytes long.")
_baseDes.__init__(self, mode, IV, pad, padmode)
self.key_size = 8
self.L = []
self.R = []
self.Kn = [ [0] * 48 ] * 16 # 16 48-bit keys (K1 - K16)
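        # Note: the sublists above are aliased, but that is harmless because
        # __create_sub_keys() rebinds every Kn[i] to a fresh list.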
self.final = []
self.setKey(key)
def setKey(self, key):
"""Will set the crypting key for this object. Must be 8 bytes."""
_baseDes.setKey(self, key)
self.__create_sub_keys()
def __String_to_BitList(self, data):
"""Turn the string data, into a list of bits (1, 0)'s"""
if _pythonMajorVersion < 3:
# Turn the strings into integers. Python 3 uses a bytes
# class, which already has this behaviour.
data = [ord(c) for c in data]
l = len(data) * 8
result = [0] * l
pos = 0
for ch in data:
i = 7
while i >= 0:
if ch & (1 << i) != 0:
result[pos] = 1
else:
result[pos] = 0
pos += 1
i -= 1
return result
def __BitList_to_String(self, data):
"""Turn the list of bits -> data, into a string"""
result = []
pos = 0
c = 0
while pos < len(data):
c += data[pos] << (7 - (pos % 8))
if (pos % 8) == 7:
result.append(c)
c = 0
pos += 1
if _pythonMajorVersion < 3:
return ''.join([ chr(c) for c in result ])
else:
return bytes(result)
def __permutate(self, table, block):
"""Permutate this block with the specified table"""
return list(map(lambda x: block[x], table))
# Transform the secret key, so that it is ready for data processing
# Create the 16 subkeys, K[1] - K[16]
def __create_sub_keys(self):
"""Create the 16 subkeys K[1] to K[16] from the given key"""
key = self.__permutate(des.__pc1, self.__String_to_BitList(self.getKey()))
i = 0
# Split into Left and Right sections
self.L = key[:28]
self.R = key[28:]
while i < 16:
j = 0
# Perform circular left shifts
while j < des.__left_rotations[i]:
self.L.append(self.L[0])
del self.L[0]
self.R.append(self.R[0])
del self.R[0]
j += 1
# Create one of the 16 subkeys through pc2 permutation
self.Kn[i] = self.__permutate(des.__pc2, self.L + self.R)
i += 1
# Main part of the encryption algorithm, the number cruncher :)
def __des_crypt(self, block, crypt_type):
"""Crypt the block of data through DES bit-manipulation"""
block = self.__permutate(des.__ip, block)
self.L = block[:32]
self.R = block[32:]
# Encryption starts from Kn[1] through to Kn[16]
if crypt_type == des.ENCRYPT:
iteration = 0
iteration_adjustment = 1
# Decryption starts from Kn[16] down to Kn[1]
else:
iteration = 15
iteration_adjustment = -1
i = 0
while i < 16:
# Make a copy of R[i-1], this will later become L[i]
tempR = self.R[:]
# Permutate R[i - 1] to start creating R[i]
self.R = self.__permutate(des.__expansion_table, self.R)
# Exclusive or R[i - 1] with K[i], create B[1] to B[8] whilst here
self.R = list(map(lambda x, y: x ^ y, self.R, self.Kn[iteration]))
B = [self.R[:6], self.R[6:12], self.R[12:18], self.R[18:24], self.R[24:30], self.R[30:36], self.R[36:42], self.R[42:]]
# Optimization: Replaced below commented code with above
#j = 0
#B = []
#while j < len(self.R):
# self.R[j] = self.R[j] ^ self.Kn[iteration][j]
# j += 1
# if j % 6 == 0:
# B.append(self.R[j-6:j])
# Permutate B[1] to B[8] using the S-Boxes
j = 0
Bn = [0] * 32
pos = 0
while j < 8:
# Work out the offsets
m = (B[j][0] << 1) + B[j][5]
n = (B[j][1] << 3) + (B[j][2] << 2) + (B[j][3] << 1) + B[j][4]
# Find the permutation value
v = des.__sbox[j][(m << 4) + n]
# Turn value into bits, add it to result: Bn
Bn[pos] = (v & 8) >> 3
Bn[pos + 1] = (v & 4) >> 2
Bn[pos + 2] = (v & 2) >> 1
Bn[pos + 3] = v & 1
pos += 4
j += 1
            # Permutate the concatenation of B[1] to B[8] (Bn)
self.R = self.__permutate(des.__p, Bn)
# Xor with L[i - 1]
self.R = list(map(lambda x, y: x ^ y, self.R, self.L))
# Optimization: This now replaces the below commented code
#j = 0
#while j < len(self.R):
# self.R[j] = self.R[j] ^ self.L[j]
# j += 1
# L[i] becomes R[i - 1]
self.L = tempR
i += 1
iteration += iteration_adjustment
# Final permutation of R[16]L[16]
self.final = self.__permutate(des.__fp, self.R + self.L)
return self.final
# Data to be encrypted/decrypted
def crypt(self, data, crypt_type):
"""Crypt the data in blocks, running it through des_crypt()"""
# Error check the data
if not data:
return ''
if len(data) % self.block_size != 0:
            if crypt_type == des.DECRYPT: # Decryption must work on 8 byte blocks
                raise ValueError("Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes.")
            if not self.getPadding():
                raise ValueError("Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes. Try setting the optional padding character")
else:
data += (self.block_size - (len(data) % self.block_size)) * self.getPadding()
# print "Len of data: %f" % (len(data) / self.block_size)
if self.getMode() == CBC:
if self.getIV():
iv = self.__String_to_BitList(self.getIV())
else:
raise ValueError("For CBC mode, you must supply the Initial Value (IV) for ciphering")
        # Split the data into blocks, crypting each one separately
i = 0
dict = {}
result = []
#cached = 0
#lines = 0
while i < len(data):
# Test code for caching encryption results
#lines += 1
#if dict.has_key(data[i:i+8]):
#print "Cached result for: %s" % data[i:i+8]
# cached += 1
# result.append(dict[data[i:i+8]])
# i += 8
# continue
block = self.__String_to_BitList(data[i:i+8])
# Xor with IV if using CBC mode
if self.getMode() == CBC:
if crypt_type == des.ENCRYPT:
block = list(map(lambda x, y: x ^ y, block, iv))
#j = 0
#while j < len(block):
# block[j] = block[j] ^ iv[j]
# j += 1
processed_block = self.__des_crypt(block, crypt_type)
if crypt_type == des.DECRYPT:
processed_block = list(map(lambda x, y: x ^ y, processed_block, iv))
#j = 0
#while j < len(processed_block):
# processed_block[j] = processed_block[j] ^ iv[j]
# j += 1
iv = block
else:
iv = processed_block
else:
processed_block = self.__des_crypt(block, crypt_type)
# Add the resulting crypted block to our list
#d = self.__BitList_to_String(processed_block)
#result.append(d)
result.append(self.__BitList_to_String(processed_block))
#dict[data[i:i+8]] = d
i += 8
# print "Lines: %d, cached: %d" % (lines, cached)
# Return the full crypted string
if _pythonMajorVersion < 3:
return ''.join(result)
else:
            return b''.join(result)
def encrypt(self, data, pad=None, padmode=None):
"""encrypt(data, [pad], [padmode]) -> bytes
data : Bytes to be encrypted
pad : Optional argument for encryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be encrypted
with the already specified key. Data does not have to be a
multiple of 8 bytes if the padding character is supplied, or
        the padmode is set to PAD_PKCS5, as bytes will then be added to
        ensure the padded data is a multiple of 8 bytes.
"""
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
data = self._padData(data, pad, padmode)
return self.crypt(data, des.ENCRYPT)
def decrypt(self, data, pad=None, padmode=None):
"""decrypt(data, [pad], [padmode]) -> bytes
        data : Bytes to be decrypted
pad : Optional argument for decryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be decrypted
with the already specified key. In PAD_NORMAL mode, if the
optional padding character is supplied, then the un-encrypted
data will have the padding characters removed from the end of
the bytes. This pad removal only occurs on the last 8 bytes of
the data (last data block). In PAD_PKCS5 mode, the special
padding end markers will be removed from the data after decrypting.
"""
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
data = self.crypt(data, des.DECRYPT)
return self._unpadData(data, pad, padmode)
#############################################################################
# Triple DES #
#############################################################################
class triple_des(_baseDes):
"""Triple DES encryption/decrytpion class
This algorithm uses the DES-EDE3 (when a 24 byte key is supplied) or
the DES-EDE2 (when a 16 byte key is supplied) encryption methods.
Supports ECB (Electronic Code Book) and CBC (Cypher Block Chaining) modes.
    pyDes.triple_des(key, [mode], [IV])
key -> Bytes containing the encryption key, must be either 16 or
24 bytes long
mode -> Optional argument for encryption type, can be either pyDes.ECB
(Electronic Code Book), pyDes.CBC (Cypher Block Chaining)
IV -> Optional Initial Value bytes, must be supplied if using CBC mode.
Must be 8 bytes in length.
pad -> Optional argument, set the pad character (PAD_NORMAL) to use
                during all encrypt/decrypt operations done with this instance.
padmode -> Optional argument, set the padding mode (PAD_NORMAL or
                PAD_PKCS5) to use during all encrypt/decrypt operations done
with this instance.
"""
def __init__(self, key, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL):
_baseDes.__init__(self, mode, IV, pad, padmode)
self.setKey(key)
def setKey(self, key):
"""Will set the crypting key for this object. Either 16 or 24 bytes long."""
self.key_size = 24 # Use DES-EDE3 mode
if len(key) != self.key_size:
if len(key) == 16: # Use DES-EDE2 mode
self.key_size = 16
else:
raise ValueError("Invalid triple DES key size. Key must be either 16 or 24 bytes long")
if self.getMode() == CBC:
if not self.getIV():
# Use the first 8 bytes of the key
self._iv = key[:self.block_size]
if len(self.getIV()) != self.block_size:
raise ValueError("Invalid IV, must be 8 bytes in length")
self.__key1 = des(key[:8], self._mode, self._iv,
self._padding, self._padmode)
self.__key2 = des(key[8:16], self._mode, self._iv,
self._padding, self._padmode)
if self.key_size == 16:
self.__key3 = self.__key1
else:
self.__key3 = des(key[16:], self._mode, self._iv,
self._padding, self._padmode)
_baseDes.setKey(self, key)
# Override setter methods to work on all 3 keys.
def setMode(self, mode):
"""Sets the type of crypting mode, pyDes.ECB or pyDes.CBC"""
_baseDes.setMode(self, mode)
for key in (self.__key1, self.__key2, self.__key3):
key.setMode(mode)
def setPadding(self, pad):
"""setPadding() -> bytes of length 1. Padding character."""
_baseDes.setPadding(self, pad)
for key in (self.__key1, self.__key2, self.__key3):
key.setPadding(pad)
def setPadMode(self, mode):
"""Sets the type of padding mode, pyDes.PAD_NORMAL or pyDes.PAD_PKCS5"""
_baseDes.setPadMode(self, mode)
for key in (self.__key1, self.__key2, self.__key3):
key.setPadMode(mode)
def setIV(self, IV):
"""Will set the Initial Value, used in conjunction with CBC mode"""
_baseDes.setIV(self, IV)
for key in (self.__key1, self.__key2, self.__key3):
key.setIV(IV)
def encrypt(self, data, pad=None, padmode=None):
"""encrypt(data, [pad], [padmode]) -> bytes
data : bytes to be encrypted
pad : Optional argument for encryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be encrypted
with the already specified key. Data does not have to be a
multiple of 8 bytes if the padding character is supplied, or
        the padmode is set to PAD_PKCS5, as bytes will then be added to
        ensure the padded data is a multiple of 8 bytes.
"""
ENCRYPT = des.ENCRYPT
DECRYPT = des.DECRYPT
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
# Pad the data accordingly.
data = self._padData(data, pad, padmode)
if self.getMode() == CBC:
self.__key1.setIV(self.getIV())
self.__key2.setIV(self.getIV())
self.__key3.setIV(self.getIV())
i = 0
result = []
while i < len(data):
block = self.__key1.crypt(data[i:i+8], ENCRYPT)
block = self.__key2.crypt(block, DECRYPT)
block = self.__key3.crypt(block, ENCRYPT)
self.__key1.setIV(block)
self.__key2.setIV(block)
self.__key3.setIV(block)
result.append(block)
i += 8
if _pythonMajorVersion < 3:
return ''.join(result)
else:
                return b''.join(result)
else:
data = self.__key1.crypt(data, ENCRYPT)
data = self.__key2.crypt(data, DECRYPT)
return self.__key3.crypt(data, ENCRYPT)
def decrypt(self, data, pad=None, padmode=None):
"""decrypt(data, [pad], [padmode]) -> bytes
        data : bytes to be decrypted
pad : Optional argument for decryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be decrypted
with the already specified key. In PAD_NORMAL mode, if the
optional padding character is supplied, then the un-encrypted
data will have the padding characters removed from the end of
the bytes. This pad removal only occurs on the last 8 bytes of
the data (last data block). In PAD_PKCS5 mode, the special
padding end markers will be removed from the data after
decrypting, no pad character is required for PAD_PKCS5.
"""
ENCRYPT = des.ENCRYPT
DECRYPT = des.DECRYPT
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
if self.getMode() == CBC:
self.__key1.setIV(self.getIV())
self.__key2.setIV(self.getIV())
self.__key3.setIV(self.getIV())
i = 0
result = []
while i < len(data):
iv = data[i:i+8]
block = self.__key3.crypt(iv, DECRYPT)
block = self.__key2.crypt(block, ENCRYPT)
block = self.__key1.crypt(block, DECRYPT)
self.__key1.setIV(iv)
self.__key2.setIV(iv)
self.__key3.setIV(iv)
result.append(block)
i += 8
if _pythonMajorVersion < 3:
data = ''.join(result)
else:
                data = b''.join(result)
else:
data = self.__key3.crypt(data, DECRYPT)
data = self.__key2.crypt(data, ENCRYPT)
data = self.__key1.crypt(data, DECRYPT)
return self._unpadData(data, pad, padmode)
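# A minimal round-trip check (a sketch only; the module's own test routine,
# referenced in the docstring above, is not reproduced here):
if __name__ == '__main__':
    _k = triple_des(b"0123456789abcdefghijklmn", CBC, b"\0" * 8,
                    padmode=PAD_PKCS5)
    _ct = _k.encrypt(b"Please encrypt my data")
    assert _k.decrypt(_ct) == b"Please encrypt my data"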
|
|
# -*- coding: utf-8 -*-
"""
Calculates rho(z) to maintain hydrostatic equilibrium in a thin disc.
Assumes uniform temperature in the disc, and an infinite disc where
rho can be treated (at least locally) as only a function of z.
Created on Mon Jan 20 12:30:06 2014
@author: ibackus
"""
import isaac
import numpy as np
import scipy
import scipy.integrate as nInt
import scipy.optimize as opt
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
import pynbody
from pynbody.array import SimArray
from warnings import warn
# TEMP
import cPickle as pickle
def rho_z(ICobj, r):
"""
rho,z = rho_z(...)
Calculates rho(z) to maintain hydrostatic equilibrium in a thin disc.
Assumes uniform temperature in the disc, and an infinite disc where
rho can be treated (locally) as only a function of z.
Only calculates for z>=0, since the density is assumed to be symmetric
about z=0
The initial guess for rho (a gaussian) only really seems to work for
Mstar >> Mdisc. Otherwise the solution can diverge violently.
* NUMERICAL CALCULATION OF RHO(Z) *
The calculation proceeds using several steps.
1) Make an initial guess for I, the integral of rho from z to inf. This
is an error function
2) Modify length scale of the initial guess to minimize the residual
for the differential equation governing I. Use this as the new
initial guess.
3) Find the root I(z) for the differential equation governing I, with
the boundary condition that I(0) = sigma/2
4) Set rho = -dI/dz
5) Find the root rho(z) for the diff. eq. governing rho.
6) In order to satisfy the BC on I, scale rho so that:
Integral(rho) = I(0)
7) Repeat (5) and (6) until rho is rescaled by a factor closer to unity
than rho_tol
Steps 5-7 are done because the solution for I does not seem to
satisfy the diff. eq. for rho very well. But doing it this way
allows rho to satisfy the surface density profile
* Arguments *
ICobj - the initial conditions object for which rho is being calculated.
r - The radius at which rho is being calculated. Should have units
* Output *
Returns a 1D SimArray (see pynbody) of rho(z) and a 1D SimArray of z,
with the same units as ICobj.settings.rho_calc.zmax
"""
# Load from ICobj
settings = ICobj.settings
T = ICobj.T(r)
sigma = ICobj.sigma(r)
# Parse settings
rho_tol = settings.rho_calc.rho_tol
nz = settings.rho_calc.nz
zmax = settings.rho_calc.zmax
m = settings.physical.m
M = settings.physical.M
# Physical constants
kB = SimArray(1.0,'k')
G = SimArray(1.0,'G')
# Set up default units
mass_unit = M.units
length_unit = zmax.units
r = (r.in_units(length_unit)).copy()
# Initial conditions/physical parameters
rho_int = 0.5*sigma.in_units(mass_unit/length_unit**2) # Integral of rho from 0 to inf
a = (G*M*m/(kB*T)).in_units(length_unit)
b = (2*np.pi*G*m/(kB*T)).in_units(length_unit/mass_unit)
    z0guess = np.sqrt(2*r*r*r/a).in_units(length_unit)  # Est. scale height of disk
z0_dummy = (2/(b*sigma)).in_units(length_unit)
z = np.linspace(0.0,zmax,nz)
dz = z[[1]]-z[[0]]
# Echo parameters used
print '***********************************************'
print '* Calculating rho(z)'
print '***********************************************'
print 'sigma = {0} {1}'.format(sigma,sigma.units)
print 'zmax = {0} {1}'.format(zmax,zmax.units)
print 'r = {0} {1}'.format(r,r.units)
print 'molecular mass = {0} {1}'.format(m,m.units)
print 'Star mass = {0} {1}'.format(M,M.units)
print 'Temperature = {0} {1}'.format(T,T.units)
print ''
print 'rho_tol = {0}'.format(rho_tol)
print 'nz = {0}'.format(nz)
print '***********************************************'
print 'a = {0} {1}'.format(a,a.units)
print 'b = {0} {1}'.format(b,b.units)
print 'z0guess = {0} {1}'.format(z0guess,z0guess.units)
print '***********************************************'
print 'z0 (from sech^2) = {0} {1}'.format(z0_dummy,z0_dummy.units)
# --------------------------------------------------------
# STRIP THE UNITS FROM EVERYTHING!!!
# This has to be done because many of the scipy/numpy functions used cannot
# handle pynbody units. Before returning z, rho, or anything else, the
# Units must be re-introduced
# --------------------------------------------------------
rho_int, a, b, z0guess, z0_dummy, z, dz, r, T, sigma \
= isaac.strip_units([rho_int, a, b, z0guess, z0_dummy, z, dz, r, T, sigma])
# --------------------------------------------------------
# Check sigma and T
# --------------------------------------------------------
if sigma < 1e-100:
        warn('Sigma too small. Setting rho = 0')
rho0 = np.zeros(len(z))
# Set up units
rho0 = isaac.set_units(rho0, mass_unit/length_unit**3)
z = isaac.set_units(z, length_unit)
return rho0, z
if T > 1e100:
warn('Temperature too large. Setting rho = 0')
rho0 = np.zeros(len(z))
# Set up units
rho0 = isaac.set_units(rho0, mass_unit/length_unit**3)
z = isaac.set_units(z, length_unit)
return rho0, z
# -------------------------------------------------------------------
# FUNCTION DEFINITIONS
# -------------------------------------------------------------------
def dI_dz(I_in):
"""
Finite difference approximation of dI/dz, assuming I is odd around I(0)
"""
I = I_in.copy()
dI = np.zeros(len(I))
# Fourth order center differencing
dI[0] = (-I[2] + 8*I[1] - 7*I[0])/(6*dz)
dI[1] = (-I[3] + 8*I[2] - 6*I[0] - I[1])/(12*dz)
dI[2:-2] = (-I[4:] + 8*I[3:-1] -8*I[1:-3] + I[0:-4])/(12*dz)
# Second order backward differencing for right edge
dI[-2:] = (3*I[-2:] -4*I[-3:-1] + I[-4:-2])/(2*dz)
return dI
def d2I_dz2(I_in):
# Finite difference for d2I/dz2 assuming it is 0 at the origin
I = I_in.copy()
d2I = np.zeros(len(I))
# Boundary condition
d2I[0] = 0
# Centered 4th order finite difference
d2I[1] = (-I[3] + 16*I[2] - 30*I[1] + 16*I[0] -(2*I[0] - I[1]))/(12*dz**2)
d2I[2:-2] = (-I[4:] + 16*I[3:-1] - 30*I[2:-2] + 16*I[1:-3] - I[0:-4])/(12*(dz**2))
# second order backward difference for right edge
d2I[-2:] = (-2*I[-2:] + 5*I[-3:-1] -4*I[-4:-2] + I[-5:-3])/dz**2
return d2I
def Ires(I_in):
"""
Calculate the residual for the differential equation governing I,
the integral of rho from z to "infinity."
"""
# DEFINE INITIAL CONDITION:
I = I_in.copy()
I[0] = rho_int
#I[-1] = 0.0
weight = 1.0
res = d2I_dz2(I) + dI_dz(I)*(a*z/((z**2 + r**2)**(1.5)) + 2*b*(I[0] - I))
return weight*res
def drho_dz(rho_in):
"""
Fourth order, centered finite difference for d(rho)/dz, assumes that
rho is an even function. The right-hand boundary is done using
backward differencing
"""
rho = rho_in.copy()
drho = np.zeros(len(rho))
drho[0] = 0.0 # defined by boundary condition, rho[0] = max(rho)
drho[1] = (-rho[3] + 8*rho[2] - 8*rho[0] + rho[1])/(12*dz)
drho[2:-2] = (-rho[4:] + 8*rho[3:-1] - 8*rho[1:-3] + rho[0:-4])/(12*dz)
drho[-2:] = (3*rho[-2:] - 4*rho[-3:-1] + rho[-4:-2])/(2*dz)
return drho
def residual(rho_in):
"""
Estimate d(rho)/dz
"""
rho = rho_in.copy()
# Estimate integral of rho
I = np.zeros(len(rho))
I[1:] = nInt.cumtrapz(rho,z)
# Estimate residual
res = drho_dz(rho) + a*rho*z/((z**2 + r**2)**(1.5)) + 2*b*rho*I
return res
def erf_res(scale_size):
testfct = rho_int*(1 - scipy.special.erf(z/scale_size))
return abs(Ires(testfct)).sum()
# -------------------------------------------------------------------
# FIND RHO
# -------------------------------------------------------------------
# Estimate the scale length of the error function
z0 = opt.fminbound(erf_res,z0guess/100.0,5.0*z0guess)
print 'Length scale guess: {0} {1}'.format(z0guess, length_unit)
print 'Final length scale: {0} {1}'.format(z0, length_unit)
# Begin by finding I, the integral of rho (from z to inf)
# Assuming rho is gaussian, I is an error function
guess = rho_int*(1 - scipy.special.erf(z/z0))
# Find the root of the differential equation for I
Isol = opt.newton_krylov(Ires,guess,iter=50)
# rho is the negative derivative
rho0 = -dI_dz(Isol)
rhoguess = rho0.copy()
# Now apply the diff eq on rho
maxiter = 50
for n in range(maxiter):
print 'Iteration {0}'.format(n+1)
rho0 = opt.newton_krylov(residual,rho0,iter=50)
rho_scale = rho_int/nInt.cumtrapz(rho0,z)[-1]
print 'Scaling rho by {0}'.format(rho_scale)
rho0 = rho0*rho_scale
if abs(1-rho_scale) < rho_tol - 1:
break
# Re-introduce units
rho0 = isaac.set_units(rho0, mass_unit/length_unit**3)
z = isaac.set_units(z, length_unit)
return SimArray(rho0,'Msol au**-3'), SimArray(z,'au')
def cdfinv_z(z,rho):
"""
Calculates the inverse of the cumulative distribution function for
probability as a function of z for a given r
*** Arguments ***
* z * z positions to calculate over. 1D array
* rho * Density as a function of z. Treated as an un-normalized
probability. 1D array
IF Z doesn't have units, units of 'au' are assumed
*** Returns ***
Returns the inverse normalized CDF as 1D spline interpolation
"""
# Check for units
if pynbody.units.has_units(z):
zunit = z.units
else:
zunit = pynbody.units.au
# Calculate the CDF from prob
nz = len(z)
f = np.zeros(nz)
f[1:] = nInt.cumtrapz(rho,z)
if f.max() <= 0.0:
        # The density (rho) is zero (or negative) for every z here.
# Make all particles go to z = 0.0
def finv(m_in):
return m_in*0.0
return finv
f /= f.max()
# Calculate the inverse CDF.
# Assume CDF is approximately monotonic and sort to force it to be
ind = f.argsort()
f = f[ind]
z = z[ind]
# Drop values where CDF is constant (ie, prob = 0)
mask = np.ones(nz,dtype='bool')
for n in range(1,nz):
if f[n] == f[n-1]:
mask[n] = False
f = f[mask]
z = z[mask]
finv_spline = interp1d(f,z,kind='linear')
def finv(m):
return SimArray(finv_spline(m), zunit)
return finv
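# Typical usage (a sketch): sample particle z positions by inverse-transform
# sampling, with rho and z as returned by rho_z() above.
#
#   rho, z = rho_z(ICobj, r)
#   finv = cdfinv_z(z, rho)
#   z_samples = finv(np.random.uniform(0.0, 1.0, size=1000))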
|
|
from __future__ import absolute_import, print_function, division
import copy
import numpy
import logging
import pdb
from six.moves import xrange
import theano
from theano import tensor, scalar, gof, config
from theano.compile import optdb
from theano.compile.ops import shape_i
from theano.gof import (local_optimizer, EquilibriumDB, TopoOptimizer,
SequenceDB, Optimizer, toolbox)
from theano.gof.optdb import LocalGroupDB
from theano.ifelse import IfElse
from theano.scalar.basic import Scalar, Pow, Cast
from theano.scan_module import scan_utils, scan_op, scan_opt
from theano.tensor.nnet.conv import ConvOp
from theano.tensor.nnet.blocksparse import SparseBlockGemv, SparseBlockOuter
from theano.tensor.nnet.abstract_conv import (AbstractConv2d,
AbstractConv2d_gradWeights,
AbstractConv2d_gradInputs)
from theano.tests.breakpoint import PdbBreakpoint
from .type import (GpuArrayType, GpuArrayConstant, get_context,
ContextNotDefined)
from .basic_ops import (as_gpuarray_variable, infer_context_name,
host_from_gpu, GpuToGpu,
HostFromGpu, GpuFromHost,
GpuSplit, GpuContiguous, gpu_contiguous,
GpuAlloc, GpuAllocEmpty, GpuReshape,
GpuEye, gpu_join, GpuJoin)
from .blas import (gpu_dot22, GpuGemm, GpuGer, GpuGemmBatch,
gpugemm_no_inplace, gpugemm_inplace, gpugemmbatch_no_inplace,
gpugemv_no_inplace, gpugemv_inplace)
from .blocksparse import (GpuSparseBlockGemv, GpuSparseBlockOuter,
gpu_sparse_block_outer, gpu_sparse_block_outer_inplace,
gpu_sparse_block_gemv, gpu_sparse_block_gemv_inplace)
from .nnet import (gpu_crossentropy_softmax_1hot_with_bias_dx,
gpu_crossentropy_softmax_argmax_1hot_with_bias,
gpu_softmax_with_bias, gpu_softmax)
from .elemwise import (GpuElemwise, GpuDimShuffle, GpuCAReduceCuda,
GpuCAReduceCPY)
from .subtensor import (GpuIncSubtensor, GpuSubtensor,
GpuAdvancedSubtensor1,
GpuAdvancedIncSubtensor1,
GpuAdvancedIncSubtensor1_dev20)
from .opt_util import alpha_merge, output_merge
_logger = logging.getLogger("theano.gpuarray.opt")
gpu_optimizer = EquilibriumDB()
gpu_cut_copies = EquilibriumDB()
gpu_seqopt = SequenceDB()
# Don't register this right now
conv_groupopt = LocalGroupDB()
conv_groupopt.__name__ = "gpua_conv_opts"
gpu_seqopt.register('gpuarray_local_optimiziations', gpu_optimizer, 1,
'fast_compile', 'fast_run', 'gpuarray')
gpu_seqopt.register('gpuarray_cut_transfers', gpu_cut_copies, 2,
'fast_compile', 'fast_run', 'gpuarray')
# do not add 'fast_run' to these two as this would always enable gpuarray mode
optdb.register('gpuarray_opt', gpu_seqopt,
optdb.__position__.get('add_destroy_handler', 49.5) - 1,
'gpuarray')
def register_opt(*tags, **kwargs):
def f(local_opt):
name = (kwargs and kwargs.pop('name')) or local_opt.__name__
gpu_optimizer.register(name, local_opt, 'fast_run', 'gpuarray', *tags)
return local_opt
return f
def register_inplace(*tags, **kwargs):
def f(local_opt):
name = (kwargs and kwargs.pop('name')) or local_opt.__name__
optdb.register(
name, TopoOptimizer(
local_opt, failure_callback=TopoOptimizer.warn_inplace),
60, 'fast_run', 'inplace', 'gpuarray', *tags)
return local_opt
return f
register_opt('fast_compile')(theano.tensor.opt.local_track_shape_i)
register_opt(final_opt=True, name='gpua_constant_folding')(
tensor.opt.constant_folding)
gpu_optimizer.register('local_remove_all_assert',
theano.tensor.opt.local_remove_all_assert,
'unsafe')
def safe_to_gpu(x, ctx_name):
if isinstance(x.type, tensor.TensorType):
return GpuFromHost(ctx_name)(x)
else:
return x
def safe_to_cpu(x):
if isinstance(x.type, GpuArrayType):
return host_from_gpu(x)
else:
return x
def op_lifter(OP, cuda_only=False):
"""
OP(..., host_from_gpu(), ...) -> host_from_gpu(GpuOP(...))
gpu_from_host(OP(inp0, ...)) -> GpuOP(inp0, ...)
"""
def f(maker):
def local_opt(node):
if type(node.op) in OP:
# Either one of our inputs is on the gpu or
# all of our clients are on the gpu
replace = False
# TODO: Maybe set context_name with infer_context_name()?
context_name = None
# We replace if any input is a host_from_gpu
for i in node.inputs:
if i.owner and i.owner.op == host_from_gpu:
context_name = i.owner.inputs[0].type.context_name
replace = True
break
if not replace:
# We replace if *all* clients are on the GPU
clients = [c for o in node.outputs for c in o.clients]
replace = len(clients) != 0
for c, idx in clients:
if (c == 'output' or
not isinstance(c.op, GpuFromHost)):
replace = False
# TODO: check that the clients want the same context?
if replace:
# All clients are GpuFromHost and we have at least one
context_name = clients[0][0].op.context_name
# Check if we should replace
if (not replace or
(cuda_only and
get_context(context_name).kind != b'cuda')):
return False
# tag the inputs with the context in case
# the context was derived from the outputs
for i in node.inputs:
i.tag.context_name = context_name
new_op = maker(node, context_name)
# This is needed as sometimes new_op inherits from OP.
if new_op and new_op != node.op:
if isinstance(new_op, theano.Op):
return [safe_to_cpu(o) for o in
new_op(*node.inputs, return_list=True)]
elif isinstance(new_op, (tuple, list)):
return [safe_to_cpu(o) for o in new_op]
else: # suppose it is a variable on the GPU
return [host_from_gpu(new_op)]
return False
local_opt.__name__ = maker.__name__
return local_optimizer(OP)(local_opt)
return f
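# A typical use of op_lifter (cf. the registrations further below): the
# decorated maker receives the CPU node plus the inferred context name and
# returns the GPU op, a variable, or a list of outputs; op_lifter takes care
# of the host<->gpu transfers on either side, e.g.
#
#     @register_opt('fast_compile')
#     @op_lifter([tensor.Alloc])
#     def local_gpuaalloc(node, context_name):
#         return GpuAlloc(context_name)(*node.inputs)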
class InputToGpuOptimizer(Optimizer):
"""
Transfer the input to the gpu to start the rolling wave.
"""
def add_requirements(self, fgraph):
fgraph.attach_feature(toolbox.ReplaceValidate())
def apply(self, fgraph):
for input in fgraph.inputs:
if isinstance(input.type, GpuArrayType):
continue
# If all clients are outputs or transfers don't do anything.
if (all(cl[0] == 'output' or isinstance(cl[0].op, GpuFromHost)
for cl in input.clients)):
continue
target = getattr(input.tag, 'target', None)
if target == 'cpu':
continue
try:
new_input = host_from_gpu(GpuFromHost(target)(input))
fgraph.replace_validate(input, new_input,
"InputToGpuOptimizer")
except TypeError:
# This could fail if the inputs are not TensorTypes
pass
except ContextNotDefined:
if hasattr(input.tag, 'target'):
raise
# If there is no context tag and no default context
# then it stays on the CPU
pass
gpu_seqopt.register('InputToGpuArrayOptimizer', InputToGpuOptimizer(),
0, 'fast_run', 'fast_compile', 'merge')
@local_optimizer([GpuFromHost, GpuToGpu, HostFromGpu])
def local_cut_gpu_transfers(node):
# gpu[ab] -> host -> gpub
if (isinstance(node.op, GpuFromHost) and
node.inputs[0].owner and
isinstance(node.inputs[0].owner.op, HostFromGpu)):
other = node.inputs[0].owner.inputs[0]
if node.op.context_name == other.type.context_name:
return [other]
else:
return [GpuToGpu(node.op.context_name)(other)]
# ? -> gpua -> host
elif (isinstance(node.op, HostFromGpu) and
node.inputs[0].owner):
n2 = node.inputs[0].owner
# host ->
if isinstance(n2.op, GpuFromHost):
return [n2.inputs[0]]
# gpub ->
if isinstance(n2.op, GpuToGpu):
return [host_from_gpu(n2.inputs[0])]
# ? -> gpua -> gpub
elif isinstance(node.op, GpuToGpu):
# Transfer within same context
if node.inputs[0].type.context_name == node.op.context_name:
return [node.inputs[0]]
if node.inputs[0].owner:
n2 = node.inputs[0].owner
# host ->
if isinstance(n2.op, GpuFromHost):
return [as_gpuarray_variable(n2.inputs[0],
node.op.context_name)]
# gpuc ->
if isinstance(n2.op, GpuToGpu):
if node.op.context_name == n2.inputs[0].type.context_name:
return [n2.inputs[0]]
else:
return [node.op(n2.inputs[0])]
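# Schematically, the rewrites performed above:
#     GpuFromHost(a)(host_from_gpu(y_b)) -> y_b (a == b) or GpuToGpu(a)(y_b)
#     host_from_gpu(GpuFromHost(a)(x))   -> x
#     host_from_gpu(GpuToGpu(a)(y_b))    -> host_from_gpu(y_b)
#     GpuToGpu(a)(GpuToGpu(b)(z_c))      -> z_c (a == c) or GpuToGpu(a)(z_c)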
gpu_cut_copies.register('cut_gpua_host_transfers', local_cut_gpu_transfers,
'fast_compile', 'fast_run', 'gpuarray')
gpu_cut_copies.register('cut_gpua_constant_transfers',
tensor.opt.constant_folding,
'fast_compile', 'fast_run', 'gpuarray')
optdb['canonicalize'].register('local_cut_gpua_host_gpua',
local_cut_gpu_transfers,
'fast_compile', 'fast_run', 'gpuarray')
@register_opt('fast_compile')
@local_optimizer([tensor.Alloc])
def local_gpuaalloc2(node):
"""
    Join(axis, {Alloc or HostFromGpu}, ...) -> Join(axis, GpuAlloc, Alloc, ...)
Moves an alloc that is an input to join to the gpu.
"""
try:
get_context(None)
except ContextNotDefined:
# If there is no default context then we do not perform the move here.
return
if (isinstance(node.op, tensor.Alloc) and
all(c != 'output' and
c.op == tensor.join and
all(i.owner and
i.owner.op in [host_from_gpu, tensor.alloc]
for i in c.inputs[1:])
for c, idx in node.outputs[0].clients)):
return [host_from_gpu(GpuAlloc(None)(*node.inputs))]
@register_opt('fast_compile')
@op_lifter([tensor.Alloc])
def local_gpuaalloc(node, context_name):
return GpuAlloc(context_name)(*node.inputs)
@register_opt('fast_compile')
@op_lifter([tensor.AllocEmpty])
def local_gpuaallocempty(node, context_name):
    # We use _props_dict() to make sure that the GPU op knows all the
    # CPU op's props.
return GpuAllocEmpty(context_name=context_name,
**node.op._props_dict())(*node.inputs)
@register_opt()
@local_optimizer([GpuAlloc])
def local_gpualloc_memset_0(node):
if isinstance(node.op, GpuAlloc) and not node.op.memset_0:
inp = node.inputs[0]
if (isinstance(inp, GpuArrayConstant) and
inp.data.size == 1 and
(numpy.asarray(inp.data) == 0).all()):
new_op = GpuAlloc(node.op.context_name, memset_0=True)
return [new_op(*node.inputs)]
# Don't register by default.
@gof.local_optimizer([GpuAllocEmpty])
def local_gpua_alloc_empty_to_zeros(node):
if isinstance(node.op, GpuAllocEmpty):
context_name = infer_context_name(*node.inputs)
z = numpy.asarray(0, dtype=node.outputs[0].dtype)
return [GpuAlloc()(as_gpuarray_variable(z, context_name),
*node.inputs)]
optdb.register('local_gpua_alloc_empty_to_zeros',
theano.tensor.opt.in2out(local_gpua_alloc_empty_to_zeros),
# After move to gpu and merge2, before inplace.
49.3,
'alloc_empty_to_zeros',)
@register_opt()
@local_optimizer([GpuContiguous])
def local_gpu_contiguous_gpu_contiguous(node):
"""
gpu_contiguous(gpu_contiguous(x)) -> gpu_contiguous(x)
"""
if isinstance(node.op, GpuContiguous):
inp = node.inputs[0]
if inp.owner and isinstance(inp.owner.op, GpuContiguous):
return [inp]
@register_opt('fast_compile')
@op_lifter([tensor.extra_ops.CpuContiguous])
def local_gpu_contiguous(node, context_name):
return gpu_contiguous
@register_opt('fast_compile')
@op_lifter([tensor.Reshape])
def local_gpureshape(node, context_name):
op = node.op
name = op.name
if name:
name = 'Gpu' + name
    res = GpuReshape(op.ndim, name)
return res
@register_opt('fast_compile')
@op_lifter([tensor.Rebroadcast])
def local_gpu_rebroadcast(node, context_name):
return node.op(as_gpuarray_variable(node.inputs[0], context_name))
@register_opt('fast_compile')
@op_lifter([tensor.Flatten])
def local_gpuflatten(node, context_name):
op = node.op
shp = []
if op.outdim != 1:
shp = [node.inputs[0].shape[i] for i in range(op.outdim - 1)]
shp += [-1]
res = GpuReshape(op.outdim, None)
o = res(node.inputs[0], theano.tensor.as_tensor_variable(shp))
return o
@register_opt('fast_compile')
@op_lifter([tensor.Elemwise])
def local_gpu_elemwise(node, context_name):
op = node.op
scal_op = op.scalar_op
name = op.name
if name:
name = 'Gpu' + name
if len(node.outputs) > 1:
return
res = GpuElemwise(scal_op, name=name,
inplace_pattern=copy.copy(op.inplace_pattern),
nfunc_spec=op.nfunc_spec)
# If the elemwise operation is a pow, casts might be required on the
    # inputs and/or outputs because only the (float, float)->float and
# (double, double)->double cases are implemented at the moment.
if isinstance(op.scalar_op, Pow):
# Only transfer the computation on the gpu if the output dtype is
# floating point. Else, give up on the transfer to the gpu.
out_dtype = node.outputs[0].dtype
if out_dtype not in ['float16', 'float32', 'float64']:
return
# Transfer the inputs on the GPU and cast them to the right dtype.
new_inputs = []
for inp in node.inputs:
if inp.dtype != out_dtype:
gpu_cast_op = GpuElemwise(Cast(Scalar(out_dtype)))
new_inputs.append(gpu_cast_op(as_gpuarray_variable(inp, context_name)))
else:
new_inputs.append(as_gpuarray_variable(inp, context_name))
# Perform the exponent on the gpu and transfer the output back to the
# cpu.
gpu_output = res(*new_inputs)
cpu_output = host_from_gpu(gpu_output)
return [cpu_output]
else:
return res
def max_inputs_to_GpuElemwise(node):
ptr_size = 8
int_size = 4
# we take the limit from CUDA for now
argument_limit = 232
ndim = node.inputs[0].type.ndim
# number of elements and shape
size_param_mandatory = (int_size * (ndim + 1)) + \
(ptr_size + int_size * ndim) * len(node.outputs)
nb_bytes_avail = argument_limit - size_param_mandatory
nb_bytes_per_input = ptr_size + ndim * int_size
max_nb_inputs = nb_bytes_avail // nb_bytes_per_input
return max_nb_inputs
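# Worked example: with ndim == 2 and a single output,
# size_param_mandatory = 4 * (2 + 1) + (8 + 4 * 2) * 1 = 28 bytes, leaving
# 232 - 28 = 204 bytes, i.e. 204 // (8 + 4 * 2) = 12 inputs at most.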
gpu_local_elemwise_fusion = tensor.opt.local_elemwise_fusion_op(
GpuElemwise,
max_inputs_to_GpuElemwise)
optdb.register('gpua_elemwise_fusion',
tensor.opt.FusionOptimizer(gpu_local_elemwise_fusion), 71.00,
'fast_run', 'fusion', 'local_elemwise_fusion', 'gpuarray')
inplace_gpu_elemwise_opt = tensor.opt.inplace_elemwise_optimizer_op(
GpuElemwise)
optdb.register('gpua_inplace_opt', inplace_gpu_elemwise_opt, 75,
'inplace_elemwise_optimizer', 'fast_run', 'inplace', 'gpuarray')
@register_opt('fast_compile')
@op_lifter([tensor.DimShuffle])
def local_gpua_dimshuffle(node, context_name):
return GpuDimShuffle(node.op.input_broadcastable,
node.op.new_order)
@register_opt('fast_compile')
@op_lifter([tensor.SpecifyShape])
def local_gpua_specifyShape(node, context_name):
if isinstance(node.inputs[0].type, GpuArrayType):
return
inp = [as_gpuarray_variable(node.inputs[0], context_name)]
inp += node.inputs[1:]
return tensor.specify_shape(*inp)
@register_opt('fast_compile')
@op_lifter([theano.compile.ops.Shape])
def local_gpua_shape(node, context_name):
# op_lifter will call this opt too frequently as the output is
# always on the CPU.
if isinstance(node.inputs[0].type, GpuArrayType):
return
return [as_gpuarray_variable(node.inputs[0], context_name).shape]
def gpu_print_wrapper(op, cnda):
op.old_op.global_fn(op.old_op, numpy.asarray(cnda))
@register_opt('fast_compile')
@op_lifter([tensor.printing.Print])
def local_gpu_print_op(node, context_name):
x, = node.inputs
gpu_x = as_gpuarray_variable(x, context_name=context_name)
new_op = node.op.__class__(global_fn=gpu_print_wrapper)
new_op.old_op = node.op
return new_op(gpu_x)
@register_opt('fast_compile')
@local_optimizer([PdbBreakpoint])
def local_gpu_pdbbreakpoint_op(node):
if isinstance(node.op, PdbBreakpoint):
old_inputs = node.inputs
old_outputs = node.outputs
new_inputs = node.inputs[:1]
input_transfered = []
        # Go through the monitored variables, only transferring to the GPU
        # those for which the input comes from the GPU or whose output
        # will be transferred to the GPU.
nb_monitored_vars = len(node.outputs)
for i in range(nb_monitored_vars):
inp = old_inputs[i + 1]
out = old_outputs[i]
input_is_from_gpu = (inp.owner and
isinstance(inp.owner.op, HostFromGpu))
output_goes_to_gpu = False
for c in out.clients:
if c == 'output':
continue
if isinstance(c[0].op, GpuFromHost):
output_goes_to_gpu = True
context_name = c[0].op.context_name
break
if input_is_from_gpu:
# The op should be applied on the GPU version of the input
new_inputs.append(inp.owner.inputs[0])
input_transfered.append(True)
elif output_goes_to_gpu:
                # The input should be transferred to the gpu
new_inputs.append(as_gpuarray_variable(inp, context_name))
input_transfered.append(True)
else:
# No transfer is required.
new_inputs.append(inp)
input_transfered.append(False)
        # Only continue the optimization if at least one input has been
        # transferred to the gpu
if not any(input_transfered):
return False
# Apply the op on the new inputs
new_op_outputs = node.op(*new_inputs, return_list=True)
# Propagate the transfer to the gpu through the outputs that require
# it
new_outputs = []
for i in range(len(new_op_outputs)):
if input_transfered[i]:
new_outputs.append(host_from_gpu(new_op_outputs[i]))
else:
new_outputs.append(new_op_outputs[i])
return new_outputs
return False
@register_opt('fast_compile')
@op_lifter([IfElse])
def local_gpua_lazy_ifelse(node, context_name):
if node.op.gpu:
return
c = node.inputs[0]
inps = []
for v in node.inputs[1:]:
if isinstance(v.type, (tensor.TensorType, GpuArrayType)):
inps.append(as_gpuarray_variable(v, context_name))
else:
inps.append(v)
return IfElse(node.op.n_outs, gpu=True)(c, *inps, return_list=True)
@register_opt('fast_compile')
@op_lifter([tensor.Join])
def local_gpua_join(node, context_name):
return gpu_join
@register_opt('fast_compile')
@local_optimizer([GpuJoin])
def local_gpuajoin_1(node):
# join of a single element
if (isinstance(node.op, GpuJoin) and
len(node.inputs) == 2):
return [node.inputs[1]]
@register_opt('fast_compile')
@op_lifter([tensor.Split])
def local_gpua_split(node, context_name):
return GpuSplit(node.op.len_splits)
@register_opt('fast_compile')
@op_lifter([tensor.Subtensor])
def local_gpua_subtensor(node, context_name):
x = node.inputs[0]
if (x.owner and isinstance(x.owner.op, HostFromGpu)):
gpu_x = x.owner.inputs[0]
if (gpu_x.owner and
isinstance(gpu_x.owner.op, GpuFromHost) and
# And it is a shared var or an input of the graph.
not gpu_x.owner.inputs[0].owner):
if len(x.clients) == 1:
if any([n == 'output' or any([isinstance(v.type, GpuArrayType)
for v in n.inputs + n.outputs])
for n, _ in node.outputs[0].clients]):
return
else:
return [host_from_gpu(gpu_x.owner.op(node.outputs[0]))]
return GpuSubtensor(node.op.idx_list)
@register_opt('fast_compile')
@op_lifter([tensor.IncSubtensor])
def local_gpua_incsubtensor(node, context_name):
op = GpuIncSubtensor(node.op.idx_list, node.op.inplace,
node.op.set_instead_of_inc,
node.op.destroyhandler_tolerate_aliased)
ret = op(*node.inputs)
val = getattr(node.outputs[0].tag, 'nan_guard_mode_check', True)
ret.tag.nan_guard_mode_check = val
return ret
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedSubtensor1])
def local_gpua_advanced_subtensor(node, context_name):
return GpuAdvancedSubtensor1()
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedIncSubtensor1])
def local_gpua_advanced_incsubtensor(node, context_name):
context = get_context(context_name)
# This is disabled on non-cuda contexts
if context.kind != b'cuda':
return None
x, y, ilist = node.inputs
    # GPU ops need both inputs to have the same dtype
if (x.type.dtype != y.type.dtype):
dtype = scalar.upcast(x.type.dtype, y.type.dtype)
if x.type.dtype != dtype:
x = tensor.cast(x, dtype)
if y.type.dtype != dtype:
y = tensor.cast(y, dtype)
set_instead_of_inc = node.op.set_instead_of_inc
compute_capability = int(context.bin_id[-2])
if (compute_capability < 2 or x.ndim != 2 or y.ndim != 2):
return GpuAdvancedIncSubtensor1(
set_instead_of_inc=set_instead_of_inc)
else:
return GpuAdvancedIncSubtensor1_dev20(
set_instead_of_inc=set_instead_of_inc)
@register_inplace()
@local_optimizer([GpuAdvancedIncSubtensor1, GpuAdvancedIncSubtensor1_dev20])
def local_advincsub1_gpua_inplace(node):
if isinstance(node.op, (GpuAdvancedIncSubtensor1,
GpuAdvancedIncSubtensor1_dev20)):
if not node.op.inplace:
return [node.op.clone_inplace()(*node.inputs)]
@register_opt('fast_compile')
@op_lifter([tensor.CAReduce, tensor.Sum, tensor.elemwise.Prod])
def local_gpua_careduce(node, context_name):
if isinstance(node.op.scalar_op, (scalar.Add, scalar.Mul,
scalar.Maximum, scalar.Minimum)):
ctx = get_context(context_name)
if ctx.kind == b'opencl':
op = GpuCAReduceCPY
if node.op.scalar_op not in [scalar.add, scalar.mul]:
# We don't support yet all reduction with cpy code.
return
elif ctx.kind == b'cuda':
op = GpuCAReduceCuda
else:
return False
x, = node.inputs
greduce = op(
node.op.scalar_op, axis=node.op.axis,
dtype=getattr(node.op, 'dtype', None),
acc_dtype=getattr(node.op, 'acc_dtype', None))
gvar = greduce(x)
        # We need to have make_node called; otherwise the mask can
        # be None
if (op is GpuCAReduceCPY or
gvar.owner.op.supports_c_code([
as_gpuarray_variable(x, context_name)])):
return greduce
else:
# Try to make a simpler pattern based on reshaping
# The principle is that if two adjacent dimensions have
# the same value in the reduce_mask, then we can reshape
# to make them a single dimension, do the reduction, and
# then reshape to get them back.
if node.op.axis is None:
reduce_mask = [1] * x.type.ndim
else:
reduce_mask = [0] * x.type.ndim
for a in node.op.axis:
assert reduce_mask[a] == 0
reduce_mask[a] = 1
new_in_shp = [shape_i(x, 0)]
new_mask = [reduce_mask[0]]
for i in xrange(1, x.type.ndim):
if reduce_mask[i] == reduce_mask[i - 1]:
new_in_shp[-1] *= shape_i(x, i)
else:
new_mask.append(reduce_mask[i])
new_in_shp.append(shape_i(x, i))
new_axis = []
for idx, m in enumerate(new_mask):
if m == 1:
new_axis.append(idx)
greduce = op(
node.op.scalar_op,
axis=new_axis, reduce_mask=new_mask,
dtype=getattr(node.op, 'dtype', None),
acc_dtype=getattr(node.op, 'acc_dtype', None))
reshaped_x = x.reshape(tensor.stack(new_in_shp))
gpu_reshaped_x = as_gpuarray_variable(reshaped_x, context_name)
gvar = greduce(gpu_reshaped_x)
            # We need to have make_node called; otherwise the mask can
            # be None
reshaped_gpu_inputs = [gpu_reshaped_x]
if greduce.supports_c_code(reshaped_gpu_inputs):
reduce_reshaped_x = host_from_gpu(
greduce(gpu_reshaped_x))
if reduce_reshaped_x.ndim != node.outputs[0].ndim:
out_shp = []
for i in range(x.ndim):
if i not in node.op.axis:
out_shp.append(shape_i(x, i))
unreshaped_reduce = reduce_reshaped_x.reshape(
tensor.stack(out_shp))
else:
unreshaped_reduce = reduce_reshaped_x
return [unreshaped_reduce]
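# Worked example of the reshape trick above: summing a 4d tensor over axes
# (0, 1) gives reduce_mask [1, 1, 0, 0]; adjacent equal entries collapse, so
# x is reshaped to (d0*d1, d2*d3), reduced over the new axis 0, and the
# result reshaped back to (d2, d3).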
@register_opt('fast_compile')
@op_lifter([tensor.blas.Gemv, tensor.blas_c.CGemv])
def local_gpua_gemv(node, context_name):
if node.op.inplace:
return gpugemv_inplace
else:
return gpugemv_no_inplace
@register_opt('fast_compile')
@op_lifter([tensor.blas.Gemm])
def local_gpua_gemm(node, context_name):
if node.op.inplace:
return gpugemm_inplace
else:
return gpugemm_no_inplace
@register_opt('fast_compile')
@op_lifter([tensor.blas.BatchedDot])
def local_gpua_gemmbatch(node, context_name):
a, b = node.inputs
c = tensor.AllocEmpty(a.dtype)(a.shape[0], a.shape[1], b.shape[2])
return gpugemmbatch_no_inplace(c, 1.0, a, b, 0.0)
@register_opt('fast_compile')
@op_lifter([tensor.basic.Dot])
def local_gpua_hgemm(node, context_name):
from theano.sandbox.cuda import nvcc_compiler
if nvcc_compiler.nvcc_version < '7.5':
_logger.warning("Not performing dot of float16 on the GPU since "
"cuda 7.5 is not available. Updating could speed up "
"your code.")
return
A = node.inputs[0]
B = node.inputs[1]
if (A.ndim == 2 and B.ndim == 2 and
A.dtype == 'float16' and B.dtype == 'float16'):
fgraph = node.inputs[0].fgraph
C = GpuAllocEmpty(dtype='float16', context_name=context_name)(
shape_i(A, 0, fgraph),
shape_i(B, 1, fgraph))
return gpugemm_no_inplace(C, 1.0, A, B, 0.0)
@register_opt()
@alpha_merge(GpuGemm, alpha_in=1, beta_in=4)
def local_gpuagemm_alpha_merge(node, *inputs):
return [gpugemm_no_inplace(*inputs)]
@register_opt()
@output_merge(GpuGemm, alpha_in=1, beta_in=4, out_in=0)
def local_gpuagemm_output_merge(node, *inputs):
return [gpugemm_no_inplace(*inputs)]
@register_opt()
@alpha_merge(GpuGemmBatch, alpha_in=1, beta_in=4)
def local_gpuagemmbatch_alpha_merge(node, *inputs):
return [gpugemmbatch_no_inplace(*inputs)]
@register_opt()
@output_merge(GpuGemmBatch, alpha_in=1, beta_in=4, out_in=0)
def local_gpuagemmbatch_output_merge(node, *inputs):
return [gpugemmbatch_no_inplace(*inputs)]
@register_opt('fast_compile')
@op_lifter([tensor.blas.Ger, tensor.blas_c.CGer, tensor.blas_scipy.ScipyGer])
def local_gpua_ger(node, context_name):
return GpuGer(inplace=node.op.destructive)
@register_opt('fast_compile')
@op_lifter([tensor.blas.Dot22])
def local_gpua_dot22(node, context_name):
return gpu_dot22
@register_opt('fast_compile')
@op_lifter([tensor.blas.Dot22Scalar])
def local_gpua_dot22scalar(node, context_name):
x, y, a = node.inputs
x = as_gpuarray_variable(x, context_name)
y = as_gpuarray_variable(y, context_name)
z = GpuAllocEmpty(x.dtype, context_name)(x.shape[0], y.shape[1])
return [gpugemm_no_inplace(z, a, x, y, 0)]
@register_opt('fast_compile')
@op_lifter([tensor.basic.Eye])
def local_gpua_eye(node, context_name):
return GpuEye(dtype=node.op.dtype, context_name=context_name)
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias], cuda_only=True)
def local_gpua_crossentropysoftmaxargmax1hotwithbias(node, context_name):
return gpu_crossentropy_softmax_argmax_1hot_with_bias
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropySoftmax1HotWithBiasDx], cuda_only=True)
def local_gpua_crossentropysoftmax1hotwithbiasdx(node, context_name):
return gpu_crossentropy_softmax_1hot_with_bias_dx
@register_opt('fast_compile')
@op_lifter([tensor.nnet.Softmax], cuda_only=True)
def local_gpua_softmax(node, context_name):
return gpu_softmax
@register_opt('fast_compile')
@op_lifter([tensor.nnet.SoftmaxWithBias], cuda_only=True)
def local_gpua_softmaxwithbias(node, context_name):
return gpu_softmax_with_bias
@register_opt('fast_compile')
@op_lifter([theano.tensor.opt.Assert])
def local_assert(node, context_name):
# Check if input nodes are already on the GPU
if isinstance(node.inputs[0].type, GpuArrayType):
return
return [host_from_gpu(node.op(as_gpuarray_variable(node.inputs[0],
context_name),
*node.inputs[1:]))]
@register_opt('fast_compile')
@op_lifter([ConvOp])
def local_error_convop(node, context_name):
assert False, """
ConvOp does not work with the gpuarray backend.
Use the new convolution interface to have GPU convolution working:
theano.tensor.nnet.conv2d()
"""
@register_opt('fast_compile')
@op_lifter([SparseBlockGemv])
def local_lift_sparseblockgemv(node, context_name):
if node.op.inplace:
return gpu_sparse_block_gemv_inplace
else:
return gpu_sparse_block_gemv
@register_opt('fast_compile')
@op_lifter([SparseBlockOuter])
def local_lift_sparseblockouter(node, context_name):
if node.op.inplace:
return gpu_sparse_block_outer_inplace
else:
return gpu_sparse_block_outer
@register_inplace()
@local_optimizer([GpuSparseBlockGemv], inplace=True)
def local_inplace_sparseblockgemv(node):
if isinstance(node.op, GpuSparseBlockGemv) and not node.op.inplace:
return [gpu_sparse_block_gemv_inplace(*node.inputs)]
@register_inplace()
@local_optimizer([GpuSparseBlockOuter], inplace=True)
def local_inplace_sparseblockouter(node):
if isinstance(node.op, GpuSparseBlockOuter) and not node.op.inplace:
return [GpuSparseBlockOuter(inplace=True)(*node.inputs)]
# This deals with any abstract convs that have a transfer somewhere
@register_opt('fast_compile')
@op_lifter([AbstractConv2d,
AbstractConv2d_gradWeights,
AbstractConv2d_gradInputs])
def local_lift_abstractconv2d(node, context_name):
if isinstance(node.outputs[0].type, GpuArrayType):
# Don't handle this node here, it's already on the GPU.
return
inps = list(node.inputs)
inps[0] = as_gpuarray_variable(node.inputs[0],
context_name=context_name)
inps[1] = as_gpuarray_variable(node.inputs[1],
context_name=context_name)
return [node.op(*inps)]
# Register this here so that it goes after the abstract lifting
register_opt('fast_compile')(conv_groupopt)
@register_opt("low_memory")
@local_optimizer([GpuCAReduceCuda])
def local_gpu_elemwise_careduce(node):
"""
    Merge some GpuCAReduceCuda and GpuElemwise.
"""
if (isinstance(node.op, GpuCAReduceCuda) and
node.op.pre_scalar_op is None and
node.inputs[0].owner and
isinstance(node.inputs[0].owner.op, GpuElemwise) and
            # The op supports all scalar ops with one input. We don't
            # automatically add more cases, as some (e.g. trigonometric
            # ops combined with certain reduction patterns) would
            # probably result in a slowdown.
isinstance(node.inputs[0].owner.op.scalar_op, scalar.basic.Sqr)):
op = node.op
inp = node.inputs[0].owner.inputs[0]
return [GpuCAReduceCuda(scalar_op=op.scalar_op,
axis=op.axis,
reduce_mask=op.reduce_mask,
pre_scalar_op=scalar.basic.sqr)(inp)]
@local_optimizer(None)
def local_assert_no_cpu_op(node):
if (all([var.owner and isinstance(var.owner.op, HostFromGpu)
for var in node.inputs]) and
any([[c for c in var.clients if isinstance(c[0].op, GpuFromHost)]
for var in node.outputs])):
if config.assert_no_cpu_op == "warn":
_logger.warning(("CPU Op %s is detected in the computation "
"graph") % node)
elif config.assert_no_cpu_op == "raise":
raise AssertionError("The Op %s is on CPU." % node)
elif config.assert_no_cpu_op == "pdb":
pdb.set_trace()
# Register the local_assert_no_cpu_op:
assert_no_cpu_op = theano.tensor.opt.in2out(local_assert_no_cpu_op,
name='assert_no_cpu_op')
# 49.2 is after device specialization & fusion optimizations for last transfers
optdb.register('gpua_assert_no_cpu_op', assert_no_cpu_op, 49.2,
'assert_no_cpu_op')
def tensor_to_gpu(x, context_name):
if isinstance(x.type, tensor.TensorType):
y = GpuArrayType(broadcastable=x.type.broadcastable,
context_name=context_name,
dtype=x.type.dtype)()
if x.name:
y.name = x.name + '[Gpua]'
return y
else:
return x
def gpu_safe_new(x, tag=''):
"""
Internal function that constructs a new variable from x with the same
type, but with a different name (old name + tag). This function is used
by gradient, or the R-op to construct new variables for the inputs of
the inner graph such that there is no interference between the original
graph and the newly constructed graph.
"""
if hasattr(x, 'name') and x.name is not None:
nw_name = x.name + tag
else:
nw_name = None
if isinstance(x, theano.Constant):
return x.clone()
nw_x = x.type()
nw_x.name = nw_name
return nw_x
def gpu_reconstruct_graph(inputs, outputs, tag=None):
"""
Different interface to clone, that allows you to pass inputs.
Compared to clone, this method always replaces the inputs with
new variables of the same type, and returns those (in the same
order as the original inputs).
"""
if tag is None:
tag = ''
nw_inputs = [gpu_safe_new(x, tag) for x in inputs]
givens = {}
for nw_x, x in zip(nw_inputs, inputs):
givens[x] = nw_x
nw_outputs = scan_utils.clone(outputs, replace=givens)
return (nw_inputs, nw_outputs)
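# gpu_reconstruct_graph is used just below to clone the inner scan graph so
# that a stable gpu_hash can be computed without touching the original
# inner inputs/outputs.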
@register_opt('scan', 'fast_compile')
@op_lifter([scan_op.Scan])
def local_scan_to_gpua(node, context_name):
info = copy.deepcopy(node.op.info)
if info.get('gpua', False):
return
info['gpua'] = True
nw_ins = [node.inputs[0]]
e = (1 +
node.op.n_seqs +
node.op.n_mit_mot +
node.op.n_mit_sot +
node.op.n_sit_sot +
node.op.n_shared_outs)
nw_ins += [safe_to_gpu(x, context_name) for x in node.inputs[1:e]]
b = e
e = e + node.op.n_nit_sot
nw_ins += node.inputs[b:e]
nw_ins += [safe_to_gpu(x, context_name) for x in node.inputs[e:]]
scan_ins = [tensor_to_gpu(x, context_name) for x in node.op.inputs]
# The inner output corresponding to the looping condition should not be
# moved to the gpu
if node.op.info['as_while']:
scan_outs = [safe_to_gpu(x, context_name) for x in node.op.outputs[:-1]]
scan_outs += [node.op.outputs[-1]]
else:
scan_outs = [safe_to_gpu(x, context_name) for x in node.op.outputs]
scan_outs = scan_utils.clone(
scan_outs,
replace=list(zip(node.op.inputs,
(safe_to_cpu(x) for x in scan_ins))))
# We need to construct the hash here, because scan
# __init__ does not know about the gpu and can not
# handle graphs with inputs being on the gpu
tmp_in, tmp_out = gpu_reconstruct_graph(scan_ins, scan_outs)
local_fgraph = gof.FunctionGraph(tmp_in, tmp_out, clone=True)
_cmodule_key = gof.CLinker().cmodule_key_(local_fgraph, [])
info['gpu_hash'] = hash(_cmodule_key)
def typebuild(dtype, broadcastable, context_name=context_name):
return GpuArrayType(dtype=dtype, broadcastable=broadcastable,
context_name=context_name)
nw_op = scan_op.Scan(scan_ins, scan_outs, info,
typeConstructor=typebuild).make_node(*nw_ins)
return nw_op.outputs
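# Note on the rewrite above: outer inputs are moved via safe_to_gpu and the
# inner graph via tensor_to_gpu/clone; the n_nit_sot inputs are plain
# lengths and stay on the host, as does the final as_while condition.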
def _scan_type_infer(node):
context_name = infer_context_name(*node.inputs)
def typebuild(dtype, broadcastable, context_name=context_name):
return GpuArrayType(dtype=dtype, broadcastable=broadcastable,
context_name=context_name)
return typebuild
# Do not register in fast_run or fast_compile.
# It will be added to fast_run if the GPU is enabled.
optdb.register('gpua_scanOp_make_inplace',
scan_opt.ScanInplaceOptimizer(typeInfer=_scan_type_infer,
gpua_flag=True),
75,
'gpuarray',
'inplace',
'scan')
|
|
"""Convenient access to an SQLObject or SQLAlchemy managed database."""
import sys
import time
import logging
import cherrypy
from cherrypy import request
try:
import sqlobject
from sqlobject.dbconnection import ConnectionHub, Transaction, TheURIOpener
from sqlobject.util.threadinglocal import local as threading_local
have_sqlobject = True
except ImportError:
    have_sqlobject = False
# Optional imports needed further below (assumed, following the TurboGears
# lineage of this module): zope's transaction package for
# restart_transaction(), and PEAK-Rules' NoApplicableMethods for
# dispatch_exception().
try:
    import transaction as zope_transaction
    have_transactions = True
except ImportError:
    have_transactions = False
try:
    from peak.rules import NoApplicableMethods
except ImportError:
    class NoApplicableMethods(Exception):
        pass
from gearshift import config
from gearshift.util import remove_keys
log = logging.getLogger("gearshift.database")
hub_registry = set()
_hubs = dict() # stores the AutoConnectHubs used for each connection URI
# Provide support for SQLObject
def _mysql_timestamp_converter(raw):
"""Convert a MySQL TIMESTAMP to a floating point number representing
the seconds since the Un*x Epoch. It uses custom code the input seems
to be the new (MySQL 4.1+) timestamp format, otherwise code from the
MySQLdb module is used."""
if raw[4] == '-':
return time.mktime(time.strptime(raw, '%Y-%m-%d %H:%M:%S'))
else:
import MySQLdb.converters
return MySQLdb.converters.mysql_timestamp_converter(raw)
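# Example: "2010-12-31 23:59:59" has '-' at index 4 and is parsed with
# strptime above, while the pre-4.1 format "20101231235959" falls through
# to MySQLdb's own converter.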
class SODataManager:
"""Integrates with TurboGears SQLObject PackageHubs.
One phase variant.
"""
transaction_manager = None
def __init__(self):
request.in_transaction = True
def abort(self, transaction):
## print "SODataManager.abort()"
rollback_all()
def tpc_begin(self, transaction):
## print "SODataManager.tpc_begin()"
pass
def commit(self, transaction):
## print "SODataManager.commit()"
pass
def tpc_vote(self, transaction):
## print "SODataManager.tpc_vote()"
# for a one phase data manager commit last in tpc_vote
commit_all()
def tpc_finish(self, transaction):
pass
def tpc_abort(self, transaction):
raise TypeError("Already committed")
def sortKey(self):
        # Try to sort last, so that we vote last; we may commit in tpc_vote()
        return "~tg:sqlobject:%d" % id(self)
if have_sqlobject:
class AutoConnectHub(ConnectionHub):
"""Connects to the database once per thread. The AutoConnectHub also
provides convenient methods for managing transactions."""
uri = None
params = {}
def __init__(self, uri=None, supports_transactions=True):
if not uri:
uri = config.get("sqlobject.dburi")
self.uri = uri
self.supports_transactions = supports_transactions
hub_registry.add(self)
ConnectionHub.__init__(self)
def _is_interesting_version(self):
"""Return True only if version of MySQLdb <= 1.0."""
import MySQLdb
module_version = MySQLdb.version_info[0:2]
major = module_version[0]
minor = module_version[1]
            # we can't use Decimal here because it is only available since Python 2.4
return (major < 1 or (major == 1 and minor < 2))
def _enable_timestamp_workaround(self, connection):
"""Enable a workaround for an incompatible timestamp format change
in MySQL 4.1 when using an old version of MySQLdb. See trac ticket
#1235 - http://trac.gearshift.org/ticket/1235 for details."""
# precondition: connection is a MySQLConnection
import MySQLdb
import MySQLdb.converters
if self._is_interesting_version():
conversions = MySQLdb.converters.conversions.copy()
conversions[MySQLdb.constants.FIELD_TYPE.TIMESTAMP] = \
_mysql_timestamp_converter
# There is no method to use custom keywords when using
# "connectionForURI" in sqlobject so we have to insert the
# conversions afterwards.
connection.kw["conv"] = conversions
def getConnection(self):
try:
conn = self.threadingLocal.connection
return self.begin(conn)
except AttributeError:
if self.uri:
conn = sqlobject.connectionForURI(self.uri)
# the following line effectively turns off the DBAPI connection
# cache. We're already holding on to a connection per thread,
# and the cache causes problems with sqlite.
if self.uri.startswith("sqlite"):
TheURIOpener.cachedURIs = {}
elif self.uri.startswith("mysql") and \
config.get("gearshift.enable_mysql41_timestamp_workaround", False):
self._enable_timestamp_workaround(conn)
self.threadingLocal.connection = conn
return self.begin(conn)
raise AttributeError(
"No connection has been defined for this thread "
"or process")
def reset(self):
"""Used for testing purposes. This drops all of the connections
that are being held."""
self.threadingLocal = threading_local()
def begin(self, conn=None):
"""Start a transaction."""
if not self.supports_transactions:
return conn
if not conn:
conn = self.getConnection()
if isinstance(conn, Transaction):
if conn._obsolete:
conn.begin()
return conn
self.threadingLocal.old_conn = conn
trans = conn.transaction()
self.threadingLocal.connection = trans
return trans
def commit(self):
"""Commit the current transaction."""
if not self.supports_transactions:
return
try:
conn = self.threadingLocal.connection
except AttributeError:
return
if isinstance(conn, Transaction):
self.threadingLocal.connection.commit()
def rollback(self):
"""Rollback the current transaction."""
if not self.supports_transactions:
return
try:
conn = self.threadingLocal.connection
except AttributeError:
return
if isinstance(conn, Transaction) and not conn._obsolete:
self.threadingLocal.connection.rollback()
def end(self):
"""End the transaction, returning to a standard connection."""
if not self.supports_transactions:
return
try:
conn = self.threadingLocal.connection
except AttributeError:
return
if not isinstance(conn, Transaction):
return
if not conn._obsolete:
conn.rollback()
self.threadingLocal.connection = self.threadingLocal.old_conn
del self.threadingLocal.old_conn
self.threadingLocal.connection.expireAll()
class PackageHub(object):
"""Transparently proxies to an AutoConnectHub for the URI
that is appropriate for this package. A package URI is
configured via "packagename.dburi" in the TurboGears config
settings. If there is no package DB URI configured, the
default (provided by "sqlobject.dburi") is used.
The hub is not instantiated until an attempt is made to
use the database.
"""
def __init__(self, packagename):
self.packagename = packagename
self.hub = None
def __get__(self, obj, type):
if self.hub:
return self.hub.__get__(obj, type)
else:
return self
def __set__(self, obj, type):
if not self.hub:
self.set_hub()
return self.hub.__set__(obj, type)
def __getattr__(self, name):
if not self.hub:
self.set_hub()
try:
return getattr(self.hub, name)
except AttributeError:
return getattr(self.hub.getConnection(), name)
def set_hub(self):
dburi = config.get("%s.dburi" % self.packagename, None)
if not dburi:
dburi = config.get("sqlobject.dburi", None)
if not dburi:
raise KeyError, "No database configuration found!"
if dburi.startswith("notrans_"):
dburi = dburi[8:]
trans = False
else:
trans = True
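            # e.g. a dburi of "notrans_sqlite:///tmp/app.db" (hypothetical)
            # connects to "sqlite:///tmp/app.db" with transactions disabled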
hub = _hubs.get(dburi, None)
if not hub:
hub = AutoConnectHub(dburi, supports_transactions=trans)
_hubs[dburi] = hub
self.hub = hub
def set_db_uri(dburi, package=None):
"""Sets the database URI to use either globally or for a specific
    package. Note that once the database is accessed, calling
    set_db_uri will have no effect.
@param dburi: database URI to use
@param package: package name this applies to, or None to set the default.
"""
if package:
config.update({"%s.dburi" % package : dburi})
else:
config.update({"sqlobject.dburi" : dburi})
def commit_all():
"""Commit the transactions in all registered hubs (for this thread)."""
for hub in hub_registry:
hub.commit()
def rollback_all():
"""Rollback the transactions in all registered hubs (for this thread)."""
for hub in hub_registry:
hub.rollback()
def end_all():
"""End the transactions in all registered hubs (for this thread)."""
for hub in hub_registry:
hub.end()
def restart_transaction(args):
if have_transactions:
zope_transaction.begin()
def dispatch_exception(exception, args, kw):
# errorhandling import here to avoid circular imports
from gearshift.errorhandling import dispatch_error
# Keep in mind func is not the real func but _expose
real_func, accept, allow_json, controller = args[:4]
args = args[4:]
exc_type, exc_value, exc_trace = sys.exc_info()
remove_keys(kw, ("tg_source", "tg_errors", "tg_exceptions"))
try:
output = dispatch_error(
controller, real_func, None, exception, *args, **kw)
except NoApplicableMethods:
raise exc_type, exc_value, exc_trace
else:
del exc_trace
return output
def so_to_dict(sqlobj):
"""Convert SQLObject to a dictionary based on columns."""
d = {}
if sqlobj is None:
return d # stops recursion
for name in sqlobj.sqlmeta.columns.keys():
d[name] = getattr(sqlobj, name)
d['id'] = sqlobj.id # id must be added explicitly
if sqlobj._inheritable:
d.update(so_to_dict(sqlobj._parent))
d.pop('childName')
return d
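# Example (hypothetical model): for a Person row with columns name and age,
# so_to_dict(p) yields {'name': ..., 'age': ..., 'id': p.id}; for
# inheritable SQLObjects the parent's columns are merged in and the
# internal 'childName' column is dropped.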
def so_columns(sqlclass, columns=None):
"""Return a dict with all columns from a SQLObject.
This includes the columns from InheritableSO's bases.
"""
if columns is None:
columns = {}
columns.update(filter(lambda i: i[0] != 'childName',
sqlclass.sqlmeta.columns.items()))
if sqlclass._inheritable:
so_columns(sqlclass.__base__, columns)
return columns
def so_joins(sqlclass, joins=None):
"""Return a list with all joins from a SQLObject.
The list includes the columns from InheritableSO's bases.
"""
if joins is None:
joins = []
joins.extend(sqlclass.sqlmeta.joins)
if sqlclass._inheritable:
so_joins(sqlclass.__base__, joins)
return joins
def EndTransactions():
end_all()
__all__ = ["metadata", "session",
"get_engine", "get_metadata",
"PackageHub", "AutoConnectHub", "set_db_uri",
"commit_all", "rollback_all", "end_all", "so_to_dict",
"so_columns", "so_joins", "EndTransactions"]
|
|
import unittest, collections
import grf
class GrfTest(unittest.TestCase):
def testNodes(self):
self.assertEqual([], grf.nodes([]))
self.assertEqual(list("ABD"), grf.nodes(["AB", "AD"]))
self.assertEqual(list("ABCD"), grf.nodes(["AB", "CD"]))
def testIsConnected(self):
self.assertTrue(grf.is_connected([]))
self.assertTrue(grf.is_connected(["AB"]))
self.assertFalse(grf.is_connected(["AB", "CD"]))
self.assertFalse(grf.is_connected(["AB", "CD", "DF"]))
self.assertTrue(grf.is_connected(["AB", "CD", "DB"]))
# TODO: deterministic test
def testHamiltonianPath(self):
def checkHamiltonian(graph):
path = grf.hamiltonian_path(graph)
self.assertEqual(sorted(path), sorted(grf.nodes(graph)))
graph = list(map(tuple, graph))
for node0, node1 in zip(path, path[1:]):
self.assertTrue((node0, node1) in graph or (node1, node0) in graph)
checkHamiltonian(["AB"])
checkHamiltonian("AB BC CD DE".split())
checkHamiltonian("AB BC CD AD".split())
self.assertFalse(grf.hamiltonian_path("AB CD".split()))
self.assertFalse(grf.hamiltonian_path("AB AC AD".split()))
def testExactCoverAPI(self):
self.assertEqual([], grf.exact_cover([]))
self.assertEqual([], grf.exact_cover({}))
self.assertEqual([], grf.exact_cover(""))
self.assertEqual([], grf.exact_cover(set()))
self.assertIsNotNone(grf.exact_cover([[]]))
self.assertEqual(["AB", "CD"], grf.exact_cover("AB BC CD".split()))
self.assertEqual([0, 2], grf.exact_cover({0: "AB", 1: "BC", 2: "CD"}))
# The nodes argument is generally unnecessary.
self.assertEqual(["AB", "CD"], grf.exact_cover("AB BC CD".split(), "ABCD"))
# No solution will be found if any node does not appear in any subset.
self.assertIsNone(grf.exact_cover("AB BC CD".split(), "ABCDE"))
# It is a ValueError for a subset to contain a node not in the set of all nodes.
self.assertRaises(ValueError, grf.exact_cover, "AB BC CD".split(), "ABC")
# It is a ValueError for the set of all nodes to have a node appear more than once.
self.assertRaises(ValueError, grf.exact_cover, "AB BC CD".split(), "ABCDD")
# If a subset contains a node more than once, that subset cannot appear in a solution.
self.assertIsNone(grf.exact_cover("AA B".split()))
def testExactCover(self):
def checkExactCover(pieces):
cover = grf.exact_cover(pieces)
self.assertTrue(all(piece in pieces for piece in cover))
nodes = [node for piece in cover for node in piece]
all_nodes = set(node for piece in pieces for node in piece)
self.assertEqual(sorted(nodes), sorted(all_nodes))
checkExactCover("AB CD".split())
checkExactCover("AB BC CD".split())
checkExactCover([(1, 4, 7), (1, 4), (4, 5, 7), (3, 5, 6), (2, 3, 6, 7), (2, 7)])
self.assertIsNone(grf.exact_cover("AB BC".split()))
def testExactCoversAPI(self):
# exact_covers
self.assertEqual(1, len(grf.exact_covers([])))
self.assertEqual(2, len(grf.exact_covers([[]])))
self.assertEqual(8, len(grf.exact_covers([[], [], []])))
self.assertEqual(2, len(grf.exact_covers([[0], []])))
self.assertEqual(0, len(grf.exact_covers([[0, 0], []])))
self.assertEqual(1, len(grf.exact_covers("AB CD".split())))
self.assertEqual(0, len(grf.exact_covers("AB BC".split())))
self.assertEqual(6, len(grf.exact_covers("A A A B B".split())))
self.assertEqual(6, len(grf.exact_covers("A A A B B".split(), max_solutions=10)))
self.assertEqual(2, len(grf.exact_covers("A A A B B".split(), max_solutions=2)))
# can_exact_cover
self.assertTrue(grf.can_exact_cover("AB CD".split()))
self.assertFalse(grf.can_exact_cover("AB BC".split()))
# unique_exact_cover
self.assertTrue(grf.unique_exact_cover("AB CD".split()))
self.assertFalse(grf.unique_exact_cover("AB BC".split()))
self.assertFalse(grf.unique_exact_cover("AB CD AD BC".split()))
# can_unique_exact_cover
solution, solution_is_unique = grf.can_unique_exact_cover("AB CD AD BC".split())
self.assertIsNotNone(solution)
self.assertFalse(solution_is_unique)
solution, solution_is_unique = grf.can_unique_exact_cover("AB CD AD".split())
self.assertIsNotNone(solution)
self.assertTrue(solution_is_unique)
solution, solution_is_unique = grf.can_unique_exact_cover("AB BC".split())
self.assertIsNone(solution)
self.assertFalse(solution_is_unique)
def testPartialCoverAPI(self):
self.assertEqual([], grf.partial_cover([], []))
self.assertEqual([], grf.partial_cover({}, set()))
self.assertEqual([], grf.partial_cover("", ""))
self.assertIsNotNone(grf.partial_cover([[]], []))
self.assertEqual(["AB", "CD"], grf.partial_cover("AB BC CD".split(), "ABD"))
self.assertEqual([0, 2], grf.partial_cover({0: "AB", 1: "BC", 2: "CD"}, "ABD"))
# No solution will be found if any node does not appear in any subset.
self.assertIsNone(grf.partial_cover("AB BC CD".split(), "ABCDE"))
# It is a ValueError for the set of all nodes to have a node appear more than once.
self.assertRaises(ValueError, grf.partial_cover, "AB BC CD".split(), "ABCDD")
# If a subset contains a node more than once, that subset cannot appear in a solution.
self.assertIsNone(grf.partial_cover("AA B".split(), "AB"))
def testPartialCover(self):
def checkPartialCover(pieces, nodes):
cover = grf.partial_cover(pieces, nodes)
self.assertTrue(all(piece in pieces for piece in cover))
for node in nodes:
self.assertTrue(any(node in piece for piece in cover))
counts = collections.Counter([node for piece in cover for node in piece])
(_, count), = counts.most_common(1)
self.assertEqual(count, 1)
checkPartialCover("AB CD".split(), "AD")
checkPartialCover("AB BC CD".split(), "ABCD")
checkPartialCover("AB CD ACE".split(), "AD")
self.assertFalse(grf.partial_cover("AB BC".split(), "AC"))
def testPartialCoversAPI(self):
# Empty and optional subsets.
self.assertEqual(1, len(grf.partial_covers([], [])))
self.assertEqual(2, len(grf.partial_covers([[]], [])))
self.assertEqual(8, len(grf.partial_covers([[], [], []], [])))
self.assertEqual(8, len(grf.partial_covers([[0], [1], [2]], [])))
self.assertEqual(5, len(grf.partial_covers([[0, 1], [1, 2], [2, 3]], [])))
def testMultiCoversAPI(self):
self.assertEqual(1, len(grf.multi_covers([], [], [])))
self.assertEqual(2, len(grf.multi_covers([[]], [], [])))
self.assertEqual(8, len(grf.multi_covers([[], [], []], [], [])))
self.assertEqual(1, len(grf.multi_covers([[0]], [(0, 1)], [(0, 1)])))
self.assertEqual(6, len(grf.multi_covers([[0], [0], [0]], [(0, 1)], [(0, 2)])))
self.assertEqual(4, len(grf.multi_covers([[0], {0: 1}, {0: 2}], [(0, 1)], [(0, 2)])))
self.assertEqual(1, len(grf.multi_covers([[0], [0]], [(0, 0)], [(0, 0)])))
self.assertEqual(3, len(grf.multi_covers([[0], [0]], [(0, 1)], [(0, 3)])))
    # Signature under test: parse_polys(spec, annotate=False, align=True, allow_disconnected=False)
def testParsePolys(self):
self.assertEqual([((0, 0),)], grf.parse_polys("#"))
self.assertEqual([((0, 0),)], grf.parse_polys(" #"))
self.assertEqual([((1, 0),)], grf.parse_polys(" #", align=False))
self.assertEqual([((0, 0),), ((0, 0),)], grf.parse_polys("# #"))
self.assertEqual([((0, 0), (2, 0),)], grf.parse_polys("# #", allow_disconnected=True))
self.assertEqual([((0, 0), (0, 1), (1, 1), (2, 0), (2, 1))], grf.parse_polys("# #\n###"))
self.assertEqual([((0, 0), (1, 0))], grf.parse_polys("AA"))
self.assertEqual([((0, 0),), ((0, 0),)], grf.parse_polys("AB"))
self.assertEqual([((0, 0),), ((0, 0),), ((0, 0),)], grf.parse_polys("ABA"))
self.assertEqual([((0, 0), (2, 0)), ((0, 0),)], grf.parse_polys("ABA", allow_disconnected=True))
self.assertEqual([("A", (0, 0))], grf.parse_polys("#", annotate=True))
self.assertEqual([("A", (0, 0), (1, 0))], grf.parse_polys("##", annotate=True))
self.assertEqual([("A", (0, 0)), ("B", (0, 0))], grf.parse_polys("# #", annotate=True))
self.assertEqual([("X", (0, 0)), ("Y", (0, 0))], grf.parse_polys("X Y", annotate=True))
self.assertEqual([("X", (0, 0)), ("Y", (0, 0))], grf.parse_polys("XY", annotate=True))
self.assertEqual([("X", (0, 0)), ("X", (0, 0)), ("Y", (0, 0))], grf.parse_polys("XYX", annotate=True))
self.assertEqual([("X", (0, 0), (2, 0)), ("Y", (0, 0))], grf.parse_polys("XYX", annotate=True, allow_disconnected=True))
def testParseGrid(self):
self.assertEqual({}, grf.parse_grid(""))
self.assertEqual({(0, 0): "A"}, grf.parse_grid("A"))
self.assertEqual({(0, 0): "A", (1, 0): "B"}, grf.parse_grid("AB"))
self.assertEqual({(0, 0): "A", (1, 1): "A"}, grf.parse_grid("A\n A"))
def testPolyWithinGrid(self):
self.assertEqual(0, len(grf.poly_within_grid(((0, 0),), {})))
self.assertEqual(1, len(grf.poly_within_grid(((0, 0),), grf.rect_grid(1, 1))))
self.assertEqual(0, len(grf.poly_within_grid(((0, 0), (1, 0)), grf.rect_grid(1, 1))))
self.assertEqual(2, len(grf.poly_within_grid(((0, 0), (1, 0)), grf.rect_grid(3, 1))))
self.assertEqual(7, len(grf.poly_within_grid(((0, 0), (1, 0)), grf.rect_grid(3, 2))))
self.assertEqual(4, len(grf.poly_within_grid(((0, 0), (1, 0)), grf.rect_grid(3, 2), rotate=False)))
def testAstarUniform(self):
neighbors = dict(zip("ABCDEFG", "BC ACF ABDF CFE DG BCD E".split())).get
h = dict(zip("ABCDEFG", (2,2,2,1,0,1,0))).get
self.assertEqual("".join(grf.astar_uniform("A", "G", neighbors, h)), "ACDEG")
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python3
import argparse
import os
import sys
import random
debug_mode = False
def create_bram(dsc_f, sim_f, ref_f, tb_f, k1, k2, or_next):
while True:
init = 0 # random.randrange(2)
abits = random.randrange(1, 8)
dbits = random.randrange(1, 8)
groups = random.randrange(2, 5)
if random.randrange(2):
abits = 2 ** random.randrange(1, 4)
if random.randrange(2):
dbits = 2 ** random.randrange(1, 4)
while True:
wrmode = [ random.randrange(0, 2) for i in range(groups) ]
if wrmode.count(1) == 0: continue
if wrmode.count(0) == 0: continue
break
if random.randrange(2):
maxpol = 4
maxtransp = 1
maxclocks = 4
else:
maxpol = None
clkpol = random.randrange(4)
maxtransp = 2
maxclocks = 1
def generate_enable(i):
if wrmode[i]:
v = 2 ** random.randrange(0, 4)
while dbits < v or dbits % v != 0:
v //= 2
return v
return 0
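        # e.g. dbits == 6: an initial v of 8 is halved to 4, then 2, the
        # largest power of two dividing dbits (the write-enable width)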
def generate_transp(i):
if wrmode[i] == 0:
return random.randrange(maxtransp)
return 0
def generate_clkpol(i):
if maxpol is None:
return clkpol
return random.randrange(maxpol)
ports = [ random.randrange(1, 3) for i in range(groups) ]
enable = [ generate_enable(i) for i in range(groups) ]
transp = [ generate_transp(i) for i in range(groups) ]
clocks = [ random.randrange(maxclocks)+1 for i in range(groups) ]
clkpol = [ generate_clkpol(i) for i in range(groups) ]
break
print("bram bram_%02d_%02d" % (k1, k2), file=dsc_f)
print(" init %d" % init, file=dsc_f)
print(" abits %d" % abits, file=dsc_f)
print(" dbits %d" % dbits, file=dsc_f)
print(" groups %d" % groups, file=dsc_f)
print(" ports %s" % " ".join(["%d" % i for i in ports]), file=dsc_f)
print(" wrmode %s" % " ".join(["%d" % i for i in wrmode]), file=dsc_f)
print(" enable %s" % " ".join(["%d" % i for i in enable]), file=dsc_f)
print(" transp %s" % " ".join(["%d" % i for i in transp]), file=dsc_f)
print(" clocks %s" % " ".join(["%d" % i for i in clocks]), file=dsc_f)
print(" clkpol %s" % " ".join(["%d" % i for i in clkpol]), file=dsc_f)
print("endbram", file=dsc_f)
print("match bram_%02d_%02d" % (k1, k2), file=dsc_f)
if random.randrange(2):
non_zero_enables = [chr(ord('A') + i) for i in range(len(enable)) if enable[i]]
if len(non_zero_enables):
print(" shuffle_enable %c" % random.choice(non_zero_enables), file=dsc_f)
if or_next:
print(" or_next_if_better", file=dsc_f)
print("endmatch", file=dsc_f)
states = set()
v_ports = set()
v_stmts = list()
v_always = dict()
tb_decls = list()
tb_clocks = list()
tb_addr = list()
tb_din = list()
tb_dout = list()
tb_addrlist = list()
for i in range(10):
tb_addrlist.append(random.randrange(1048576))
t = random.randrange(1048576)
for i in range(10):
tb_addrlist.append(t ^ (1 << i))
v_stmts.append("(* nomem2reg *) reg [%d:0] memory [0:%d];" % (dbits-1, 2**abits-1))
portindex = 0
last_always_hdr = (-1, "")
for p1 in range(groups):
for p2 in range(ports[p1]):
pf = "%c%d" % (chr(ord("A") + p1), p2 + 1)
portindex += 1
v_stmts.append("`ifndef SYNTHESIS")
v_stmts.append(" event UPDATE_%s;" % pf)
v_stmts.append("`endif")
if clocks[p1] and not ("CLK%d" % clocks[p1]) in v_ports:
v_ports.add("CLK%d" % clocks[p1])
v_stmts.append("input CLK%d;" % clocks[p1])
tb_decls.append("reg CLK%d = 0;" % clocks[p1])
tb_clocks.append("CLK%d" % clocks[p1])
v_ports.add("%sADDR" % pf)
v_stmts.append("input [%d:0] %sADDR;" % (abits-1, pf))
if transp[p1]:
v_stmts.append("reg [%d:0] %sADDR_Q;" % (abits-1, pf))
tb_decls.append("reg [%d:0] %sADDR;" % (abits-1, pf))
tb_addr.append("%sADDR" % pf)
v_ports.add("%sDATA" % pf)
v_stmts.append("%s [%d:0] %sDATA;" % ("input" if wrmode[p1] else "output reg", dbits-1, pf))
if wrmode[p1]:
tb_decls.append("reg [%d:0] %sDATA;" % (dbits-1, pf))
tb_din.append("%sDATA" % pf)
else:
tb_decls.append("wire [%d:0] %sDATA;" % (dbits-1, pf))
tb_decls.append("wire [%d:0] %sDATA_R;" % (dbits-1, pf))
tb_dout.append("%sDATA" % pf)
if wrmode[p1] and enable[p1]:
v_ports.add("%sEN" % pf)
v_stmts.append("input [%d:0] %sEN;" % (enable[p1]-1, pf))
tb_decls.append("reg [%d:0] %sEN;" % (enable[p1]-1, pf))
tb_din.append("%sEN" % pf)
assign_op = "<="
if clocks[p1] == 0:
always_hdr = "always @* begin"
assign_op = "="
elif clkpol[p1] == 0:
always_hdr = "always @(negedge CLK%d) begin" % clocks[p1]
elif clkpol[p1] == 1:
always_hdr = "always @(posedge CLK%d) begin" % clocks[p1]
else:
if not ("CP", clkpol[p1]) in states:
v_stmts.append("parameter CLKPOL%d = 0;" % clkpol[p1])
states.add(("CP", clkpol[p1]))
if not ("CPW", clocks[p1], clkpol[p1]) in states:
v_stmts.append("wire CLK%d_CLKPOL%d = CLK%d == CLKPOL%d;" % (clocks[p1], clkpol[p1], clocks[p1], clkpol[p1]))
states.add(("CPW", clocks[p1], clkpol[p1]))
always_hdr = "always @(posedge CLK%d_CLKPOL%d) begin" % (clocks[p1], clkpol[p1])
if last_always_hdr[1] != always_hdr:
last_always_hdr = (portindex, always_hdr)
v_always[last_always_hdr] = list()
if wrmode[p1]:
for i in range(enable[p1]):
enrange = "[%d:%d]" % ((i+1)*dbits/enable[p1]-1, i*dbits/enable[p1])
v_always[last_always_hdr].append((portindex, pf, "if (%sEN[%d]) memory[%sADDR]%s = %sDATA%s;" % (pf, i, pf, enrange, pf, enrange)))
elif transp[p1]:
v_always[last_always_hdr].append((sum(ports)+1, pf, "%sADDR_Q %s %sADDR;" % (pf, assign_op, pf)))
v_stmts.append("always @* %sDATA = memory[%sADDR_Q];" % (pf, pf))
else:
v_always[last_always_hdr].append((0, pf, "%sDATA %s memory[%sADDR];" % (pf, assign_op, pf)))
for always_hdr in sorted(v_always):
v_stmts.append(always_hdr[1])
triggered_events = set()
time_cursor = 0
v_always[always_hdr].sort()
for t, p, s in v_always[always_hdr]:
if time_cursor != t or not p in triggered_events:
v_stmts.append(" `ifndef SYNTHESIS")
stmt = ""
if time_cursor != t:
stmt += " #%d;" % (t-time_cursor)
time_cursor = t
if not p in triggered_events:
stmt += (" -> UPDATE_%s;" % p)
triggered_events.add(p)
v_stmts.append(" %s" % stmt)
v_stmts.append(" `endif")
v_stmts.append(" %s" % s)
v_stmts.append("end")
print("module bram_%02d_%02d(%s);" % (k1, k2, ", ".join(v_ports)), file=sim_f)
for stmt in v_stmts:
print(" %s" % stmt, file=sim_f)
print("endmodule", file=sim_f)
print("module bram_%02d_%02d_ref(%s);" % (k1, k2, ", ".join(v_ports)), file=ref_f)
for stmt in v_stmts:
print(" %s" % stmt, file=ref_f)
print("endmodule", file=ref_f)
print("module bram_%02d_%02d_tb;" % (k1, k2), file=tb_f)
for stmt in tb_decls:
print(" %s" % stmt, file=tb_f)
print(" bram_%02d_%02d uut (" % (k1, k2), file=tb_f)
print(" " + ",\n ".join([".%s(%s)" % (p, p) for p in (tb_clocks + tb_addr + tb_din + tb_dout)]), file=tb_f)
print(" );", file=tb_f)
print(" bram_%02d_%02d_ref ref (" % (k1, k2), file=tb_f)
print(" " + ",\n ".join([".%s(%s)" % (p, p) for p in (tb_clocks + tb_addr + tb_din)]) + ",", file=tb_f)
print(" " + ",\n ".join([".%s(%s_R)" % (p, p) for p in tb_dout]), file=tb_f)
print(" );", file=tb_f)
expr_dout = "{%s}" % ", ".join(tb_dout)
expr_dout_ref = "{%s}" % ", ".join(i + "_R" for i in tb_dout)
print(" wire error = %s !== %s;" % (expr_dout, expr_dout_ref), file=tb_f)
print(" initial begin", file=tb_f)
if debug_mode:
print(" $dumpfile(`vcd_file);", file=tb_f)
print(" $dumpvars(0, bram_%02d_%02d_tb);" % (k1, k2), file=tb_f)
print(" #%d;" % (1000 + k2), file=tb_f)
for p in (tb_clocks + tb_addr + tb_din):
if p[-2:] == "EN":
print(" %s <= ~0;" % p, file=tb_f)
else:
print(" %s <= 0;" % p, file=tb_f)
print(" #1000;", file=tb_f)
for v in [1, 0, 1, 0]:
for p in tb_clocks:
print(" %s = %d;" % (p, v), file=tb_f)
print(" #1000;", file=tb_f)
for i in range(20 if debug_mode else 100):
if len(tb_clocks):
c = random.choice(tb_clocks)
print(" %s = !%s;" % (c, c), file=tb_f)
print(" #100;", file=tb_f)
print(" $display(\"bram_%02d_%02d %3d: %%b %%b %%s\", %s, %s, error ? \"ERROR\" : \"OK\");" %
(k1, k2, i, expr_dout, expr_dout_ref), file=tb_f)
for p in tb_din:
print(" %s <= %d;" % (p, random.randrange(1048576)), file=tb_f)
for p in tb_addr:
print(" %s <= %d;" % (p, random.choice(tb_addrlist)), file=tb_f)
print(" #900;", file=tb_f)
print(" end", file=tb_f)
print("endmodule", file=tb_f)
parser = argparse.ArgumentParser(formatter_class = argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-S', '--seed', type = int, help = 'seed for PRNG')
parser.add_argument('-c', '--count', type = int, default = 5, help = 'number of test cases to generate')
parser.add_argument('-d', '--debug', action='store_true')
args = parser.parse_args()
debug_mode = args.debug
if args.seed is not None:
seed = args.seed
else:
seed = (int(os.times()[4]*100) + os.getpid()) % 900000 + 100000
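    # i.e. a 6-digit seed in [100000, 999999] derived from elapsed time and PID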
print("PRNG seed: %d" % seed)
random.seed(seed)
for k1 in range(args.count):
dsc_f = open("temp/brams_%02d.txt" % k1, "w")
sim_f = open("temp/brams_%02d.v" % k1, "w")
ref_f = open("temp/brams_%02d_ref.v" % k1, "w")
tb_f = open("temp/brams_%02d_tb.v" % k1, "w")
for f in [sim_f, ref_f, tb_f]:
print("`timescale 1 ns / 1 ns", file=f)
lenk2 = 1 if debug_mode else 10
for k2 in range(lenk2):
create_bram(dsc_f, sim_f, ref_f, tb_f, k1, k2, random.randrange(2 if k2+1 < lenk2 else 1))
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
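# Each *_request test mocks a 500 response (so the client call raises
# TwilioException) and then asserts the exact request Holodeck captured;
# each *_response test replays a canned payload and checks the result.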
class PhoneNumberTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.trunking.v1.trunks("TKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.phone_numbers("PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://trunking.twilio.com/v1/Trunks/TKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/PhoneNumbers/PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2010-12-10T17:27:34Z",
"date_updated": "2015-10-09T11:36:32Z",
"friendly_name": "(415) 867-5309",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"phone_number": "+14158675309",
"api_version": "2010-04-01",
"voice_caller_id_lookup": null,
"voice_url": "https://webhooks.twilio.com/v1/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Proxy/KSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Webhooks/Call",
"voice_method": "POST",
"voice_fallback_url": null,
"voice_fallback_method": null,
"status_callback": "",
"status_callback_method": "POST",
"voice_application_sid": "",
"trunk_sid": "TKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sms_url": "https://webhooks.twilio.com/v1/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Proxy/KSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Webhooks/Message",
"sms_method": "POST",
"sms_fallback_url": "",
"sms_fallback_method": "POST",
"sms_application_sid": "APaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"address_requirements": "none",
"beta": false,
"url": "https://trunking.twilio.com/v1/Trunks/TKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/PhoneNumbers/PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"capabilities": {
"voice": true,
"sms": true,
"mms": true
},
"links": {
"phone_number": "https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/IncomingPhoneNumbers/PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
}
'''
))
actual = self.client.trunking.v1.trunks("TKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.phone_numbers("PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.trunking.v1.trunks("TKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.phone_numbers("PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://trunking.twilio.com/v1/Trunks/TKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/PhoneNumbers/PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.trunking.v1.trunks("TKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.phone_numbers("PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.trunking.v1.trunks("TKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.phone_numbers.create(phone_number_sid="PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
values = {'PhoneNumberSid': "PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", }
self.holodeck.assert_has_request(Request(
'post',
'https://trunking.twilio.com/v1/Trunks/TKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/PhoneNumbers',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"sid": "PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2010-12-10T17:27:34Z",
"date_updated": "2015-10-09T11:36:32Z",
"friendly_name": "(415) 867-5309",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"phone_number": "+14158675309",
"api_version": "2010-04-01",
"voice_caller_id_lookup": null,
"voice_url": "https://webhooks.twilio.com/v1/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Proxy/KSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Webhooks/Call",
"voice_method": "POST",
"voice_fallback_url": null,
"voice_fallback_method": null,
"status_callback": "",
"status_callback_method": "POST",
"voice_application_sid": "",
"trunk_sid": "TKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sms_url": "https://webhooks.twilio.com/v1/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Proxy/KSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Webhooks/Message",
"sms_method": "POST",
"sms_fallback_url": "",
"sms_fallback_method": "POST",
"sms_application_sid": "APaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"address_requirements": "none",
"beta": false,
"url": "https://trunking.twilio.com/v1/Trunks/TKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/PhoneNumbers/PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"capabilities": {
"voice": true,
"sms": true,
"mms": true
},
"links": {
"phone_number": "https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/IncomingPhoneNumbers/PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
}
'''
))
actual = self.client.trunking.v1.trunks("TKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.phone_numbers.create(phone_number_sid="PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.trunking.v1.trunks("TKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.phone_numbers.list()
self.holodeck.assert_has_request(Request(
'get',
'https://trunking.twilio.com/v1/Trunks/TKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/PhoneNumbers',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"first_page_url": "https://trunking.twilio.com/v1/Trunks/TKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/PhoneNumbers?PageSize=1&Page=0",
"key": "phone_numbers",
"next_page_url": null,
"page": 0,
"page_size": 1,
"previous_page_url": null,
"url": "https://trunking.twilio.com/v1/Trunks/TKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/PhoneNumbers?PageSize=1&Page=0"
},
"phone_numbers": [
{
"sid": "PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2010-12-10T17:27:34Z",
"date_updated": "2015-10-09T11:36:32Z",
"friendly_name": "(415) 867-5309",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"phone_number": "+14158675309",
"api_version": "2010-04-01",
"voice_caller_id_lookup": null,
"voice_url": "https://webhooks.twilio.com/v1/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Proxy/KSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Webhooks/Call",
"voice_method": "POST",
"voice_fallback_url": null,
"voice_fallback_method": null,
"status_callback": "",
"status_callback_method": "POST",
"voice_application_sid": "",
"trunk_sid": "TKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sms_url": "https://webhooks.twilio.com/v1/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Proxy/KSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Webhooks/Message",
"sms_method": "POST",
"sms_fallback_url": "",
"sms_fallback_method": "POST",
"sms_application_sid": "APaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"address_requirements": "none",
"beta": false,
"url": "https://trunking.twilio.com/v1/Trunks/TKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/PhoneNumbers/PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"capabilities": {
"voice": true,
"sms": true,
"mms": true
},
"links": {
"phone_number": "https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/IncomingPhoneNumbers/PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
}
]
}
'''
))
actual = self.client.trunking.v1.trunks("TKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.phone_numbers.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"first_page_url": "https://trunking.twilio.com/v1/Trunks/TKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/PhoneNumbers?PageSize=1&Page=0",
"key": "phone_numbers",
"next_page_url": null,
"page": 0,
"page_size": 1,
"previous_page_url": null,
"url": "https://trunking.twilio.com/v1/Trunks/TKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/PhoneNumbers?PageSize=1&Page=0"
},
"phone_numbers": []
}
'''
))
actual = self.client.trunking.v1.trunks("TKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.phone_numbers.list()
self.assertIsNotNone(actual)
|
|
#!/usr/bin/python
from __future__ import print_function
def c_compiler_rule(b, name, description, compiler, flags):
command = "%s -MMD -MF $out.d %s -c -o $out $in" % (compiler, flags)
b.rule(name, command, description + " $out", depfile="$out.d")
version_major = 0
version_minor = 2
version_patch = 0
from optparse import OptionParser
import os
import string
from subprocess import *
import sys
srcdir = os.path.dirname(sys.argv[0])
sys.path.insert(0, os.path.join(srcdir, 'build'))
import metabuild
p = OptionParser()
p.add_option('--with-llvm-config', metavar='PATH',
help='use given llvm-config script')
p.add_option('--with-cxx-compiler', metavar='PATH',
help='use given C++ compiler')
p.add_option('--prefix', metavar='PATH',
help='install to given prefix')
p.add_option('--libexecdir', metavar='PATH',
help='install *.bc to given dir')
p.add_option('--includedir', metavar='PATH',
help='install include files to given dir')
p.add_option('--pkgconfigdir', metavar='PATH',
help='install clc.pc to given dir')
p.add_option('-g', metavar='GENERATOR', default='make',
help='use given generator (default: make)')
p.add_option('--enable-runtime-subnormal', action="store_true", default=False,
help='Allow runtimes to choose subnormal support')
(options, args) = p.parse_args()
llvm_config_exe = options.with_llvm_config or "llvm-config"
prefix = options.prefix
if not prefix:
prefix = '/usr/local'
libexecdir = options.libexecdir
if not libexecdir:
libexecdir = os.path.join(prefix, 'lib/clc')
includedir = options.includedir
if not includedir:
includedir = os.path.join(prefix, 'include')
pkgconfigdir = options.pkgconfigdir
if not pkgconfigdir:
pkgconfigdir = os.path.join(prefix, 'share/pkgconfig')
def llvm_config(args):
try:
        # Universal newlines translate different newline formats to '\n';
        # they also force the output to be str instead of bytes in Python 3.
proc = Popen([llvm_config_exe] + args, stdout=PIPE, universal_newlines=True)
return proc.communicate()[0].rstrip().replace('\n', ' ')
except OSError:
print("Error executing llvm-config.")
print("Please ensure that llvm-config is in your $PATH, or use --with-llvm-config.")
sys.exit(1)
llvm_version = llvm_config(['--version']).replace('svn', '').split('.')
llvm_int_version = int(llvm_version[0]) * 100 + int(llvm_version[1]) * 10
llvm_string_version = llvm_version[0] + '.' + llvm_version[1]
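# Worked example of the encoding above: version '3.9.1' gives
# 3 * 100 + 9 * 10 = 390, and '7.0.0' gives 700.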
if llvm_int_version < 390:
print("libclc requires LLVM >= 3.9")
sys.exit(1)
llvm_system_libs = llvm_config(['--system-libs'])
llvm_bindir = llvm_config(['--bindir'])
llvm_core_libs = llvm_config(['--libs', 'core', 'bitreader', 'bitwriter']) + ' ' + \
llvm_system_libs + ' ' + \
llvm_config(['--ldflags'])
llvm_cxxflags = llvm_config(['--cxxflags']) + ' -fno-exceptions -fno-rtti ' + \
'-DHAVE_LLVM=0x{:0=4}'.format(llvm_int_version)
llvm_libdir = llvm_config(['--libdir'])
llvm_clang = os.path.join(llvm_bindir, 'clang')
llvm_as = os.path.join(llvm_bindir, 'llvm-as')
llvm_link = os.path.join(llvm_bindir, 'llvm-link')
llvm_opt = os.path.join(llvm_bindir, 'opt')
cxx_compiler = options.with_cxx_compiler
if not cxx_compiler:
cxx_compiler = os.path.join(llvm_bindir, 'clang++')
available_targets = {
'r600--' : { 'devices' :
[{'gpu' : 'cedar', 'aliases' : ['palm', 'sumo', 'sumo2', 'redwood', 'juniper']},
{'gpu' : 'cypress', 'aliases' : ['hemlock'] },
{'gpu' : 'barts', 'aliases' : ['turks', 'caicos'] },
{'gpu' : 'cayman', 'aliases' : ['aruba']} ]},
'amdgcn--': { 'devices' :
[{'gpu' : 'tahiti', 'aliases' : ['pitcairn', 'verde', 'oland', 'hainan', 'bonaire', 'kabini', 'kaveri', 'hawaii', 'mullins', 'tonga', 'iceland', 'carrizo', 'fiji', 'stoney', 'polaris10', 'polaris11']} ]},
'amdgcn--amdhsa': { 'devices' :
[{'gpu' : '', 'aliases' : ['bonaire', 'kabini', 'kaveri', 'hawaii', 'mullins', 'tonga', 'iceland', 'carrizo', 'fiji', 'stoney', 'polaris10', 'polaris11']} ]},
'nvptx--' : { 'devices' : [{'gpu' : '', 'aliases' : []} ]},
'nvptx64--' : { 'devices' : [{'gpu' : '', 'aliases' : []} ]},
'nvptx--nvidiacl' : { 'devices' : [{'gpu' : '', 'aliases' : []} ]},
'nvptx64--nvidiacl' : { 'devices' : [{'gpu' : '', 'aliases' : []} ]},
}
# Support for gfx9 was added in LLVM 5 (r295554)
if llvm_int_version >= 500:
available_targets['amdgcn--']['devices'][0]['aliases'] += ['gfx900', 'gfx902']
available_targets['amdgcn--amdhsa']['devices'][0]['aliases'] += ['gfx900', 'gfx902']
# Support for Vega12 and Vega20 was added in LLVM 7 (r331215)
if llvm_int_version >= 700:
available_targets['amdgcn--']['devices'][0]['aliases'] += ['gfx904', 'gfx906']
available_targets['amdgcn--amdhsa']['devices'][0]['aliases'] += ['gfx904', 'gfx906']
default_targets = ['nvptx--nvidiacl', 'nvptx64--nvidiacl', 'r600--', 'amdgcn--', 'amdgcn--amdhsa']
# Mesa has been using amdgcn-mesa-mesa3d since LLVM 4.0
if llvm_int_version > 390:
available_targets['amdgcn-mesa-mesa3d'] = available_targets['amdgcn--']
default_targets.append('amdgcn-mesa-mesa3d')
targets = args
if not targets:
targets = default_targets
b = metabuild.from_name(options.g)
b.rule("LLVM_AS", "%s -o $out $in" % llvm_as, 'LLVM-AS $out')
b.rule("LLVM_LINK", command = llvm_link + " -o $out $in",
description = 'LLVM-LINK $out')
b.rule("OPT", command = llvm_opt + " -O3 -o $out $in",
description = 'OPT $out')
c_compiler_rule(b, "LLVM_TOOL_CXX", 'CXX', cxx_compiler, llvm_cxxflags)
b.rule("LLVM_TOOL_LINK", cxx_compiler + " -o $out $in %s" % llvm_core_libs + " -Wl,-rpath %s" % llvm_libdir, 'LINK $out')
prepare_builtins = os.path.join('utils', 'prepare-builtins')
b.build(os.path.join('utils', 'prepare-builtins.o'), "LLVM_TOOL_CXX",
os.path.join(srcdir, 'utils', 'prepare-builtins.cpp'))
b.build(prepare_builtins, "LLVM_TOOL_LINK",
os.path.join('utils', 'prepare-builtins.o'))
b.rule("PREPARE_BUILTINS", "%s -o $out $in" % prepare_builtins,
'PREPARE-BUILTINS $out')
b.rule("PYTHON_GEN", "python < $in > $out", "PYTHON_GEN $out")
b.build('generic/lib/convert.cl', "PYTHON_GEN", ['generic/lib/gen_convert.py'])
manifest_deps = set([sys.argv[0], os.path.join(srcdir, 'build', 'metabuild.py'),
os.path.join(srcdir, 'build', 'ninja_syntax.py')])
install_files_bc = []
install_deps = []
# Create rules for subnormal helper objects
for src in ['subnormal_disable.ll', 'subnormal_use_default.ll']:
obj_name = src[:-2] + 'bc'
obj = os.path.join('generic--', 'lib', obj_name)
src_file = os.path.join('generic', 'lib', src)
b.build(obj, 'LLVM_AS', src_file)
b.default(obj)
install_files_bc.append((obj, obj))
install_deps.append(obj)
# Create libclc.pc
clc = open('libclc.pc', 'w')
clc.write('includedir=%(inc)s\nlibexecdir=%(lib)s\n\nName: libclc\nDescription: Library requirements of the OpenCL C programming language\nVersion: %(maj)s.%(min)s.%(pat)s\nCflags: -I${includedir}\nLibs: -L${libexecdir}' %
{'inc': includedir, 'lib': libexecdir, 'maj': version_major, 'min': version_minor, 'pat': version_patch})
clc.close()
for target in targets:
(t_arch, t_vendor, t_os) = target.split('-')
archs = [t_arch]
if t_arch == 'nvptx' or t_arch == 'nvptx64':
archs.append('ptx')
archs.append('generic')
subdirs = []
for arch in archs:
subdirs.append("%s-%s-%s" % (arch, t_vendor, t_os))
subdirs.append("%s-%s" % (arch, t_os))
subdirs.append(arch)
if arch == 'amdgcn' or arch == 'r600':
subdirs.append('amdgpu')
incdirs = filter(os.path.isdir,
[os.path.join(srcdir, subdir, 'include') for subdir in subdirs])
libdirs = filter(lambda d: os.path.isfile(os.path.join(d, 'SOURCES')) or
os.path.isfile(os.path.join(d, 'SOURCES_' + llvm_string_version)),
[os.path.join(srcdir, subdir, 'lib') for subdir in subdirs])
# The above are iterables in python3 but we might use them multiple times
    # if more than one device is supported.
incdirs = list(incdirs)
libdirs = list(libdirs)
clang_cl_includes = ' '.join(["-I%s" % incdir for incdir in incdirs])
for device in available_targets[target]['devices']:
# The rule for building a .bc file for the specified architecture using clang.
clang_bc_flags = "-target %s -I`dirname $in` %s " \
"-fno-builtin " \
"-D__CLC_INTERNAL " \
"-emit-llvm" % (target, clang_cl_includes)
if device['gpu'] != '':
clang_bc_flags += ' -mcpu=' + device['gpu']
clang_bc_rule = "CLANG_CL_BC_" + target + "_" + device['gpu']
c_compiler_rule(b, clang_bc_rule, "LLVM-CC", llvm_clang, clang_bc_flags)
as_bc_rule = "LLVM_AS_BC_" + target + "_" + device['gpu']
b.rule(as_bc_rule, "%s -E -P %s -x cl $in -o - | %s -o $out" % (llvm_clang, clang_bc_flags, llvm_as), 'LLVM-AS $out')
objects = []
sources_seen = set()
compats = []
if device['gpu'] == '':
full_target_name = target
obj_suffix = ''
else:
full_target_name = device['gpu'] + '-' + target
obj_suffix = '.' + device['gpu']
for libdir in libdirs:
subdir_list_file = os.path.join(libdir, 'SOURCES')
if os.path.exists(subdir_list_file):
manifest_deps.add(subdir_list_file)
override_list_file = os.path.join(libdir, 'OVERRIDES')
compat_list_file = os.path.join(libdir,
'SOURCES_' + llvm_string_version)
compat_list_override = os.path.join(libdir,
'OVERRIDES_' + llvm_string_version)
# Build compat list
if os.path.exists(compat_list_file):
manifest_deps.add(compat_list_file)
for compat in open(compat_list_file).readlines():
compat = compat.rstrip()
compats.append(compat)
# Add target compat overrides
if os.path.exists(compat_list_override):
for override in open(compat_list_override).readlines():
override = override.rstrip()
sources_seen.add(override)
# Add target overrides
if os.path.exists(override_list_file):
for override in open(override_list_file).readlines():
override = override.rstrip()
sources_seen.add(override)
files = open(subdir_list_file).readlines() if os.path.exists(subdir_list_file) else []
for src in files + compats:
src = src.rstrip()
if src not in sources_seen:
sources_seen.add(src)
obj = os.path.join(target, 'lib', src + obj_suffix + '.bc')
objects.append(obj)
src_path = libdir
src_file = os.path.join(src_path, src)
ext = os.path.splitext(src)[1]
if ext == '.ll':
b.build(obj, as_bc_rule, src_file)
else:
b.build(obj, clang_bc_rule, src_file)
obj = os.path.join('generic--', 'lib', 'subnormal_use_default.bc')
if not options.enable_runtime_subnormal:
objects.append(obj)
builtins_link_bc = os.path.join(target, 'lib', 'builtins.link' + obj_suffix + '.bc')
builtins_opt_bc = os.path.join(target, 'lib', 'builtins.opt' + obj_suffix + '.bc')
builtins_bc = os.path.join('built_libs', full_target_name + '.bc')
b.build(builtins_link_bc, "LLVM_LINK", objects)
b.build(builtins_opt_bc, "OPT", builtins_link_bc)
b.build(builtins_bc, "PREPARE_BUILTINS", builtins_opt_bc, prepare_builtins)
install_files_bc.append((builtins_bc, builtins_bc))
install_deps.append(builtins_bc)
for alias in device['aliases']:
# Ninja cannot have multiple rules with same name so append suffix
ruleName = "CREATE_ALIAS_{0}_for_{1}".format(alias, device['gpu'])
            b.rule(ruleName, "ln -fs %s $out" % os.path.basename(builtins_bc),
                   "CREATE-ALIAS $out")
alias_file = os.path.join('built_libs', alias + '-' + target + '.bc')
b.build(alias_file, ruleName, builtins_bc)
install_files_bc.append((alias_file, alias_file))
install_deps.append(alias_file)
b.default(builtins_bc)
install_cmd = ' && '.join(['mkdir -p ${DESTDIR}/%(dst)s && cp -r %(src)s ${DESTDIR}/%(dst)s' %
{'src': file,
'dst': libexecdir}
for (file, dest) in install_files_bc])
install_cmd = ' && '.join(['%(old)s && mkdir -p ${DESTDIR}/%(dst)s && cp -r %(srcdir)s/generic/include/clc ${DESTDIR}/%(dst)s' %
{'old': install_cmd,
'dst': includedir,
'srcdir': srcdir}])
install_cmd = ' && '.join(['%(old)s && mkdir -p ${DESTDIR}/%(dst)s && cp -r libclc.pc ${DESTDIR}/%(dst)s' %
{'old': install_cmd,
'dst': pkgconfigdir}])
b.rule('install', command = install_cmd, description = 'INSTALL')
b.build('install', 'install', install_deps)
b.rule("configure", command = ' '.join(sys.argv), description = 'CONFIGURE',
generator = True)
b.build(b.output_filename(), 'configure', list(manifest_deps))
b.finish()
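# Hedged usage note: running this script as e.g.
#   python configure.py -g ninja --prefix=/opt/libclc amdgcn--amdhsa
# is expected to emit a build.ninja (or a Makefile with -g make) containing
# the per-target builtins libraries plus the install and configure rules
# defined above.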
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import logging
import operator
import os
import re
import struct
import sys
import iso639
import misc
import opensubtitles
import tvsubtitles
class Movie(object):
MOVIE = "movie"
EPISODE = "episode"
TVSHOW = "tv series"
def __init__(self, name, kind=MOVIE, imdbid=0, season=0, episode=0):
self.name = str(name)
try:
self.imdbid = int(imdbid)
except ValueError:
self.imdbid = 0
self.kind = str(kind)
try:
self.season = int(season)
except ValueError:
self.season = 0
try:
self.episode = int(episode)
except ValueError:
self.episode = 0
def __str__(self):
if self.kind == self.EPISODE:
tvshow = "\nSeason {0.season} Episode {0.episode}".format(self)
else:
tvshow = ""
return "Name: {0.name}\nKind: {0.kind}\nIMDb Id: {0.imdbid}{1}".format(
self, tvshow)
def update_info(self, movie):
self.name = movie.name
self.imdbid = movie.imdbid
self.kind = movie.kind
self.season = movie.season
self.episode = movie.episode
class MovieFile(Movie):
def __init__(self, path):
super(MovieFile, self).__init__("", "")
# File info
self.path = str(path)
self.hash = self.__hash(path)
self.size = os.path.getsize(path)
self.extension = path.split('.')[-1]
def __str__(self):
return "Movie {0.path} ({0.hash} size {0.size}):\n{1}".format(
self, super(MovieFile, self).__str__())
@staticmethod
def __hash(path):
"""
Calculates the hash value of a movie.
Source:
http://trac.opensubtitles.org/projects/opensubtitles/wiki/HashSourceCodes
"""
longlongformat = 'q' # long long
bytesize = struct.calcsize(longlongformat)
f = open(path, "rb")
filesize = os.path.getsize(path)
hash = filesize
if filesize < 65536 * 2:
return "SizeError"
for x in range(65536 // bytesize):
buffer = f.read(bytesize)
(l_value,) = struct.unpack(longlongformat, buffer)
hash += l_value
hash = hash & 0xFFFFFFFFFFFFFFFF # to remain as 64bit number
f.seek(max(0, filesize - 65536), 0)
for x in range(65536 // bytesize):
buffer = f.read(bytesize)
(l_value,) = struct.unpack(longlongformat, buffer)
hash += l_value
hash = hash & 0xFFFFFFFFFFFFFFFF
f.close()
returnedhash = "%016x" % hash
return returnedhash
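    # Illustrative layout (per the OpenSubtitles spec linked above): the hash
    # is the file size plus the 64-bit little-endian words of the first and
    # last 64 KiB, truncated to 64 bits; the spec's reference file
    # "breakdance.avi" is documented to hash to "8e245d9679d31e12".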
def filename(self):
return os.path.basename(self.path)
def subname(self):
return '.'.join(self.path.split('.')[:-1]) + '.srt'
def has_subtitle(self):
try:
os.stat(self.subname())
except OSError:
return False
else:
return True
def osdb_criteria(self):
return {
'hash': self.hash,
'size': self.size,
'name': self.filename(),
}
def guess(self):
"""
Let's try to guess what movie it can be.
@return: Movie with the info we guessed
"""
# XXX: Potentially, we could remove some garbage here
name = self.filename()
epsea = self.__guess_episode_season()
if not epsea:
return Movie(name=name)
else:
return Movie(name=name,
kind="episode",
season=epsea[0],
episode=epsea[1])
def __guess_episode_season(self):
base = os.path.basename(self.path)
match = re.search(
"[sS]?(?P<season>\d{1,2})[-xXeE](?P<episode>\d{1,2})",
base)
if match:
return (int(match.groupdict()['season']),
int(match.groupdict()['episode']))
else:
return None
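    # Illustrative matches for the pattern above: "Show.S01E02.avi" -> (1, 2)
    # and "show.1x02.avi" -> (1, 2); a plain movie filename returns None.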
class Asker(object):
"""
This class gives opportunity to user to select the correct movie.
Well, this is an abstract class because it doesn't implement a function
to allow the user to do it. There should be a TextAsker for exemple
to allow the user to type in the name, etc, or maybe if we have a graphic
interface, we can implement some other kind of functions.
Function to be implemented: select
"""
def __init__(self, ask_threshold):
"""
Create asker
@param ask_threshold: Below this score, we ask for suggestions
"""
self.ask_threshold = ask_threshold
def pick(self, moviefile, choices):
"""
Pick a movie amongst choices
        The algorithm:
        - If the best score is higher than ask_threshold, pick that movie
        - Else, call select() with all choices
@param moviefile: MovieFile we are trying to identify
@param choices: List of tuples: [(Movie, Score), ...]
@return: Selected movie
"""
try:
max_choice = choices[-1]
except IndexError:
max_choice = None
if max_choice and max_choice[1] > self.ask_threshold:
return max_choice[0]
else:
return self.select(moviefile, choices)
def select(self, moviefile, choices):
"""
Allow user to select a movie
This is an abstract base class, and this function should probably
be implemented by inheriting classes.
@param moviefile: MovieFile we are trying to identify
@param choices: List of (movies, score)
@return: Selected movie
"""
raise NotImplementedError
class TextAsker(Asker):
"""
This gives the user the opportunity to fill in the movie name and other
information manually from a terminal.
Either select from a list of movies or type in the information.
"""
def select(self, moviefile, choices):
"""
Output choices, and read the input
"""
print 'Identifying movie:', moviefile.path
print self.__show_choices(choices)
result = None
while result is None:
last_choice = len(choices) or 1
result = raw_input("Choice [{0}]: ".format(last_choice - 1))
if result == "":
result = "0"
            try:
                result = int(result)
            except ValueError:
                # int() raises ValueError (not TypeError) on bad input
                print result, "is not a valid choice (not a number)"
                result = None
                continue
            if not 0 <= result <= len(choices):
                print result, "is not a valid choice (invalid number)"
                result = None
# At this point, either result is a valid choice,
# or we have the max value which is: manual type-in
if result < len(choices):
return choices[result][0]
else:
return self.__get_from_user()
def __show_choices(self, choices):
num = -1
text = "Select one amongst those choices:\n"
for num, choice in enumerate(choices):
text += "[%d] Score: (%2d%%)\n%s\n" % (
num, int(choice[1] * 100), choice[0])
text += "[%d] I will give my inputs" % (num + 1)
return text
def __get_from_user(self):
name = raw_input("Movie/Show name: ")
show_info = None
while show_info is None:
show_info = raw_input("SXXEXX or empty if movie: ")
if show_info == "":
season = 0
episode = 0
kind = Movie.MOVIE
break
match = re.match("S(\d{1,2})E(\d{1,2})", show_info)
if not match:
show_info = None
continue
else:
season = match.group(1)
episode = match.group(2)
kind = Movie.EPISODE
return Movie(name=name, episode=episode, season=season, kind=kind)
class AutomaticAsker(Asker):
"""
Picks the best matching movie, and that's it.
It does not interact with anything or anybody.
It just returns the best match.
"""
def __init__(self, minimum=0):
super(AutomaticAsker, self).__init__(minimum)
    def select(self, moviefile, choices):
        """
        Choose no choice if we have to select
        If we have to select a movie, it means the best match is below our
        minimum score, so we should pick no choice at all
        """
return None
class MovieScore(object):
def __score_kind(self, given, guessed):
"""
Calculate score based on movie kind, season and episode
        This modifies the given movie if we feel we can do better
@param given: Movie info given
@param guessed: Movie info we tried to guess
@return: score calculated. Range is 0 to 1
@rtype: float
"""
score = 0
# Handle score for kind, season and episode
if given.kind == Movie.TVSHOW and guessed.kind == Movie.EPISODE:
given.kind = Movie.EPISODE
given.season = guessed.season
given.episode = guessed.episode
score = 0.75
elif (given.kind == Movie.EPISODE and
guessed.kind == Movie.EPISODE):
score += 0.5
if given.season == guessed.season:
score += 0.25
if given.episode == guessed.episode:
score += 0.25
# We need to get show name from episode name, if possible
m = re.search("\"(.*)\"", given.name)
if m:
given.name = m.group(1)
elif given.kind == Movie.MOVIE and guessed.kind == Movie.MOVIE:
score = 1
assert 0 <= score <= 1
return score
def __score_name(self, given, guessed):
"""
Calculate score based on name
@param given: Given movie name
@param guessed: Guessed movie name
@return: score calculated. Range from 0 to 1.
@rtype: float
"""
score = misc.strings_contained(guessed, given)
score += misc.dice_coefficient(guessed, given)
assert 0 <= score <= 2
return score / 2
def score(self, movie_given, movie_guessed):
"""
Give a score for matching two movies
Kind score is 40%
Name score is 60%
@warning: This method can modify the movie_given
@param movie_guessed: This is what we guessed from the filename
@param movie_given: This is the movie given by osdb
@return: (movie_given, score), score is what we calculated
score range is from 0 to 1
@rtype: float
"""
kind_score = self.__score_kind(movie_given, movie_guessed)
name_score = self.__score_name(movie_given.name, movie_guessed.name)
score = kind_score * 0.4 + name_score * 0.6
return (movie_given, score)
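# Worked example of the weighting: a kind score of 0.75 (tv series narrowed
# down to an episode) and a name score of 0.8 combine to
# 0.4 * 0.75 + 0.6 * 0.8 = 0.78.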
def identify_one_movie(moviefile, movies, asker):
"""
Identify one movie
@param moviefile: Movie to identify
@param movies: Movies we found from osdb
@param asker: Asker instance to get opinion from user
"""
movie_guess = moviefile.guess()
# Give a note to each movies, against what we have
scores = [MovieScore().score(movie, movie_guess) for movie in movies]
# sort scores
scores.sort(key=operator.itemgetter(1))
# Finally, let's decide amongst all movies
movie = asker.pick(moviefile, scores)
if movie:
moviefile.update_info(movie)
def identify_movies(moviefiles, osdb, asker = None):
"""
Identify movie information from moviesfiles
    Most of the logic to identify movies lives here.
    This would be the place for an exhaustive description of how we
    try to identify the movie
@param moviefiles: Movies we want to identify
@param osdb: OSDb Handler
@param asker: Asker instance to get input from user
"""
if not asker:
asker = AutomaticAsker()
movies_info = osdb.check_hashes(moviefiles.keys())
for moviehash, moviefile in moviefiles.items():
try:
identify_one_movie(
moviefile,
[Movie(info['MovieName'],
kind=info['MovieKind'],
imdbid=info['MovieImdbID'],
season=info['SeriesSeason'],
episode=info['SeriesEpisode'])
for info in movies_info[moviehash]],
asker)
except KeyError:
pass
def select_language(code):
"""
    Get the 2-letter and 3-letter codes for the language given as code
    If one of the versions doesn't exist, use the other one for both
@param code: Language code we want to evaluate
@return: Tuple (two_letters_code, three_letters_code)
"""
language = iso639.find_language(code)
if not language:
# Use default value
return ('en', 'eng')
else:
if not language['2L']:
language['2L'] = language['3L']
if not language['3L']:
language['3L'] = language['2L']
return (language['2L'], language['3L'])
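# Example of the fallback behaviour: an unrecognized code returns the
# ('en', 'eng') default, and a language known only by its 3-letter code gets
# that code reused for the 2-letter slot as well.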
def main():
parser = argparse.ArgumentParser(
description="Get information about a movie")
parser.add_argument('movie', help='Movie to investigate', nargs='+')
parser.add_argument('-l', '--language', default='eng')
parser.add_argument('-f', '--force', action='store_true')
args = parser.parse_args()
osdb = opensubtitles.OpenSubtitles()
asker = TextAsker(0.7)
moviefiles = [MovieFile(movie) for movie in args.movie]
if not args.force:
for moviefile in list(moviefiles):
if moviefile.has_subtitle():
print moviefile.path, \
'already has a subtitle (use -f to force)'
moviefiles.remove(moviefile)
identify_movies({mfile.hash: mfile for mfile in moviefiles},
osdb, asker)
print
print 'Identification summary'
print
for moviefile in moviefiles:
if not moviefile.name:
print 'Unable to identify:'
print moviefile
lang_2l, lang_3l = select_language(args.language)
subs = osdb.download_subtitles(
[moviefile.osdb_criteria() for moviefile in moviefiles],
language=lang_3l)
for moviefile in moviefiles:
sub = None
if moviefile.hash in subs:
sub = subs[moviefile.hash]
elif moviefile.kind == Movie.EPISODE:
sub = tvsubtitles.download_subtitle(moviefile.name,
moviefile.season,
moviefile.episode,
lang_2l)
if not sub:
print "No subtitle found for this movie"
continue
with open(moviefile.subname(), 'w') as f:
f.write(sub)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from . import conf
from .base import (
_key, Base, MixinSerializable, BaseHour, BaseDay, BaseWeek, BaseMonth,
BaseYear
)
from .timelines import _totimerange
__all__ = ['TIME_INDEX_KEY_NAMESPACE', 'TimeIndexedKey', 'HourIndexedKey',
           'DayIndexedKey', 'WeekIndexedKey', 'MonthIndexedKey',
           'YearIndexedKey']
TIME_INDEX_KEY_NAMESPACE = 'tik'
class TimeIndexedKey(MixinSerializable, Base):
"""
Key/value storage where keys indexed by time. This allows you continuously
process newly created/updated keys.
Examples ::
index = TimeIndexedKey('users')
# Save users profiles:
index.set('uid1', user_data1, update_index=True)
index.set('uid2', user_data2, update_index=True)
# Get user ids added in the last 10 sec. (sorted by time added)
result = index.keys(time.time() - 10, limit=2)
for uid, timestamp in result:
print 'User {0} added at {1}'.format(uid, timestamp)
    # Get associated data for these ids
result = index.values('uid1', 'uid2')
for uid, data in result:
print 'User {0} -> {1}'.format(uid, data)
"""
    namespace = TIME_INDEX_KEY_NAMESPACE
clonable_attrs = ['serializer']
key_format = '{self.name}'
index_key_format = '{self.name}_index'
def __init__(self, name, client='default', serializer=None):
super(TimeIndexedKey, self).__init__(name, client)
self.serializer = conf.get_serializer(serializer)
@property
def index_key(self):
base_key = self.index_key_format.format(self=self)
return _key(base_key, self.namespace)
def value_key(self, key):
return '{0}:{1}'.format(self.key, key)
def __len__(self):
return self.count()
def __contains__(self, key):
value_key = self.value_key(key)
return self.client.exists(value_key)
def __setitem__(self, key, value):
self.set(key, value, update_index=True)
def __getitem__(self, key):
value, timestamp = self.get(key)
if value is None:
raise KeyError(key)
return value, timestamp
def __delitem__(self, key):
existed = self.remove(key)
if not existed:
raise KeyError(key)
def set(self, key, value, timestamp=None, update_index=None):
""" By default we trying to create index if it doesn't exist. """
# If `update_index` is True force to update index
if update_index:
return self._set(key, value, timestamp)
        # If `update_index` is None, create the index only if it does not exist yet.
index_time = self.client.zscore(self.index_key, key)
if index_time is None and update_index is None:
return self._set(key, value, timestamp)
        # Else just update the associated value
value_key, value = self.value_key(key), self.dumps(value)
self.client.set(value_key, value)
return index_time
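    # Illustrative semantics of `update_index` (sketch): True always
    # (re)indexes the key at the given/current timestamp, None indexes it only
    # if it is not in the index yet, and False leaves the index untouched and
    # merely overwrites the stored value.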
def _set(self, key, value, timestamp=None):
timestamp = timestamp or time.time()
value_key, value = self.value_key(key), self.dumps(value)
with self.client.pipeline() as pipe:
pipe.multi()
pipe.zrem(self.index_key, key)
pipe.zadd(self.index_key, timestamp, key)
pipe.set(value_key, value)
pipe.execute()
return timestamp
def get(self, key):
value_key = self.value_key(key)
with self.client.pipeline() as pipe:
pipe.zscore(self.index_key, key)
pipe.get(value_key)
timestamp, value = pipe.execute()
if value is not None:
return self.loads(value), timestamp
return value, timestamp
def remove(self, key):
value_key = self.value_key(key)
with self.client.pipeline() as pipe:
pipe.multi()
pipe.delete(value_key)
pipe.zrem(self.index_key, key)
existed, _ = pipe.execute()
return existed
def values(self, *keys):
        assert keys, 'At least one key should be given.'
value_keys = [self.value_key(k) for k in keys]
values = self.client.mget(*value_keys)
result = []
for key, value in zip(keys, values):
if value is not None:
value = self.loads(value)
result.append((key, value))
return result
def keys(self, start_time=None, end_time=None, limit=None,
with_timestamp=False):
start_time, end_time = _totimerange(start_time, end_time)
offset = None if limit is None else 0
items = self.client.zrangebyscore(self.index_key, start_time, end_time,
offset, limit, with_timestamp)
return items
def timerange(self, start_time=None, end_time=None, limit=None):
keys_with_timestamp = self.keys(start_time, end_time, limit, True)
value_keys = [self.value_key(k) for k, _ in keys_with_timestamp]
values = self.client.mget(*value_keys)
result = []
for (key, timestamp), value in zip(keys_with_timestamp, values):
if value is not None:
value = self.loads(value)
result.append((key, value, timestamp))
return result
def count_timerange(self, start_time=None, end_time=None):
start_time, end_time = _totimerange(start_time, end_time)
return self.client.zcount(self.index_key, start_time, end_time)
def delete_timerange(self, start_time=None, end_time=None):
start_time, end_time = _totimerange(start_time, end_time)
keys = self.keys(start_time, end_time)
value_keys = [self.value_key(k) for k in keys]
if value_keys:
with self.client.pipeline() as pipe:
pipe.delete(*value_keys)
pipe.zremrangebyscore(self.index_key, start_time, end_time)
pipe.execute()
else:
self.client.zremrangebyscore(self.index_key, start_time, end_time)
def has_key(self, key):
return key in self
def count(self):
return self.client.zcard(self.index_key)
def delete(self):
value_key_pattern = self.value_key('*')
keys = self.client.keys(value_key_pattern)
self.client.delete(self.index_key, *keys)
class HourIndexedKey(BaseHour, TimeIndexedKey):
pass
class DayIndexedKey(BaseDay, TimeIndexedKey):
pass
class WeekIndexedKey(BaseWeek, TimeIndexedKey):
pass
class MonthIndexedKey(BaseMonth, TimeIndexedKey):
pass
class YearIndexedKey(BaseYear, TimeIndexedKey):
pass
|
|
# This file is kept only for backwards compatibility. Edit the one in ../mapgen
import re
import warnings
class NotImplementedWarning(UserWarning):
pass
pattern = re.compile(r'^([^(]+)\((.+?)\)?$', re.DOTALL)
def get_command(instruction):
match = pattern.match(instruction)
if match is None:
command = Command()
command.set_command_name(instruction)
return command
Command_ = globals().get(match.group(1), Command)
command = Command_(*match.group(2).split(','))
command.set_command_name(match.group(1))
return command
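# Illustrative parse (sketch): get_command("LS(SOLD,1,CHBLK)") returns an LS
# instance with pattern 'SOLD', width '1' and color 'CHBLK', while an
# instruction whose name has no class in this module falls back to the
# warning-only Command stub.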
class Command:
def __init__(self, *args):
self.command = ''
def set_command_name(self, command):
self.command = command
def __call__(self, chartsymbols, layer, geom_type):
warnings.warn('Command not implemented: {}'.format(self.command),
NotImplementedWarning)
return ''
def __iter__(self):
return iter([self])
def __add__(self, other):
if isinstance(other, Command):
return [self, other]
if isinstance(other, list):
return [self] + other
return NotImplemented
def __radd__(self, other):
if isinstance(other, list):
return other + [self]
return NotImplemented
@staticmethod
def units(value):
return float(value)
class LS(Command):
"""ShowLine: 9.3"""
patterns = {
'SOLD': '',
'DASH': 'PATTERN 12 6 END',
'DOTT': 'PATTERN 2 4 END',
}
def __init__(self, pattern, width, color):
self.pattern = pattern
self.width = width
self.color = color
def __call__(self, chartsymbols, layer, geom_type):
style = '''
STYLE
COLOR {color}
WIDTH {width}
LINECAP ROUND
LINEJOIN ROUND
{pattern}
END
'''.format(
color=chartsymbols.color_table[self.color].rgb,
width=self.units(self.width),
pattern=self.patterns.get(self.pattern, ''),
)
if geom_type == 'POLYGON':
return {'LINE': style}
else:
return style
class TE(Command):
"""ShowText 9.1"""
hjustHash = {
'1': 'C',
'2': 'R',
'3': 'L'
}
vjustHash = {
'1': 'L',
'2': 'C',
'3': 'T'
}
spaceHash = {
        '1': '', # 1 is not used
'2': '', # Standard spaces
'3': 'MAXLENGTH 8\n WRAP " "' # Wrap on spaces
}
def __init__(self, format, attributes, hjust, vjust, space, chars,
xoffs, yoffs, colour, display):
self.format = format
self.attributes = attributes.strip("'")
self.hjust = hjust
self.vjust = vjust
self.space = space
self.chars = chars
self.xoffs = xoffs
self.yoffs = yoffs
self.colour = colour
self.display = display
super().__init__()
def __call__(self, chartsymbols, layer, geom_type):
text = re.sub(r'(%[^ ]*[a-z])[^a-z]', self.get_label_text, self.format)
if ' + ' in text:
text = '({})'.format(text)
try:
label_field = re.search(r'(\[[^\]]+\])', text).group(1)
label_expr = 'EXPRESSION ("{}" > "0")'.format(label_field)
except AttributeError:
            # no [field] reference was found in the text
            label_expr = ''
return """
LABEL # {}
{}
TYPE TRUETYPE
FONT SC
PARTIALS TRUE
MINDISTANCE 0
POSITION {}
{}
SIZE {}
OFFSET {} {}
COLOR {}
TEXT {}
END
""".format(
self.command,
label_expr,
self.vjustHash[self.vjust] + self.hjustHash[self.hjust],
self.spaceHash[self.space],
self.chars[-3:-1],
self.xoffs, self.yoffs,
chartsymbols.color_table[self.colour].rgb,
text
)
def get_label_text(self, matches):
# TODO: Support multi attributes
s = self.attributes
if matches.group(1) == '%s':
return matches.group(0).replace('%s', '[{}]'.format(s))
else:
return matches.group(0).replace(
matches.group(1),
"' + tostring([" + s + "], '" + matches.group(1) + "') + '")
class TX(TE):
"""ShowText 9.1"""
def __init__(self, attributes, hjust, vjust, space, chars,
xoffs, yoffs, colour, display):
format = "'%s'"
super().__init__(format, attributes, hjust, vjust, space, chars,
xoffs, yoffs, colour, display)
class SY(Command):
"""ShowPoint 9.2"""
def __init__(self, symbol, rot=0):
self.symbol = symbol
self.rot_field = None
try:
self.rot = int(rot)
except ValueError:
self.rot_field = rot
self.rot = '[{}_CAL]'.format(rot)
def __call__(self, chartsymbols, layer, geom_type):
# OFFSET
x = 0
y = 0
# Hardcoded value to skip typo in official XML
# TODO: Validate that the symbol exists
if self.symbol == 'BCNCON81':
return ''
if self.symbol == 'FOGSIG01':
x = -15
if self.symbol in chartsymbols.symbols_def:
symbol = chartsymbols.symbols_def[self.symbol]
x += -(symbol['size'][0] // 2)
x += symbol['pivot'][0]
y += symbol['size'][1] // 2
y -= symbol['pivot'][1]
geomtransform = ''
if geom_type == 'POLYGON':
geomtransform = 'GEOMTRANSFORM centroid'
return """
STYLE
{geomtransform}
SYMBOL "{symbol}"
OFFSET {x} {y}
ANGLE {angle}
GAP 2000
END
""".format(symbol=self.symbol, x=x, y=y, angle=self.rot,
geomtransform=geomtransform)
class LC(Command):
"""ShowLine 9.3"""
def __init__(self, style):
self.symbol = style
def __call__(self, chartsymbols, layer, geom_type):
style = chartsymbols.line_symbols[self.symbol].as_style(
chartsymbols.color_table)
if geom_type == 'POLYGON':
return {'LINE': style}
else:
return style
class AC(Command):
"""ShowArea 9.4"""
def __init__(self, color, transparency='0'):
self.color = color
        # MapServer uses Opacity, OpenCPN uses transparency
self.opacity = (4 - int(transparency)) * 25
def __call__(self, chartsymbols, layer, geom_type):
return """
STYLE
COLOR {}
OPACITY {}
END
""".format(chartsymbols.color_table[self.color].rgb, self.opacity)
class AP(Command):
"""ShowArea 9.4"""
def __init__(self, pattern):
self.pattern = pattern
def __call__(self, chartsymbols, layer, geom_type):
return chartsymbols.area_symbols[self.pattern].as_style(
chartsymbols.color_table, layer)
class CS(Command):
""" CallSymproc 9.5"""
def __init__(self, proc):
self.proc = proc
def __call__(self, chartsymbols, layer, geom_type):
warnings.warn(
'Symproc left in lookup: {}'.format((self.proc, layer)),
NotImplementedWarning)
return ''
class _MS(Command):
"""Command that emits hardcoded mapserver code.
To be used with CS procedures that are too complex to be represented by S52
instructions.
"""
def __init__(self, *style):
# get_command splits on comma. we need to readd the commas if there
# were any
self.style = ','.join(style)
def __call__(self, chartsymbols, layer, geom_type):
return self.style.format(color=chartsymbols.color_table)
|
|
"""This module adds a reST directive to sphinx that generates cyclus agent
documentation based on its annotations and schema. The user simply specifies
the normal cyclus agent spec for the agent that they wish to document.
For example,
.. cyclus-agent:: tests:TestFacility:TestFacility
"""
from __future__ import print_function, unicode_literals
import sys
import os.path
import re
import time
import textwrap
import warnings
import subprocess
import xml.dom.minidom
from collections import OrderedDict
try:
    from collections.abc import Mapping, Sequence  # Python 3.3+
except ImportError:
    from collections import Mapping, Sequence  # Python 2
try:
import simplejson as json
JSONDecodeError = json.JSONDecodeError
except ImportError:
import json
JSONDecodeError = ValueError
from docutils import io, nodes, statemachine, utils
try:
from docutils.utils.error_reporting import ErrorString # the new way
except ImportError:
from docutils.error_reporting import ErrorString # the old way
from docutils.parsers.rst import Directive, convert_directive_function
from docutils.parsers.rst import directives, roles, states
from docutils.parsers.rst.roles import set_classes
from docutils.transforms import misc
from docutils.statemachine import ViewList
from sphinx.util.nodes import nested_parse_with_titles
if sys.version_info[0] == 2:
STRING_TYPES = (str, unicode, basestring)
IS_PY3 = False
def indent(text, prefix):
lines = text.splitlines(True)
s = prefix + prefix.join(lines)
return s
elif sys.version_info[0] >= 3:
STRING_TYPES = (str,)
IS_PY3 = True
indent = textwrap.indent
def contains_resbuf(type_str):
bufs = ('cyclus::toolkit::ResBuf',
'cyclus::toolkit::ResMap',
'cyclus::toolkit::ResourceBuff')
for buf in bufs:
if buf in type_str:
return True
return False
def ensure_tuple_or_str(x):
if isinstance(x, STRING_TYPES):
return x
else:
return tuple(map(ensure_tuple_or_str, x))
def type_to_str(t):
t = ensure_tuple_or_str(t)
if isinstance(t, STRING_TYPES):
return t
else:
s = t[0] + '<'
s += type_to_str(t[1])
for thing in t[2:]:
s += ', ' + type_to_str(thing)
s += '>'
return s
def nicestr(x):
if IS_PY3:
newx = str(x)
elif isinstance(x, STRING_TYPES):
newx = str(x)
elif isinstance(x, Sequence):
newx = '[' + ', '.join(map(nicestr, x)) + ']'
elif isinstance(x, Mapping):
newx = '{'
newxs = [nicestr(k) + ': ' + nicestr(v) for k, v in sorted(x.items())]
newx += ', '.join(newxs)
newx += '}'
else:
newx = str(x)
return newx
def prepare_type(cpptype, othertype):
"""Updates othertype to conform to the length of cpptype using None's.
"""
if not isinstance(cpptype, STRING_TYPES):
if isinstance(othertype, STRING_TYPES):
othertype = [othertype]
if othertype is None:
othertype = [None] * len(cpptype)
elif len(othertype) < len(cpptype):
othertype.extend([None] * (len(cpptype) - len(othertype)))
return othertype
else:
return othertype
PRIMITIVES = {'bool', 'int', 'float', 'double', 'std::string', 'cyclus::Blob',
'boost::uuids::uuid', }
alltypes = frozenset(['anyType', 'anySimpleType', 'string', 'boolean', 'decimal',
'float', 'double', 'duration', 'dateTime', 'time', 'date',
'gYearMonth', 'gYear', 'gMonthDay', 'gDay', 'gMonth',
'hexBinary', 'base64Binary', 'anyURI', 'QName', 'NOTATION',
'normalizedString', 'token', 'language', 'NMTOKEN',
'NMTOKENS', 'Name', 'NCName', 'ID', 'IDREF', 'IDREFS',
'ENTITY', 'ENTITIES', 'integer', 'nonPositiveInteger',
'negativeInteger', 'long', 'int', 'short', 'byte',
'nonNegativeInteger', 'unsignedLong', 'unsignedInt',
'unsignedShort', 'unsignedByte', 'positiveInteger'])
default_types = {
# Primitive types
'bool': 'boolean',
'std::string': 'string',
'int': 'int',
'float': 'float',
'double': 'double',
'cyclus::Blob': 'string',
'boost::uuids::uuid': 'token',
# UI types
'nuclide': 'string',
'commodity': 'string',
'incommodity': 'string',
'outcommodity': 'string',
'range': None,
'combobox': None,
'facility': None,
'prototype': 'string',
'recipe': 'string',
'none': None,
None: None,
'': None,
}
special_uitypes = {
'nuclide': 'string',
'recipe': 'string',
'prototype': 'string',
'commodity': 'string',
'incommodity': 'string',
'outcommodity': 'string',
}
def _type(cpp, given=None):
"""Finds a schema type for a C++ type with a possible type given."""
if given is not None:
if given in alltypes:
return given
elif given in default_types:
return default_types[given] or default_types[cpp]
msg = ("Note that {0!r} is not a valid XML schema data type, see "
"http://www.w3.org/TR/xmlschema-2/ for more information.")
raise TypeError(msg.format(given))
return default_types[cpp]
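# Illustrative mappings: _type('double') -> 'double'; _type('std::string',
# 'token') -> 'token' (a valid XML schema type passes through); and
# _type('int', 'range') -> 'int' (a UI type with no schema equivalent falls
# back to the C++ default).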
def build_xml_sample(cpptype, schematype=None, uitype=None, names=None, units=None):
schematype = prepare_type(cpptype, schematype)
uitype = prepare_type(cpptype, uitype)
names = prepare_type(cpptype, names)
units = prepare_type(cpptype, units)
impl = ''
t = cpptype if isinstance(cpptype, STRING_TYPES) else cpptype[0]
if t in PRIMITIVES:
name = 'val'
if names is not None:
name = names
d_type = _type(t, schematype or uitype)
d_type = uitype if uitype in special_uitypes else d_type
if isinstance(units, STRING_TYPES):
impl += '<{0}>[{1} ( {2} )]</{0}>'.format(name, d_type, units)
else:
impl += '<{0}>[{1}]</{0}>'.format(name, d_type)
elif t in ['std::list', 'std::set', 'std::vector']:
name = 'list' if names[0] is None else names[0]
impl += '<{0}>'.format(name)
impl += build_xml_sample(cpptype[1], schematype[1], uitype[1], names[1], units[1])
impl += build_xml_sample(cpptype[1], schematype[1], uitype[1], names[1], units[1])
impl += '...'
impl += '</{0}>'.format(name)
elif t == 'std::map':
name = 'map'
if isinstance(names[0], STRING_TYPES):
names[0] = [names[0], None]
elif names[0] is None:
names[0] = [name, None]
if names[0][0] is not None:
name = names[0][0]
itemname = 'item' if names[0][1] is None else names[0][1]
keynames = 'key' if isinstance(cpptype[1], STRING_TYPES) else ['key']
if names[1] is not None:
keynames = names[1]
valnames = 'val' if isinstance(cpptype[2], STRING_TYPES) else ['val']
        if names[2] is not None:
valnames = names[2]
impl += '<{0}>'.format(name)
impl += '<{0}>'.format(itemname)
impl += build_xml_sample(cpptype[1], schematype[1], uitype[1], keynames, units[1])
impl += build_xml_sample(cpptype[2], schematype[2], uitype[2], valnames, units[2])
impl += '</{0}>'.format(itemname)
impl += '<{0}>'.format(itemname)
impl += build_xml_sample(cpptype[1], schematype[1], uitype[1], keynames, units[1])
impl += build_xml_sample(cpptype[2], schematype[2], uitype[2], valnames, units[2])
impl += '</{0}>'.format(itemname)
impl += '...'
impl += '</{0}>'.format(name)
elif t == 'std::pair':
name = 'pair'
if names[0] is not None:
name = names[0]
firstname = 'first' if isinstance(cpptype[1], STRING_TYPES) else ['first']
if names[1] is not None:
firstname = names[1]
secondname = 'second' if isinstance(cpptype[2], STRING_TYPES) else ['second']
if names[2] is not None:
secondname = names[2]
impl += '<{0}>'.format(name)
impl += build_xml_sample(cpptype[1], schematype[1], uitype[1], firstname, units[1])
impl += build_xml_sample(cpptype[2], schematype[2], uitype[2], secondname, units[2])
impl += '</{0}>'.format(name)
else:
        msg = 'Unsupported type {0}'.format(t)
raise RuntimeError(msg)
s = xml.dom.minidom.parseString(impl)
s = s.toprettyxml(indent=' ')
_, lines = s.split("\n", 1)
return lines
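# Hedged example: build_xml_sample('double', names='flow', units='kg/s')
# should render the single element <flow>[double ( kg/s )]</flow>
# (pretty-printed by minidom above).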
def build_json_sample(cpptype, schematype=None, uitype=None, names=None, units=None, default=None):
schematype = prepare_type(cpptype, schematype)
uitype = prepare_type(cpptype, uitype)
names = prepare_type(cpptype, names)
units = prepare_type(cpptype, units)
impl = ''
t = cpptype if isinstance(cpptype, STRING_TYPES) else cpptype[0]
if t in PRIMITIVES:
name = 'val'
if names is not None:
name = names
d_type = _type(t, schematype or uitype)
d_type = uitype if uitype in special_uitypes else d_type
        # dumps the raw string; encoding to bytes first would break json.dumps on Python 3
        defstr = json.dumps(default) if isinstance(default, STRING_TYPES) else default
if default is None or defstr == '"null"':
defstr = '"<required>"'
if isinstance(units, STRING_TYPES):
impl += '{{"{0}": {1}}} # {2}, {3}'.format(name, defstr, d_type, units)
else:
impl += '{{"{0}": {1}}} # {2}'.format(name, defstr, d_type)
elif t in ['std::list', 'std::set', 'std::vector']:
name = 'list' if names[0] is None else names[0]
impl += '{{"{0}":'.format(name)
x = build_json_sample(cpptype[1], schematype[1], uitype[1], names[1], units[1])
pre, post = x.split(':', 1)
post, _ = post.rsplit('}', 1)
impl += indent(pre + ': [\n', " ")
impl += indent(post.rstrip() + ",\n", ' ')
impl += indent(post.rstrip() + ",\n", ' ')
impl += indent('...\n', ' ')
impl += ']}}'
elif t == 'std::map':
name = 'map'
if isinstance(names[0], STRING_TYPES):
names[0] = [names[0], None]
elif names[0] is None:
names[0] = [name, None]
if names[0][0] is not None:
name = names[0][0]
itemname = 'item' if names[0][1] is None else names[0][1]
keynames = 'key' if isinstance(cpptype[1], STRING_TYPES) else ['key']
if names[1] is not None:
keynames = names[1]
valnames = 'val' if isinstance(cpptype[2], STRING_TYPES) else ['val']
        if names[2] is not None:
valnames = names[2]
impl += '{{"{0}": {{\n'.format(name)
impl += indent('"{0}": [{{\n'.format(itemname), ' ')
x = build_json_sample(cpptype[1], schematype[1], uitype[1], keynames, units[1])
pre, post = x.split('{', 1)
post, _ = post.rsplit('}', 1)
impl += indent(post.rstrip() + ',\n', ' ')
y = build_json_sample(cpptype[2], schematype[2], uitype[2], valnames, units[2])
pre, post = y.split('{', 1)
post, _, _ = post.rpartition('}')
impl += indent(post + '},\n', ' ')
pre, post = x.split('{', 1)
post, _ = post.rsplit('}', 1)
impl += indent('{' + post.rstrip() + ',\n', ' ')
pre, post = y.split('{', 1)
post, _, _ = post.rpartition('}')
impl += indent(post + '},\n', ' ')
impl += indent('...\n', ' ')
impl += ']}}'
elif t == 'std::pair':
name = 'pair'
if names[0] is not None:
name = names[0]
firstname = 'first' if isinstance(cpptype[1], STRING_TYPES) else ['first']
if names[1] is not None:
firstname = names[1]
secondname = 'second' if isinstance(cpptype[2], STRING_TYPES) else ['second']
if names[2] is not None:
secondname = names[2]
x = build_json_sample(cpptype[1], schematype[1], uitype[1], firstname, units[1])
impl += '{{"{0}": {{\n'.format(name)
pre, post = x.split('{', 1)
post, _ = post.rsplit('}', 1)
impl += indent(post.rstrip() + ',\n', ' ')
y = build_json_sample(cpptype[2], schematype[2], uitype[2], secondname, units[2])
pre, post = y.split('{', 1)
post, _, _ = post.rpartition('}')
impl += indent(post.rstrip() + '\n', ' ')
impl += " " + '}\n}'
else:
        msg = 'Unsupported type {0}'.format(t)
raise RuntimeError(msg)
return impl
class CyclusAgent(Directive):
"""The cyclus-agent directive, which is based on constructing a list of
of string lines of restructured text and then parsing it into its own node.
"""
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {'no-sep': directives.flag}
has_content = False
def load_schema(self):
cmd = 'cyclus --agent-schema {0}'.format(self.agentspec)
stdout = subprocess.check_output(cmd, shell=True)
self.schema = stdout.decode()
def load_annotations(self):
cmd = 'cyclus --agent-annotations {0}'.format(self.agentspec)
stdout = subprocess.check_output(cmd, shell=True)
try:
j = json.loads(stdout.decode())
except JSONDecodeError:
raise ValueError("Error reading agent annotations for "\
"{0}.".format(self.agentspec))
self.annotations = j
def append_name(self):
path, lib, agent = self.agentspec.split(':')
name = path + ':' + lib + ':**' + agent + '**'
self.lines += [name, '~' * len(name), '']
skipdoc = {'doc', 'tooltip', 'vars', 'entity', 'parents', 'all_parents'}
def append_doc(self):
if 'tooltip' in self.annotations:
self.lines += ['*' + self.annotations['tooltip'] + '*', '']
if 'doc' in self.annotations:
self.lines += self.annotations['doc'].splitlines()
self.lines.append('')
# must use list constructor to maintain order
keynames = OrderedDict([
('name', 'Full Archetype Name'),
('entity', 'Simulation Entity Type'),
('parents', 'Interfaces'),
('all_parents', 'All Interfaces'),
])
def append_otherinfo(self):
header = 'Other Info'
self.lines += [header, ';' * len(header), '']
for key, name in self.keynames.items():
val = self.annotations.get(key, None)
if val is None:
continue
self.lines.append('* **{0}**: {1}'.format(name, nicestr(val)))
for key, val in sorted(self.annotations.items()):
if key in self.skipdoc or key in self.keynames.keys():
continue
self.lines.append('* **{0}**: {1}'.format(key, val))
self.lines.append('')
skipstatevar = {'type', 'index', 'shape', 'doc', 'tooltip', 'default',
'units', 'alias', 'uilabel', 'uitype', None}
def _sort_statevars(self, item):
key, val = item
vars = self.annotations.get('vars', {})
while not isinstance(val, Mapping):
# resolves aliasing
key = val
val = vars[key]
return val['index']
def append_statevars(self):
vars = OrderedDict(sorted(self.annotations.get('vars', {}).items(),
key=self._sort_statevars))
if len(vars) == 0:
return
lines = self.lines
header = 'State Variables'
lines += [header, ';' * len(header), '']
for name, info in vars.items():
if isinstance(info, STRING_TYPES):
# must be an alias entry - skip it
continue
elif contains_resbuf(type_to_str(info['type'])):
# resbufs are not directly user accessible
continue
elif 'internal' in info:
continue
alias = info.get('alias', name)
if isinstance(alias, STRING_TYPES):
name = alias
elif isinstance(alias[0], STRING_TYPES):
name = alias[0]
else:
name = alias[0][0]
# add name
ts = type_to_str(info['type'])
n = ":{0}: ``{1}``" .format(name, ts)
if 'default' in info:
n += ', optional ('
if info['type'] == 'std::string':
n += 'default="{0}"'.format(info['default'])
else:
n += 'default={0}'.format(info['default'])
n += ')'
if 'shape' in info:
n += ', shape={0}'.format(info['shape'])
lines += [n, '']
# add docs
ind = " " * 4
if 'doc' in info:
doc = ind + info['doc'].replace('\n', '\n'+ind)
lines += doc.splitlines()
lines.append('')
t = info['type']
uitype = info.get('uitype', None)
units = info.get('units', None)
schematype = info.get('schematype', None)
labels = info.get('alias', None)
if labels is None:
labels = name if isinstance(t, STRING_TYPES) else [name]
# add everything else
for key, val in info.items():
if key in self.skipstatevar:
continue
self.lines.append(ind + ':{0}: {1}'.format(key, val))
self.lines.append('')
self.lines += [ind + '**XML:**', '', ind + '.. code-block:: xml', '']
schemalines = build_xml_sample(t, schematype, uitype, labels, units).split('\n')
previndent = ''
for l in schemalines:
if len(l.strip()) > 0:
if l.strip() == '...':
l = previndent + l.strip()
self.lines.append(ind + ' ' + l)
previndent = ' ' * (len(l) - len(l.lstrip()))
self.lines.append('')
self.lines += [ind + '**JSON:**', '', ind + '.. code-block:: yaml', '']
schemalines = build_json_sample(t, schematype, uitype, labels, units, default=info.get('default', 'null')).split('\n')
previndent = ''
for l in schemalines:
if len(l.strip()) > 0:
if l.strip() == '...':
l = previndent + l.strip()
self.lines.append(ind + ' ' + l)
previndent = ' ' * (len(l) - len(l.lstrip()))
self.lines.append('')
def append_schema(self):
header = 'XML Input Schema'
self.lines += [header, ';' * len(header), '']
lines = self.lines
lines += ['', '.. code-block:: xml', '']
ind = " " * 4
s = ind + self.schema.replace('\n', '\n' + ind) + '\n'
lines += s.splitlines()
def append_sep(self):
if 'no-sep' in self.options:
return
self.lines += ['', '--------', '']
def run(self):
# load agent
self.agentspec = self.arguments[0]
self.schema = ""
self.annotations = {}
try:
self.load_schema()
except OSError:
warnings.warn("WARNING: Failed to load schema, proceeding without schema",
RuntimeWarning)
try:
self.load_annotations()
except OSError:
warnings.warn("WARNING: Failed to load annotations, proceeding without "
"annotations", RuntimeWarning)
# set up list of rst strings
self.lines = []
self.append_name()
self.append_doc()
self.append_statevars()
self.append_otherinfo()
self.append_schema()
self.append_sep()
# hook to docutils
src, lineno = self.state_machine.get_source_and_line(self.lineno)
vl = ViewList(self.lines, source=src)
node = nodes.paragraph()
nested_parse_with_titles(self.state, vl, node)
return node.children
def setup(app):
app.add_directive('cyclus-agent', CyclusAgent)
if __name__ == "__main__":
t = ["std::vector", "double"]
#t = 'double'
s = build_json_sample(t, default=[42.0])
print(s)
|
|
from __future__ import absolute_import
import hashlib
import numpy as nm
import warnings
import scipy.sparse as sps
import six
from six.moves import range
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import LinearSolver
def solve(mtx, rhs, solver_class=None, solver_conf=None):
"""
Solve the linear system with the matrix `mtx` and the right-hand side
`rhs`.
Convenience wrapper around the linear solver classes below.
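Example (a minimal doctest-style sketch; assumes the default ScipyDirect
solver accepts the empty configuration this wrapper supplies)::
>>> import scipy.sparse as sps
>>> import numpy as nm
>>> mtx = sps.csr_matrix(nm.array([[4.0, 1.0], [1.0, 3.0]]))
>>> x = solve(mtx, nm.array([1.0, 2.0]))
>>> bool(nm.allclose(mtx * x, [1.0, 2.0]))
True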
"""
solver_class = get_default(solver_class, ScipyDirect)
solver_conf = get_default(solver_conf, {})
solver = solver_class(solver_conf, mtx=mtx)
solution = solver(rhs)
return solution
def _get_cs_matrix_hash(mtx, chunk_size=100000):
def _gen_array_chunks(arr):
ii = 0
while len(arr[ii:]):
yield arr[ii:ii+chunk_size].tobytes()
ii += chunk_size
sha1 = hashlib.sha1()
for chunk in _gen_array_chunks(mtx.indptr):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.indices):
sha1.update(chunk)
for chunk in _gen_array_chunks(mtx.data):
sha1.update(chunk)
digest = sha1.hexdigest()
return digest
def _is_new_matrix(mtx, mtx_digest, force_reuse=False):
if not isinstance(mtx, sps.csr_matrix):
return True, mtx_digest
if force_reuse:
return False, mtx_digest
id0, digest0 = mtx_digest
id1 = id(mtx)
digest1 = _get_cs_matrix_hash(mtx)
if (id1 == id0) and (digest1 == digest0):
return False, (id1, digest1)
return True, (id1, digest1)
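# Typical use (a sketch, mirroring ScipyDirect.presolve() below): a solver
# keeps an (id, digest) pair in self.mtx_digest and calls
#   is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
# before an expensive factorization, refactorizing and storing the new
# digest only when the matrix has actually changed.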
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None,
**kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
assert_(x0.shape[0] == rhs.shape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
context=context, **kwargs)
if isinstance(result, tuple):
result, n_iter = result
else:
n_iter = -1 # Number of iterations is undefined/unavailable.
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = n_iter
return result
return _standard_call
def petsc_call(call):
"""
Decorator handling argument preparation and timing for PETSc-based linear
solvers.
"""
def _petsc_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None,
context=None, **kwargs):
timer = Timer(start=True)
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
context = get_default(context, self.context)
comm = get_default(comm, self.comm)
mshape = mtx.size if isinstance(mtx, self.petsc.Mat) else mtx.shape
rshape = [rhs.size] if isinstance(rhs, self.petsc.Vec) else rhs.shape
assert_(mshape[0] == mshape[1] == rshape[0])
if x0 is not None:
xshape = [x0.size] if isinstance(x0, self.petsc.Vec) else x0.shape
assert_(xshape[0] == rshape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
comm, context=context, **kwargs)
elapsed = timer.stop()
if status is not None:
status['time'] = elapsed
status['n_iter'] = self.ksp.getIterationNumber()
return result
return _petsc_call
class ScipyDirect(LinearSolver):
"""
Direct sparse solver from SciPy.
"""
name = 'ls.scipy_direct'
_parameters = [
('method', "{'auto', 'umfpack', 'superlu'}", 'auto', False,
'The actual solver to use.'),
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, method=None, **kwargs):
LinearSolver.__init__(self, conf, solve=None, **kwargs)
um = self.sls = None
if method is None:
method = self.conf.method
aux = try_imports(['import scipy.linsolve as sls',
'import scipy.splinalg.dsolve as sls',
'import scipy.sparse.linalg.dsolve as sls'],
'cannot import scipy sparse direct solvers!')
if 'sls' in aux:
self.sls = aux['sls']
else:
raise ValueError('SuperLU not available!')
if method in ['auto', 'umfpack']:
aux = try_imports([
'import scipy.linsolve.umfpack as um',
'import scipy.splinalg.dsolve.umfpack as um',
'import scipy.sparse.linalg.dsolve.umfpack as um',
'import scikits.umfpack as um'])
is_umfpack = 'um' in aux and hasattr(aux['um'], 'UMFPACK_OK')
if method == 'umfpack' and not is_umfpack:
raise ValueError('UMFPACK not available!')
elif method == 'superlu':
is_umfpack = False
else:
raise ValueError('unknown solution method! (%s)' % method)
if is_umfpack:
self.sls.use_solver(useUmfpack=True,
assumeSortedIndices=True)
else:
self.sls.use_solver(useUmfpack=False)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if conf.use_presolve:
self.presolve(mtx)
if self.solve is not None:
# Matrix is already prefactorized.
return self.solve(rhs)
else:
return self.sls.spsolve(mtx, rhs)
def presolve(self, mtx):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if is_new:
self.solve = self.sls.factorized(mtx)
self.mtx_digest = mtx_digest
class ScipySuperLU(ScipyDirect):
"""
SuperLU - direct sparse solver from SciPy.
"""
name = 'ls.scipy_superlu'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='superlu', **kwargs)
class ScipyUmfpack(ScipyDirect):
"""
UMFPACK - direct sparse solver from SciPy.
"""
name = 'ls.scipy_umfpack'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
]
def __init__(self, conf, **kwargs):
ScipyDirect.__init__(self, conf, method='umfpack', **kwargs)
class ScipyIterative(LinearSolver):
"""
Interface to SciPy iterative solvers.
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
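Example (a sketch; assumes a plain dict is accepted as the solver
configuration, as in the :func:`solve` wrapper above)::
>>> import scipy.sparse as sps
>>> import numpy as nm
>>> mtx = sps.identity(5, format='csr') * 2.0
>>> x = solve(mtx, nm.ones(5), solver_class=ScipyIterative,
...           solver_conf={'method' : 'cg'})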
"""
name = 'ls.scipy_iterative'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in scipy.sparse.linalg pass a solution vector into
# a callback except those below, that take a residual vector.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
import scipy.sparse.linalg.isolve as la
LinearSolver.__init__(self, conf, context=context, **kwargs)
try:
solver = getattr(la, self.conf.method)
except AttributeError:
output('scipy solver %s does not exist!' % self.conf.method)
output('using cg instead')
solver = la.cg
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
if conf.method == 'qmr':
prec_args = {'M1' : precond, 'M2' : precond}
else:
prec_args = {'M' : precond}
solver_kwargs.update(prec_args)
# Newer SciPy versions accept both atol and tol; fall back to the
# tol-only call signature on older versions that raise TypeError.
try:
sol, info = self.solver(mtx, rhs, x0=x0, atol=eps_a, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
except TypeError:
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
class PyAMGSolver(LinearSolver):
"""
Interface to PyAMG solvers.
The `method` parameter can be one of: 'smoothed_aggregation_solver',
'ruge_stuben_solver'. The `accel` parameter specifies the Krylov
solver name, that is used as an accelerator for the multigrid solver.
"""
name = 'ls.pyamg'
_parameters = [
('method', 'str', 'smoothed_aggregation_solver', False,
'The actual solver to use.'),
('accel', 'str', None, False,
'The accelerator.'),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres accelerator, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the MG solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Use the 'method:'
prefix for arguments of the method construction function
(e.g. 'method:max_levels' : 5), and the 'solve:' prefix for
the subsequent solver call."""),
]
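# Example configuration (a sketch illustrating the 'method:' and 'solve:'
# prefixes described above; the option names are PyAMG's):
#
#   conf = {'method' : 'ruge_stuben_solver', 'accel' : 'cg',
#           'method:max_levels' : 5, 'solve:cycle' : 'V'}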
# All iterative solvers in pyamg.krylov pass a solution vector into
# a callback except those below, that take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, **kwargs):
try:
import pyamg
except ImportError:
msg = 'cannot import pyamg!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None, **kwargs)
try:
solver = getattr(pyamg, self.conf.method)
except AttributeError:
output('pyamg.%s does not exist!' % self.conf.method)
output('using pyamg.smoothed_aggregation_solver instead')
solver = pyamg.smoothed_aggregation_solver
self.solver = solver
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.accel not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if is_new or (self.mg is None):
_kwargs = {key[7:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('method:')}
self.mg = self.solver(mtx, **_kwargs)
self.mtx_digest = mtx_digest
_kwargs = {key[6:] : val
for key, val in six.iteritems(solver_kwargs)
if key.startswith('solve:')}
sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r,
maxiter=i_max, callback=iter_callback,
**_kwargs)
return sol, self.iter
class PyAMGKrylovSolver(LinearSolver):
"""
Interface to PyAMG Krylov solvers.
"""
name = 'ls.pyamg_krylov'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', lambda mtx, context: None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return one
of {sparse matrix, dense matrix, LinearOperator}.
"""),
('callback', 'callable', None, False,
"""User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector, except
the gmres method, where the argument is the residual norm.
"""),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
# All iterative solvers in pyamg.krylov pass a solution vector into
# a callback except those below, that take a residual vector norm.
_callbacks_res = ['gmres']
def __init__(self, conf, context=None, **kwargs):
try:
import pyamg.krylov as krylov
except ImportError:
msg = 'cannot import pyamg.krylov!'
raise ImportError(msg)
LinearSolver.__init__(self, conf, mg=None,
context=context, **kwargs)
try:
solver = getattr(krylov, self.conf.method)
except AttributeError:
output('pyamg.krylov.%s does not exist!' % self.conf.method)
raise
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, context=None, **kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
setup_precond = get_default(kwargs.get('setup_precond', None),
self.conf.setup_precond)
callback = get_default(kwargs.get('callback', lambda sol: None),
self.conf.callback)
self.iter = 0
def iter_callback(sol):
self.iter += 1
msg = '%s: iteration %d' % (self.conf.name, self.iter)
if conf.verbose > 2:
if conf.method not in self._callbacks_res:
res = mtx * sol - rhs
else:
res = sol
rnorm = nm.linalg.norm(res)
msg += ': |Ax-b| = %e' % rnorm
output(msg, verbose=conf.verbose > 1)
# Call an optional user-defined callback.
callback(sol)
precond = setup_precond(mtx, context)
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
M=precond, callback=iter_callback,
**solver_kwargs)
output('%s: %s convergence: %s (%s, %d iterations)'
% (self.conf.name, self.conf.method,
info, self.converged_reasons[nm.sign(info)], self.iter),
verbose=conf.verbose)
return sol, self.iter
class PETScKrylovSolver(LinearSolver):
"""
PETSc Krylov subspace solver.
The solver supports parallel use with a given MPI communicator (see `comm`
argument of :func:`PETScKrylovSolver.__init__()`) and allows passing in
PETSc matrices and vectors. Returns a (global) PETSc solution vector
instead of a (local) numpy array, when given a PETSc right-hand side
vector.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overridden when called by passing a `conf`
object.
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc'
_parameters = [
('method', 'str', 'cg', False,
'The actual solver to use.'),
('setup_precond', 'callable', None, False,
"""User-supplied function for the preconditioner initialization/setup.
It is called as setup_precond(mtx, context), where mtx is the
matrix, context is a user-supplied context, and should return an
object with `setUp(self, pc)` and `apply(self, pc, x, y)` methods.
Has precedence over the `precond`/`sub_precond` parameters.
"""),
('precond', 'str', 'icc', False,
'The preconditioner.'),
('sub_precond', 'str', 'none', False,
'The preconditioner for matrix blocks (in parallel runs).'),
('precond_side', "{'left', 'right', 'symmetric', None}", None, False,
'The preconditioner side.'),
('i_max', 'int', 100, False,
'The maximum number of iterations.'),
('eps_a', 'float', 1e-8, False,
'The absolute tolerance for the residual.'),
('eps_r', 'float', 1e-8, False,
'The relative tolerance for the residual.'),
('eps_d', 'float', 1e5, False,
'The divergence tolerance for the residual.'),
('force_reuse', 'bool', False, False,
"""If True, skip the check whether the KSP solver object corresponds
to the `mtx` argument: it is always reused."""),
('*', '*', None, False,
"""Additional parameters supported by the method. Can be used to pass
all PETSc options supported by :func:`petsc.Options()`."""),
]
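# Example configuration (a sketch; extra keys such as 'ksp_gmres_restart'
# are forwarded to PETSc's options database, as described above):
#
#   conf = {'method' : 'gmres', 'precond' : 'ilu',
#           'eps_r' : 1e-10, 'ksp_gmres_restart' : 30}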
_precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}
def __init__(self, conf, comm=None, context=None, **kwargs):
if comm is None:
# The import presumably initializes PETSc's command-line arguments as a
# side effect; the trailing bare name only marks the import as used.
from sfepy.parallel.parallel import init_petsc_args; init_petsc_args
from petsc4py import PETSc as petsc
converged_reasons = {}
for key, val in six.iteritems(petsc.KSP.ConvergedReason.__dict__):
if isinstance(val, int):
converged_reasons[val] = key
LinearSolver.__init__(self, conf, petsc=petsc, comm=comm,
converged_reasons=converged_reasons,
fields=None, ksp=None, pmtx=None,
context=context, **kwargs)
def set_field_split(self, field_ranges, comm=None):
"""
Set up local PETSc ranges for fields to be used with 'fieldsplit'
preconditioner.
This function must be called before solving the linear system.
"""
comm = get_default(comm, self.comm)
self.fields = []
for key, rng in six.iteritems(field_ranges):
if isinstance(rng, slice):
rng = rng.start, rng.stop
size = rng[1] - rng[0]
field_is = self.petsc.IS().createStride(size, first=rng[0], step=1,
comm=comm)
self.fields.append((key, field_is))
def create_ksp(self, options=None, comm=None):
optDB = self.petsc.Options()
optDB['sub_pc_type'] = self.conf.sub_precond
if options is not None:
for key, val in six.iteritems(options):
optDB[key] = val
ksp = self.petsc.KSP()
ksp.create(comm)
ksp.setType(self.conf.method)
pc = ksp.getPC()
if self.conf.setup_precond is None:
pc.setType(self.conf.precond)
else:
pc.setType(pc.Type.PYTHON)
ksp.setFromOptions()
if (pc.type == 'fieldsplit'):
if self.fields is not None:
pc.setFieldSplitIS(*self.fields)
else:
msg = 'PETScKrylovSolver.set_field_split() has to be called!'
raise ValueError(msg)
side = self._precond_sides[self.conf.precond_side]
if side is not None:
ksp.setPCSide(side)
return ksp
def create_petsc_matrix(self, mtx, comm=None):
if isinstance(mtx, self.petsc.Mat):
pmtx = mtx
else:
mtx = sps.csr_matrix(mtx)
pmtx = self.petsc.Mat()
pmtx.createAIJ(mtx.shape, csr=(mtx.indptr, mtx.indices, mtx.data),
comm=comm)
return pmtx
@petsc_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, comm=None, context=None,
**kwargs):
solver_kwargs = self.build_solver_kwargs(conf)
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
eps_d = self.conf.eps_d
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest,
force_reuse=conf.force_reuse)
if (not is_new) and self.ksp is not None:
ksp = self.ksp
pmtx = self.pmtx
else:
pmtx = self.create_petsc_matrix(mtx, comm=comm)
ksp = self.create_ksp(options=solver_kwargs, comm=comm)
ksp.setOperators(pmtx)
ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d,
max_it=i_max)
setup_precond = self.conf.setup_precond
if setup_precond is not None:
ksp.pc.setPythonContext(setup_precond(mtx, context))
ksp.setFromOptions()
self.mtx_digest = mtx_digest
self.ksp = ksp
self.pmtx = pmtx
if isinstance(rhs, self.petsc.Vec):
prhs = rhs
else:
prhs = pmtx.getVecLeft()
prhs[...] = rhs
if x0 is not None:
if isinstance(x0, self.petsc.Vec):
psol = x0
else:
psol = pmtx.getVecRight()
psol[...] = x0
ksp.setInitialGuessNonzero(True)
else:
psol = pmtx.getVecRight()
ksp.setInitialGuessNonzero(False)
ksp.solve(prhs, psol)
output('%s(%s, %s/proc) convergence: %s (%s, %d iterations)'
% (ksp.getType(), ksp.getPC().getType(), self.conf.sub_precond,
ksp.reason, self.converged_reasons[ksp.reason],
ksp.getIterationNumber()),
verbose=conf.verbose)
if isinstance(rhs, self.petsc.Vec):
sol = psol
else:
sol = psol[...].copy()
return sol
class MUMPSSolver(LinearSolver):
"""
Interface to MUMPS solver.
"""
name = 'ls.mumps'
_parameters = [
('use_presolve', 'bool', False, False,
'If True, pre-factorize the matrix.'),
('memory_relaxation', 'int', 20, False,
'The percentage increase in the estimated working space.'),
]
def __init__(self, conf, **kwargs):
import sfepy.solvers.ls_mumps as mumps
self.mumps_ls = None
if not mumps.use_mpi:
raise AttributeError('No mpi4py found! Required by MUMPS solver.')
mumps.load_mumps_libraries() # try to load MUMPS libraries
LinearSolver.__init__(self, conf, mumps=mumps, mumps_ls=None,
mumps_presolved=False, **kwargs)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if not self.mumps_presolved:
self.presolve(mtx, presolve_flag=conf.use_presolve)
out = rhs.copy()
self.mumps_ls.set_rhs(out)
self.mumps_ls(3) # solve
return out
def presolve(self, mtx, presolve_flag=False):
is_new, mtx_digest = _is_new_matrix(mtx, self.mtx_digest)
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
if self.mumps_ls is None:
system = 'complex' if mtx.dtype.name.startswith('complex')\
else 'real'
is_sym = self.mumps.coo_is_symmetric(mtx)
mem_relax = self.conf.memory_relaxation
self.mumps_ls = self.mumps.MumpsSolver(system=system,
is_sym=is_sym,
mem_relax=mem_relax)
if is_new:
if self.conf.verbose:
self.mumps_ls.set_verbose()
self.mumps_ls.set_mtx_centralized(mtx)
self.mumps_ls(4) # analyze + factorize
if presolve_flag:
self.mumps_presolved = True
self.mtx_digest = mtx_digest
def __del__(self):
if self.mumps_ls is not None:
del self.mumps_ls
class MUMPSParallelSolver(LinearSolver):
"""
Interface to MUMPS parallel solver.
"""
name = 'ls.mumps_par'
_parameters = [
('memory_relaxation', 'int', 20, False,
'The percentage increase in the estimated working space.'),
]
def __init__(self, conf, **kwargs):
import multiprocessing
import sfepy.solvers.ls_mumps as mumps
mumps.load_mumps_libraries() # try to load MUMPS libraries
LinearSolver.__init__(self, conf, mumps=mumps, mumps_ls=None,
number_of_cpu=multiprocessing.cpu_count(),
mumps_presolved=False, **kwargs)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
from mpi4py import MPI
import sys
from sfepy import data_dir
import os.path as op
from tempfile import gettempdir
def tmpfile(fname):
return op.join(gettempdir(), fname)
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
is_sym = self.mumps.coo_is_symmetric(mtx)
rr, cc, data = mtx.row + 1, mtx.col + 1, mtx.data
if is_sym:
idxs = nm.where(cc >= rr)[0] # upper triangular matrix
rr, cc, data = rr[idxs], cc[idxs], data[idxs]
n = mtx.shape[0]
nz = rr.shape[0]
flags = nm.memmap(tmpfile('vals_flags.array'), dtype='int32',
mode='w+', shape=(4,))
flags[0] = n
flags[1] = 1 if data.dtype.name.startswith('complex') else 0
flags[2] = int(is_sym)
flags[3] = int(self.conf.verbose)
idxs = nm.memmap(tmpfile('idxs.array'), dtype='int32',
mode='w+', shape=(2, nz))
idxs[0, :] = rr
idxs[1, :] = cc
dtype = {0: 'float64', 1: 'complex128'}[flags[1]]
vals_mtx = nm.memmap(tmpfile('vals_mtx.array'), dtype=dtype,
mode='w+', shape=(nz,))
vals_rhs = nm.memmap(tmpfile('vals_rhs.array'), dtype=dtype,
mode='w+', shape=(n,))
vals_mtx[:] = data
vals_rhs[:] = rhs
mumps_call = op.join(data_dir, 'sfepy', 'solvers',
'ls_mumps_parallel.py')
comm = MPI.COMM_SELF.Spawn(sys.executable, args=[mumps_call],
maxprocs=self.number_of_cpu)
comm.Disconnect()
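# The spawned ls_mumps_parallel.py workers read the memory-mapped arrays
# written above, solve the system with MUMPS in parallel and (presumably)
# write the solution to 'vals_x.array', which is read back below.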
out = nm.memmap(tmpfile('vals_x.array'), dtype=dtype, mode='r')
return out
class SchurMumps(MUMPSSolver):
r"""
Mumps Schur complement solver.
"""
name = 'ls.schur_mumps'
_parameters = MUMPSSolver._parameters + [
('schur_variables', 'list', None, True,
'The list of Schur variables.'),
]
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
import scipy.linalg as sla
if not isinstance(mtx, sps.coo_matrix):
mtx = mtx.tocoo()
system = 'complex' if mtx.dtype.name.startswith('complex') else 'real'
self.mumps_ls = self.mumps.MumpsSolver(system=system)
if self.conf.verbose:
self.mumps_ls.set_verbose()
schur_list = []
for schur_var in conf.schur_variables:
slc = self.context.equations.variables.adi.indx[schur_var]
schur_list.append(nm.arange(slc.start, slc.stop, slc.step, dtype='i'))
self.mumps_ls.set_mtx_centralized(mtx)
out = rhs.copy()
self.mumps_ls.set_rhs(out)
S, y2 = self.mumps_ls.get_schur(nm.hstack(schur_list))
x2 = sla.solve(S.T, y2) # solve the dense Schur system using scipy.linalg
return self.mumps_ls.expand_schur(x2)
class MultiProblem(ScipyDirect):
r"""
Conjugate multiple problems.
Allows defining multiple conjugate problems.
"""
name = 'ls.cm_pb'
_parameters = ScipyDirect._parameters + [
('others', 'list', None, True,
'The list of auxiliary problem definition files.'),
('coupling_variables', 'list', None, True,
'The list of coupling variables.'),
]
def __init__(self, conf, context=None, **kwargs):
ScipyDirect.__init__(self, conf, context=context, **kwargs)
def init_subproblems(self, conf, **kwargs):
from sfepy.discrete.state import State
from sfepy.discrete import Problem
from sfepy.base.conf import ProblemConf, get_standard_keywords
from scipy.spatial import cKDTree as KDTree
# init subproblems
problem = self.context
pb_vars = problem.get_variables()
# get "master" DofInfo and last index
pb_adi_indx = problem.equations.variables.adi.indx
self.adi_indx = pb_adi_indx.copy()
last_indx = -1
for ii in six.itervalues(self.adi_indx):
last_indx = nm.max([last_indx, ii.stop])
# coupling variables
self.cvars_to_pb = {}
for jj in conf.coupling_variables:
self.cvars_to_pb[jj] = [None, None]
if jj in pb_vars.names:
if pb_vars[jj].dual_var_name is not None:
self.cvars_to_pb[jj][0] = -1
else:
self.cvars_to_pb[jj][1] = -1
# init subproblems
self.subpb = []
required, other = get_standard_keywords()
master_prefix = output.get_output_prefix()
for ii, ifname in enumerate(conf.others):
sub_prefix = master_prefix[:-1] + '-sub%d:' % (ii + 1)
output.set_output_prefix(sub_prefix)
kwargs['master_problem'] = problem
confi = ProblemConf.from_file(ifname, required, other,
define_args=kwargs)
pbi = Problem.from_conf(confi, init_equations=True)
sti = State(pbi.equations.variables)
pbi.equations.set_data(None, ignore_unknown=True)
pbi.time_update()
pbi.update_materials()
sti.apply_ebc()
pbi_vars = pbi.get_variables()
output.set_output_prefix(master_prefix)
self.subpb.append([pbi, sti, None])
# append "slave" DofInfo
for jj in pbi_vars.names:
if not(pbi_vars[jj].is_state()):
continue
didx = pbi.equations.variables.adi.indx[jj]
ndof = didx.stop - didx.start
if jj in self.adi_indx:
if ndof != \
(self.adi_indx[jj].stop - self.adi_indx[jj].start):
raise ValueError('DOFs do not match!')
else:
self.adi_indx.update({
jj: slice(last_indx, last_indx + ndof, None)})
last_indx += ndof
for jj in conf.coupling_variables:
if jj in pbi_vars.names:
if pbi_vars[jj].dual_var_name is not None:
self.cvars_to_pb[jj][0] = ii
else:
self.cvars_to_pb[jj][1] = ii
self.subpb.append([problem, None, None])
self.cvars_to_pb_map = {}
for varname, pbs in six.iteritems(self.cvars_to_pb):
# match field nodes
coors = []
for ii in pbs:
pbi = self.subpb[ii][0]
pbi_vars = pbi.get_variables()
fcoors = pbi_vars[varname].field.coors
dc = nm.abs(nm.max(fcoors, axis=0)\
- nm.min(fcoors, axis=0))
ax = nm.where(dc > 1e-9)[0]
coors.append(fcoors[:,ax])
if len(coors[0]) != len(coors[1]):
raise ValueError('number of nodes does not match!')
kdtree = KDTree(coors[0])
map_12 = kdtree.query(coors[1])[1]
pbi1 = self.subpb[pbs[0]][0]
pbi1_vars = pbi1.get_variables()
eq_map_1 = pbi1_vars[varname].eq_map
pbi2 = self.subpb[pbs[1]][0]
pbi2_vars = pbi2.get_variables()
eq_map_2 = pbi2_vars[varname].eq_map
dpn = eq_map_2.dpn
nnd = map_12.shape[0]
map_12_nd = nm.zeros((nnd * dpn,), dtype=nm.int32)
if dpn > 1:
for ii in range(dpn):
map_12_nd[ii::dpn] = map_12 * dpn + ii
else:
map_12_nd = map_12
idx = nm.where(eq_map_2.eq >= 0)[0]
self.cvars_to_pb_map[varname] = eq_map_1.eq[map_12[idx]]
def sparse_submat(self, Ad, Ar, Ac, gr, gc, S):
"""
A[gr,gc] = S
"""
if type(gr) is slice:
gr = nm.arange(gr.start, gr.stop)
if type(gc) is slice:
gc = nm.arange(gc.start, gc.stop)
for ii, lrow in enumerate(S):
m = lrow.indices.shape[0]
idxrow = nm.ones((m,), dtype=nm.int32) * gr[ii]
Ar = nm.hstack([Ar, idxrow])
Ac = nm.hstack([Ac, gc[lrow.indices]])
Ad = nm.hstack([Ad, lrow.data])
return Ad, Ar, Ac
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
self.init_subproblems(self.conf, **kwargs)
max_indx = 0
hst = nm.hstack
for ii in six.itervalues(self.adi_indx):
max_indx = nm.max([max_indx, ii.stop])
new_rhs = nm.zeros((max_indx,), dtype=rhs.dtype)
new_rhs[:rhs.shape[0]] = rhs
# copy "master" matrices
pbi = self.subpb[-1][0]
adi_indxi = pbi.equations.variables.adi.indx
mtxc = mtx.tocsc()
aux_data = nm.array([], dtype=mtxc.dtype)
aux_rows = nm.array([], dtype=nm.int32)
aux_cols = nm.array([], dtype=nm.int32)
for jk, jv in six.iteritems(adi_indxi):
if jk in self.cvars_to_pb:
if not(self.cvars_to_pb[jk][0] == -1):
continue
gjv = self.adi_indx[jk]
ii = gjv.start
for jj in nm.arange(jv.start, jv.stop):
ptr = mtxc.indptr[jj]
nn = mtxc.indptr[jj + 1] - ptr
sl = slice(ptr, ptr + nn, None)
aux_data = hst([aux_data, mtxc.data[sl]])
aux_rows = hst([aux_rows, mtxc.indices[sl]])
aux_cols = hst([aux_cols, nm.ones((nn,), dtype=nm.int32) * ii])
ii += 1
# copy "slave" (sub)matricies
mtxs = []
for kk, (pbi, sti0, _) in enumerate(self.subpb[:-1]):
x0i = sti0.get_reduced()
evi = pbi.get_evaluator()
mtxi = evi.eval_tangent_matrix(x0i, mtx=pbi.mtx_a)
rhsi = evi.eval_residual(x0i)
mtxs.append(mtxi)
adi_indxi = pbi.equations.variables.adi.indx
for ik, iv in six.iteritems(adi_indxi):
if ik in self.cvars_to_pb:
if not(self.cvars_to_pb[ik][0] == kk):
continue
giv = self.adi_indx[ik]
for jk, jv in six.iteritems(adi_indxi):
gjv = self.adi_indx[jk]
if jk in self.cvars_to_pb:
if not(self.cvars_to_pb[jk][0] == kk):
continue
aux_data, aux_rows, aux_cols =\
self.sparse_submat(aux_data, aux_rows, aux_cols,
giv, gjv, mtxi[iv, jv])
new_rhs[giv] = rhsi[iv]
mtxs.append(mtx)
# copy "coupling" (sub)matricies
for varname, pbs in six.iteritems(self.cvars_to_pb):
idx = pbs[1]
pbi = self.subpb[idx][0]
mtxi = mtxs[idx]
gjv = self.adi_indx[varname]
jv = pbi.equations.variables.adi.indx[varname]
adi_indxi = pbi.equations.variables.adi.indx
for ik, iv in six.iteritems(adi_indxi):
if ik == varname:
continue
giv = self.adi_indx[ik]
aux_mtx = mtxi[iv,:].tocsc()
for ll, jj in enumerate(nm.arange(jv.start, jv.stop)):
ptr = aux_mtx.indptr[jj]
nn = aux_mtx.indptr[jj + 1] - ptr
if nn < 1:
continue
sl = slice(ptr, ptr + nn, None)
aux_data = hst([aux_data, aux_mtx.data[sl]])
aux_rows = hst([aux_rows, aux_mtx.indices[sl] + giv.start])
jjr = gjv.start + self.cvars_to_pb_map[varname][ll]
aux_cols = hst([aux_cols,
nm.ones((nn,), dtype=nm.int32) * jjr])
# create new matrix
new_mtx = sps.coo_matrix((aux_data, (aux_rows, aux_cols))).tocsr()
res0 = ScipyDirect.__call__(self, new_rhs, mtx=new_mtx)
res = []
for kk, (pbi, sti0, _) in enumerate(self.subpb):
adi_indxi = pbi.equations.variables.adi.indx
max_indx = 0
for ii in six.itervalues(adi_indxi):
max_indx = nm.max([max_indx, ii.stop])
resi = nm.zeros((max_indx,), dtype=res0.dtype)
for ik, iv in six.iteritems(adi_indxi):
giv = self.adi_indx[ik]
if ik in self.cvars_to_pb:
if pbi is self.subpb[self.cvars_to_pb[ik][1]][0]:
giv = self.cvars_to_pb_map[ik] + giv.start
resi[iv] = res0[giv]
if sti0 is not None:
sti = sti0.copy()
sti.set_reduced(-resi)
pbi.setup_default_output()
pbi.save_state(pbi.get_output_name(), sti)
self.subpb[kk][-1] = sti
res.append(resi)
return res[-1]
|
|
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2016
# --------------------------------------------------------------------------
"""
Hitori is played with a grid of squares or cells, and each cell contains a
number. The objective is to eliminate numbers by filling in the squares such
that remaining cells do not contain numbers that appear more than once in
either a given row or column.
Filled-in cells cannot be horizontally or vertically adjacent, although they
can be diagonally adjacent. The remaining un-filled cells must form a single
component connected horizontally and vertically.
See https://en.wikipedia.org/wiki/Hitori
Please refer to documentation for appropriate setup of solving configuration.
"""
from docplex.cp.model import CpoModel
from sys import stdout
#-----------------------------------------------------------------------------
# Initialize the problem data
#-----------------------------------------------------------------------------
# Problem 0 (for test). A solution is:
# * 2 *
# 2 3 1
# * 1 *
HITORI_PROBLEM_0 = ( (2, 2, 1),
(2, 3, 1),
(1, 1, 1),
)
# Problem 1. A solution is:
# * 2 * 5 3
# 2 3 1 4 *
# * 1 * 3 5
# 1 * 5 * 2
# 5 4 3 2 1
HITORI_PROBLEM_1 = ( (2, 2, 1, 5, 3),
(2, 3, 1, 4, 5),
(1, 1, 1, 3, 5),
(1, 3, 5, 4, 2),
(5, 4, 3, 2, 1),
)
# Problem 2. A solution is:
# * 8 * 6 3 2 * 7
# 3 6 7 2 1 * 5 4
# * 3 4 * 2 8 6 1
# 4 1 * 5 7 * 3 *
# 7 * 3 * 8 5 1 2
# * 5 6 7 * 1 8 *
# 6 * 2 3 5 4 7 8
# 8 7 1 4 * 3 * 6
HITORI_PROBLEM_2 = ( (4, 8, 1, 6, 3, 2, 5, 7),
(3, 6, 7, 2, 1, 6, 5, 4),
(2, 3, 4, 8, 2, 8, 6, 1),
(4, 1, 6, 5, 7, 7, 3, 5),
(7, 2, 3, 1, 8, 5, 1, 2),
(3, 5, 6, 7, 3, 1, 8, 4),
(6, 4, 2, 3, 5, 4, 7, 8),
(8, 7, 1, 4, 2, 3, 5, 6),
)
# Problem 3, solution to discover!
HITORI_PROBLEM_3 = ( ( 2, 5, 6, 3, 8, 10, 7, 4, 13, 6, 14, 15, 9, 4, 1),
( 3, 1, 7, 12, 8, 4, 10, 4, 4, 11, 5, 13, 4, 9, 2),
( 4, 14, 10, 10, 14, 5, 11, 1, 6, 2, 7, 11, 13, 15, 12),
( 5, 10, 2, 5, 13, 3, 8, 5, 9, 7, 4, 10, 6, 10, 2),
( 1, 6, 8, 15, 10, 7, 4, 2, 15, 14, 9, 3, 3, 11, 4),
( 6, 14, 3, 11, 2, 4, 9, 5, 7, 13, 12, 8, 10, 14, 1),
(12, 8, 14, 11, 3, 7, 15, 13, 10, 7, 12, 13, 5, 2, 13),
(11, 4, 12, 15, 5, 6, 5, 3, 15, 10, 7, 9, 5, 13, 14),
( 8, 15, 4, 6, 15, 3, 13, 14, 6, 12, 10, 1, 11, 3, 5),
(15, 15, 9, 12, 1, 8, 11, 10, 2, 2, 11, 9, 4, 12, 2),
( 7, 1, 9, 9, 10, 5, 3, 11, 13, 6, 7, 4, 12, 5, 8),
(14, 10, 13, 4, 12, 15, 11, 10, 5, 7, 8, 12, 5, 3, 6),
( 5, 10, 11, 5, 11, 14, 14, 15, 8, 13, 13, 2, 7, 9, 9),
( 9, 7, 15, 10, 12, 11, 8, 6, 1, 5, 7, 14, 13, 1, 3),
( 6, 9, 1, 13, 6, 4, 12, 7, 14, 4, 2, 1, 3, 8, 12)
)
#-----------------------------------------------------------------------------
# Prepare the data for modeling
#-----------------------------------------------------------------------------
PUZZLE = HITORI_PROBLEM_3
SIZE = len(PUZZLE)
#-----------------------------------------------------------------------------
# Build the model
#-----------------------------------------------------------------------------
def get_neighbors(l, c):
""" Build the list of neighbors of a given cell """
res = []
if c > 0: res.append((l, c-1))
if c < SIZE - 1: res.append((l, c+1))
if l > 0: res.append((l-1, c))
if l < SIZE - 1: res.append((l+1, c))
return res
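# For example, a corner cell has two neighbors:
# get_neighbors(0, 0) returns [(0, 1), (1, 0)].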
# Create model
mdl = CpoModel()
# Create one binary variable per cell: 1 if the cell is colored (filled in), 0 if it stays blank
color = [[mdl.integer_var(min=0, max=1, name="C" + str(l) + "_" + str(c)) for c in range(SIZE)] for l in range(SIZE)]
# Forbid adjacent colored cells
for l in range(SIZE):
for c in range(SIZE - 1):
mdl.add((color[l][c] + color[l][c + 1]) < 2)
for c in range(SIZE):
for l in range(SIZE - 1):
mdl.add((color[l][c] + color[l + 1][c]) < 2)
# Color duplicates: in every row and column, each value may appear at most once among the blank cells
for l in range(SIZE):
lvals = [] # List of values already processed
for c in range(SIZE):
v = PUZZLE[l][c]
if v not in lvals:
lvals.append(v)
lvars = [color[l][c]]
for c2 in range(c + 1, SIZE):
if PUZZLE[l][c2] == v:
lvars.append(color[l][c2])
# Add constraint if more than one occurrence of the value
nbocc = len(lvars)
if nbocc > 1:
mdl.add(mdl.sum(lvars) >= nbocc - 1)
for c in range(SIZE):
lvals = [] # List of values already processed
for l in range(SIZE):
v = PUZZLE[l][c]
if v not in lvals:
lvals.append(v)
lvars = [color[l][c]]
for l2 in range(l + 1, SIZE):
if PUZZLE[l2][c] == v:
lvars.append(color[l2][c])
# Add constraint if more than one occurrence of the value
nbocc = len(lvars)
if nbocc > 1:
mdl.add(mdl.sum(lvars) >= nbocc - 1)
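# For example, if a value occurs three times in a column, the constraint
# sum(lvars) >= 2 forces at least two of those cells to be colored, so at
# most one occurrence remains blank.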
# Every cell (colored or not) must have at least one blank neighbor
for l in range(SIZE):
for c in range(SIZE):
lvars = [color[l2][c2] for l2, c2 in get_neighbors(l, c)]
mdl.add(mdl.sum(lvars) < len(lvars))
# Connectivity: since colored cells cannot be adjacent, at least one of
# cells (0,0) and (0,1) is blank. Build a table of distances to one of
# these cells; colored cells are assigned the maximum distance SIZE*SIZE.
MAX_DIST = SIZE * SIZE
distance = [[mdl.integer_var(min=0, max=MAX_DIST, name="D" + str(l) + "_" + str(c)) for c in range(SIZE)] for l in range(SIZE)]
mdl.add(distance[0][0] == mdl.conditional(color[0][0], MAX_DIST, 0))
mdl.add(distance[0][1] == mdl.conditional(color[0][1], MAX_DIST, 0))
for c in range(2, SIZE):
mdl.add( distance[0][c] == mdl.conditional(color[0][c], MAX_DIST, 1 + mdl.min(distance[l2][c2] for l2, c2 in get_neighbors(0, c))) )
for l in range(1, SIZE):
for c in range(SIZE):
mdl.add( distance[l][c] == mdl.conditional(color[l][c], MAX_DIST, 1 + mdl.min(distance[l2][c2] for l2, c2 in get_neighbors(l, c))) )
# Force distance of blank cells to be less than max
for l in range(SIZE):
for c in range(SIZE):
mdl.add((color[l][c] > 0) | (distance[l][c] < MAX_DIST))
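# Together, the distance constraints encode a BFS-like labeling: each blank
# cell gets 1 + the minimum distance of its neighbors, so a finite distance
# exists only if the cell is reachable from cell (0,0) or (0,1) through
# blank cells. Forcing every blank cell below MAX_DIST therefore makes the
# blank cells form a single connected component.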
#-----------------------------------------------------------------------------
# Solve the model and display the result
#-----------------------------------------------------------------------------
def print_grid(grid):
""" Print Hitori grid """
mxlen = max([len(str(grid[l][c])) for l in range(SIZE) for c in range(SIZE)])
frmt = " {:>" + str(mxlen) + "}"
for l in grid:
for v in l:
stdout.write(frmt.format(v))
stdout.write('\n')
# Solve model
print("\nSolving model....")
msol = mdl.solve(TimeLimit=100)
# Print solution
stdout.write("Initial problem:\n")
print_grid(PUZZLE)
stdout.write("Solution:\n")
if msol:
# Print solution grid
psol = []
for l in range(SIZE):
nl = []
for c in range(SIZE):
nl.append('.' if msol[color[l][c]] > 0 else PUZZLE[l][c])
psol.append(nl)
print_grid(psol)
# Print distance grid
print("Distances:")
psol = [['.' if msol[distance[l][c]] == MAX_DIST else msol[distance[l][c]] for c in range(SIZE)] for l in range(SIZE)]
print_grid(psol)
else:
stdout.write("No solution found\n")
|
|
#
# Copyright (c) 2014, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Module for working with EOS static routes
The staticroute resource provides configuration management of static
route resources on an EOS node. It provides the following class
implementations:
* StaticRoute - Configure static routes in EOS
StaticRoute Attributes:
ip_dest (string): The ip address of the destination in the
form of A.B.C.D/E
next_hop (string): The next hop interface or ip address
next_hop_ip (string): The next hop address on destination interface
distance (int): Administrative distance for this route
tag (int): Route tag
route_name (string): Route name
Notes:
The 'default' prefix form of the 'ip route' command,
'default ip route ...', is currently equivalent to the
'no ip route ...' command.
"""
import re
from pyeapi.api import EntityCollection
# Regex to match 'ip route' lines; the capture groups, one per line of the pattern below:
# 'ip route' header
# ip_dest
# next_hop
# next_hop_ip
# distance
# tag
# name
ROUTES_RE = re.compile(r'(?<=^ip route)'
r' (\d+\.\d+\.\d+\.\d+\/\d+)'
r' (\d+\.\d+\.\d+\.\d+|\S+)'
r'(?: (\d+\.\d+\.\d+\.\d+))?'
r' (\d+)'
r'(?: tag (\d+))?'
r'(?: name (\S+))?', re.M)
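# For example (illustrative), the configuration line
#   'ip route 1.2.3.4/32 Ethernet1 4.3.2.1 10 tag 100 name test'
# yields the match groups
#   ('1.2.3.4/32', 'Ethernet1', '4.3.2.1', '10', '100', 'test').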
class StaticRoute(EntityCollection):
"""The StaticRoute class provides a configuration instance
for working with static routes
"""
def __str__(self):
return 'StaticRoute'
def get(self, name):
"""Retrieves the ip route information for the destination
ip address specified.
Args:
name (string): The ip address of the destination in the
form of A.B.C.D/E
Returns:
dict: A dict object of static route entries in the form
{ ip_dest:
{ next_hop:
{ next_hop_ip:
{ distance:
{ 'tag': tag,
'route_name': route_name
}
}
}
}
}
If the ip address specified does not have any associated
static routes, then None is returned.
Notes:
The keys ip_dest, next_hop, next_hop_ip, and distance in
the returned dictionary are the values of those components
of the ip route specification. If a route does not contain
a next_hop_ip, then that key value will be set as 'None'.
"""
# Return the route configurations for the specified ip address,
# or None if it's not found
return self.getall().get(name)
def getall(self):
"""Return all ip routes configured on the switch as a resource dict
Returns:
dict: A dict object of static route entries in the form
{ ip_dest:
{ next_hop:
{ next_hop_ip:
{ distance:
{ 'tag': tag,
'route_name': route_name
}
}
}
}
}
If the ip address specified does not have any associated
static routes, then None is returned.
Notes:
The keys ip_dest, next_hop, next_hop_ip, and distance in
the returned dictionary are the values of those components
of the ip route specification. If a route does not contain
a next_hop_ip, then that key value will be set as 'None'.
"""
# Find all the ip routes in the config
matches = ROUTES_RE.findall(self.config)
# Parse the routes and add them to the routes dict
routes = dict()
for match in matches:
# Get the four identifying components
ip_dest = match[0]
next_hop = match[1]
next_hop_ip = None if match[2] == '' else match[2]
distance = int(match[3])
# Create the data dict with the remaining components
data = {}
data['tag'] = None if match[4] == '' else int(match[4])
data['route_name'] = None if match[5] == '' else match[5]
# Build the complete dict entry from the four components
# and the data.
# temp_dict = parent_dict[key] = parent_dict.get(key, {})
# This creates the keyed dict in the parent_dict if it doesn't
# exist, or reuses the existing keyed dict.
# The temp_dict is used to make things more readable.
ip_dict = routes[ip_dest] = routes.get(ip_dest, {})
nh_dict = ip_dict[next_hop] = ip_dict.get(next_hop, {})
nhip_dict = nh_dict[next_hop_ip] = nh_dict.get(next_hop_ip, {})
nhip_dict[distance] = data
return routes
def create(self, ip_dest, next_hop, **kwargs):
"""Create a static route
Args:
ip_dest (string): The ip address of the destination in the
form of A.B.C.D/E
next_hop (string): The next hop interface or ip address
kwargs (dict): A key/value dictionary containing
next_hop_ip (string): The next hop address on destination
interface
distance (string): Administrative distance for this route
tag (string): Route tag
route_name (string): Route name
Returns:
True if the operation succeeds, otherwise False.
"""
# Call _set_route with delete and default set to False
return self._set_route(ip_dest, next_hop, **kwargs)
def delete(self, ip_dest, next_hop, **kwargs):
"""Delete a static route
Args:
ip_dest (string): The ip address of the destination in the
form of A.B.C.D/E
next_hop (string): The next hop interface or ip address
kwargs (dict): A key/value dictionary containing
next_hop_ip (string): The next hop address on destination
interface
distance (string): Administrative distance for this route
tag (string): Route tag
route_name (string): Route name
Returns:
True if the operation succeeds, otherwise False.
"""
# Call _set_route with the delete flag set to True
kwargs.update({'delete': True})
return self._set_route(ip_dest, next_hop, **kwargs)
def default(self, ip_dest, next_hop, **kwargs):
"""Set a static route to default (i.e. delete the matching route)
Args:
ip_dest (string): The ip address of the destination in the
form of A.B.C.D/E
next_hop (string): The next hop interface or ip address
kwargs (dict): A key/value dictionary containing
next_hop_ip (string): The next hop address on destination
interface
distance (string): Administrative distance for this route
tag (string): Route tag
route_name (string): Route name
Returns:
True if the operation succeeds, otherwise False.
"""
# Call _set_route with the default flag set to True
kwargs.update({'default': True})
return self._set_route(ip_dest, next_hop, **kwargs)
def set_tag(self, ip_dest, next_hop, **kwargs):
"""Set the tag value for the specified route
Args:
ip_dest (string): The ip address of the destination in the
form of A.B.C.D/E
next_hop (string): The next hop interface or ip address
kwargs (dict): A key/value dictionary containing
next_hop_ip (string): The next hop address on destination
interface
distance (string): Administrative distance for this route
tag (string): Route tag
route_name (string): Route name
Returns:
True if the operation succeeds, otherwise False.
Notes:
Any existing route_name value must be included in the call to
set_tag, otherwise the route_name will be reset
by the call to EOS.
"""
# Call _set_route with the new tag information
return self._set_route(ip_dest, next_hop, **kwargs)
def set_route_name(self, ip_dest, next_hop, **kwargs):
"""Set the route_name value for the specified route
Args:
ip_dest (string): The ip address of the destination in the
form of A.B.C.D/E
next_hop (string): The next hop interface or ip address
kwargs (dict): A key/value dictionary containing
next_hop_ip (string): The next hop address on destination
interface
distance (string): Administrative distance for this route
tag (string): Route tag
route_name (string): Route name
Returns:
True if the operation succeeds, otherwise False.
Notes:
Any existing tag value must be included in the call to
set_route_name, otherwise the tag will be reset
by the call to EOS.
"""
# Call _set_route with the new route_name information
return self._set_route(ip_dest, next_hop, **kwargs)
def _build_commands(self, ip_dest, next_hop, **kwargs):
"""Build the EOS command string for ip route interactions.
Args:
ip_dest (string): The ip address of the destination in the
form of A.B.C.D/E
next_hop (string): The next hop interface or ip address
kwargs (dict): A key/value dictionary containing
next_hop_ip (string): The next hop address on destination
interface
distance (string): Administrative distance for this route
tag (string): Route tag
route_name (string): Route name
Returns the ip route command string to be sent to the switch for
the given set of parameters.
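Example (illustrative)::
_build_commands('1.2.3.4/32', 'Ethernet1', distance=10,
tag=100, route_name='test')
# -> 'ip route 1.2.3.4/32 Ethernet1 10 tag 100 name test'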
"""
commands = "ip route %s %s" % (ip_dest, next_hop)
next_hop_ip = kwargs.get('next_hop_ip', None)
distance = kwargs.get('distance', None)
tag = kwargs.get('tag', None)
route_name = kwargs.get('route_name', None)
if next_hop_ip is not None:
commands += " %s" % next_hop_ip
if distance is not None:
commands += " %s" % distance
if tag is not None:
commands += " tag %s" % tag
if route_name is not None:
commands += " name %s" % route_name
return commands
def _set_route(self, ip_dest, next_hop, **kwargs):
"""Configure a static route
Args:
ip_dest (string): The ip address of the destination in the
form of A.B.C.D/E
next_hop (string): The next hop interface or ip address
kwargs (dict): A key/value dictionary containing
next_hop_ip (string): The next hop address on destination
interface
distance (string): Administrative distance for this route
tag (string): Route tag
route_name (string): Route name
delete (boolean): If true, deletes the specified route
instead of creating or setting values for the route
default (boolean): If true, defaults the specified route
instead of creating or setting values for the route
Returns:
True if the operation succeeds, otherwise False.
"""
commands = self._build_commands(ip_dest, next_hop, **kwargs)
delete = kwargs.get('delete', False)
default = kwargs.get('default', False)
# Prefix with 'no' if delete is set
if delete:
commands = "no " + commands
# Or prefix with 'default' if default is set
elif default:
commands = "default " + commands
return self.configure(commands)
def instance(node):
"""Returns an instance of StaticRoute
This method will create and return an instance of the StaticRoute
object passing the value of API to the object. The instance method
is required for the resource to be autoloaded by the Node object
Args:
node (Node): The node argument passes an instance of Node to the
resource
"""
return StaticRoute(node)
|
|
from sure import expect
from freezegun import freeze_time
from moto.swf.models import (
ActivityType,
Timeout,
WorkflowType,
WorkflowExecution,
)
from moto.swf.exceptions import (
SWFDefaultUndefinedFault,
)
from ..utils import (
auto_start_decision_tasks,
get_basic_domain,
get_basic_workflow_type,
make_workflow_execution,
)
VALID_ACTIVITY_TASK_ATTRIBUTES = {
"activityId": "my-activity-001",
"activityType": { "name": "test-activity", "version": "v1.1" },
"taskList": { "name": "task-list-name" },
"scheduleToStartTimeout": "600",
"scheduleToCloseTimeout": "600",
"startToCloseTimeout": "600",
"heartbeatTimeout": "300",
}
def test_workflow_execution_creation():
domain = get_basic_domain()
wft = get_basic_workflow_type()
wfe = WorkflowExecution(domain, wft, "ab1234", child_policy="TERMINATE")
wfe.domain.should.equal(domain)
wfe.workflow_type.should.equal(wft)
wfe.child_policy.should.equal("TERMINATE")
def test_workflow_execution_creation_child_policy_logic():
domain = get_basic_domain()
WorkflowExecution(
domain,
WorkflowType(
"test-workflow", "v1.0",
task_list="queue", default_child_policy="ABANDON",
default_execution_start_to_close_timeout="300",
default_task_start_to_close_timeout="300",
),
"ab1234"
).child_policy.should.equal("ABANDON")
WorkflowExecution(
domain,
WorkflowType(
"test-workflow", "v1.0", task_list="queue",
default_execution_start_to_close_timeout="300",
default_task_start_to_close_timeout="300",
),
"ab1234",
child_policy="REQUEST_CANCEL"
).child_policy.should.equal("REQUEST_CANCEL")
WorkflowExecution.when.called_with(
domain,
WorkflowType("test-workflow", "v1.0"), "ab1234"
).should.throw(SWFDefaultUndefinedFault)
def test_workflow_execution_string_representation():
wfe = make_workflow_execution(child_policy="TERMINATE")
str(wfe).should.match(r"^WorkflowExecution\(run_id: .*\)")
def test_workflow_execution_generates_a_random_run_id():
domain = get_basic_domain()
wft = get_basic_workflow_type()
wfe1 = WorkflowExecution(domain, wft, "ab1234", child_policy="TERMINATE")
wfe2 = WorkflowExecution(domain, wft, "ab1235", child_policy="TERMINATE")
wfe1.run_id.should_not.equal(wfe2.run_id)
def test_workflow_execution_short_dict_representation():
domain = get_basic_domain()
wf_type = WorkflowType(
"test-workflow", "v1.0",
task_list="queue", default_child_policy="ABANDON",
default_execution_start_to_close_timeout="300",
default_task_start_to_close_timeout="300",
)
wfe = WorkflowExecution(domain, wf_type, "ab1234")
sd = wfe.to_short_dict()
sd["workflowId"].should.equal("ab1234")
sd.should.contain("runId")
def test_workflow_execution_medium_dict_representation():
domain = get_basic_domain()
wf_type = WorkflowType(
"test-workflow", "v1.0",
task_list="queue", default_child_policy="ABANDON",
default_execution_start_to_close_timeout="300",
default_task_start_to_close_timeout="300",
)
wfe = WorkflowExecution(domain, wf_type, "ab1234")
md = wfe.to_medium_dict()
md["execution"].should.equal(wfe.to_short_dict())
md["workflowType"].should.equal(wf_type.to_short_dict())
md["startTimestamp"].should.be.a('float')
md["executionStatus"].should.equal("OPEN")
md["cancelRequested"].should.be.falsy
md.should_not.contain("tagList")
wfe.tag_list = ["foo", "bar", "baz"]
md = wfe.to_medium_dict()
md["tagList"].should.equal(["foo", "bar", "baz"])
def test_workflow_execution_full_dict_representation():
domain = get_basic_domain()
wf_type = WorkflowType(
"test-workflow", "v1.0",
task_list="queue", default_child_policy="ABANDON",
default_execution_start_to_close_timeout="300",
default_task_start_to_close_timeout="300",
)
wfe = WorkflowExecution(domain, wf_type, "ab1234")
fd = wfe.to_full_dict()
fd["executionInfo"].should.equal(wfe.to_medium_dict())
fd["openCounts"]["openTimers"].should.equal(0)
fd["openCounts"]["openDecisionTasks"].should.equal(0)
fd["openCounts"]["openActivityTasks"].should.equal(0)
fd["executionConfiguration"].should.equal({
"childPolicy": "ABANDON",
"executionStartToCloseTimeout": "300",
"taskList": {"name": "queue"},
"taskStartToCloseTimeout": "300",
})
def test_workflow_execution_schedule_decision_task():
wfe = make_workflow_execution()
wfe.open_counts["openDecisionTasks"].should.equal(0)
wfe.schedule_decision_task()
wfe.open_counts["openDecisionTasks"].should.equal(1)
def test_workflow_execution_start_decision_task():
wfe = make_workflow_execution()
wfe.schedule_decision_task()
dt = wfe.decision_tasks[0]
wfe.start_decision_task(dt.task_token, identity="srv01")
dt = wfe.decision_tasks[0]
dt.state.should.equal("STARTED")
wfe.events()[-1].event_type.should.equal("DecisionTaskStarted")
wfe.events()[-1].event_attributes["identity"].should.equal("srv01")
def test_workflow_execution_history_events_ids():
wfe = make_workflow_execution()
wfe._add_event("WorkflowExecutionStarted")
wfe._add_event("DecisionTaskScheduled")
wfe._add_event("DecisionTaskStarted")
ids = [evt.event_id for evt in wfe.events()]
ids.should.equal([1, 2, 3])
@freeze_time("2015-01-01 12:00:00")
def test_workflow_execution_start():
wfe = make_workflow_execution()
wfe.events().should.equal([])
wfe.start()
wfe.start_timestamp.should.equal(1420113600.0)
wfe.events().should.have.length_of(2)
wfe.events()[0].event_type.should.equal("WorkflowExecutionStarted")
wfe.events()[1].event_type.should.equal("DecisionTaskScheduled")
@freeze_time("2015-01-02 12:00:00")
def test_workflow_execution_complete():
wfe = make_workflow_execution()
wfe.complete(123, result="foo")
wfe.execution_status.should.equal("CLOSED")
wfe.close_status.should.equal("COMPLETED")
wfe.close_timestamp.should.equal(1420200000.0)
wfe.events()[-1].event_type.should.equal("WorkflowExecutionCompleted")
wfe.events()[-1].event_attributes["decisionTaskCompletedEventId"].should.equal(123)
wfe.events()[-1].event_attributes["result"].should.equal("foo")
@freeze_time("2015-01-02 12:00:00")
def test_workflow_execution_fail():
wfe = make_workflow_execution()
wfe.fail(123, details="some details", reason="my rules")
wfe.execution_status.should.equal("CLOSED")
wfe.close_status.should.equal("FAILED")
wfe.close_timestamp.should.equal(1420200000.0)
wfe.events()[-1].event_type.should.equal("WorkflowExecutionFailed")
wfe.events()[-1].event_attributes["decisionTaskCompletedEventId"].should.equal(123)
wfe.events()[-1].event_attributes["details"].should.equal("some details")
wfe.events()[-1].event_attributes["reason"].should.equal("my rules")
@freeze_time("2015-01-01 12:00:00")
def test_workflow_execution_schedule_activity_task():
wfe = make_workflow_execution()
wfe.latest_activity_task_timestamp.should.be.none
wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES)
wfe.latest_activity_task_timestamp.should.equal(1420113600.0)
wfe.open_counts["openActivityTasks"].should.equal(1)
last_event = wfe.events()[-1]
last_event.event_type.should.equal("ActivityTaskScheduled")
last_event.event_attributes["decisionTaskCompletedEventId"].should.equal(123)
last_event.event_attributes["taskList"]["name"].should.equal("task-list-name")
wfe.activity_tasks.should.have.length_of(1)
task = wfe.activity_tasks[0]
task.activity_id.should.equal("my-activity-001")
task.activity_type.name.should.equal("test-activity")
wfe.domain.activity_task_lists["task-list-name"].should.contain(task)
def test_workflow_execution_schedule_activity_task_without_task_list_should_take_default():
wfe = make_workflow_execution()
wfe.domain.add_type(
ActivityType("test-activity", "v1.2", task_list="foobar")
)
wfe.schedule_activity_task(123, {
"activityId": "my-activity-001",
"activityType": { "name": "test-activity", "version": "v1.2" },
"scheduleToStartTimeout": "600",
"scheduleToCloseTimeout": "600",
"startToCloseTimeout": "600",
"heartbeatTimeout": "300",
})
wfe.open_counts["openActivityTasks"].should.equal(1)
last_event = wfe.events()[-1]
last_event.event_type.should.equal("ActivityTaskScheduled")
last_event.event_attributes["taskList"]["name"].should.equal("foobar")
task = wfe.activity_tasks[0]
wfe.domain.activity_task_lists["foobar"].should.contain(task)
def test_workflow_execution_schedule_activity_task_should_fail_if_wrong_attributes():
wfe = make_workflow_execution()
at = ActivityType("test-activity", "v1.1")
at.status = "DEPRECATED"
wfe.domain.add_type(at)
wfe.domain.add_type(ActivityType("test-activity", "v1.2"))
hsh = {
"activityId": "my-activity-001",
"activityType": { "name": "test-activity-does-not-exists", "version": "v1.1" },
}
wfe.schedule_activity_task(123, hsh)
last_event = wfe.events()[-1]
last_event.event_type.should.equal("ScheduleActivityTaskFailed")
last_event.event_attributes["cause"].should.equal("ACTIVITY_TYPE_DOES_NOT_EXIST")
hsh["activityType"]["name"] = "test-activity"
wfe.schedule_activity_task(123, hsh)
last_event = wfe.events()[-1]
last_event.event_type.should.equal("ScheduleActivityTaskFailed")
last_event.event_attributes["cause"].should.equal("ACTIVITY_TYPE_DEPRECATED")
hsh["activityType"]["version"] = "v1.2"
wfe.schedule_activity_task(123, hsh)
last_event = wfe.events()[-1]
last_event.event_type.should.equal("ScheduleActivityTaskFailed")
last_event.event_attributes["cause"].should.equal("DEFAULT_TASK_LIST_UNDEFINED")
hsh["taskList"] = { "name": "foobar" }
wfe.schedule_activity_task(123, hsh)
last_event = wfe.events()[-1]
last_event.event_type.should.equal("ScheduleActivityTaskFailed")
last_event.event_attributes["cause"].should.equal("DEFAULT_SCHEDULE_TO_START_TIMEOUT_UNDEFINED")
hsh["scheduleToStartTimeout"] = "600"
wfe.schedule_activity_task(123, hsh)
last_event = wfe.events()[-1]
last_event.event_type.should.equal("ScheduleActivityTaskFailed")
last_event.event_attributes["cause"].should.equal("DEFAULT_SCHEDULE_TO_CLOSE_TIMEOUT_UNDEFINED")
hsh["scheduleToCloseTimeout"] = "600"
wfe.schedule_activity_task(123, hsh)
last_event = wfe.events()[-1]
last_event.event_type.should.equal("ScheduleActivityTaskFailed")
last_event.event_attributes["cause"].should.equal("DEFAULT_START_TO_CLOSE_TIMEOUT_UNDEFINED")
hsh["startToCloseTimeout"] = "600"
wfe.schedule_activity_task(123, hsh)
last_event = wfe.events()[-1]
last_event.event_type.should.equal("ScheduleActivityTaskFailed")
last_event.event_attributes["cause"].should.equal("DEFAULT_HEARTBEAT_TIMEOUT_UNDEFINED")
wfe.open_counts["openActivityTasks"].should.equal(0)
wfe.activity_tasks.should.have.length_of(0)
wfe.domain.activity_task_lists.should.have.length_of(0)
hsh["heartbeatTimeout"] = "300"
wfe.schedule_activity_task(123, hsh)
last_event = wfe.events()[-1]
last_event.event_type.should.equal("ActivityTaskScheduled")
task = wfe.activity_tasks[0]
wfe.domain.activity_task_lists["foobar"].should.contain(task)
wfe.open_counts["openDecisionTasks"].should.equal(0)
wfe.open_counts["openActivityTasks"].should.equal(1)
def test_workflow_execution_schedule_activity_task_failure_triggers_new_decision():
wfe = make_workflow_execution()
wfe.start()
task_token = wfe.decision_tasks[-1].task_token
wfe.start_decision_task(task_token)
wfe.complete_decision_task(task_token,
execution_context="free-form execution context",
decisions=[
{
"decisionType": "ScheduleActivityTask",
"scheduleActivityTaskDecisionAttributes": {
"activityId": "my-activity-001",
"activityType": { "name": "test-activity-does-not-exist", "version": "v1.2" },
}
},
{
"decisionType": "ScheduleActivityTask",
"scheduleActivityTaskDecisionAttributes": {
"activityId": "my-activity-001",
"activityType": { "name": "test-activity-does-not-exist", "version": "v1.2" },
}
},
])
wfe.latest_execution_context.should.equal("free-form execution context")
wfe.open_counts["openActivityTasks"].should.equal(0)
wfe.open_counts["openDecisionTasks"].should.equal(1)
last_events = wfe.events()[-3:]
last_events[0].event_type.should.equal("ScheduleActivityTaskFailed")
last_events[1].event_type.should.equal("ScheduleActivityTaskFailed")
last_events[2].event_type.should.equal("DecisionTaskScheduled")
def test_workflow_execution_schedule_activity_task_with_same_activity_id():
wfe = make_workflow_execution()
wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES)
wfe.open_counts["openActivityTasks"].should.equal(1)
last_event = wfe.events()[-1]
last_event.event_type.should.equal("ActivityTaskScheduled")
wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES)
wfe.open_counts["openActivityTasks"].should.equal(1)
last_event = wfe.events()[-1]
last_event.event_type.should.equal("ScheduleActivityTaskFailed")
last_event.event_attributes["cause"].should.equal("ACTIVITY_ID_ALREADY_IN_USE")
def test_workflow_execution_start_activity_task():
wfe = make_workflow_execution()
wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES)
task_token = wfe.activity_tasks[-1].task_token
wfe.start_activity_task(task_token, identity="worker01")
task = wfe.activity_tasks[-1]
task.state.should.equal("STARTED")
wfe.events()[-1].event_type.should.equal("ActivityTaskStarted")
wfe.events()[-1].event_attributes["identity"].should.equal("worker01")
def test_complete_activity_task():
wfe = make_workflow_execution()
wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES)
task_token = wfe.activity_tasks[-1].task_token
wfe.open_counts["openActivityTasks"].should.equal(1)
wfe.open_counts["openDecisionTasks"].should.equal(0)
wfe.start_activity_task(task_token, identity="worker01")
wfe.complete_activity_task(task_token, result="a superb result")
task = wfe.activity_tasks[-1]
task.state.should.equal("COMPLETED")
wfe.events()[-2].event_type.should.equal("ActivityTaskCompleted")
wfe.events()[-1].event_type.should.equal("DecisionTaskScheduled")
wfe.open_counts["openActivityTasks"].should.equal(0)
wfe.open_counts["openDecisionTasks"].should.equal(1)
def test_terminate():
wfe = make_workflow_execution()
wfe.schedule_decision_task()
wfe.terminate()
wfe.execution_status.should.equal("CLOSED")
wfe.close_status.should.equal("TERMINATED")
wfe.close_cause.should.equal("OPERATOR_INITIATED")
wfe.open_counts["openDecisionTasks"].should.equal(1)
last_event = wfe.events()[-1]
last_event.event_type.should.equal("WorkflowExecutionTerminated")
# take default child_policy if not provided (as here)
last_event.event_attributes["childPolicy"].should.equal("ABANDON")
def test_first_timeout():
wfe = make_workflow_execution()
wfe.first_timeout().should.be.none
with freeze_time("2015-01-01 12:00:00"):
wfe.start()
wfe.first_timeout().should.be.none
with freeze_time("2015-01-01 14:01"):
# 2 hours timeout reached
wfe.first_timeout().should.be.a(Timeout)
# See moto/swf/models/workflow_execution.py "_process_timeouts()" for more details
def test_timeouts_are_processed_in_order_and_reevaluated():
# Let's make a Workflow Execution with the following properties:
# - execution start to close timeout of 8 mins
# - (decision) task start to close timeout of 5 mins
#
# Now start the workflow execution, and look at the history 15 mins later:
# - a first decision task is fired just after workflow execution start
# - the first decision task should have timed out after 5 mins
# - that fires a new decision task (which we hack to start automatically)
# - then the workflow timeouts after 8 mins (shows gradual reevaluation)
# - but the last scheduled decision task should *not* timeout (workflow closed)
with freeze_time("2015-01-01 12:00:00"):
wfe = make_workflow_execution(
execution_start_to_close_timeout=8*60,
task_start_to_close_timeout=5*60,
)
# decision will automatically start
wfe = auto_start_decision_tasks(wfe)
wfe.start()
event_idx = len(wfe.events())
with freeze_time("2015-01-01 12:08:00"):
wfe._process_timeouts()
event_types = [e.event_type for e in wfe.events()[event_idx:]]
event_types.should.equal([
"DecisionTaskTimedOut",
"DecisionTaskScheduled",
"DecisionTaskStarted",
"WorkflowExecutionTimedOut",
])
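# For reference, a minimal sketch of the auto_start_decision_tasks() helper
# used in the timeout test above. The real helper lives elsewhere in this
# test suite; this is an assumption about its behavior, not the actual
# implementation.
def _auto_start_decision_tasks_sketch(wfe):
    # Wrap schedule_decision_task() so every scheduled decision task is
    # started immediately, letting _process_timeouts() exercise the
    # DecisionTaskStarted -> DecisionTaskTimedOut transition without a
    # separate worker loop.
    original_schedule = wfe.schedule_decision_task
    def schedule_and_start():
        original_schedule()
        decision_task = wfe.decision_tasks[-1]
        wfe.start_decision_task(decision_task.task_token)
    wfe.schedule_decision_task = schedule_and_start
    return wfe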
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RoutesOperations:
"""RoutesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_table_name: str,
route_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_table_name: str,
route_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_table_name: str,
route_name: str,
**kwargs: Any
) -> "_models.Route":
"""Gets the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Route, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_10_01.models.Route
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_table_name: str,
route_name: str,
route_parameters: "_models.Route",
**kwargs: Any
) -> "_models.Route":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_parameters, 'Route')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Route', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_table_name: str,
route_name: str,
route_parameters: "_models.Route",
**kwargs: Any
) -> AsyncLROPoller["_models.Route"]:
"""Creates or updates a route in the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:param route_parameters: Parameters supplied to the create or update route operation.
:type route_parameters: ~azure.mgmt.network.v2018_10_01.models.Route
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Route or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.Route]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
route_parameters=route_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def list(
self,
resource_group_name: str,
route_table_name: str,
**kwargs: Any
) -> AsyncIterable["_models.RouteListResult"]:
"""Gets all routes in a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_10_01.models.RouteListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes'} # type: ignore
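# Usage sketch (illustrative, not part of the generated client): how the
# poller-based operations above are typically driven. NetworkManagementClient
# and DefaultAzureCredential are assumptions based on the azure-mgmt-network
# and azure-identity aio packages; resource names are hypothetical.
async def _example_route_lifecycle():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.aio import NetworkManagementClient
    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, "<subscription-id>") as client:
            # begin_create_or_update returns an AsyncLROPoller; result()
            # waits for the terminal Route resource.
            poller = await client.routes.begin_create_or_update(
                "my-rg", "my-route-table", "default-route",
                _models.Route(address_prefix="0.0.0.0/0", next_hop_type="Internet"),
            )
            route = await poller.result()
            # Plain GET, then page through the table with the async iterator.
            route = await client.routes.get("my-rg", "my-route-table", route.name)
            async for item in client.routes.list("my-rg", "my-route-table"):
                print(item.name)
            # Long-running delete resolves to None.
            delete_poller = await client.routes.begin_delete(
                "my-rg", "my-route-table", route.name)
            await delete_poller.result()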
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import ast
import itertools
import os
import pprint
import shutil
from abc import abstractmethod
from collections import OrderedDict, defaultdict
from pex.compatibility import string, to_bytes
from pex.installer import InstallerBase, Packager
from twitter.common.collections import OrderedSet
from twitter.common.dirutil.chroot import Chroot
from pants.backend.codegen.targets.python_antlr_library import PythonAntlrLibrary
from pants.backend.codegen.targets.python_thrift_library import PythonThriftLibrary
from pants.backend.python.antlr_builder import PythonAntlrBuilder
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.backend.python.targets.python_target import PythonTarget
from pants.backend.python.tasks.python_task import PythonTask
from pants.backend.python.thrift_builder import PythonThriftBuilder
from pants.base.address_lookup_error import AddressLookupError
from pants.base.build_environment import get_buildroot
from pants.base.build_graph import sort_targets
from pants.base.exceptions import TargetDefinitionException, TaskError
from pants.util.dirutil import safe_rmtree, safe_walk
from pants.util.meta import AbstractClass
SETUP_BOILERPLATE = """
# DO NOT EDIT THIS FILE -- AUTOGENERATED BY PANTS
# Target: {setup_target}
from setuptools import setup
setup(**
{setup_dict}
)
"""
class SetupPyRunner(InstallerBase):
def __init__(self, source_dir, setup_command, **kw):
self.__setup_command = setup_command.split()
super(SetupPyRunner, self).__init__(source_dir, **kw)
def _setup_command(self):
return self.__setup_command
class TargetAncestorIterator(object):
"""Supports iteration of target ancestor lineages."""
def __init__(self, build_graph):
self._build_graph = build_graph
def iter_target_siblings_and_ancestors(self, target):
"""Produces an iterator over a target's siblings and ancestor lineage.
    :returns: A target iterator yielding the target and its siblings and then its ancestors from
      nearest to furthest removed.
"""
def iter_targets_in_spec_path(spec_path):
try:
for address in self._build_graph.address_mapper.addresses_in_spec_path(spec_path):
self._build_graph.inject_address_closure(address)
yield self._build_graph.get_target(address)
except AddressLookupError:
# A spec path may not have any addresses registered under it and that's ok.
# For example:
# a:a
# a/b/c:c
#
# Here a/b contains no addresses.
pass
def iter_siblings_and_ancestors(spec_path):
for sibling in iter_targets_in_spec_path(spec_path):
yield sibling
parent_spec_path = os.path.dirname(spec_path)
if parent_spec_path != spec_path:
for parent in iter_siblings_and_ancestors(parent_spec_path):
yield parent
for target in iter_siblings_and_ancestors(target.address.spec_path):
yield target
# TODO(John Sirois): Get jvm and python publishing on the same page.
# Either python should require all nodes in an exported target closure be either exported or
# 3rdparty or else jvm publishing should use an ExportedTargetDependencyCalculator to aggregate
# un-exported non-3rdparty interior nodes as needed. It seems like the latter is preferable since
# it can be used with a BUILD graph validator requiring completely exported subgraphs to enforce the
# former as a matter of local repo policy.
class ExportedTargetDependencyCalculator(AbstractClass):
"""Calculates the dependencies of exported targets.
  When a target is exported, many of its internal transitive library dependencies may be satisfied by
other internal targets that are also exported and "own" these internal transitive library deps.
In other words, exported targets generally can have reduced dependency sets and an
`ExportedTargetDependencyCalculator` can calculate these reduced dependency sets.
To use an `ExportedTargetDependencyCalculator` a subclass must be created that implements two
predicates and a walk function for the class of targets in question. For example, a
`JvmDependencyCalculator` would need to be able to identify jvm third party dependency targets,
and local exportable jvm library targets. In addition it would need to define a walk function
that knew how to walk a jvm target's dependencies.
"""
class UnExportedError(TaskError):
"""Indicates a target is not exported."""
class NoOwnerError(TaskError):
"""Indicates an exportable target has no owning exported target."""
class AmbiguousOwnerError(TaskError):
"""Indicates an exportable target has more than one owning exported target."""
def __init__(self, build_graph):
self._ancestor_iterator = TargetAncestorIterator(build_graph)
@abstractmethod
def is_third_party(self, target):
"""Identifies targets that are exported by third parties.
:param target: The target to identify.
:returns: `True` if the given `target` represents a third party dependency.
"""
@abstractmethod
def is_exported(self, target):
"""Identifies targets of interest that are exported from this project.
:param target: The target to identify.
:returns: `True` if the given `target` represents a top-level target exported from this project.
"""
@abstractmethod
def dependencies(self, target):
"""Returns an iterator over the dependencies of the given target.
:param target: The target to iterate dependencies of.
:returns: An iterator over all of the target's dependencies.
"""
def _walk(self, target, visitor):
"""Walks the dependency graph for the given target.
:param target: The target to start the walk from.
:param visitor: A function that takes a target and returns `True` if its dependencies should
also be visited.
"""
visited = set()
def walk(current):
if current not in visited:
visited.add(current)
keep_going = visitor(current)
if keep_going:
for dependency in self.dependencies(current):
walk(dependency)
walk(target)
def _closure(self, target):
"""Return the target closure as defined by this dependency calculator's definition of a walk."""
closure = set()
def collect(current):
closure.add(current)
return True
self._walk(target, collect)
return closure
def reduced_dependencies(self, exported_target):
"""Calculates the reduced transitive dependencies for an exported target.
The reduced set of dependencies will be just those transitive dependencies "owned" by
the `exported_target`.
A target is considered "owned" if:
1. It's "3rdparty" and "directly reachable" from `exported_target` by at least 1 path.
2. It's not "3rdparty" and not "directly reachable" by any of `exported_target`'s "3rdparty"
dependencies.
Here "3rdparty" refers to targets identified as either `is_third_party` or `is_exported`.
And in this context "directly reachable" means the target can be reached by following a series
of dependency links from the `exported_target`, never crossing another exported target and
staying within the `exported_target` address space. It's the latter restriction that allows for
unambiguous ownership of exportable targets and mirrors the BUILD file convention of targets
only being able to own sources in their filesystem subtree. The single ambiguous case that can
arise is when there is more than one exported target in the same BUILD file family that can
"directly reach" a target in its address space.
:raises: `UnExportedError` if the given `exported_target` is not, in-fact, exported.
:raises: `NoOwnerError` if a transitive dependency is found with no proper owning exported
target.
:raises: `AmbiguousOwnerError` if there is more than one viable exported owner target for a
given transitive dependency.
"""
# The strategy adopted requires 3 passes:
# 1.) Walk the exported target to collect provisional owned exportable targets, but _not_
# 3rdparty since these may be introduced by exported subgraphs we discover in later steps!
# 2.) Determine the owner of each target collected in 1 by walking the ancestor chain to find
# the closest exported target. The ancestor chain is just all targets whose spec path is
    #      a prefix of the descendant. In other words, all targets in descendant's BUILD file family
# (its siblings), all targets in its parent directory BUILD file family, and so on.
# 3.) Finally walk the exported target once more, replacing each visited dependency with its
# owner.
if not self.is_exported(exported_target):
raise self.UnExportedError('Cannot calculate reduced dependencies for a non-exported '
'target, given: {}'.format(exported_target))
owner_by_owned_python_target = OrderedDict()
def collect_potentially_owned_python_targets(current):
if (current != exported_target) and not self.is_third_party(current):
owner_by_owned_python_target[current] = None # We can't know the owner in the 1st pass.
return (current == exported_target) or not self.is_exported(current)
self._walk(exported_target, collect_potentially_owned_python_targets)
for owned in owner_by_owned_python_target:
if not self.is_exported(owned):
potential_owners = set()
for potential_owner in self._ancestor_iterator.iter_target_siblings_and_ancestors(owned):
if self.is_exported(potential_owner) and owned in self._closure(potential_owner):
potential_owners.add(potential_owner)
if not potential_owners:
raise self.NoOwnerError('No exported target owner found for {}'.format(owned))
owner = potential_owners.pop()
if potential_owners:
ambiguous_owners = [o for o in potential_owners
if o.address.spec_path == owner.address.spec_path]
if ambiguous_owners:
raise self.AmbiguousOwnerError('Owners for {} are ambiguous. Found {} and '
'{} others: {}'.format(owned,
owner,
len(ambiguous_owners),
ambiguous_owners))
owner_by_owned_python_target[owned] = owner
reduced_dependencies = OrderedSet()
def collect_reduced_dependencies(current):
if current == exported_target:
return True
else:
# The provider will be one of:
# 1. `None`, ie: a 3rdparty requirement we should collect.
# 2. `exported_target`, ie: a local exportable target owned by `exported_target` that we
# should collect
# 3. Or else a local exportable target owned by some other exported target in which case
# we should collect the exported owner.
owner = owner_by_owned_python_target.get(current)
if owner is None or owner == exported_target:
reduced_dependencies.add(current)
else:
reduced_dependencies.add(owner)
return owner == exported_target
self._walk(exported_target, collect_reduced_dependencies)
return reduced_dependencies
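  # Worked example (illustrative, not from the original source): suppose
  # a:exported and a/b:exported are both exported and a:exported depends on
  # the un-exported a/lib:lib, which is directly reachable only from
  # a:exported. Then reduced_dependencies(a:exported) keeps a/lib:lib itself,
  # while any dependency path that crosses into a/b:exported's closure is
  # collapsed to a single dependency on a/b:exported.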
class SetupPy(PythonTask):
"""Generate setup.py-based Python projects from python_library targets."""
GENERATED_TARGETS = {
PythonAntlrLibrary: PythonAntlrBuilder,
PythonThriftLibrary: PythonThriftBuilder,
}
SOURCE_ROOT = b'src'
@staticmethod
def is_requirements(target):
return isinstance(target, PythonRequirementLibrary)
@staticmethod
def is_python_target(target):
return isinstance(target, PythonTarget)
@classmethod
def has_provides(cls, target):
return cls.is_python_target(target) and target.provides
class DependencyCalculator(ExportedTargetDependencyCalculator):
"""Calculates reduced dependencies for exported python targets."""
def is_third_party(self, target):
return SetupPy.is_requirements(target)
def is_exported(self, target):
return SetupPy.has_provides(target)
def dependencies(self, target):
for dependency in target.dependencies:
yield dependency
if self.is_exported(target):
for binary in target.provided_binaries.values():
yield binary
@classmethod
def register_options(cls, register):
super(SetupPy, cls).register_options(register)
register('--run',
help="The command to run against setup.py. Don't forget to quote any additional "
"parameters. If no run command is specified, pants will by default generate "
"and dump the source distribution.")
register('--recursive', action='store_true',
help='Transitively run setup_py on all provided downstream targets.')
@classmethod
def iter_entry_points(cls, target):
"""Yields the name, entry_point pairs of binary targets in this PythonArtifact."""
for name, binary_target in target.provided_binaries.items():
concrete_target = binary_target
if not isinstance(concrete_target, PythonBinary) or concrete_target.entry_point is None:
raise TargetDefinitionException(target,
'Cannot add a binary to a PythonArtifact if it does not contain an entry_point.')
yield name, concrete_target.entry_point
@classmethod
def declares_namespace_package(cls, filename):
"""Given a filename, walk its ast and determine if it is declaring a namespace package.
Intended only for __init__.py files though it will work for any .py.
"""
with open(filename) as fp:
init_py = ast.parse(fp.read(), filename)
calls = [node for node in ast.walk(init_py) if isinstance(node, ast.Call)]
for call in calls:
if len(call.args) != 1:
continue
if isinstance(call.func, ast.Attribute) and call.func.attr != 'declare_namespace':
continue
if isinstance(call.func, ast.Name) and call.func.id != 'declare_namespace':
continue
if isinstance(call.args[0], ast.Name) and call.args[0].id == '__name__':
return True
return False
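  # For reference, the setuptools convention this detects is an __init__.py
  # containing one of:
  #   __import__('pkg_resources').declare_namespace(__name__)
  # or:
  #   from pkg_resources import declare_namespace
  #   declare_namespace(__name__)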
@classmethod
def nearest_subpackage(cls, package, all_packages):
"""Given a package, find its nearest parent in all_packages."""
def shared_prefix(candidate):
zipped = itertools.izip(package.split('.'), candidate.split('.'))
matching = itertools.takewhile(lambda pair: pair[0] == pair[1], zipped)
return [pair[0] for pair in matching]
shared_packages = list(filter(None, map(shared_prefix, all_packages)))
return '.'.join(max(shared_packages, key=len)) if shared_packages else package
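  # Example (illustrative): with package='a.b.c' and
  # all_packages={'a', 'a.b', 'x'}, the longest shared prefix is ['a', 'b'],
  # so 'a.b' is returned; when no package shares a prefix, the package itself
  # is returned unchanged.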
@classmethod
def find_packages(cls, chroot, log=None):
"""Detect packages, namespace packages and resources from an existing chroot.
:returns: a tuple of:
set(packages)
set(namespace_packages)
map(package => set(files))
"""
base = os.path.join(chroot.path(), cls.SOURCE_ROOT)
packages, namespace_packages = set(), set()
resources = defaultdict(set)
def iter_files():
for root, _, files in safe_walk(base):
module = os.path.relpath(root, base).replace(os.path.sep, '.')
for filename in files:
yield module, filename, os.path.join(root, filename)
# establish packages, namespace packages in first pass
for module, filename, real_filename in iter_files():
if filename != '__init__.py':
continue
packages.add(module)
if cls.declares_namespace_package(real_filename):
namespace_packages.add(module)
# second pass establishes non-source content (resources)
for module, filename, real_filename in iter_files():
if filename.endswith('.py'):
if module not in packages:
# TODO(wickman) Consider changing this to a full-on error as it
# could indicate bad BUILD hygiene.
# raise cls.UndefinedSource('{} is source but does not belong to a package!'.format(filename))
if log:
log.warn('{} is source but does not belong to a package.'.format(real_filename))
else:
continue
submodule = cls.nearest_subpackage(module, packages)
if submodule == module:
resources[submodule].add(filename)
else:
assert module.startswith(submodule + '.')
relative_module = module[len(submodule) + 1:]
relative_filename = os.path.join(relative_module.replace('.', os.path.sep), filename)
resources[submodule].add(relative_filename)
return packages, namespace_packages, resources
@classmethod
def install_requires(cls, reduced_dependencies):
install_requires = OrderedSet()
for dep in reduced_dependencies:
if cls.is_requirements(dep):
for req in dep.payload.requirements:
install_requires.add(str(req.requirement))
elif cls.has_provides(dep):
install_requires.add(dep.provides.key)
return install_requires
def __init__(self, *args, **kwargs):
super(SetupPy, self).__init__(*args, **kwargs)
self._root = get_buildroot()
self._run = self.get_options().run
self._recursive = self.get_options().recursive
def iter_generated_sources(self, target):
# This is sort of facepalmy -- python.new will make this much better.
for target_type, target_builder in self.GENERATED_TARGETS.items():
if isinstance(target, target_type):
builder_cls = target_builder
break
else:
raise TypeError(
'iter_generated_sources could not find suitable code generator for {}'.format(type(target)))
builder = builder_cls(target, self._root, self.context.options)
builder.generate()
for root, _, files in safe_walk(builder.package_root):
for fn in files:
target_file = os.path.join(root, fn)
yield os.path.relpath(target_file, builder.package_root), target_file
def write_contents(self, root_target, reduced_dependencies, chroot):
"""Write contents of the target."""
def write_target_source(target, src):
chroot.link(os.path.join(target.target_base, src), os.path.join(self.SOURCE_ROOT, src))
# check parent __init__.pys to see if they also need to be linked. this is to allow
# us to determine if they belong to regular packages or namespace packages.
while True:
src = os.path.dirname(src)
if not src:
# Do not allow the repository root to leak (i.e. '.' should not be a package in setup.py)
break
if os.path.exists(os.path.join(target.target_base, src, '__init__.py')):
chroot.link(os.path.join(target.target_base, src, '__init__.py'),
os.path.join(self.SOURCE_ROOT, src, '__init__.py'))
def write_codegen_source(relpath, abspath):
chroot.link(abspath, os.path.join(self.SOURCE_ROOT, relpath))
def write_target(target):
if isinstance(target, tuple(self.GENERATED_TARGETS.keys())):
for relpath, abspath in self.iter_generated_sources(target):
write_codegen_source(relpath, abspath)
else:
sources_and_resources = (list(target.payload.sources.relative_to_buildroot()) +
list(target.payload.resources.relative_to_buildroot()))
for rel_source in sources_and_resources:
abs_source_path = os.path.join(get_buildroot(), rel_source)
abs_source_root_path = os.path.join(get_buildroot(), target.target_base)
source_root_relative_path = os.path.relpath(abs_source_path, abs_source_root_path)
write_target_source(target, source_root_relative_path)
write_target(root_target)
for dependency in reduced_dependencies:
if self.is_python_target(dependency) and not dependency.provides:
write_target(dependency)
def write_setup(self, root_target, reduced_dependencies, chroot):
"""Write the setup.py of a target.
Must be run after writing the contents to the chroot.
"""
# NB: several explicit str conversions below force non-unicode strings in order to comply
# with setuptools expectations.
setup_keywords = root_target.provides.setup_py_keywords.copy()
package_dir = {b'': self.SOURCE_ROOT}
packages, namespace_packages, resources = self.find_packages(chroot, self.context.log)
if namespace_packages:
setup_keywords['namespace_packages'] = list(sorted(namespace_packages))
if packages:
setup_keywords.update(
package_dir=package_dir,
packages=list(sorted(packages)),
package_data=dict((str(package), list(map(str, rs)))
for (package, rs) in resources.items()))
setup_keywords['install_requires'] = list(self.install_requires(reduced_dependencies))
for binary_name, entry_point in self.iter_entry_points(root_target):
if 'entry_points' not in setup_keywords:
setup_keywords['entry_points'] = {}
if 'console_scripts' not in setup_keywords['entry_points']:
setup_keywords['entry_points']['console_scripts'] = []
setup_keywords['entry_points']['console_scripts'].append(
'{} = {}'.format(binary_name, entry_point))
# From http://stackoverflow.com/a/13105359
def convert(input):
if isinstance(input, dict):
out = dict()
for key, value in input.items():
out[convert(key)] = convert(value)
return out
elif isinstance(input, list):
return [convert(element) for element in input]
elif isinstance(input, string):
return to_bytes(input)
else:
return input
# Distutils does not support unicode strings in setup.py, so we must
# explicitly convert to binary strings as pants uses unicode_literals.
# Ideally we would write the output stream with an encoding, however,
# pprint.pformat embeds u's in the string itself during conversion.
# For that reason we convert each unicode string independently.
#
# hoth:~ travis$ python
# Python 2.6.8 (unknown, Aug 25 2013, 00:04:29)
# [GCC 4.2.1 Compatible Apple LLVM 5.0 (clang-500.0.68)] on darwin
# Type "help", "copyright", "credits" or "license" for more information.
# >>> import pprint
# >>> data = {u'entry_points': {u'console_scripts': [u'pants = pants.bin.pants_exe:main']}}
# >>> pprint.pformat(data, indent=4)
# "{ u'entry_points': { u'console_scripts': [ u'pants = pants.bin.pants_exe:main']}}"
# >>>
#
# For more information, see http://bugs.python.org/issue13943
chroot.write(SETUP_BOILERPLATE.format(
setup_dict=pprint.pformat(convert(setup_keywords), indent=4),
setup_target=repr(root_target)
), 'setup.py')
# make sure that setup.py is included
chroot.write('include *.py'.encode('utf8'), 'MANIFEST.in')
def create_setup_py(self, target, dist_dir):
chroot = Chroot(dist_dir, name=target.provides.name)
dependency_calculator = self.DependencyCalculator(self.context.build_graph)
reduced_deps = dependency_calculator.reduced_dependencies(target)
self.write_contents(target, reduced_deps, chroot)
self.write_setup(target, reduced_deps, chroot)
target_base = '{}-{}'.format(target.provides.name, target.provides.version)
setup_dir = os.path.join(dist_dir, target_base)
safe_rmtree(setup_dir)
shutil.move(chroot.path(), setup_dir)
return setup_dir, reduced_deps
def execute(self):
targets = [target for target in self.context.target_roots if self.has_provides(target)]
if not targets:
raise TaskError('setup-py target(s) must provide an artifact.')
dist_dir = self.get_options().pants_distdir
# NB: We have to create and then run in 2 steps so that we can discover all exported targets
# in-play in the creation phase which then allows a tsort of these exported targets in the run
# phase to ensure an exported target is, for example (--run="sdist upload"), uploaded before any
# exported target that depends on it is uploaded.
created = {}
def create(target):
if target not in created:
self.context.log.info('Creating setup.py project for {}'.format(target))
setup_dir, dependencies = self.create_setup_py(target, dist_dir)
created[target] = setup_dir
if self._recursive:
for dep in dependencies:
if self.has_provides(dep):
create(dep)
for target in targets:
create(target)
executed = [] # Collected and returned for tests.
for target in reversed(sort_targets(created.keys())):
setup_dir = created.get(target)
if setup_dir:
if not self._run:
self.context.log.info('Running packager against {}'.format(setup_dir))
setup_runner = Packager(setup_dir)
tgz_name = os.path.basename(setup_runner.sdist())
self.context.log.info('Writing {}'.format(os.path.join(dist_dir, tgz_name)))
shutil.move(setup_runner.sdist(), os.path.join(dist_dir, tgz_name))
safe_rmtree(setup_dir)
else:
self.context.log.info('Running {} against {}'.format(self._run, setup_dir))
setup_runner = SetupPyRunner(setup_dir, self._run)
setup_runner.run()
executed.append(target)
return executed
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Functions to extract knowledge from medical text. Everything related to
# extraction needed for the knowledge base. Also, some wrappers for SemRep,
# MetaMap and Reverb. Contains some enrichment routines for utilizing UTS
# services.
import json
import subprocess
import urllib2
import pymongo
import numpy as np
from nltk.tokenize import sent_tokenize
from config import settings
from pymetamap import MetaMap
from utilities import time_log, get_concept_from_cui, get_concept_from_source
from itertools import product
from multiprocessing import cpu_count, Pool
from unidecode import unidecode
def metamap_wrapper(text):
"""
Function-wrapper for metamap binary. Extracts concepts
found in text.
!!!! REMEMBER TO START THE METAMAP TAGGER AND
WordSense DISAMBIGUATION SERVER !!!!
Input:
- text: str,
a piece of text or sentence
Output:
- a dictionary with key sents and values
a list of the concepts found
"""
# Tokenize into sentences
sents = sent_tokenize(text)
# Load Metamap Instance
mm = MetaMap.get_instance(settings['load']['path']['metamap'])
concepts, errors = mm.extract_concepts(sents, range(len(sents)))
# Keep the sentence ids
ids = np.array([int(concept[0]) for concept in concepts])
sentences = []
for i in xrange(len(sents)):
tmp = {'sent_id': i+1, 'entities': [], 'relations': []}
# Wanted concepts according to sentence
wanted = np.where(ids == i)[0].tolist()
for w_ind in wanted:
w_conc = concepts[w_ind]
if hasattr(w_conc, 'cui'):
tmp_conc = {'label': w_conc.preferred_name, 'cui': w_conc.cui,
'sem_types': w_conc.semtypes, 'score': w_conc.score}
tmp['entities'].append(tmp_conc)
sentences.append(tmp)
if errors:
time_log('Errors with extracting concepts!')
time_log(errors)
return {'sents': sentences, 'sent_text':text}
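# Shape of the value returned by metamap_wrapper (illustrative):
# {'sents': [{'sent_id': 1,
#             'entities': [{'label': ..., 'cui': ..., 'sem_types': ...,
#                           'score': ...}],
#             'relations': []},
#            ...],
#  'sent_text': <the original text passed in>}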
def runProcess(exe, working_dir):
"""
Function that opens a command line and runs a command.
Captures the output and returns.
Input:
- exe: str,
string of the command to be run. ! REMEMBER TO ESCAPE CHARS!
- working_dir: str,
directory where the cmd should be executed
Output:
- lines: list,
list of strings generated from the command
"""
p = subprocess.Popen(exe, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=working_dir, shell=True)
lines = p.stdout.readlines()
return lines
def stopw_removal(inp, stop):
"""
Stopwords removal in line of text.
Input:
- inp: str,
string of the text input
- stop: list,
list of stop-words to be removed
"""
# Final string to be returned
final = ''
for w in inp.lower().split():
if w not in stop:
final += w + ' '
# Remove last whitespace that was added ' '
final = final[:-1]
return final
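# Example: stopw_removal('The patient shows signs of anemia', ['the', 'of'])
# returns 'patient shows signs anemia'.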
def create_text_batches(text, N=5000, buffer_=100):
    """
    Function that takes a long string and splits it into
    batches of approximately length N. The actual length
    of each batch differs, as each batch ends at the next
    dot found in the string after the N chars.
    Input:
        - text: str,
        piece of text to split
        - N: int,
        target length of each batch, in characters
        - buffer_: int,
        how many characters past N to scan for the next dot
Output:
- chunks: list,
list containing the string parts
"""
M = len(text)
chunks_num = M // N
if M % N != 0:
chunks_num += 1
chunks = []
end_ind = 0
start_ind = 0
i = 0
while i < chunks_num:
start_ind = end_ind
prob_text = text[start_ind + N: start_ind + N + buffer_]
if '.' in prob_text:
end_ind = start_ind + N + prob_text.index('.')+1
else:
end_ind = start_ind + N
chunks.append(text[start_ind:end_ind])
i += 1
chunks = [ch for ch in chunks if ch]
return chunks
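# Quick illustration with small numbers (defaults are N=5000, buffer_=100):
# >>> create_text_batches('one two three. four five six. seven', N=20, buffer_=10)
# ['one two three. four five six.', ' seven']
# The first batch runs past N to the next dot inside the buffer window; the
# remainder forms the final batch.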
def reverb_wrapper(text, stop=None):
"""
Function-wrapper for ReVerb binary. Extracts relations
found in text.
Input:
- text: str,
a piece of text or sentence
- stop: list,
list of stopwords to remove from the relations
Output:
- total: list,
list of lists. Each inner list contains one relation in the form
[subject, predicate, object]
"""
total = []
for sent in sent_tokenize(text):
cmd = 'echo "' + sent + '"' "| ./reverb -q | tr '\t' '\n' | cat -n"
reverb_dir = settings['load']['path']['reverb']
result = runProcess(cmd, reverb_dir)
# Extract relations from reverb output
result = result[-3:]
result = [row.split('\t')[1].strip('\n') for row in result]
# Remove common stopwords from relations
if stop:
result = [stopw_removal(res, stop) for res in result]
total.append(result)
# Remove empty relations
total = [t for t in total if t]
return total
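# Illustrative output, assuming ReVerb extracts one triple per sentence:
# reverb_wrapper('Aspirin reduces fever. It also thins blood.') could yield
# [['Aspirin', 'reduces', 'fever'], ['It', 'thins', 'blood']]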
def cui_to_uri(api_key, cui):
"""
    Function to map from cui to uri if possible. Uses the bioontology portal.
Input:
- api_key: str,
        api usage key; change it in settings.yaml
- cui: str,
cui of the entity we wish to map the uri
Output:
- the uri found in string format or None
"""
REST_URL = "http://data.bioontology.org"
annotations = get_json_with_api(api_key, REST_URL + "/search?include_properties=true&q=" + urllib2.quote(cui))
try:
return annotations['collection'][0]['@id']
except Exception, e:
        time_log('Exception while mapping cui to uri!')
time_log(e)
return None
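# Usage sketch (hypothetical api key; C0011849 is the UMLS CUI for
# Diabetes Mellitus):
# uri = cui_to_uri('my-api-key', 'C0011849')
# -> the '@id' of the first BioPortal search hit, or None on failure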
def get_json_with_api(api_key, url):
"""
    Helper function to retrieve a json from a url through urllib2
Input:
- api_key: str,
        api usage key; change it in settings.yaml
- url: str,
url to curl
Output:
- json-style dictionary with the curl results
"""
opener = urllib2.build_opener()
opener.addheaders = [('Authorization', 'apikey token=' + api_key)]
return json.loads(opener.open(url).read())
def threshold_concepts(concepts, hard_num=3, score=None):
"""
Thresholding concepts from metamap to keep only the most probable ones.
Currently supporting thresholding on the first-N (hard_num) or based on
the concept score.
Input:
- concepts: list,
list of Metamap Class concepts
        - hard_num: int,
        the first-N concepts to keep, if this thresholding is selected
        - score: float,
        lowest accepted concept score, if this thresholding is selected
"""
if hard_num:
if hard_num >= len(concepts):
return concepts
elif hard_num < len(concepts):
return concepts[:hard_num]
elif score:
return [c for c in concepts if c.score > score]
else:
return concepts
def get_name_concept(concept):
"""
Get name from the metamap concept. Tries different variations and
returns the name found.
Input:
- concept: Metamap class concept, as generated from mmap_extract
for example
Output:
- name: str,
the name found for this concept
"""
name = ''
if hasattr(concept, 'preferred_name'):
name = concept.preferred_name
elif hasattr(concept, 'long_form') and hasattr(concept, 'short_form'):
name = concept.long_form + '|' + concept.short_form
elif hasattr(concept, 'long_form'):
name = concept.long_form
elif hasattr(concept, 'short_form'):
name = concept.short_form
else:
name = 'NO NAME IN CONCEPT'
return name
def metamap_ents(x):
"""
Function to get entities in usable form.
    Extracts metamap concepts first, thresholds them and
    tries to extract names and URIs for the concepts, to make
    them more usable.
Input:
- x: str,
sentence to extract entities
Output:
- ents: list,
list of entities found. Each entity is a dictionary with
fields id (no. found in sentence), name if retrieved, cui if
available and uri if found
"""
# API KEY to biontology mapping from cui to uri
API_KEY = settings['apis']['biont']
concepts = mmap_extract(x)
concepts = threshold_concepts(concepts)
ents = []
for i, concept in enumerate(concepts):
ent = {}
ent['ent_id'] = i
ent['name'] = get_name_concept(concept)
if hasattr(concept, 'cui'):
ent['cui'] = concept.cui
ent['uri'] = cui_to_uri(API_KEY, ent['cui'])
else:
ent['cui'] = None
ent['uri'] = None
ents.append(ent)
return ents
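# A sketch of the expected output shape (values are illustrative):
#
#   [{'ent_id': 0, 'name': 'Diabetes Mellitus',
#     'cui': 'C0011849', 'uri': 'http://purl.bioontology.org/...'}]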
def extract_entities(text, json_={}):
"""
Extract entities from a given text using metamap and
    generate a json, preserving info regarding the sentence
    of each entity that was found. For the time being, we preserve
    both concepts and the entities related to them
Input:
- text: str,
a piece of text or sentence
- json_: dic,
sometimes the json to be returned is given to us to be enriched
Defaults to an empty json_
Output:
- json_: dic,
json with fields text, sents, concepts and entities
    containing the final results
"""
json_['text'] = text
# Tokenize the text
sents = sent_tokenize(text)
json_['sents'] = [{'sent_id': i, 'sent_text': sent} for i, sent in enumerate(sents)]
json_['concepts'], _ = mmap_extract(text)
json_['entities'] = {}
for i, sent in enumerate(json_['sents']):
        ents = metamap_ents(sent['sent_text'])
json_['entities'][sent['sent_id']] = ents
return json_
def extract_metamap(json_, key):
"""
Task function to parse and extract concepts from json_ style dic, using
the MetaMap binary.
Input:
- json_ : dic,
json-style dictionary generated from the Parse object related
to the specific type of input
- key : str,
string denoting the type of medical text to read from. Used to
find the correct paragraph in the settings.yaml file.
Output:
- json_ : dic,
the previous json-style dictionary enriched with medical concepts
"""
# outerfield for the documents in json
docfield = settings['out']['json']['itemfield']
# textfield to read text from
textfield = settings['out']['json']['json_text_field']
N = len(json_[docfield])
for i, doc in enumerate(json_[docfield]):
text = clean_text(doc[textfield])
if len(text) > 5000:
chunks = create_text_batches(text)
results = {'text': text, 'sents': []}
sent_id = 0
for chunk in chunks:
tmp = metamap_wrapper(chunk)
for sent in tmp['sents']:
sent['sent_id'] = sent_id
sent_id += 1
results['sents'].append(sent)
else:
results = metamap_wrapper(text)
json_[docfield][i].update(results)
proc = int(i/float(N)*100)
if proc % 10 == 0 and proc > 0:
time_log('We are at %d/%d documents -- %0.2f %%' % (i, N, proc))
return json_
def enrich_with_triples(results, subject, pred='MENTIONED_IN'):
"""
    Enrich a json dictionary with rdf triples of the form:
    entity-URI -- MENTIONED_IN -- 'Text Title'. Only entities with
    URIs are considered.
Input:
- results: dic,
    json-style dictionary generated from the extract_entities function
- subject: str,
the name of the text document in which the entities are mentioned
- pred: str,
the predicate to be used as a link between the uri and the title
Output:
- results: dic,
    the same dictionary with one more field, 'triples', holding the
    generated triples
"""
triples = []
for sent_key, ents in results['entities'].iteritems():
for ent in ents:
if ent['uri']:
triples.append({'subj': ent['uri'], 'pred': pred, 'obj': subject})
results['triples'] = triples
return results
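# Each generated triple is a plain dict, e.g. (values illustrative):
#
#   {'subj': '<entity uri>', 'pred': 'MENTIONED_IN', 'obj': '<text title>'}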
def force_to_unicode(text):
"If text is unicode, it is returned as is. If it's str, convert it to Unicode using UTF-8 encoding"
return text if isinstance(text, unicode) else text.decode('utf8', 'ignore')
def toAscii_wrapper(text):
"""
Function wrapper for Lexical Tool toAscii:
https://lexsrv3.nlm.nih.gov/LexSysGroup/Projects/lvg/current/docs/userDoc/tools/toAscii.html
Converts input to ascii ready for SemRep
Input:
- text: str,
    a piece of text or sentence
Output:
- text: str,
the same text with changes
"""
text = clean_text(text)
#text = repr(text)
cmd = 'echo "' + text + '" | ./toAscii'
toAscii_dir = settings['load']['path']['toAscii']
lines = runProcess(cmd, toAscii_dir)
return lines[0]
def semrep_wrapper(text):
"""
    Function wrapper for the SemRep binary. It is called with the -F flag
    only; changing this will cause the parsing to fail, because the
    resulting lines won't have the same structure.
    Input:
    - text: str,
    a piece of text or sentence
    Output:
    - results: dic,
    json-style dictionary with fields text and sents. Each
sentence has entities and relations found in it. Each entity and
each relation has attributes denoted in the corresponding
mappings dictionary.
"""
    # Exec the binary
    # Normalise the encoding to plain ASCII before calling the binary
    text = clean_text(text)
    utf8 = force_to_unicode(text)
    text = unidecode(utf8)
    # text = toAscii_wrapper(text)
    # repr() escapes any remaining artifacts for the shell call
    text = repr(text)
cmd = "echo " + text + " | ./semrep.v1.7 -L 2015 -Z 2015AA -F"
#print cmd
semrep_dir = settings['load']['path']['semrep']
lines = runProcess(cmd, semrep_dir)
#print(lines)
# mapping of line elements to fields
mappings = {
"text": {
"sent_id": 4,
"sent_text": 6
},
"entity": {
'cuid': 6,
'label': 7,
'sem_types': 8,
'score': 15
},
"relation": {
'subject__cui': 8,
'subject__label': 9,
'subject__sem_types': 10,
'subject__sem_type': 11,
'subject__score': 18,
'predicate__type': 21,
'predicate': 22,
'negation': 23,
'object__cui': 28,
'object__label': 29,
'object__sem_types': 30,
'object__sem_type': 31,
'object__score': 38,
}
}
results = {'sents': [], 'text': text}
for line in lines:
# If Sentence
if line.startswith('SE'):
##### DEPRECATED AS IN CLEAN TEXT WE REMOVE TABS FROM TEXT #######
# Temporary workaround to remove read |-delimited semrep output
# Without mixing up tabs contained in the text
# line = line.replace('\|', '!@#$')
# elements = line.split('|')
# elements = [el.replace('!@#$', '\|') for el in elements]
######################### DEPRECATED ###########################
elements = line.split('|')
# New sentence that was processed
if elements[5] == 'text':
tmp = {"entities": [], "relations": []}
for key, ind in mappings['text'].iteritems():
tmp[key] = elements[ind]
results['sents'].append(tmp)
# A line containing entity info
if elements[5] == 'entity':
tmp = {}
                for key, ind in mappings['entity'].iteritems():
                    if key == 'sem_types':
                        tmp[key] = elements[ind].split(',')
                    else:
                        tmp[key] = elements[ind]
results['sents'][-1]['entities'].append(tmp)
# A line containing relation info
if elements[5] == 'relation':
tmp = {}
for key, ind in mappings['relation'].iteritems():
if 'sem_types' in key:
tmp[key] = elements[ind].split(',')
else:
tmp[key] = elements[ind]
results['sents'][-1]['relations'].append(tmp)
return results
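# For reference, each SemRep -F output line is pipe-delimited and its record
# type sits at elements[5]: for a 'text' record, elements[4] holds the
# sentence id and elements[6] the sentence text; 'entity' and 'relation'
# records are unpacked through the index tables in 'mappings' above.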
def clean_text(text):
"""
Escape specific characters for command line call of SemRep. This
could be updated in the future to more sophisticated transformations.
Input:
- text: str,
piece of text to clean
Output:
- text: str,
    the same text with shell-unsafe characters (parentheses, quotes,
    pipes, braces, etc.) replaced by spaces
"""
replace_chars = [('(', ' '), (')', ' '), ("'", ' '), ('\n', " "), ('\t', ' '), (';', " "),
("}", " "), ("{", " "), ("|", " "), ("&", " "), ("/", ' ')]
for unw_pair in replace_chars:
text = text.replace(unw_pair[0], unw_pair[1])
text = ' '.join(text.split())
return text
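# Example (illustrative):
#
#   clean_text("risk (HR) patients;\nsee {table|1}")
#   # -> 'risk HR patients see table 1'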
def extract_semrep(json_, key):
"""
Task function to parse and extract concepts from json_ style dic, using
the SemRep binary.
Input:
- json_ : dic,
json-style dictionary generated from the Parse object related
to the specific type of input
- key : str,
string denoting the type of medical text to read from. Used to
find the correct paragraph in the settings.yaml file.
Output:
- json_ : dic,
the previous json-style dictionary enriched with medical concepts
"""
# outerfield for the documents in json
if key == 'mongo':
key = 'json'
docfield = settings['out']['json']['itemfield']
# textfield to read text from
textfield = settings['out']['json']['json_text_field']
N = len(json_[docfield])
for i, doc in enumerate(json_[docfield]):
print doc['id']
text = doc[textfield]
if len(text) > 5000:
chunks = create_text_batches(text)
results = {'text': text, 'sents': []}
sent_id = 0
            for chunk in chunks:
tmp = semrep_wrapper(chunk)
for sent in tmp['sents']:
sent['sent_id'] = sent_id
sent_id += 1
results['sents'].append(sent)
else:
results = semrep_wrapper(text)
json_[docfield][i].update(results)
proc = int(i/float(N)*100)
if proc % 10 == 0 and proc > 0:
time_log('We are at %d/%d documents -- %0.2f %%' % (i, N, proc))
return json_
def extract_semrep_parallel(json_, key):
"""
Task function to parse and extract concepts from json_ style dic, using
the SemRep binary. It uses multiprocessing for efficiency.
Input:
- json_ : dic,
json-style dictionary generated from the Parse object related
to the specific type of input
- key : str,
string denoting the type of medical text to read from. Used to
find the correct paragraph in the settings.yaml file.
Output:
- json_ : dic,
the previous json-style dictionary enriched with medical concepts
"""
# outerfield for the documents in json
docfield = settings['out']['json']['itemfield']
N = len(json_[docfield])
try:
N_THREADS = int(settings['num_cores'])
    except (KeyError, ValueError):
N_THREADS = cpu_count()
batches = chunk_document_collection(json_[docfield], N_THREADS)
len_col = " | ".join([str(len(b)) for b in batches])
time_log('Will break the collection into batches of: %s documents!' % len_col)
batches = [{docfield: batch} for batch in batches]
data = zip(batches, [key for batch in batches])
pool = Pool(N_THREADS, maxtasksperchild=1)
res = pool.map(semrep_parallel_worker, data)
pool.close()
pool.join()
del pool
tmp = {docfield: []}
for batch_res in res:
tmp[docfield].extend(batch_res[docfield])
for i, sub_doc in enumerate(json_[docfield]):
for sub_doc_new in tmp[docfield]:
if sub_doc_new['id'] == sub_doc['id']:
json_[docfield][i].update(sub_doc_new)
break
time_log('Completed multiprocessing extraction!')
return json_
def chunk_document_collection(seq, num):
"""
Helper function to break a collection of N = len(seq) documents
to num batches.
Input:
- seq: list,
a list of documents
- num: int,
number of batches to be broken into. This will usually be
equal to the number of cores available
Output:
- out: list,
a list of lists. Each sublist contains the batch-collection
of documents to be used.
"""
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
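# Example: 10 documents over 4 cores are sliced with a float step of 2.5,
# so batch sizes alternate 2/3:
#
#   chunk_document_collection(range(10), 4)
#   # -> [[0, 1], [2, 3, 4], [5, 6], [7, 8, 9]]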
def semrep_parallel_worker((json_, key)):
"""
Just a worker interface for the different SemRep
executions.
Input:
- json_ : dic,
json-style dictionary generated from the Parse object related
to the specific type of input
- key : str,
string denoting the type of medical text to read from. Used to
find the correct paragraph in the settings.yaml file.
Output:
- res : dic,
the previous json-style dictionary enriched with medical concepts
"""
res = extract_semrep(json_, key)
return res
def get_concepts_from_edges_parallel(json_, key):
"""
    Same work as get_concepts_from_edges, but using multiprocessing
    for efficiency.
Input:
- json: dict,
json-style dictionary with a field containing
relations
- key : str,
string denoting the type of medical text to read from. Used to
find the correct paragraph in the settings.yaml file.
Output:
- json: dict,
the updated json-style dictionary where the relations
in the list have been updated and each subject-object has been
    mapped to the corresponding concept info
"""
outfield = settings['load'][key]['itemfield']
N = len(json_[outfield])
try:
N_THREADS = int(settings['num_cores'])
    except (KeyError, ValueError):
N_THREADS = cpu_count()
batches = chunk_document_collection(json_[outfield], N_THREADS)
len_col = " | ".join([str(len(b)) for b in batches])
time_log('Will break the edges into batches of: %s documents!' % len_col)
batches = [{outfield: batch} for batch in batches]
data = zip(batches, [key for batch in batches])
pool = Pool(N_THREADS, maxtasksperchild=1)
res = pool.map(edges_parallel_worker, data)
pool.close()
pool.join()
del pool
json_ = {outfield: []}
for batch_res in res:
json_[outfield].extend(batch_res[outfield])
time_log('Completed multiprocessing extraction!')
return json_
def edges_parallel_worker((json_, key)):
"""
Just a worker interface for the parallel enrichment
executions.
Input:
- json_ : dic,
json-style dictionary generated from the Parse object related
to the specific type of input
- key : str,
string denoting the type of medical text to read from. Used to
find the correct paragraph in the settings.yaml file.
Output:
- res : dic,
expected outcome of get_concepts_from_edges
"""
res = get_concepts_from_edges(json_, key)
return res
def get_concepts_from_edges(json_, key):
"""
Get concept-specific info related to an entity from a list
containing relations. Each subject-object in the relations
    list is expressed in another data source (MeSH, DrugBank etc.)
    and its unique identifier is provided. Articles and new
    kinds of subject-object are also handled.
Input:
- json: dict,
json-style dictionary with a field containing
relations
- key : str,
string denoting the type of medical text to read from. Used to
find the correct paragraph in the settings.yaml file.
Output:
- json: dict,
the updated json-style dictionary where the relations
in the list have been updated and each subject-object has been
    mapped to the corresponding concept info
"""
# docfield containing list of elements containing the relations
outfield = settings['load'][key]['itemfield']
# field containing the type of the node for the subject
sub_type = settings['load'][key]['sub_type']
# field containing the source of the node for the subject
sub_source = settings['load'][key]['sub_source']
# field containing the type of the node for the object
obj_type = settings['load'][key]['obj_type']
# field containing the source of the node for the object
obj_source = settings['load'][key]['obj_source']
new_relations = []
uri = settings['load']['mongo']['uri']
db_name = settings['load']['mongo']['db']
collection_name = settings['load']['mongo']['cache_collection']
client = pymongo.MongoClient(uri)
db = client[db_name]
collection = db[collection_name]
cur = collection.find({})
cache = {}
for item in cur:
cache[item['key']] = item['value']
    def _format_sem_types(ent):
        """Normalise a concept's sem_types field to a ';'-joined string."""
        if type(ent['sem_types']) == list and len(ent['sem_types']) > 1:
            return ';'.join(ent['sem_types'])
        elif ',' in ent['sem_types']:
            return ';'.join(ent['sem_types'].split(','))
        return ent['sem_types']

    def _resolve_node(identifier, source):
        """Resolve a subject/object identifier to a list of node dicts,
        caching concept lookups in mongo along the way."""
        if source == 'UMLS':
            if identifier not in cache:
                ent = get_concept_from_cui(identifier)
                cache[identifier] = ent
                collection.insert_one({'key': identifier, 'value': ent})
                print 'INSERTED in UMLS %s' % identifier
            else:
                ent = cache[identifier]
            ents = [ent]
        elif source in ('PMC', 'TEXT', 'None'):
            return [{'id:ID': identifier}]
        else:
            if identifier not in cache:
                ents = get_concept_from_source(identifier, source)
                cache[identifier] = ents
                collection.insert_one({'key': identifier, 'value': ents})
                print 'INSERTED in other %s' % identifier
            else:
                ents = cache[identifier]
        return [{'id:ID': ent['cuid'],
                 'label': ent['label'],
                 'sem_types:string[]': _format_sem_types(ent)} for ent in ents]

    N = len(json_[outfield])
    for ii, triple in enumerate(json_[outfield]):
        print triple
        try:
            triple_subj = _resolve_node(triple['s'], sub_source)
            triple_obj = _resolve_node(triple['o'], obj_source)
            for comb in product(triple_subj, triple_obj):
                new_relations.append({'s': comb[0], 'p': triple['p'], 'o': comb[1]})
except Exception, e:
time_log(e)
time_log('S: %s | P: %s | O: %s' % (triple['s'],triple['p'],triple['o']))
time_log('Skipped the above edge! Probably due to concept-fetching errors!')
proc = int(ii/float(N)*100)
if proc % 10 == 0 and proc > 0:
time_log('We are at %d/%d edges transformed -- %0.2f %%' % (ii, N, proc))
# if ii % 100 == 0 and ii > 9:
# time_log("Edges Transformation Process: %d -- %0.2f %%" % (ii, 100*ii/float(len(json_[outfield]))))
json_[outfield] = new_relations
return json_
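# Note: the 'id:ID', 'label' and 'sem_types:string[]' field names used above
# follow the Neo4j CSV bulk-import header convention, so the transformed
# edges can presumably be written out directly for a neo4j-import load.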
|
|
#!/usr/bin/env python3
import math
import unittest
import vectors
class TestVector(unittest.TestCase):
def setUp(self):
super().setUp()
self.v = vectors.Vector(4, -6, 7, 2.4, 10)
self.u = vectors.Vector(2, 3, -0.5, 4, 3)
def tearDown(self):
super().tearDown()
# methods
def test_same_length_true(self):
self.assertTrue(vectors.Vector._same_length(
self.v, self.u, [1,2,3,4,5], (9,8,7,6,5)))
def test_same_length_false(self):
self.assertFalse(vectors.Vector._same_length(
self.v, self.u, [1,2,3], (9,8,7,6,5)))
# products
def test_scalar_product_vector_length_error(self):
with self.assertRaises(vectors.VectorLengthError):
vectors.Vector.scalar_product(self.v,
vectors.Vector(1, 2, 3))
def test_scalar_product_vector_vector(self):
self.assertEqual(vectors.Vector.scalar_product(self.v, self.u), 26.1)
def test_scalar_product_vector_tuple(self):
self.assertEqual(vectors.Vector.scalar_product(self.v,
(5, 4, 3, 2, 1)), 31.8)
def test_scalar_product_tuple_vector(self):
self.assertEqual(vectors.Vector.scalar_product((5, 4, 3, 2, 1),
self.v), 31.8)
def test_scalar_product_vector_list(self):
self.assertEqual(vectors.Vector.scalar_product(self.u,
[5, 4, 3, 2, 1]), 31.5)
def test_scalar_product_list_vector(self):
self.assertEqual(vectors.Vector.scalar_product([5, 4, 3, 2, 1],
self.u), 31.5)
def test_dot_vector_vector(self):
self.assertEqual(vectors.Vector.dot(self.v, self.u), 26.1)
def test_dot_vector_tuple(self):
self.assertEqual(vectors.Vector.dot(self.v, (5, 4, 3, 2, 1)), 31.8)
def test_dot_tuple_vector(self):
self.assertEqual(vectors.Vector.dot((5, 4, 3, 2, 1), self.v), 31.8)
def test_dot_vector_list(self):
self.assertEqual(vectors.Vector.dot(self.u, [5, 4, 3, 2, 1]), 31.5)
def test_dot_list_vector(self):
self.assertEqual(vectors.Vector.dot([5, 4, 3, 2, 1], self.u), 31.5)
def test_vector_mul_vector(self):
self.assertEqual(self.v * self.u, 26.1)
def test_vector_mul_tuple(self):
self.assertEqual(self.v * (5, 4, 3, 2, 1), 31.8)
def test_tuple_mul_vector(self):
self.assertEqual((5, 4, 3, 2, 1) * self.v, 31.8)
def test_vector_mul_list(self):
self.assertEqual(self.u * [5, 4, 3, 2, 1], 31.5)
def test_list_mul_vector(self):
self.assertEqual([5, 4, 3, 2, 1] * self.u, 31.5)
def test_vector_mul_scalar(self):
self.assertEqual(self.v * 5, vectors.Vector(20, -30, 35, 12, 50))
def test_scalar_mul_vector(self):
self.assertEqual(3 * self.u, vectors.Vector(6, 9, -1.5, 12, 9))
def test_ensure_scalar_commutes_mul(self):
self.assertEqual(self.u * 10.3, 10.3 * self.u)
# exponential
def test_squaring(self):
self.assertEqual(self.v**2, self.v * self.v)
def test_fractional_powers(self):
self.assertEqual(round(self.u**5.2, 7),
round(math.sqrt(self.u * self.u)**5.2, 7))
def test_negative_power(self):
self.assertEqual(self.v**(-3), 1 / self.v**3)
# vector length
def test_magnitude(self):
self.assertEqual(self.v.magnitude(), math.sqrt(206.76))
def test_length(self):
self.assertEqual(self.u.length(), math.sqrt(38.25))
# division
def test_vector_div_integer_scalar(self):
self.assertEqual(self.v / 2, vectors.Vector(2.0, -3.0, 3.5, 1.2, 5.0))
def test_vector_div_decimal_scalar(self):
self.assertEqual(self.u / 2.5, vectors.Vector(0.8, 1.2, -0.2, 1.6, 1.2))
def test_vector_div_vector(self):
with self.assertRaises(TypeError):
self.v / self.u
def test_unit_vector(self):
self.assertEqual(self.v.unit(), self.v / self.v.magnitude())
    # unary
def test_pos_vector(self):
self.assertEqual(+self.v, self.v)
def test_neg_vector(self):
self.assertEqual(-self.u, vectors.Vector(-2, -3, 0.5, -4, -3))
# addition
def test_add_vector_length(self):
with self.assertRaises(vectors.VectorLengthError):
self.v + vectors.Vector(1, 2, 3)
def test_vector_add_vector(self):
self.assertEqual(self.v + self.u,
vectors.Vector(6, -3, 6.5, 6.4, 13))
def test_vector_add_tuple(self):
self.assertEqual(self.v + (6, 6, 3, -2.4, 0),
vectors.Vector(10, 0, 10, 0, 10))
def test_tuple_add_vector(self):
self.assertEqual((6, 6, 3, -2.4, 0) + self.v,
vectors.Vector(10, 0, 10, 0, 10))
def test_vector_add_list(self):
self.assertEqual(self.u + [8, -3, 0.5, 6, 7],
vectors.Vector(10, 0, 0, 10, 10))
def test_list_add_vector(self):
self.assertEqual([8, -3, 0.5, 6, 7] + self.u,
vectors.Vector(10, 0, 0, 10, 10))
# subtraction
def test_sub_vector_length(self):
with self.assertRaises(vectors.VectorLengthError):
self.v - vectors.Vector(1, 2, 3)
def test_vector_sub_vector(self):
self.assertEqual(self.v - self.u,
vectors.Vector(2, -9, 7.5, -1.6, 7))
def test_vector_sub_tuple(self):
self.assertEqual(self.v - (4, 4, 7, -2.4, 0),
vectors.Vector(0, -10, 0, 4.8, 10))
def test_tuple_sub_vector(self):
self.assertEqual((4, 4, 7, -2.4, 0) - self.v,
vectors.Vector(0, 10, 0, -4.8, -10))
def test_vector_sub_list(self):
self.assertEqual(self.u - [2, -3, 0.5, 6, 7],
vectors.Vector(0, 6, -1, -2, -4))
def test_list_sub_vector(self):
self.assertEqual([2, -3, 0.5, 6, 7] - self.u,
vectors.Vector(0, -6, 1, 2, 4))
def test_dimension(self):
self.assertEqual(self.v.dimension(), 5)
class TestVector3(unittest.TestCase):
def setUp(self):
super().setUp()
self.v = vectors.Vector3(1, 2, 3)
self.u = vectors.Vector3(7, 4, 2)
self.w = vectors.Vector3(2, 2*math.sqrt(3), 3)
def tearDown(self):
super().tearDown()
# accessors
def test_get_x(self):
self.assertEqual(self.u.x, 7)
def test_get_y(self):
self.assertEqual(self.u.y, 4)
def test_get_z(self):
self.assertEqual(self.u.z, 2)
# products
def test_scalar_product_vector3_length(self):
with self.assertRaises(vectors.VectorLengthError):
vectors.Vector3.scalar_product(
self.v, vectors.Vector(5, 4, 3, 2, 1))
def test_scalar_product_vector3_vector(self):
self.assertEqual(vectors.Vector3.scalar_product(
self.v, vectors.Vector(3, 2, 1)), 10)
# vector product
def test_vector_product(self):
self.assertEqual(vectors.Vector3.vector_product(self.v, self.u),
vectors.Vector3(-8, 19, -10))
def test_cross(self):
self.assertEqual(vectors.Vector3.cross(self.v, self.u),
vectors.Vector3(-8, 19, -10))
def test_vector_product_anticommutes(self):
self.assertEqual(vectors.Vector3.vector_product(self.v, self.u),
-vectors.Vector3.vector_product(self.u, self.v))
# addition
def test_vector3_add_vector(self):
self.assertEqual(self.u + vectors.Vector(3, 2, 1),
vectors.Vector3(10, 6, 3))
# subtraction
def test_vector3_sub_vector(self):
self.assertEqual(self.u - vectors.Vector(3, 2, 1),
vectors.Vector3(4, 2, 1))
# properties
def test_spherical_polar_radius(self):
self.assertEqual(self.w.r, 5)
def test_azimuthal_angle(self):
self.assertEqual(round(self.w.phi * 180/math.pi, 7), 60)
def test_zenith_angle(self):
self.assertEqual(math.cos(self.w.theta), 3/5)
def test_cylindrical_polar_radius(self):
self.assertEqual(self.w.rho, 4)
def test_dimension(self):
self.assertEqual(self.v.dimension(), 3)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
#
# changeip script for calendar server
#
# Copyright (c) 2005-2017 Apple Inc. All Rights Reserved.
#
# IMPORTANT NOTE: This file is licensed only for use on Apple-labeled
# computers and is subject to the terms and conditions of the Apple
# Software License Agreement accompanying the package this file is a
# part of. You may not port this file to another platform without
# Apple's written consent.
from __future__ import print_function
from __future__ import with_statement
import datetime
from getopt import getopt, GetoptError
import os
import plistlib
import subprocess
import sys
SERVER_APP_ROOT = "/Applications/Server.app/Contents/ServerRoot"
CALENDARSERVER_CONFIG = "%s/usr/sbin/calendarserver_config" % (SERVER_APP_ROOT,)
def serverRootLocation():
"""
Return the ServerRoot value from the servermgr_calendar.plist. If not
present, return the default.
"""
plist = "/Library/Server/Preferences/Calendar.plist"
serverRoot = u"/Library/Server/Calendar and Contacts"
if os.path.exists(plist):
serverRoot = plistlib.readPlist(plist).get("ServerRoot", serverRoot)
return serverRoot
def usage():
name = os.path.basename(sys.argv[0])
print("Usage: %s [-hv] old-ip new-ip [old-hostname new-hostname]" % (name,))
print(" Options:")
print(" -h - print this message and exit")
print(" -f <file> - path to config file")
print(" Arguments:")
print(" old-ip - current IPv4 address of the server")
print(" new-ip - new IPv4 address of the server")
print(" old-hostname - current FQDN for the server")
print(" new-hostname - new FQDN for the server")
def log(msg):
serverRoot = serverRootLocation()
logDir = os.path.join(serverRoot, "Logs")
logFile = os.path.join(logDir, "changeip.log")
try:
timestamp = datetime.datetime.now().strftime("%b %d %H:%M:%S")
msg = "changeip_calendar: %s %s" % (timestamp, msg)
with open(logFile, 'a') as output:
output.write("%s\n" % (msg,))
except IOError:
# Could not write to log
pass
def main():
name = os.path.basename(sys.argv[0])
# Since the serveradmin command must be run as root, so must this script
if os.getuid() != 0:
print("%s must be run as root" % (name,))
sys.exit(1)
try:
(optargs, args) = getopt(
sys.argv[1:], "hf:", [
"help",
"config=",
]
)
except GetoptError:
usage()
sys.exit(1)
configFile = None
for opt, arg in optargs:
if opt in ("-h", "--help"):
usage()
sys.exit(1)
elif opt in ("-f", "--config"):
configFile = arg
    if len(args) < 2:
        usage()
        sys.exit(1)
    oldIP, newIP = args[0:2]
try:
oldHostname, newHostname = args[2:4]
except ValueError:
oldHostname = newHostname = None
log("args: {}".format(args))
config = readConfig(configFile=configFile)
updateConfig(
config,
oldIP, newIP,
oldHostname, newHostname
)
writeConfig(config)
def sendCommand(commandDict, configFile=None):
args = [CALENDARSERVER_CONFIG]
if configFile is not None:
args.append("-f {}".format(configFile))
child = subprocess.Popen(
args=args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
commandString = plistlib.writePlistToString(commandDict)
log("Sending to calendarserver_config: {}".format(commandString))
output, error = child.communicate(input=commandString)
log("Output from calendarserver_config: {}".format(output))
if child.returncode:
log(
"Error from calendarserver_config: {}, {}".format(
child.returncode, error
)
)
return None
else:
return plistlib.readPlistFromString(output)["result"]
def readConfig(configFile=None):
"""
Ask calendarserver_config for the current configuration
"""
command = {
"command": "readConfig"
}
    return sendCommand(command, configFile=configFile)
def writeConfig(valuesDict, configFile=None):
"""
Ask calendarserver_config to update the configuration
"""
command = {
"command": "writeConfig",
"Values": valuesDict,
}
    return sendCommand(command, configFile=configFile)
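# Sketch of the resulting plist payloads (values illustrative): readConfig()
# sends {"command": "readConfig"}, while e.g.
# writeConfig({"ServerHostName": "new.example.com"}) sends
# {"command": "writeConfig", "Values": {"ServerHostName": "new.example.com"}}.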
def updateConfig(
config,
oldIP, newIP,
oldHostname, newHostname,
configFile=None
):
keys = (
("Scheduling", "iMIP", "Receiving", "Server"),
("Scheduling", "iMIP", "Sending", "Server"),
("Scheduling", "iMIP", "Sending", "Address"),
("ServerHostName",),
)
def _replace(value, oldIP, newIP, oldHostname, newHostname):
newValue = value.replace(oldIP, newIP)
if oldHostname and newHostname:
newValue = newValue.replace(oldHostname, newHostname)
if value != newValue:
log("Changed %s -> %s" % (value, newValue))
return newValue
for keyPath in keys:
parent = config
path = keyPath[:-1]
key = keyPath[-1]
for step in path:
if step not in parent:
parent = None
break
parent = parent[step]
if parent:
if key in parent:
value = parent[key]
if isinstance(value, list):
newValue = []
for item in value:
item = _replace(
item, oldIP, newIP, oldHostname, newHostname
)
newValue.append(item)
else:
newValue = _replace(
value, oldIP, newIP, oldHostname, newHostname
)
parent[key] = newValue
if __name__ == '__main__':
main()
|
|
"""Backup methods and utilities"""
import couchdb
import logging
import os
import re
import sys
import shutil
import subprocess as sp
import time
from datetime import datetime
from taca.utils.config import CONFIG
from taca.utils import filesystem, misc
logger = logging.getLogger(__name__)
class run_vars(object):
"""A simple variable storage class"""
def __init__(self, run):
self.abs_path = os.path.abspath(run)
self.path, self.name = os.path.split(self.abs_path)
self.name = self.name.split('.', 1)[0]
self.zip = "{}.tar.gz".format(self.name)
self.key = "{}.key".format(self.name)
self.key_encrypted = "{}.key.gpg".format(self.name)
self.zip_encrypted = "{}.tar.gz.gpg".format(self.name)
class backup_utils(object):
"""A class object with main utility methods related to backing up"""
def __init__(self, run=None):
self.run = run
self.fetch_config_info()
self.host_name = os.getenv('HOSTNAME', os.uname()[1]).split('.', 1)[0]
def fetch_config_info(self):
"""Try to fecth required info from the config file. Log and exit if any neccesary info is missing"""
try:
self.data_dirs = CONFIG['backup']['data_dirs']
self.archive_dirs = CONFIG['backup']['archive_dirs']
self.keys_path = CONFIG['backup']['keys_path']
self.gpg_receiver = CONFIG['backup']['gpg_receiver']
self.mail_recipients = CONFIG['mail']['recipients']
self.check_demux = CONFIG.get('backup', {}).get('check_demux', False)
self.couch_info = CONFIG.get('statusdb')
except KeyError as e:
logger.error("Config file is missing the key {}, make sure it have all required information".format(str(e)))
raise SystemExit
def collect_runs(self, ext=None, filter_by_ext=False):
"""Collect runs from archive directories"""
self.runs = []
if self.run:
run = run_vars(self.run)
if not re.match(filesystem.RUN_RE, run.name):
logger.error("Given run {} did not match a FC pattern".format(self.run))
raise SystemExit
self.runs.append(run)
else:
for adir in self.archive_dirs.values():
if not os.path.isdir(adir):
logger.warn("Path {} does not exist or it is not a directory".format(adir))
                    continue
for item in os.listdir(adir):
if filter_by_ext and not item.endswith(ext):
continue
elif item.endswith(ext):
item = item.replace(ext, '')
                    elif not os.path.isdir(os.path.join(adir, item)):
continue
if re.match(filesystem.RUN_RE, item) and item not in self.runs:
self.runs.append(run_vars(os.path.join(adir, item)))
def avail_disk_space(self, path, run):
"""Check the space on file system based on parent directory of the run"""
        # if the run type cannot be fetched, use the max size as a precaution; size units in GB
illumina_run_sizes = {'hiseq' : 500, 'hiseqx' : 900, 'miseq' : 20}
required_size = illumina_run_sizes.get(self._get_run_type(run), 900) * 2
        # check for any ongoing runs and add up the required size accordingly
for ddir in self.data_dirs.values():
for item in os.listdir(ddir):
if not re.match(filesystem.RUN_RE, item):
continue
if not os.path.exists(os.path.join(ddir, item, "RTAComplete.txt")):
required_size += illumina_run_sizes.get(self._get_run_type(run), 900)
# get available free space from the file system
try:
df_proc = sp.Popen(['df', path], stdout=sp.PIPE, stderr=sp.PIPE)
df_out, df_err = df_proc.communicate()
available_size = int(df_out.strip().split('\n')[-1].strip().split()[2])/1024/1024
except Exception, e:
logger.error("Evaluation of disk space failed with error {}".format(e))
raise SystemExit
if available_size < required_size:
e_msg = "Required space for encryption is {}GB, but only {}GB available".format(required_size, available_size)
subjt = "Low space for encryption - {}".format(self.host_name)
logger.error(e_msg)
misc.send_mail(subjt, e_msg, self.mail_recipients)
raise SystemExit
def file_in_pdc(self, src_file, silent=True):
"""Check if the given files exist in PDC"""
        # dsmc will return zero/True only when the file exists; it returns
        # non-zero/False when the cmd is executed but the file is not found
src_file_abs = os.path.abspath(src_file)
try:
sp.check_call(['dsmc', 'query', 'archive', src_file_abs], stdout=sp.PIPE, stderr=sp.PIPE)
value = True
except sp.CalledProcessError:
value = False
if not silent:
msg = "File {} {} in PDC".format(src_file_abs, "exist" if value else "do not exist")
logger.info(msg)
return value
def _get_run_type(self, run):
"""Returns run type based on the flowcell name"""
run_type = ''
try:
if "ST-" in run:
run_type = "hiseqx"
elif "-" in run.split('_')[-1]:
run_type = "miseq"
else:
run_type = "hiseq"
        except Exception:
logger.warn("Could not fetch run type for run {}".format(run))
return run_type
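    # Illustrative flowcell-name heuristics used above (names are made up):
    #   "170101_ST-E00201_0123_AHXXXXXXX"    -> 'hiseqx' (contains "ST-")
    #   "170101_M00485_0123_000000000-ABCDE" -> 'miseq'  (dash in last field)
    #   anything else                        -> 'hiseq'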
def _call_commands(self, cmd1, cmd2=None, out_file=None, return_out=False, mail_failed=False, tmp_files=[]):
"""Call an external command(s) with atmost two commands per function call.
Given 'out_file' is always used for the later cmd and also stdout can be return
for the later cmd. In case of failure, the 'tmp_files' are removed"""
if out_file:
if not cmd2:
stdout1 = open(out_file, 'w')
else:
stdout1 = sp.PIPE
stdout2 = open(out_file, 'w')
else:
stdout1 = sp.PIPE
stdout2 = sp.PIPE
# calling the commands
try:
cmd1 = cmd1.split()
p1 = sp.Popen(cmd1, stdout=stdout1, stderr=sp.PIPE)
if cmd2:
cmd2 = cmd2.split()
p2 = sp.Popen(cmd2, stdin=p1.stdout, stdout=stdout2, stderr=sp.PIPE)
p2_stat = p2.wait()
p2_out, p2_err = p2.communicate()
if not self._check_status(cmd2, p2_stat, p2_err, mail_failed, tmp_files):
return (False, p2_err) if return_out else False
p1_stat = p1.wait()
p1_out, p1_err = p1.communicate()
if not self._check_status(cmd1, p1_stat, p1_err, mail_failed, tmp_files):
return (False, p1_err) if return_out else False
if return_out:
return (True, p2_out) if cmd2 else (True, p1_out)
return True
        except Exception:
            raise
finally:
if out_file:
if not cmd2:
stdout1.close()
else:
stdout2.close()
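    # Usage sketch, mirroring the calls made from encrypt_runs below
    # (paths are illustrative):
    #
    #   ok = bk._call_commands(cmd1="tar -cf - run_dir",
    #                          cmd2="pigz --fast -c -",
    #                          out_file="run_dir.tar.gz")
    #   ok, md5_out = bk._call_commands(cmd1="md5sum run_dir.tar.gz",
    #                                   return_out=True)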
def _check_status(self, cmd, status, err_msg, mail_failed, files_to_remove=[]):
"""Check if a subprocess status is success and log error if failed"""
if status != 0:
self._clean_tmp_files(files_to_remove)
if mail_failed:
subjt = "Command call failed - {}".format(self.host_name)
e_msg = "Called cmd: {}\n\nError msg: {}".format(" ".join(cmd), err_msg)
misc.send_mail(subjt, e_msg, self.mail_recipients)
logger.error("Command '{}' failed with the error '{}'".format(" ".join(cmd),err_msg))
return False
return True
def _clean_tmp_files(self, files):
"""Remove the file is exist"""
for fl in files:
if os.path.exists(fl):
os.remove(fl)
def _log_pdc_statusdb(self, run):
"""Log the time stamp in statusDB if a file is succussfully sent to PDC"""
try:
run_vals = run.split('_')
run_fc = "{}_{}".format(run_vals[0],run_vals[-1])
server = "http://{username}:{password}@{url}:{port}".format(url=self.couch_info['url'],username=self.couch_info['username'],
password=self.couch_info['password'],port=self.couch_info['port'])
couch = couchdb.Server(server)
db = couch[self.couch_info['db']]
fc_names = {e.key:e.id for e in db.view("names/name", reduce=False)}
d_id = fc_names[run_fc]
doc = db.get(d_id)
doc['pdc_archived'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
db.save(doc)
logger.info("Logged 'pdc_archived' timestamp for fc {} in statusdb doc '{}'".format(run, d_id))
        except Exception:
logger.warn("Not able to log 'pdc_archived' timestamp for run {}".format(run))
@classmethod
def encrypt_runs(cls, run, force):
"""Encrypt the runs that have been collected"""
bk = cls(run)
bk.collect_runs(ext=".tar.gz")
logger.info("In total, found {} run(s) to be encrypted".format(len(bk.runs)))
for run in bk.runs:
run.flag = "{}.encrypting".format(run.name)
run.dst_key_encrypted = os.path.join(bk.keys_path, run.key_encrypted)
tmp_files = [run.zip_encrypted, run.key_encrypted, run.key, run.flag]
logger.info("Encryption of run {} is now started".format(run.name))
# Check if there is enough space and exit if not
bk.avail_disk_space(run.path, run.name)
            # Check if the run is demultiplexed
if not force and bk.check_demux:
if not misc.run_is_demuxed(run.name, bk.couch_info):
logger.warn("Run {} is not demultiplexed yet, so skipping it".format(run.name))
continue
logger.info("Run {} is demultiplexed and proceeding with encryption".format(run.name))
with filesystem.chdir(run.path):
# skip run if already ongoing
if os.path.exists(run.flag):
logger.warn("Run {} is already being encrypted, so skipping now".format(run.name))
continue
flag = open(run.flag, 'w').close()
# zip the run directory
if os.path.exists(run.zip):
if os.path.isdir(run.name):
logger.warn("Both run source and zipped archive exist for run {}, skipping run as precaution".format(run.name))
bk._clean_tmp_files([run.flag])
continue
logger.info("Zipped archive already exist for run {}, so using it for encryption".format(run.name))
else:
logger.info("Creating zipped archive for run {}".format(run.name))
if bk._call_commands(cmd1="tar -cf - {}".format(run.name), cmd2="pigz --fast -c -",
out_file=run.zip, mail_failed=True, tmp_files=[run.zip, run.flag]):
logger.info("Run {} was successfully compressed, so removing the run source directory".format(run.name))
shutil.rmtree(run.name)
else:
logger.warn("Skipping run {} and moving on".format(run.name))
continue
# Remove encrypted file if already exists
if os.path.exists(run.zip_encrypted):
logger.warn(("Removing already existing encrypted file for run {}, this is a precaution "
"to make sure the file was encrypted with correct key file".format(run.name)))
bk._clean_tmp_files([run.zip_encrypted, run.key, run.key_encrypted, run.dst_key_encrypted])
                # Generate a random key to use as passphrase
if not bk._call_commands(cmd1="gpg --gen-random 1 256", out_file=run.key, tmp_files=tmp_files):
logger.warn("Skipping run {} and moving on".format(run.name))
continue
logger.info("Generated randon phrase key for run {}".format(run.name))
# Calculate md5 sum pre encryption
if not force:
logger.info("Calculating md5sum before encryption")
md5_call, md5_out = bk._call_commands(cmd1="md5sum {}".format(run.zip), return_out=True, tmp_files=tmp_files)
if not md5_call:
logger.warn("Skipping run {} and moving on".format(run.name))
continue
md5_pre_encrypt = md5_out.split()[0]
# Encrypt the zipped run file
logger.info("Encrypting the zipped run file")
if not bk._call_commands(cmd1=("gpg --symmetric --cipher-algo aes256 --passphrase-file {} --batch --compress-algo "
"none -o {} {}".format(run.key, run.zip_encrypted, run.zip)), tmp_files=tmp_files):
logger.warn("Skipping run {} and moving on".format(run.name))
continue
# Decrypt and check for md5
if not force:
logger.info("Calculating md5sum after encryption")
md5_call, md5_out = bk._call_commands(cmd1="gpg --decrypt --cipher-algo aes256 --passphrase-file {} --batch {}".format(run.key, run.zip_encrypted),
cmd2="md5sum", return_out=True, tmp_files=tmp_files)
if not md5_call:
logger.warn("Skipping run {} and moving on".format(run.name))
continue
md5_post_encrypt = md5_out.split()[0]
if md5_pre_encrypt != md5_post_encrypt:
logger.error(("md5sum did not match before {} and after {} encryption. Will remove temp files and "
"move on".format(md5_pre_encrypt, md5_post_encrypt)))
bk._clean_tmp_files(tmp_files)
continue
logger.info("Md5sum is macthing before and after encryption")
# Encrypt and move the key file
if bk._call_commands(cmd1="gpg -e -r {} -o {} {}".format(bk.gpg_receiver, run.key_encrypted, run.key), tmp_files=tmp_files):
shutil.move(run.key_encrypted, run.dst_key_encrypted)
else:
logger.error("Encrption of key file failed, skipping run")
continue
bk._clean_tmp_files([run.zip, run.key, run.flag])
logger.info("Encryption of run {} is successfully done, removing zipped run file".format(run.name))
@classmethod
def pdc_put(cls, run):
"""Archive the collected runs to PDC"""
bk = cls(run)
bk.collect_runs(ext=".tar.gz.gpg", filter_by_ext=True)
logger.info("In total, found {} run(s) to send PDC".format(len(bk.runs)))
for run in bk.runs:
run.flag = "{}.archiving".format(run.name)
run.dst_key_encrypted = os.path.join(bk.keys_path, run.key_encrypted)
if run.path not in bk.archive_dirs.values():
logger.error(("Given run is not in one of the archive directories {}. Kindly move the run {} to appropriate "
"archive dir before sending it to PDC".format(",".join(bk.archive_dirs.values()), run.name)))
continue
if not os.path.exists(run.dst_key_encrypted):
logger.error("Encrypted key file {} is not found for file {}, skipping it".format(run.dst_key_encrypted, run.zip_encrypted))
continue
            # skip run if being encrypted
if os.path.exists("{}.encrypting".format(run.name)):
logger.warn("Run {} is currently being encrypted, so skipping now".format(run.name))
continue
# skip run if already ongoing
if os.path.exists(run.flag):
logger.warn("Run {} is already being archived, so skipping now".format(run.name))
continue
flag = open(run.flag, 'w').close()
with filesystem.chdir(run.path):
if bk.file_in_pdc(run.zip_encrypted, silent=False) or bk.file_in_pdc(run.dst_key_encrypted, silent=False):
logger.warn("Seems like files realted to run {} already exist in PDC, check and cleanup".format(run.name))
bk._clean_tmp_files([run.flag])
continue
logger.info("Sending file {} to PDC".format(run.zip_encrypted))
if bk._call_commands(cmd1="dsmc archive {}".format(run.zip_encrypted), tmp_files=[run.flag]):
time.sleep(15) # give some time just in case 'dsmc' needs to settle
if bk._call_commands(cmd1="dsmc archive {}".format(run.dst_key_encrypted), tmp_files=[run.flag]):
time.sleep(5) # give some time just in case 'dsmc' needs to settle
if bk.file_in_pdc(run.zip_encrypted) and bk.file_in_pdc(run.dst_key_encrypted):
logger.info("Successfully sent file {} to PDC, removing file locally from {}".format(run.zip_encrypted, run.path))
if bk.couch_info:
bk._log_pdc_statusdb(run.name)
bk._clean_tmp_files([run.zip_encrypted, run.dst_key_encrypted, run.flag])
continue
logger.warn("Sending file {} to PDC failed".format(run.zip_encrypted))
|
|
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Average Precision metric class for Waymo open dataset.
The Waymo library provides a metrics breakdown API for a set of breakdowns
implemented in their library. This wrapper uses our basic abstraction for
building AP metrics but only allows breakdowns that are supported in the
Waymo breakdown API. Should you want other breakdowns, consider using the
standard AP metrics implementation with our custom breakdowns.
"""
from lingvo import compat as tf
from lingvo.core import plot
from lingvo.core import py_utils
from lingvo.tasks.car import ap_metric
from lingvo.tasks.car import breakdown_metric
import numpy as np
from waymo_open_dataset import label_pb2
from waymo_open_dataset.metrics.ops import py_metrics_ops
from waymo_open_dataset.metrics.python import config_util_py as config_util
from waymo_open_dataset.protos import breakdown_pb2
from waymo_open_dataset.protos import metrics_pb2
def BuildWaymoMetricConfig(metadata, box_type, waymo_breakdown_metrics):
"""Build the Config proto for Waymo's metric op."""
config = metrics_pb2.Config()
# config.num_desired_score_cutoffs = metadata.NumberOfPrecisionRecallPoints()
num_pr_points = metadata.NumberOfPrecisionRecallPoints()
config.score_cutoffs.extend(
[i * 1.0 / (num_pr_points - 1) for i in range(num_pr_points)])
config.matcher_type = metrics_pb2.MatcherProto.Type.TYPE_HUNGARIAN
if box_type == '2d':
config.box_type = label_pb2.Label.Box.Type.TYPE_2D
else:
config.box_type = label_pb2.Label.Box.Type.TYPE_3D
# Default values
config.iou_thresholds[:] = [0.7, 0.7, 0.7, 0.7, 0.7]
for class_name, threshold in metadata.IoUThresholds().items():
cls_idx = metadata.ClassNames().index(class_name)
config.iou_thresholds[cls_idx] = threshold
# Run on all the data for 2 difficulty levels
config.breakdown_generator_ids.append(breakdown_pb2.Breakdown.ONE_SHARD)
difficulty = metrics_pb2.Difficulty()
difficulty.levels.append(label_pb2.Label.DifficultyLevel.Value('LEVEL_1'))
difficulty.levels.append(label_pb2.Label.DifficultyLevel.Value('LEVEL_2'))
config.difficulties.append(difficulty)
# Add extra breakdown metrics.
for breakdown_value in waymo_breakdown_metrics:
breakdown_id = breakdown_pb2.Breakdown.GeneratorId.Value(breakdown_value)
config.breakdown_generator_ids.append(breakdown_id)
difficulty = metrics_pb2.Difficulty()
difficulty.levels.append(label_pb2.Label.DifficultyLevel.Value('LEVEL_1'))
difficulty.levels.append(label_pb2.Label.DifficultyLevel.Value('LEVEL_2'))
config.difficulties.append(difficulty)
return config
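# Example (illustrative): a 3D-box config that also reports the extra RANGE
# breakdown on top of the default ONE_SHARD metrics:
#
#   config = BuildWaymoMetricConfig(metadata, '3d', ['RANGE'])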
class WaymoAPMetrics(ap_metric.APMetrics):
"""The Waymo Open Dataset implementation of AP metric."""
@classmethod
def Params(cls, metadata):
"""Params builder for APMetrics."""
p = super().Params(metadata)
p.Define(
'waymo_breakdown_metrics', [],
'List of extra waymo breakdown metrics when computing AP. These '
'should match the names of the proto entries in metrics.proto, such '
'as `RANGE` or `OBJECT_TYPE`.')
return p
def __init__(self, params):
super().__init__(params)
self._waymo_metric_config = BuildWaymoMetricConfig(
self.metadata, self.params.box_type,
self.params.waymo_breakdown_metrics)
# Compute only waymo breakdown metrics.
breakdown_names = config_util.get_breakdown_names_from_config(
self._waymo_metric_config)
waymo_params = WaymoBreakdownMetric.Params().Set(
metadata=self.metadata, breakdown_list=breakdown_names)
self._breakdown_metrics['waymo'] = WaymoBreakdownMetric(waymo_params)
# Remove the base metric.
del self._breakdown_metrics['difficulty']
def _GetData(self,
classid,
difficulty=None,
distance=None,
num_points=None,
rotation=None):
"""Returns groundtruth and prediction for the classid in a NestedMap.
Args:
classid: int32 specifying the class
difficulty: Not used.
distance: int32 specifying a binned Euclidean distance of the ground truth
bounding box. If None is specified, all distances are selected.
num_points: int32 specifying a binned number of laser points within the
ground truth bounding box. If None is specified, all boxes are selected.
rotation: int32 specifying a binned rotation within the ground truth
bounding box. If None is specified, all boxes are selected.
Returns:
NestedMap containing iou_threshold, groundtruth and predictions for
specified, classid, difficulty level and binned distance. If no bboxes
are found with these parameters, returns None.
"""
del difficulty
assert classid > 0 and classid < self.metadata.NumClasses()
g = self._LoadBoundingBoxes(
'groundtruth',
classid,
distance=distance,
num_points=num_points,
rotation=rotation)
# Note that we do not specify num_points for predictions because only
# groundtruth boxes contain points.
p = self._LoadBoundingBoxes(
'prediction', classid, distance, num_points=None, rotation=rotation)
if g is None or p is None:
return None
gt_boxes = g.boxes
gt_imgids = g.imgids
gt_speeds = g.speeds
iou_threshold = self._iou_thresholds[self.metadata.ClassNames()[classid]]
return py_utils.NestedMap(
iou_threshold=iou_threshold,
gt=py_utils.NestedMap(
imgid=gt_imgids,
bbox=gt_boxes,
speed=gt_speeds,
difficulty=g.difficulties),
pd=py_utils.NestedMap(imgid=p.imgids, bbox=p.boxes, score=p.scores))
def _BuildMetric(self, feed_data, classid):
"""Construct tensors and the feed_dict for Waymo metric op.
Args:
feed_data: a NestedMap returned by _GetData().
classid: integer.
Returns:
A tuple of 3 dicts:
- scalar_metrics: a dict mapping all the metric names to fetch tensors.
- curves: a dict mapping all the curve names to fetch tensors.
- feed_dict: a dict mapping the tensors in feed_tensors to feed values.
"""
breakdown_names = config_util.get_breakdown_names_from_config(
self._waymo_metric_config)
if feed_data is None:
dummy_scalar = tf.constant(np.nan)
dummy_curve = tf.zeros([self.metadata.NumberOfPrecisionRecallPoints(), 2],
tf.float32)
scalar_metrics = {'ap': dummy_scalar, 'ap_ha_weighted': dummy_scalar}
curve_metrics = {'pr': dummy_curve, 'pr_ha_weighted': dummy_curve}
for i, metric in enumerate(breakdown_names):
scalar_metrics['ap_%s' % metric] = dummy_scalar
scalar_metrics['ap_ha_weighted_%s' % metric] = dummy_scalar
curve_metrics['pr_%s' % metric] = dummy_curve
curve_metrics['pr_ha_weighted_%s' % metric] = dummy_curve
return py_utils.NestedMap(
feed_dict={},
scalar_metrics=scalar_metrics,
curve_metrics=curve_metrics)
feed_dict = {}
f_gt_bbox = tf.placeholder(tf.float32)
feed_dict[f_gt_bbox] = feed_data.gt.bbox
f_gt_imgid = tf.placeholder(tf.int32)
feed_dict[f_gt_imgid] = feed_data.gt.imgid
f_gt_speed = tf.placeholder(tf.float32)
feed_dict[f_gt_speed] = feed_data.gt.speed
f_gt_difficulty = tf.placeholder(tf.uint8)
feed_dict[f_gt_difficulty] = feed_data.gt.difficulty
f_pd_bbox = tf.placeholder(tf.float32)
feed_dict[f_pd_bbox] = feed_data.pd.bbox
f_pd_imgid = tf.placeholder(tf.int32)
feed_dict[f_pd_imgid] = feed_data.pd.imgid
f_pd_score = tf.placeholder(tf.float32)
feed_dict[f_pd_score] = feed_data.pd.score
num_gt_bboxes = feed_data.gt.imgid.shape[0]
num_pd_bboxes = feed_data.pd.imgid.shape[0]
gt_class_ids = tf.constant(classid, dtype=tf.uint8, shape=[num_gt_bboxes])
pd_class_ids = tf.constant(classid, dtype=tf.uint8, shape=[num_pd_bboxes])
ap, ap_ha, pr, pr_ha, _ = py_metrics_ops.detection_metrics(
prediction_bbox=f_pd_bbox,
prediction_type=pd_class_ids,
prediction_score=f_pd_score,
prediction_frame_id=tf.cast(f_pd_imgid, tf.int64),
prediction_overlap_nlz=tf.zeros_like(f_pd_imgid, dtype=tf.bool),
ground_truth_bbox=f_gt_bbox,
ground_truth_type=gt_class_ids,
ground_truth_frame_id=tf.cast(f_gt_imgid, tf.int64),
ground_truth_difficulty=f_gt_difficulty,
ground_truth_speed=f_gt_speed,
config=self._waymo_metric_config.SerializeToString())
# All tensors returned by Waymo's metric op have a leading dimension
# B=number of breakdowns. At this moment we always use B=1 to make
# it compatible to the python code.
scalar_metrics = {'ap': ap[0], 'ap_ha_weighted': ap_ha[0]}
curve_metrics = {'pr': pr[0], 'pr_ha_weighted': pr_ha[0]}
for i, metric in enumerate(breakdown_names):
# There is a scalar / curve for every breakdown.
scalar_metrics['ap_%s' % metric] = ap[i]
scalar_metrics['ap_ha_weighted_%s' % metric] = ap_ha[i]
curve_metrics['pr_%s' % metric] = pr[i]
curve_metrics['pr_ha_weighted_%s' % metric] = pr_ha[i]
return py_utils.NestedMap(
feed_dict=feed_dict,
scalar_metrics=scalar_metrics,
curve_metrics=curve_metrics)
def _ComputeFinalMetrics(self,
classids=None,
difficulty=None,
distance=None,
num_points=None,
rotation=None):
"""Compute precision-recall curves as well as average precision.
Args:
classids: A list of N int32.
difficulty: Not used.
distance: int32 specifying a binned Euclidean distance of the ground truth
bounding box. If None is specified, all distances are selected.
num_points: int32 specifying a binned number of laser points within the
ground truth bounding box. If None is specified, all boxes are selected.
rotation: int32 specifying a binned rotation within the ground truth
bounding box. If None is specified, all boxes are selected.
Returns:
dict. Each entry in the dict is a list of C (number of classes) dicts
containing mapping from metric names to individual results. Individual
entries may be the following items.
- scalars: A list of C (number of classes) dicts mapping metric
names to scalar values.
- curves: A list of C dicts mapping metrics names to np.float32
arrays of shape [NumberOfPrecisionRecallPoints()+1, 2]. In the last
dimension, 0 indexes precision and 1 indexes recall.
"""
del difficulty
tf.logging.info('Computing final Waymo metrics.')
assert classids is not None, 'classids must be supplied.'
feed_dict = {}
g = tf.Graph()
scalar_fetches = []
curve_fetches = []
with g.as_default():
for classid in classids:
data = self._GetData(
classid,
distance=distance,
num_points=num_points,
rotation=rotation)
metrics = self._BuildMetric(data, classid)
scalar_fetches += [metrics.scalar_metrics]
curve_fetches += [metrics.curve_metrics]
feed_dict.update(metrics.feed_dict)
with tf.Session(graph=g) as sess:
results = sess.run([scalar_fetches, curve_fetches], feed_dict=feed_dict)
tf.logging.info('Finished computing final Waymo metrics.')
return {'scalars': results[0], 'curves': results[1]}
@property
def value(self):
"""Returns weighted mAP over all eval classes."""
self._EvaluateIfNecessary()
ap = self._breakdown_metrics['waymo']._average_precisions # pylint:disable=protected-access
breakdown_names = config_util.get_breakdown_names_from_config(
self._waymo_metric_config)
num_sum = 0.0
denom_sum = 0.0
# Compute the average AP over all eval classes. The first breakdown
# is the overall mAP.
for class_index in range(len(self.metadata.EvalClassIndices())):
num_sum += np.nan_to_num(ap[breakdown_names[0]][class_index])
denom_sum += 1.
return num_sum / denom_sum
def Summary(self, name):
"""Implements custom Summary for Waymo metrics."""
self._EvaluateIfNecessary()
ret = tf.Summary()
# Put '.value' first (so it shows up in logs / summaries, etc).
ret.value.add(tag='{}/weighted_mAP'.format(name), simple_value=self.value)
ap = self._breakdown_metrics['waymo']._average_precisions # pylint:disable=protected-access
aph = self._breakdown_metrics['waymo']._average_precision_headings # pylint:disable=protected-access
breakdown_names = config_util.get_breakdown_names_from_config(
self._waymo_metric_config)
for i, class_index in enumerate(self.metadata.EvalClassIndices()):
classname = self.metadata.ClassNames()[class_index]
for breakdown_name in breakdown_names:
# 'ONE_SHARD' breakdowns are the overall metrics (not sliced up)
        # So we should make that the default metric.
if 'ONE_SHARD' in breakdown_name:
# For the overall mAP, include the class name
# and set the breakdown_str which will have the level
prefix = '{}/{}'.format(name, classname)
postfix = breakdown_name.replace('ONE_SHARD_', '')
breakdown_str = postfix if postfix else 'UNKNOWN'
# Otherwise check that the class we are looking at is in the breakdown.
elif classname.lower() in breakdown_name.lower():
prefix = '{}_extra'.format(name)
breakdown_str = breakdown_name
else:
continue
tag_str = '{}/AP_{}'.format(prefix, breakdown_str)
ap_value = ap[breakdown_name][i]
ret.value.add(tag=tag_str, simple_value=ap_value)
tag_str = '{}/APH_{}'.format(prefix, breakdown_str)
aph_value = aph[breakdown_name][i]
ret.value.add(tag=tag_str, simple_value=aph_value)
image_summaries = self._breakdown_metrics['waymo'].GenerateSummaries(name)
for image_summary in image_summaries:
ret.value.extend(image_summary.value)
return ret
class WaymoBreakdownMetric(breakdown_metric.BreakdownMetric):
"""Calculate average precision as function of difficulty."""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'breakdown_list', [],
'A list of breakdown names corresponding to the breakdown '
'metrics computed from the Waymo breakdown generator config.')
return p
def __init__(self, p):
super().__init__(p)
self._average_precision_headings = {}
self._precision_recall_headings = {}
def ComputeMetrics(self, compute_metrics_fn):
p = self.params
tf.logging.info('Calculating waymo AP breakdowns: start')
metrics = compute_metrics_fn()
scalars = metrics['scalars']
curves = metrics['curves']
for breakdown_str in p.breakdown_list:
self._average_precisions[breakdown_str] = [
s['ap_%s' % breakdown_str] for s in scalars
]
self._average_precision_headings[breakdown_str] = [
s['ap_ha_weighted_%s' % breakdown_str] for s in scalars
]
self._precision_recall[breakdown_str] = np.array(
[c['pr_%s' % breakdown_str][..., :2] for c in curves])
self._precision_recall_headings[breakdown_str] = np.array(
[c['pr_ha_weighted_%s' % breakdown_str] for c in curves])
tf.logging.info('Calculating waymo AP breakdowns: finished')
def GenerateSummaries(self, name):
"""Generate an image summary for precision recall by difficulty."""
p = self.params
image_summaries = []
for i, class_index in enumerate(p.metadata.EvalClassIndices()):
def _Setter(fig, axes):
"""Configure the plot for precision recall."""
ticks = np.arange(0, 1.05, 0.1)
axes.grid(b=False)
axes.set_xlabel('Recall')
axes.set_xticks(ticks)
axes.set_ylabel('Precision')
axes.set_yticks(ticks)
# TODO(vrv): Add legend indicating number of objects in breakdown.
fig.tight_layout()
classname = p.metadata.ClassNames()[class_index]
for breakdown_name in p.breakdown_list:
# 'ONE_SHARD' breakdowns are the overall metrics (not sliced up)
# So we should never skip this.
if 'ONE_SHARD' in breakdown_name:
breakdown_str = breakdown_name.replace('ONE_SHARD_', '')
tag_str = '{}/{}/{}/PR'.format(name, classname, breakdown_str)
# Otherwise check that the class we are looking at is in the breakdown.
elif classname.lower() in breakdown_name.lower():
tag_str = '{}/{}/{}/PR'.format(name, classname, breakdown_name)
else:
continue
ps = [self._precision_recall[breakdown_name][i][:, 0]]
rs = [self._precision_recall[breakdown_name][i][:, 1]]
image_summary = plot.Curve(
name=tag_str,
figsize=(10, 8),
xs=rs[0],
ys=np.array(ps).T,
setter=_Setter,
marker='.',
markersize=14,
linestyle='-',
linewidth=2,
alpha=0.5)
image_summaries.append(image_summary)
return image_summaries
# Fill in dummy implementations which are largely
# unused. The current implementation does not provide breakdown
# image summaries that do bucketing; we assume that the waymo breakdown
# implementations will break things down as necessary.
def AccumulateHistogram(self, result):
pass
def AccumulateCumulative(self, result):
pass
def NumBinsOfHistogram(self):
return 1
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""This file contains a unit test for the timelib in Plaso."""
import datetime
import unittest
from plaso.lib import errors
from plaso.lib import timelib
import pytz
class TimeLibTest(unittest.TestCase):
"""Tests for timestamp."""
def testCopyFromString(self):
"""Tests the CopyFromString function."""
timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27')
expected_timestamp = 1340755200000000
self.assertEqual(timestamp, expected_timestamp)
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(None)
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-6-27')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-00-27')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-13-27')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-01-00')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-01-32')
timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01')
expected_timestamp = 1340821021000000
self.assertEqual(timestamp, expected_timestamp)
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:1')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27T18:17:01')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 24:17:01')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:60:01')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:60')
timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01.123')
expected_timestamp = 1340821021123000
self.assertEqual(timestamp, expected_timestamp)
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01.')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01.12')
timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01.123456')
expected_timestamp = 1340821021123456
self.assertEqual(timestamp, expected_timestamp)
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01.1234')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01.1234567')
timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01+00:00')
expected_timestamp = 1340821021000000
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01+01:00')
expected_timestamp = 1340817421000000
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01-07:00')
expected_timestamp = 1340846221000000
self.assertEqual(timestamp, expected_timestamp)
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01+1')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01+01')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01+01:0')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01+00:00:0')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01Z')
def testCocoaTime(self):
"""Tests the Cocoa timestamp conversion."""
timestamp = timelib.Timestamp.FromCocoaTime(395011845)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-07-08 21:30:45')
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.FromCocoaTime(395353142)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-07-12 20:19:02')
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.FromCocoaTime(394993669)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-07-08 16:27:49')
self.assertEqual(timestamp, expected_timestamp)
def testHFSTimes(self):
"""Tests the HFS timestamp conversion."""
timestamp = timelib.Timestamp.FromHfsTime(
3458215528, timezone=pytz.timezone(u'EST5EDT'), is_dst=True)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-08-01 15:25:28-04:00')
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.FromHfsPlusTime(3458215528)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-08-01 15:25:28')
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.FromHfsPlusTime(3413373928)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2012-02-29 15:25:28')
self.assertEqual(timestamp, expected_timestamp)
def testTimestampIsLeapYear(self):
"""Tests the is leap year check."""
self.assertEqual(timelib.Timestamp.IsLeapYear(2012), True)
self.assertEqual(timelib.Timestamp.IsLeapYear(2013), False)
self.assertEqual(timelib.Timestamp.IsLeapYear(2000), True)
self.assertEqual(timelib.Timestamp.IsLeapYear(1900), False)
def testTimestampDaysInMonth(self):
"""Tests the days in month function."""
self.assertEqual(timelib.Timestamp.DaysInMonth(0, 2013), 31)
self.assertEqual(timelib.Timestamp.DaysInMonth(1, 2013), 28)
self.assertEqual(timelib.Timestamp.DaysInMonth(1, 2012), 29)
self.assertEqual(timelib.Timestamp.DaysInMonth(2, 2013), 31)
self.assertEqual(timelib.Timestamp.DaysInMonth(3, 2013), 30)
self.assertEqual(timelib.Timestamp.DaysInMonth(4, 2013), 31)
self.assertEqual(timelib.Timestamp.DaysInMonth(5, 2013), 30)
self.assertEqual(timelib.Timestamp.DaysInMonth(6, 2013), 31)
self.assertEqual(timelib.Timestamp.DaysInMonth(7, 2013), 31)
self.assertEqual(timelib.Timestamp.DaysInMonth(8, 2013), 30)
self.assertEqual(timelib.Timestamp.DaysInMonth(9, 2013), 31)
self.assertEqual(timelib.Timestamp.DaysInMonth(10, 2013), 30)
self.assertEqual(timelib.Timestamp.DaysInMonth(11, 2013), 31)
with self.assertRaises(ValueError):
timelib.Timestamp.DaysInMonth(-1, 2013)
with self.assertRaises(ValueError):
timelib.Timestamp.DaysInMonth(12, 2013)
def testTimestampDaysInYear(self):
"""Test the days in year function."""
self.assertEqual(timelib.Timestamp.DaysInYear(2013), 365)
self.assertEqual(timelib.Timestamp.DaysInYear(2012), 366)
def testTimestampDayOfYear(self):
"""Test the day of year function."""
self.assertEqual(timelib.Timestamp.DayOfYear(0, 0, 2013), 0)
self.assertEqual(timelib.Timestamp.DayOfYear(0, 2, 2013), 31 + 28)
self.assertEqual(timelib.Timestamp.DayOfYear(0, 2, 2012), 31 + 29)
expected_day_of_year = 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30
self.assertEqual(
timelib.Timestamp.DayOfYear(0, 11, 2013), expected_day_of_year)
def testTimestampFromDelphiTime(self):
"""Test the Delphi date time conversion."""
timestamp = timelib.Timestamp.FromDelphiTime(41443.8263953)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-06-18 19:50:00')
self.assertEqual(timestamp, expected_timestamp)
def testTimestampFromFatDateTime(self):
"""Test the FAT date time conversion."""
timestamp = timelib.Timestamp.FromFatDateTime(0xa8d03d0c)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-08-12 21:06:32')
self.assertEqual(timestamp, expected_timestamp)
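    # FAT date time bit layout, as exercised by the masks below: bits 0-4 hold
    # the day of month, 5-8 the month, 9-15 the year delta from 1980,
    # 16-20 the seconds divided by two (0-29), 21-26 the minutes and
    # 27-31 the hours.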
# Invalid number of seconds.
fat_date_time = (0xa8d03d0c & ~(0x1f << 16)) | ((30 & 0x1f) << 16)
self.assertEqual(timelib.Timestamp.FromFatDateTime(fat_date_time), 0)
# Invalid number of minutes.
fat_date_time = (0xa8d03d0c & ~(0x3f << 21)) | ((60 & 0x3f) << 21)
self.assertEqual(timelib.Timestamp.FromFatDateTime(fat_date_time), 0)
# Invalid number of hours.
fat_date_time = (0xa8d03d0c & ~(0x1f << 27)) | ((24 & 0x1f) << 27)
self.assertEqual(timelib.Timestamp.FromFatDateTime(fat_date_time), 0)
# Invalid day of month.
fat_date_time = (0xa8d03d0c & ~0x1f) | (32 & 0x1f)
self.assertEqual(timelib.Timestamp.FromFatDateTime(fat_date_time), 0)
# Invalid month.
fat_date_time = (0xa8d03d0c & ~(0x0f << 5)) | ((13 & 0x0f) << 5)
self.assertEqual(timelib.Timestamp.FromFatDateTime(fat_date_time), 0)
def testTimestampFromWebKitTime(self):
"""Test the WebKit time conversion."""
timestamp = timelib.Timestamp.FromWebKitTime(0x2dec3d061a9bfb)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-08-12 21:06:31.546875')
self.assertEqual(timestamp, expected_timestamp)
webkit_time = 86400 * 1000000
timestamp = timelib.Timestamp.FromWebKitTime(webkit_time)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'1601-01-02 00:00:00')
self.assertEqual(timestamp, expected_timestamp)
# WebKit time that exceeds lower bound.
webkit_time = -((1 << 63L) - 1)
self.assertEqual(timelib.Timestamp.FromWebKitTime(webkit_time), 0)
def testTimestampFromFiletime(self):
"""Test the FILETIME conversion."""
timestamp = timelib.Timestamp.FromFiletime(0x01cb3a623d0a17ce)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-08-12 21:06:31.546875')
self.assertEqual(timestamp, expected_timestamp)
filetime = 86400 * 10000000
timestamp = timelib.Timestamp.FromFiletime(filetime)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'1601-01-02 00:00:00')
self.assertEqual(timestamp, expected_timestamp)
# FILETIME that exceeds lower bound.
filetime = -1
self.assertEqual(timelib.Timestamp.FromFiletime(filetime), 0)
def testTimestampFromPosixTime(self):
"""Test the POSIX time conversion."""
timestamp = timelib.Timestamp.FromPosixTime(1281647191)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-08-12 21:06:31')
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.FromPosixTime(-122557518)
expected_timestamp = timelib.Timestamp.FromTimeString(
        u'1966-02-12 12:14:42 UTC')
self.assertEqual(timestamp, expected_timestamp)
# POSIX time that exceeds upper bound.
self.assertEqual(timelib.Timestamp.FromPosixTime(9223372036855), 0)
# POSIX time that exceeds lower bound.
self.assertEqual(timelib.Timestamp.FromPosixTime(-9223372036855), 0)
def testMonthDict(self):
"""Test the month dict, both inside and outside of scope."""
self.assertEqual(timelib.MONTH_DICT[u'nov'], 11)
self.assertEqual(timelib.MONTH_DICT[u'jan'], 1)
self.assertEqual(timelib.MONTH_DICT[u'may'], 5)
month = timelib.MONTH_DICT.get(u'doesnotexist')
self.assertEqual(month, None)
def testLocaltimeToUTC(self):
"""Test the localtime to UTC conversion."""
timezone = pytz.timezone(u'CET')
local_timestamp = timelib.Timestamp.CopyFromString(u'2013-01-01 01:00:00')
timestamp = timelib.Timestamp.LocaltimeToUTC(local_timestamp, timezone)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-01-01 00:00:00')
self.assertEqual(timestamp, expected_timestamp)
local_timestamp = timelib.Timestamp.CopyFromString(u'2013-07-01 02:00:00')
timestamp = timelib.Timestamp.LocaltimeToUTC(local_timestamp, timezone)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-07-01 00:00:00')
self.assertEqual(timestamp, expected_timestamp)
# In the local timezone this is a non-existent timestamp.
local_timestamp = timelib.Timestamp.CopyFromString(
u'2013-03-31 02:00:00')
with self.assertRaises(pytz.NonExistentTimeError):
timelib.Timestamp.LocaltimeToUTC(local_timestamp, timezone, is_dst=None)
timestamp = timelib.Timestamp.LocaltimeToUTC(
local_timestamp, timezone, is_dst=True)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-03-31 00:00:00')
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.LocaltimeToUTC(
local_timestamp, timezone, is_dst=False)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-03-31 01:00:00')
self.assertEqual(timestamp, expected_timestamp)
# In the local timezone this is an ambiguous timestamp.
local_timestamp = timelib.Timestamp.CopyFromString(u'2013-10-27 02:30:00')
with self.assertRaises(pytz.AmbiguousTimeError):
timelib.Timestamp.LocaltimeToUTC(local_timestamp, timezone, is_dst=None)
timestamp = timelib.Timestamp.LocaltimeToUTC(
local_timestamp, timezone, is_dst=True)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-10-27 00:30:00')
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.LocaltimeToUTC(local_timestamp, timezone)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-10-27 01:30:00')
self.assertEqual(timestamp, expected_timestamp)
# Use the UTC timezone.
self.assertEqual(
timelib.Timestamp.LocaltimeToUTC(local_timestamp, pytz.UTC),
local_timestamp)
# Use a timezone in the Western Hemisphere.
timezone = pytz.timezone(u'EST')
local_timestamp = timelib.Timestamp.CopyFromString(u'2013-01-01 00:00:00')
timestamp = timelib.Timestamp.LocaltimeToUTC(local_timestamp, timezone)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-01-01 05:00:00')
self.assertEqual(timestamp, expected_timestamp)
def testCopyToDatetime(self):
"""Test the copy to datetime object."""
timezone = pytz.timezone(u'CET')
timestamp = timelib.Timestamp.CopyFromString(u'2013-03-14 20:20:08.850041')
datetime_object = timelib.Timestamp.CopyToDatetime(timestamp, timezone)
expected_datetime_object = datetime.datetime(
2013, 3, 14, 21, 20, 8, 850041, tzinfo=timezone)
self.assertEqual(datetime_object, expected_datetime_object)
def testCopyToPosix(self):
"""Test converting microseconds to seconds."""
timestamp = timelib.Timestamp.CopyFromString(u'2013-10-01 12:00:00')
expected_posixtime, _ = divmod(timestamp, 1000000)
posixtime = timelib.Timestamp.CopyToPosix(timestamp)
self.assertEqual(posixtime, expected_posixtime)
def testTimestampFromTimeString(self):
"""The the FromTimeString function."""
# Test daylight savings.
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-10-01 12:00:00')
# Check certain variance of this timestamp.
timestamp = timelib.Timestamp.FromTimeString(
u'2013-10-01 14:00:00', timezone=pytz.timezone(u'Europe/Rome'))
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.FromTimeString(
u'2013-10-01 12:00:00', timezone=pytz.timezone(u'UTC'))
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.FromTimeString(
u'2013-10-01 05:00:00', timezone=pytz.timezone(u'PST8PDT'))
self.assertEqual(timestamp, expected_timestamp)
# Now to test outside of the daylight savings.
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2014-02-01 12:00:00')
timestamp = timelib.Timestamp.FromTimeString(
u'2014-02-01 13:00:00', timezone=pytz.timezone(u'Europe/Rome'))
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.FromTimeString(
u'2014-02-01 12:00:00', timezone=pytz.timezone(u'UTC'))
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.FromTimeString(
u'2014-02-01 04:00:00', timezone=pytz.timezone(u'PST8PDT'))
self.assertEqual(timestamp, expected_timestamp)
# Define two timestamps, one being GMT and the other UTC.
time_string_utc = u'Wed 05 May 2010 03:52:31 UTC'
time_string_gmt = u'Wed 05 May 2010 03:52:31 GMT'
timestamp_utc = timelib.Timestamp.FromTimeString(time_string_utc)
timestamp_gmt = timelib.Timestamp.FromTimeString(time_string_gmt)
    # Test if these two are different, and if so, try again using the
    # 'gmt_as_timezone' flag, which should then produce the same result.
if timestamp_utc != timestamp_gmt:
self.assertEqual(timestamp_utc, timelib.Timestamp.FromTimeString(
time_string_gmt, gmt_as_timezone=False))
timestamp = timelib.Timestamp.FromTimeString(
u'12-15-1984 05:13:00', timezone=pytz.timezone(u'EST5EDT'))
self.assertEqual(timestamp, 471953580000000)
# Swap day and month.
timestamp = timelib.Timestamp.FromTimeString(
u'12-10-1984 05:13:00', timezone=pytz.timezone(u'EST5EDT'),
dayfirst=True)
self.assertEqual(timestamp, 466420380000000)
timestamp = timelib.Timestamp.FromTimeString(u'12-15-1984 10:13:00Z')
self.assertEqual(timestamp, 471953580000000)
# Setting the timezone for string that already contains a timezone
# indicator should not affect the conversion.
timestamp = timelib.Timestamp.FromTimeString(
u'12-15-1984 10:13:00Z', timezone=pytz.timezone(u'EST5EDT'))
self.assertEqual(timestamp, 471953580000000)
timestamp = timelib.Timestamp.FromTimeString(u'15/12/1984 10:13:00Z')
self.assertEqual(timestamp, 471953580000000)
timestamp = timelib.Timestamp.FromTimeString(u'15-12-84 10:13:00Z')
self.assertEqual(timestamp, 471953580000000)
timestamp = timelib.Timestamp.FromTimeString(
u'15-12-84 10:13:00-04', timezone=pytz.timezone(u'EST5EDT'))
self.assertEqual(timestamp, 471967980000000)
with self.assertRaises(errors.TimestampError):
timestamp = timelib.Timestamp.FromTimeString(
u'thisisnotadatetime', timezone=pytz.timezone(u'EST5EDT'))
timestamp = timelib.Timestamp.FromTimeString(
u'12-15-1984 04:13:00', timezone=pytz.timezone(u'America/Chicago'))
self.assertEqual(timestamp, 471953580000000)
timestamp = timelib.Timestamp.FromTimeString(
u'07-14-1984 23:13:00', timezone=pytz.timezone(u'America/Chicago'))
self.assertEqual(timestamp, 458712780000000)
timestamp = timelib.Timestamp.FromTimeString(
u'12-15-1984 05:13:00', timezone=pytz.timezone(u'US/Pacific'))
self.assertEqual(timestamp, 471964380000000)
def testRoundTimestamp(self):
"""Test the RoundToSeconds function."""
# Should be rounded up.
test_one = 442813351785412
# Should be rounded down.
test_two = 1384381247271976
self.assertEqual(
timelib.Timestamp.RoundToSeconds(test_one), 442813352000000)
self.assertEqual(
timelib.Timestamp.RoundToSeconds(test_two), 1384381247000000)
def testTimestampFromTimeParts(self):
"""Test the FromTimeParts function."""
timestamp = timelib.Timestamp.FromTimeParts(
2013, 6, 25, 22, 19, 46, 0, timezone=pytz.timezone(u'PST8PDT'))
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-06-25 22:19:46-07:00')
self.assertEqual(timestamp, expected_timestamp)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-06-26 05:19:46')
timestamp = timelib.Timestamp.FromTimeParts(2013, 6, 26, 5, 19, 46)
self.assertEqual(timestamp, expected_timestamp)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-06-26 05:19:46.000542')
timestamp = timelib.Timestamp.FromTimeParts(2013, 6, 26, 5, 19, 46, 542)
self.assertEqual(timestamp, expected_timestamp)
if __name__ == '__main__':
unittest.main()
|
|
import keyword
import re
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.models.constants import LOOKUP_SEP
class Command(BaseCommand):
help = "Introspects the database tables in the given database and outputs a Django model module."
requires_system_checks = False
stealth_options = ('table_name_filter',)
db_module = 'django.db'
def add_arguments(self, parser):
parser.add_argument(
'table', nargs='*', type=str,
help='Selects what tables or views should be introspected.',
)
parser.add_argument(
'--database', default=DEFAULT_DB_ALIAS,
help='Nominates a database to introspect. Defaults to using the "default" database.',
)
parser.add_argument(
'--include-partitions', action='store_true', help='Also output models for partition tables.',
)
parser.add_argument(
'--include-views', action='store_true', help='Also output models for database views.',
)
def handle(self, **options):
try:
for line in self.handle_inspection(options):
self.stdout.write("%s\n" % line)
except NotImplementedError:
raise CommandError("Database inspection isn't supported for the currently selected database backend.")
def handle_inspection(self, options):
connection = connections[options['database']]
# 'table_name_filter' is a stealth option
table_name_filter = options.get('table_name_filter')
def table2model(table_name):
return re.sub(r'[^a-zA-Z0-9]', '', table_name.title())
with connection.cursor() as cursor:
yield "# This is an auto-generated Django model module."
yield "# You'll have to do the following manually to clean this up:"
yield "# * Rearrange models' order"
yield "# * Make sure each model has one field with primary_key=True"
yield "# * Make sure each ForeignKey and OneToOneField has `on_delete` set to the desired behavior"
yield (
"# * Remove `managed = False` lines if you wish to allow "
"Django to create, modify, and delete the table"
)
yield "# Feel free to rename the models, but don't rename db_table values or field names."
yield 'from %s import models' % self.db_module
known_models = []
table_info = connection.introspection.get_table_list(cursor)
# Determine types of tables and/or views to be introspected.
types = {'t'}
if options['include_partitions']:
types.add('p')
if options['include_views']:
types.add('v')
for table_name in (options['table'] or sorted(info.name for info in table_info if info.type in types)):
if table_name_filter is not None and callable(table_name_filter):
if not table_name_filter(table_name):
continue
try:
try:
relations = connection.introspection.get_relations(cursor, table_name)
except NotImplementedError:
relations = {}
try:
constraints = connection.introspection.get_constraints(cursor, table_name)
except NotImplementedError:
constraints = {}
primary_key_column = connection.introspection.get_primary_key_column(cursor, table_name)
unique_columns = [
c['columns'][0] for c in constraints.values()
if c['unique'] and len(c['columns']) == 1
]
table_description = connection.introspection.get_table_description(cursor, table_name)
except Exception as e:
yield "# Unable to inspect table '%s'" % table_name
yield "# The error was: %s" % e
continue
yield ''
yield ''
yield 'class %s(models.Model):' % table2model(table_name)
known_models.append(table2model(table_name))
used_column_names = [] # Holds column names used in the table so far
column_to_field_name = {} # Maps column names to names of model fields
for row in table_description:
comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
extra_params = {} # Holds Field parameters such as 'db_column'.
column_name = row.name
is_relation = column_name in relations
att_name, params, notes = self.normalize_col_name(
column_name, used_column_names, is_relation)
extra_params.update(params)
comment_notes.extend(notes)
used_column_names.append(att_name)
column_to_field_name[column_name] = att_name
# Add primary_key and unique, if necessary.
if column_name == primary_key_column:
extra_params['primary_key'] = True
elif column_name in unique_columns:
extra_params['unique'] = True
if is_relation:
if extra_params.pop('unique', False) or extra_params.get('primary_key'):
rel_type = 'OneToOneField'
else:
rel_type = 'ForeignKey'
rel_to = (
"self" if relations[column_name][1] == table_name
else table2model(relations[column_name][1])
)
if rel_to in known_models:
field_type = '%s(%s' % (rel_type, rel_to)
else:
field_type = "%s('%s'" % (rel_type, rel_to)
else:
# Calling `get_field_type` to get the field type string and any
# additional parameters and notes.
field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
extra_params.update(field_params)
comment_notes.extend(field_notes)
field_type += '('
# Don't output 'id = meta.AutoField(primary_key=True)', because
# that's assumed if it doesn't exist.
if att_name == 'id' and extra_params == {'primary_key': True}:
if field_type == 'AutoField(':
continue
elif field_type == 'IntegerField(' and not connection.features.can_introspect_autofield:
comment_notes.append('AutoField?')
# Add 'null' and 'blank', if the 'null_ok' flag was present in the
# table description.
if row.null_ok: # If it's NULL...
extra_params['blank'] = True
extra_params['null'] = True
field_desc = '%s = %s%s' % (
att_name,
# Custom fields will have a dotted path
'' if '.' in field_type else 'models.',
field_type,
)
if field_type.startswith(('ForeignKey(', 'OneToOneField(')):
field_desc += ', models.DO_NOTHING'
if extra_params:
if not field_desc.endswith('('):
field_desc += ', '
field_desc += ', '.join('%s=%r' % (k, v) for k, v in extra_params.items())
field_desc += ')'
if comment_notes:
field_desc += ' # ' + ' '.join(comment_notes)
yield ' %s' % field_desc
is_view = any(info.name == table_name and info.type == 'v' for info in table_info)
is_partition = any(info.name == table_name and info.type == 'p' for info in table_info)
yield from self.get_meta(table_name, constraints, column_to_field_name, is_view, is_partition)
def normalize_col_name(self, col_name, used_column_names, is_relation):
"""
Modify the column name to make it Python-compatible as a field name
"""
field_params = {}
field_notes = []
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append('Field renamed to remove unsuitable characters.')
if new_name.find(LOOKUP_SEP) >= 0:
while new_name.find(LOOKUP_SEP) >= 0:
new_name = new_name.replace(LOOKUP_SEP, '_')
if col_name.lower().find(LOOKUP_SEP) >= 0:
# Only add the comment if the double underscore was in the original name
field_notes.append("Field renamed because it contained more than one '_' in a row.")
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append('Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append("Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes
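    # Illustrative sketch (hypothetical column names, not from a real schema):
    # normalize_col_name('User ID', [], False) returns
    # ('user_id', {'db_column': 'User ID'}, <two rename notes>), while for a
    # relation, normalize_col_name('author_id', [], True) returns
    # ('author', {}, []).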
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = {}
field_notes = []
try:
field_type = connection.introspection.get_field_type(row.type_code, row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# Add max_length for all CharFields.
if field_type == 'CharField' and row.internal_size:
field_params['max_length'] = int(row.internal_size)
if field_type == 'DecimalField':
if row.precision is None or row.scale is None:
field_notes.append(
'max_digits and decimal_places have been guessed, as this '
'database handles decimal fields as float')
field_params['max_digits'] = row.precision if row.precision is not None else 10
field_params['decimal_places'] = row.scale if row.scale is not None else 5
else:
field_params['max_digits'] = row.precision
field_params['decimal_places'] = row.scale
return field_type, field_params, field_notes
def get_meta(self, table_name, constraints, column_to_field_name, is_view, is_partition):
"""
Return a sequence comprising the lines of code necessary
to construct the inner Meta class for the model corresponding
to the given database table name.
"""
unique_together = []
has_unsupported_constraint = False
for params in constraints.values():
if params['unique']:
columns = params['columns']
if None in columns:
has_unsupported_constraint = True
columns = [x for x in columns if x is not None]
if len(columns) > 1:
unique_together.append(str(tuple(column_to_field_name[c] for c in columns)))
if is_view:
managed_comment = " # Created from a view. Don't remove."
elif is_partition:
managed_comment = " # Created from a partition. Don't remove."
else:
managed_comment = ''
meta = ['']
if has_unsupported_constraint:
meta.append(' # A unique constraint could not be introspected.')
meta += [
' class Meta:',
' managed = False%s' % managed_comment,
' db_table = %r' % table_name
]
if unique_together:
tup = '(' + ', '.join(unique_together) + ',)'
meta += [" unique_together = %s" % tup]
return meta
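# Usage sketch (hypothetical table name, assuming a configured Django
# project; the flags are defined in add_arguments above):
#
#   python manage.py inspectdb --database=default --include-views mytable > models.py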
|
|
#!/usr/bin/env python
# convert expression data into expressed and not-expressed genes
import sys
import time
import optparse
import general
import numpy
import pickle
import pdb
import metrn
import modencode
import multiprocessing
import fasta
import os
from runner import *
print "Command:", " ".join(sys.argv)
print "Timestamp:", time.asctime(time.localtime())
""" define a function for mapping IDs """
def geneConverter(organismTag, inpath, i, j, upper=True, nameUpper=True, idUpper=True):
geneFile = metrn.reference[organismTag]["gene_ids"]
idLabels = metrn.reference[organismTag][i]
nameLabels = metrn.reference[organismTag][j]
id2name_dict, name2id_dict = modencode.idBuild(inpath + geneFile, idLabels, nameLabels, mode="label", header=True, nameUpper=nameUpper, idUpper=idUpper)
return id2name_dict, name2id_dict
""" define a function that loads the orthologs between species from file """
def orthologBuilder(aspecies, bspecies, infile):
speciesDict = {
"celegans" : "ce",
"dmel" : "dm",
"human" : "hs",
"mouse" : "mm"
}
removalDict = {
"ce" : "Cel-",
"dm" : "dmel_",
"hs" : "human_",
"mm" : "mouse_"
}
	indata = open(infile)
	inline = indata.readline()
	iDict, jDict = dict(), dict()
	# Note: str.lstrip() strips a set of characters rather than a prefix, so
	# remove the species prefixes explicitly.
	def stripPrefix(gene, prefix):
		return gene[len(prefix):] if gene.startswith(prefix) else gene
	while inline:
		index, i, j, iGene, jGene, iValue, jValue = inline.strip().split("\t")
		if i in speciesDict and j in speciesDict:
			i, j = speciesDict[i], speciesDict[j]
			if i in [aspecies, bspecies] and j in [aspecies, bspecies]:
				if i == aspecies and j == bspecies:
					iGene, jGene = stripPrefix(iGene, removalDict[i]), stripPrefix(jGene, removalDict[j])
				elif j == aspecies and i == bspecies:
					jGene, iGene = stripPrefix(iGene, removalDict[i]), stripPrefix(jGene, removalDict[j])
if not iGene in iDict:
iDict[iGene] = list()
if not jGene in jDict:
jDict[jGene] = list()
iDict[iGene].append(jGene)
jDict[jGene].append(iGene)
inline = indata.readline()
return iDict, jDict
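""" note: orthologBuilder expects tab-separated input lines of the form
    index, species_i, species_j, gene_i, gene_j, value_i, value_j
(derived from the split above); species codes are mapped through speciesDict
and gene prefixes such as 'Cel-' or 'human_' are removed """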
""" define a function that assigns features to others and returns a dictionary... """
def featureMapper(featurefile, regionfile, outputfile, fraction, target, headerDict="auto", header="OFF", overwrite="ON"):
# get output part names:
	outputname = outputfile.split("/")[-1]
	outputpath = "/".join(outputfile.split("/")[:-1]) + "/"
# is there a header on the region file?
if overwrite == "ON" and header == "ON":
headfile = regionfile.replace(".bed", ".head")
tempfile = regionfile.replace(".bed", ".tmp")
command = "cp " + regionfile + " " + tempfile
os.system(command)
command = "head -n 1 " + regionfile + ' > ' + headfile
os.system(command)
command = 'grep -v "feature" ' + regionfile + ' > ' + tempfile
os.system(command)
# define header presence
if header == "ON":
headerFlag = True
else:
headerFlag = False
# intersect BED files:
if overwrite == "ON" or not outputname in os.listdir(outputpath):
if fraction == "OFF":
command = "intersectBed -wo -a " + regionfile + " -b " + featurefile + " > " + outputfile
os.system(command)
else:
command = "intersectBed -wo -f " + str(fraction) + " -a " + regionfile + " -b " + featurefile + " > " + outputfile
os.system(command)
# define annotation headers:
if headerDict == "auto":
annotationHeader = general.build_header_dict(regionfile)
elif headerDict == "bed":
annotationHeader = metrn.bedHeader
else:
annotationHeader = dict()
for entry in headerDict.split(","):
key, value = entry.split(":")
annotationHeader[key] = int(value)
# define feature key setup:
idOverlap = len(open(regionfile).readline().split("\t")) + 3
# gather annotation overlap peak regions:
overlapBed = general.build2(outputfile, i=idOverlap, j=target, x="", mode="matrix", header_dict=annotationHeader, header=headerFlag, separator=":", counter=True)
return overlapBed
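""" note: featureMapper shells out to bedtools' intersectBed; with
fraction="0.1" the commands built above take the form (paths hypothetical):
    intersectBed -wo -f 0.1 -a regions.bed -b features.bed > overlap.bed """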
""" define a function that converts a local user path to SCG3 or GS server paths """
def serverPath(inpath, server="ON"):
if server == "ON":
return inpath.replace("/Users/claraya/", "/srv/gs1/projects/snyder/claraya/")
elif server == "GS":
return inpath.replace("/Users/claraya/", "/net/fields/vol1/home/araya/")
def main():
parser = optparse.OptionParser()
parser.add_option("--path", action = "store", type = "string", dest = "path", help = "path from script to files")
parser.add_option("--organism", action = "store", type = "string", dest = "organism", help = "Target organism for operations...", default="OFF")
parser.add_option("--peaks", action = "store", type = "string", dest = "peaks", help = "Basename for target peaks", default="OFF")
parser.add_option("--mode", action = "store", type = "string", dest = "mode", help = "launch")
parser.add_option("--infile", action = "store", type = "string", dest = "infile", help = "input expression file", default="OFF")
parser.add_option("--source", action = "store", type = "string", dest = "source", help = "folder location of input files", default="OFF")
parser.add_option("--target", action = "store", type = "string", dest = "target", help = "How to format dataset labels...", default="factor.context")
parser.add_option("--mapping", action = "store", type = "string", dest = "mapping", help = "Dataset mapping guide...", default="OFF")
parser.add_option("--exclude", action = "store", type = "string", dest = "exclude", help = "Targets to exclude", default="OFF")
parser.add_option("--rename", action = "store", type = "string", dest = "rename", help = "Targets to rename. Comma-separated list of 'target:replacement' pairs to search and replace.", default="OFF")
parser.add_option("--tip", action = "store", type = "string", dest = "tip", help = "Are these TIP prediction files?", default="OFF")
parser.add_option("--fraction", action = "store", type = "string", dest = "fraction", help = "Fractional overlap required", default="0.1")
parser.add_option("--nuclear", action = "store", type = "string", dest = "nuclear", help = "Peaks are only nuclear?", default="ON")
parser.add_option("--species", action = "store", type = "string", dest = "species", help = "Species to be compared; comma-separated", default="OFF")
parser.add_option("--genes", action = "store", type = "string", dest = "genes", help = "reference gene file", default="OFF")
parser.add_option("--transcripts", action = "store", type = "string", dest = "transcripts", help = "reference transcript file")
parser.add_option("--orthology", action = "store", type = "string", dest = "orthology", help = "Use 'direct' or 'family' orthologs?", default="direct")
parser.add_option("--nametag", action = "store", type = "string", dest = "nametag", help = "Orthology nametag: nametagHsCe", default="ortho")
parser.add_option("--commonNames", action = "store", type = "string", dest = "commonNames", help = "Grab common names file?", default="ON")
parser.add_option("--familyFiles", action = "store", type = "string", dest = "familyFiles", help = "Grab cleaned files?", default="formatted")
parser.add_option("--coord", action = "store", type = "string", dest = "coord", help = "reference coordinates to export: RNA or TSS", default="RNA")
parser.add_option("--cutoff", action = "store", type = "float", dest = "cutoff", help = "expression cutoff", default=0.05)
parser.add_option("--name", action = "store", type = "string", dest = "name", help = "name for domain", default="OFF")
parser.add_option("--strip", action = "store", type = "string", dest = "strip", help = "Remove RNA transcript periods (last)?", default="OFF")
parser.add_option("--a", action = "store", type = "string", dest = "a", help = "Input A network", default="OFF")
parser.add_option("--b", action = "store", type = "string", dest = "b", help = "Input B network", default="OFF")
parser.add_option("--overwrite", action = "store", type = "string", dest = "overwrite", help = "Overwrite stuff?", default="OFF")
parser.add_option("--threads", action = "store", type = "string", dest = "threads", help = "multiprocessing threads", default="1")
parser.add_option("--qsub", action = "store", type = "string", dest = "qsub", help = "qsub configuration header", default="OFF")
parser.add_option("--server", action = "store", type = "string", dest = "server", help = "are we on the server?", default="OFF")
parser.add_option("--job", action = "store", type = "string", dest = "job", help = "job name for cluster", default="OFF")
(option, args) = parser.parse_args()
# import paths:
if option.server == "OFF":
path_dict = modencode.configBuild(option.path + "/input/" + "configure_path.txt")
elif option.server == "ON":
path_dict = modencode.configBuild(option.path + "/input/" + "configure_server.txt")
elif option.server == "GS":
path_dict = modencode.configBuild(option.path + "/input/" + "configure_nexus.txt")
# specify input and output paths:
inpath = path_dict["input"]
extraspath = path_dict["extras"]
pythonpath = path_dict["python"]
scriptspath = path_dict["scripts"]
downloadpath = path_dict["download"]
fastqpath = path_dict["fastq"]
bowtiepath = path_dict["bowtie"]
bwapath = path_dict["bwa"]
macspath = path_dict["macs"]
memepath = path_dict["meme"]
idrpath = path_dict["idr"]
igvpath = path_dict["igv"]
testpath = path_dict["test"]
processingpath = path_dict["processing"]
annotationspath = path_dict["annotations"]
orthologspath = path_dict["orthologs"]
peakspath = path_dict["peaks"]
gopath = path_dict["go"]
hotpath = path_dict["hot"]
networkpath = path_dict["network"]
qsubpath = path_dict["qsub"]
cellspath = path_dict["cells"]
# standardize paths for analysis:
alignerpath = bwapath
indexpath = alignerpath + "index/"
alignmentpath = alignerpath + "alignment/"
qcfilterpath = alignerpath + "qcfilter/"
qcmergepath = alignerpath + "qcmerge/"
# import configuration dictionaries:
source_dict = modencode.configBuild(inpath + "configure_source.txt")
method_dict = modencode.configBuild(inpath + "configure_method.txt")
context_dict = modencode.configBuild(inpath + "configure_context.txt")
# define organism parameters:
if option.organism == "hs" or option.organism == "h.sapiens":
organismTag = "hs"
elif option.organism == "mm" or option.organism == "m.musculus":
organismTag = "mm"
elif option.organism == "ce" or option.organism == "c.elegans":
organismTag = "ce"
elif option.organism == "dm" or option.organism == "d.melanogaster":
organismTag = "dm"
# specify genome size file:
if option.organism != "OFF":
if option.nuclear == "ON":
chromosomes = metrn.chromosomes[organismTag]["nuclear"]
genome_size_file = option.path + "/input/" + metrn.reference[organismTag]["nuclear_sizes"]
genome_size_dict = general.build_config(genome_size_file, mode="single", separator="\t", spaceReplace=True)
else:
chromosomes = metrn.chromosomes[organismTag]["complete"]
genome_size_file = option.path + "/input/" + metrn.reference[organismTag]["complete_sizes"]
genome_size_dict = general.build_config(genome_size_file, mode="single", separator="\t", spaceReplace=True)
# load gene ID dictionaries:
id2name_dict, name2id_dict = modencode.idBuild(inpath + metrn.reference[organismTag]["gene_ids"], "Sequence Name (Gene)", "Gene Public Name", mode="label", header=True, idUpper=True, nameUpper=True)
# define expression cutoff handle:
expression = "%.0e" % (float(option.cutoff))
# define flagging dicts:
flagDict = dict()
flagDict["nameUpper"] = { "ce":True, "dm":True, "hs":True }
flagDict["idUpper"] = { "ce":False, "dm":True, "hs":True }
# build network mode:
if option.mode == "build":
# determine path of input files:
networkpath = networkpath + option.peaks + "/" + option.name + "/"
targetspath = networkpath + "targets/"
mappingpath = networkpath + "mapping/"
general.pathGenerator(targetspath)
general.pathGenerator(mappingpath)
# update peaks path:
peakspath = peakspath + option.peaks + "/"
# process TIP predictions:
if option.tip == "ON":
# load mapping:
if option.mapping != "OFF":
mappingDict = general.build2(annotationspath + option.mapping, id_column="filename")
# load binding targets:
print
print "Loading target predictions..."
mapDict = fasta.buildfile(path_dict[option.source] + option.infile)
skipped = list()
# prebuild mapping if necessary:
if option.mapping != "OFF":
peak2dataDict, data2peakDict = dict(), dict()
for dataset in sorted(mapDict.keys()):
dataset = dataset.replace(".bam", "").replace("AlnRep0", "Aln").replace("AlnRep1", "Aln")
# rename datasets if indicated:
if option.rename != "OFF":
for scheme in option.rename.split(","):
target, replace = scheme.split(":")
dataset = dataset.replace(target, replace)
if not dataset in mappingDict:
skipped.append(dataset)
else:
strain, factor, context, institute, method = mappingDict[dataset]["strain"], mappingDict[dataset]["factor"], mappingDict[dataset]["context"], mappingDict[dataset]["institute"], mappingDict[dataset]["method"]
filelabel = organismTag + "_" + "_".join([strain, factor, context, institute])
for peakfile in os.listdir(peakspath):
if filelabel in peakfile:
if not peakfile in peak2dataDict:
peak2dataDict[peakfile] = dict()
if not dataset in data2peakDict:
data2peakDict[dataset] = dict()
peak2dataDict[peakfile][dataset] = 1
data2peakDict[dataset][peakfile] = 1
# check 1-to-1 mapping between peak files and datasets...
for dataset in data2peakDict:
if len(data2peakDict[dataset]) > 1:
raise "Error: Multiple peak-files matched!", dataset
# assign targets by distance:
else:
# specify the target regions file:
i_infile = path_dict[option.source] + option.infile
# process peaks:
mapDict = dict()
skipped = list()
for peakfile in os.listdir(peakspath):
# generate dataset key:
dataset = peakfile.replace("_peaks.bed", "")
print "Processing:", dataset
# define peak source and output
p_infile = peakspath + peakfile
p_outfile = mappingpath + peakfile.replace(".bed", ".tmp")
# assign peaks to features:
peakDict = featureMapper(p_infile, i_infile, p_outfile, fraction=option.fraction, target="feature", headerDict="bed", header="OFF", overwrite=option.overwrite)
# map dataset to features:
mapDict[dataset] = list()
for peak in peakDict:
for feature in peakDict[peak]:
mapDict[dataset].append(feature)
mapDict[dataset] = sorted(list(set(mapDict[dataset])))
#key = mapDict.keys()[0]
#print key
#print mapDict[key]
#pdb.set_trace()
# transfer binding targets:
print "Transfering target predictions..."
networkDict = dict()
s, t = 0, 0
for dataset in sorted(mapDict.keys()):
# get targets:
if option.tip == "ON":
targets = general.clean(mapDict[dataset].split("\t"), "")
dataset = dataset.replace(".bam", "").replace("AlnRep0", "Aln").replace("AlnRep1", "Aln")
else:
targets = general.clean(mapDict[dataset], "")
# rename datasets if indicated:
if option.rename != "OFF":
for scheme in option.rename.split(","):
target, replace = scheme.split(":")
dataset = dataset.replace(target, replace)
# substitute signal filename for peaks:
process = False
if option.tip == "ON" and option.mapping == "OFF":
dataset = organismTag + "_" + dataset
peakfile = dataset.replace(".fc.signal", "_peaks.bed")
dataset = dataset.replace(".fc.signal", "")
process = True
t += 1
# generate peak file names from mapping:
elif option.tip == "ON" and option.mapping != "OFF":
if dataset in data2peakDict:
peakfile = data2peakDict[dataset].keys()[0]
process = True
t += 1
# store TIP target assignments:
if option.tip == "ON" and peakfile in os.listdir(peakspath) and targets != list() and process:
networkDict[dataset] = targets
s += 1
# store overlap target assignments:
elif option.tip == "OFF":
networkDict[dataset] = targets
s += 1
# define output files:
n_output = open(targetspath + "mapnetwork_build_" + option.peaks + "_" + option.name + ".txt", "w")
print >>n_output, "\t".join(["dataset", "target", "organism", "strain", "factor", "context", "institute", "method"])
# export network targets:
k = 0
print "Exporting target predictions..."
for dataset in sorted(networkDict.keys()):
if option.mapping == "OFF":
datasetID = metrn.labelGenerator(target=option.target, mode="label", dataset=dataset)
organism, strain, factor, context, institute, method = metrn.labelComponents(dataset)
else:
organism, strain, factor, context, institute, method = organismTag, mappingDict[dataset]["strain"], mappingDict[dataset]["factor"], mappingDict[dataset]["context"], mappingDict[dataset]["institute"], mappingDict[dataset]["method"]
datasetID = "_".join([organism, strain, factor, context, institute, method])
datasetID = metrn.labelGenerator(target=option.target, mode="label", dataset=datasetID)
for target in sorted(networkDict[dataset]):
print >>n_output, "\t".join([datasetID, target, organism, strain, factor, context, institute, method])
k += 1
# close output files:
n_output.close()
print "Loaded regulator-target interactions:", k
print "Input peak call files:", len(os.listdir(peakspath))
print "Input signal sources:", t
print "Stored signal sources:", s
print "Skipped sources:", len(skipped)
print
# resolve network mode:
elif option.mode == "resolve":
# determine path of input files:
networkpath = networkpath + option.peaks + "/" + option.name + "/"
targetspath = networkpath + "targets/"
mappingpath = networkpath + "mapping/"
general.pathGenerator(targetspath)
general.pathGenerator(mappingpath)
# load gene ids...
rna2gen_dict, gen2rna_dict = modencode.idBuild(inpath + metrn.reference[organismTag]["gene_ids"], metrn.reference[organismTag]["rna.link"], metrn.reference[organismTag]["gene.link"], mode="label", header=True, idUpper=flagDict["idUpper"], nameUpper=flagDict["nameUpper"])
rna2pro_dict, pro2rna_dict = modencode.idBuild(inpath + metrn.reference[organismTag]["gene_ids"], metrn.reference[organismTag]["rna.link"], metrn.reference[organismTag]["protein.link"], mode="label", header=True, idUpper=flagDict["idUpper"], nameUpper=flagDict["nameUpper"])
rna2sym_dict, sym2rna_dict = modencode.idBuild(inpath + metrn.reference[organismTag]["gene_ids"], metrn.reference[organismTag]["rna.link"], metrn.reference[organismTag]["symbol.link"], mode="label", header=True, idUpper=flagDict["idUpper"], nameUpper=flagDict["nameUpper"])
# load and process network:
networkfile = targetspath + "mapnetwork_build_" + option.peaks + "_" + option.name + ".txt"
n_output = open(targetspath + "mapnetwork_resolve_" + option.peaks + "_" + option.name + ".txt", "w")
print >>n_output, "\t".join(["dataset", "target", "organism", "strain", "factor", "context", "institute", "method", "symbol", "gene", "protein"])
inlines = open(networkfile).readlines()
inlines.pop(0)
for inline in inlines:
if inline.strip() != "":
dataset, target = inline.strip().split("\t")[:2]
if option.strip == "ON":
length = len(target.split("."))
query = ".".join(target.split(".")[:length-1])
else:
query = str(target)
if query in rna2gen_dict and query in rna2pro_dict:
gene = rna2gen_dict[query]
protein = rna2pro_dict[query]
symbol = rna2sym_dict[query]
print >>n_output, inline.strip() + "\t" + symbol + "\t" + gene + "\t" + protein
n_output.close()
# cascade network analysis:
elif option.mode == "cascade":
# determine path of input files:
networkpath = networkpath + option.peaks + "/" + option.name + "/"
targetspath = networkpath + "targets/"
mappingpath = networkpath + "mapping/"
cascadepath = networkpath + "cascade/"
general.pathGenerator(targetspath)
general.pathGenerator(mappingpath)
general.pathGenerator(cascadepath)
print
print "Loading network data..."
networkfile = targetspath + "mapnetwork_resolve_" + option.peaks + "_" + option.name + ".txt"
networkDict = general.build2(networkfile, i="factor", j="symbol", x="context", mode="matrix", skip=True, verbose=False)
networkHead = open(networkfile).readline()
# generate line-matching dictionary:
linesDict = dict()
headerDict = general.build_header_dict(networkfile)
inlines = open(networkfile).readlines()
inlines.pop(0)
for inline in inlines:
initems = inline.strip().split("\t")
factor, gene = initems[headerDict["factor"]], initems[headerDict["symbol"]]
if not factor in linesDict:
linesDict[factor] = dict()
linesDict[factor][gene] = inline.strip()
# initiate output files:
k_output = open(cascadepath + "mapnetwork_cascade_" + option.peaks + "_" + option.name + "_all.txt", "w")
m_output = open(cascadepath + "mapnetwork_cascade_" + option.peaks + "_" + option.name + "_com.txt", "w")
z_output = open(cascadepath + "mapnetwork_cascade_" + option.peaks + "_" + option.name + "_reg.txt", "w")
print >>k_output, networkHead.strip()
print >>m_output, networkHead.strip()
print >>z_output, networkHead.strip()
print "Scanning overlaps..."
k, m, z = 0, 0, 0
cascadeDict, regularDict = dict(), dict()
cellSets = os.listdir(cellspath + "cellset/" + option.mapping)
for factor in networkDict:
if factor in cellSets:
factorCells = open(cellspath + "cellset/" + option.mapping + "/" + factor).read().split("\n")
for gene in networkDict[factor]:
if gene in cellSets:
geneCells = open(cellspath + "cellset/" + option.mapping + "/" + gene).read().split("\n")
overlapCells = set(factorCells).intersection(set(geneCells))
if len(overlapCells) >= float(option.fraction)*len(geneCells) and len(factorCells) >= len(geneCells):
#print factor, gene, len(networkDict[factor])
if not factor in cascadeDict:
cascadeDict[factor] = dict()
cascadeDict[factor][gene] = networkDict[factor][gene]
print >>m_output, linesDict[factor][gene]
m += 1
if gene in networkDict:
#print factor, gene, len(networkDict[factor])
if not factor in regularDict:
regularDict[factor] = dict()
regularDict[factor][gene] = networkDict[factor][gene]
print >>z_output, linesDict[factor][gene]
z += 1
#print factor, gene, len(networkDict[factor])
print >>k_output, linesDict[factor][gene]
k += 1
# close output files:
k_output.close()
m_output.close()
z_output.close()
print
print "Possibles cascade interactions:", k
print "Supported cascade interactions:", m
print "Regulator cascade interactions:", z
print
# find common regulatory interactions mode:
elif option.mode == "commons":
		# Note: For each factor (say UNC-62), this method searches for all of
		# the name-related targets across contexts. As such, it will find that
		# UNC-62 in the L4 stage binds to VIT-1, VIT-3, VIT-4 and VIT-5.
# determine path of input files:
networkpath = networkpath + option.peaks + "/" + option.name + "/"
targetspath = networkpath + "targets/"
mappingpath = networkpath + "mapping/"
summarypath = networkpath + "commons/summary/"
datasetpath = networkpath + "commons/dataset/"
general.pathGenerator(targetspath)
general.pathGenerator(mappingpath)
general.pathGenerator(summarypath)
general.pathGenerator(datasetpath)
# load input network:
print
print "Loading regulatory network..."
networkfile = targetspath + "mapnetwork_resolve_" + option.peaks + "_" + option.name + ".txt"
networkDict = general.build2(networkfile, i="dataset", j="symbol", x="context", mode="matrix", skip=True, verbose=False)
# load protein interaction network:
print "Loading protein interaction network..."
if option.a != "OFF":
i, j, x = option.a, option.b, option.b
ppiDict = general.build2(extraspath + option.mapping, i=i, j=j, x=x, mode="matrix")
# establish cutoff handle:
cutoffHandle = "_cut" + str(int(option.cutoff))
# prepare integrated network output:
integratedfile = summarypath + "mapnetwork_commons_" + option.peaks + "_" + option.name + cutoffHandle + "_network.txt"
i_output = open(integratedfile, "w")
print >>i_output, "\t".join(["source","type","target","factor","stage"])
# transfer interaction network:
proteins, missing = list(), 0
for source in ppiDict:
for target in ppiDict[source]:
proteins.append(source)
proteins.append(target)
if source in id2name_dict and target in id2name_dict:
source, target = id2name_dict[source], id2name_dict[target]
print >>i_output, "\t".join([source, "pp", target, source, "interaction"])
# tally missing proteins:
proteins = sorted(list(set(proteins)))
for protein in proteins:
if not protein in id2name_dict:
missing += 1
print "Protein missing:", round(100*float(missing)/len(proteins), 2), "%"
print
# find name-related targets network:
print "Finding name-related targets..."
commonsDict, sizeDict = dict(), dict()
for dataset in networkDict:
for target in networkDict[dataset]:
basename = target.split("-")[0]
stage = networkDict[dataset][target]
factor = dataset.replace("." + stage, "")
if not dataset in commonsDict:
commonsDict[dataset] = dict()
sizeDict[dataset] = dict()
if not basename in commonsDict[dataset]:
commonsDict[dataset][basename] = list()
commonsDict[dataset][basename].append(target)
sizeDict[dataset][basename] = len(commonsDict[dataset][basename])
# transfer regulatory network, applying basename cutoffs...
for dataset in sorted(commonsDict.keys()):
for basename in commonsDict[dataset]:
if sizeDict[dataset][basename] >= int(option.cutoff):
for target in commonsDict[dataset][basename]:
stage = networkDict[dataset][target]
factor = dataset.replace("." + stage, "")
print >>i_output, "\t".join([dataset, "pd", target, factor, stage])
# close integrated network output:
if option.a != "OFF":
i_output.close()
# find & export interesting candidates:
summaryfile = summarypath + "mapnetwork_commons_" + option.peaks + "_" + option.name + cutoffHandle + "_summary.txt"
f_output = open(summaryfile, "w")
print >>f_output, "\t".join(["dataset", "target.class", "target.count", "target.ids"])
print "Exporting summary and aggregates..."
for dataset in sorted(commonsDict.keys()):
#print "Processing:", dataset
datasetfile = datasetpath + "mapnetwork_commons_" + option.peaks + "_" + option.name + cutoffHandle + "_" + dataset + ".txt"
d_output = open(datasetfile, "w")
print >>d_output, "\t".join(["source", "target"])
for basename in sizeDict[dataset]:
if sizeDict[dataset][basename] >= int(option.cutoff):
#print dataset, sizeDict[dataset][basename], ",".join(commonsDict[dataset][basename])
print >>f_output, "\t".join(map(str, [dataset, basename, sizeDict[dataset][basename], ",".join(commonsDict[dataset][basename])]))
print >>d_output, "\t".join([dataset, basename])
for target in commonsDict[dataset][basename]:
print >>d_output, "\t".join([basename, target])
d_output.close()
#print
# close output summary file:
f_output.close()
print
# hybridize networks mode:
elif option.mode == "hybrid":
# determine path of input files:
comparisonpath = networkpath + "/comparison/"
general.pathGenerator(comparisonpath)
# collect species names:
aspecies, bspecies = option.species.split(",")
# define input network files:
ainfile = networkpath + option.a + "/targets/mapnetwork_resolve_" + option.a.replace("/", "_") + ".txt"
binfile = networkpath + option.b + "/targets/mapnetwork_resolve_" + option.b.replace("/", "_") + ".txt"
		# identify target species and comparison species:
speciesTags = option.species.split(",")
# define orthology path:
if option.orthology == "direct":
orthologypath = orthologspath + "orthologs/"
elif option.orthology == "family":
orthologypath = orthologspath + "families/"
# generate ortholog tag name:
orthologTag = metrn.orthologLabel(option.organism, speciesTags)
# generate orthology dictionary:
ortholog_dict = metrn.orthologBuilder(speciesTags, path=orthologypath, orthology=option.orthology, commonNames=option.commonNames, familyFiles=option.familyFiles, verbose="OFF")
        # target species orthologs:
if option.orthology == "direct":
aOrthologs = metrn.orthologFinder(aspecies, speciesTags, path=orthologspath + "orthologs/", orthology=option.orthology, commonNames=option.commonNames)
bOrthologs = metrn.orthologFinder(bspecies, speciesTags, path=orthologspath + "orthologs/", orthology=option.orthology, commonNames=option.commonNames)
elif option.orthology == "family":
aOrthologs = metrn.orthologFinder(aspecies, speciesTags, path=orthologspath + "families/", orthology=option.orthology, familyFiles=option.familyFiles)
bOrthologs = metrn.orthologFinder(bspecies, speciesTags, path=orthologspath + "families/", orthology=option.orthology, familyFiles=option.familyFiles)
print
print "Orthologs for " + aspecies.upper() + ":", str(len(aOrthologs))
print "Orthologs for " + bspecies.upper() + ":", str(len(bOrthologs))
print
# load ortholog and gene IDs:
print "Loading ortholog identifiers..."
aid2name_dict, aname2id_dict = geneConverter(aspecies, inpath=inpath, i="orth.link", j="symbol.link", nameUpper=flagDict["nameUpper"][aspecies], idUpper=flagDict["idUpper"][aspecies])
bid2name_dict, bname2id_dict = geneConverter(bspecies, inpath=inpath, i="orth.link", j="symbol.link", nameUpper=flagDict["nameUpper"][bspecies], idUpper=flagDict["idUpper"][bspecies])
#print btarget2id_dict.keys()[:5]
#print bid2target_dict.keys()[:5]
#pdb.set_trace()
# load network files:
print "Loading species networks..."
aNetwork = general.build2(ainfile, i="dataset", j="symbol", x="factor", mode="matrix", skip=True, verbose=False)
bNetwork = general.build2(binfile, i="dataset", j="symbol", x="factor", mode="matrix", skip=True, verbose=False)
# load ortholog mappings:
print "Mapping orthologs between species..."
aMapping, bMapping = orthologBuilder(aspecies, bspecies, extraspath + option.infile)
# scan network overlaps (make reverse networks):
        # note: these networks have gene-name keys but the targets are in list format...
print "Identifying network factors..."
aRevwork = dict()
for aDataset in aNetwork:
for aTarget in aNetwork[aDataset]:
aGene = aNetwork[aDataset][aTarget]
if not aGene in aRevwork:
aRevwork[aGene] = dict()
if not aDataset in aRevwork[aGene]:
aRevwork[aGene][aDataset] = list()
aRevwork[aGene][aDataset].append(aTarget)
bRevwork = dict()
for bDataset in bNetwork:
for bTarget in bNetwork[bDataset]:
bGene = bNetwork[bDataset][bTarget]
if not bGene in bRevwork:
bRevwork[bGene] = dict()
if not bDataset in bRevwork[bGene]:
bRevwork[bGene][bDataset] = list()
bRevwork[bGene][bDataset].append(bTarget)
t1, l1, l2, l3, l4 = list(), list(), list(), list(), list()
for aGene in aOrthologs:
aFactor = aname2id_dict[aGene]
if aGene in aRevwork:
t1.append(aGene)
# fix cases where the correct (mapping) ID isn't found:
if aFactor in aMapping:
l1.append(aGene)
else:
aMatches = list()
for aQuery in aMapping:
if aQuery in aid2name_dict and aGene == aid2name_dict[aQuery]:
aMatches.append(aQuery)
if aMatches != list():
aFactor = aMatches[0]
if len(aMatches) > 1:
print "Caution: More than matching ID found for", aGene, aFactor, aMatches
pdb.set_trace()
l1.append(aGene)
else:
l2.append(aGene)
# capture dataset global targets:
aGlobal = list()
for aDataset in aRevwork[aGene]:
aGlobal.extend(aRevwork[aGene][aDataset])
aGlobal = sorted(list(set(aGlobal)))
# check ortholog networks:
for bGene in ortholog_dict[aspecies][aGene][bspecies]:
if bGene in bRevwork:
# capture ortholog global targets:
bGlobal = list()
for bDataset in bRevwork[bGene]:
bGlobal.extend(bRevwork[bGene][bDataset])
bGlobal = sorted(list(set(bGlobal)))
# process dataset targets:
for aDataset in aRevwork[aGene]:
aTargets = aRevwork[aGene][aDataset]
aMatches = list()
aQueries = list()
for aTarget in aTargets:
if aTarget in aname2id_dict:
aMatches.append(aname2id_dict[aTarget])
if aname2id_dict[aTarget] in aMapping:
aQueries.extend(aMapping[aname2id_dict[aTarget]])
aQueries = sorted(list(set(aQueries)))
# process ortholog targets:
for bDataset in bRevwork[bGene]:
bTargets = bRevwork[bGene][bDataset]
bMatches = list()
bQueries = list()
for bTarget in bTargets:
if bTarget in bname2id_dict:
bMatches.append(bname2id_dict[bTarget])
if bname2id_dict[bTarget] in bMapping:
bQueries.extend(bMapping[bname2id_dict[bTarget]])
bQueries = sorted(list(set(bQueries)))
# determine overlaps:
aOverlap = set(aQueries).intersection(set(bMatches))
bOverlap = set(bQueries).intersection(set(aMatches))
if len(aQueries) != 0:
aQueryFraction = float(len(aOverlap))/len(aQueries)
aUnionFraction = float(len(aOverlap))/len(set(aQueries).union(set(bMatches)))
else:
aQueryFraction = 0
aUnionFraction = 0
if len(bQueries) != 0:
bQueryFraction = float(len(bOverlap))/len(bQueries)
bUnionFraction = float(len(bOverlap))/len(set(bQueries).union(set(aMatches)))
else:
bQueryFraction = 0
bUnionFraction = 0
#print aGene, len(aTargets)
#print bGene, len(bTargets)
#print len(aQueries), len(bQueries)
#print len(aOverlap), len(bOverlap)
#pdb.set_trace()
# export data:
output = [aDataset, bDataset, aGene, bGene, len(aMatches), len(bMatches), len(aQueries), len(bQueries), len(aOverlap), round(aQueryFraction, 3), round(aUnionFraction, 3), len(bOverlap), round(bQueryFraction, 3), round(bUnionFraction, 3)]
print "\t".join(map(str, output))
# print aGene, aDataset, aFactor, bFactor
# pdb.set_trace()
"""
for bFactor in aMapping[aFactor]:
if bFactor in bid2name_dict:
bGene = bid2name_dict[bFactor]
if bGene in bOrthologs:
# now we have two orthologs, in their networks:
if bGene in bRevwork:
#print aGene, aFactor, bGene, bFactor
#pdb.set_trace()
for aDataset in aRevwork[aGene]:
aTargets = aRevwork[aGene][aDataset]
aTargets = sorted(list(set(aTargets)))
aMatchs, aMissed = list(), list()
for aTarget in aTargets:
if aTarget in atarget2id_dict:
aMatchs.append(atarget2id_dict[aTarget])
else:
aMissed.append(aTarget)
aMapped, aUnmaps = list(), list()
for aMatch in aMatchs:
if aMatch in aMapping:
aMapped.extend(aMapping[aMatch])
else:
aUnmaps.append(aMatch)
aMatchs = sorted(list(set(aMatchs)))
aMissed = sorted(list(set(aMissed)))
aMapped = sorted(list(set(aMapped)))
aUnmaps = sorted(list(set(aUnmaps)))
for bDataset in bRevwork[bGene]:
bTargets = bRevwork[bGene][bDataset]
bTargets = sorted(list(set(bTargets)))
bMatchs, bMissed = list(), list()
for bTarget in bTargets:
if bTarget in btarget2id_dict:
bMatchs.append(btarget2id_dict[bTarget])
else:
bMissed.append(bTarget)
bMatchs = sorted(list(set(bMatchs)))
bMissed = sorted(list(set(bMissed)))
Overlap = sorted(list(set(aMapped).intersection(set(bMatchs))))
Fraction = float(len(Overlap))/len(aMapped)
Normaled = float(len(Overlap))/len(set(aMapped).union(set(bMatchs)))
#print aDataset, len(aTargets), len(aMatchs), len(aMapped), len(aUnmaps)
#print bDataset, len(bTargets), len(bMatchs), len(bMissed)
#print Overlap
#pdb.set_trace()
print aDataset, bDataset, aGene, bGene, len(aMatchs), len(bMatchs), len(aMapped), len(Overlap), round(Fraction, 3), round(Normaled, 3)
"""
"""
else:
l4.append(bGene)
else:
l3.append(bFactor)
else:
l2.append(aFactor)
else:
l1.append(aGene)
"""
print "Target orthologs:", t1
print "Orthologs in network:", len(l1)
print "Orthologs not mapped:", len(l2)
print "...match name missing:", len(l3)
print "...match not assayed:", len(l4)
#for aDataset in aRevwork[aGene]:
#for aTarget in aNetwork[aDataset]:
# aFactor = aNetwork[aDataset][aTarget]
# if aFactor in aname2id_dict:
# aGene = aname2id_dict[aFactor]
# aTarget = aid2target_dict[aGene]
# if aGene in aOrthologs:
# for bGene in aOrthologs[aGene]:
# print aDataset, aFactor, aGene, bGene
# pdb.set_trace()
#a1 = aNetwork.keys()[0]
#a2 = aNetwork[a1].keys()[0]
#print a1, a2, aNetwork[a1][a2]
#pdb.set_trace()
if __name__ == "__main__":
main()
print "Completed:", time.asctime(time.localtime())
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a Bokeh Application Handler to build up documents by compiling
and executing Python source code.
This Handler is used by the Bokeh server command line tool to build
applications that run off scripts and notebooks.
.. code-block:: python
def make_doc(doc):
# do work to modify the document, add plots, widgets, etc.
return doc
app = Application(FunctionHandler(make_doc))
server = Server({'/bkapp': app}, io_loop=IOLoop.current())
server.start()
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import os
import sys
# External imports
# Bokeh imports
from ...io.doc import set_curdoc, curdoc
from .code_runner import CodeRunner
from .handler import Handler
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class CodeHandler(Handler):
''' Run source code which modifies a Document
'''
# These functions, if present in the supplied code, will be monkey patched
# to be no-ops, with a warning.
_io_functions = ['output_notebook', 'output_file', 'show', 'save', 'reset_output']
def __init__(self, *args, **kwargs):
'''
Args:
source (str) : python source code
filename (str) : a filename to use in any debugging or error output
argv (list[str], optional) : a list of string arguments to make
available as ``sys.argv`` when the code executes
'''
super(CodeHandler, self).__init__(*args, **kwargs)
if 'source' not in kwargs:
raise ValueError('Must pass source to CodeHandler')
source = kwargs['source']
if 'filename' not in kwargs:
raise ValueError('Must pass a filename to CodeHandler')
filename = kwargs['filename']
argv = kwargs.get('argv', [])
self._runner = CodeRunner(source, filename, argv)
self._loggers = {}
for f in CodeHandler._io_functions:
self._loggers[f] = self._make_io_logger(f)
# Properties --------------------------------------------------------------
@property
def error(self):
''' If the handler fails, may contain a related error message.
'''
return self._runner.error
@property
def error_detail(self):
''' If the handler fails, may contain a traceback or other details.
'''
return self._runner.error_detail
@property
def failed(self):
''' ``True`` if the handler failed to modify the doc
'''
return self._runner.failed
@property
def safe_to_fork(self):
''' Whether it is still safe for the Bokeh server to fork new workers.
``False`` if the code has already been executed.
'''
return not self._runner.ran
# Public methods ----------------------------------------------------------
def modify_document(self, doc):
'''
'''
if self.failed:
return
module = self._runner.new_module()
# One reason modules are stored is to prevent the module
# from being gc'd before the document is. A symptom of a
# gc'd module is that its globals become None. Additionally
# stored modules are used to provide correct paths to
# custom models resolver.
sys.modules[module.__name__] = module
doc._modules.append(module)
old_doc = curdoc()
set_curdoc(doc)
old_io = self._monkeypatch_io()
try:
def post_check():
newdoc = curdoc()
# script is supposed to edit the doc not replace it
if newdoc is not doc:
raise RuntimeError("%s at '%s' replaced the output document" % (self._origin, self._runner.path))
self._runner.run(module, post_check)
finally:
self._unmonkeypatch_io(old_io)
set_curdoc(old_doc)
def url_path(self):
''' The last path component for the basename of the configured filename.
'''
if self.failed:
return None
else:
# TODO should fix invalid URL characters
return '/' + os.path.splitext(os.path.basename(self._runner.path))[0]
# Private methods ---------------------------------------------------------
    # subclasses must define self._logger_text
def _make_io_logger(self, name):
def logger(*args, **kwargs):
            log.info(self._logger_text, self._runner.path, name)
return logger
# monkeypatching is a little ugly, but in this case there's no reason any legitimate
# code should be calling these functions, and we're only making a best effort to
# warn people so no big deal if we fail.
def _monkeypatch_io(self):
import bokeh.io as io
old = {}
for f in CodeHandler._io_functions:
old[f] = getattr(io, f)
setattr(io, f, self._loggers[f])
return old
def _unmonkeypatch_io(self, old):
import bokeh.io as io
for f in old:
setattr(io, f, old[f])
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
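#-----------------------------------------------------------------------------
# Illustrative usage sketch -- not part of the original module. CodeHandler is
# normally used through subclasses such as ScriptHandler, which supply
# self._logger_text; the wiring below is only a minimal example.
#-----------------------------------------------------------------------------
if __name__ == '__main__':
    from bokeh.application import Application
    from bokeh.application.handlers.script import ScriptHandler

    handler = ScriptHandler(filename='app.py')  # assumes an app.py exists
    app = Application(handler)
    print(app.safe_to_fork)  # True until the handler's code has executed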
|
|
from django.contrib.auth import get_user_model
from django.test import override_settings
from django.urls import reverse
from django.utils import encoding
from example.tests import TestBase
class ModelViewSetTests(TestBase):
"""
Test usage with ModelViewSets, also tests pluralization, camelization,
and underscore.
[<RegexURLPattern user-list ^identities/$>,
<RegexURLPattern user-detail ^identities/(?P<pk>[^/]+)/$>]
"""
list_url = reverse("user-list")
def setUp(self):
super().setUp()
self.detail_url = reverse("user-detail", kwargs={"pk": self.miles.pk})
def test_key_in_list_result(self):
"""
Ensure the result has a 'user' key since that is the name of the model
"""
with override_settings(JSON_API_FORMAT_FIELD_NAMES="dasherize"):
response = self.client.get(self.list_url)
self.assertEqual(response.status_code, 200)
user = get_user_model().objects.all()[0]
expected = {
"data": [
{
"type": "users",
"id": encoding.force_str(user.pk),
"attributes": {
"first-name": user.first_name,
"last-name": user.last_name,
"email": user.email,
},
}
],
"links": {
"first": "http://testserver/identities?page%5Bnumber%5D=1",
"last": "http://testserver/identities?page%5Bnumber%5D=2",
"next": "http://testserver/identities?page%5Bnumber%5D=2",
"prev": None,
},
"meta": {"pagination": {"page": 1, "pages": 2, "count": 2}},
}
assert expected == response.json()
def test_page_two_in_list_result(self):
"""
Ensure that the second page is reachable and is the correct data.
"""
with override_settings(JSON_API_FORMAT_FIELD_NAMES="dasherize"):
response = self.client.get(self.list_url, {"page[number]": 2})
self.assertEqual(response.status_code, 200)
user = get_user_model().objects.all()[1]
expected = {
"data": [
{
"type": "users",
"id": encoding.force_str(user.pk),
"attributes": {
"first-name": user.first_name,
"last-name": user.last_name,
"email": user.email,
},
}
],
"links": {
"first": "http://testserver/identities?page%5Bnumber%5D=1",
"last": "http://testserver/identities?page%5Bnumber%5D=2",
"next": None,
"prev": "http://testserver/identities?page%5Bnumber%5D=1",
},
"meta": {"pagination": {"page": 2, "pages": 2, "count": 2}},
}
assert expected == response.json()
def test_page_range_in_list_result(self):
"""
Ensure that the range of a page can be changed from the client,
tests pluralization as two objects means it converts ``user`` to
``users``.
"""
with override_settings(JSON_API_FORMAT_FIELD_NAMES="dasherize"):
response = self.client.get(self.list_url, {"page[size]": 2})
self.assertEqual(response.status_code, 200)
users = get_user_model().objects.all()
expected = {
"data": [
{
"type": "users",
"id": encoding.force_str(users[0].pk),
"attributes": {
"first-name": users[0].first_name,
"last-name": users[0].last_name,
"email": users[0].email,
},
},
{
"type": "users",
"id": encoding.force_str(users[1].pk),
"attributes": {
"first-name": users[1].first_name,
"last-name": users[1].last_name,
"email": users[1].email,
},
},
],
"links": {
"first": "http://testserver/identities?page%5Bnumber%5D=1&page%5Bsize%5D=2",
"last": "http://testserver/identities?page%5Bnumber%5D=1&page%5Bsize%5D=2",
"next": None,
"prev": None,
},
"meta": {"pagination": {"page": 1, "pages": 1, "count": 2}},
}
assert expected == response.json()
def test_key_in_detail_result(self):
"""
Ensure the result has a 'user' key.
"""
with override_settings(JSON_API_FORMAT_FIELD_NAMES="dasherize"):
response = self.client.get(self.detail_url)
self.assertEqual(response.status_code, 200)
expected = {
"data": {
"type": "users",
"id": encoding.force_str(self.miles.pk),
"attributes": {
"first-name": self.miles.first_name,
"last-name": self.miles.last_name,
"email": self.miles.email,
},
}
}
assert expected == response.json()
def test_patch_requires_id(self):
"""
Verify that 'id' is required to be passed in an update request.
"""
data = {
"data": {"type": "users", "attributes": {"first-name": "DifferentName"}}
}
response = self.client.patch(self.detail_url, data=data)
self.assertEqual(response.status_code, 400)
def test_patch_requires_correct_id(self):
"""
        Verify that 'id' is the same as the one in the URL.
"""
data = {
"data": {
"type": "users",
"id": self.miles.pk + 1,
"attributes": {"first-name": "DifferentName"},
}
}
response = self.client.patch(self.detail_url, data=data)
self.assertEqual(response.status_code, 409)
def test_key_in_post(self):
"""
Ensure a key is in the post.
"""
self.client.login(username="miles", password="pw")
data = {
"data": {
"type": "users",
"id": encoding.force_str(self.miles.pk),
"attributes": {
"first-name": self.miles.first_name,
"last-name": self.miles.last_name,
"email": "miles@trumpet.org",
},
}
}
with override_settings(JSON_API_FORMAT_FIELD_NAMES="dasherize"):
response = self.client.put(self.detail_url, data=data)
assert data == response.json()
# is it updated?
self.assertEqual(
get_user_model().objects.get(pk=self.miles.pk).email, "miles@trumpet.org"
)
def test_404_error_pointer(self):
self.client.login(username="miles", password="pw")
not_found_url = reverse("user-detail", kwargs={"pk": 12345})
errors = {
"errors": [{"detail": "Not found.", "status": "404", "code": "not_found"}]
}
response = self.client.get(not_found_url)
assert 404 == response.status_code
assert errors == response.json()
|
|
import os
import math, random
from PIL import Image
import warnings
import json
from django.db import connection
# Disable the warnings for giant images
warnings.simplefilter('ignore', Image.DecompressionBombWarning)
class Last:
pass
def phorzvert_layout(project, frame=None):
# sort the images by size, largest first where size is the area
files = sorted(project.files.all(), key=lambda x: x.width * x.height, reverse=True)
last_file = Last()
last_file.x, last_file.y = 0, 0
last_file.width, last_file.height = 0,0
order = ['t', 'r', 'b', 'l'] # top, right, bottom, left
canvas_width, canvas_height = 0, 0
new_files = []
while files:
        file = files.pop(0)  # take the largest remaining image first
if not order:
order = ['t', 'r', 'b', 'l']
new_order = order.pop(0)
if new_order == 't':
file.x = last_file.x
file.y = last_file.y - file.height
last_file.y -= file.height
elif new_order == 'r':
file.x = last_file.x + last_file.width
file.y = last_file.y
elif new_order == 'b':
file.x = last_file.x
file.y = last_file.y + last_file.height
elif new_order == 'l':
file.x = last_file.x - file.width
file.y = last_file.y
last_file.x -= file.width
if new_order in ('t', 'b'):
last_file.height += file.height
if new_order in ('r', 'l'):
last_file.width += file.width
new_files.append(file)
# And save all the modified attributes
for f in new_files:
f.new_width, f.new_height = f.width, f.height
cursor = connection.cursor()
cursor.execute('UPDATE metabotnik_file SET x = %s, y = %s, new_width = %s, new_height = %s WHERE id = %s',
(f.x, f.y, f.new_width, f.new_height, f.pk))
project.metabotnik_width = last_file.width
project.metabotnik_height = last_file.height
project.save()
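# Illustrative sketch (not part of the original module): the placement loop
# above cycles through 't', 'r', 'b', 'l', refilling the list whenever it
# empties, which yields a repeating spiral order around the first image.
def _spiral_order(n):
    order, result = [], []
    for _ in range(n):
        if not order:
            order = ['t', 'r', 'b', 'l']
        result.append(order.pop(0))
    return result  # e.g. _spiral_order(6) -> ['t', 'r', 'b', 'l', 't', 'r']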
def layout(project, frame=0):
width = MAX_WIDTH + 1
height = MAX_HEIGHT + 1
iterations = 0
scale_factor = 1
while (width > MAX_WIDTH) or (height > MAX_HEIGHT):
data = horzvert_layout(project, frame=frame, scale_factor=scale_factor)
iterations += 1
if iterations > 10:
raise Exception("Layout iterations exceeds 10")
scale_factor -= 0.1
if scale_factor <= 0:
raise Exception("Layout scale_fator <= 0")
width = data.get('width', 0)
height = data.get('height', 0)
return data
def horzvert_layout(project, frame=0, scale_factor=1):
'''Do the layout and produce a usable dict output that can be persisted with the Project.
We used to save these as attributes in the File objects.
'''
    # Allow overriding the row height via the frame parameter passed in
files = list(project.files.all())
if len(files) < 1:
return {}
if project.layout_mode == 'horizontal':
stripe_height = int(max(f.height for f in files) * scale_factor)
if frame == 'slide':
frame = stripe_height / 2
stripe_height += frame*2
if project.layout_mode.startswith('vertical'):
stripe_width = int(max(f.width for f in files) * scale_factor)
if frame == 'slide':
frame = stripe_width / 2
stripe_width += frame*2
# If a frame was passed in, adjust the x,y of all items to give them that much spacing as a frame
try:
frame = int(frame)
except ValueError:
frame = 0
# Calculate a new width/height for the files
# based on making them all the same height
for f in files:
if project.layout_mode == 'horizontal':
f.new_height = stripe_height
if f.height != stripe_height:
ratio = float(f.width)/float(f.height)
f.new_width = int(stripe_height*ratio)
else:
f.new_width = f.width
elif project.layout_mode.startswith('vertical'):
f.new_width = stripe_width
if f.width != stripe_width:
ratio = float(f.height)/float(f.width)
f.new_height = int(stripe_width*ratio)
else:
f.new_height = f.height
else:
f.new_width = f.width
f.new_height = f.height
# Given the files, how many should there be per row
# and how wide should a row be?
# calc_row_width_height
count_per_stripe = int(round(math.sqrt(len(files))))
average_width = sum(f.new_width for f in files) / len(files)
average_height = sum(f.new_height for f in files) / len(files)
if project.layout_mode == 'horizontal':
stripe_size = count_per_stripe * (average_width+frame*count_per_stripe)
stripe_width = stripe_size
elif project.layout_mode.startswith('vertical'):
stripe_size = count_per_stripe * (average_height+frame*count_per_stripe)
stripe_height = stripe_size
else:
stripe_size = count_per_stripe * average_height
stripe_width = stripe_height = stripe_size
# Make the stripes by calculating an offset for where the
# images should be placed
new_files = []
stripe_idx, stripes = 0, []
x,y = 0,0
thefile = None
cur_size = 0
if len(files) == 1:
margin = stripe_size+1
else:
margin = stripe_size*0.965
while files or thefile:
if not thefile:
thefile = files.pop(0) # just feels wrong to name it 'file'
new_files.append(thefile)
if project.layout_mode == 'horizontal':
if (cur_size + thefile.new_width) < margin:
thefile.x = x
thefile.y = y
thefile.stripe = stripe_idx
x += thefile.new_width
cur_size += thefile.new_width
dontfit = True if thefile.is_break else False
thefile = None
x += frame
else:
dontfit = True
if dontfit and (cur_size > 0):
stripes.append(cur_size)
stripe_idx += 1
cur_size = 0
x = 0
y += stripe_height
y += frame
elif project.layout_mode.startswith('vertical'):
if ((cur_size + thefile.new_height) < margin):
thefile.x = x
thefile.y = y
thefile.stripe = stripe_idx
y += thefile.new_height
cur_size += thefile.new_height
dontfit = True if thefile.is_break else False
thefile = None
y += frame
else:
dontfit = True
if dontfit and (cur_size > 0):
stripes.append(cur_size)
stripe_idx += 1
cur_size = 0
y = 0
x += stripe_width
x += frame
else:
thefile.x = random.randint(0, stripe_width-thefile.width)
thefile.y = random.randint(0, stripe_height-thefile.height)
thefile = None
if len(stripes) < (stripe_idx+1):
stripes.append(cur_size)
if project.layout_mode == 'horizontal':
# In horizontal project.layout_mode, each stripe has an actual width that is less than the stripe_width
# To make the layout nicely centered, adjust each x with an offset.
for f in new_files:
offset = (stripe_width - stripes[f.stripe]) / 2
f.x = f.x+offset
canvas_width = stripe_width
canvas_height = stripe_height * len(stripes)
elif project.layout_mode == 'vertical':
for f in new_files:
offset = (stripe_height - stripes[f.stripe]) / 2
f.y = f.y+offset
canvas_width = stripe_width * len(stripes)
canvas_height = stripe_height
elif project.layout_mode == 'verticaltop':
canvas_width = stripe_width * len(stripes)
canvas_height = stripe_height
else:
canvas_width = stripe_width
canvas_height = stripe_height
data = {
'version':1,
'width': canvas_width,
'height': canvas_height,
'background_color': project.background_color,
'images': []
}
# And save all the modified attributes
for f in new_files:
random_colour = '%x' % random.randint(0,180)
tmp = { 'pk':f.pk, 'filename':f.filename, 'fill_style': '#%s' % (random_colour*3),
'x': f.x,
'y': f.y,
'width': f.new_width,
'height': f.new_height,
'metadata': f.metadata and json.loads(f.metadata) or {},
}
data['images'].append( tmp )
return data
MAX_WIDTH = 65000
MAX_HEIGHT = 65000
def make_bitmap(project, filepath):
    'Given the layout coordinates for @project, generate a bitmap and save it under @filepath'
    # Make the gigantic bitmap; fail fast if the layout exceeds the maximum dimensions
    layout_data = project.layout_as_dict()
    if not layout_data:
        layout_data = horzvert_layout(project)
    if layout_data['width'] > MAX_WIDTH:
        raise Exception('Width %s is > %s' % (layout_data['width'], MAX_WIDTH))
    if layout_data['height'] > MAX_HEIGHT:
        raise Exception('Height %s is > %s' % (layout_data['height'], MAX_HEIGHT))
msgs = []
large = Image.new('RGBA', (layout_data['width'], layout_data['height']), color=layout_data['background_color'])
for f in layout_data.get('images', []):
try:
img = Image.open(os.path.join(project.originals_path, f['filename']))
i_width, i_height = img.size
if i_width != f['width'] or i_height != f['height']:
img = img.resize((f['width'], f['height']), Image.ANTIALIAS)
except IOError:
msgs.append('Problem with %s' % f['filename'])
continue
if img.mode == 'RGBA':
large.paste(img, (f['x'], f['y']), img)
else:
large.paste(img, (f['x'], f['y']))
large.save(filepath)
return msgs
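# Illustrative sketch (not part of the original module): make_bitmap passes
# the RGBA image itself as the third paste() argument, so Pillow uses its
# alpha channel as a mask and transparency is preserved.
def _paste_demo():
    canvas = Image.new('RGBA', (10, 10), 'white')
    spot = Image.new('RGBA', (4, 4), (255, 0, 0, 128))
    canvas.paste(spot, (2, 2), spot)  # third argument acts as the alpha mask
    return canvas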
|
|
# Copyright 2007 Owen Taylor
#
# This file is part of Reinteract and distributed under the terms
# of the BSD license. See the file COPYING in the Reinteract
# distribution for full details.
#
########################################################################
import re
TOKEN_KEYWORD = 1
TOKEN_NAME = 2
TOKEN_PUNCTUATION = 3
TOKEN_COMMENT = 4
TOKEN_STRING = 5
TOKEN_CONTINUATION = 6
TOKEN_NUMBER = 7
TOKEN_JUNK = 8
TOKEN_LPAREN = 9
TOKEN_RPAREN = 10
TOKEN_LSQB = 11
TOKEN_RSQB = 12
TOKEN_LBRACE = 13
TOKEN_RBRACE = 14
TOKEN_BACKQUOTE = 15
TOKEN_COLON = 16
TOKEN_DOT = 17
TOKEN_EQUAL = 18
TOKEN_AUGEQUAL = 19
TOKEN_BUILTIN_CONSTANT = 20
FLAG_OPEN = 1
FLAG_CLOSE = 2
_KEYWORDS = set([ 'and', 'as', 'assert', 'break', 'class', 'continue', 'def',
'del', 'elif', 'else', 'except', 'exec', 'finally', 'for',
'from', 'global', 'if', 'import', 'in', 'is', 'lambda', 'not',
'or', 'pass', 'print', 'raise', 'return', 'try', 'while',
'with', 'yield' ])
_IDENTIFIER_TOKENS = {
'None' : TOKEN_BUILTIN_CONSTANT,
'True' : TOKEN_BUILTIN_CONSTANT,
'False' : TOKEN_BUILTIN_CONSTANT
}
_PUNCTUATION_TOKENS = {
'(' : TOKEN_LPAREN,
')' : TOKEN_RPAREN,
'[' : TOKEN_LSQB,
']' : TOKEN_RSQB,
'{' : TOKEN_LBRACE,
'}' : TOKEN_RBRACE,
'`' : TOKEN_BACKQUOTE,
':' : TOKEN_COLON,
'=' : TOKEN_EQUAL,
'+=' : TOKEN_AUGEQUAL,
'-=' : TOKEN_AUGEQUAL,
'*=' : TOKEN_AUGEQUAL,
'/=' : TOKEN_AUGEQUAL,
'%=' : TOKEN_AUGEQUAL,
'&=' : TOKEN_AUGEQUAL,
'|=' : TOKEN_AUGEQUAL,
'^=' : TOKEN_AUGEQUAL,
'<<=' : TOKEN_AUGEQUAL,
'>>=' : TOKEN_AUGEQUAL,
'**=' : TOKEN_AUGEQUAL,
'//=' : TOKEN_AUGEQUAL
}
_PUNCTUATION_MATCH = {
')' : '(',
']' : '[',
'}' : '{',
}
_TOKENIZE_RE = re.compile(r"""
    # Operators and delimiters
(?P<punctuation>
[@,:`;~\(\)\[\]\{\}] |
[+%&|^-]=? |
\*(?:\*=|\*|=|) |
/(?:/=|/|=|) |
<(?:<=|<|=|>|) |
>(?:>=|>|=|) |
=(?:=|) |
!=) |
(?P<comment> \#.*) | # Comment
(?P<identifier> [A-Za-z_][A-Za-z0-9_]*) | # Identifier
(?P<string>
(?:[rR][uU]?|[uU][rR]?|)
(?P<stringcore>
(?: '''(?:\\.|[^'\\]|'(?!''))*(?:'''|(?=\\$)|$)) | # String delimited with '''
(?: \"""(?:\\.|[^"\\]|"(?!""))*(?:\"""|(?=\\$)|$)) | # String delimited with \"""
(?: '(?:\\.|[^'\\])*(?:'|(?=\\$)|$)) | # String delimited with '
(?: "(?:\\.|[^"\\])*(?:"|(?=\\$)|$)) # String delimited with "
)
) |
(?P<continuation> \\) | # Line continuation
# A "number-like", possibly invalid expression
(?P<number>
0[Xx][0-9A-Za-z_]* |
(?: [0-9] | \.[0-9] )
[0-9.]*
(?: [eE][+-]? [0-9A-Za-z_.]* |
[0-9A-Za-z_.]* )
) |
(?P<dot> \.) | # isolated .
(?P<white> \s+) | # whitespace
(?P<notvalid> [^\s!-\#%->@-~]+) | # Not-valid outside of a string... needs to
# be + not +? to avoid splitting UTF-8
    (?P<junk> .+?) # Other junk (vacuum up anything not matched)
""", re.VERBOSE)
_CLOSE_STRING_RE = {
"'''": re.compile(r"(?:\\.|[^\'\\]|\'(?!\'\'))*(?:(\'\'\')|(?=\\$)|$)"),
'"""': re.compile(r"(?:\\.|[^\"\\]|\"(?!\"\"))*(?:(\"\"\")|(?=\\$)|$)"),
"'": re.compile(r"(?:\\.|[^\'\\])*(?:(\')|(?=\\$)|$)"),
'"': re.compile(r"(?:\\.|[^\"\\])*(?:(\")|(?=\\$)|$)")
}
# A valid number; the idea is that when tokenizing, we want to keep
# together sequences like 0junk or 0e+a and then mark them as entirely
# invalid rather than breaking them into a "valid" number and a "valid"
# part after that
_NUMBER_RE = re.compile(r"""
^(?:
0j? | # 0 (or complex)
0[Xx][0-9A-Fa-f_]* | # Hex
0[0-7]+ | # Octal
(?:[1-9][0-9]*|0)j? | # Decimal (or complex)
(?:(?:[0-9]*\.[0-9]+|[0-9]+\.?)[eE][+-]?[0-9]+ | # Floating point (or complex)
[0-9]*\.[0-9]+|[0-9]+\.)j?
)$
""", re.VERBOSE)
def tokenize_line(str, stack=None):
if (stack == None):
stack = []
else:
stack = list(stack)
tokens = []
pos = 0
if len(stack) > 0:
if stack[-1] in _CLOSE_STRING_RE:
delim = stack[-1]
match = _CLOSE_STRING_RE[delim].match(str)
assert(match)
flags = 0
if match.group(1):
flags |= FLAG_CLOSE
stack.pop()
tokens.append((TOKEN_STRING, match.start(), match.end(), flags))
pos = match.end()
l = len(str)
while pos < l:
match = _TOKENIZE_RE.match(str, pos)
assert(match)
# print repr(match.group()), match.span(), match.groupdict()
if not match.group('white'):
flags = 0
token_type = None
if match.group('punctuation'):
token_type = TOKEN_PUNCTUATION
s = match.group()
if s in _PUNCTUATION_TOKENS:
token_type = _PUNCTUATION_TOKENS[s]
if token_type == TOKEN_BACKQUOTE:
if len(stack) > 0 and stack[-1] == "`":
flags |= FLAG_CLOSE
stack.pop()
else:
flags |= FLAG_OPEN
stack.append("`")
elif s in _PUNCTUATION_MATCH:
if len(stack) > 0 and stack[-1] == _PUNCTUATION_MATCH[s]:
flags |= FLAG_CLOSE
stack.pop()
else:
token_type = TOKEN_JUNK
elif token_type == TOKEN_LPAREN or token_type == TOKEN_LSQB or token_type == TOKEN_LBRACE:
flags |= FLAG_OPEN
stack.append(s)
elif match.group('identifier'):
s = match.group()
if s in _KEYWORDS:
token_type = TOKEN_KEYWORD
elif s in _IDENTIFIER_TOKENS:
token_type = _IDENTIFIER_TOKENS[s]
else:
token_type = TOKEN_NAME
elif match.group('number'):
s = match.group()
if _NUMBER_RE.match(s):
token_type = TOKEN_NUMBER
else:
token_type = TOKEN_JUNK
elif match.group('string'):
token_type = TOKEN_STRING
core = match.group('stringcore')
if core.startswith('"""'):
delim = '"""'
elif core.startswith("'''"):
delim = "'''"
elif core.startswith("'"):
delim = "'"
else:
delim = '"'
if len(core) == len(delim) or \
not core.endswith(delim) or \
(core[len(core)-len(delim)-1] == '\\' and
core[len(core)-len(delim)-2] != '\\'):
flags |= FLAG_OPEN
stack.append(delim)
elif match.group('dot'):
token_type = TOKEN_DOT
elif match.group('comment'):
token_type = TOKEN_COMMENT
elif match.group('continuation'):
token_type = TOKEN_CONTINUATION
elif match.group('notvalid') or match.group('junk'):
token_type = TOKEN_JUNK
tokens.append((token_type, match.start(), match.end(), flags))
pos = match.end()
# Catch an unterminated, uncontinued short string, and don't leave it on the stack
# Would be nice to indicate an error here somehow, but I'm not sure how
if len(stack) > 0 and (stack[-1] == "'" or stack[-1] == '"') and \
(len(tokens) == 0 or tokens[-1][0] != TOKEN_CONTINUATION):
token_type, start, end, flags = tokens[-1]
flags &= ~FLAG_OPEN
tokens[-1] = (token_type, start, end, flags)
stack.pop()
return (tokens, stack)
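# Illustrative sketch (not part of the original module): the stack returned
# for one physical line is passed back in for the next line, so an open
# triple-quoted string (or bracket) carries across lines.
def _multiline_demo():
    tokens, stack = tokenize_line('x = """abc')           # stack == ['"""']
    tokens, stack = tokenize_line('def"""', stack=stack)  # stack == []
    return stack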
if __name__ == '__main__':
import sys
failed = False
    def expect(str, expected_tokens, in_stack=[], expected_stack=[]):
        global failed
        tokens, stack = tokenize_line(str, stack=in_stack)
result = [(token[0], str[token[1]:token[2]]) for token in tokens]
success = True
if len(tokens) == len(expected_tokens):
for (t, e) in zip(result, expected_tokens):
if t != e:
success = False
break
else:
success = False
if not success:
print "For %s, got %s, expected %s" % (repr(str), result, expected_tokens)
failed = True
if stack != expected_stack:
print "For %s, in_stack=%s, got out_stack=%s, expected out_stack=%s" % (repr(str), in_stack, stack, expected_stack)
failed = True
expect('@', [(TOKEN_PUNCTUATION, '@')])
expect('(', [(TOKEN_LPAREN, '(')], expected_stack=['('])
expect('<<', [(TOKEN_PUNCTUATION, '<<')])
expect('<<=', [(TOKEN_AUGEQUAL, '<<=')])
expect('<<>', [(TOKEN_PUNCTUATION, '<<'), (TOKEN_PUNCTUATION, '>')])
expect("#foo", [(TOKEN_COMMENT, "#foo")])
expect("1 #foo", [(TOKEN_NUMBER, "1"), (TOKEN_COMMENT, "#foo")])
expect("abc", [(TOKEN_NAME, "abc")])
expect("if", [(TOKEN_KEYWORD, "if")])
expect("'abc'", [(TOKEN_STRING, "'abc'")])
expect(r"'a\'bc'", [(TOKEN_STRING, r"'a\'bc'")])
expect(r"'abc", [(TOKEN_STRING, r"'abc")])
expect("'abc\\", [(TOKEN_STRING, "'abc"), (TOKEN_CONTINUATION, "\\")], expected_stack=["'"])
expect('"""foo"', [(TOKEN_STRING, '"""foo"')], expected_stack=['"""'])
expect("'''foo'", [(TOKEN_STRING, "'''foo'")], expected_stack=["'''"])
expect('0x0', [(TOKEN_NUMBER, '0x0')])
expect('1', [(TOKEN_NUMBER, '1')])
expect('1.e3', [(TOKEN_NUMBER, '1.e3')])
expect('.1e3', [(TOKEN_NUMBER, '.1e3')])
expect('1.1e3', [(TOKEN_NUMBER, '1.1e3')])
expect('1.1e+3', [(TOKEN_NUMBER, '1.1e+3')])
expect('1.1e0+3', [(TOKEN_NUMBER, '1.1e0'), (TOKEN_PUNCTUATION, '+'), (TOKEN_NUMBER, '3')])
expect('.', [(TOKEN_DOT, '.')])
expect('a.b', [(TOKEN_NAME, 'a'), (TOKEN_DOT, '.'), (TOKEN_NAME, 'b')])
expect('1a', [(TOKEN_JUNK, '1a')])
# Check that literal UTF-8 gets parsed correctly as a single token instead of split up
expect('\xc3\xa4', [(TOKEN_JUNK, '\xc3\xa4')])
    # But doesn't swallow up trailing valid characters
expect('\xc3\xa4foo', [(TOKEN_JUNK, '\xc3\xa4'), (TOKEN_NAME, "foo")])
# Stack tests
expect('()', [(TOKEN_LPAREN, '('), (TOKEN_RPAREN, ')')])
expect('}', [(TOKEN_JUNK, '}')])
expect('(})', [(TOKEN_LPAREN, '('), (TOKEN_JUNK, '}'), (TOKEN_RPAREN, ')')])
expect('`', [(TOKEN_BACKQUOTE, '`')], expected_stack=['`'])
expect('``', [(TOKEN_BACKQUOTE, '`'), (TOKEN_BACKQUOTE, '`')])
# Unterminated single line strings don't contribute to the stack
expect('"', [(TOKEN_STRING, '"')], expected_stack=[])
expect(r'"abc\"', [(TOKEN_STRING, r'"abc\"')])
expect('"""foo""" """bar', [(TOKEN_STRING, '"""foo"""'), (TOKEN_STRING, '"""bar')], expected_stack=['"""'])
# Testing starting with an open string
expect('"', [(TOKEN_STRING, '"')], in_stack=['"'])
expect('\\"', [(TOKEN_STRING, '\\"')], in_stack=['"'])
expect('\\"" 1', [(TOKEN_STRING, '\\""'), (TOKEN_NUMBER, '1')], in_stack=['"'])
expect("'", [(TOKEN_STRING, "'")], in_stack=["'"])
expect('foo"""', [(TOKEN_STRING, 'foo"""')], in_stack=['"""'])
expect('foo', [(TOKEN_STRING, 'foo')], in_stack=['"""'], expected_stack=['"""'])
expect('foo"', [(TOKEN_STRING, 'foo"')], in_stack=['"""'], expected_stack=['"""'])
expect("foo'''", [(TOKEN_STRING, "foo'''")], in_stack=["'''"])
expect('foo', [(TOKEN_STRING, 'foo')], in_stack=["'''"], expected_stack=["'''"])
expect("foo'", [(TOKEN_STRING, "foo'")], in_stack=["'''"], expected_stack=["'''"])
if failed:
sys.exit(1)
else:
sys.exit(0)
|
|
"""
Adds rasters identifed in a comma delimited text file to a gdal VRT
and build a mosaic raster. The text file is produced by running the
script "find_raster_path.py". The rasters can be reprojected, reclassed,
copied to a new directory and renamed based on a field in the text file.
"""
from __future__ import print_function
import os
import csv
import subprocess
import string
import numpy
from collections import defaultdict
from operator import itemgetter
from osgeo import ogr
from osgeo import gdal
from osgeo import gdal_merge
# output csv header
header = ["STATUS", "NAME", "PATH", "PROJECT", "YEAR", "PROJ", "QUAD", "NAMECLEAN", "NAMENEW", "FORMAT"]
status_col = 0
order_col = 1
path_col = 2
year_col = 4
name_col = 7
new_name_col = 8
# To run the gdal commands you first have to set the path and GDAL_DATA variable in environments.
# C:\Python27\Lib\site-packages\osgeo
# GDAL_DATA C:\Python27\Lib\site-packages\osgeo\data\gdal
# input comma separated text file w/ inventory of rasters going into md
in_csvfile = r"\\DEQWQNAS01\Lidar08\LiDAR\YEAR\Year_quad_list_input.csv"
proj_final = 'EPSG:2992'
outpath_warp = r"N:\LiDAR\BE\new"
# this is the directory holding the reclass rasters
outpath_reclass = r"\\DEQWQNAS01\Lidar08\LiDAR\YEAR\new"
out_vrt = r"N:\LiDAR\BE\new\BE.vrt"
# output comma separated text file w/ inventory of reclass rasters
outcsvfile = r"N:\LiDAR\BE\new\BE_quad_list_output_new.csv"
# output comma separated text file w/ inventory of rasters in vrt
outtxtfile = r"N:\LiDAR\BE\new\BE_mosaic_list.txt"
outmosaic = r"N:\LiDAR\BE\BE_mosaic.img"
outmosaic_path = r"N:\LiDAR\BE"
outmosaic_name = r"BE_mosaic.img"
infile_type = "\\hdr.adf"
out_format = "HFA"
out_ext = ".img"
overwrite_csv = False
buildwarp = False
buildreclass = False
buildvrt_list = True
buildvrt = True
buildmosaic = False
rc_lower = -50000
rc_upper = 50000
pathletter_dict = {r'\\DEQWQNAS01\Lidar01': r'G:',
r'\\DEQWQNAS01\Lidar02': r'H:',
r'\\DEQWQNAS01\Lidar03': r'I:',
r'\\DEQWQNAS01\Lidar04': r'J:',
r'\\DEQWQNAS01\Lidar05': r'K:',
r'\\DEQWQNAS01\Lidar06': r'L:',
r'\\DEQWQNAS01\Lidar07': r'M:',
r'\\DEQWQNAS01\Lidar08': r'N:'}
def read_csv(csvfile, skipheader = False):
"""Reads an input csv file and returns the header row and data
as a list"""
with open(csvfile, "rb") as f:
reader = csv.reader(f)
if skipheader == True: reader.next()
csvlist = [row for row in reader]
return(csvlist)
def write_csv(csvlist, csvfile):
"""write the input list to csv"""
import csv
with open(csvfile, "wb") as f:
linewriter = csv.writer(f)
for row in csvlist:
linewriter.writerow(row)
def write_txt(txtlist, txtfile):
"""write the input list to txt"""
with open(txtfile, "w") as f:
for row in txtlist:
f.write(row + '\n')
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def build_raster_list_from_csv(csvfile, year_col, path_col, name_col, new_name_col):
"""Read from a csv, and pull the path, year,
and raster file names of each raster to be processed into a sorted list.
csv must contain a fields titled Year, Path, and Name"""
csv_list = read_csv(csvfile, skipheader = True)
# Pull the path, year, and raster file name.
# The second name is a placeholder for a new name when there are duplicates
raster_list = [[row[year_col],
row[path_col],
row[name_col],
row[new_name_col]]for row in csv_list]
# sort the list by year and then NewName
raster_list = sorted(raster_list, key=itemgetter(0, 3), reverse=False)
    # Add columns for the processing status and the sort order
raster_list = [["#", n] + raster_list[n] for n in range(0, len(raster_list))]
return(raster_list)
def get_nodata_value(raster_path, band, status):
"""Returns the no data value from a raster band"""
try:
raster = gdal.Open(raster_path)
r_band = raster.GetRasterBand(band)
nodatavalue = r_band.GetNoDataValue()
raster = None
except:
nodatavalue = None
status = "E"
return(nodatavalue,status)
def execute_cmd(cmd_list):
"""Executes commands to the command prompt using subprocess module.
Commands must be a string in a list"""
for cmd in cmd_list:
print(cmd)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
stdout, stderr = proc.communicate()
exit_code=proc.wait()
if exit_code:
# Something went wrong
status = "E"
# Try to delete the temp drive for next iteration
proc = subprocess.Popen(cmd[3], stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
print(stderr)
return (status)
else:
print(stdout)
# Yay! we've reached the end without errors
status = "X"
return (status)
# -- Build a list of rasters to iterate through -----------------------
# get from csv file or build one from a feature class and export to csv
if overwrite_csv is True:
raster_list = build_raster_list_from_csv(in_csvfile, year_col=0, path_col=1, name_col=2, new_name_col=3)
write_csv(raster_list, outcsvfile)
else:
csv_exists = os.path.isfile(outcsvfile)
    if csv_exists is False:
        # build_raster_list_from_shp() and in_shp come from the companion
        # workflow and are not defined in this script
        raster_list = build_raster_list_from_shp(in_shp)
#raster_list = build_raster_list_from_gdb(in_gdb_path, in_gdb_fc)
write_csv(raster_list, outcsvfile)
# Read the csv
raster_list = read_csv(outcsvfile, skipheader = False)
tot = len(raster_list)
n = 0
in_vrt_rasters = []
# -- Warp/reproject the rasters ----------------------------------------
for row in raster_list:
status = row[0]
inpath = row[3]
inpath_server = inpath[:20]
inpath_letter = pathletter_dict[inpath_server]
inpath_temp = inpath.replace(inpath[:20], inpath_letter) + infile_type
out_raster = outpath_warp + "\\" + row[5] + out_ext
# check to see if the file already exists
raster_exists = os.path.isfile(out_raster)
# Check if the raster already exists and status
if raster_exists is True and status in ["X", "#"]:
raster_list[n][0] = "X"
in_vrt_rasters.append(out_raster)
write_csv(raster_list, outcsvfile)
n = n + 1
elif raster_exists is True and status == "E":
buildvrt = False
n = n + 1
else:
inraster_exists = os.path.isfile(inpath_temp)
if inraster_exists is True:
# Get the no data value
            nodatavalue, status = get_nodata_value(raster_path=inpath, band=1, status=status)
cmd_list = ['gdalwarp -t_srs {0} -q -r near -srcnodata {1} -dstnodata {2} -of {3} -overwrite {4} {5}'.format(proj_final, nodatavalue, nodatavalue, out_format, inpath_temp, out_raster)]
print("warping "+str(n)+" of "+str(tot)+" "+inpath_temp)
if status is not "E" and buildwarp is True:
status = execute_cmd(cmd_list)
in_vrt_rasters.append(out_raster)
raster_list[n][0] = status
write_csv(raster_list, outcsvfile)
else:
print("Error: " + inpath_temp + " does not exist")
status = "E"
if status is "E":
buildvrt = False
print("gdalwarp Error")
write_csv(raster_list, outcsvfile)
n = n + 1
print("done warping")
# -- Reclass the rasters ----------------------------------------------
n = 0
if buildreclass is True:
for row in raster_list:
status = row[0]
year = int(row[2])
inpath = row[3]
inpath_server = inpath[:20]
inpath_letter = pathletter_dict[inpath_server]
inpath_temp = inpath.replace(inpath[:20], inpath_letter) + infile_type
out_raster = outpath_reclass + "\\" + row[5] + out_ext
# check to see if the output file already exists
raster_exists = os.path.isfile(out_raster)
# Check if the raster already exists and status
if raster_exists is True and status in ["X", "#"]:
raster_list[n][0] = "X"
in_vrt_rasters.append(out_raster)
write_csv(raster_list, outcsvfile)
n = n + 1
elif raster_exists is True and status == "E":
buildvrt = False
n = n + 1
else:
inraster_exists = os.path.isfile(inpath_temp)
if inraster_exists is True:
print("reclass "+str(n)+" of "+str(tot)+" "+inpath_temp)
else:
print("Error: " + inpath_temp + " does not exist")
status = "E"
# start reclass code
try:
data = gdal.Open(inpath_temp)
band = data.GetRasterBand(1)
nodata = band.GetNoDataValue()
block_sizes = band.GetBlockSize()
x_block_size = block_sizes[0]
y_block_size = block_sizes[1]
xsize = band.XSize
ysize = band.YSize
#max_value = band.GetMaximum()
#min_value = band.GetMinimum()
#if max_value == None or min_value == None:
# stats = band.GetStatistics(0, 1)
# max_value = stats[1]
# min_value = stats[0]
driver = gdal.GetDriverByName(out_format)
data_reclass = driver.Create(out_raster, xsize, ysize, 1, gdal.GDT_UInt32)
data_reclass.SetGeoTransform(data.GetGeoTransform())
data_reclass.SetProjection(data.GetProjection())
print("reclassifying {0}".format(out_raster))
for i in range(0, ysize, y_block_size):
if i + y_block_size < ysize:
rows = y_block_size
else:
rows = ysize - i
for j in range(0, xsize, x_block_size):
if j + x_block_size < xsize:
cols = x_block_size
else:
cols = xsize - j
data_array = band.ReadAsArray(j, i, cols, rows)
rc_array = numpy.zeros((rows, cols), numpy.uint16)
rc_array = year * numpy.logical_and(data_array > rc_lower, data_array < rc_upper)
data_reclass.GetRasterBand(1).WriteArray(rc_array,j,i)
data_reclass.GetRasterBand(1).SetNoDataValue(0)
data_reclass = None
except:
status = "E"
# end reclass code
if status is not "E":
in_vrt_rasters.append(out_raster)
raster_list[n][0] = status
write_csv(raster_list, outcsvfile)
if status is "E":
buildvrt = False
print("reclass error")
write_csv(raster_list, outcsvfile)
n = n + 1
print("done with reclass")
else:
print("skipping reclass")
# -- Build the gdal VRT. -----------------------------------------------
# Must not have an error in status
if buildvrt is True:
if buildvrt_list is True:
write_txt(in_vrt_rasters, outtxtfile)
cmd_list = ['gdalbuildvrt -resolution highest -hidenodata -addalpha -overwrite -q -input_file_list {0} {1}'.format(outtxtfile, out_vrt)]
status = execute_cmd(cmd_list)
if status is "E":
buildmosaic = False
print("gdalbuild Error")
else:
print("done building vrt")
else:
print("skipping build vrt")
# Build the mosaic. Must not have an error in status
if buildmosaic is True:
print("building mosaic")
cmd_list = ['gdal_translate -of {0} -q -a_nodata none -stats {1} {2}'.format(out_format, out_vrt, outmosaic)]
status = execute_cmd(cmd_list)
if status is "E":
print("gdal_translate Error")
else:
print("done mosaicing")
else:
print("skipping mosaic")
|
|
# Copyright (c) 2015 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
from oslo_log import log
from manila.common import constants
from manila import exception
from manila.share.drivers import helpers
from manila.share.drivers.windows import windows_utils
LOG = log.getLogger(__name__)
class WindowsSMBHelper(helpers.CIFSHelperBase):
_SHARE_ACCESS_RIGHT_MAP = {
constants.ACCESS_LEVEL_RW: "Change",
constants.ACCESS_LEVEL_RO: "Read"}
_NULL_SID = "S-1-0-0"
_WIN_ACL_ALLOW = 0
_WIN_ACL_DENY = 1
_WIN_ACCESS_RIGHT_FULL = 0
_WIN_ACCESS_RIGHT_CHANGE = 1
_WIN_ACCESS_RIGHT_READ = 2
_WIN_ACCESS_RIGHT_CUSTOM = 3
_ACCESS_LEVEL_CUSTOM = 'custom'
_WIN_ACL_MAP = {
_WIN_ACCESS_RIGHT_CHANGE: constants.ACCESS_LEVEL_RW,
_WIN_ACCESS_RIGHT_FULL: constants.ACCESS_LEVEL_RW,
_WIN_ACCESS_RIGHT_READ: constants.ACCESS_LEVEL_RO,
_WIN_ACCESS_RIGHT_CUSTOM: _ACCESS_LEVEL_CUSTOM,
}
_SUPPORTED_ACCESS_LEVELS = (constants.ACCESS_LEVEL_RO,
constants.ACCESS_LEVEL_RW)
_SUPPORTED_ACCESS_TYPES = ('user', )
def __init__(self, remote_execute, configuration):
self._remote_exec = remote_execute
self.configuration = configuration
self._windows_utils = windows_utils.WindowsUtils(
remote_execute=remote_execute)
def init_helper(self, server):
self._remote_exec(server, "Get-SmbShare")
def create_exports(self, server, share_name, recreate=False):
export_location = '\\\\%s\\%s' % (server['public_address'],
share_name)
if not self._share_exists(server, share_name):
share_path = self._windows_utils.normalize_path(
os.path.join(self.configuration.share_mount_path,
share_name))
# If no access rules are requested, 'Everyone' will have read
# access, by default. We set read access for the 'NULL SID' in
# order to avoid this.
cmd = ['New-SmbShare', '-Name', share_name, '-Path', share_path,
'-ReadAccess', "*%s" % self._NULL_SID]
self._remote_exec(server, cmd)
else:
LOG.info("Skipping creating export %s as it already exists.",
share_name)
return self.get_exports_for_share(server, export_location)
def remove_exports(self, server, share_name):
if self._share_exists(server, share_name):
cmd = ['Remove-SmbShare', '-Name', share_name, "-Force"]
self._remote_exec(server, cmd)
else:
LOG.debug("Skipping removing export %s as it does not exist.",
share_name)
def _get_volume_path_by_share_name(self, server, share_name):
share_path = self._get_share_path_by_name(server, share_name)
volume_path = self._windows_utils.get_volume_path_by_mount_path(
server, share_path)
return volume_path
def _get_acls(self, server, share_name):
cmd = ('Get-SmbShareAccess -Name %(share_name)s | '
'Select-Object @("Name", "AccountName", '
'"AccessControlType", "AccessRight") | '
'ConvertTo-JSON -Compress' % {'share_name': share_name})
(out, err) = self._remote_exec(server, cmd)
if not out.strip():
return []
raw_acls = json.loads(out)
if isinstance(raw_acls, dict):
return [raw_acls]
return raw_acls
def get_access_rules(self, server, share_name):
raw_acls = self._get_acls(server, share_name)
acls = []
for raw_acl in raw_acls:
access_to = raw_acl['AccountName']
access_right = raw_acl['AccessRight']
access_level = self._WIN_ACL_MAP[access_right]
access_allow = raw_acl["AccessControlType"] == self._WIN_ACL_ALLOW
if not access_allow:
if access_to.lower() == 'everyone' and len(raw_acls) == 1:
LOG.debug("No access rules are set yet for share %s",
share_name)
else:
LOG.warning(
"Found explicit deny ACE rule that was not "
"created by Manila and will be ignored: %s",
raw_acl)
continue
if access_level == self._ACCESS_LEVEL_CUSTOM:
LOG.warning(
"Found 'custom' ACE rule that will be ignored: %s",
raw_acl)
continue
elif access_right == self._WIN_ACCESS_RIGHT_FULL:
LOG.warning(
"Account '%(access_to)s' was given full access "
"right on share %(share_name)s. Manila only "
"grants 'change' access.",
{'access_to': access_to,
'share_name': share_name})
acl = {
'access_to': access_to,
'access_level': access_level,
'access_type': 'user',
}
acls.append(acl)
return acls
def _grant_share_access(self, server, share_name, access_level, access_to):
access_right = self._SHARE_ACCESS_RIGHT_MAP[access_level]
cmd = ["Grant-SmbShareAccess", "-Name", share_name,
"-AccessRight", access_right,
"-AccountName", "'%s'" % access_to, "-Force"]
self._remote_exec(server, cmd)
self._refresh_acl(server, share_name)
LOG.info("Granted %(access_level)s access to '%(access_to)s' "
"on share %(share_name)s",
{'access_level': access_level,
'access_to': access_to,
'share_name': share_name})
def _refresh_acl(self, server, share_name):
cmd = ['Set-SmbPathAcl', '-ShareName', share_name]
self._remote_exec(server, cmd)
def _revoke_share_access(self, server, share_name, access_to):
cmd = ['Revoke-SmbShareAccess', '-Name', share_name,
'-AccountName', '"%s"' % access_to, '-Force']
self._remote_exec(server, cmd)
self._refresh_acl(server, share_name)
LOG.info("Revoked access to '%(access_to)s' "
"on share %(share_name)s",
{'access_to': access_to,
'share_name': share_name})
def update_access(self, server, share_name, access_rules, add_rules,
delete_rules):
self.validate_access_rules(
access_rules + add_rules,
self._SUPPORTED_ACCESS_TYPES,
self._SUPPORTED_ACCESS_LEVELS)
if not (add_rules or delete_rules):
existing_rules = self.get_access_rules(server, share_name)
add_rules, delete_rules = self._get_rule_updates(
existing_rules=existing_rules,
requested_rules=access_rules)
LOG.debug(("Missing rules: %(add_rules)s, "
"superfluous rules: %(delete_rules)s"),
{'add_rules': add_rules,
'delete_rules': delete_rules})
# Some rules may have changed, so we'll
# treat the deleted rules first.
for deleted_rule in delete_rules:
try:
self.validate_access_rules(
[deleted_rule],
self._SUPPORTED_ACCESS_TYPES,
self._SUPPORTED_ACCESS_LEVELS)
except (exception.InvalidShareAccess,
exception.InvalidShareAccessLevel):
# This check will allow invalid rules to be deleted.
LOG.warning(
"Unsupported access level %(level)s or access type "
"%(type)s, skipping removal of access rule to "
"%(to)s.", {'level': deleted_rule['access_level'],
'type': deleted_rule['access_type'],
'to': deleted_rule['access_to']})
continue
self._revoke_share_access(server, share_name,
deleted_rule['access_to'])
for added_rule in add_rules:
self._grant_share_access(server, share_name,
added_rule['access_level'],
added_rule['access_to'])
def _subtract_access_rules(self, access_rules, subtracted_rules):
# Account names are case insensitive on Windows.
filter_rules = lambda rules: [ # noqa: E731
{'access_to': access_rule['access_to'].lower(),
'access_level': access_rule['access_level'],
'access_type': access_rule['access_type']}
for access_rule in rules]
return [rule for rule in filter_rules(access_rules)
if rule not in filter_rules(subtracted_rules)]
def _get_rule_updates(self, existing_rules, requested_rules):
added_rules = self._subtract_access_rules(requested_rules,
existing_rules)
deleted_rules = self._subtract_access_rules(existing_rules,
requested_rules)
return added_rules, deleted_rules
def _get_share_name(self, export_location):
return self._windows_utils.normalize_path(
export_location).split('\\')[-1]
def _get_export_location_template(self, old_export_location):
share_name = self._get_share_name(old_export_location)
return '\\\\%s' + ('\\%s' % share_name)
def _get_share_path_by_name(self, server, share_name,
ignore_missing=False):
cmd = ('Get-SmbShare -Name %s | '
'Select-Object -ExpandProperty Path' % share_name)
check_exit_code = not ignore_missing
(share_path, err) = self._remote_exec(server, cmd,
check_exit_code=check_exit_code)
return share_path.strip() if share_path else None
def get_share_path_by_export_location(self, server, export_location):
share_name = self._get_share_name(export_location)
return self._get_share_path_by_name(server, share_name)
def _share_exists(self, server, share_name):
share_path = self._get_share_path_by_name(server, share_name,
ignore_missing=True)
return bool(share_path)
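# Illustrative sketch (not part of the module above): _get_rule_updates is a
# case-insensitive set difference in both directions; with plain dicts (the
# account names here are made up) it behaves like this:
def _rule_diff_demo():
    lower = lambda rules: [dict(r, access_to=r['access_to'].lower())
                           for r in rules]
    existing = [{'access_to': 'Alice', 'access_level': 'rw',
                 'access_type': 'user'}]
    requested = [{'access_to': 'alice', 'access_level': 'ro',
                  'access_type': 'user'}]
    added = [r for r in lower(requested) if r not in lower(existing)]
    deleted = [r for r in lower(existing) if r not in lower(requested)]
    return added, deleted  # the level changed: one rule added, one deleted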
|
|
#!/usr/bin/env python3
import h5py
import os
import logging
import re
import hashlib
import numpy as np
from PIL import Image
import pyglet
try:
    from .errors import ConflictError
except ImportError:
    from errors import ConflictError
try:
    from .roi import ROI
except ImportError:
    from roi import ROI
from . import lib
from .utils import get_path
try: # for python2/3 compatibility
unicode
except NameError:
unicode=str
class Dataset(object):
def __init__(self,backing='dict',**kwargs):
self.sources = {}
self.openfiles = {}
self.data = None
self.callbacks = {
'image': lib.image_from_file,
'trace': lib.trace_from_file,
'name' : lib.name_from_info,
'audio': lib.audio_from_file,
}
        if 'keys' in kwargs:
            self.keys = kwargs['keys']
        else:
            self.keys = None
        # set self.keys before scanning: scan_directory reads it
        if 'dname' in kwargs:
            self.scan_directory(kwargs['dname'],**kwargs)
if backing.endswith('hdf5'):
self.__backing = h5py.File(get_path(backing))
else:
self.__backing = {}
self.settings = kwargs
self.settings['__openfiles'] = {}
def __getitem__(self,key):
return self.__backing[key]
def __setitem__(self,key,value):
if key in self:
del self[key]
if type(value) == tuple:
maxshape = (None,) + value[1:]
            if type(self.__backing) == dict:
                # mirror the hdf5 branch: string dtype only for id/name keys
                dtype = ('<U15' if key.lower() in ['id','name'] else 'float32')
                self.__backing[key] = np.ndarray(value, dtype=dtype)
elif type(self.__backing) == h5py.File:
dtype = (h5py.special_dtype(vlen=unicode)
if key.lower() in ['id','name'] else 'float32' )
self.__backing.create_dataset(
key,
shape=value,
maxshape=maxshape,
dtype=dtype)
elif type(value) == np.ndarray:
if type(self.__backing) == dict:
self.__backing[key] = value
elif type(self.__backing) == h5py.File:
dtype = (h5py.special_dtype(vlen=unicode)
if key.lower() in ['id','name'] else 'float32' )
maxshape = (None,) + value.shape[1:]
self.__backing.create_dataset(
key,
shape=value.shape,
maxshape=maxshape,
dtype=dtype)
self.__backing[key][:] = value
else: raise TypeError
def __delitem__(self,key):
del self.__backing[key]
def __contains__(self,item):
return item in self.__backing
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        try:
            self.__backing.close()
        except AttributeError:
            pass
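    # Allocation protocol used by read_sources (illustrative, hypothetical
    # shapes): assigning a tuple pre-allocates a dataset of that shape, and
    # rows are then written by index:
    #   ds = Dataset()
    #   ds['image'] = (10, 64, 64)        # allocate 10 slots of 64x64
    #   ds['image'][0] = np.zeros((64, 64))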
    def scan_directory(self, d, types, keys, sep=':', report_every=1000,
                       **_ignored):  # tolerate extra Dataset(**kwargs) entries such as dname
d = get_path(d)
logging.info("scanning %s",d)
N_matches = 0
        if self.keys is None:
            self.keys = keys
for dirpath,__,filenames in os.walk(d):
logging.debug('entering %s...',dirpath)
for filename in filenames:
fullpath = os.sep.join((dirpath,filename))
for stype in types: #"source type"
match = re.search(types[stype]['regex'],fullpath)
if match:
N_matches += 1
if not N_matches % report_every:
logging.info('matched %d files',N_matches)
match_keys = match.groupdict()
node = self.sources
ID = []
for key in keys:
if key in match_keys:
if match_keys[key] not in node:
node[match_keys[key]] = {}
node = node[match_keys[key]]
ID.append(match_keys[key])
else:
break
if stype not in node:
node[stype] = {}
node = node[stype]
ID = sep.join(ID)
#insert
if 'conflict' not in types[stype] or types[stype]['conflict']==None:
if 'path' in node:
logging.error(
'%s conflicts with %s for item %s, type %s',
node['path'],fullpath,ID,stype)
raise ConflictError(ID,
node['path'],fullpath)
else:
node['path'] = fullpath
                            if any((k in node for k in match_keys)):
                                raise Exception('conflicting metadata keys for item %s, type %s' % (ID, stype))
                            node.update(match_keys)
elif types[stype]['conflict'] == 'hash':
md5 = _hash_md5(fullpath)
if 'hash' in node and md5 != node['hash']:
logging.error(
('%s(md5sum:%s) hash-conflicts with '
'item %s(md5sum:%s) for item %s'),
node['path'],node['hash'],
fullpath,md5,ID)
raise ConflictError(ID,
node['path'],fullpath)
node['path'] = fullpath
node['hash'] = md5
                            if any((k in node and node[k] != match_keys[k]
                                    for k in match_keys)):
                                raise Exception('conflicting metadata keys for item %s, type %s' % (ID, stype))
node.update(match_keys)
elif types[stype]['conflict'] == 'list':
if 'path' not in node:
node['path'] = []
node['path'].append(fullpath)
for k in match_keys:
if k not in node:
node[k] = []
node[k].append(match_keys[k])
else:
raise NotImplementedError
def read_sources(self,types):
logging.info("started reading sources")
        self.dat = self.__read_sources(self.sources,types,{})
        dat = self.dat
ids = {d['id'] for d in dat}
N = len(ids)
self['id'] = np.array(list(ids))
for d in dat:
keyvals={}
for key in self.keys:
vals = set()
for i in d:
if key in d[i]:
if type(d[i][key]) == list:
vals.update(d[i][key])
else:
vals.update((d[i][key],))
assert len(vals)==1
val = vals.pop()
                keyvals[key] = val
for k in types:
val = self.callbacks[k](**dict(keyvals,**dict(d[k],**self.settings)))
if k not in self:
self[k] = (N,) + val.shape
i = np.where(self['id'][:] == d['id'])[0][0]
self[k][i] = val
def __read_sources(self,node,types,context,ID=""):
type_1 = {k:node[k] for k in node if k not in types}
type_2 = {k:node[k] for k in node if k in types}
d = dict(context,**type_2)
if all((x in d for x in types)):
d['id']=ID
return (d,)
else:
return [x for k in type_1
for x in self.__read_sources(type_1[k],types,d,"%s:%s"%(ID,k))]
def _hash_md5(fname,buff=1024):
with open(fname,'rb') as f:
m = hashlib.md5()
for b in __buffered_read(f,buff):
m.update(b)
return m.hexdigest()
def __buffered_read(f,buff):
dat = True
while dat:
dat = f.read(buff)
if dat: yield dat
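# Note: _hash_md5 streams the file through __buffered_read in `buff`-sized
# chunks, so arbitrarily large files are hashed without being loaded fully
# into memory; the resulting digest is identical to hashing the file at once.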
_types = {
'trace': {
'regex': r'(?P<study>\d+\w+)_(?P<frame>\d+)\.(?:jpg|png)\.(?P<tracer>\w+)\.traced\.txt$',
'conflict': 'list'
},
'image': {
'regex': r'(?P<study>\d+\w+)_(?P<frame>\d+)\.(?P<ext>jpg|png)$',
'conflict': 'hash'
},
'name': {
'regex': r'(?P<fname>(?P<study>\d+\w+)_(?P<frame>\d+)\.(?P<ext>jpg|png))$',
}
}
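# Example matches for the patterns above (hypothetical filenames):
#   '001abc_0042.png'                  -> 'image' and 'name'
#   '001abc_0042.png.alice.traced.txt' -> 'trace' (tracer='alice')
# both with study='001abc' and frame='0042', which become the tree keys.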
if __name__=='__main__':
roi = (140,320,250,580)
n_points = 32
keys = ['study','frame']
types = _types
ds = Dataset(backing="test.hdf5",roi=roi,n_points=n_points)
ds.scan_directory('./test_data',types,keys)
ds.read_sources(types.keys())
|
|
#---------------------------------------------------------------------------
# predict.py
#
# Author : Felix Gonda
# Date : July 10, 2015
# School : Harvard University
#
# Project : Master Thesis
# An Interactive Deep Learning Toolkit for
# Automatic Segmentation of Images
#
# Summary : This file contains the implementation of a module that manages
#           image segmentation. It runs the latest activated
#           project's classifier to perform segmentation on all images
#           in the project.
#---------------------------------------------------------------------------
import os
import sys
import signal
import threading
import time
import numpy as np
import StringIO
import base64
import math
import zlib
from scipy.misc import imsave
import tifffile as tiff
import glob
base_path = os.path.dirname(__file__)
sys.path.insert(1,os.path.join(base_path, '../common'))
sys.path.insert(2,os.path.join(base_path, './cnn'))
sys.path.insert(3,os.path.join(base_path, './mlp'))
sys.path.insert(4,os.path.join(base_path, '../database'))
DATA_PATH_IMAGES = os.path.join(base_path, '../../data/input')
DATA_PATH_SEGMENTATION = os.path.join(base_path, '../../data/segmentation')
DATA_PATH = os.path.join(base_path, '../../data')
DATA_PATH_LABELS = os.path.join(base_path, '../../data/labels')
DATA_NAME = 'main'
from manager import Manager
from utility import Utility
from utility import enum
from settings import Settings
from paths import Paths
from db import DB
from h5data import H5Data
from project import Project
#from performance import Performance
#---------------------------------------------------------------------------
class Prediction(Manager):
#-------------------------------------------------------------------
    # Reads the input image and normalizes it. Also creates
    # a version of the input image with corner pixels
    # mirrored based on the sample size.
# arguments: - path : directory path where image is located
# - id : the unique identifier of the image
# - pad : the amount to pad the image by.
# return : returns original image and a padded version
#-------------------------------------------------------------------
def __init__(self):
Manager.__init__( self, 'prediction')
self.high = []
self.low = []
self.priority = 0
self.modTime = None
self.revision = 0
def can_load_model(self, path):
return os.path.exists( path )
#-------------------------------------------------------------------
# Retrieve segmentation tasks from database and call classifier
# to perform actual work.
#-------------------------------------------------------------------
def work(self, project):
if not self.online:
self.work_offline(project)
self.done = True
return
start_time = time.clock()
if project is None:
return
print 'prediction.... running', len(self.high)
if len(self.high) == 0:
self.high = DB.getPredictionImages( project.id, 1)
#FG - march 4th 2016
#if len(self.low) == 0:
# self.low = DB.getPredictionImages( project.id, 0 )
'''
for img in self.high:
print 'hid:', img.id, img.modelModifiedTime, img.segmentationTime
print '----'
for img in self.low:
print 'lid:', img.id, img.modelModifiedTime, img.segmentationTime
exit(1)
'''
task = None
if (self.priority == 0 or len(self.low) == 0) and len(self.high) > 0:
self.priority = 1
task = self.high[0]
del self.high[0]
elif len(self.low) > 0:
self.priority = 0
task = self.low[0]
del self.low[0]
        if task is None:
return
has_new_model = (self.modTime != project.modelTime)
revision = DB.getRevision( project.id )
print 'revision:', revision
#has_new_model = (revision != self.revision or has_new_model)
# reload the model if it changed
if has_new_model:
#self.revision = revision
print 'initializing...'
self.model.initialize()
self.modTime = project.modelTime
# read image to segment
basepath = Paths.TrainGrayscale if task.purpose == 0 else Paths.ValidGrayscale
path = '%s/%s.tif'%(basepath, task.id)
#success, image = Utility.get_image_padded(path, project.patchSize ) #model.get_patch_size())
print 'segment - path:', path
print 'priority - ', task.segmentationPriority
# perform segmentation
Utility.report_status('segmenting %s'%(task.id),'')
#probs = self.model.predict( path )
#probs = self.model.classify( image )
# serialize to file
segPath = '%s/%s.%s.seg'%(Paths.Segmentation, task.id, project.id)
seg = H5Data.get_slice( DATA_PATH, DATA_NAME, task.id )
self.classify_n_save( seg, segPath, project )
#self.classify_n_save( path, segPath, project )
H5Data.generate_preview( DATA_PATH, DATA_NAME, DATA_PATH_LABELS, DATA_PATH_SEGMENTATION, DATA_PATH_IMAGES, task.id, project.id )
end_time = time.clock()
duration = (end_time - start_time)
DB.finishPrediction( self.projectId, task.id, duration, self.modTime )
# measure performance if new model
#if has_new_model:
# Performance.measureOnline( self.classifier.model, self.projectId )
#-------------------------------------------------------------------
    # perform offline segmentation of images in a specific directory
#-------------------------------------------------------------------
def work_offline(self, project):
imagePaths = sorted( glob.glob( '%s/*.tif'%(Paths.TrainGrayscale) ) )
for path in imagePaths:
if self.done:
break
name = Utility.get_filename_noext( path )
print 'path:', path
Utility.report_status('segmenting', '%s'%(name))
#segPath = '%s/%s.offline.seg'%(Paths.TrainGrayscale, name)
segPath = '%s/%s.%s.offline.seg'%(Paths.Segmentation, name, project.id)
self.classify_n_save( path, segPath, project )
def classify_n_save(self, image, segPath, project):
#image = tiff.imread( imagePath )
image = Utility.normalizeImage( image )
# classify the image
#prob = self.model.classify( image=image, mean=project.mean, std=project.std )
prob = self.model.predict( image=image, mean=project.mean, std=project.std, threshold=project.threshold)
#TODO: how to deal with multiple labels
# extract the predicted labels
'''
prob[ prob >= project.threshold ] = 9
prob[ prob < project.threshold ] = 1
prob[ prob == 9 ] = 0
'''
prob = prob.astype(dtype=int)
prob = prob.flatten()
print 'results:', np.bincount( prob ), self.revision
self.save_probs( prob, segPath)
#-------------------------------------------------------------------
# save probability map
#-------------------------------------------------------------------
def save_probs(self, data, path):
output = StringIO.StringIO()
output.write(data.tolist())
content = output.getvalue()
encoded = base64.b64encode(content)
compressed = zlib.compress(encoded)
with open(path, 'w') as outfile:
outfile.write(compressed)
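    #-------------------------------------------------------------------
    # load probability map (hypothetical inverse of save_probs, shown
    # here for illustration only: the stored payload is decompressed and
    # then base64-decoded, mirroring the encode-then-compress order above)
    #-------------------------------------------------------------------
    def load_probs(self, path):
        with open(path, 'r') as infile:
            compressed = infile.read()
        encoded = zlib.decompress(compressed)
        return base64.b64decode(encoded)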
manager = None
def signal_handler(sig, frame):
if manager is not None:
manager.shutdown()
#---------------------------------------------------------------------------
# Entry point to the main function of the program.
#---------------------------------------------------------------------------
if __name__ == '__main__':
print sys.argv
Utility.report_status('running prediction module', '')
signal.signal(signal.SIGINT, signal_handler)
manager = Prediction()
Manager.start( sys.argv, manager )
|
|
# Authors : Alexandre Gramfort, alexandre.gramfort@telecom-paristech.fr (2011)
# Denis A. Engemann <denis.engemann@gmail.com>
# License : BSD 3-clause
import numpy as np
from ..parallel import parallel_func
from ..io.pick import _pick_data_channels
from ..utils import logger, verbose, deprecated, _time_mask
from .multitaper import _psd_multitaper
@deprecated('This will be deprecated in release v0.12, see psd_welch.')
@verbose
def compute_raw_psd(raw, tmin=0., tmax=None, picks=None, fmin=0,
fmax=np.inf, n_fft=2048, n_overlap=0,
proj=False, n_jobs=1, verbose=None):
"""Compute power spectral density with average periodograms.
Parameters
----------
raw : instance of Raw
The raw data.
tmin : float
Minimum time instant to consider (in seconds).
tmax : float | None
Maximum time instant to consider (in seconds). None will use the
end of the file.
picks : array-like of int | None
The selection of channels to include in the computation.
If None, take all channels.
fmin : float
Min frequency of interest
fmax : float
Max frequency of interest
n_fft : int
        The length of the tapers, i.e. the windows. The smaller
        it is, the smoother the PSDs are.
n_overlap : int
The number of points of overlap between blocks. The default value
is 0 (no overlap).
proj : bool
Apply SSP projection vectors.
n_jobs : int
Number of CPUs to use in the computation.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
psd : array of float
The PSD for all channels
    freqs : array of float
The frequencies
See Also
--------
psd_welch, psd_multitaper
"""
from scipy.signal import welch
from ..io.base import _BaseRaw
if not isinstance(raw, _BaseRaw):
raise ValueError('Input must be an instance of Raw')
tmax = raw.times[-1] if tmax is None else tmax
start, stop = raw.time_as_index([tmin, tmax])
picks = slice(None) if picks is None else picks
if proj:
# Copy first so it's not modified
raw = raw.copy().apply_proj()
data, times = raw[picks, start:(stop + 1)]
n_fft, n_overlap = _check_nfft(len(times), n_fft, n_overlap)
n_fft = int(n_fft)
Fs = raw.info['sfreq']
logger.info("Effective window size : %0.3f (s)" % (n_fft / float(Fs)))
parallel, my_pwelch, n_jobs = parallel_func(_pwelch, n_jobs=n_jobs,
verbose=verbose)
freqs = np.arange(n_fft // 2 + 1) * (Fs / n_fft)
freq_mask = (freqs >= fmin) & (freqs <= fmax)
freqs = freqs[freq_mask]
psds = np.array(parallel(my_pwelch([channel],
noverlap=n_overlap, nfft=n_fft, fs=Fs,
freq_mask=freq_mask, welch_fun=welch)
for channel in data))[:, 0, :]
return psds, freqs
def _pwelch(epoch, noverlap, nfft, fs, freq_mask, welch_fun):
"""Aux function"""
return welch_fun(epoch, nperseg=nfft, noverlap=noverlap,
nfft=nfft, fs=fs)[1][..., freq_mask]
def _compute_psd(data, fmin, fmax, Fs, n_fft, psd, n_overlap, pad_to):
"""Compute the PSD"""
out = [psd(d, Fs=Fs, NFFT=n_fft, noverlap=n_overlap, pad_to=pad_to)
for d in data]
psd = np.array([o[0] for o in out])
freqs = out[0][1]
mask = (freqs >= fmin) & (freqs <= fmax)
freqs = freqs[mask]
return psd[:, mask], freqs
def _check_nfft(n, n_fft, n_overlap):
"""Helper to make sure n_fft and n_overlap make sense"""
n_fft = n if n_fft > n else n_fft
n_overlap = n_fft - 1 if n_overlap >= n_fft else n_overlap
return n_fft, n_overlap
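# For example, _check_nfft(100, 256, 0) returns (100, 0): n_fft is clamped to
# the signal length, and an overlap >= n_fft would be reduced to n_fft - 1.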
def _check_psd_data(inst, tmin, tmax, picks, proj):
"""Helper to do checks on PSD data / pull arrays from inst"""
from ..io.base import _BaseRaw
from ..epochs import _BaseEpochs
from ..evoked import Evoked
if not isinstance(inst, (_BaseEpochs, _BaseRaw, Evoked)):
raise ValueError('epochs must be an instance of Epochs, Raw, or'
'Evoked. Got type {0}'.format(type(inst)))
time_mask = _time_mask(inst.times, tmin, tmax, sfreq=inst.info['sfreq'])
if picks is None:
picks = _pick_data_channels(inst.info, with_ref_meg=False)
if proj:
# Copy first so it's not modified
inst = inst.copy().apply_proj()
sfreq = inst.info['sfreq']
if isinstance(inst, _BaseRaw):
start, stop = np.where(time_mask)[0][[0, -1]]
data, times = inst[picks, start:(stop + 1)]
elif isinstance(inst, _BaseEpochs):
data = inst.get_data()[:, picks][:, :, time_mask]
elif isinstance(inst, Evoked):
data = inst.data[picks][:, time_mask]
return data, sfreq
def _psd_welch(x, sfreq, fmin=0, fmax=np.inf, n_fft=256, n_overlap=0,
n_jobs=1):
"""Compute power spectral density (PSD) using Welch's method.
x : array, shape=(..., n_times)
The data to compute PSD from.
sfreq : float
The sampling frequency.
fmin : float
The lower frequency of interest.
fmax : float
The upper frequency of interest.
n_fft : int
        The length of the tapers, i.e. the windows. The smaller
        it is, the smoother the PSDs are. The default value is 256.
If ``n_fft > len(inst.times)``, it will be adjusted down to
``len(inst.times)``.
n_overlap : int
The number of points of overlap between blocks. Will be adjusted
to be <= n_fft. The default value is 0.
n_jobs : int
Number of CPUs to use in the computation.
Returns
-------
    psds : ndarray, shape (..., n_freqs)
The power spectral densities. All dimensions up to the last will
be the same as input.
freqs : ndarray, shape (n_freqs,)
The frequencies.
"""
from scipy.signal import welch
dshape = x.shape[:-1]
n_times = x.shape[-1]
x = x.reshape(-1, n_times)
# Prep the PSD
n_fft, n_overlap = _check_nfft(n_times, n_fft, n_overlap)
win_size = n_fft / float(sfreq)
logger.info("Effective window size : %0.3f (s)" % win_size)
freqs = np.arange(n_fft // 2 + 1, dtype=float) * (sfreq / n_fft)
freq_mask = (freqs >= fmin) & (freqs <= fmax)
freqs = freqs[freq_mask]
# Parallelize across first N-1 dimensions
parallel, my_pwelch, n_jobs = parallel_func(_pwelch, n_jobs=n_jobs)
x_splits = np.array_split(x, n_jobs)
f_psd = parallel(my_pwelch(d, noverlap=n_overlap, nfft=n_fft,
fs=sfreq, freq_mask=freq_mask,
welch_fun=welch)
for d in x_splits)
# Combining/reshaping to original data shape
psds = np.concatenate(f_psd, axis=0)
psds = psds.reshape(np.hstack([dshape, -1]))
return psds, freqs
@verbose
def psd_welch(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None, n_fft=256,
n_overlap=0, picks=None, proj=False, n_jobs=1, verbose=None):
"""Compute the power spectral density (PSD) using Welch's method.
    Calculates periodograms for a sliding window over the
time dimension, then averages them together for each channel/epoch.
Parameters
----------
inst : instance of Epochs or Raw or Evoked
The data for PSD calculation
fmin : float
Min frequency of interest
fmax : float
Max frequency of interest
tmin : float | None
Min time of interest
tmax : float | None
Max time of interest
n_fft : int
        The length of the tapers, i.e. the windows. The smaller
        it is, the smoother the PSDs are. The default value is 256.
If ``n_fft > len(inst.times)``, it will be adjusted down to
``len(inst.times)``.
n_overlap : int
The number of points of overlap between blocks. Will be adjusted
to be <= n_fft. The default value is 0.
picks : array-like of int | None
The selection of channels to include in the computation.
If None, take all channels.
proj : bool
Apply SSP projection vectors. If inst is ndarray this is not used.
n_jobs : int
Number of CPUs to use in the computation.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
psds : ndarray, shape (..., n_freqs)
The power spectral densities. If input is of type Raw,
then psds will be shape (n_channels, n_freqs), if input is type Epochs
then psds will be shape (n_epochs, n_channels, n_freqs).
freqs : ndarray, shape (n_freqs,)
The frequencies.
See Also
--------
mne.io.Raw.plot_psd, mne.Epochs.plot_psd, psd_multitaper
Notes
-----
.. versionadded:: 0.12.0
"""
# Prep data
data, sfreq = _check_psd_data(inst, tmin, tmax, picks, proj)
return _psd_welch(data, sfreq, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_overlap=n_overlap, n_jobs=n_jobs)
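# Minimal usage sketch (assuming `raw` is a preloaded mne.io.Raw instance):
#   psds, freqs = psd_welch(raw, fmin=1., fmax=40., n_fft=1024)
#   # psds.shape == (n_channels, len(freqs))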
@verbose
def psd_multitaper(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None,
bandwidth=None, adaptive=False, low_bias=True,
normalization='length', picks=None, proj=False,
n_jobs=1, verbose=None):
"""Compute the power spectral density (PSD) using multitapers.
Calculates spectral density for orthogonal tapers, then averages them
together for each channel/epoch. See [1] for a description of the tapers
and [2] for the general method.
Parameters
----------
inst : instance of Epochs or Raw or Evoked
The data for PSD calculation.
fmin : float
Min frequency of interest
fmax : float
Max frequency of interest
tmin : float | None
Min time of interest
tmax : float | None
Max time of interest
bandwidth : float
The bandwidth of the multi taper windowing function in Hz. The default
value is a window half-bandwidth of 4.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
picks : array-like of int | None
The selection of channels to include in the computation.
If None, take all channels.
proj : bool
Apply SSP projection vectors. If inst is ndarray this is not used.
n_jobs : int
Number of CPUs to use in the computation.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
psds : ndarray, shape (..., n_freqs)
The power spectral densities. If input is of type Raw,
then psds will be shape (n_channels, n_freqs), if input is type Epochs
then psds will be shape (n_epochs, n_channels, n_freqs).
freqs : ndarray, shape (n_freqs,)
The frequencies.
References
----------
.. [1] Slepian, D. "Prolate spheroidal wave functions, Fourier analysis,
and uncertainty V: The discrete case." Bell System Technical
Journal, vol. 57, 1978.
.. [2] Percival D.B. and Walden A.T. "Spectral Analysis for Physical
Applications: Multitaper and Conventional Univariate Techniques."
Cambridge University Press, 1993.
See Also
--------
mne.io.Raw.plot_psd, mne.Epochs.plot_psd, psd_welch
Notes
-----
.. versionadded:: 0.12.0
"""
# Prep data
data, sfreq = _check_psd_data(inst, tmin, tmax, picks, proj)
return _psd_multitaper(data, sfreq, fmin=fmin, fmax=fmax,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias,
normalization=normalization, n_jobs=n_jobs)
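# Minimal usage sketch (assuming `epochs` is an mne.Epochs instance):
#   psds, freqs = psd_multitaper(epochs, fmin=2., fmax=30., bandwidth=4.)
#   # psds.shape == (n_epochs, n_channels, len(freqs))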
@deprecated('This will be deprecated in release v0.12, see psd_welch.')
@verbose
def compute_epochs_psd(epochs, picks=None, fmin=0, fmax=np.inf, tmin=None,
tmax=None, n_fft=256, n_overlap=0, proj=False,
n_jobs=1, verbose=None):
"""Compute power spectral density with average periodograms.
Parameters
----------
epochs : instance of Epochs
The epochs.
picks : array-like of int | None
The selection of channels to include in the computation.
If None, take all channels.
fmin : float
Min frequency of interest
fmax : float
Max frequency of interest
tmin : float | None
Min time of interest
tmax : float | None
Max time of interest
n_fft : int
        The length of the tapers, i.e. the windows. The smaller
        it is, the smoother the PSDs are. The default value is 256.
If ``n_fft > len(epochs.times)``, it will be adjusted down to
``len(epochs.times)``.
n_overlap : int
The number of points of overlap between blocks. Will be adjusted
to be <= n_fft.
proj : bool
Apply SSP projection vectors.
n_jobs : int
Number of CPUs to use in the computation.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
psds : ndarray (n_epochs, n_channels, n_freqs)
The power spectral densities.
freqs : ndarray, shape (n_freqs,)
The frequencies.
See Also
--------
psd_welch, psd_multitaper
"""
from scipy.signal import welch
from ..epochs import _BaseEpochs
if not isinstance(epochs, _BaseEpochs):
raise ValueError("Input must be an instance of Epochs")
n_fft = int(n_fft)
Fs = epochs.info['sfreq']
if picks is None:
picks = _pick_data_channels(epochs.info, with_ref_meg=False)
n_fft, n_overlap = _check_nfft(len(epochs.times), n_fft, n_overlap)
if tmin is not None or tmax is not None:
time_mask = _time_mask(epochs.times, tmin, tmax,
sfreq=epochs.info['sfreq'])
else:
time_mask = slice(None)
if proj:
# Copy first so it's not modified
epochs = epochs.copy().apply_proj()
data = epochs.get_data()[:, picks][:, :, time_mask]
logger.info("Effective window size : %0.3f (s)" % (n_fft / float(Fs)))
freqs = np.arange(n_fft // 2 + 1, dtype=float) * (Fs / n_fft)
freq_mask = (freqs >= fmin) & (freqs <= fmax)
freqs = freqs[freq_mask]
psds = np.empty(data.shape[:-1] + (freqs.size,))
parallel, my_pwelch, n_jobs = parallel_func(_pwelch, n_jobs=n_jobs,
verbose=verbose)
for idx, fepochs in zip(np.array_split(np.arange(len(data)), n_jobs),
parallel(my_pwelch(epoch, noverlap=n_overlap,
nfft=n_fft, fs=Fs,
freq_mask=freq_mask,
welch_fun=welch)
for epoch in np.array_split(data,
n_jobs))):
for i_epoch, f_epoch in zip(idx, fepochs):
psds[i_epoch, :, :] = f_epoch
return psds, freqs
|
|
# coding: utf-8
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.configuration import Configuration
class SortableSearchRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'limit': 'int',
'offset': 'int',
'query': 'list[SearchQuery]',
'sort': 'Sorting'
}
attribute_map = {
'limit': 'limit',
'offset': 'offset',
'query': 'query',
'sort': 'sort'
}
def __init__(self, limit=None, offset=None, query=None, sort=None, _configuration=None): # noqa: E501
"""SortableSearchRequest - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._limit = None
self._offset = None
self._query = None
self._sort = None
self.discriminator = None
if limit is not None:
self.limit = limit
if offset is not None:
self.offset = offset
if query is not None:
self.query = query
if sort is not None:
self.sort = sort
@property
def limit(self):
"""Gets the limit of this SortableSearchRequest. # noqa: E501
The number of results to return. Default: 100, Maximum allowed: 1000 # noqa: E501
:return: The limit of this SortableSearchRequest. # noqa: E501
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this SortableSearchRequest.
The number of results to return. Default: 100, Maximum allowed: 1000 # noqa: E501
:param limit: The limit of this SortableSearchRequest. # noqa: E501
:type: int
"""
if (self._configuration.client_side_validation and
limit is not None and limit > 1000): # noqa: E501
raise ValueError("Invalid value for `limit`, must be a value less than or equal to `1000`") # noqa: E501
if (self._configuration.client_side_validation and
limit is not None and limit < 1): # noqa: E501
raise ValueError("Invalid value for `limit`, must be a value greater than or equal to `1`") # noqa: E501
self._limit = limit
@property
def offset(self):
"""Gets the offset of this SortableSearchRequest. # noqa: E501
The number of results to skip before returning values. Default: 0 # noqa: E501
:return: The offset of this SortableSearchRequest. # noqa: E501
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this SortableSearchRequest.
The number of results to skip before returning values. Default: 0 # noqa: E501
:param offset: The offset of this SortableSearchRequest. # noqa: E501
:type: int
"""
self._offset = offset
@property
def query(self):
"""Gets the query of this SortableSearchRequest. # noqa: E501
A list of queries by which to limit the search results. Entities that match ALL queries in the list are returned # noqa: E501
:return: The query of this SortableSearchRequest. # noqa: E501
:rtype: list[SearchQuery]
"""
return self._query
@query.setter
def query(self, query):
"""Sets the query of this SortableSearchRequest.
A list of queries by which to limit the search results. Entities that match ALL queries in the list are returned # noqa: E501
:param query: The query of this SortableSearchRequest. # noqa: E501
:type: list[SearchQuery]
"""
self._query = query
@property
def sort(self):
"""Gets the sort of this SortableSearchRequest. # noqa: E501
:return: The sort of this SortableSearchRequest. # noqa: E501
:rtype: Sorting
"""
return self._sort
@sort.setter
def sort(self, sort):
"""Sets the sort of this SortableSearchRequest.
:param sort: The sort of this SortableSearchRequest. # noqa: E501
:type: Sorting
"""
self._sort = sort
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SortableSearchRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SortableSearchRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, SortableSearchRequest):
return True
return self.to_dict() != other.to_dict()
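# Minimal usage sketch (hypothetical field values; SearchQuery and Sorting
# come from the same generated wavefront_api_client package):
#   req = SortableSearchRequest(limit=100, offset=0)
#   payload = req.to_dict()  # dict form, ready for JSON serialization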
|
|
"""
gdalinfo tests/gis_tests/data/rasters/raster.tif:
Driver: GTiff/GeoTIFF
Files: tests/gis_tests/data/rasters/raster.tif
Size is 163, 174
Coordinate System is:
PROJCS["NAD83 / Florida GDL Albers",
GEOGCS["NAD83",
DATUM["North_American_Datum_1983",
SPHEROID["GRS 1980",6378137,298.2572221010002,
AUTHORITY["EPSG","7019"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6269"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4269"]],
PROJECTION["Albers_Conic_Equal_Area"],
PARAMETER["standard_parallel_1",24],
PARAMETER["standard_parallel_2",31.5],
PARAMETER["latitude_of_center",24],
PARAMETER["longitude_of_center",-84],
PARAMETER["false_easting",400000],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AUTHORITY["EPSG","3086"]]
Origin = (511700.468070655711927,435103.377123198588379)
Pixel Size = (100.000000000000000,-100.000000000000000)
Metadata:
AREA_OR_POINT=Area
Image Structure Metadata:
INTERLEAVE=BAND
Corner Coordinates:
Upper Left ( 511700.468, 435103.377) ( 82d51'46.16"W, 27d55' 1.53"N)
Lower Left ( 511700.468, 417703.377) ( 82d51'52.04"W, 27d45'37.50"N)
Upper Right ( 528000.468, 435103.377) ( 82d41'48.81"W, 27d54'56.30"N)
Lower Right ( 528000.468, 417703.377) ( 82d41'55.54"W, 27d45'32.28"N)
Center ( 519850.468, 426403.377) ( 82d46'50.64"W, 27d50'16.99"N)
Band 1 Block=163x50 Type=Byte, ColorInterp=Gray
NoData Value=15
"""
import os
import struct
import tempfile
import unittest
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.shortcuts import numpy
from django.utils import six
from django.utils._os import upath
from ..data.rasters.textrasters import JSON_RASTER
if HAS_GDAL:
from django.contrib.gis.gdal import GDALRaster
from django.contrib.gis.gdal.raster.band import GDALBand
@unittest.skipUnless(HAS_GDAL, "GDAL is required")
class GDALRasterTests(unittest.TestCase):
"""
Test a GDALRaster instance created from a file (GeoTiff).
"""
def setUp(self):
self.rs_path = os.path.join(os.path.dirname(upath(__file__)),
'../data/rasters/raster.tif')
self.rs = GDALRaster(self.rs_path)
def test_rs_name_repr(self):
self.assertEqual(self.rs_path, self.rs.name)
        six.assertRegex(self, repr(self.rs), r"<Raster object at 0x\w+>")
def test_rs_driver(self):
self.assertEqual(self.rs.driver.name, 'GTiff')
def test_rs_size(self):
self.assertEqual(self.rs.width, 163)
self.assertEqual(self.rs.height, 174)
def test_rs_srs(self):
self.assertEqual(self.rs.srs.srid, 3086)
self.assertEqual(self.rs.srs.units, (1.0, 'metre'))
def test_geotransform_and_friends(self):
# Assert correct values for file based raster
self.assertEqual(self.rs.geotransform,
[511700.4680706557, 100.0, 0.0, 435103.3771231986, 0.0, -100.0])
self.assertEqual(self.rs.origin, [511700.4680706557, 435103.3771231986])
self.assertEqual(self.rs.origin.x, 511700.4680706557)
self.assertEqual(self.rs.origin.y, 435103.3771231986)
self.assertEqual(self.rs.scale, [100.0, -100.0])
self.assertEqual(self.rs.scale.x, 100.0)
self.assertEqual(self.rs.scale.y, -100.0)
self.assertEqual(self.rs.skew, [0, 0])
self.assertEqual(self.rs.skew.x, 0)
self.assertEqual(self.rs.skew.y, 0)
# Create in-memory rasters and change gtvalues
rsmem = GDALRaster(JSON_RASTER)
rsmem.geotransform = range(6)
self.assertEqual(rsmem.geotransform, [float(x) for x in range(6)])
self.assertEqual(rsmem.origin, [0, 3])
self.assertEqual(rsmem.origin.x, 0)
self.assertEqual(rsmem.origin.y, 3)
self.assertEqual(rsmem.scale, [1, 5])
self.assertEqual(rsmem.scale.x, 1)
self.assertEqual(rsmem.scale.y, 5)
self.assertEqual(rsmem.skew, [2, 4])
self.assertEqual(rsmem.skew.x, 2)
self.assertEqual(rsmem.skew.y, 4)
self.assertEqual(rsmem.width, 5)
self.assertEqual(rsmem.height, 5)
def test_rs_extent(self):
self.assertEqual(self.rs.extent,
(511700.4680706557, 417703.3771231986,
528000.4680706557, 435103.3771231986))
def test_rs_bands(self):
self.assertEqual(len(self.rs.bands), 1)
self.assertIsInstance(self.rs.bands[0], GDALBand)
def test_file_based_raster_creation(self):
# Prepare tempfile
rstfile = tempfile.NamedTemporaryFile(suffix='.tif')
# Create file-based raster from scratch
GDALRaster({
'datatype': self.rs.bands[0].datatype(),
'driver': 'tif',
'name': rstfile.name,
'width': 163,
'height': 174,
'nr_of_bands': 1,
'srid': self.rs.srs.wkt,
'origin': (self.rs.origin.x, self.rs.origin.y),
'scale': (self.rs.scale.x, self.rs.scale.y),
'skew': (self.rs.skew.x, self.rs.skew.y),
'bands': [{
'data': self.rs.bands[0].data(),
'nodata_value': self.rs.bands[0].nodata_value
}]
})
# Reload newly created raster from file
restored_raster = GDALRaster(rstfile.name)
self.assertEqual(restored_raster.srs.wkt, self.rs.srs.wkt)
self.assertEqual(restored_raster.geotransform, self.rs.geotransform)
if numpy:
numpy.testing.assert_equal(
restored_raster.bands[0].data(),
self.rs.bands[0].data()
)
else:
self.assertEqual(restored_raster.bands[0].data(), self.rs.bands[0].data())
@unittest.skipUnless(HAS_GDAL, "GDAL is required")
class GDALBandTests(unittest.TestCase):
def setUp(self):
self.rs_path = os.path.join(os.path.dirname(upath(__file__)),
'../data/rasters/raster.tif')
rs = GDALRaster(self.rs_path)
self.band = rs.bands[0]
def test_band_data(self):
self.assertEqual(self.band.width, 163)
self.assertEqual(self.band.height, 174)
self.assertEqual(self.band.description, '')
self.assertEqual(self.band.datatype(), 1)
self.assertEqual(self.band.datatype(as_string=True), 'GDT_Byte')
self.assertEqual(self.band.min, 0)
self.assertEqual(self.band.max, 255)
self.assertEqual(self.band.nodata_value, 15)
def test_read_mode_error(self):
# Open raster in read mode
rs = GDALRaster(self.rs_path, write=False)
band = rs.bands[0]
        # Setting attributes in read mode raises exception in the _flush method
self.assertRaises(GDALException, setattr, band, 'nodata_value', 10)
def test_band_data_setters(self):
# Create in-memory raster and get band
rsmem = GDALRaster({
'datatype': 1,
'driver': 'MEM',
'name': 'mem_rst',
'width': 10,
'height': 10,
'nr_of_bands': 1,
'srid': 4326,
})
bandmem = rsmem.bands[0]
# Set nodata value
bandmem.nodata_value = 99
self.assertEqual(bandmem.nodata_value, 99)
# Set data for entire dataset
bandmem.data(range(100))
if numpy:
numpy.testing.assert_equal(bandmem.data(), numpy.arange(100).reshape(10, 10))
else:
self.assertEqual(bandmem.data(), list(range(100)))
# Prepare data for setting values in subsequent tests
block = list(range(100, 104))
packed_block = struct.pack('<' + 'B B B B', *block)
# Set data from list
bandmem.data(block, (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from packed block
bandmem.data(packed_block, (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from bytes
bandmem.data(bytes(packed_block), (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from bytearray
bandmem.data(bytearray(packed_block), (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from memoryview
bandmem.data(six.memoryview(packed_block), (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from numpy array
if numpy:
bandmem.data(numpy.array(block, dtype='int8').reshape(2, 2), (1, 1), (2, 2))
numpy.testing.assert_equal(
bandmem.data(offset=(1, 1), size=(2, 2)),
numpy.array(block).reshape(2, 2)
)
# Test json input data
rsmemjson = GDALRaster(JSON_RASTER)
bandmemjson = rsmemjson.bands[0]
if numpy:
numpy.testing.assert_equal(
bandmemjson.data(),
numpy.array(range(25)).reshape(5, 5)
)
else:
self.assertEqual(bandmemjson.data(), list(range(25)))
|
|
import sys
sys.path.append("..")
import unittest
from messageparser import *
class TestMessageParser(unittest.TestCase):
def test_beginTurnEncoding(self):
"""
Checks the Encoding of 11 Begin_Turn Message by MessageParser.
Format:
type:report;status:11;
"""
msg = MessageParser().encode("report",{"status": "11"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:report;status:11;")
def test_beginTurnDecoding(self):
"""
Checks the Decoding of 11 Begin_Turn Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:11;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"11")
def test_updateLobbyEncoding(self):
"""
Checks the Encoding of 16 Update_Lobby Message by MessageParser.
Format:
type:report;status:16;params;
params:
number_of_clients:[number n];
number_of_games:[number m];
game_name_0:[name];...;game_name_m-1:[name];
        game_players_count_0:[1|2];...;game_players_count_m-1:[1|2];
game_player_0_i:[player_identifier];...;game_player_m-1_i:[player_identifier] i=0,1
player_name_0:[name];...;player_name_n-1:[name];
player_identifier_0:[identifier];...;player_identifier_n-1:[identifier];
"""
params={"status": "16", "number_of_clients": "3", "number_of_games": "2", "game_name_0": "FCB",
"game_name_1": "HSV", "game_players_count_0": "2", "game_players_count_1": "1",
"game_player_0_0": "1000", "game_player_0_1": "2000", "game_player_1_0": "3000",
"player_name_0": "Dari","player_name_1": "Max","player_name_2": "",
"player_identifier_0": "1000", "player_identifier_1": "2000", "player_identifier_2": "3000"}
msg = MessageParser().encode("report",params)
msg=msg[2:].decode('utf-8') # decode it from bytes to string
#print(msg)
        check = True
        for key, value in params.items():
            if (key + ":" + value) not in msg:
                check = False
        self.assertTrue(check)
def test_updateLobbyDecoding(self):
"""
Checks the Decoding of 16 Update_Lobby Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;"+
"status:16;number_of_clients:3;number_of_games:2;game_name_0:FCB;"+
"game_name_1:HSV;game_players_count_0:2;game_players_count_1:1;"+
"game_player_0_0:1000;game_player_0_1:2000;game_player_1_0:3000;"+
"player_name_0:Dari;player_name_1:Max;player_name_2:;"+
"player_identifier_0:1000;player_identifier_1:2000;"+
"player_identifier_2:3000;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params),16 )
self.assertEqual(params["status"],"16")
self.assertEqual(params["number_of_games"],"2")
self.assertEqual(params["number_of_clients"],"3")
self.assertEqual(params["game_name_0"],"FCB")
self.assertEqual(params["game_name_1"],"HSV")
self.assertEqual(params["game_players_count_0"],"2")
self.assertEqual(params["game_players_count_1"],"1")
self.assertEqual(params["game_player_0_0"],"1000")
self.assertEqual(params["game_player_0_1"],"2000")
self.assertEqual(params["game_player_1_0"],"3000")
self.assertEqual(params["player_name_0"],"Dari")
self.assertEqual(params["player_name_1"],"Max")
self.assertEqual(params["player_name_2"],"")
self.assertEqual(params["player_identifier_0"],"1000")
self.assertEqual(params["player_identifier_1"],"2000")
self.assertEqual(params["player_identifier_2"],"3000")
def test_gameEndedEncoding(self):
"""
Checks the Encoding of 17 Game_Ended Message by MessageParser.
Format:
type:report;status:17;params;
params:
timestamp : [millis] ;
winner : [0|1] ;
name_of_game : [name] ;
identifier_0 : [identifier] ;
identifier_1 : [identifier] ;
reason_for_game_end : [text] ;
"""
params={"status": "17", "timestamp": "1000", "winner": "0", "name_of_game": "FCB",
"identifier_0": "2000", "identifier_1": "3000", "reason_for_game_end": "player1 won"}
msg = MessageParser().encode("report",params)
msg=msg[2:].decode('utf-8') # decode it from bytes to string
        check = True
        for key, value in params.items():
            if (key + ":" + value) not in msg:
                check = False
        self.assertTrue(check)
def test_gameEndedDecoding(self):
"""
Checks the Decoding of 17 Game_Ended Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:17;timestamp:1000;winner:0;name_of_game:FCB;"+
"identifier_0:2000;identifier_1:3000;reason_for_game_end:player1 won")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 7)
self.assertEqual(params["status"],"17")
self.assertEqual(params["timestamp"],"1000")
self.assertEqual(params["winner"],"0")
self.assertEqual(params["name_of_game"],"FCB")
self.assertEqual(params["identifier_0"],"2000")
self.assertEqual(params["identifier_1"],"3000")
self.assertEqual(params["reason_for_game_end"],"player1 won")
def test_updateOwnFieldEncoding(self):
"""
Checks the Encoding of 13 Update_Own_Field Message by MessageParser.
Format:
type:report;status:13;params;
params:
was_special_attack : [true | false] ;
coordinate_x : [number] ;
coordinate_y : [number] ;
"""
params={"status": "13", "was_special_attack": "true", "coordinate_x": "12", "coordinate_y":"6"}
msg = MessageParser().encode("report",params)
msg=msg[2:].decode('utf-8') # decode it from bytes to string
        check = True
        for key, value in params.items():
            if (key + ":" + value) not in msg:
                check = False
        self.assertTrue(check)
def test_updateOwnFieldDecoding(self):
"""
Checks the Decoding of 13 Update_Own_Field Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:13;was_special_attack:true;"+
"coordinate_x:12;coordinate_y:6;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 4)
self.assertEqual(params["status"],"13")
self.assertEqual(params["was_special_attack"],"true")
self.assertEqual(params["coordinate_x"],"12")
self.assertEqual(params["coordinate_y"],"6")
def test_updateEnemyFieldEncoding(self):
"""
Checks the Encoding of 14 Update_Enemy_Field Message by MessageParser.
Format:
type:report;status:14;params;
params:
number_of_updated_fields : [number n] ;
field_0_x : [number]; ... ; field_n_x : [number];
field_0_y : [number]; ... ; field_n_y : [number];
field_0_condition : [free|damaged|undamaged] ; ... ; field_n_condition : [free|damaged|undamaged];
"""
params={"status": "14", "number_of_updated_fields": "5", "field_0_x": "3", "field_1_x": "2",
"field_2_x": "10","field_3_x": "6","field_4_x": "7","field_0_y": "4", "field_1_y": "7",
"field_2_y": "8","field_3_y": "11","field_4_y": "13","field_0_condition": "free",
"field_1_condition": "damaged","field_2_condition": "undamaged","field_3_condition": "free",
"field_4_condition": "free"}
msg = MessageParser().encode("report",params)
msg=msg[2:].decode('utf-8') # decode it from bytes to string
        check = True
        for key, value in params.items():
            if (key + ":" + value) not in msg:
                check = False
        self.assertTrue(check)
def test_updateEnemyFieldDecoding(self):
"""
Checks the Decoding of 14 Update_Enemy_Field Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:14;number_of_updated_fields:5;field_0_x:3;"+
"field_1_x:2;field_2_x:10;field_3_x:6;field_4_x:7;field_0_y:4;"+
"field_1_y:7;field_2_y:8;field_3_y:11;field_4_y:13;field_0_condition:free;"+
"field_1_condition:damaged;field_2_condition:undamaged;"+
"field_3_condition:free;field_4_condition:free")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 17)
self.assertEqual(params["status"],"14")
self.assertEqual(params["number_of_updated_fields"],"5")
self.assertEqual(params["field_0_x"],"3")
self.assertEqual(params["field_1_x"],"2")
self.assertEqual(params["field_2_x"],"10")
self.assertEqual(params["field_3_x"],"6")
self.assertEqual(params["field_4_x"],"7")
self.assertEqual(params["field_0_y"],"4")
self.assertEqual(params["field_1_y"],"7")
self.assertEqual(params["field_2_y"],"8")
self.assertEqual(params["field_3_y"],"11")
self.assertEqual(params["field_4_y"],"13")
self.assertEqual(params["field_0_condition"],"free")
self.assertEqual(params["field_1_condition"],"damaged")
self.assertEqual(params["field_2_condition"],"undamaged")
self.assertEqual(params["field_3_condition"],"free")
self.assertEqual(params["field_4_condition"],"free")
def test_chatBroadcastEncoding(self):
"""
Checks the Encoding of 15 Chat_Broadcast Message by MessageParser.
Format:
type:report;status:15;params;
params:
author_id : [identifier];
timestamp : [millis] ;
message_content : [text];
"""
params={"status": "15", "author_id": "2000", "timestamp": "1000", "message_content": "Hi All! How are you?"}
msg = MessageParser().encode("report",params)
msg=msg[2:].decode('utf-8') # decode it from bytes to string
        check = True
        for key, value in params.items():
            if (key + ":" + value) not in msg:
                check = False
        self.assertTrue(check)
def test_chatBroadcastDecoding(self):
"""
        Checks the Decoding of 15 Chat_Broadcast Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:15;author_id:2000;timestamp:1000;"+
"message_content:Hi All! How are you?")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 4)
self.assertEqual(params["status"],"15")
self.assertEqual(params["author_id"],"2000")
self.assertEqual(params["timestamp"],"1000")
self.assertEqual(params["message_content"],"Hi All! How are you?")
def test_beginShipPlacingEncoding(self):
"""
Checks the Encoding of 18 Begin_Ship_Placing Message by MessageParser.
Format:
type:report;status:18;
"""
msg = MessageParser().encode("report",{"status": "18"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
#print ("type"+msg.split("type",1)[1])
self.assertTrue(msg == "type:report;status:18;")
def test_beginShipPlacingDecoding(self):
"""
Checks the Decoding of 18 Begin_Ship_Placing Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:18;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"18")
def test_gameAbortedEncoding(self):
"""
        Checks the Encoding of 19 Game_Aborted Message by MessageParser.
Format:
type:report;status:19;
"""
msg = MessageParser().encode("report",{"status": "19"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:report;status:19;")
def test_gameAbortedDecoding(self):
"""
        Checks the Decoding of 19 Game_Aborted Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:19;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"19")
def test_successfulMoveEncoding(self):
"""
Checks the Encoding of 21 Successful_Move Message by MessageParser.
Format:
type:report;status:21;
"""
msg = MessageParser().encode("report",{"status": "21"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:report;status:21;")
def test_successfulMoveDecoding(self):
"""
Checks the Decoding of 21 Successful_Move Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:21;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"21")
def test_successfulAttackEncoding(self):
"""
Checks the Encoding of 22 Successful_Attack Message by MessageParser.
Format:
type:report;status:22;
"""
msg = MessageParser().encode("report",{"status": "22"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:report;status:22;")
def test_successfulAttackDecoding(self):
"""
Checks the Decoding of 22 Successful_Attack Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:22;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"22")
# 23 Surrender_Accepted
def test_surrenderAcceptedEncoding(self):
"""
Checks the Encoding of 23 Surrender_Accepted Message by MessageParser.
Format:
type:report;status:23;
"""
msg = MessageParser().encode("report",{"status": "23"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:report;status:23;")
def test_surrenderAcceptedDecoding(self):
"""
Checks the Decoding of 23 Surrender_Accepted Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:23;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"23")
def test_successfulSpecialAttackEncoding(self):
"""
Checks the Encoding of 24 Successful_Special_Attack Message by MessageParser.
Format:
type:report;status:24;
"""
msg = MessageParser().encode("report",{"status": "24"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:report;status:24;")
def test_successfulSpecialAttackDecoding(self):
"""
Checks the Decoding of 24 Successful_Special_Attack Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:24;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"24")
def test_successfulGameJoinEncoding(self):
"""
Checks the Encoding of 27 Successful_Game_Join Message by MessageParser.
Format:
type:report;status:27;
"""
msg = MessageParser().encode("report",{"status": "27"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:report;status:27;")
def test_successfulGameJoinDecoding(self):
"""
Checks the Decoding of 27 Successful_Game_Join Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:27;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"27")
def test_successfulGameCreateEncoding(self):
"""
Checks the Encoding of 28 Successful_Game_Create Message by MessageParser.
Format:
type:report;status:28;
"""
msg = MessageParser().encode("report",{"status": "28"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:report;status:28;")
def test_successfulGameCreateDecoding(self):
"""
Checks the Decoding of 28 Successful_Game_Create Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:28;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"28")
def test_successfulShipPlacementEncoding(self):
"""
Checks the Encoding of 29 Successful_Ship_Placement Message by MessageParser.
Format:
type:report;status:29;
"""
msg = MessageParser().encode("report",{"status": "29"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:report;status:29;")
def test_successfulShipPlacementDecoding(self):
"""
Checks the Decoding of 29 Successful_Ship_Placement Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:29;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"29")
def test_illegalMoveEncoding(self):
"""
Checks the Encoding of 31 Illegal_Move Message by MessageParser.
Format:
type:report;status:31;
"""
msg = MessageParser().encode("report",{"status": "31"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:report;status:31;")
def test_illegalMoveDecoding(self):
"""
Checks the Decoding of 31 Illegal_Move Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:31;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"31")
def test_illegalSpecialAttackEncoding(self):
"""
Checks the Encoding of 32 Illegal_Special_Attack Message by MessageParser.
Format:
type:report;status:32;
"""
msg = MessageParser().encode("report",{"status": "32"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:report;status:32;")
def test_illegalSpecialAttackDecoding(self):
"""
Checks the Decoding of 32 Illegal_Special_Attack Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:32;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"32")
def test_illegalNicknameEncoding(self):
"""
Checks the Encoding of 36 Illegal_Nickname Message by MessageParser.
Format:
type:report;status:36;
"""
msg = MessageParser().encode("report",{"status": "36"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:report;status:36;")
def test_illegalNicknameDecoding(self):
"""
Checks the Decoding of 36 Illegal_Nickname Message by MessageParser
"""
messageType, params = MessageParser().decode("type:report;status:36;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"36")
def test_illegalGameDefinitionEncoding(self):
"""
Checks the Encoding of 37 Illegal_Game_Definition Message by MessageParser.
Format:
type:report;status:37;
"""
msg = MessageParser().encode("report",{"status": "37"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:report;status:37;")
def test_illegalGameDefinitionDecoding(self):
"""
Checks the Decoding of 37 Illegal_Game_Definition Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:37;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"37")
def test_illegalShipPlacementEncoding(self):
"""
Checks the Encoding of 38 Illegal_Ship_Placement Message by MessageParser.
Format:
type:report;status:38;
"""
msg = MessageParser().encode("report",{"status": "38"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:report;status:38;")
def test_illegalShipPlacementDecoding(self):
"""
Checks the Decoding of 38 Illegal_Ship_Placement Message by MessageParser
"""
messageType, params = MessageParser().decode("type:report;status:38;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"38")
def test_illegalAttackEncoding(self):
"""
Checks the Encoding of 39 Illegal_Attack Message by MessageParser.
Format:
type:report;status:39;
"""
msg = MessageParser().encode("report",{"status": "39"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:report;status:39;")
def test_illegalAttackDecoding(self):
"""
Checks the Decoding of 39 Illegal_Attack Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:39;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"39")
    def test_messageNotRecognizedEncoding(self):
"""
Checks the Encoding of 40 Message_Not_Recognized Message by MessageParser.
Format:
type:report;status:40;
"""
msg = MessageParser().encode("report",{"status": "40"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:report;status:40;")
    def test_messageNotRecognizedDecoding(self):
"""
Checks the Decoding of 40 Message_Not_Recognized Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:40;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"40")
#41 Not_Your_Turn
def test_notYourTurnEncoding(self):
"""
Checks the Encoding of 41 Not_Your_Turn Message by MessageParser.
Format:
type:report;status:41;
"""
msg = MessageParser().encode("report",{"status": "41"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:report;status:41;")
def test_notYourTurnDecoding(self):
"""
Checks the Decoding of 41 Not_Your_Turn Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:41;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"41")
#43 Not_In_Any_Game
def test_notInAnyGameEncoding(self):
"""
Checks the Encoding of 43 Not_In_Any_Game Message by MessageParser.
Format:
type:report;status:43;
"""
msg = MessageParser().encode("report",{"status": "43"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:report;status:43;")
def test_notInAnyGameDecoding(self):
"""
Checks the Decoding of 43 Not_In_Any_Game Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:43;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"43")
#47 Game_Join_Denied
def test_gameJoinDeniedEncoding(self):
"""
Checks the Encoding of 47 Game_Join_Denied Message by MessageParser.
Format:
type:report;status:47;
"""
msg = MessageParser().encode("report",{"status": "47"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:report;status:47;")
def test_gameJoinDeniedDecoding(self):
"""
Checks the Decoding of 47 Game_Join_Denied Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:47;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"47")
#48 Game_Preparation_Ended
def test_gamePreparationEndedEncoding(self):
"""
Checks the Encoding of 48 Game_Preparation_Ended Message by MessageParser.
Format:
type:report;status:48;
"""
msg = MessageParser().encode("report",{"status": "48"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:report;status:48;")
def test_gamePreparationEndedDecoding(self):
"""
Checks the Decoding of 48 Game_Preparation_Ended Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:report;status:48;")
self.assertEqual(messageType, "report")
self.assertEqual(len(params), 1)
self.assertEqual(params["status"],"48")
def test_nickNameSetEncoding(self):
"""
Checks the Encoding of nickname_set Message by MessageParser.
Format:
type:nickname_set;name:[nickname];
"""
msg = MessageParser().encode("nickname_set",{"name": "Dari"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:nickname_set;name:Dari;")
def test_nickNameSetDecoding(self):
"""
Checks the Decoding of nickname_set Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:nickname_set;name:Dari;")
self.assertEqual(messageType, "nickname_set")
self.assertEqual(len(params), 1)
self.assertEqual(params["name"],"Dari")
#game_create
def test_gameCreateEncoding(self):
"""
Checks the Encoding of game_create Message by MessageParser.
Format:
type:game_create;name:[name];
"""
msg = MessageParser().encode("game_create",{"name": "FCB"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:game_create;name:FCB;")
def test_gameCreateDecoding(self):
"""
Checks the Decoding of game_create Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:game_create;name:FCB;")
self.assertEqual(messageType, "game_create")
self.assertEqual(len(params), 1)
self.assertEqual(params["name"],"FCB")
def test_gameJoinEncoding(self):
"""
Checks the Encoding of game_join Message by MessageParser.
Format:
type:game_join;name:[name];
"""
msg = MessageParser().encode("game_join",{"name": "FCB"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:game_join;name:FCB;")
def test_gameJoinDecoding(self):
"""
Checks the Decoding of game_join Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:game_join;name:FCB;")
self.assertEqual(messageType, "game_join")
self.assertEqual(len(params), 1)
self.assertEqual(params["name"],"FCB")
#game_abort
def test_gameAbortEncoding(self):
"""
Checks the Encoding of game_abort Message by MessageParser.
Format:
type:game_abort;
"""
msg = MessageParser().encode("game_abort",{})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:game_abort;")
def test_gameAbortDecoding(self):
"""
Checks the Decoding of game_abort Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:game_abort;")
self.assertEqual(messageType, "game_abort")
self.assertEqual(len(params), 0)
def test_boardInitEncoding(self):
"""
Checks the Encoding of board_init Message by MessageParser.
Format:
type:board_init;ship_0_x:[number];ship_0_y:[number];ship_0_direction:[W|E|S|N];...;
ship_9_x:[number];ship_9_y:[number];ship_9_direction:[W|E|S|N];
"""
params = {"ship_0_x": "5", "ship_0_y": "3", "ship_0_direction": "W",
"ship_1_x": "6", "ship_1_y": "3", "ship_1_direction": "N",
"ship_2_x": "4", "ship_2_y": "2", "ship_2_direction": "E",
"ship_3_x": "7", "ship_3_y": "3", "ship_3_direction": "N",
"ship_4_x": "8", "ship_4_y": "3", "ship_4_direction": "N",
"ship_5_x": "9", "ship_5_y": "3", "ship_5_direction": "N",
"ship_6_x": "12", "ship_6_y": "3", "ship_6_direction": "W",
"ship_7_x": "10", "ship_7_y": "4", "ship_7_direction": "N",
"ship_8_x": "10", "ship_8_y": "8", "ship_8_direction": "E",
"ship_9_x": "12", "ship_9_y": "8", "ship_9_direction": "S" }
msg = MessageParser().encode("board_init", params)
msg=msg[2:].decode('utf-8') # decode it from bytes to string
# order of parameters is not deterministic, so assert that every
# "key:value" pair appears somewhere in the encoded message instead
# of comparing against one fixed ordering
for key, value in params.items():
    self.assertIn(key + ":" + value, msg)
def test_boardInitDecoding(self):
"""
Checks the Decoding of board_init Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:board_init;"+
"ship_0_x:5;ship_0_y:3;ship_0_direction:W;"+
"ship_1_x:6;ship_1_y:3;ship_1_direction:N;"+
"ship_2_x:4;ship_2_y:2;ship_2_direction:E;"+
"ship_3_x:7;ship_3_y:3;ship_3_direction:N;"+
"ship_4_x:8;ship_4_y:3;ship_4_direction:N;"+
"ship_5_x:9;ship_5_y:3;ship_5_direction:N;"+
"ship_6_x:12;ship_6_y:3;ship_6_direction:W;"+
"ship_7_x:10;ship_7_y:4;ship_7_direction:N;"+
"ship_8_x:10;ship_8_y:8;ship_8_direction:E;"+
"ship_9_x:12;ship_9_y:8;ship_9_direction:S;")
self.assertEqual(messageType, "board_init")
self.assertEqual(len(params), 30)
self.assertEqual(params["ship_0_x"], "5") # integers are strings at this step
self.assertEqual(params["ship_0_y"], "3")
self.assertEqual(params["ship_0_direction"],"W")
self.assertEqual(params["ship_1_x"], "6")
self.assertEqual(params["ship_1_y"], "3")
self.assertEqual(params["ship_1_direction"],"N")
self.assertEqual(params["ship_2_x"], "4")
self.assertEqual(params["ship_2_y"], "2")
self.assertEqual(params["ship_2_direction"],"E")
self.assertEqual(params["ship_3_x"], "7")
self.assertEqual(params["ship_3_y"], "3")
self.assertEqual(params["ship_3_direction"],"N")
self.assertEqual(params["ship_4_x"], "8")
self.assertEqual(params["ship_4_y"], "3")
self.assertEqual(params["ship_4_direction"],"N")
self.assertEqual(params["ship_5_x"], "9")
self.assertEqual(params["ship_5_y"], "3")
self.assertEqual(params["ship_5_direction"],"N")
self.assertEqual(params["ship_6_x"], "12")
self.assertEqual(params["ship_6_y"], "3")
self.assertEqual(params["ship_6_direction"],"W")
self.assertEqual(params["ship_7_x"], "10")
self.assertEqual(params["ship_7_y"], "4")
self.assertEqual(params["ship_7_direction"],"N")
self.assertEqual(params["ship_8_x"], "10")
self.assertEqual(params["ship_8_y"], "8")
self.assertEqual(params["ship_8_direction"],"E")
self.assertEqual(params["ship_9_x"], "12")
self.assertEqual(params["ship_9_y"], "8")
self.assertEqual(params["ship_9_direction"],"S")
#surrender
def test_surrenderEncoding(self):
"""
Checks the Encoding of surrender Message by MessageParser.
Format:
type:surrender;
"""
msg = MessageParser().encode("surrender",{})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
self.assertTrue(msg == "type:surrender;")
def test_surrenderDecoding(self):
"""
Checks the Decoding of surrender Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:surrender;")
self.assertEqual(messageType, "surrender")
self.assertEqual(len(params), 0)
def test_moveEncoding(self):
"""
Checks the Encoding of move Message by MessageParser.
Format:
type:move;ship_id:[id];direction:[W|E|S|N];
"""
msg = MessageParser().encode("move", {"ship_id": "5", "direction": "W"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
# order of parameters is not deterministic
self.assertTrue(msg == "type:move;direction:W;ship_id:5;" or
msg== "type:move;ship_id:5;direction:W;")
def test_moveDecoding(self):
"""
Checks the Decoding of move Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:move;ship_id:5;direction:W;")
self.assertEqual(messageType, "move")
self.assertEqual(len(params), 2)
self.assertEqual(params["ship_id"], "5") # integers are strings at this step
self.assertEqual(params["direction"], "W")
def test_attackEncoding(self):
"""
Checks the Encoding of attack Message by MessageParser.
Format:
type:attack;coordinate_x:[number];coordinate_y:[number];
"""
msg = MessageParser().encode("attack", {"coordinate_x": "5", "coordinate_y": "14"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
# order of parameters is not deterministic
self.assertTrue(msg == "type:attack;coordinate_y:14;coordinate_x:5;" or
msg== "type:attack;coordinate_x:5;coordinate_y:14;")
def test_attackDecoding(self):
"""
Checks the Decoding of attack Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:attack;coordinate_x:5;coordinate_y:14;")
self.assertEqual(messageType, "attack")
self.assertEqual(len(params), 2)
self.assertEqual(params["coordinate_x"], "5") # integers are strings at this step
self.assertEqual(params["coordinate_y"], "14")
def test_attackSpecialEncoding(self):
"""
Checks the Encoding of special_attack Message by MessageParser.
Format:
type:special_attack;coordinate_x:[number];coordinate_y:[number];
"""
msg = MessageParser().encode("special_attack", {"coordinate_x": "5", "coordinate_y": "14"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
# order of parameters is not deterministic
self.assertTrue(msg == "type:special_attack;coordinate_y:14;coordinate_x:5;" or
msg== "type:special_attack;coordinate_x:5;coordinate_y:14;")
def test_attackSpecialDecoding(self):
"""
Checks the Decoding of special_attack Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:special_attack;coordinate_x:5;coordinate_y:14;")
self.assertEqual(messageType, "special_attack")
self.assertEqual(len(params), 2)
self.assertEqual(params["coordinate_x"], "5") # integers are strings at this step
self.assertEqual(params["coordinate_y"], "14")
def test_chatSendEncoding(self):
"""
Checks the Encoding of chat_send Message by MessageParser.
Format:
type:chat_send;text:[text];
"""
msg=MessageParser().encode("chat_send",{"text": "Hello, How are you?"})
msg=msg[2:].decode('utf-8') # decode it from bytes to string
# order of parameters is not deterministic
self.assertTrue(msg == "type:chat_send;text:Hello, How are you?;")
def test_chatSendDecoding(self):
"""
Checks the Decoding of chat_send Message by MessageParser.
"""
messageType, params = MessageParser().decode("type:chat_send;text:Hello, How are you?;")
self.assertEqual(messageType, "chat_send")
self.assertEqual(len(params), 1)
self.assertEqual(params["text"], "Hello, How are you?") # integers are strings at this step
if __name__ == "__main__":
unittest.main()
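# ---------------------------------------------------------------------------
# Hedged reference sketch (illustration only, not the project's actual
# parser). The suite above implies a wire format of "type:<type>;key:value;
# ...;" encoded as UTF-8, with two leading bytes that every test skips via
# msg[2:]; a 2-byte big-endian length header is *assumed* here, since the
# assertions never pin those bytes down. A minimal class consistent with
# every check above could look like this hypothetical _ReferenceMessageParser:
class _ReferenceMessageParser(object):
    def encode(self, messageType, params):
        payload = "type:%s;" % messageType
        for key, value in params.items():
            payload += "%s:%s;" % (key, value)
        data = payload.encode('utf-8')
        # assumed framing: 2-byte big-endian length prefix, skipped by msg[2:]
        return len(data).to_bytes(2, 'big') + data

    def decode(self, message):
        # split on ';', drop the empty trailing field, then split each
        # "key:value" on the first ':' so values may themselves contain colons
        fields = [f for f in message.split(";") if f]
        params = dict(f.split(":", 1) for f in fields)
        return params.pop("type"), params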
from common_fixtures import * # NOQA
logger = logging.getLogger(__name__)
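# This module appears to exercise Rancher/Cattle "external services":
# services that point at IP addresses (or a CNAME) outside the environment.
# Each test builds an environment via create_env_with_ext_svc(), drives the
# activate/link lifecycle in a different order, and then verifies resolution
# with validate_external_service().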
def activate_environment_with_external_services(
admin_client, client, service_scale, port):
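# shared setup: create an environment with a service linked to an external
# service, activate both, wait for "active", and hand back
# (env, service, ext_service, con_list) for the individual tests below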
env, service, ext_service, con_list = create_env_with_ext_svc(
client, service_scale, port)
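# activate() is asynchronous: both activations are requested and the
# service link is added before waiting for either resource to settle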
service.activate()
ext_service.activate()
service.addservicelink(serviceLink={"serviceId": ext_service.id})
service = client.wait_success(service, 120)
ext_service = client.wait_success(ext_service, 120)
assert service.state == "active"
assert ext_service.state == "active"
validate_add_service_link(admin_client, service, ext_service)
return env, service, ext_service, con_list
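# Hedged sketch: activate_svc(), link_svc(), create_env_with_ext_svc() and
# the validate_* helpers come from common_fixtures and are not shown here.
# Inferred from the inline activate/wait/assert pattern above, activate_svc
# plausibly behaves like this hypothetical stand-in (illustration only):
def _assumed_activate_svc(client, svc, timeout=120):
    svc = svc.activate()                     # request asynchronous activation
    svc = client.wait_success(svc, timeout)  # poll until the transition settles
    assert svc.state == "active"
    return svc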
def test_extservice_activate_svc_activate_external_svc_link(
admin_client, client):
port = "3001"
service_scale = 2
env, service, ext_service, con_list = \
activate_environment_with_external_services(
admin_client, client, service_scale, port)
validate_external_service(
admin_client, service, [ext_service], port, con_list)
con_list.append(env)
delete_all(client, con_list)
def test_extservice_activate_external_svc_link_activate_svc(
admin_client, client):
port = "3002"
service_scale = 2
env, service, ext_service, con_list = create_env_with_ext_svc(
client, service_scale, port)
ext_service = activate_svc(client, ext_service)
link_svc(admin_client, service, [ext_service])
service = activate_svc(client, service)
validate_external_service(
admin_client, service, [ext_service], port, con_list)
con_list.append(env)
delete_all(client, con_list)
def test_extservice_activate_svc_link_activate_external_svc(
admin_client, client):
port = "3003"
service_scale = 1
env, service, ext_service, con_list = create_env_with_ext_svc(
client, service_scale, port)
service = activate_svc(client, service)
link_svc(admin_client, service, [ext_service])
ext_service = activate_svc(client, ext_service)
validate_add_service_link(admin_client, service, ext_service)
validate_external_service(
admin_client, service, [ext_service], port, con_list)
con_list.append(env)
delete_all(client, con_list)
def test_extservice_link_activate_external_svc_activate_svc(
admin_client, client):
port = "3004"
service_scale = 1
env, service, ext_service, con_list = create_env_with_ext_svc(
client, service_scale, port)
link_svc(admin_client, service, [ext_service])
ext_service = activate_svc(client, ext_service)
service = activate_svc(client, service)
validate_external_service(
admin_client, service, [ext_service], port, con_list)
con_list.append(env)
delete_all(client, con_list)
def test_extservice_link_activate_svc_activate_external_svc(
admin_client, client):
port = "3005"
service_scale = 1
env, service, ext_service, con_list = create_env_with_ext_svc(
client, service_scale, port)
link_svc(admin_client, service, [ext_service])
service = activate_svc(client, service)
ext_service = activate_svc(client, ext_service)
validate_external_service(
admin_client, service, [ext_service], port, con_list)
con_list.append(env)
delete_all(client, con_list)
def test_extservice_link_when_services_still_activating(admin_client, client):
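# unlike the helper-based tests above, this links the two services while
# both activations are still in flight, then verifies the link afterwards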
port = "3006"
service_scale = 1
env, service, ext_service, con_list = create_env_with_ext_svc(
client, service_scale, port)
service.activate()
ext_service.activate()
service.addservicelink(serviceLink={"serviceId": ext_service.id})
service = client.wait_success(service, 120)
ext_service = client.wait_success(ext_service, 120)
assert service.state == "active"
assert ext_service.state == "active"
validate_add_service_link(admin_client, service, ext_service)
validate_external_service(
admin_client, service, [ext_service], port, con_list)
con_list.append(env)
delete_all(client, con_list)
def test_extservice_service_scale_up(admin_client, client):
port = "3007"
service_scale = 1
final_service_scale = 3
env, service, ext_service, con_list = \
activate_environment_with_external_services(
admin_client, client, service_scale, port)
validate_external_service(admin_client, service,
[ext_service], port, con_list)
service = client.update(service, scale=final_service_scale,
name=service.name)
service = client.wait_success(service, 120)
assert service.state == "active"
assert service.scale == final_service_scale
validate_external_service(
admin_client, service, [ext_service], port, con_list)
con_list.append(env)
delete_all(client, con_list)
def test_extservice_services_scale_down(admin_client, client):
port = "3008"
service_scale = 3
final_service_scale = 1
env, service, ext_service, con_list = \
activate_environment_with_external_services(
admin_client, client, service_scale, port)
validate_external_service(admin_client, service,
[ext_service], port, con_list)
service = client.update(service, scale=final_service_scale,
name=service.name)
service = client.wait_success(service, 120)
assert service.state == "active"
assert service.scale == final_service_scale
validate_external_service(
admin_client, service, [ext_service], port, con_list)
con_list.append(env)
delete_all(client, con_list)
def test_extservice_ext_services_deactivate_activate(admin_client, client):
port = "3014"
service_scale = 1
env, service, ext_service, con_list = \
activate_environment_with_external_services(
admin_client, client, service_scale, port)
validate_external_service(
admin_client, service, [ext_service], port, con_list)
ext_service = ext_service.deactivate()
ext_service = client.wait_success(ext_service, 120)
assert ext_service.state == "inactive"
ext_service = ext_service.activate()
ext_service = client.wait_success(ext_service, 120)
assert ext_service.state == "active"
validate_external_service(
admin_client, service, [ext_service], port, con_list)
con_list.append(env)
delete_all(client, con_list)
def test_extservice_service_deactivate_activate(admin_client, client):
port = "3015"
service_scale = 1
env, service, ext_service, con_list = \
activate_environment_with_external_services(
admin_client, client, service_scale, port)
validate_external_service(admin_client, service, [ext_service],
port, con_list)
service = service.deactivate()
service = client.wait_success(service, 120)
assert service.state == "inactive"
wait_until_instances_get_stopped(admin_client, service)
service = service.activate()
service = client.wait_success(service, 120)
assert service.state == "active"
validate_external_service(admin_client, service, [ext_service],
port, con_list)
con_list.append(env)
delete_all(client, con_list)
def test_extservice_deactivate_activate_environment(admin_client, client):
port = "3016"
service_scale = 1
env, service, ext_service, con_list = \
activate_environment_with_external_services(
admin_client, client, service_scale, port)
validate_external_service(
admin_client, service, [ext_service], port, con_list)
env = env.deactivateservices()
service = client.wait_success(service, 120)
assert service.state == "inactive"
ext_service = client.wait_success(ext_service, 120)
assert ext_service.state == "inactive"
wait_until_instances_get_stopped(admin_client, service)
env = env.activateservices()
service = client.wait_success(service, 120)
assert service.state == "active"
ext_service = client.wait_success(ext_service, 120)
assert ext_service.state == "active"
validate_external_service(admin_client, service, [ext_service],
port, con_list)
con_list.append(env)
delete_all(client, con_list)
def test_extservice_services_delete_service_add_service(admin_client, client):
port = "3018"
service_scale = 2
env, service, ext_service, con_list = \
activate_environment_with_external_services(
admin_client, client, service_scale, port)
validate_external_service(
admin_client, service, [ext_service], port, con_list)
# Delete Service
service = client.wait_success(client.delete(service))
assert service.state == "removed"
validate_remove_service_link(admin_client, service, ext_service)
port1 = "30180"
# Add another service and link to external service
launch_config = {"imageUuid": SSH_IMAGE_UUID,
"ports": [port1+":22/tcp"]}
random_name = random_str()
service_name = random_name.replace("-", "")
service1 = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config,
scale=1)
service1 = client.wait_success(service1)
assert service1.state == "inactive"
service1 = service1.activate()
service1 = client.wait_success(service1, 120)
assert service1.state == "active"
service1.addservicelink(serviceLink={"serviceId": ext_service.id})
validate_add_service_link(admin_client, service1, ext_service)
validate_external_service(admin_client, service1,
[ext_service], port1, con_list)
con_list.append(env)
delete_all(client, con_list)
def test_extservice_delete_and_add_ext_service(admin_client, client):
port = "3019"
service_scale = 2
env, service, ext_service, con_list = \
activate_environment_with_external_services(
admin_client, client, service_scale, port)
validate_external_service(
admin_client, service, [ext_service], port, con_list)
# Delete external service
ext_service = client.wait_success(client.delete(ext_service))
assert ext_service.state == "removed"
validate_remove_service_link(admin_client, service, ext_service)
# Add another external service and link the service to this newly created
# external service
c1 = client.create_container(name=random_str(), imageUuid=WEB_IMAGE_UUID)
c2 = client.create_container(name=random_str(), imageUuid=WEB_IMAGE_UUID)
c1 = client.wait_success(c1, 120)
assert c1.state == "running"
c2 = client.wait_success(c2, 120)
assert c2.state == "running"
con_list = [c1, c2]
ips = [c1.primaryIpAddress, c2.primaryIpAddress]
# Create external Service
random_name = random_str()
ext_service_name = random_name.replace("-", "")
ext_service1 = client.create_externalService(
name=ext_service_name, environmentId=env.id, externalIpAddresses=ips)
ext_service1 = client.wait_success(ext_service1)
ext_service1 = activate_svc(client, ext_service1)
service.addservicelink(serviceLink={"serviceId": ext_service1.id})
validate_add_service_link(admin_client, service, ext_service1)
validate_external_service(admin_client, service, [ext_service1], port,
con_list)
con_list.append(env)
delete_all(client, con_list)
def test_extservice_services_stop_start_instance(admin_client, client):
port = "3020"
service_scale = 2
env, service, ext_service, con_list = \
activate_environment_with_external_services(
admin_client, client, service_scale, port)
validate_external_service(admin_client, service,
[ext_service], port, con_list)
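# service instances are assumed to follow the "<env>_<service>_<index>"
# naming convention, so this resolves the second instance of the service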
container_name = env.name + "_" + service.name + "_2"
containers = client.list_container(name=container_name)
assert len(containers) == 1
service_instance = containers[0]
# Stop service instance
service_instance = client.wait_success(service_instance.stop(), 120)
service = client.wait_success(service)
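# after one instance is stopped, the service is expected to reconcile back
# to its requested scale before the external links are re-validated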
wait_for_scale_to_adjust(admin_client, service)
validate_external_service(admin_client, service, [ext_service],
port, con_list)
con_list.append(env)
delete_all(client, con_list)
def test_extservice_services_restart_instance(admin_client, client):
port = "3021"
service_scale = 2
env, service, ext_service, con_list = \
activate_environment_with_external_services(
admin_client, client, service_scale, port)
validate_external_service(
admin_client, service, [ext_service], port, con_list)
container_name = env.name + "_" + service.name + "_2"
containers = client.list_container(name=container_name)
assert len(containers) == 1
service_instance = containers[0]
# Restart external instance
service_instance = client.wait_success(service_instance.restart(), 120)
assert service_instance.state == 'running'
validate_external_service(admin_client, service,
[ext_service], port, con_list)
con_list.append(env)
delete_all(client, con_list)
def test_extservice_add_and_delete_ips(admin_client, client):
port = "3023"
service_scale = 2
env, service, ext_service, con_list = \
activate_environment_with_external_services(admin_client, client,
service_scale, port)
validate_external_service(
admin_client, service, [ext_service], port, con_list)
# Update external Service to add one more ip
c1 = client.create_container(name=random_str(), imageUuid=WEB_IMAGE_UUID)
c1 = client.wait_success(c1, 120)
assert c1.state == "running"
ips = [con_list[0].primaryIpAddress, con_list[1].primaryIpAddress,
c1.primaryIpAddress]
con_list.append(c1)
ext_service = client.update(
ext_service, name=ext_service.name, externalIpAddresses=ips)
ext_service = client.wait_success(ext_service, 120)
validate_external_service(admin_client, service, [ext_service], port,
con_list)
# Update external Service to remove one of the existing ips
ips = [con_list[1].primaryIpAddress, c1.primaryIpAddress]
con_list.pop(0)
ext_service = client.update(
ext_service, name=ext_service.name, externalIpAddresses=ips)
ext_service = client.wait_success(ext_service, 120)
validate_external_service(admin_client, service, [ext_service], port,
con_list)
con_list.append(env)
delete_all(client, con_list)
def test_extservice_with_cname(admin_client, client):
port = "3024"
service_scale = 2
env, service, ext_service, con_list = create_env_with_ext_svc(
client, service_scale, port, True)
ext_service = activate_svc(client, ext_service)
link_svc(admin_client, service, [ext_service])
service = activate_svc(client, service)
validate_external_service_for_hostname(
admin_client, service, [ext_service], port)
delete_all(client, [env])